filename (string, 4-198 chars) | content (string, 25-939k chars) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 class) | constargjson (string, 2-3.9k chars) | lang (string, 3 classes) | constargcount (float64, 0-129, nullable) | variableargcount (float64, 0, nullable) | sentence (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
tools/precommit.py | #!/usr/bin/env python
# Copyright 2016 Samsung Electronics Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import os
from common_py import path
from common_py.system.filesystem import FileSystem as fs
from common_py.system.executor import Executor as ex
from common_py.system.platform import Platform
from check_tidy import check_tidy
TESTS=['host', 'rpi2', 'nuttx', 'misc'] # TODO: support darwin
BUILDTYPES=['debug', 'release']
def parse_option():
parser = argparse.ArgumentParser(
description='IoT.js pre-commit script.',
epilog='If no arguments are given, runs full test.')
parser.add_argument('--test', choices=TESTS, action='append')
parser.add_argument('--buildtype', choices=BUILDTYPES, action='append')
option = parser.parse_args(sys.argv[1:])
if option.test is None:
option.test = TESTS
if option.buildtype is None:
option.buildtype = BUILDTYPES
return option
def setup_nuttx_root(nuttx_root):
# Step 1
fs.maybe_make_directory(nuttx_root)
fs.chdir(nuttx_root)
if fs.exists('nuttx'):
fs.chdir('nuttx')
ex.check_run_cmd('git', ['pull'])
fs.chdir('..')
else:
ex.check_run_cmd('git', ['clone',
'https://bitbucket.org/nuttx/nuttx.git'])
if fs.exists('apps'):
fs.chdir('apps')
ex.check_run_cmd('git', ['pull'])
fs.chdir('..')
else:
ex.check_run_cmd('git', ['clone',
'https://bitbucket.org/nuttx/apps.git'])
# Step 2
fs.maybe_make_directory(fs.join(nuttx_root, 'apps', 'system', 'iotjs'))
for file in fs.listdir(fs.join(path.PROJECT_ROOT,
'targets', 'nuttx-stm32f4', 'app')):
fs.copy(fs.join(path.PROJECT_ROOT,'targets','nuttx-stm32f4','app',file),
fs.join(nuttx_root, 'apps', 'system', 'iotjs'))
# Step 3
fs.chdir(fs.join(nuttx_root, 'nuttx', 'tools'))
ex.check_run_cmd('./configure.sh', ['stm32f4discovery/usbnsh'])
fs.chdir('..')
fs.copy(fs.join(path.PROJECT_ROOT,
'targets',
'nuttx-stm32f4',
'nuttx',
'.config.travis'),
'.config')
def build_nuttx(nuttx_root, buildtype):
fs.chdir(fs.join(nuttx_root, 'nuttx'))
try:
code = 0
if buildtype == "release":
code = ex.run_cmd('make',
['IOTJS_ROOT_DIR=' + path.PROJECT_ROOT, 'R=1'])
else:
code = ex.run_cmd('make',
['IOTJS_ROOT_DIR=' + path.PROJECT_ROOT, 'R=0'])
if code == 0:
return True
else:
print('Failed to build nuttx')
return False
except OSError as err:
print('Failed to build nuttx: %s' % err)
return False
def build(buildtype, args=[]):
fs.chdir(path.PROJECT_ROOT)
ex.check_run_cmd('./tools/build.py', args + ['--buildtype=' + buildtype])
option = parse_option()
for test in option.test:
if test == "host":
for buildtype in option.buildtype:
build(buildtype)
elif test == "rpi2":
for buildtype in option.buildtype:
build(buildtype, ['--target-arch=arm',
'--target-board=rpi2'])
elif test == "nuttx":
for buildtype in option.buildtype:
nuttx_root=fs.join(path.PROJECT_ROOT, 'deps', 'nuttx')
setup_nuttx_root(nuttx_root)
build_nuttx(nuttx_root, buildtype)
build(buildtype, ['--target-arch=arm',
'--target-os=nuttx',
'--nuttx-home=' + fs.join(nuttx_root, 'nuttx'),
'--target-board=stm32f4dis',
'--jerry-heaplimit=78'])
if not build_nuttx(nuttx_root, buildtype):
ex.fail('nuttx ' + buildtype + ' build failed')
elif test == "misc":
args = []
if os.getenv('TRAVIS') != None:
args = ['--travis']
ex.check_run_cmd('tools/check_signed_off.sh', args)
if not check_tidy(path.PROJECT_ROOT):
ex.fail("Failed tidy check")
build("debug", ['--no-snapshot', '--jerry-lto'])
build("debug", ['--iotjs-minimal-profile'])
| [] | [] | ["TRAVIS"] | [] | ["TRAVIS"] | python | 1 | 0 | |
test/e2e-apiserver-test/addon_test.go | /*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e_apiserver_test
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"os"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
"github.com/oam-dev/kubevela/pkg/addon"
apis "github.com/oam-dev/kubevela/pkg/apiserver/rest/apis/v1"
)
const baseURL = "http://127.0.0.1:8000"
func post(path string, body interface{}) *http.Response {
b, err := json.Marshal(body)
Expect(err).Should(BeNil())
res, err := http.Post(baseURL+path, "application/json", bytes.NewBuffer(b))
Expect(err).Should(BeNil())
return res
}
func get(path string) *http.Response {
res, err := http.Get(baseURL + path)
Expect(err).Should(BeNil())
return res
}
var _ = Describe("Test addon rest api", func() {
createReq := apis.CreateAddonRegistryRequest{
Name: "test-addon-registry-1",
Git: &addon.GitAddonSource{
URL: "https://github.com/oam-dev/catalog",
Path: "addons/",
Token: os.Getenv("GITHUB_TOKEN"),
},
}
It("should add a registry and list addons from it", func() {
defer GinkgoRecover()
By("add registry")
createRes := post("/api/v1/addon_registries", createReq)
Expect(createRes).ShouldNot(BeNil())
Expect(createRes.Body).ShouldNot(BeNil())
Expect(createRes.StatusCode).Should(Equal(200))
defer createRes.Body.Close()
var rmeta apis.AddonRegistryMeta
err := json.NewDecoder(createRes.Body).Decode(&rmeta)
Expect(err).Should(BeNil())
Expect(rmeta.Name).Should(Equal(createReq.Name))
Expect(rmeta.Git).Should(Equal(createReq.Git))
By("list addons")
listRes := get("/api/v1/addons/")
defer listRes.Body.Close()
var lres apis.ListAddonResponse
err = json.NewDecoder(listRes.Body).Decode(&lres)
Expect(err).Should(BeNil())
Expect(lres.Addons).ShouldNot(BeZero())
firstAddon := lres.Addons[0]
Expect(firstAddon.Name).Should(Equal("example"))
})
PIt("should enable and disable an addon", func() {
defer GinkgoRecover()
req := apis.EnableAddonRequest{
Args: map[string]string{
"example": "test-args",
},
}
testAddon := "example"
res := post("/api/v1/addons/"+testAddon+"/enable", req)
Expect(res).ShouldNot(BeNil())
Expect(res.StatusCode).Should(Equal(200))
Expect(res.Body).ShouldNot(BeNil())
defer res.Body.Close()
var statusRes apis.AddonStatusResponse
err := json.NewDecoder(res.Body).Decode(&statusRes)
Expect(err).Should(BeNil())
Expect(statusRes.Phase).Should(Equal(apis.AddonPhaseEnabling))
// Wait for addon enabled
period := 30 * time.Second
timeout := 2 * time.Minute
Eventually(func() error {
res = get("/api/v1/addons/" + testAddon + "/status")
err = json.NewDecoder(res.Body).Decode(&statusRes)
Expect(err).Should(BeNil())
if statusRes.Phase == apis.AddonPhaseEnabled {
return nil
}
fmt.Println(statusRes.Phase)
return errors.New("not ready")
}, timeout, period).Should(BeNil())
res = post("/api/v1/addons/"+testAddon+"/disable", req)
Expect(res).ShouldNot(BeNil())
Expect(res.StatusCode).Should(Equal(200))
Expect(res.Body).ShouldNot(BeNil())
err = json.NewDecoder(res.Body).Decode(&statusRes)
Expect(err).Should(BeNil())
})
It("should delete test registry", func() {
defer GinkgoRecover()
deleteReq, err := http.NewRequest(http.MethodDelete, baseURL+"/api/v1/addon_registries/"+createReq.Name, nil)
Expect(err).Should(BeNil())
deleteRes, err := http.DefaultClient.Do(deleteReq)
Expect(err).Should(BeNil())
Expect(deleteRes).ShouldNot(BeNil())
Expect(deleteRes.StatusCode).Should(Equal(200))
})
})
| ["\"GITHUB_TOKEN\""] | [] | ["GITHUB_TOKEN"] | [] | ["GITHUB_TOKEN"] | go | 1 | 0 | |
cmd/completion.go | package cmd
import (
"fmt"
"log"
"os"
"os/exec"
"strings"
"github.com/spf13/cobra"
)
// var completionOut string
var completionShell string
func init() {
// completionCmd.Flags().StringVarP(&completionOut, "out", "o", "", "Output directory to write to")
completionCmd.Flags().StringVarP(&completionShell, "shell", "s", os.Getenv("SHELL"), "Your $SHELL, default read it from system environment $SHELL")
completionCmd.MarkFlagFilename("shell")
rootCmd.AddCommand(completionCmd)
}
var completionCmd = &cobra.Command{
Use: "completion",
Short: "Generating bash/zsh completions",
Long: `Generating bash/zsh completions`,
Run: func(cmd *cobra.Command, args []string) {
oss := strings.Split(completionShell, "/")
switch oss[len(oss)-1] {
case "zsh":
fmt.Println("your shell is zsh")
if err := rootCmd.GenZshCompletionFile("/usr/share/zsh/functions/Completion/_godev"); err != nil {
log.Fatal(err)
}
fmt.Println(`Generating success~ if completions can't be used, you can run this command: "compinit"`)
case "bash":
fmt.Println("your shell is bash")
if err := rootCmd.GenBashCompletionFile("/etc/bash_completion.d/godev"); err != nil {
log.Fatal(err)
}
command := exec.Command("bash", `/etc/bash_completion`)
if err := command.Run(); err != nil {
fmt.Printf("%#v", err)
log.Fatal(err)
}
fmt.Println(`Generating success~ if completions can't be used, you can run this command: ". /etc/bash_completion"`)
default:
fmt.Println("This Shell is not supported")
}
},
}
| ["\"SHELL\""] | [] | ["SHELL"] | [] | ["SHELL"] | go | 1 | 0 | |
src/data/get_raw_data.py | # -*- coding: utf-8 -*-
import os
import kaggle as kg
import logging
from dotenv import load_dotenv, find_dotenv
def get_logger():
'''
get the logger
'''
logger = logging.getLogger(__name__)
logger.info('getting raw data')
def download_data_from_kaggle():
'''
use kaggle api to download files with username and key from .env file
'''
# find .env automatically by walking up directories until it's found
dotenv_path = find_dotenv()
# load up the entries as environmental variables
load_dotenv(dotenv_path)
# get kaggle username and password from .env file
os.environ['KAGGLE_USERNAME'] = os.environ.get("KAGGLE_USERNAME")
os.environ['KAGGLE_KEY'] = os.environ.get("KAGGLE_PASSWORD")
# authenticate with kaggle api and download
kg.api.authenticate()
os.system('kaggle competitions download -c "titanic" -p ..\\data\\raw')
def main(project_dir):
'''
main method
'''
get_logger()
download_data_from_kaggle()
if __name__ == '__main__':
# getting the root directory
project_dir = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)
# setup logger
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=log_fmt)
main(project_dir)
| [] | [] | ["KAGGLE_KEY", "KAGGLE_PASSWORD", "KAGGLE_USERNAME"] | [] | ["KAGGLE_KEY", "KAGGLE_PASSWORD", "KAGGLE_USERNAME"] | python | 3 | 0 | |
httpclient.py | #=======================================================================================================================
#
# python3 httpclient.py --running=redis runs in Redis task mode: the jobs registered in
# reloadRedis.initJobs() are executed on a schedule, with the interval (in seconds) given by the
# RELOADREDISTIME parameter in settings.
# Logs are written to the startup directory, one file per port; Redis-mode runs log to a separate file.
#=======================================================================================================================
__author__ = 'menghui'
# coding=utf-8
import os
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.httpclient
import mgr.redistip
import mgr.url
import service.url
import ResSearch.url
import clouds.url
import settings
import traceback
from tornado.options import define, options
import jobs.reloadRedis as reloadRedis
import service.utils as utils
import logging
import time
import pymssql
from DBUtils.PooledDB import PooledDB
define("port", default=8000, help="run on the given port", type=int)
define("running", default="reload", type=str)
os.environ['NLS_LANG'] = 'SIMPLIFIED CHINESE_CHINA.UTF8'
def callback():
print("reload redis")
def mainrun():
# ==================================================================================================================
# Configure logging
# AU: cloud service log
# SR: data service log
format = '%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s'
formatter = logging.Formatter(format)
logging.basicConfig(level=logging.DEBUG,
format=format,
datefmt='%a, %d %b %Y %H:%M:%S',
filename='root' + str(options.port) + '.log',
filemode='w')
infoHandler = logging.FileHandler("logs/infoLog" + str(options.port) + '.log', 'a')
infoHandler.setLevel(logging.INFO)
infoHandler.setFormatter(formatter)
settings.infoLogger = logging.getLogger("infoLog" + str(options.port) + '.log')
settings.infoLogger.setLevel(logging.INFO)
settings.infoLogger.addHandler(infoHandler)
errorHandler = logging.FileHandler("logs/errorLog" + str(options.port) + '.log', 'a')
errorHandler.setLevel(logging.ERROR)
errorHandler.setFormatter(formatter)
settings.errorLogger = logging.getLogger("errorLog" + str(options.port) + '.log')
settings.errorLogger.setLevel(logging.ERROR)
settings.errorLogger.addHandler(errorHandler)
debugHandler = logging.FileHandler("logs/debugLog" + str(options.port) + '.log', 'a')
debugHandler.setLevel(logging.DEBUG)
debugHandler.setFormatter(formatter)
settings.debugLogger = logging.getLogger("debugLog" + str(options.port) + '.log')
settings.debugLogger.setLevel(logging.DEBUG)
settings.debugLogger.addHandler(debugHandler)
urls = [
]
urls += mgr.url.urls
urls += service.url.urls
urls += ResSearch.url.urls
urls += clouds.url.urls
tornado.options.parse_command_line()
settings.infoLogger.info("===========================================")
settings.infoLogger.info(" rEstgAng starting")
settings.infoLogger.info(" V1.0b")
settings.infoLogger.info("===========================================")
settings.infoLogger.info("Listening port:%s" % options.port)
settings.infoLogger.info("")
settings.infoLogger.info("DATABASE SETTING:")
utils.reloadDataSource()
ids = settings.DATABASES.keys()
for key in ids:
item = settings.DATABASES[key]
settings.infoLogger.info(" ID:%s" % key)
settings.infoLogger.info(" host:%s:%s/%s" % (item["HOST"], item["PORT"], item["NAME"]))
settings.infoLogger.info(" username:%s" % item["USER"])
app = tornado.web.Application(
debug=True,
handlers=urls,
template_path=os.path.join(os.path.dirname(__file__), "templates"),
static_path=os.path.join(os.path.dirname(__file__), "static"),
autoreload=True
)
if not(os.path.exists(settings.MSGLOGPATH)):
print("message log file path not exist!"+settings.MSGLOGPATH)
return
reboottimes = 0
while True:
try:
http_server = tornado.httpserver.HTTPServer(app)
http_server.listen(options.port)
if options.running != "run":
reloadRedis.initJobs()
tornado.ioloop.IOLoop.instance().start()
except Exception as e:
reboottimes += 1
if reboottimes > 3:
settings.errorLogger.error("服务引擎发生致命错误,重新启动三次后仍然错误,系统退出")
return
settings.errorLogger.error("服务引擎发生致命错误,3秒后系统尝试重新启动")
settings.errorLogger.error(traceback.format_exc())
time.sleep(3)
if __name__ == "__main__":
mainrun()
#db_conn=pymssql.connect(server="192.168.1.128", user="sa", password="qweqwe1", database="stu", charset="GBK")
# args = (0, 0, 0,5, 0, 0, None)
# conn_kwargs = {"host": "192.168.1.128" , "user": "sa", "password": "qweqwe1","database": "stu","charset":"GBK"}
# pool=PooledDB(pymssql, *args, **conn_kwargs)
# db_conn = pool.connection()
# cur = db_conn.cursor()
# cur.execute("select * from jeda_menu")
# resList = cur.fetchall()
# for i in resList:
# print(i)
# db_conn.close()
| [] | [] | ["NLS_LANG"] | [] | ["NLS_LANG"] | python | 1 | 0 | |
vendor/github.com/ugorji/go/codec/gen.go | // Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a MIT license found in the LICENSE file.
package codec
import (
"bytes"
"encoding/base64"
"errors"
"fmt"
"go/format"
"io"
"io/ioutil"
"math/rand"
"os"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"sync"
"text/template"
"time"
"unicode"
"unicode/utf8"
)
// ---------------------------------------------------
// codecgen supports the full cycle of reflection-based codec:
// - RawExt
// - Builtins
// - Extensions
// - (Binary|Text|JSON)(Unm|M)arshal
// - generic by-kind
//
// This means that, for dynamic things, we MUST use reflection to at least get the reflect.Type.
// In those areas, we try to only do reflection or interface-conversion when NECESSARY:
// - Extensions, only if Extensions are configured.
//
// However, codecgen doesn't support the following:
// - Canonical option. (codecgen IGNORES it currently)
// This is just because it has not been implemented.
//
// During encode/decode, Selfer takes precedence.
// A type implementing Selfer will know how to encode/decode itself statically.
//
// The following field types are supported:
// array: [n]T
// slice: []T
// map: map[K]V
// primitive: [u]int[n], float(32|64), bool, string
// struct
//
// ---------------------------------------------------
// Note that a Selfer cannot call (e|d).(En|De)code on itself,
// as this will cause a circular reference, as (En|De)code will call Selfer methods.
// Any type that implements Selfer must implement completely and not fallback to (En|De)code.
//
// In addition, code in this file manages the generation of fast-path implementations of
// encode/decode of slices/maps of primitive keys/values.
//
// Users MUST re-generate their implementations whenever the code shape changes.
// The generated code will panic if it was generated with a version older than the supporting library.
// ---------------------------------------------------
//
// codec framework is very feature rich.
// When encoding or decoding into an interface, it depends on the runtime type of the interface.
// The type of the interface may be a named type, an extension, etc.
// Consequently, we fallback to runtime codec for encoding/decoding interfaces.
// In addition, we fallback for any value which cannot be guaranteed at runtime.
// This allows us support ANY value, including any named types, specifically those which
// do not implement our interfaces (e.g. Selfer).
//
// This explains some slowness compared to other code generation codecs (e.g. msgp).
// This reduction in speed is only seen when your refers to interfaces,
// e.g. type T struct { A interface{}; B []interface{}; C map[string]interface{} }
//
// codecgen will panic if the file was generated with an old version of the library in use.
//
// Note:
// It was a conscious decision to have gen.go always explicitly call EncodeNil or TryDecodeAsNil.
// This way, there isn't a function call overhead just to see that we should not enter a block of code.
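// Illustrative sketch (added example, not generated output): a type can also
// satisfy Selfer by hand by providing both methods, e.g. for a hypothetical Foo:
//
//	type Foo struct{ A int }
//
//	func (x *Foo) CodecEncodeSelf(e *Encoder) { e.MustEncode(x.A) }
//	func (x *Foo) CodecDecodeSelf(d *Decoder) { d.MustDecode(&x.A) }
//
// codecgen emits equivalent (but more detailed) method bodies for each requested type.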
// GenVersion is the current version of codecgen.
//
// NOTE: Increment this value each time codecgen changes fundamentally.
// Fundamental changes are:
// - helper methods change (signature change, new ones added, some removed, etc)
// - codecgen command line changes
//
// v1: Initial Version
// v2:
// v3: Changes for Kubernetes:
// changes in signature of some unpublished helper methods and codecgen cmdline arguments.
// v4: Removed separator support from (en|de)cDriver, and refactored codec(gen)
// v5: changes to support faster json decoding. Let encoder/decoder maintain state of collections.
const GenVersion = 5
const (
genCodecPkg = "codec1978"
genTempVarPfx = "yy"
genTopLevelVarName = "x"
// ignore canBeNil parameter, and always set to true.
// This is because nil can appear anywhere, so we should always check.
genAnythingCanBeNil = true
// if genUseOneFunctionForDecStructMap, make a single codecDecodeSelferFromMap function;
// else make codecDecodeSelferFromMap{LenPrefix,CheckBreak} so that conditionals
// are not executed a lot.
//
// From testing, it didn't make much difference in runtime, so keep as true (one function only)
genUseOneFunctionForDecStructMap = true
)
type genStructMapStyle uint8
const (
genStructMapStyleConsolidated genStructMapStyle = iota
genStructMapStyleLenPrefix
genStructMapStyleCheckBreak
)
var (
genAllTypesSamePkgErr = errors.New("All types must be in the same package")
genExpectArrayOrMapErr = errors.New("unexpected type. Expecting array/map/slice")
genBase64enc = base64.NewEncoding("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789__")
genQNameRegex = regexp.MustCompile(`[A-Za-z_.]+`)
)
// genRunner holds some state used during a Gen run.
type genRunner struct {
w io.Writer // output
c uint64 // counter used for generating varsfx
t []reflect.Type // list of types to run selfer on
tc reflect.Type // currently running selfer on this type
te map[uintptr]bool // types for which the encoder has been created
td map[uintptr]bool // types for which the decoder has been created
cp string // codec import path
im map[string]reflect.Type // imports to add
imn map[string]string // package names of imports to add
imc uint64 // counter for import numbers
is map[reflect.Type]struct{} // types seen during import search
bp string // base PkgPath, for which we are generating for
cpfx string // codec package prefix
unsafe bool // is unsafe to be used in generated code?
tm map[reflect.Type]struct{} // types for which enc/dec must be generated
ts []reflect.Type // types for which enc/dec must be generated
xs string // top level variable/constant suffix
hn string // fn helper type name
ti *TypeInfos
// rr *rand.Rand // random generator for file-specific types
}
// Gen will write a complete go file containing Selfer implementations for each
// type passed. All the types must be in the same package.
//
// Library users: *DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINUOUSLY WITHOUT NOTICE.*
func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeInfos, typ ...reflect.Type) {
if len(typ) == 0 {
return
}
x := genRunner{
unsafe: useUnsafe,
w: w,
t: typ,
te: make(map[uintptr]bool),
td: make(map[uintptr]bool),
im: make(map[string]reflect.Type),
imn: make(map[string]string),
is: make(map[reflect.Type]struct{}),
tm: make(map[reflect.Type]struct{}),
ts: []reflect.Type{},
bp: genImportPath(typ[0]),
xs: uid,
ti: ti,
}
if x.ti == nil {
x.ti = defTypeInfos
}
if x.xs == "" {
rr := rand.New(rand.NewSource(time.Now().UnixNano()))
x.xs = strconv.FormatInt(rr.Int63n(9999), 10)
}
// gather imports first:
x.cp = genImportPath(reflect.TypeOf(x))
x.imn[x.cp] = genCodecPkg
for _, t := range typ {
// fmt.Printf("###########: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name())
if genImportPath(t) != x.bp {
panic(genAllTypesSamePkgErr)
}
x.genRefPkgs(t)
}
if buildTags != "" {
x.line("//+build " + buildTags)
x.line("")
}
x.line(`
// ************************************************************
// DO NOT EDIT.
// THIS FILE IS AUTO-GENERATED BY codecgen.
// ************************************************************
`)
x.line("package " + pkgName)
x.line("")
x.line("import (")
if x.cp != x.bp {
x.cpfx = genCodecPkg + "."
x.linef("%s \"%s\"", genCodecPkg, x.cp)
}
// use a sorted set of im keys, so that we can get consistent output
imKeys := make([]string, 0, len(x.im))
for k, _ := range x.im {
imKeys = append(imKeys, k)
}
sort.Strings(imKeys)
for _, k := range imKeys { // for k, _ := range x.im {
x.linef("%s \"%s\"", x.imn[k], k)
}
// add required packages
for _, k := range [...]string{"reflect", "unsafe", "runtime", "fmt", "errors"} {
if _, ok := x.im[k]; !ok {
if k == "unsafe" && !x.unsafe {
continue
}
x.line("\"" + k + "\"")
}
}
x.line(")")
x.line("")
x.line("const (")
x.linef("// ----- content types ----")
x.linef("codecSelferC_UTF8%s = %v", x.xs, int64(c_UTF8))
x.linef("codecSelferC_RAW%s = %v", x.xs, int64(c_RAW))
x.linef("// ----- value types used ----")
x.linef("codecSelferValueTypeArray%s = %v", x.xs, int64(valueTypeArray))
x.linef("codecSelferValueTypeMap%s = %v", x.xs, int64(valueTypeMap))
x.linef("// ----- containerStateValues ----")
x.linef("codecSelfer_containerMapKey%s = %v", x.xs, int64(containerMapKey))
x.linef("codecSelfer_containerMapValue%s = %v", x.xs, int64(containerMapValue))
x.linef("codecSelfer_containerMapEnd%s = %v", x.xs, int64(containerMapEnd))
x.linef("codecSelfer_containerArrayElem%s = %v", x.xs, int64(containerArrayElem))
x.linef("codecSelfer_containerArrayEnd%s = %v", x.xs, int64(containerArrayEnd))
x.line(")")
x.line("var (")
x.line("codecSelferBitsize" + x.xs + " = uint8(reflect.TypeOf(uint(0)).Bits())")
x.line("codecSelferOnlyMapOrArrayEncodeToStructErr" + x.xs + " = errors.New(`only encoded map or array can be decoded into a struct`)")
x.line(")")
x.line("")
if x.unsafe {
x.line("type codecSelferUnsafeString" + x.xs + " struct { Data uintptr; Len int}")
x.line("")
}
x.hn = "codecSelfer" + x.xs
x.line("type " + x.hn + " struct{}")
x.line("")
x.varsfxreset()
x.line("func init() {")
x.linef("if %sGenVersion != %v {", x.cpfx, GenVersion)
x.line("_, file, _, _ := runtime.Caller(0)")
x.line(`err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v", `)
x.linef(`%v, %sGenVersion, file)`, GenVersion, x.cpfx)
x.line("panic(err)")
x.linef("}")
x.line("if false { // reference the types, but skip this branch at build/run time")
var n int
// for k, t := range x.im {
for _, k := range imKeys {
t := x.im[k]
x.linef("var v%v %s.%s", n, x.imn[k], t.Name())
n++
}
if x.unsafe {
x.linef("var v%v unsafe.Pointer", n)
n++
}
if n > 0 {
x.out("_")
for i := 1; i < n; i++ {
x.out(", _")
}
x.out(" = v0")
for i := 1; i < n; i++ {
x.outf(", v%v", i)
}
}
x.line("} ") // close if false
x.line("}") // close init
x.line("")
// generate rest of type info
for _, t := range typ {
x.tc = t
x.selfer(true)
x.selfer(false)
}
for _, t := range x.ts {
rtid := reflect.ValueOf(t).Pointer()
// generate enc functions for all these slice/map types.
x.varsfxreset()
x.linef("func (x %s) enc%s(v %s%s, e *%sEncoder) {", x.hn, x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), x.cpfx)
x.genRequiredMethodVars(true)
switch t.Kind() {
case reflect.Array, reflect.Slice, reflect.Chan:
x.encListFallback("v", t)
case reflect.Map:
x.encMapFallback("v", t)
default:
panic(genExpectArrayOrMapErr)
}
x.line("}")
x.line("")
// generate dec functions for all these slice/map types.
x.varsfxreset()
x.linef("func (x %s) dec%s(v *%s, d *%sDecoder) {", x.hn, x.genMethodNameT(t), x.genTypeName(t), x.cpfx)
x.genRequiredMethodVars(false)
switch t.Kind() {
case reflect.Array, reflect.Slice, reflect.Chan:
x.decListFallback("v", rtid, t)
case reflect.Map:
x.decMapFallback("v", rtid, t)
default:
panic(genExpectArrayOrMapErr)
}
x.line("}")
x.line("")
}
x.line("")
}
func (x *genRunner) checkForSelfer(t reflect.Type, varname string) bool {
// return varname != genTopLevelVarName && t != x.tc
// the only time we checkForSelfer is if we are not at the TOP of the generated code.
return varname != genTopLevelVarName
}
func (x *genRunner) arr2str(t reflect.Type, s string) string {
if t.Kind() == reflect.Array {
return s
}
return ""
}
func (x *genRunner) genRequiredMethodVars(encode bool) {
x.line("var h " + x.hn)
if encode {
x.line("z, r := " + x.cpfx + "GenHelperEncoder(e)")
} else {
x.line("z, r := " + x.cpfx + "GenHelperDecoder(d)")
}
x.line("_, _, _ = h, z, r")
}
func (x *genRunner) genRefPkgs(t reflect.Type) {
if _, ok := x.is[t]; ok {
return
}
// fmt.Printf(">>>>>>: PkgPath: '%v', Name: '%s'\n", genImportPath(t), t.Name())
x.is[t] = struct{}{}
tpkg, tname := genImportPath(t), t.Name()
if tpkg != "" && tpkg != x.bp && tpkg != x.cp && tname != "" && tname[0] >= 'A' && tname[0] <= 'Z' {
if _, ok := x.im[tpkg]; !ok {
x.im[tpkg] = t
if idx := strings.LastIndex(tpkg, "/"); idx < 0 {
x.imn[tpkg] = tpkg
} else {
x.imc++
x.imn[tpkg] = "pkg" + strconv.FormatUint(x.imc, 10) + "_" + genGoIdentifier(tpkg[idx+1:], false)
}
}
}
switch t.Kind() {
case reflect.Array, reflect.Slice, reflect.Ptr, reflect.Chan:
x.genRefPkgs(t.Elem())
case reflect.Map:
x.genRefPkgs(t.Elem())
x.genRefPkgs(t.Key())
case reflect.Struct:
for i := 0; i < t.NumField(); i++ {
if fname := t.Field(i).Name; fname != "" && fname[0] >= 'A' && fname[0] <= 'Z' {
x.genRefPkgs(t.Field(i).Type)
}
}
}
}
func (x *genRunner) line(s string) {
x.out(s)
if len(s) == 0 || s[len(s)-1] != '\n' {
x.out("\n")
}
}
func (x *genRunner) varsfx() string {
x.c++
return strconv.FormatUint(x.c, 10)
}
func (x *genRunner) varsfxreset() {
x.c = 0
}
func (x *genRunner) out(s string) {
if _, err := io.WriteString(x.w, s); err != nil {
panic(err)
}
}
func (x *genRunner) linef(s string, params ...interface{}) {
x.line(fmt.Sprintf(s, params...))
}
func (x *genRunner) outf(s string, params ...interface{}) {
x.out(fmt.Sprintf(s, params...))
}
func (x *genRunner) genTypeName(t reflect.Type) (n string) {
// defer func() { fmt.Printf(">>>> ####: genTypeName: t: %v, name: '%s'\n", t, n) }()
// if the type has a PkgPath, which doesn't match the current package,
// then include it.
// We cannot depend on t.String() because it includes current package,
// or t.PkgPath because it includes full import path,
//
var ptrPfx string
for t.Kind() == reflect.Ptr {
ptrPfx += "*"
t = t.Elem()
}
if tn := t.Name(); tn != "" {
return ptrPfx + x.genTypeNamePrim(t)
}
switch t.Kind() {
case reflect.Map:
return ptrPfx + "map[" + x.genTypeName(t.Key()) + "]" + x.genTypeName(t.Elem())
case reflect.Slice:
return ptrPfx + "[]" + x.genTypeName(t.Elem())
case reflect.Array:
return ptrPfx + "[" + strconv.FormatInt(int64(t.Len()), 10) + "]" + x.genTypeName(t.Elem())
case reflect.Chan:
return ptrPfx + t.ChanDir().String() + " " + x.genTypeName(t.Elem())
default:
if t == intfTyp {
return ptrPfx + "interface{}"
} else {
return ptrPfx + x.genTypeNamePrim(t)
}
}
}
func (x *genRunner) genTypeNamePrim(t reflect.Type) (n string) {
if t.Name() == "" {
return t.String()
} else if genImportPath(t) == "" || genImportPath(t) == genImportPath(x.tc) {
return t.Name()
} else {
return x.imn[genImportPath(t)] + "." + t.Name()
// return t.String() // best way to get the package name inclusive
}
}
func (x *genRunner) genZeroValueR(t reflect.Type) string {
// if t is a named type, w
switch t.Kind() {
case reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func,
reflect.Slice, reflect.Map, reflect.Invalid:
return "nil"
case reflect.Bool:
return "false"
case reflect.String:
return `""`
case reflect.Struct, reflect.Array:
return x.genTypeName(t) + "{}"
default: // all numbers
return "0"
}
}
func (x *genRunner) genMethodNameT(t reflect.Type) (s string) {
return genMethodNameT(t, x.tc)
}
func (x *genRunner) selfer(encode bool) {
t := x.tc
t0 := t
// always make decode use a pointer receiver,
// and structs always use a ptr receiver (encode|decode)
isptr := !encode || t.Kind() == reflect.Struct
x.varsfxreset()
fnSigPfx := "func (x "
if isptr {
fnSigPfx += "*"
}
fnSigPfx += x.genTypeName(t)
x.out(fnSigPfx)
if isptr {
t = reflect.PtrTo(t)
}
if encode {
x.line(") CodecEncodeSelf(e *" + x.cpfx + "Encoder) {")
x.genRequiredMethodVars(true)
// x.enc(genTopLevelVarName, t)
x.encVar(genTopLevelVarName, t)
} else {
x.line(") CodecDecodeSelf(d *" + x.cpfx + "Decoder) {")
x.genRequiredMethodVars(false)
// do not use decVar, as there is no need to check TryDecodeAsNil
// or way to elegantly handle that, and also setting it to a
// non-nil value doesn't affect the pointer passed.
// x.decVar(genTopLevelVarName, t, false)
x.dec(genTopLevelVarName, t0)
}
x.line("}")
x.line("")
if encode || t0.Kind() != reflect.Struct {
return
}
// write is containerMap
if genUseOneFunctionForDecStructMap {
x.out(fnSigPfx)
x.line(") codecDecodeSelfFromMap(l int, d *" + x.cpfx + "Decoder) {")
x.genRequiredMethodVars(false)
x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleConsolidated)
x.line("}")
x.line("")
} else {
x.out(fnSigPfx)
x.line(") codecDecodeSelfFromMapLenPrefix(l int, d *" + x.cpfx + "Decoder) {")
x.genRequiredMethodVars(false)
x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleLenPrefix)
x.line("}")
x.line("")
x.out(fnSigPfx)
x.line(") codecDecodeSelfFromMapCheckBreak(l int, d *" + x.cpfx + "Decoder) {")
x.genRequiredMethodVars(false)
x.decStructMap(genTopLevelVarName, "l", reflect.ValueOf(t0).Pointer(), t0, genStructMapStyleCheckBreak)
x.line("}")
x.line("")
}
// write containerArray
x.out(fnSigPfx)
x.line(") codecDecodeSelfFromArray(l int, d *" + x.cpfx + "Decoder) {")
x.genRequiredMethodVars(false)
x.decStructArray(genTopLevelVarName, "l", "return", reflect.ValueOf(t0).Pointer(), t0)
x.line("}")
x.line("")
}
// used for chan, array, slice, map
func (x *genRunner) xtraSM(varname string, encode bool, t reflect.Type) {
if encode {
x.linef("h.enc%s((%s%s)(%s), e)", x.genMethodNameT(t), x.arr2str(t, "*"), x.genTypeName(t), varname)
} else {
x.linef("h.dec%s((*%s)(%s), d)", x.genMethodNameT(t), x.genTypeName(t), varname)
}
x.registerXtraT(t)
}
func (x *genRunner) registerXtraT(t reflect.Type) {
// recursively register the types
if _, ok := x.tm[t]; ok {
return
}
var tkey reflect.Type
switch t.Kind() {
case reflect.Chan, reflect.Slice, reflect.Array:
case reflect.Map:
tkey = t.Key()
default:
return
}
x.tm[t] = struct{}{}
x.ts = append(x.ts, t)
// check if this refers to any xtra types eg. a slice of array: add the array
x.registerXtraT(t.Elem())
if tkey != nil {
x.registerXtraT(tkey)
}
}
// encVar will encode a variable.
// The parameter, t, is the reflect.Type of the variable itself
func (x *genRunner) encVar(varname string, t reflect.Type) {
// fmt.Printf(">>>>>> varname: %s, t: %v\n", varname, t)
var checkNil bool
switch t.Kind() {
case reflect.Ptr, reflect.Interface, reflect.Slice, reflect.Map, reflect.Chan:
checkNil = true
}
if checkNil {
x.linef("if %s == nil { r.EncodeNil() } else { ", varname)
}
switch t.Kind() {
case reflect.Ptr:
switch t.Elem().Kind() {
case reflect.Struct, reflect.Array:
x.enc(varname, genNonPtr(t))
default:
i := x.varsfx()
x.line(genTempVarPfx + i + " := *" + varname)
x.enc(genTempVarPfx+i, genNonPtr(t))
}
case reflect.Struct, reflect.Array:
i := x.varsfx()
x.line(genTempVarPfx + i + " := &" + varname)
x.enc(genTempVarPfx+i, t)
default:
x.enc(varname, t)
}
if checkNil {
x.line("}")
}
}
// enc will encode a variable (varname) of type t,
// except t is of kind reflect.Struct or reflect.Array, wherein varname is of type ptrTo(T) (to prevent copying)
func (x *genRunner) enc(varname string, t reflect.Type) {
rtid := reflect.ValueOf(t).Pointer()
// We call CodecEncodeSelf if one of the following are honored:
// - the type already implements Selfer, call that
// - the type has a Selfer implementation just created, use that
// - the type is in the list of the ones we will generate for, but it is not currently being generated
mi := x.varsfx()
tptr := reflect.PtrTo(t)
tk := t.Kind()
if x.checkForSelfer(t, varname) {
if tk == reflect.Array || tk == reflect.Struct { // varname is of type *T
if tptr.Implements(selferTyp) || t.Implements(selferTyp) {
x.line(varname + ".CodecEncodeSelf(e)")
return
}
} else { // varname is of type T
if t.Implements(selferTyp) {
x.line(varname + ".CodecEncodeSelf(e)")
return
} else if tptr.Implements(selferTyp) {
x.linef("%ssf%s := &%s", genTempVarPfx, mi, varname)
x.linef("%ssf%s.CodecEncodeSelf(e)", genTempVarPfx, mi)
return
}
}
if _, ok := x.te[rtid]; ok {
x.line(varname + ".CodecEncodeSelf(e)")
return
}
}
inlist := false
for _, t0 := range x.t {
if t == t0 {
inlist = true
if x.checkForSelfer(t, varname) {
x.line(varname + ".CodecEncodeSelf(e)")
return
}
break
}
}
var rtidAdded bool
if t == x.tc {
x.te[rtid] = true
rtidAdded = true
}
// check if
// - type is RawExt
// - the type implements (Text|JSON|Binary)(Unm|M)arshal
x.linef("%sm%s := z.EncBinary()", genTempVarPfx, mi)
x.linef("_ = %sm%s", genTempVarPfx, mi)
x.line("if false {") //start if block
defer func() { x.line("}") }() //end if block
if t == rawExtTyp {
x.linef("} else { r.EncodeRawExt(%v, e)", varname)
return
}
// HACK: Support for Builtins.
// Currently, only Binc supports builtins, and the only builtin type is time.Time.
// Have a method that returns the rtid for time.Time if Handle is Binc.
if t == timeTyp {
vrtid := genTempVarPfx + "m" + x.varsfx()
x.linef("} else if %s := z.TimeRtidIfBinc(); %s != 0 { ", vrtid, vrtid)
x.linef("r.EncodeBuiltin(%s, %s)", vrtid, varname)
}
// only check for extensions if the type is named, and has a packagePath.
if genImportPath(t) != "" && t.Name() != "" {
// first check if extensions are configured, before doing the interface conversion
x.linef("} else if z.HasExtensions() && z.EncExt(%s) {", varname)
}
if tk == reflect.Array || tk == reflect.Struct { // varname is of type *T
if t.Implements(binaryMarshalerTyp) || tptr.Implements(binaryMarshalerTyp) {
x.linef("} else if %sm%s { z.EncBinaryMarshal(%v) ", genTempVarPfx, mi, varname)
}
if t.Implements(jsonMarshalerTyp) || tptr.Implements(jsonMarshalerTyp) {
x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", genTempVarPfx, mi, varname)
} else if t.Implements(textMarshalerTyp) || tptr.Implements(textMarshalerTyp) {
x.linef("} else if !%sm%s { z.EncTextMarshal(%v) ", genTempVarPfx, mi, varname)
}
} else { // varname is of type T
if t.Implements(binaryMarshalerTyp) {
x.linef("} else if %sm%s { z.EncBinaryMarshal(%v) ", genTempVarPfx, mi, varname)
} else if tptr.Implements(binaryMarshalerTyp) {
x.linef("} else if %sm%s { z.EncBinaryMarshal(&%v) ", genTempVarPfx, mi, varname)
}
if t.Implements(jsonMarshalerTyp) {
x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(%v) ", genTempVarPfx, mi, varname)
} else if tptr.Implements(jsonMarshalerTyp) {
x.linef("} else if !%sm%s && z.IsJSONHandle() { z.EncJSONMarshal(&%v) ", genTempVarPfx, mi, varname)
} else if t.Implements(textMarshalerTyp) {
x.linef("} else if !%sm%s { z.EncTextMarshal(%v) ", genTempVarPfx, mi, varname)
} else if tptr.Implements(textMarshalerTyp) {
x.linef("} else if !%sm%s { z.EncTextMarshal(&%v) ", genTempVarPfx, mi, varname)
}
}
x.line("} else {")
switch t.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
x.line("r.EncodeInt(int64(" + varname + "))")
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
x.line("r.EncodeUint(uint64(" + varname + "))")
case reflect.Float32:
x.line("r.EncodeFloat32(float32(" + varname + "))")
case reflect.Float64:
x.line("r.EncodeFloat64(float64(" + varname + "))")
case reflect.Bool:
x.line("r.EncodeBool(bool(" + varname + "))")
case reflect.String:
x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + ", string(" + varname + "))")
case reflect.Chan:
x.xtraSM(varname, true, t)
// x.encListFallback(varname, rtid, t)
case reflect.Array:
x.xtraSM(varname, true, t)
case reflect.Slice:
// if nil, call dedicated function
// if a []uint8, call dedicated function
// if a known fastpath slice, call dedicated function
// else write encode function in-line.
// - if elements are primitives or Selfers, call dedicated function on each member.
// - else call Encoder.encode(XXX) on it.
if rtid == uint8SliceTypId {
x.line("r.EncodeStringBytes(codecSelferC_RAW" + x.xs + ", []byte(" + varname + "))")
} else if fastpathAV.index(rtid) != -1 {
g := x.newGenV(t)
x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", false, e)")
} else {
x.xtraSM(varname, true, t)
// x.encListFallback(varname, rtid, t)
}
case reflect.Map:
// if nil, call dedicated function
// if a known fastpath map, call dedicated function
// else write encode function in-line.
// - if elements are primitives or Selfers, call dedicated function on each member.
// - else call Encoder.encode(XXX) on it.
// x.line("if " + varname + " == nil { \nr.EncodeNil()\n } else { ")
if fastpathAV.index(rtid) != -1 {
g := x.newGenV(t)
x.line("z.F." + g.MethodNamePfx("Enc", false) + "V(" + varname + ", false, e)")
} else {
x.xtraSM(varname, true, t)
// x.encMapFallback(varname, rtid, t)
}
case reflect.Struct:
if !inlist {
delete(x.te, rtid)
x.line("z.EncFallback(" + varname + ")")
break
}
x.encStruct(varname, rtid, t)
default:
if rtidAdded {
delete(x.te, rtid)
}
x.line("z.EncFallback(" + varname + ")")
}
}
func (x *genRunner) encZero(t reflect.Type) {
switch t.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
x.line("r.EncodeInt(0)")
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
x.line("r.EncodeUint(0)")
case reflect.Float32:
x.line("r.EncodeFloat32(0)")
case reflect.Float64:
x.line("r.EncodeFloat64(0)")
case reflect.Bool:
x.line("r.EncodeBool(false)")
case reflect.String:
x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + `, "")`)
default:
x.line("r.EncodeNil()")
}
}
func (x *genRunner) encStruct(varname string, rtid uintptr, t reflect.Type) {
// Use knowledge from structfieldinfo (mbs, encodable fields. Ignore omitempty. )
// replicate code in kStruct i.e. for each field, deref type to non-pointer, and call x.enc on it
// if t === type currently running selfer on, do for all
ti := x.ti.get(rtid, t)
i := x.varsfx()
sepVarname := genTempVarPfx + "sep" + i
numfieldsvar := genTempVarPfx + "q" + i
ti2arrayvar := genTempVarPfx + "r" + i
struct2arrvar := genTempVarPfx + "2arr" + i
x.line(sepVarname + " := !z.EncBinary()")
x.linef("%s := z.EncBasicHandle().StructToArray", struct2arrvar)
tisfi := ti.sfip // always use sequence from file. decStruct expects same thing.
// due to omitEmpty, we need to calculate the
// number of non-empty things we write out first.
// This is required as we need to pre-determine the size of the container,
// to support length-prefixing.
x.linef("var %s [%v]bool", numfieldsvar, len(tisfi))
x.linef("_, _, _ = %s, %s, %s", sepVarname, numfieldsvar, struct2arrvar)
x.linef("const %s bool = %v", ti2arrayvar, ti.toArray)
nn := 0
for j, si := range tisfi {
if !si.omitEmpty {
nn++
continue
}
var t2 reflect.StructField
var omitline string
if si.i != -1 {
t2 = t.Field(int(si.i))
} else {
t2typ := t
varname3 := varname
for _, ix := range si.is {
for t2typ.Kind() == reflect.Ptr {
t2typ = t2typ.Elem()
}
t2 = t2typ.Field(ix)
t2typ = t2.Type
varname3 = varname3 + "." + t2.Name
if t2typ.Kind() == reflect.Ptr {
omitline += varname3 + " != nil && "
}
}
}
// never check omitEmpty on a struct type, as it may contain uncomparable map/slice/etc.
// also, for maps/slices/arrays, check if len ! 0 (not if == zero value)
switch t2.Type.Kind() {
case reflect.Struct:
omitline += " true"
case reflect.Map, reflect.Slice, reflect.Array, reflect.Chan:
omitline += "len(" + varname + "." + t2.Name + ") != 0"
default:
omitline += varname + "." + t2.Name + " != " + x.genZeroValueR(t2.Type)
}
x.linef("%s[%v] = %s", numfieldsvar, j, omitline)
}
x.linef("var %snn%s int", genTempVarPfx, i)
x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray {
x.line("r.EncodeArrayStart(" + strconv.FormatInt(int64(len(tisfi)), 10) + ")")
x.linef("} else {") // if not ti.toArray
x.linef("%snn%s = %v", genTempVarPfx, i, nn)
x.linef("for _, b := range %s { if b { %snn%s++ } }", numfieldsvar, genTempVarPfx, i)
x.linef("r.EncodeMapStart(%snn%s)", genTempVarPfx, i)
x.linef("%snn%s = %v", genTempVarPfx, i, 0)
// x.line("r.EncodeMapStart(" + strconv.FormatInt(int64(len(tisfi)), 10) + ")")
x.line("}") // close if not StructToArray
for j, si := range tisfi {
i := x.varsfx()
isNilVarName := genTempVarPfx + "n" + i
var labelUsed bool
var t2 reflect.StructField
if si.i != -1 {
t2 = t.Field(int(si.i))
} else {
t2typ := t
varname3 := varname
for _, ix := range si.is {
// fmt.Printf("%%%% %v, ix: %v\n", t2typ, ix)
for t2typ.Kind() == reflect.Ptr {
t2typ = t2typ.Elem()
}
t2 = t2typ.Field(ix)
t2typ = t2.Type
varname3 = varname3 + "." + t2.Name
if t2typ.Kind() == reflect.Ptr {
if !labelUsed {
x.line("var " + isNilVarName + " bool")
}
x.line("if " + varname3 + " == nil { " + isNilVarName + " = true ")
x.line("goto LABEL" + i)
x.line("}")
labelUsed = true
// "varname3 = new(" + x.genTypeName(t3.Elem()) + ") }")
}
}
// t2 = t.FieldByIndex(si.is)
}
if labelUsed {
x.line("LABEL" + i + ":")
}
// if the type of the field is a Selfer, or one of the ones
x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray
if labelUsed {
x.line("if " + isNilVarName + " { r.EncodeNil() } else { ")
}
x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
if si.omitEmpty {
x.linef("if %s[%v] {", numfieldsvar, j)
}
x.encVar(varname+"."+t2.Name, t2.Type)
if si.omitEmpty {
x.linef("} else {")
x.encZero(t2.Type)
x.linef("}")
}
if labelUsed {
x.line("}")
}
x.linef("} else {") // if not ti.toArray
if si.omitEmpty {
x.linef("if %s[%v] {", numfieldsvar, j)
}
x.linef("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs)
x.line("r.EncodeString(codecSelferC_UTF8" + x.xs + ", string(\"" + si.encName + "\"))")
x.linef("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs)
if labelUsed {
x.line("if " + isNilVarName + " { r.EncodeNil() } else { ")
x.encVar(varname+"."+t2.Name, t2.Type)
x.line("}")
} else {
x.encVar(varname+"."+t2.Name, t2.Type)
}
if si.omitEmpty {
x.line("}")
}
x.linef("} ") // end if/else ti.toArray
}
x.linef("if %s || %s {", ti2arrayvar, struct2arrvar) // if ti.toArray {
x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs)
x.line("} else {")
x.linef("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs)
x.line("}")
}
func (x *genRunner) encListFallback(varname string, t reflect.Type) {
i := x.varsfx()
g := genTempVarPfx
x.line("r.EncodeArrayStart(len(" + varname + "))")
if t.Kind() == reflect.Chan {
x.linef("for %si%s, %si2%s := 0, len(%s); %si%s < %si2%s; %si%s++ {", g, i, g, i, varname, g, i, g, i, g, i)
x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
x.linef("%sv%s := <-%s", g, i, varname)
} else {
// x.linef("for %si%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname)
x.linef("for _, %sv%s := range %s {", genTempVarPfx, i, varname)
x.linef("z.EncSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
}
x.encVar(genTempVarPfx+"v"+i, t.Elem())
x.line("}")
x.linef("z.EncSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs)
}
func (x *genRunner) encMapFallback(varname string, t reflect.Type) {
// TODO: expand this to handle canonical.
i := x.varsfx()
x.line("r.EncodeMapStart(len(" + varname + "))")
x.linef("for %sk%s, %sv%s := range %s {", genTempVarPfx, i, genTempVarPfx, i, varname)
// x.line("for " + genTempVarPfx + "k" + i + ", " + genTempVarPfx + "v" + i + " := range " + varname + " {")
x.linef("z.EncSendContainerState(codecSelfer_containerMapKey%s)", x.xs)
x.encVar(genTempVarPfx+"k"+i, t.Key())
x.linef("z.EncSendContainerState(codecSelfer_containerMapValue%s)", x.xs)
x.encVar(genTempVarPfx+"v"+i, t.Elem())
x.line("}")
x.linef("z.EncSendContainerState(codecSelfer_containerMapEnd%s)", x.xs)
}
func (x *genRunner) decVar(varname string, t reflect.Type, canBeNil bool) {
// We only encode as nil if a nillable value.
// This removes some of the wasted checks for TryDecodeAsNil.
// We need to think about this more, to see what happens if omitempty, etc
// cause a nil value to be stored when something is expected.
// This could happen when decoding from a struct encoded as an array.
// For that, decVar should be called with canNil=true, to force true as its value.
i := x.varsfx()
if !canBeNil {
canBeNil = genAnythingCanBeNil || !genIsImmutable(t)
}
if canBeNil {
x.line("if r.TryDecodeAsNil() {")
if t.Kind() == reflect.Ptr {
x.line("if " + varname + " != nil { ")
// if varname is a field of a struct (has a dot in it),
// then just set it to nil
if strings.IndexByte(varname, '.') != -1 {
x.line(varname + " = nil")
} else {
x.line("*" + varname + " = " + x.genZeroValueR(t.Elem()))
}
x.line("}")
} else {
x.line(varname + " = " + x.genZeroValueR(t))
}
x.line("} else {")
} else {
x.line("// cannot be nil")
}
if t.Kind() != reflect.Ptr {
if x.decTryAssignPrimitive(varname, t) {
x.line(genTempVarPfx + "v" + i + " := &" + varname)
x.dec(genTempVarPfx+"v"+i, t)
}
} else {
x.linef("if %s == nil { %s = new(%s) }", varname, varname, x.genTypeName(t.Elem()))
// Ensure we set underlying ptr to a non-nil value (so we can deref to it later).
// There's a chance of a **T in here which is nil.
var ptrPfx string
for t = t.Elem(); t.Kind() == reflect.Ptr; t = t.Elem() {
ptrPfx += "*"
x.linef("if %s%s == nil { %s%s = new(%s)}",
ptrPfx, varname, ptrPfx, varname, x.genTypeName(t))
}
// if varname has [ in it, then create temp variable for this ptr thingie
if strings.Index(varname, "[") >= 0 {
varname2 := genTempVarPfx + "w" + i
x.line(varname2 + " := " + varname)
varname = varname2
}
if ptrPfx == "" {
x.dec(varname, t)
} else {
x.line(genTempVarPfx + "z" + i + " := " + ptrPfx + varname)
x.dec(genTempVarPfx+"z"+i, t)
}
}
if canBeNil {
x.line("} ")
}
}
// dec will decode a variable (varname) of type ptrTo(t).
// t is always a basetype (i.e. not of kind reflect.Ptr).
func (x *genRunner) dec(varname string, t reflect.Type) {
// assumptions:
// - the varname is to a pointer already. No need to take address of it
// - t is always a baseType T (not a *T, etc).
rtid := reflect.ValueOf(t).Pointer()
tptr := reflect.PtrTo(t)
if x.checkForSelfer(t, varname) {
if t.Implements(selferTyp) || tptr.Implements(selferTyp) {
x.line(varname + ".CodecDecodeSelf(d)")
return
}
if _, ok := x.td[rtid]; ok {
x.line(varname + ".CodecDecodeSelf(d)")
return
}
}
inlist := false
for _, t0 := range x.t {
if t == t0 {
inlist = true
if x.checkForSelfer(t, varname) {
x.line(varname + ".CodecDecodeSelf(d)")
return
}
break
}
}
var rtidAdded bool
if t == x.tc {
x.td[rtid] = true
rtidAdded = true
}
// check if
// - type is RawExt
// - the type implements (Text|JSON|Binary)(Unm|M)arshal
mi := x.varsfx()
x.linef("%sm%s := z.DecBinary()", genTempVarPfx, mi)
x.linef("_ = %sm%s", genTempVarPfx, mi)
x.line("if false {") //start if block
defer func() { x.line("}") }() //end if block
if t == rawExtTyp {
x.linef("} else { r.DecodeExt(%v, 0, nil)", varname)
return
}
// HACK: Support for Builtins.
// Currently, only Binc supports builtins, and the only builtin type is time.Time.
// Have a method that returns the rtid for time.Time if Handle is Binc.
if t == timeTyp {
vrtid := genTempVarPfx + "m" + x.varsfx()
x.linef("} else if %s := z.TimeRtidIfBinc(); %s != 0 { ", vrtid, vrtid)
x.linef("r.DecodeBuiltin(%s, %s)", vrtid, varname)
}
// only check for extensions if the type is named, and has a packagePath.
if genImportPath(t) != "" && t.Name() != "" {
// first check if extensions are configured, before doing the interface conversion
x.linef("} else if z.HasExtensions() && z.DecExt(%s) {", varname)
}
if t.Implements(binaryUnmarshalerTyp) || tptr.Implements(binaryUnmarshalerTyp) {
x.linef("} else if %sm%s { z.DecBinaryUnmarshal(%v) ", genTempVarPfx, mi, varname)
}
if t.Implements(jsonUnmarshalerTyp) || tptr.Implements(jsonUnmarshalerTyp) {
x.linef("} else if !%sm%s && z.IsJSONHandle() { z.DecJSONUnmarshal(%v)", genTempVarPfx, mi, varname)
} else if t.Implements(textUnmarshalerTyp) || tptr.Implements(textUnmarshalerTyp) {
x.linef("} else if !%sm%s { z.DecTextUnmarshal(%v)", genTempVarPfx, mi, varname)
}
x.line("} else {")
// Since these are pointers, we cannot share, and have to use them one by one
switch t.Kind() {
case reflect.Int:
x.line("*((*int)(" + varname + ")) = int(r.DecodeInt(codecSelferBitsize" + x.xs + "))")
// x.line("z.DecInt((*int)(" + varname + "))")
case reflect.Int8:
x.line("*((*int8)(" + varname + ")) = int8(r.DecodeInt(8))")
// x.line("z.DecInt8((*int8)(" + varname + "))")
case reflect.Int16:
x.line("*((*int16)(" + varname + ")) = int16(r.DecodeInt(16))")
// x.line("z.DecInt16((*int16)(" + varname + "))")
case reflect.Int32:
x.line("*((*int32)(" + varname + ")) = int32(r.DecodeInt(32))")
// x.line("z.DecInt32((*int32)(" + varname + "))")
case reflect.Int64:
x.line("*((*int64)(" + varname + ")) = int64(r.DecodeInt(64))")
// x.line("z.DecInt64((*int64)(" + varname + "))")
case reflect.Uint:
x.line("*((*uint)(" + varname + ")) = uint(r.DecodeUint(codecSelferBitsize" + x.xs + "))")
// x.line("z.DecUint((*uint)(" + varname + "))")
case reflect.Uint8:
x.line("*((*uint8)(" + varname + ")) = uint8(r.DecodeUint(8))")
// x.line("z.DecUint8((*uint8)(" + varname + "))")
case reflect.Uint16:
x.line("*((*uint16)(" + varname + ")) = uint16(r.DecodeUint(16))")
//x.line("z.DecUint16((*uint16)(" + varname + "))")
case reflect.Uint32:
x.line("*((*uint32)(" + varname + ")) = uint32(r.DecodeUint(32))")
//x.line("z.DecUint32((*uint32)(" + varname + "))")
case reflect.Uint64:
x.line("*((*uint64)(" + varname + ")) = uint64(r.DecodeUint(64))")
//x.line("z.DecUint64((*uint64)(" + varname + "))")
case reflect.Uintptr:
x.line("*((*uintptr)(" + varname + ")) = uintptr(r.DecodeUint(codecSelferBitsize" + x.xs + "))")
case reflect.Float32:
x.line("*((*float32)(" + varname + ")) = float32(r.DecodeFloat(true))")
//x.line("z.DecFloat32((*float32)(" + varname + "))")
case reflect.Float64:
x.line("*((*float64)(" + varname + ")) = float64(r.DecodeFloat(false))")
// x.line("z.DecFloat64((*float64)(" + varname + "))")
case reflect.Bool:
x.line("*((*bool)(" + varname + ")) = r.DecodeBool()")
// x.line("z.DecBool((*bool)(" + varname + "))")
case reflect.String:
x.line("*((*string)(" + varname + ")) = r.DecodeString()")
// x.line("z.DecString((*string)(" + varname + "))")
case reflect.Array, reflect.Chan:
x.xtraSM(varname, false, t)
// x.decListFallback(varname, rtid, true, t)
case reflect.Slice:
// if a []uint8, call dedicated function
// if a known fastpath slice, call dedicated function
// else write encode function in-line.
// - if elements are primitives or Selfers, call dedicated function on each member.
// - else call Encoder.encode(XXX) on it.
if rtid == uint8SliceTypId {
x.line("*" + varname + " = r.DecodeBytes(*(*[]byte)(" + varname + "), false, false)")
} else if fastpathAV.index(rtid) != -1 {
g := x.newGenV(t)
x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", false, d)")
} else {
x.xtraSM(varname, false, t)
// x.decListFallback(varname, rtid, false, t)
}
case reflect.Map:
// if a known fastpath map, call dedicated function
// else write encode function in-line.
// - if elements are primitives or Selfers, call dedicated function on each member.
// - else call Encoder.encode(XXX) on it.
if fastpathAV.index(rtid) != -1 {
g := x.newGenV(t)
x.line("z.F." + g.MethodNamePfx("Dec", false) + "X(" + varname + ", false, d)")
} else {
x.xtraSM(varname, false, t)
// x.decMapFallback(varname, rtid, t)
}
case reflect.Struct:
if inlist {
x.decStruct(varname, rtid, t)
} else {
// delete(x.td, rtid)
x.line("z.DecFallback(" + varname + ", false)")
}
default:
if rtidAdded {
delete(x.te, rtid)
}
x.line("z.DecFallback(" + varname + ", true)")
}
}
func (x *genRunner) decTryAssignPrimitive(varname string, t reflect.Type) (tryAsPtr bool) {
// We have to use the actual type name when doing a direct assignment.
// We don't have the luxury of casting the pointer to the underlying type.
//
// Consequently, in the situation of a
// type Message int32
// var x Message
// var i int32 = 32
// x = i // this will bomb
// x = Message(i) // this will work
// *((*int32)(&x)) = i // this will work
//
// Consequently, we replace:
// case reflect.Uint32: x.line(varname + " = uint32(r.DecodeUint(32))")
// with:
// case reflect.Uint32: x.line(varname + " = " + genTypeNamePrim(t, x.tc) + "(r.DecodeUint(32))")
xfn := func(t reflect.Type) string {
return x.genTypeNamePrim(t)
}
switch t.Kind() {
case reflect.Int:
x.linef("%s = %s(r.DecodeInt(codecSelferBitsize%s))", varname, xfn(t), x.xs)
case reflect.Int8:
x.linef("%s = %s(r.DecodeInt(8))", varname, xfn(t))
case reflect.Int16:
x.linef("%s = %s(r.DecodeInt(16))", varname, xfn(t))
case reflect.Int32:
x.linef("%s = %s(r.DecodeInt(32))", varname, xfn(t))
case reflect.Int64:
x.linef("%s = %s(r.DecodeInt(64))", varname, xfn(t))
case reflect.Uint:
x.linef("%s = %s(r.DecodeUint(codecSelferBitsize%s))", varname, xfn(t), x.xs)
case reflect.Uint8:
x.linef("%s = %s(r.DecodeUint(8))", varname, xfn(t))
case reflect.Uint16:
x.linef("%s = %s(r.DecodeUint(16))", varname, xfn(t))
case reflect.Uint32:
x.linef("%s = %s(r.DecodeUint(32))", varname, xfn(t))
case reflect.Uint64:
x.linef("%s = %s(r.DecodeUint(64))", varname, xfn(t))
case reflect.Uintptr:
x.linef("%s = %s(r.DecodeUint(codecSelferBitsize%s))", varname, xfn(t), x.xs)
case reflect.Float32:
x.linef("%s = %s(r.DecodeFloat(true))", varname, xfn(t))
case reflect.Float64:
x.linef("%s = %s(r.DecodeFloat(false))", varname, xfn(t))
case reflect.Bool:
x.linef("%s = %s(r.DecodeBool())", varname, xfn(t))
case reflect.String:
x.linef("%s = %s(r.DecodeString())", varname, xfn(t))
default:
tryAsPtr = true
}
return
}
func (x *genRunner) decListFallback(varname string, rtid uintptr, t reflect.Type) {
type tstruc struct {
TempVar string
Rand string
Varname string
CTyp string
Typ string
Immutable bool
Size int
}
telem := t.Elem()
ts := tstruc{genTempVarPfx, x.varsfx(), varname, x.genTypeName(t), x.genTypeName(telem), genIsImmutable(telem), int(telem.Size())}
funcs := make(template.FuncMap)
funcs["decLineVar"] = func(varname string) string {
x.decVar(varname, telem, false)
return ""
}
funcs["decLine"] = func(pfx string) string {
x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(telem), false)
return ""
}
funcs["var"] = func(s string) string {
return ts.TempVar + s + ts.Rand
}
funcs["zero"] = func() string {
return x.genZeroValueR(telem)
}
funcs["isArray"] = func() bool {
return t.Kind() == reflect.Array
}
funcs["isSlice"] = func() bool {
return t.Kind() == reflect.Slice
}
funcs["isChan"] = func() bool {
return t.Kind() == reflect.Chan
}
tm, err := template.New("").Funcs(funcs).Parse(genDecListTmpl)
if err != nil {
panic(err)
}
if err = tm.Execute(x.w, &ts); err != nil {
panic(err)
}
}
func (x *genRunner) decMapFallback(varname string, rtid uintptr, t reflect.Type) {
type tstruc struct {
TempVar string
Sfx string
Rand string
Varname string
KTyp string
Typ string
Size int
}
telem := t.Elem()
tkey := t.Key()
ts := tstruc{
genTempVarPfx, x.xs, x.varsfx(), varname, x.genTypeName(tkey),
x.genTypeName(telem), int(telem.Size() + tkey.Size()),
}
funcs := make(template.FuncMap)
funcs["decElemZero"] = func() string {
return x.genZeroValueR(telem)
}
funcs["decElemKindImmutable"] = func() bool {
return genIsImmutable(telem)
}
funcs["decElemKindPtr"] = func() bool {
return telem.Kind() == reflect.Ptr
}
funcs["decElemKindIntf"] = func() bool {
return telem.Kind() == reflect.Interface
}
funcs["decLineVarK"] = func(varname string) string {
x.decVar(varname, tkey, false)
return ""
}
funcs["decLineVar"] = func(varname string) string {
x.decVar(varname, telem, false)
return ""
}
funcs["decLineK"] = func(pfx string) string {
x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(tkey), false)
return ""
}
funcs["decLine"] = func(pfx string) string {
x.decVar(ts.TempVar+pfx+ts.Rand, reflect.PtrTo(telem), false)
return ""
}
funcs["var"] = func(s string) string {
return ts.TempVar + s + ts.Rand
}
tm, err := template.New("").Funcs(funcs).Parse(genDecMapTmpl)
if err != nil {
panic(err)
}
if err = tm.Execute(x.w, &ts); err != nil {
panic(err)
}
}
func (x *genRunner) decStructMapSwitch(kName string, varname string, rtid uintptr, t reflect.Type) {
ti := x.ti.get(rtid, t)
tisfi := ti.sfip // always use sequence from file. decStruct expects same thing.
x.line("switch (" + kName + ") {")
for _, si := range tisfi {
x.line("case \"" + si.encName + "\":")
var t2 reflect.StructField
if si.i != -1 {
t2 = t.Field(int(si.i))
} else {
			// we must accommodate anonymous fields, where the embedded field is a nil pointer in the value.
// t2 = t.FieldByIndex(si.is)
t2typ := t
varname3 := varname
for _, ix := range si.is {
for t2typ.Kind() == reflect.Ptr {
t2typ = t2typ.Elem()
}
t2 = t2typ.Field(ix)
t2typ = t2.Type
varname3 = varname3 + "." + t2.Name
if t2typ.Kind() == reflect.Ptr {
x.linef("if %s == nil { %s = new(%s) }", varname3, varname3, x.genTypeName(t2typ.Elem()))
}
}
}
x.decVar(varname+"."+t2.Name, t2.Type, false)
}
x.line("default:")
// pass the slice here, so that the string will not escape, and maybe save allocation
x.line("z.DecStructFieldNotFound(-1, " + kName + ")")
x.line("} // end switch " + kName)
}
func (x *genRunner) decStructMap(varname, lenvarname string, rtid uintptr, t reflect.Type, style genStructMapStyle) {
tpfx := genTempVarPfx
i := x.varsfx()
kName := tpfx + "s" + i
	// We considered using ReadStringAsBytes, as the Go compiler might optimize the copy away.
	// However, using that was more expensive, as it seems that the switch expression
	// is evaluated each time.
	//
	// We could depend on decodeString using a temporary/shared buffer internally.
	// However, this model of creating a byte array and using it explicitly is faster,
	// and allows optional use of unsafe []byte->string conversion without allocation.
	// Also, ensure that the slice array doesn't escape.
	// That will help escape analysis prevent the allocation as it improves.
// x.line("var " + kName + "Arr = [32]byte{} // default string to decode into")
// x.line("var " + kName + "Slc = " + kName + "Arr[:] // default slice to decode into")
// use the scratch buffer to avoid allocation (most field names are < 32).
x.line("var " + kName + "Slc = z.DecScratchBuffer() // default slice to decode into")
x.line("_ = " + kName + "Slc")
switch style {
case genStructMapStyleLenPrefix:
x.linef("for %sj%s := 0; %sj%s < %s; %sj%s++ {", tpfx, i, tpfx, i, lenvarname, tpfx, i)
case genStructMapStyleCheckBreak:
x.linef("for %sj%s := 0; !r.CheckBreak(); %sj%s++ {", tpfx, i, tpfx, i)
default: // 0, otherwise.
x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length
x.linef("for %sj%s := 0; ; %sj%s++ {", tpfx, i, tpfx, i)
x.linef("if %shl%s { if %sj%s >= %s { break }", tpfx, i, tpfx, i, lenvarname)
x.line("} else { if r.CheckBreak() { break }; }")
}
x.linef("z.DecSendContainerState(codecSelfer_containerMapKey%s)", x.xs)
x.line(kName + "Slc = r.DecodeBytes(" + kName + "Slc, true, true)")
// let string be scoped to this loop alone, so it doesn't escape.
if x.unsafe {
x.line(kName + "SlcHdr := codecSelferUnsafeString" + x.xs + "{uintptr(unsafe.Pointer(&" +
kName + "Slc[0])), len(" + kName + "Slc)}")
x.line(kName + " := *(*string)(unsafe.Pointer(&" + kName + "SlcHdr))")
} else {
x.line(kName + " := string(" + kName + "Slc)")
}
x.linef("z.DecSendContainerState(codecSelfer_containerMapValue%s)", x.xs)
x.decStructMapSwitch(kName, varname, rtid, t)
x.line("} // end for " + tpfx + "j" + i)
x.linef("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs)
}
func (x *genRunner) decStructArray(varname, lenvarname, breakString string, rtid uintptr, t reflect.Type) {
tpfx := genTempVarPfx
i := x.varsfx()
ti := x.ti.get(rtid, t)
tisfi := ti.sfip // always use sequence from file. decStruct expects same thing.
x.linef("var %sj%s int", tpfx, i)
x.linef("var %sb%s bool", tpfx, i) // break
x.linef("var %shl%s bool = %s >= 0", tpfx, i, lenvarname) // has length
for _, si := range tisfi {
var t2 reflect.StructField
if si.i != -1 {
t2 = t.Field(int(si.i))
} else {
			// we must accommodate anonymous fields, where the embedded field is a nil pointer in the value.
// t2 = t.FieldByIndex(si.is)
t2typ := t
varname3 := varname
for _, ix := range si.is {
for t2typ.Kind() == reflect.Ptr {
t2typ = t2typ.Elem()
}
t2 = t2typ.Field(ix)
t2typ = t2.Type
varname3 = varname3 + "." + t2.Name
if t2typ.Kind() == reflect.Ptr {
x.linef("if %s == nil { %s = new(%s) }", varname3, varname3, x.genTypeName(t2typ.Elem()))
}
}
}
x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }",
tpfx, i, tpfx, i, tpfx, i,
tpfx, i, lenvarname, tpfx, i)
x.linef("if %sb%s { z.DecSendContainerState(codecSelfer_containerArrayEnd%s); %s }",
tpfx, i, x.xs, breakString)
x.linef("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
x.decVar(varname+"."+t2.Name, t2.Type, true)
}
// read remaining values and throw away.
x.line("for {")
x.linef("%sj%s++; if %shl%s { %sb%s = %sj%s > %s } else { %sb%s = r.CheckBreak() }",
tpfx, i, tpfx, i, tpfx, i,
tpfx, i, lenvarname, tpfx, i)
x.linef("if %sb%s { break }", tpfx, i)
x.linef("z.DecSendContainerState(codecSelfer_containerArrayElem%s)", x.xs)
x.linef(`z.DecStructFieldNotFound(%sj%s - 1, "")`, tpfx, i)
x.line("}")
x.linef("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs)
}
func (x *genRunner) decStruct(varname string, rtid uintptr, t reflect.Type) {
// if container is map
i := x.varsfx()
x.linef("%sct%s := r.ContainerType()", genTempVarPfx, i)
x.linef("if %sct%s == codecSelferValueTypeMap%s {", genTempVarPfx, i, x.xs)
x.line(genTempVarPfx + "l" + i + " := r.ReadMapStart()")
x.linef("if %sl%s == 0 {", genTempVarPfx, i)
x.linef("z.DecSendContainerState(codecSelfer_containerMapEnd%s)", x.xs)
if genUseOneFunctionForDecStructMap {
x.line("} else { ")
x.linef("x.codecDecodeSelfFromMap(%sl%s, d)", genTempVarPfx, i)
} else {
x.line("} else if " + genTempVarPfx + "l" + i + " > 0 { ")
x.line("x.codecDecodeSelfFromMapLenPrefix(" + genTempVarPfx + "l" + i + ", d)")
x.line("} else {")
x.line("x.codecDecodeSelfFromMapCheckBreak(" + genTempVarPfx + "l" + i + ", d)")
}
x.line("}")
// else if container is array
x.linef("} else if %sct%s == codecSelferValueTypeArray%s {", genTempVarPfx, i, x.xs)
x.line(genTempVarPfx + "l" + i + " := r.ReadArrayStart()")
x.linef("if %sl%s == 0 {", genTempVarPfx, i)
x.linef("z.DecSendContainerState(codecSelfer_containerArrayEnd%s)", x.xs)
x.line("} else { ")
x.linef("x.codecDecodeSelfFromArray(%sl%s, d)", genTempVarPfx, i)
x.line("}")
// else panic
x.line("} else { ")
x.line("panic(codecSelferOnlyMapOrArrayEncodeToStructErr" + x.xs + ")")
x.line("} ")
}
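// Illustrative sketch of the code decStruct emits (assuming genUseOneFunctionForDecStructMap
// is true, genTempVarPfx is "yy", varsfx() returns "2" and x.xs is "1234"; the actual
// identifiers and suffixes vary per generated file):
//
//	yyct2 := r.ContainerType()
//	if yyct2 == codecSelferValueTypeMap1234 {
//		yyl2 := r.ReadMapStart()
//		if yyl2 == 0 {
//			z.DecSendContainerState(codecSelfer_containerMapEnd1234)
//		} else {
//			x.codecDecodeSelfFromMap(yyl2, d)
//		}
//	} else if yyct2 == codecSelferValueTypeArray1234 {
//		yyl2 := r.ReadArrayStart()
//		if yyl2 == 0 {
//			z.DecSendContainerState(codecSelfer_containerArrayEnd1234)
//		} else {
//			x.codecDecodeSelfFromArray(yyl2, d)
//		}
//	} else {
//		panic(codecSelferOnlyMapOrArrayEncodeToStructErr1234)
//	}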
// --------
type genV struct {
// genV is either a primitive (Primitive != "") or a map (MapKey != "") or a slice
MapKey string
Elem string
Primitive string
Size int
}
func (x *genRunner) newGenV(t reflect.Type) (v genV) {
switch t.Kind() {
case reflect.Slice, reflect.Array:
te := t.Elem()
v.Elem = x.genTypeName(te)
v.Size = int(te.Size())
case reflect.Map:
te, tk := t.Elem(), t.Key()
v.Elem = x.genTypeName(te)
v.MapKey = x.genTypeName(tk)
v.Size = int(te.Size() + tk.Size())
default:
panic("unexpected type for newGenV. Requires map or slice type")
}
return
}
func (x *genV) MethodNamePfx(prefix string, prim bool) string {
var name []byte
if prefix != "" {
name = append(name, prefix...)
}
if prim {
name = append(name, genTitleCaseName(x.Primitive)...)
} else {
if x.MapKey == "" {
name = append(name, "Slice"...)
} else {
name = append(name, "Map"...)
name = append(name, genTitleCaseName(x.MapKey)...)
}
name = append(name, genTitleCaseName(x.Elem)...)
}
return string(name)
}
var genCheckVendor = os.Getenv("GO15VENDOREXPERIMENT") == "1"
// genImportPath returns the import path of a non-predeclared named type, or an empty string otherwise.
//
// This handles the misbehaviour that occurs when 1.5-style vendoring is enabled,
// where PkgPath returns the full path, including the vendoring prefix that should have been stripped.
// We strip it here.
func genImportPath(t reflect.Type) (s string) {
s = t.PkgPath()
if genCheckVendor {
// HACK: Misbehaviour occurs in go 1.5. May have to re-visit this later.
// if s contains /vendor/ OR startsWith vendor/, then return everything after it.
const vendorStart = "vendor/"
const vendorInline = "/vendor/"
if i := strings.LastIndex(s, vendorInline); i >= 0 {
s = s[i+len(vendorInline):]
} else if strings.HasPrefix(s, vendorStart) {
s = s[len(vendorStart):]
}
}
return
}
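// Illustrative sketch with hypothetical paths: under GO15VENDOREXPERIMENT=1, a type whose
// PkgPath() is reported as "myapp/vendor/github.com/ugorji/go/codec" is resolved by
// genImportPath to "github.com/ugorji/go/codec", and one reported as "vendor/mylib"
// resolves to "mylib".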
// A Go identifier is (letter|_)[letter|number|_]*
func genGoIdentifier(s string, checkFirstChar bool) string {
b := make([]byte, 0, len(s))
t := make([]byte, 4)
var n int
for i, r := range s {
if checkFirstChar && i == 0 && !unicode.IsLetter(r) {
b = append(b, '_')
}
// r must be unicode_letter, unicode_digit or _
if unicode.IsLetter(r) || unicode.IsDigit(r) {
n = utf8.EncodeRune(t, r)
b = append(b, t[:n]...)
} else {
b = append(b, '_')
}
}
return string(b)
}
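// Illustrative sketch: genGoIdentifier maps arbitrary strings onto valid Go identifiers
// by replacing disallowed runes with '_', e.g.
//
//	genGoIdentifier("my-pkg.Type", true) // "my_pkg_Type"
//	genGoIdentifier("9lives", true)      // "_9lives" (a leading digit gains a '_' prefix)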
func genNonPtr(t reflect.Type) reflect.Type {
for t.Kind() == reflect.Ptr {
t = t.Elem()
}
return t
}
func genTitleCaseName(s string) string {
switch s {
case "interface{}":
return "Intf"
default:
return strings.ToUpper(s[0:1]) + s[1:]
}
}
func genMethodNameT(t reflect.Type, tRef reflect.Type) (n string) {
var ptrPfx string
for t.Kind() == reflect.Ptr {
ptrPfx += "Ptrto"
t = t.Elem()
}
tstr := t.String()
if tn := t.Name(); tn != "" {
if tRef != nil && genImportPath(t) == genImportPath(tRef) {
return ptrPfx + tn
} else {
if genQNameRegex.MatchString(tstr) {
return ptrPfx + strings.Replace(tstr, ".", "_", 1000)
} else {
return ptrPfx + genCustomTypeName(tstr)
}
}
}
switch t.Kind() {
case reflect.Map:
return ptrPfx + "Map" + genMethodNameT(t.Key(), tRef) + genMethodNameT(t.Elem(), tRef)
case reflect.Slice:
return ptrPfx + "Slice" + genMethodNameT(t.Elem(), tRef)
case reflect.Array:
return ptrPfx + "Array" + strconv.FormatInt(int64(t.Len()), 10) + genMethodNameT(t.Elem(), tRef)
case reflect.Chan:
var cx string
switch t.ChanDir() {
case reflect.SendDir:
cx = "ChanSend"
case reflect.RecvDir:
cx = "ChanRecv"
default:
cx = "Chan"
}
return ptrPfx + cx + genMethodNameT(t.Elem(), tRef)
default:
if t == intfTyp {
return ptrPfx + "Interface"
} else {
if tRef != nil && genImportPath(t) == genImportPath(tRef) {
if t.Name() != "" {
return ptrPfx + t.Name()
} else {
return ptrPfx + genCustomTypeName(tstr)
}
} else {
// best way to get the package name inclusive
// return ptrPfx + strings.Replace(tstr, ".", "_", 1000)
// return ptrPfx + genBase64enc.EncodeToString([]byte(tstr))
if t.Name() != "" && genQNameRegex.MatchString(tstr) {
return ptrPfx + strings.Replace(tstr, ".", "_", 1000)
} else {
return ptrPfx + genCustomTypeName(tstr)
}
}
}
}
}
// genCustomTypeName base64-encodes the t.String() value in such a way
// that it can be used within a function name.
func genCustomTypeName(tstr string) string {
len2 := genBase64enc.EncodedLen(len(tstr))
bufx := make([]byte, len2)
genBase64enc.Encode(bufx, []byte(tstr))
for i := len2 - 1; i >= 0; i-- {
if bufx[i] == '=' {
len2--
} else {
break
}
}
return string(bufx[:len2])
}
func genIsImmutable(t reflect.Type) (v bool) {
return isImmutableKind(t.Kind())
}
type genInternal struct {
Values []genV
Unsafe bool
}
func (x genInternal) FastpathLen() (l int) {
for _, v := range x.Values {
if v.Primitive == "" {
l++
}
}
return
}
func genInternalZeroValue(s string) string {
switch s {
case "interface{}":
return "nil"
case "bool":
return "false"
case "string":
return `""`
default:
return "0"
}
}
func genInternalEncCommandAsString(s string, vname string) string {
switch s {
case "uint", "uint8", "uint16", "uint32", "uint64":
return "ee.EncodeUint(uint64(" + vname + "))"
case "int", "int8", "int16", "int32", "int64":
return "ee.EncodeInt(int64(" + vname + "))"
case "string":
return "ee.EncodeString(c_UTF8, " + vname + ")"
case "float32":
return "ee.EncodeFloat32(" + vname + ")"
case "float64":
return "ee.EncodeFloat64(" + vname + ")"
case "bool":
return "ee.EncodeBool(" + vname + ")"
case "symbol":
return "ee.EncodeSymbol(" + vname + ")"
default:
return "e.encode(" + vname + ")"
}
}
func genInternalDecCommandAsString(s string) string {
switch s {
case "uint":
return "uint(dd.DecodeUint(uintBitsize))"
case "uint8":
return "uint8(dd.DecodeUint(8))"
case "uint16":
return "uint16(dd.DecodeUint(16))"
case "uint32":
return "uint32(dd.DecodeUint(32))"
case "uint64":
return "dd.DecodeUint(64)"
case "uintptr":
return "uintptr(dd.DecodeUint(uintBitsize))"
case "int":
return "int(dd.DecodeInt(intBitsize))"
case "int8":
return "int8(dd.DecodeInt(8))"
case "int16":
return "int16(dd.DecodeInt(16))"
case "int32":
return "int32(dd.DecodeInt(32))"
case "int64":
return "dd.DecodeInt(64)"
case "string":
return "dd.DecodeString()"
case "float32":
return "float32(dd.DecodeFloat(true))"
case "float64":
return "dd.DecodeFloat(false)"
case "bool":
return "dd.DecodeBool()"
default:
panic(errors.New("gen internal: unknown type for decode: " + s))
}
}
func genInternalSortType(s string, elem bool) string {
for _, v := range [...]string{"int", "uint", "float", "bool", "string"} {
if strings.HasPrefix(s, v) {
if elem {
if v == "int" || v == "uint" || v == "float" {
return v + "64"
} else {
return v
}
}
return v + "Slice"
}
}
panic("sorttype: unexpected type: " + s)
}
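// Illustrative sketch: genInternalSortType picks the sort-helper name used by the
// fast-path templates, e.g. genInternalSortType("int8", true) returns "int64"
// (element values are widened to 64 bits), while genInternalSortType("string", false)
// returns "stringSlice".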
// var genInternalMu sync.Mutex
var genInternalV genInternal
var genInternalTmplFuncs template.FuncMap
var genInternalOnce sync.Once
func genInternalInit() {
types := [...]string{
"interface{}",
"string",
"float32",
"float64",
"uint",
"uint8",
"uint16",
"uint32",
"uint64",
"uintptr",
"int",
"int8",
"int16",
"int32",
"int64",
"bool",
}
// keep as slice, so it is in specific iteration order.
// Initial order was uint64, string, interface{}, int, int64
mapvaltypes := [...]string{
"interface{}",
"string",
"uint",
"uint8",
"uint16",
"uint32",
"uint64",
"uintptr",
"int",
"int8",
"int16",
"int32",
"int64",
"float32",
"float64",
"bool",
}
wordSizeBytes := int(intBitsize) / 8
mapvaltypes2 := map[string]int{
"interface{}": 2 * wordSizeBytes,
"string": 2 * wordSizeBytes,
"uint": 1 * wordSizeBytes,
"uint8": 1,
"uint16": 2,
"uint32": 4,
"uint64": 8,
"uintptr": 1 * wordSizeBytes,
"int": 1 * wordSizeBytes,
"int8": 1,
"int16": 2,
"int32": 4,
"int64": 8,
"float32": 4,
"float64": 8,
"bool": 1,
}
var gt genInternal
	// For each slice or map type, there must be a (symmetrical) Encode and Decode fast-path function
for _, s := range types {
gt.Values = append(gt.Values, genV{Primitive: s, Size: mapvaltypes2[s]})
if s != "uint8" { // do not generate fast path for slice of bytes. Treat specially already.
gt.Values = append(gt.Values, genV{Elem: s, Size: mapvaltypes2[s]})
}
if _, ok := mapvaltypes2[s]; !ok {
gt.Values = append(gt.Values, genV{MapKey: s, Elem: s, Size: 2 * mapvaltypes2[s]})
}
for _, ms := range mapvaltypes {
gt.Values = append(gt.Values, genV{MapKey: s, Elem: ms, Size: mapvaltypes2[s] + mapvaltypes2[ms]})
}
}
funcs := make(template.FuncMap)
// funcs["haspfx"] = strings.HasPrefix
funcs["encmd"] = genInternalEncCommandAsString
funcs["decmd"] = genInternalDecCommandAsString
funcs["zerocmd"] = genInternalZeroValue
funcs["hasprefix"] = strings.HasPrefix
funcs["sorttype"] = genInternalSortType
genInternalV = gt
genInternalTmplFuncs = funcs
}
// genInternalGoFile is used to generate source files from templates.
// It is run by the program author alone.
// Unfortunately, it has to be exported so that it can be called from a command line tool.
// *** DO NOT USE ***
func genInternalGoFile(r io.Reader, w io.Writer, safe bool) (err error) {
genInternalOnce.Do(genInternalInit)
gt := genInternalV
gt.Unsafe = !safe
t := template.New("").Funcs(genInternalTmplFuncs)
tmplstr, err := ioutil.ReadAll(r)
if err != nil {
return
}
if t, err = t.Parse(string(tmplstr)); err != nil {
return
}
var out bytes.Buffer
err = t.Execute(&out, gt)
if err != nil {
return
}
bout, err := format.Source(out.Bytes())
if err != nil {
w.Write(out.Bytes()) // write out if error, so we can still see.
// w.Write(bout) // write out if error, as much as possible, so we can still see.
return
}
w.Write(bout)
return
}
| ["\"GO15VENDOREXPERIMENT\""] | [] | ["GO15VENDOREXPERIMENT"] | [] | ["GO15VENDOREXPERIMENT"] | go | 1 | 0 | |
environment/lib/python3.8/site-packages/seaborn/utils.py | """Small plotting-related utility functions."""
import colorsys
import os
import numpy as np
from scipy import stats
import pandas as pd
import matplotlib as mpl
import matplotlib.colors as mplcol
import matplotlib.pyplot as plt
import warnings
from urllib.request import urlopen, urlretrieve
from http.client import HTTPException
__all__ = ["desaturate", "saturate", "set_hls_values",
"despine", "get_dataset_names", "get_data_home", "load_dataset"]
def remove_na(arr):
"""Helper method for removing NA values from array-like.
Parameters
----------
arr : array-like
The array-like from which to remove NA values.
Returns
-------
clean_arr : array-like
The original array with NA values removed.
"""
return arr[pd.notnull(arr)]
def sort_df(df, *args, **kwargs):
"""Wrapper to handle different pandas sorting API pre/post 0.17."""
msg = "This function is deprecated and will be removed in a future version"
warnings.warn(msg)
try:
return df.sort_values(*args, **kwargs)
except AttributeError:
return df.sort(*args, **kwargs)
def ci_to_errsize(cis, heights):
"""Convert intervals to error arguments relative to plot heights.
Parameters
----------
cis: 2 x n sequence
sequence of confidence interval limits
heights : n sequence
sequence of plot heights
Returns
-------
errsize : 2 x n array
sequence of error size relative to height values in correct
format as argument for plt.bar
"""
cis = np.atleast_2d(cis).reshape(2, -1)
heights = np.atleast_1d(heights)
errsize = []
for i, (low, high) in enumerate(np.transpose(cis)):
h = heights[i]
elow = h - low
ehigh = high - h
errsize.append([elow, ehigh])
errsize = np.asarray(errsize).T
return errsize
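# Illustrative sketch of ci_to_errsize with made-up numbers: for two bars of heights
# [3, 5] and confidence limits [[2, 3], [4, 7]] (lower limits, then upper limits):
#
#   ci_to_errsize([[2, 3], [4, 7]], [3, 5])
#   # -> array([[1, 2],
#   #           [1, 2]])
#
# The first row holds the distance from each bar down to its lower limit, the second
# row the distance up to its upper limit, matching the yerr format expected by plt.bar.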
def pmf_hist(a, bins=10):
"""Return arguments to plt.bar for pmf-like histogram of an array.
DEPRECATED: will be removed in a future version.
Parameters
----------
a: array-like
array to make histogram of
bins: int
number of bins
Returns
-------
x: array
left x position of bars
h: array
height of bars
w: float
width of bars
"""
msg = "This function is deprecated and will be removed in a future version"
warnings.warn(msg)
n, x = np.histogram(a, bins)
h = n / n.sum()
w = x[1] - x[0]
return x[:-1], h, w
def desaturate(color, prop):
"""Decrease the saturation channel of a color by some percent.
Parameters
----------
color : matplotlib color
hex, rgb-tuple, or html color name
prop : float
saturation channel of color will be multiplied by this value
Returns
-------
new_color : rgb tuple
desaturated color code in RGB tuple representation
"""
# Check inputs
if not 0 <= prop <= 1:
raise ValueError("prop must be between 0 and 1")
# Get rgb tuple rep
rgb = mplcol.colorConverter.to_rgb(color)
# Convert to hls
h, l, s = colorsys.rgb_to_hls(*rgb)
# Desaturate the saturation channel
s *= prop
# Convert back to rgb
new_color = colorsys.hls_to_rgb(h, l, s)
return new_color
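# Illustrative sketch of desaturate (values are approximate):
#
#   desaturate("red", 0.5)    # -> roughly (0.75, 0.25, 0.25); saturation halved in HLS space
#   desaturate("#00ff00", 1)  # -> (0.0, 1.0, 0.0); prop=1 leaves the color unchanged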
def saturate(color):
"""Return a fully saturated color with the same hue.
Parameters
----------
color : matplotlib color
hex, rgb-tuple, or html color name
Returns
-------
new_color : rgb tuple
saturated color code in RGB tuple representation
"""
return set_hls_values(color, s=1)
def set_hls_values(color, h=None, l=None, s=None): # noqa
"""Independently manipulate the h, l, or s channels of a color.
Parameters
----------
color : matplotlib color
hex, rgb-tuple, or html color name
h, l, s : floats between 0 and 1, or None
new values for each channel in hls space
Returns
-------
new_color : rgb tuple
new color code in RGB tuple representation
"""
# Get an RGB tuple representation
rgb = mplcol.colorConverter.to_rgb(color)
vals = list(colorsys.rgb_to_hls(*rgb))
for i, val in enumerate([h, l, s]):
if val is not None:
vals[i] = val
rgb = colorsys.hls_to_rgb(*vals)
return rgb
def axlabel(xlabel, ylabel, **kwargs):
"""Grab current axis and label it."""
ax = plt.gca()
ax.set_xlabel(xlabel, **kwargs)
ax.set_ylabel(ylabel, **kwargs)
def despine(fig=None, ax=None, top=True, right=True, left=False,
bottom=False, offset=None, trim=False):
"""Remove the top and right spines from plot(s).
    Parameters
    ----------
    fig : matplotlib figure, optional
Figure to despine all axes of, default uses current figure.
ax : matplotlib axes, optional
Specific axes object to despine.
top, right, left, bottom : boolean, optional
If True, remove that spine.
offset : int or dict, optional
Absolute distance, in points, spines should be moved away
from the axes (negative values move spines inward). A single value
applies to all spines; a dict can be used to set offset values per
side.
trim : bool, optional
If True, limit spines to the smallest and largest major tick
on each non-despined axis.
Returns
-------
None
"""
# Get references to the axes we want
if fig is None and ax is None:
axes = plt.gcf().axes
elif fig is not None:
axes = fig.axes
elif ax is not None:
axes = [ax]
for ax_i in axes:
for side in ["top", "right", "left", "bottom"]:
# Toggle the spine objects
is_visible = not locals()[side]
ax_i.spines[side].set_visible(is_visible)
if offset is not None and is_visible:
try:
val = offset.get(side, 0)
except AttributeError:
val = offset
ax_i.spines[side].set_position(('outward', val))
# Potentially move the ticks
if left and not right:
maj_on = any(
t.tick1line.get_visible()
for t in ax_i.yaxis.majorTicks
)
min_on = any(
t.tick1line.get_visible()
for t in ax_i.yaxis.minorTicks
)
ax_i.yaxis.set_ticks_position("right")
for t in ax_i.yaxis.majorTicks:
t.tick2line.set_visible(maj_on)
for t in ax_i.yaxis.minorTicks:
t.tick2line.set_visible(min_on)
if bottom and not top:
maj_on = any(
t.tick1line.get_visible()
for t in ax_i.xaxis.majorTicks
)
min_on = any(
t.tick1line.get_visible()
for t in ax_i.xaxis.minorTicks
)
ax_i.xaxis.set_ticks_position("top")
for t in ax_i.xaxis.majorTicks:
t.tick2line.set_visible(maj_on)
for t in ax_i.xaxis.minorTicks:
t.tick2line.set_visible(min_on)
if trim:
# clip off the parts of the spines that extend past major ticks
xticks = np.asarray(ax_i.get_xticks())
if xticks.size:
firsttick = np.compress(xticks >= min(ax_i.get_xlim()),
xticks)[0]
lasttick = np.compress(xticks <= max(ax_i.get_xlim()),
xticks)[-1]
ax_i.spines['bottom'].set_bounds(firsttick, lasttick)
ax_i.spines['top'].set_bounds(firsttick, lasttick)
newticks = xticks.compress(xticks <= lasttick)
newticks = newticks.compress(newticks >= firsttick)
ax_i.set_xticks(newticks)
yticks = np.asarray(ax_i.get_yticks())
if yticks.size:
firsttick = np.compress(yticks >= min(ax_i.get_ylim()),
yticks)[0]
lasttick = np.compress(yticks <= max(ax_i.get_ylim()),
yticks)[-1]
ax_i.spines['left'].set_bounds(firsttick, lasttick)
ax_i.spines['right'].set_bounds(firsttick, lasttick)
newticks = yticks.compress(yticks <= lasttick)
newticks = newticks.compress(newticks >= firsttick)
ax_i.set_yticks(newticks)
def _kde_support(data, bw, gridsize, cut, clip):
"""Establish support for a kernel density estimate."""
support_min = max(data.min() - bw * cut, clip[0])
support_max = min(data.max() + bw * cut, clip[1])
return np.linspace(support_min, support_max, gridsize)
def percentiles(a, pcts, axis=None):
"""Like scoreatpercentile but can take and return array of percentiles.
DEPRECATED: will be removed in a future version.
Parameters
----------
a : array
data
pcts : sequence of percentile values
percentile or percentiles to find score at
axis : int or None
if not None, computes scores over this axis
Returns
-------
scores: array
array of scores at requested percentiles
first dimension is length of object passed to ``pcts``
"""
msg = "This function is deprecated and will be removed in a future version"
warnings.warn(msg)
scores = []
try:
n = len(pcts)
except TypeError:
pcts = [pcts]
n = 0
for i, p in enumerate(pcts):
if axis is None:
score = stats.scoreatpercentile(a.ravel(), p)
else:
score = np.apply_along_axis(stats.scoreatpercentile, axis, a, p)
scores.append(score)
scores = np.asarray(scores)
if not n:
scores = scores.squeeze()
return scores
def ci(a, which=95, axis=None):
"""Return a percentile range from an array of values."""
p = 50 - which / 2, 50 + which / 2
return np.percentile(a, p, axis)
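# Illustrative sketch of ci: which=95 requests the 2.5th and 97.5th percentiles, e.g.
#
#   ci(np.arange(101), which=95)  # -> array([ 2.5, 97.5])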
def sig_stars(p):
"""Return a R-style significance string corresponding to p values.
DEPRECATED: will be removed in a future version.
"""
msg = "This function is deprecated and will be removed in a future version"
warnings.warn(msg)
if p < 0.001:
return "***"
elif p < 0.01:
return "**"
elif p < 0.05:
return "*"
elif p < 0.1:
return "."
return ""
def iqr(a):
"""Calculate the IQR for an array of numbers."""
a = np.asarray(a)
q1 = stats.scoreatpercentile(a, 25)
q3 = stats.scoreatpercentile(a, 75)
return q3 - q1
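# Illustrative sketch of iqr:
#
#   iqr([1, 2, 3, 4, 5])  # -> 2.0 (75th percentile 4.0 minus 25th percentile 2.0)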
def get_dataset_names():
"""Report available example datasets, useful for reporting issues."""
# delayed import to not demand bs4 unless this function is actually used
from bs4 import BeautifulSoup
http = urlopen('https://github.com/mwaskom/seaborn-data/')
gh_list = BeautifulSoup(http)
return [l.text.replace('.csv', '')
for l in gh_list.find_all("a", {"class": "js-navigation-open"})
if l.text.endswith('.csv')]
def get_data_home(data_home=None):
"""Return a path to the cache directory for example datasets.
This directory is then used by :func:`load_dataset`.
If the ``data_home`` argument is not specified, it tries to read from the
``SEABORN_DATA`` environment variable and defaults to ``~/seaborn-data``.
"""
if data_home is None:
data_home = os.environ.get('SEABORN_DATA',
os.path.join('~', 'seaborn-data'))
data_home = os.path.expanduser(data_home)
if not os.path.exists(data_home):
os.makedirs(data_home)
return data_home
def load_dataset(name, cache=True, data_home=None, **kws):
"""Load an example dataset from the online repository (requires internet).
This function provides quick access to a small number of example datasets
that are useful for documenting seaborn or generating reproducible examples
for bug reports. It is not necessary for normal usage.
Note that some of the datasets have a small amount of preprocessing applied
to define a proper ordering for categorical variables.
Use :func:`get_dataset_names` to see a list of available datasets.
Parameters
----------
name : str
Name of the dataset (``{name}.csv`` on
https://github.com/mwaskom/seaborn-data).
cache : boolean, optional
If True, try to load from the local cache first, and save to the cache
if a download is required.
data_home : string, optional
The directory in which to cache data; see :func:`get_data_home`.
kws : keys and values, optional
        Additional keyword arguments are passed through to
:func:`pandas.read_csv`.
Returns
-------
df : :class:`pandas.DataFrame`
Tabular data, possibly with some preprocessing applied.
"""
path = ("https://raw.githubusercontent.com/"
"mwaskom/seaborn-data/master/{}.csv")
full_path = path.format(name)
if cache:
cache_path = os.path.join(get_data_home(data_home),
os.path.basename(full_path))
if not os.path.exists(cache_path):
urlretrieve(full_path, cache_path)
full_path = cache_path
df = pd.read_csv(full_path, **kws)
if df.iloc[-1].isnull().all():
df = df.iloc[:-1]
# Set some columns as a categorical type with ordered levels
if name == "tips":
df["day"] = pd.Categorical(df["day"], ["Thur", "Fri", "Sat", "Sun"])
df["sex"] = pd.Categorical(df["sex"], ["Male", "Female"])
df["time"] = pd.Categorical(df["time"], ["Lunch", "Dinner"])
df["smoker"] = pd.Categorical(df["smoker"], ["Yes", "No"])
if name == "flights":
df["month"] = pd.Categorical(df["month"], df.month.unique())
if name == "exercise":
df["time"] = pd.Categorical(df["time"], ["1 min", "15 min", "30 min"])
df["kind"] = pd.Categorical(df["kind"], ["rest", "walking", "running"])
df["diet"] = pd.Categorical(df["diet"], ["no fat", "low fat"])
if name == "titanic":
df["class"] = pd.Categorical(df["class"], ["First", "Second", "Third"])
df["deck"] = pd.Categorical(df["deck"], list("ABCDEFG"))
return df
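# Illustrative usage sketch (requires internet access on the first call; later calls
# read from the cache directory returned by get_data_home()):
#
#   tips = load_dataset("tips")
#   tips.head()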
def axis_ticklabels_overlap(labels):
"""Return a boolean for whether the list of ticklabels have overlaps.
Parameters
----------
labels : list of matplotlib ticklabels
Returns
-------
overlap : boolean
True if any of the labels overlap.
"""
if not labels:
return False
try:
bboxes = [l.get_window_extent() for l in labels]
overlaps = [b.count_overlaps(bboxes) for b in bboxes]
return max(overlaps) > 1
except RuntimeError:
# Issue on macos backend raises an error in the above code
return False
def axes_ticklabels_overlap(ax):
"""Return booleans for whether the x and y ticklabels on an Axes overlap.
Parameters
----------
ax : matplotlib Axes
Returns
-------
x_overlap, y_overlap : booleans
True when the labels on that axis overlap.
"""
return (axis_ticklabels_overlap(ax.get_xticklabels()),
axis_ticklabels_overlap(ax.get_yticklabels()))
def categorical_order(values, order=None):
"""Return a list of unique data values.
Determine an ordered list of levels in ``values``.
Parameters
----------
values : list, array, Categorical, or Series
Vector of "categorical" values
order : list-like, optional
Desired order of category levels to override the order determined
from the ``values`` object.
Returns
-------
order : list
Ordered list of category levels not including null values.
"""
if order is None:
if hasattr(values, "categories"):
order = values.categories
else:
try:
order = values.cat.categories
except (TypeError, AttributeError):
try:
order = values.unique()
except AttributeError:
order = pd.unique(values)
try:
np.asarray(values).astype(np.float)
order = np.sort(order)
except (ValueError, TypeError):
order = order
order = filter(pd.notnull, order)
return list(order)
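# Illustrative sketch of categorical_order:
#
#   categorical_order(pd.Series(["b", "a", "c", "a"]))   # -> ["b", "a", "c"] (order of appearance)
#   categorical_order([3, 1, 2])                         # -> [1, 2, 3] (numeric values are sorted)
#   categorical_order(["b", "a"], order=["a", "b"])      # -> ["a", "b"] (explicit order wins)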
def locator_to_legend_entries(locator, limits, dtype):
"""Return levels and formatted levels for brief numeric legends."""
raw_levels = locator.tick_values(*limits).astype(dtype)
class dummy_axis:
def get_view_interval(self):
return limits
if isinstance(locator, mpl.ticker.LogLocator):
formatter = mpl.ticker.LogFormatter()
else:
formatter = mpl.ticker.ScalarFormatter()
formatter.axis = dummy_axis()
# TODO: The following two lines should be replaced
# once pinned matplotlib>=3.1.0 with:
# formatted_levels = formatter.format_ticks(raw_levels)
formatter.set_locs(raw_levels)
formatted_levels = [formatter(x) for x in raw_levels]
return raw_levels, formatted_levels
def get_color_cycle():
"""Return the list of colors in the current matplotlib color cycle
Parameters
----------
None
Returns
-------
colors : list
List of matplotlib colors in the current cycle, or dark gray if
the current color cycle is empty.
"""
cycler = mpl.rcParams['axes.prop_cycle']
return cycler.by_key()['color'] if 'color' in cycler.keys else [".15"]
def relative_luminance(color):
"""Calculate the relative luminance of a color according to W3C standards
Parameters
----------
color : matplotlib color or sequence of matplotlib colors
Hex code, rgb-tuple, or html color name.
Returns
-------
luminance : float(s) between 0 and 1
"""
rgb = mpl.colors.colorConverter.to_rgba_array(color)[:, :3]
rgb = np.where(rgb <= .03928, rgb / 12.92, ((rgb + .055) / 1.055) ** 2.4)
lum = rgb.dot([.2126, .7152, .0722])
try:
return lum.item()
except ValueError:
return lum
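# Illustrative sketch of relative_luminance:
#
#   relative_luminance("white")               # -> 1.0
#   relative_luminance("#000000")             # -> 0.0
#   relative_luminance(["white", "#000000"])  # -> array([1., 0.])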
def to_utf8(obj):
"""Return a string representing a Python object.
Strings (i.e. type ``str``) are returned unchanged.
Byte strings (i.e. type ``bytes``) are returned as UTF-8-decoded strings.
For other objects, the method ``__str__()`` is called, and the result is
returned as a string.
Parameters
----------
obj : object
Any Python object
Returns
-------
s : str
UTF-8-decoded string representation of ``obj``
"""
if isinstance(obj, str):
return obj
try:
return obj.decode(encoding="utf-8")
except AttributeError: # obj is not bytes-like
return str(obj)
def _network(t=None, url='https://google.com'):
"""
Decorator that will skip a test if `url` is unreachable.
Parameters
----------
t : function, optional
url : str, optional
"""
import nose
if t is None:
return lambda x: _network(x, url=url)
def wrapper(*args, **kwargs):
# attempt to connect
try:
f = urlopen(url)
except (IOError, HTTPException):
raise nose.SkipTest()
else:
f.close()
return t(*args, **kwargs)
return wrapper
| [] | [] | ["SEABORN_DATA"] | [] | ["SEABORN_DATA"] | python | 1 | 0 | |
twitter.py | #!/usr/bin/python2.4
#
# Copyright 2007 The Python-Twitter Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''A library that provides a Python interface to the Twitter API'''
__author__ = '[email protected]'
__version__ = '0.8.3'
import base64
import calendar
import datetime
import httplib
import os
import rfc822
import sys
import tempfile
import textwrap
import time
import calendar
import urllib
import urllib2
import urlparse
import gzip
import StringIO
try:
# Python >= 2.6
import json as simplejson
except ImportError:
try:
# Python < 2.6
import simplejson
except ImportError:
try:
# Google App Engine
from django.utils import simplejson
except ImportError:
raise ImportError, "Unable to load a json library"
# parse_qsl moved to urlparse module in v2.6
try:
from urlparse import parse_qsl, parse_qs
except ImportError:
from cgi import parse_qsl, parse_qs
try:
from hashlib import md5
except ImportError:
from md5 import md5
import oauth2 as oauth
CHARACTER_LIMIT = 140
# A singleton representing a lazily instantiated FileCache.
DEFAULT_CACHE = object()
REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token'
ACCESS_TOKEN_URL = 'https://api.twitter.com/oauth/access_token'
AUTHORIZATION_URL = 'https://api.twitter.com/oauth/authorize'
SIGNIN_URL = 'https://api.twitter.com/oauth/authenticate'
class TwitterError(Exception):
'''Base class for Twitter errors'''
@property
def message(self):
'''Returns the first argument used to construct this error.'''
return self.args[0]
class Status(object):
'''A class representing the Status structure used by the twitter API.
The Status structure exposes the following properties:
status.created_at
status.created_at_in_seconds # read only
status.favorited
status.in_reply_to_screen_name
status.in_reply_to_user_id
status.in_reply_to_status_id
status.truncated
status.source
status.id
status.text
status.location
status.relative_created_at # read only
status.user
status.urls
status.user_mentions
status.hashtags
status.geo
status.place
status.coordinates
status.contributors
'''
def __init__(self,
created_at=None,
favorited=None,
id=None,
text=None,
location=None,
user=None,
in_reply_to_screen_name=None,
in_reply_to_user_id=None,
in_reply_to_status_id=None,
truncated=None,
source=None,
now=None,
urls=None,
user_mentions=None,
hashtags=None,
geo=None,
place=None,
coordinates=None,
contributors=None,
retweeted=None,
retweeted_status=None,
retweet_count=None):
'''An object to hold a Twitter status message.
This class is normally instantiated by the twitter.Api class and
returned in a sequence.
Note: Dates are posted in the form "Sat Jan 27 04:17:38 +0000 2007"
Args:
created_at:
The time this status message was posted. [Optional]
favorited:
Whether this is a favorite of the authenticated user. [Optional]
id:
The unique id of this status message. [Optional]
text:
The text of this status message. [Optional]
location:
the geolocation string associated with this message. [Optional]
relative_created_at:
A human readable string representing the posting time. [Optional]
user:
A twitter.User instance representing the person posting the
message. [Optional]
now:
        The current time, if the client chooses to set it.
Defaults to the wall clock time. [Optional]
urls:
user_mentions:
hashtags:
geo:
place:
coordinates:
contributors:
retweeted:
retweeted_status:
retweet_count:
'''
self.created_at = created_at
self.favorited = favorited
self.id = id
self.text = text
self.location = location
self.user = user
self.now = now
self.in_reply_to_screen_name = in_reply_to_screen_name
self.in_reply_to_user_id = in_reply_to_user_id
self.in_reply_to_status_id = in_reply_to_status_id
self.truncated = truncated
self.retweeted = retweeted
self.source = source
self.urls = urls
self.user_mentions = user_mentions
self.hashtags = hashtags
self.geo = geo
self.place = place
self.coordinates = coordinates
self.contributors = contributors
self.retweeted_status = retweeted_status
self.retweet_count = retweet_count
def GetCreatedAt(self):
'''Get the time this status message was posted.
Returns:
The time this status message was posted
'''
return self._created_at
def SetCreatedAt(self, created_at):
'''Set the time this status message was posted.
Args:
created_at:
The time this status message was created
'''
self._created_at = created_at
created_at = property(GetCreatedAt, SetCreatedAt,
doc='The time this status message was posted.')
def GetCreatedAtInSeconds(self):
'''Get the time this status message was posted, in seconds since the epoch.
Returns:
The time this status message was posted, in seconds since the epoch.
'''
return calendar.timegm(rfc822.parsedate(self.created_at))
created_at_in_seconds = property(GetCreatedAtInSeconds,
doc="The time this status message was "
"posted, in seconds since the epoch")
def GetFavorited(self):
'''Get the favorited setting of this status message.
Returns:
True if this status message is favorited; False otherwise
'''
return self._favorited
def SetFavorited(self, favorited):
'''Set the favorited state of this status message.
Args:
favorited:
boolean True/False favorited state of this status message
'''
self._favorited = favorited
favorited = property(GetFavorited, SetFavorited,
doc='The favorited state of this status message.')
def GetId(self):
'''Get the unique id of this status message.
Returns:
The unique id of this status message
'''
return self._id
def SetId(self, id):
'''Set the unique id of this status message.
Args:
id:
The unique id of this status message
'''
self._id = id
id = property(GetId, SetId,
doc='The unique id of this status message.')
def GetInReplyToScreenName(self):
return self._in_reply_to_screen_name
def SetInReplyToScreenName(self, in_reply_to_screen_name):
self._in_reply_to_screen_name = in_reply_to_screen_name
in_reply_to_screen_name = property(GetInReplyToScreenName, SetInReplyToScreenName,
doc='')
def GetInReplyToUserId(self):
return self._in_reply_to_user_id
def SetInReplyToUserId(self, in_reply_to_user_id):
self._in_reply_to_user_id = in_reply_to_user_id
in_reply_to_user_id = property(GetInReplyToUserId, SetInReplyToUserId,
doc='')
def GetInReplyToStatusId(self):
return self._in_reply_to_status_id
def SetInReplyToStatusId(self, in_reply_to_status_id):
self._in_reply_to_status_id = in_reply_to_status_id
in_reply_to_status_id = property(GetInReplyToStatusId, SetInReplyToStatusId,
doc='')
def GetTruncated(self):
return self._truncated
def SetTruncated(self, truncated):
self._truncated = truncated
truncated = property(GetTruncated, SetTruncated,
doc='')
def GetRetweeted(self):
return self._retweeted
def SetRetweeted(self, retweeted):
self._retweeted = retweeted
retweeted = property(GetRetweeted, SetRetweeted,
doc='')
def GetSource(self):
return self._source
def SetSource(self, source):
self._source = source
source = property(GetSource, SetSource,
doc='')
def GetText(self):
'''Get the text of this status message.
Returns:
The text of this status message.
'''
return self._text
def SetText(self, text):
'''Set the text of this status message.
Args:
text:
The text of this status message
'''
self._text = text
text = property(GetText, SetText,
doc='The text of this status message')
def GetLocation(self):
'''Get the geolocation associated with this status message
Returns:
The geolocation string of this status message.
'''
return self._location
def SetLocation(self, location):
'''Set the geolocation associated with this status message
Args:
location:
The geolocation string of this status message
'''
self._location = location
location = property(GetLocation, SetLocation,
doc='The geolocation string of this status message')
def GetRelativeCreatedAt(self):
    '''Get a human readable string representing the posting time
Returns:
A human readable string representing the posting time
'''
fudge = 1.25
delta = long(self.now) - long(self.created_at_in_seconds)
if delta < (1 * fudge):
return 'about a second ago'
elif delta < (60 * (1/fudge)):
return 'about %d seconds ago' % (delta)
elif delta < (60 * fudge):
return 'about a minute ago'
elif delta < (60 * 60 * (1/fudge)):
return 'about %d minutes ago' % (delta / 60)
elif delta < (60 * 60 * fudge) or delta / (60 * 60) == 1:
return 'about an hour ago'
elif delta < (60 * 60 * 24 * (1/fudge)):
return 'about %d hours ago' % (delta / (60 * 60))
elif delta < (60 * 60 * 24 * fudge) or delta / (60 * 60 * 24) == 1:
return 'about a day ago'
else:
return 'about %d days ago' % (delta / (60 * 60 * 24))
relative_created_at = property(GetRelativeCreatedAt,
doc='Get a human readable string representing '
'the posting time')
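  # Illustrative sketch of the buckets above (delta is now - created_at_in_seconds):
  # a delta of 300 seconds yields 'about 5 minutes ago', 5400 yields 'about an hour ago',
  # and 200000 yields 'about 2 days ago'.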
def GetUser(self):
    '''Get a twitter.User representing the entity posting this status message.
Returns:
      A twitter.User representing the entity posting this status message
'''
return self._user
def SetUser(self, user):
    '''Set a twitter.User representing the entity posting this status message.
Args:
user:
        A twitter.User representing the entity posting this status message
'''
self._user = user
user = property(GetUser, SetUser,
                  doc='A twitter.User representing the entity posting this '
'status message')
def GetNow(self):
'''Get the wallclock time for this status message.
Used to calculate relative_created_at. Defaults to the time
the object was instantiated.
Returns:
Whatever the status instance believes the current time to be,
in seconds since the epoch.
'''
if self._now is None:
self._now = time.time()
return self._now
def SetNow(self, now):
'''Set the wallclock time for this status message.
Used to calculate relative_created_at. Defaults to the time
the object was instantiated.
Args:
now:
The wallclock time for this instance.
'''
self._now = now
now = property(GetNow, SetNow,
doc='The wallclock time for this status instance.')
def GetGeo(self):
return self._geo
def SetGeo(self, geo):
self._geo = geo
geo = property(GetGeo, SetGeo,
doc='')
def GetPlace(self):
return self._place
def SetPlace(self, place):
self._place = place
place = property(GetPlace, SetPlace,
doc='')
def GetCoordinates(self):
return self._coordinates
def SetCoordinates(self, coordinates):
self._coordinates = coordinates
coordinates = property(GetCoordinates, SetCoordinates,
doc='')
def GetContributors(self):
return self._contributors
def SetContributors(self, contributors):
self._contributors = contributors
contributors = property(GetContributors, SetContributors,
doc='')
def GetRetweeted_status(self):
return self._retweeted_status
def SetRetweeted_status(self, retweeted_status):
self._retweeted_status = retweeted_status
retweeted_status = property(GetRetweeted_status, SetRetweeted_status,
doc='')
def GetRetweetCount(self):
return self._retweet_count
def SetRetweetCount(self, retweet_count):
self._retweet_count = retweet_count
retweet_count = property(GetRetweetCount, SetRetweetCount,
doc='')
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
try:
return other and \
self.created_at == other.created_at and \
self.id == other.id and \
self.text == other.text and \
self.location == other.location and \
self.user == other.user and \
self.in_reply_to_screen_name == other.in_reply_to_screen_name and \
self.in_reply_to_user_id == other.in_reply_to_user_id and \
self.in_reply_to_status_id == other.in_reply_to_status_id and \
self.truncated == other.truncated and \
self.retweeted == other.retweeted and \
self.favorited == other.favorited and \
self.source == other.source and \
self.geo == other.geo and \
self.place == other.place and \
self.coordinates == other.coordinates and \
self.contributors == other.contributors and \
self.retweeted_status == other.retweeted_status and \
self.retweet_count == other.retweet_count
except AttributeError:
return False
def __str__(self):
'''A string representation of this twitter.Status instance.
The return value is the same as the JSON string representation.
Returns:
A string representation of this twitter.Status instance.
'''
return self.AsJsonString()
def AsJsonString(self):
'''A JSON string representation of this twitter.Status instance.
Returns:
A JSON string representation of this twitter.Status instance
'''
return simplejson.dumps(self.AsDict(), sort_keys=True)
def AsDict(self):
'''A dict representation of this twitter.Status instance.
The return value uses the same key names as the JSON representation.
    Returns:
A dict representing this twitter.Status instance
'''
data = {}
if self.created_at:
data['created_at'] = self.created_at
if self.favorited:
data['favorited'] = self.favorited
if self.id:
data['id'] = self.id
if self.text:
data['text'] = self.text
if self.location:
data['location'] = self.location
if self.user:
data['user'] = self.user.AsDict()
if self.in_reply_to_screen_name:
data['in_reply_to_screen_name'] = self.in_reply_to_screen_name
if self.in_reply_to_user_id:
data['in_reply_to_user_id'] = self.in_reply_to_user_id
if self.in_reply_to_status_id:
data['in_reply_to_status_id'] = self.in_reply_to_status_id
if self.truncated is not None:
data['truncated'] = self.truncated
if self.retweeted is not None:
data['retweeted'] = self.retweeted
if self.favorited is not None:
data['favorited'] = self.favorited
if self.source:
data['source'] = self.source
if self.geo:
data['geo'] = self.geo
if self.place:
data['place'] = self.place
if self.coordinates:
data['coordinates'] = self.coordinates
if self.contributors:
data['contributors'] = self.contributors
if self.hashtags:
data['hashtags'] = [h.text for h in self.hashtags]
if self.retweeted_status:
data['retweeted_status'] = self.retweeted_status.AsDict()
if self.retweet_count:
data['retweet_count'] = self.retweet_count
if self.urls:
data['urls'] = dict([(url.url, url.expanded_url) for url in self.urls])
if self.user_mentions:
data['user_mentions'] = [um.AsDict() for um in self.user_mentions]
return data
@staticmethod
def NewFromJsonDict(data):
'''Create a new instance based on a JSON dict.
Args:
data: A JSON dict, as converted from the JSON in the twitter API
Returns:
A twitter.Status instance
'''
if 'user' in data:
user = User.NewFromJsonDict(data['user'])
else:
user = None
if 'retweeted_status' in data:
retweeted_status = Status.NewFromJsonDict(data['retweeted_status'])
else:
retweeted_status = None
urls = None
user_mentions = None
hashtags = None
if 'entities' in data:
if 'urls' in data['entities']:
urls = [Url.NewFromJsonDict(u) for u in data['entities']['urls']]
if 'user_mentions' in data['entities']:
user_mentions = [User.NewFromJsonDict(u) for u in data['entities']['user_mentions']]
if 'hashtags' in data['entities']:
hashtags = [Hashtag.NewFromJsonDict(h) for h in data['entities']['hashtags']]
return Status(created_at=data.get('created_at', None),
favorited=data.get('favorited', None),
id=data.get('id', None),
text=data.get('text', None),
location=data.get('location', None),
in_reply_to_screen_name=data.get('in_reply_to_screen_name', None),
in_reply_to_user_id=data.get('in_reply_to_user_id', None),
in_reply_to_status_id=data.get('in_reply_to_status_id', None),
truncated=data.get('truncated', None),
retweeted=data.get('retweeted', None),
source=data.get('source', None),
user=user,
urls=urls,
user_mentions=user_mentions,
hashtags=hashtags,
geo=data.get('geo', None),
place=data.get('place', None),
coordinates=data.get('coordinates', None),
contributors=data.get('contributors', None),
retweeted_status=retweeted_status,
retweet_count=data.get('retweet_count', None))
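# Illustrative usage sketch (assumes `raw` holds a single status payload fetched from
# the Twitter REST API, e.g. the body of statuses/show.json):
#
#   data = simplejson.loads(raw)
#   status = Status.NewFromJsonDict(data)
#   print status.text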
class User(object):
'''A class representing the User structure used by the twitter API.
The User structure exposes the following properties:
user.id
user.name
user.screen_name
user.location
user.description
user.profile_image_url
user.profile_background_tile
user.profile_background_image_url
user.profile_sidebar_fill_color
user.profile_background_color
user.profile_link_color
user.profile_text_color
user.protected
user.utc_offset
user.time_zone
user.url
user.status
user.statuses_count
user.followers_count
user.friends_count
user.favourites_count
user.geo_enabled
user.verified
user.lang
user.notifications
user.contributors_enabled
user.created_at
user.listed_count
'''
def __init__(self,
id=None,
name=None,
screen_name=None,
location=None,
description=None,
profile_image_url=None,
profile_background_tile=None,
profile_background_image_url=None,
profile_sidebar_fill_color=None,
profile_background_color=None,
profile_link_color=None,
profile_text_color=None,
protected=None,
utc_offset=None,
time_zone=None,
followers_count=None,
friends_count=None,
statuses_count=None,
favourites_count=None,
url=None,
status=None,
geo_enabled=None,
verified=None,
lang=None,
notifications=None,
contributors_enabled=None,
created_at=None,
listed_count=None):
self.id = id
self.name = name
self.screen_name = screen_name
self.location = location
self.description = description
self.profile_image_url = profile_image_url
self.profile_background_tile = profile_background_tile
self.profile_background_image_url = profile_background_image_url
self.profile_sidebar_fill_color = profile_sidebar_fill_color
self.profile_background_color = profile_background_color
self.profile_link_color = profile_link_color
self.profile_text_color = profile_text_color
self.protected = protected
self.utc_offset = utc_offset
self.time_zone = time_zone
self.followers_count = followers_count
self.friends_count = friends_count
self.statuses_count = statuses_count
self.favourites_count = favourites_count
self.url = url
self.status = status
self.geo_enabled = geo_enabled
self.verified = verified
self.lang = lang
self.notifications = notifications
self.contributors_enabled = contributors_enabled
self.created_at = created_at
self.listed_count = listed_count
def GetId(self):
'''Get the unique id of this user.
Returns:
The unique id of this user
'''
return self._id
def SetId(self, id):
'''Set the unique id of this user.
Args:
id: The unique id of this user.
'''
self._id = id
id = property(GetId, SetId,
doc='The unique id of this user.')
def GetName(self):
'''Get the real name of this user.
Returns:
The real name of this user
'''
return self._name
def SetName(self, name):
'''Set the real name of this user.
Args:
name: The real name of this user
'''
self._name = name
name = property(GetName, SetName,
doc='The real name of this user.')
def GetScreenName(self):
'''Get the short twitter name of this user.
Returns:
The short twitter name of this user
'''
return self._screen_name
def SetScreenName(self, screen_name):
'''Set the short twitter name of this user.
Args:
screen_name: the short twitter name of this user
'''
self._screen_name = screen_name
screen_name = property(GetScreenName, SetScreenName,
doc='The short twitter name of this user.')
def GetLocation(self):
'''Get the geographic location of this user.
Returns:
The geographic location of this user
'''
return self._location
def SetLocation(self, location):
'''Set the geographic location of this user.
Args:
location: The geographic location of this user
'''
self._location = location
location = property(GetLocation, SetLocation,
doc='The geographic location of this user.')
def GetDescription(self):
'''Get the short text description of this user.
Returns:
The short text description of this user
'''
return self._description
def SetDescription(self, description):
'''Set the short text description of this user.
Args:
description: The short text description of this user
'''
self._description = description
description = property(GetDescription, SetDescription,
doc='The short text description of this user.')
def GetUrl(self):
'''Get the homepage url of this user.
Returns:
The homepage url of this user
'''
return self._url
def SetUrl(self, url):
'''Set the homepage url of this user.
Args:
url: The homepage url of this user
'''
self._url = url
url = property(GetUrl, SetUrl,
doc='The homepage url of this user.')
def GetProfileImageUrl(self):
'''Get the url of the thumbnail of this user.
Returns:
The url of the thumbnail of this user
'''
return self._profile_image_url
def SetProfileImageUrl(self, profile_image_url):
'''Set the url of the thumbnail of this user.
Args:
profile_image_url: The url of the thumbnail of this user
'''
self._profile_image_url = profile_image_url
  profile_image_url = property(GetProfileImageUrl, SetProfileImageUrl,
doc='The url of the thumbnail of this user.')
def GetProfileBackgroundTile(self):
'''Boolean for whether to tile the profile background image.
Returns:
True if the background is to be tiled, False if not, None if unset.
'''
return self._profile_background_tile
def SetProfileBackgroundTile(self, profile_background_tile):
'''Set the boolean flag for whether to tile the profile background image.
Args:
profile_background_tile: Boolean flag for whether to tile or not.
'''
self._profile_background_tile = profile_background_tile
profile_background_tile = property(GetProfileBackgroundTile, SetProfileBackgroundTile,
doc='Boolean for whether to tile the background image.')
def GetProfileBackgroundImageUrl(self):
return self._profile_background_image_url
def SetProfileBackgroundImageUrl(self, profile_background_image_url):
self._profile_background_image_url = profile_background_image_url
profile_background_image_url = property(GetProfileBackgroundImageUrl, SetProfileBackgroundImageUrl,
doc='The url of the profile background of this user.')
def GetProfileSidebarFillColor(self):
return self._profile_sidebar_fill_color
def SetProfileSidebarFillColor(self, profile_sidebar_fill_color):
self._profile_sidebar_fill_color = profile_sidebar_fill_color
profile_sidebar_fill_color = property(GetProfileSidebarFillColor, SetProfileSidebarFillColor)
def GetProfileBackgroundColor(self):
return self._profile_background_color
def SetProfileBackgroundColor(self, profile_background_color):
self._profile_background_color = profile_background_color
profile_background_color = property(GetProfileBackgroundColor, SetProfileBackgroundColor)
def GetProfileLinkColor(self):
return self._profile_link_color
def SetProfileLinkColor(self, profile_link_color):
self._profile_link_color = profile_link_color
profile_link_color = property(GetProfileLinkColor, SetProfileLinkColor)
def GetProfileTextColor(self):
return self._profile_text_color
def SetProfileTextColor(self, profile_text_color):
self._profile_text_color = profile_text_color
profile_text_color = property(GetProfileTextColor, SetProfileTextColor)
def GetProtected(self):
return self._protected
def SetProtected(self, protected):
self._protected = protected
protected = property(GetProtected, SetProtected)
def GetUtcOffset(self):
return self._utc_offset
def SetUtcOffset(self, utc_offset):
self._utc_offset = utc_offset
utc_offset = property(GetUtcOffset, SetUtcOffset)
def GetTimeZone(self):
'''Returns the current time zone string for the user.
Returns:
The descriptive time zone string for the user.
'''
return self._time_zone
def SetTimeZone(self, time_zone):
'''Sets the user's time zone string.
Args:
time_zone:
The descriptive time zone to assign for the user.
'''
self._time_zone = time_zone
time_zone = property(GetTimeZone, SetTimeZone)
def GetStatus(self):
'''Get the latest twitter.Status of this user.
Returns:
The latest twitter.Status of this user
'''
return self._status
def SetStatus(self, status):
'''Set the latest twitter.Status of this user.
Args:
status:
The latest twitter.Status of this user
'''
self._status = status
status = property(GetStatus, SetStatus,
doc='The latest twitter.Status of this user.')
def GetFriendsCount(self):
'''Get the friend count for this user.
Returns:
The number of users this user has befriended.
'''
return self._friends_count
def SetFriendsCount(self, count):
'''Set the friend count for this user.
Args:
count:
The number of users this user has befriended.
'''
self._friends_count = count
friends_count = property(GetFriendsCount, SetFriendsCount,
doc='The number of friends for this user.')
def GetListedCount(self):
'''Get the listed count for this user.
Returns:
The number of lists this user belongs to.
'''
return self._listed_count
def SetListedCount(self, count):
'''Set the listed count for this user.
Args:
count:
The number of lists this user belongs to.
'''
self._listed_count = count
listed_count = property(GetListedCount, SetListedCount,
doc='The number of lists this user belongs to.')
def GetFollowersCount(self):
'''Get the follower count for this user.
Returns:
The number of users following this user.
'''
return self._followers_count
def SetFollowersCount(self, count):
'''Set the follower count for this user.
Args:
count:
The number of users following this user.
'''
self._followers_count = count
followers_count = property(GetFollowersCount, SetFollowersCount,
doc='The number of users following this user.')
def GetStatusesCount(self):
'''Get the number of status updates for this user.
Returns:
The number of status updates for this user.
'''
return self._statuses_count
def SetStatusesCount(self, count):
'''Set the status update count for this user.
Args:
count:
The number of updates for this user.
'''
self._statuses_count = count
statuses_count = property(GetStatusesCount, SetStatusesCount,
doc='The number of updates for this user.')
def GetFavouritesCount(self):
'''Get the number of favourites for this user.
Returns:
The number of favourites for this user.
'''
return self._favourites_count
def SetFavouritesCount(self, count):
'''Set the favourite count for this user.
Args:
count:
The number of favourites for this user.
'''
self._favourites_count = count
favourites_count = property(GetFavouritesCount, SetFavouritesCount,
doc='The number of favourites for this user.')
def GetGeoEnabled(self):
'''Get the setting of geo_enabled for this user.
Returns:
True/False if Geo tagging is enabled
'''
return self._geo_enabled
def SetGeoEnabled(self, geo_enabled):
'''Set the latest twitter.geo_enabled of this user.
Args:
geo_enabled:
True/False if Geo tagging is to be enabled
'''
self._geo_enabled = geo_enabled
geo_enabled = property(GetGeoEnabled, SetGeoEnabled,
doc='The value of twitter.geo_enabled for this user.')
def GetVerified(self):
'''Get the setting of verified for this user.
Returns:
True/False if user is a verified account
'''
return self._verified
def SetVerified(self, verified):
'''Set twitter.verified for this user.
Args:
verified:
True/False if user is a verified account
'''
self._verified = verified
verified = property(GetVerified, SetVerified,
doc='The value of twitter.verified for this user.')
def GetLang(self):
'''Get the setting of lang for this user.
Returns:
language code of the user
'''
return self._lang
def SetLang(self, lang):
'''Set twitter.lang for this user.
Args:
lang:
language code for the user
'''
self._lang = lang
lang = property(GetLang, SetLang,
doc='The value of twitter.lang for this user.')
def GetNotifications(self):
'''Get the setting of notifications for this user.
Returns:
True/False for the notifications setting of the user
'''
return self._notifications
def SetNotifications(self, notifications):
'''Set twitter.notifications for this user.
Args:
notifications:
True/False notifications setting for the user
'''
self._notifications = notifications
notifications = property(GetNotifications, SetNotifications,
doc='The value of twitter.notifications for this user.')
def GetContributorsEnabled(self):
'''Get the setting of contributors_enabled for this user.
Returns:
True/False contributors_enabled of the user
'''
return self._contributors_enabled
def SetContributorsEnabled(self, contributors_enabled):
'''Set twitter.contributors_enabled for this user.
Args:
contributors_enabled:
True/False contributors_enabled setting for the user
'''
self._contributors_enabled = contributors_enabled
contributors_enabled = property(GetContributorsEnabled, SetContributorsEnabled,
doc='The value of twitter.contributors_enabled for this user.')
def GetCreatedAt(self):
'''Get the setting of created_at for this user.
Returns:
created_at value of the user
'''
return self._created_at
def SetCreatedAt(self, created_at):
'''Set twitter.created_at for this user.
Args:
created_at:
created_at value for the user
'''
self._created_at = created_at
created_at = property(GetCreatedAt, SetCreatedAt,
doc='The value of twitter.created_at for this user.')
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
try:
return other and \
self.id == other.id and \
self.name == other.name and \
self.screen_name == other.screen_name and \
self.location == other.location and \
self.description == other.description and \
self.profile_image_url == other.profile_image_url and \
self.profile_background_tile == other.profile_background_tile and \
self.profile_background_image_url == other.profile_background_image_url and \
self.profile_sidebar_fill_color == other.profile_sidebar_fill_color and \
self.profile_background_color == other.profile_background_color and \
self.profile_link_color == other.profile_link_color and \
self.profile_text_color == other.profile_text_color and \
self.protected == other.protected and \
self.utc_offset == other.utc_offset and \
self.time_zone == other.time_zone and \
self.url == other.url and \
self.statuses_count == other.statuses_count and \
self.followers_count == other.followers_count and \
self.favourites_count == other.favourites_count and \
self.friends_count == other.friends_count and \
self.status == other.status and \
self.geo_enabled == other.geo_enabled and \
self.verified == other.verified and \
self.lang == other.lang and \
self.notifications == other.notifications and \
self.contributors_enabled == other.contributors_enabled and \
self.created_at == other.created_at and \
self.listed_count == other.listed_count
except AttributeError:
return False
def __str__(self):
'''A string representation of this twitter.User instance.
The return value is the same as the JSON string representation.
Returns:
A string representation of this twitter.User instance.
'''
return self.AsJsonString()
def AsJsonString(self):
'''A JSON string representation of this twitter.User instance.
Returns:
A JSON string representation of this twitter.User instance
'''
return simplejson.dumps(self.AsDict(), sort_keys=True)
def AsDict(self):
'''A dict representation of this twitter.User instance.
The return value uses the same key names as the JSON representation.
Return:
A dict representing this twitter.User instance
'''
data = {}
if self.id:
data['id'] = self.id
if self.name:
data['name'] = self.name
if self.screen_name:
data['screen_name'] = self.screen_name
if self.location:
data['location'] = self.location
if self.description:
data['description'] = self.description
if self.profile_image_url:
data['profile_image_url'] = self.profile_image_url
if self.profile_background_tile is not None:
data['profile_background_tile'] = self.profile_background_tile
if self.profile_background_image_url:
data['profile_background_image_url'] = self.profile_background_image_url
if self.profile_sidebar_fill_color:
data['profile_sidebar_fill_color'] = self.profile_sidebar_fill_color
if self.profile_background_color:
data['profile_background_color'] = self.profile_background_color
if self.profile_link_color:
data['profile_link_color'] = self.profile_link_color
if self.profile_text_color:
data['profile_text_color'] = self.profile_text_color
if self.protected is not None:
data['protected'] = self.protected
if self.utc_offset:
data['utc_offset'] = self.utc_offset
if self.time_zone:
data['time_zone'] = self.time_zone
if self.url:
data['url'] = self.url
if self.status:
data['status'] = self.status.AsDict()
if self.friends_count:
data['friends_count'] = self.friends_count
if self.followers_count:
data['followers_count'] = self.followers_count
if self.statuses_count:
data['statuses_count'] = self.statuses_count
if self.favourites_count:
data['favourites_count'] = self.favourites_count
if self.geo_enabled:
data['geo_enabled'] = self.geo_enabled
if self.verified:
data['verified'] = self.verified
if self.lang:
data['lang'] = self.lang
if self.notifications:
data['notifications'] = self.notifications
if self.contributors_enabled:
data['contributors_enabled'] = self.contributors_enabled
if self.created_at:
data['created_at'] = self.created_at
if self.listed_count:
data['listed_count'] = self.listed_count
return data
@staticmethod
def NewFromJsonDict(data):
'''Create a new instance based on a JSON dict.
Args:
data:
A JSON dict, as converted from the JSON in the twitter API
Returns:
A twitter.User instance
'''
if 'status' in data:
status = Status.NewFromJsonDict(data['status'])
else:
status = None
return User(id=data.get('id', None),
name=data.get('name', None),
screen_name=data.get('screen_name', None),
location=data.get('location', None),
description=data.get('description', None),
statuses_count=data.get('statuses_count', None),
followers_count=data.get('followers_count', None),
favourites_count=data.get('favourites_count', None),
friends_count=data.get('friends_count', None),
profile_image_url=data.get('profile_image_url', None),
profile_background_tile = data.get('profile_background_tile', None),
profile_background_image_url = data.get('profile_background_image_url', None),
profile_sidebar_fill_color = data.get('profile_sidebar_fill_color', None),
profile_background_color = data.get('profile_background_color', None),
profile_link_color = data.get('profile_link_color', None),
profile_text_color = data.get('profile_text_color', None),
protected = data.get('protected', None),
utc_offset = data.get('utc_offset', None),
time_zone = data.get('time_zone', None),
url=data.get('url', None),
status=status,
geo_enabled=data.get('geo_enabled', None),
verified=data.get('verified', None),
lang=data.get('lang', None),
notifications=data.get('notifications', None),
contributors_enabled=data.get('contributors_enabled', None),
created_at=data.get('created_at', None),
listed_count=data.get('listed_count', None))
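# Usage sketch (illustrative only; the field values below are invented):
# a twitter.User round-trips between an API-style JSON dict and this class
# via NewFromJsonDict() and AsDict()/AsJsonString().
#
#   >>> raw = {'id': 718443, 'name': 'Kesuke Miyagi', 'screen_name': 'kesuke'}
#   >>> user = User.NewFromJsonDict(raw)
#   >>> user.screen_name
#   'kesuke'
#   >>> user.AsDict() == raw
#   True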
class List(object):
'''A class representing the List structure used by the twitter API.
The List structure exposes the following properties:
list.id
list.name
list.slug
list.description
list.full_name
list.mode
list.uri
list.member_count
list.subscriber_count
list.following
'''
def __init__(self,
id=None,
name=None,
slug=None,
description=None,
full_name=None,
mode=None,
uri=None,
member_count=None,
subscriber_count=None,
following=None,
user=None):
self.id = id
self.name = name
self.slug = slug
self.description = description
self.full_name = full_name
self.mode = mode
self.uri = uri
self.member_count = member_count
self.subscriber_count = subscriber_count
self.following = following
self.user = user
def GetId(self):
'''Get the unique id of this list.
Returns:
The unique id of this list
'''
return self._id
def SetId(self, id):
'''Set the unique id of this list.
Args:
id:
The unique id of this list.
'''
self._id = id
id = property(GetId, SetId,
doc='The unique id of this list.')
def GetName(self):
'''Get the real name of this list.
Returns:
The real name of this list
'''
return self._name
def SetName(self, name):
'''Set the real name of this list.
Args:
name:
The real name of this list
'''
self._name = name
name = property(GetName, SetName,
doc='The real name of this list.')
def GetSlug(self):
'''Get the slug of this list.
Returns:
The slug of this list
'''
return self._slug
def SetSlug(self, slug):
'''Set the slug of this list.
Args:
slug:
The slug of this list.
'''
self._slug = slug
slug = property(GetSlug, SetSlug,
doc='The slug of this list.')
def GetDescription(self):
'''Get the description of this list.
Returns:
The description of this list
'''
return self._description
def SetDescription(self, description):
'''Set the description of this list.
Args:
description:
The description of this list.
'''
self._description = description
description = property(GetDescription, SetDescription,
doc='The description of this list.')
def GetFull_name(self):
'''Get the full_name of this list.
Returns:
The full_name of this list
'''
return self._full_name
def SetFull_name(self, full_name):
'''Set the full_name of this list.
Args:
full_name:
The full_name of this list.
'''
self._full_name = full_name
full_name = property(GetFull_name, SetFull_name,
doc='The full_name of this list.')
def GetMode(self):
'''Get the mode of this list.
Returns:
The mode of this list
'''
return self._mode
def SetMode(self, mode):
'''Set the mode of this list.
Args:
mode:
The mode of this list.
'''
self._mode = mode
mode = property(GetMode, SetMode,
doc='The mode of this list.')
def GetUri(self):
'''Get the uri of this list.
Returns:
The uri of this list
'''
return self._uri
def SetUri(self, uri):
'''Set the uri of this list.
Args:
uri:
The uri of this list.
'''
self._uri = uri
uri = property(GetUri, SetUri,
doc='The uri of this list.')
def GetMember_count(self):
'''Get the member_count of this list.
Returns:
The member_count of this list
'''
return self._member_count
def SetMember_count(self, member_count):
'''Set the member_count of this list.
Args:
member_count:
The member_count of this list.
'''
self._member_count = member_count
member_count = property(GetMember_count, SetMember_count,
doc='The member_count of this list.')
def GetSubscriber_count(self):
'''Get the subscriber_count of this list.
Returns:
The subscriber_count of this list
'''
return self._subscriber_count
def SetSubscriber_count(self, subscriber_count):
'''Set the subscriber_count of this list.
Args:
subscriber_count:
The subscriber_count of this list.
'''
self._subscriber_count = subscriber_count
subscriber_count = property(GetSubscriber_count, SetSubscriber_count,
doc='The subscriber_count of this list.')
def GetFollowing(self):
'''Get the following status of this list.
Returns:
The following status of this list
'''
return self._following
def SetFollowing(self, following):
'''Set the following status of this list.
Args:
following:
The following status of this list.
'''
self._following = following
following = property(GetFollowing, SetFollowing,
doc='The following status of this list.')
def GetUser(self):
'''Get the user of this list.
Returns:
The owner of this list
'''
return self._user
def SetUser(self, user):
'''Set the user of this list.
Args:
user:
The owner of this list.
'''
self._user = user
user = property(GetUser, SetUser,
doc='The owner of this list.')
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
try:
return other and \
self.id == other.id and \
self.name == other.name and \
self.slug == other.slug and \
self.description == other.description and \
self.full_name == other.full_name and \
self.mode == other.mode and \
self.uri == other.uri and \
self.member_count == other.member_count and \
self.subscriber_count == other.subscriber_count and \
self.following == other.following and \
self.user == other.user
except AttributeError:
return False
def __str__(self):
'''A string representation of this twitter.List instance.
The return value is the same as the JSON string representation.
Returns:
A string representation of this twitter.List instance.
'''
return self.AsJsonString()
def AsJsonString(self):
'''A JSON string representation of this twitter.List instance.
Returns:
A JSON string representation of this twitter.List instance
'''
return simplejson.dumps(self.AsDict(), sort_keys=True)
def AsDict(self):
'''A dict representation of this twitter.List instance.
The return value uses the same key names as the JSON representation.
Return:
A dict representing this twitter.List instance
'''
data = {}
if self.id:
data['id'] = self.id
if self.name:
data['name'] = self.name
if self.slug:
data['slug'] = self.slug
if self.description:
data['description'] = self.description
if self.full_name:
data['full_name'] = self.full_name
if self.mode:
data['mode'] = self.mode
if self.uri:
data['uri'] = self.uri
if self.member_count is not None:
data['member_count'] = self.member_count
if self.subscriber_count is not None:
data['subscriber_count'] = self.subscriber_count
if self.following is not None:
data['following'] = self.following
if self.user is not None:
data['user'] = self.user.AsDict()
return data
@staticmethod
def NewFromJsonDict(data):
'''Create a new instance based on a JSON dict.
Args:
data:
A JSON dict, as converted from the JSON in the twitter API
Returns:
A twitter.List instance
'''
if 'user' in data:
user = User.NewFromJsonDict(data['user'])
else:
user = None
return List(id=data.get('id', None),
name=data.get('name', None),
slug=data.get('slug', None),
description=data.get('description', None),
full_name=data.get('full_name', None),
mode=data.get('mode', None),
uri=data.get('uri', None),
member_count=data.get('member_count', None),
subscriber_count=data.get('subscriber_count', None),
following=data.get('following', None),
user=user)
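# Usage sketch (field values are invented): a twitter.List is built the same
# way as User, and AsDict() only emits the fields that were actually set.
#
#   >>> data = {'id': 1, 'name': 'python', 'slug': 'python', 'member_count': 3}
#   >>> lst = List.NewFromJsonDict(data)
#   >>> lst.full_name is None
#   True
#   >>> sorted(lst.AsDict().keys())
#   ['id', 'member_count', 'name', 'slug']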
class DirectMessage(object):
'''A class representing the DirectMessage structure used by the twitter API.
The DirectMessage structure exposes the following properties:
direct_message.id
direct_message.created_at
direct_message.created_at_in_seconds # read only
direct_message.sender_id
direct_message.sender_screen_name
direct_message.recipient_id
direct_message.recipient_screen_name
direct_message.text
'''
def __init__(self,
id=None,
created_at=None,
sender_id=None,
sender_screen_name=None,
recipient_id=None,
recipient_screen_name=None,
text=None):
'''An object to hold a Twitter direct message.
This class is normally instantiated by the twitter.Api class and
returned in a sequence.
Note: Dates are posted in the form "Sat Jan 27 04:17:38 +0000 2007"
Args:
id:
The unique id of this direct message. [Optional]
created_at:
The time this direct message was posted. [Optional]
sender_id:
The id of the twitter user that sent this message. [Optional]
sender_screen_name:
The name of the twitter user that sent this message. [Optional]
recipient_id:
The id of the twitter user that received this message. [Optional]
recipient_screen_name:
The name of the twitter user that received this message. [Optional]
text:
The text of this direct message. [Optional]
'''
self.id = id
self.created_at = created_at
self.sender_id = sender_id
self.sender_screen_name = sender_screen_name
self.recipient_id = recipient_id
self.recipient_screen_name = recipient_screen_name
self.text = text
def GetId(self):
'''Get the unique id of this direct message.
Returns:
The unique id of this direct message
'''
return self._id
def SetId(self, id):
'''Set the unique id of this direct message.
Args:
id:
The unique id of this direct message
'''
self._id = id
id = property(GetId, SetId,
doc='The unique id of this direct message.')
def GetCreatedAt(self):
'''Get the time this direct message was posted.
Returns:
The time this direct message was posted
'''
return self._created_at
def SetCreatedAt(self, created_at):
'''Set the time this direct message was posted.
Args:
created_at:
The time this direct message was created
'''
self._created_at = created_at
created_at = property(GetCreatedAt, SetCreatedAt,
doc='The time this direct message was posted.')
def GetCreatedAtInSeconds(self):
'''Get the time this direct message was posted, in seconds since the epoch.
Returns:
The time this direct message was posted, in seconds since the epoch.
'''
return calendar.timegm(rfc822.parsedate(self.created_at))
created_at_in_seconds = property(GetCreatedAtInSeconds,
doc="The time this direct message was "
"posted, in seconds since the epoch")
def GetSenderId(self):
'''Get the unique sender id of this direct message.
Returns:
The unique sender id of this direct message
'''
return self._sender_id
def SetSenderId(self, sender_id):
'''Set the unique sender id of this direct message.
Args:
sender_id:
The unique sender id of this direct message
'''
self._sender_id = sender_id
sender_id = property(GetSenderId, SetSenderId,
doc='The unique sender id of this direct message.')
def GetSenderScreenName(self):
'''Get the unique sender screen name of this direct message.
Returns:
The unique sender screen name of this direct message
'''
return self._sender_screen_name
def SetSenderScreenName(self, sender_screen_name):
'''Set the unique sender screen name of this direct message.
Args:
sender_screen_name:
The unique sender screen name of this direct message
'''
self._sender_screen_name = sender_screen_name
sender_screen_name = property(GetSenderScreenName, SetSenderScreenName,
doc='The unique sender screen name of this direct message.')
def GetRecipientId(self):
'''Get the unique recipient id of this direct message.
Returns:
The unique recipient id of this direct message
'''
return self._recipient_id
def SetRecipientId(self, recipient_id):
'''Set the unique recipient id of this direct message.
Args:
recipient_id:
The unique recipient id of this direct message
'''
self._recipient_id = recipient_id
recipient_id = property(GetRecipientId, SetRecipientId,
doc='The unique recipient id of this direct message.')
def GetRecipientScreenName(self):
'''Get the unique recipient screen name of this direct message.
Returns:
The unique recipient screen name of this direct message
'''
return self._recipient_screen_name
def SetRecipientScreenName(self, recipient_screen_name):
'''Set the unique recipient screen name of this direct message.
Args:
recipient_screen_name:
The unique recipient screen name of this direct message
'''
self._recipient_screen_name = recipient_screen_name
recipient_screen_name = property(GetRecipientScreenName, SetRecipientScreenName,
doc='The unique recipient screen name of this direct message.')
def GetText(self):
'''Get the text of this direct message.
Returns:
The text of this direct message.
'''
return self._text
def SetText(self, text):
'''Set the text of this direct message.
Args:
text:
The text of this direct message
'''
self._text = text
text = property(GetText, SetText,
doc='The text of this direct message')
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
try:
return other and \
self.id == other.id and \
self.created_at == other.created_at and \
self.sender_id == other.sender_id and \
self.sender_screen_name == other.sender_screen_name and \
self.recipient_id == other.recipient_id and \
self.recipient_screen_name == other.recipient_screen_name and \
self.text == other.text
except AttributeError:
return False
def __str__(self):
'''A string representation of this twitter.DirectMessage instance.
The return value is the same as the JSON string representation.
Returns:
A string representation of this twitter.DirectMessage instance.
'''
return self.AsJsonString()
def AsJsonString(self):
'''A JSON string representation of this twitter.DirectMessage instance.
Returns:
A JSON string representation of this twitter.DirectMessage instance
'''
return simplejson.dumps(self.AsDict(), sort_keys=True)
def AsDict(self):
'''A dict representation of this twitter.DirectMessage instance.
The return value uses the same key names as the JSON representation.
Return:
A dict representing this twitter.DirectMessage instance
'''
data = {}
if self.id:
data['id'] = self.id
if self.created_at:
data['created_at'] = self.created_at
if self.sender_id:
data['sender_id'] = self.sender_id
if self.sender_screen_name:
data['sender_screen_name'] = self.sender_screen_name
if self.recipient_id:
data['recipient_id'] = self.recipient_id
if self.recipient_screen_name:
data['recipient_screen_name'] = self.recipient_screen_name
if self.text:
data['text'] = self.text
return data
@staticmethod
def NewFromJsonDict(data):
'''Create a new instance based on a JSON dict.
Args:
data:
A JSON dict, as converted from the JSON in the twitter API
Returns:
A twitter.DirectMessage instance
'''
return DirectMessage(created_at=data.get('created_at', None),
recipient_id=data.get('recipient_id', None),
sender_id=data.get('sender_id', None),
text=data.get('text', None),
sender_screen_name=data.get('sender_screen_name', None),
id=data.get('id', None),
recipient_screen_name=data.get('recipient_screen_name', None))
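# Usage sketch (values are invented): created_at_in_seconds is derived from
# the created_at string via rfc822.parsedate, so a DirectMessage built from
# a JSON dict exposes both representations of the timestamp.
#
#   >>> dm = DirectMessage.NewFromJsonDict({'id': 42, 'text': 'hello',
#   ...     'created_at': 'Sat Jan 27 04:17:38 +0000 2007'})
#   >>> dm.text
#   'hello'
#   >>> dm.created_at_in_seconds > 0
#   True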
class Hashtag(object):
''' A class representing a twitter hashtag
'''
def __init__(self,
text=None):
self.text = text
@staticmethod
def NewFromJsonDict(data):
'''Create a new instance based on a JSON dict.
Args:
data:
A JSON dict, as converted from the JSON in the twitter API
Returns:
A twitter.Hashtag instance
'''
return Hashtag(text = data.get('text', None))
class Trend(object):
''' A class representing a trending topic
'''
def __init__(self, name=None, query=None, timestamp=None):
self.name = name
self.query = query
self.timestamp = timestamp
def __str__(self):
return 'Name: %s\nQuery: %s\nTimestamp: %s\n' % (self.name, self.query, self.timestamp)
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
try:
return other and \
self.name == other.name and \
self.query == other.query and \
self.timestamp == other.timestamp
except AttributeError:
return False
@staticmethod
def NewFromJsonDict(data, timestamp = None):
'''Create a new instance based on a JSON dict
Args:
data:
A JSON dict
timestamp:
Gets set as the timestamp property of the new object
Returns:
A twitter.Trend object
'''
return Trend(name=data.get('name', None),
query=data.get('query', None),
timestamp=timestamp)
class Url(object):
'''A class representing a URL contained in a tweet'''
def __init__(self,
url=None,
expanded_url=None):
self.url = url
self.expanded_url = expanded_url
@staticmethod
def NewFromJsonDict(data):
'''Create a new instance based on a JSON dict.
Args:
data:
A JSON dict, as converted from the JSON in the twitter API
Returns:
A twitter.Url instance
'''
return Url(url=data.get('url', None),
expanded_url=data.get('expanded_url', None))
class Api(object):
'''A python interface into the Twitter API
By default, the Api caches results for 1 minute.
Example usage:
To create an instance of the twitter.Api class, with no authentication:
>>> import twitter
>>> api = twitter.Api()
To fetch the most recently posted public twitter status messages:
>>> statuses = api.GetPublicTimeline()
>>> print [s.user.name for s in statuses]
[u'DeWitt', u'Kesuke Miyagi', u'ev', u'Buzz Andersen', u'Biz Stone'] #...
To fetch a single user's public status messages, where "user" is either
a Twitter "short name" or their user id.
>>> statuses = api.GetUserTimeline(user)
>>> print [s.text for s in statuses]
To use authentication, instantiate the twitter.Api class with a
consumer key and secret; and the oAuth key and secret:
>>> api = twitter.Api(consumer_key='twitter consumer key',
consumer_secret='twitter consumer secret',
access_token_key='the_key_given',
access_token_secret='the_key_secret')
To fetch your friends (after being authenticated):
>>> users = api.GetFriends()
>>> print [u.name for u in users]
To post a twitter status message (after being authenticated):
>>> status = api.PostUpdate('I love python-twitter!')
>>> print status.text
I love python-twitter!
There are many other methods, including:
>>> api.PostUpdates(status)
>>> api.PostDirectMessage(user, text)
>>> api.GetUser(user)
>>> api.GetReplies()
>>> api.GetUserTimeline(user)
>>> api.GetStatus(id)
>>> api.DestroyStatus(id)
>>> api.GetFriendsTimeline(user)
>>> api.GetFriends(user)
>>> api.GetFollowers()
>>> api.GetFeatured()
>>> api.GetDirectMessages()
>>> api.PostDirectMessage(user, text)
>>> api.DestroyDirectMessage(id)
>>> api.DestroyFriendship(user)
>>> api.CreateFriendship(user)
>>> api.GetUserByEmail(email)
>>> api.VerifyCredentials()
'''
DEFAULT_CACHE_TIMEOUT = 60 # cache for 1 minute
_API_REALM = 'Twitter API'
def __init__(self,
consumer_key=None,
consumer_secret=None,
access_token_key=None,
access_token_secret=None,
input_encoding=None,
request_headers=None,
cache=DEFAULT_CACHE,
shortner=None,
base_url=None,
use_gzip_compression=False,
debugHTTP=False):
'''Instantiate a new twitter.Api object.
Args:
consumer_key:
Your Twitter user's consumer_key.
consumer_secret:
Your Twitter user's consumer_secret.
access_token_key:
The oAuth access token key value you retrieved
from running get_access_token.py.
access_token_secret:
The oAuth access token's secret, also retrieved
from the get_access_token.py run.
input_encoding:
The encoding used to encode input strings. [Optional]
request_headers:
A dictionary of additional HTTP request headers. [Optional]
cache:
The cache instance to use. Defaults to DEFAULT_CACHE.
Use None to disable caching. [Optional]
shortner:
The shortner instance to use. Defaults to None.
See shorten_url.py for an example shortner. [Optional]
base_url:
The base URL to use to contact the Twitter API.
Defaults to https://api.twitter.com. [Optional]
use_gzip_compression:
Set to True to enable gzip compression for any call
made to Twitter. Defaults to False. [Optional]
debugHTTP:
Set to True to enable debug output from urllib2 when performing
any HTTP requests. Defaults to False. [Optional]
'''
self.SetCache(cache)
self._urllib = urllib2
self._cache_timeout = Api.DEFAULT_CACHE_TIMEOUT
self._input_encoding = input_encoding
self._use_gzip = use_gzip_compression
self._debugHTTP = debugHTTP
self._oauth_consumer = None
self._shortlink_size = 19
self._InitializeRequestHeaders(request_headers)
self._InitializeUserAgent()
self._InitializeDefaultParameters()
if base_url is None:
self.base_url = 'https://api.twitter.com/1'
else:
self.base_url = base_url
if consumer_key is not None and (access_token_key is None or
access_token_secret is None):
print >> sys.stderr, 'Twitter now requires an oAuth Access Token for API calls.'
print >> sys.stderr, 'If you are using this library from a command line utility, please'
print >> sys.stderr, 'run the included get_access_token.py tool to generate one.'
raise TwitterError('Twitter requires oAuth Access Token for all API access')
self.SetCredentials(consumer_key, consumer_secret, access_token_key, access_token_secret)
def SetCredentials(self,
consumer_key,
consumer_secret,
access_token_key=None,
access_token_secret=None):
'''Set the consumer_key and consumer_secret for this instance
Args:
consumer_key:
The consumer_key of the twitter account.
consumer_secret:
The consumer_secret for the twitter account.
access_token_key:
The oAuth access token key value you retrieved
from running get_access_token.py.
access_token_secret:
The oAuth access token's secret, also retrieved
from the get_access_token.py run.
'''
self._consumer_key = consumer_key
self._consumer_secret = consumer_secret
self._access_token_key = access_token_key
self._access_token_secret = access_token_secret
self._oauth_consumer = None
if consumer_key is not None and consumer_secret is not None and \
access_token_key is not None and access_token_secret is not None:
self._signature_method_plaintext = oauth.SignatureMethod_PLAINTEXT()
self._signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1()
self._oauth_token = oauth.Token(key=access_token_key, secret=access_token_secret)
self._oauth_consumer = oauth.Consumer(key=consumer_key, secret=consumer_secret)
def ClearCredentials(self):
'''Clear the any credentials for this instance
'''
self._consumer_key = None
self._consumer_secret = None
self._access_token_key = None
self._access_token_secret = None
self._oauth_consumer = None
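# Construction sketch (the key/secret strings are placeholders, not real
# credentials): an authenticated Api needs all four oAuth values, and the
# credentials can later be replaced or dropped without rebuilding the object.
#
#   >>> api = Api(consumer_key='consumer-key', consumer_secret='consumer-secret',
#   ...           access_token_key='token-key', access_token_secret='token-secret')
#   >>> api.ClearCredentials()   # back to unauthenticated, read-only calls
#   >>> api.SetCredentials('consumer-key', 'consumer-secret',
#   ...                    'token-key', 'token-secret')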
def GetPublicTimeline(self,
since_id=None,
include_rts=None,
include_entities=None):
'''Fetch the sequence of public twitter.Status messages for all users.
Args:
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
include_rts:
If True, the timeline will contain native retweets (if they
exist) in addition to the standard stream of tweets. [Optional]
include_entities:
If True, each tweet will include a node called "entities".
This node offers a variety of metadata about the tweet in a
discrete structure, including: user_mentions, urls, and
hashtags. [Optional]
Returns:
A sequence of twitter.Status instances, one for each message
'''
parameters = {}
if since_id:
parameters['since_id'] = since_id
if include_rts:
parameters['include_rts'] = 1
if include_entities:
parameters['include_entities'] = 1
url = '%s/statuses/public_timeline.json' % self.base_url
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
return [Status.NewFromJsonDict(x) for x in data]
def FilterPublicTimeline(self,
term,
since_id=None):
'''Filter the public twitter timeline by a given search term on
the local machine.
Args:
term:
term to search by.
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
Returns:
A sequence of twitter.Status instances, one for each message
containing the term
'''
statuses = self.GetPublicTimeline(since_id)
results = []
for s in statuses:
if s.text.lower().find(term.lower()) != -1:
results.append(s)
return results
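# Usage sketch (requires a reachable public_timeline endpoint): the filtering
# happens entirely client-side, so every returned status contains the term
# somewhere in its text, case-insensitively.
#
#   >>> api = Api()
#   >>> hits = api.FilterPublicTimeline('python')
#   >>> all('python' in s.text.lower() for s in hits)
#   True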
def GetSearch(self,
term=None,
geocode=None,
since_id=None,
per_page=15,
page=1,
lang="en",
show_user="true",
query_users=False):
'''Return twitter search results for a given term.
Args:
term:
term to search by. Optional if you include geocode.
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
geocode:
geolocation information in the form (latitude, longitude, radius)
[Optional]
per_page:
number of results to return. Default is 15 [Optional]
page:
Specifies the page of results to retrieve.
Note: there are pagination limits. [Optional]
lang:
language for results. Default is English [Optional]
show_user:
If "true", prefixes the matching screen name to the status text. [Optional]
query_users:
If set to False, then all users only have screen_name and
profile_image_url available.
If set to True, all information about the users is available,
but it costs one additional request per status, which uses up
the request quota quickly.
Returns:
A sequence of twitter.Status instances, one for each message containing
the term
'''
# Build request parameters
parameters = {}
if since_id:
parameters['since_id'] = since_id
if term is None and geocode is None:
return []
if term is not None:
parameters['q'] = term
if geocode is not None:
parameters['geocode'] = ','.join(map(str, geocode))
parameters['show_user'] = show_user
parameters['lang'] = lang
parameters['rpp'] = per_page
parameters['page'] = page
# Make and send requests
url = 'http://search.twitter.com/search.json'
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
results = []
for x in data['results']:
temp = Status.NewFromJsonDict(x)
if query_users:
# Build user object with new request
temp.user = self.GetUser(urllib.quote(x['from_user']))
else:
temp.user = User(screen_name=x['from_user'], profile_image_url=x['profile_image_url'])
results.append(temp)
# Return built list of statuses
return results
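# Usage sketch (the coordinates and radius are arbitrary examples): a search
# may be issued with a geocode alone, since the method only returns [] when
# both term and geocode are missing. The radius is given with a unit such as
# '1mi' or '10km', and the tuple is joined into a single request parameter.
#
#   >>> api = Api()
#   >>> nearby = api.GetSearch(geocode=(37.781157, -122.398720, '1mi'), per_page=5)
#   >>> print [s.user.screen_name for s in nearby]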
def GetTrendsCurrent(self, exclude=None):
'''Get the current top trending topics
Args:
exclude:
Appends the exclude parameter as a request parameter.
Currently only exclude=hashtags is supported. [Optional]
Returns:
A list with 10 entries. Each entry contains a twitter.Trend instance.
'''
parameters = {}
if exclude:
parameters['exclude'] = exclude
url = '%s/trends/current.json' % self.base_url
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
trends = []
for t in data['trends']:
for item in data['trends'][t]:
trends.append(Trend.NewFromJsonDict(item, timestamp = t))
return trends
def GetTrendsWoeid(self, woeid, exclude=None):
'''Return the top 10 trending topics for a specific WOEID, if trending
information is available for it.
Args:
woeid:
the Yahoo! Where On Earth ID for a location.
exclude:
Appends the exclude parameter as a request parameter.
Currently only exclude=hashtags is supported. [Optional]
Returns:
A list with 10 entries. Each entry contains a Trend.
'''
parameters = {}
if exclude:
parameters['exclude'] = exclude
url = '%s/trends/%s.json' % (self.base_url, woeid)
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
trends = []
timestamp = data[0]['as_of']
for trend in data[0]['trends']:
trends.append(Trend.NewFromJsonDict(trend, timestamp = timestamp))
return trends
def GetTrendsDaily(self, exclude=None, startdate=None):
'''Get the current top trending topics for each hour in a given day
Args:
startdate:
The start date for the report.
Should be in the format YYYY-MM-DD. [Optional]
exclude:
Appends the exclude parameter as a request parameter.
Currently only exclude=hashtags is supported. [Optional]
Returns:
A list with 24 entries. Each entry contains the twitter.Trend
elements that were trending at the corresponding hour of the day.
'''
parameters = {}
if exclude:
parameters['exclude'] = exclude
if not startdate:
startdate = time.strftime('%Y-%m-%d', time.gmtime())
parameters['date'] = startdate
url = '%s/trends/daily.json' % self.base_url
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
trends = []
for i in xrange(24):
trends.append(None)
for t in data['trends']:
idx = int(time.strftime('%H', time.strptime(t, '%Y-%m-%d %H:%M')))
trends[idx] = [Trend.NewFromJsonDict(x, timestamp = t)
for x in data['trends'][t]]
return trends
def GetTrendsWeekly(self, exclude=None, startdate=None):
'''Get the top 30 trending topics for each day in a given week.
Args:
startdate:
The start date for the report.
Should be in the format YYYY-MM-DD. [Optional]
exclude:
Appends the exclude parameter as a request parameter.
Currently only exclude=hashtags is supported. [Optional]
Returns:
A list with 7 entries. Each entry contains the twitter.Trend
elements of the trending topics for the corresponding day of the week.
'''
parameters = {}
if exclude:
parameters['exclude'] = exclude
if not startdate:
startdate = time.strftime('%Y-%m-%d', time.gmtime())
parameters['date'] = startdate
url = '%s/trends/weekly.json' % self.base_url
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
trends = []
for i in xrange(7):
trends.append(None)
# use the epochs of the dates as keys for a dictionary
times = dict([(calendar.timegm(time.strptime(t, '%Y-%m-%d')),t)
for t in data['trends']])
cnt = 0
# create the resulting structure ordered by the epochs of the dates
for e in sorted(times.keys()):
trends[cnt] = [Trend.NewFromJsonDict(x, timestamp = times[e])
for x in data['trends'][times[e]]]
cnt += 1
return trends
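# Usage sketch (issues live requests): the daily and weekly trend calls return
# fixed-size lists (24 hourly slots, 7 daily slots); each populated slot is a
# list of Trend objects that share that slot's timestamp.
#
#   >>> api = Api()
#   >>> weekly = api.GetTrendsWeekly()
#   >>> for day in weekly:
#   ...   if day:
#   ...     print day[0].timestamp, day[0].name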
def GetFriendsTimeline(self,
user=None,
count=None,
page=None,
since_id=None,
retweets=None,
include_entities=None):
'''Fetch the sequence of twitter.Status messages for a user's friends
The twitter.Api instance must be authenticated if the user is private.
Args:
user:
Specifies the ID or screen name of the user for whom to return
the friends_timeline. If not specified then the authenticated
user set in the twitter.Api instance will be used. [Optional]
count:
Specifies the number of statuses to retrieve. May not be
greater than 100. [Optional]
page:
Specifies the page of results to retrieve.
Note: there are pagination limits. [Optional]
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
retweets:
If True, the timeline will contain native retweets. [Optional]
include_entities:
If True, each tweet will include a node called "entities".
This node offers a variety of metadata about the tweet in a
discrete structure, including: user_mentions, urls, and
hashtags. [Optional]
Returns:
A sequence of twitter.Status instances, one for each message
'''
if not user and not self._oauth_consumer:
raise TwitterError("User must be specified if API is not authenticated.")
url = '%s/statuses/friends_timeline' % self.base_url
if user:
url = '%s/%s.json' % (url, user)
else:
url = '%s.json' % url
parameters = {}
if count is not None:
try:
if int(count) > 100:
raise TwitterError("'count' may not be greater than 100")
except ValueError:
raise TwitterError("'count' must be an integer")
parameters['count'] = count
if page is not None:
try:
parameters['page'] = int(page)
except ValueError:
raise TwitterError("'page' must be an integer")
if since_id:
parameters['since_id'] = since_id
if retweets:
parameters['include_rts'] = True
if include_entities:
parameters['include_entities'] = True
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
return [Status.NewFromJsonDict(x) for x in data]
def GetUserTimeline(self,
id=None,
user_id=None,
screen_name=None,
since_id=None,
max_id=None,
count=None,
page=None,
include_rts=None,
include_entities=None):
'''Fetch the sequence of public Status messages for a single user.
The twitter.Api instance must be authenticated if the user is private.
Args:
id:
Specifies the ID or screen name of the user for whom to return
the user_timeline. [Optional]
user_id:
Specifies the ID of the user for whom to return the
user_timeline. Helpful for disambiguating when a valid user ID
is also a valid screen name. [Optional]
screen_name:
Specifies the screen name of the user for whom to return the
user_timeline. Helpful for disambiguating when a valid screen
name is also a user ID. [Optional]
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns only statuses with an ID less than (that is, older
than) or equal to the specified ID. [Optional]
count:
Specifies the number of statuses to retrieve. May not be
greater than 200. [Optional]
page:
Specifies the page of results to retrieve.
Note: there are pagination limits. [Optional]
include_rts:
If True, the timeline will contain native retweets (if they
exist) in addition to the standard stream of tweets. [Optional]
include_entities:
If True, each tweet will include a node called "entities".
This node offers a variety of metadata about the tweet in a
discrete structure, including: user_mentions, urls, and
hashtags. [Optional]
Returns:
A sequence of Status instances, one for each message up to count
'''
parameters = {}
if id:
url = '%s/statuses/user_timeline/%s.json' % (self.base_url, id)
elif user_id:
url = '%s/statuses/user_timeline.json?user_id=%d' % (self.base_url, user_id)
elif screen_name:
url = ('%s/statuses/user_timeline.json?screen_name=%s' % (self.base_url,
screen_name))
elif not self._oauth_consumer:
raise TwitterError("User must be specified if API is not authenticated.")
else:
url = '%s/statuses/user_timeline.json' % self.base_url
if since_id:
try:
parameters['since_id'] = long(since_id)
except:
raise TwitterError("since_id must be an integer")
if max_id:
try:
parameters['max_id'] = long(max_id)
except:
raise TwitterError("max_id must be an integer")
if count:
try:
parameters['count'] = int(count)
except:
raise TwitterError("count must be an integer")
if page:
try:
parameters['page'] = int(page)
except:
raise TwitterError("page must be an integer")
if include_rts:
parameters['include_rts'] = 1
if include_entities:
parameters['include_entities'] = 1
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
return [Status.NewFromJsonDict(x) for x in data]
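# Usage sketch ('twitter' is just an example account name): screen_name and
# user_id disambiguate users whose numeric id is also a valid screen name,
# and the paging/filter parameters combine freely.
#
#   >>> api = Api()
#   >>> statuses = api.GetUserTimeline(screen_name='twitter', count=10,
#   ...                                include_rts=True)
#   >>> print [s.text for s in statuses]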
def GetStatus(self, id, include_entities=None):
'''Returns a single status message.
The twitter.Api instance must be authenticated if the
status message is private.
Args:
id:
The numeric ID of the status you are trying to retrieve.
include_entities:
If True, each tweet will include a node called "entities".
This node offers a variety of metadata about the tweet in a
discrete structure, including: user_mentions, urls, and
hashtags. [Optional]
Returns:
A twitter.Status instance representing that status message
'''
try:
if id:
long(id)
except:
raise TwitterError("id must be an long integer")
parameters = {}
if include_entities:
parameters['include_entities'] = 1
url = '%s/statuses/show/%s.json' % (self.base_url, id)
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
return Status.NewFromJsonDict(data)
def DestroyStatus(self, id):
'''Destroys the status specified by the required ID parameter.
The twitter.Api instance must be authenticated and the
authenticating user must be the author of the specified status.
Args:
id:
The numerical ID of the status you're trying to destroy.
Returns:
A twitter.Status instance representing the destroyed status message
'''
try:
if id:
long(id)
except:
raise TwitterError("id must be an integer")
url = '%s/statuses/destroy/%s.json' % (self.base_url, id)
json = self._FetchUrl(url, post_data={'id': id})
data = self._ParseAndCheckTwitter(json)
return Status.NewFromJsonDict(data)
@classmethod
def _calculate_status_length(cls, status, linksize=19):
dummy_link_replacement = 'https://-%d-chars%s/' % (linksize, '-'*(linksize - 18))
shortened = ' '.join([x if not (x.startswith('http://') or
x.startswith('https://'))
else
dummy_link_replacement
for x in status.split(' ')])
return len(shortened)
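# Worked example for this internal helper: every space-separated token that
# begins with http:// or https:// is counted as a shortened link of
# `linksize` characters (19 by default, matching self._shortlink_size), so
# the text below measures 5 + 1 + 19 + 1 + 3 = 29 characters.
#
#   >>> Api._calculate_status_length('check http://example.com/a/very/long/path out')
#   29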
def PostUpdate(self, status, in_reply_to_status_id=None):
'''Post a twitter status message from the authenticated user.
The twitter.Api instance must be authenticated.
Args:
status:
The message text to be posted.
Must be less than or equal to 140 characters.
in_reply_to_status_id:
The ID of an existing status that the status to be posted is
in reply to. This implicitly sets the in_reply_to_user_id
attribute of the resulting status to the user ID of the
message being replied to. Invalid/missing status IDs will be
ignored. [Optional]
Returns:
A twitter.Status instance representing the message posted.
'''
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instance must be authenticated.")
url = '%s/statuses/update.json' % self.base_url
if isinstance(status, unicode) or self._input_encoding is None:
u_status = status
else:
u_status = unicode(status, self._input_encoding)
if self._calculate_status_length(u_status, self._shortlink_size) > CHARACTER_LIMIT:
raise TwitterError("Text must be less than or equal to %d characters. "
"Consider using PostUpdates." % CHARACTER_LIMIT)
data = {'status': status}
if in_reply_to_status_id:
data['in_reply_to_status_id'] = in_reply_to_status_id
json = self._FetchUrl(url, post_data=data)
data = self._ParseAndCheckTwitter(json)
return Status.NewFromJsonDict(data)
def PostUpdates(self, status, continuation=None, **kwargs):
'''Post one or more twitter status messages from the authenticated user.
Unlike api.PostUpdate, this method will post multiple status updates
if the message is longer than 140 characters.
The twitter.Api instance must be authenticated.
Args:
status:
The message text to be posted.
May be longer than 140 characters.
continuation:
The character string, if any, to be appended to all but the
last message. Note that Twitter strips trailing '...' strings
from messages. Consider using the unicode \u2026 character
(horizontal ellipsis) instead. [Defaults to None]
**kwargs:
See api.PostUpdate for a list of accepted parameters.
Returns:
A list of twitter.Status instances representing the messages posted.
'''
results = list()
if continuation is None:
continuation = ''
line_length = CHARACTER_LIMIT - len(continuation)
lines = textwrap.wrap(status, line_length)
for line in lines[0:-1]:
results.append(self.PostUpdate(line + continuation, **kwargs))
results.append(self.PostUpdate(lines[-1], **kwargs))
return results
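# Usage sketch (credentials are placeholders; very_long_text stands for any
# string longer than 140 characters): PostUpdates wraps the text on word
# boundaries and appends the continuation to every chunk except the last.
#
#   >>> api = Api(consumer_key='ck', consumer_secret='cs',
#   ...           access_token_key='tk', access_token_secret='ts')
#   >>> parts = api.PostUpdates(very_long_text, continuation=u'\u2026')
#   >>> print [s.text for s in parts]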
def GetUserRetweets(self, count=None, since_id=None, max_id=None, include_entities=False):
'''Fetch the sequence of retweets made by the authenticated user.
The twitter.Api instance must be authenticated.
Args:
count:
The number of status messages to retrieve. [Optional]
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns results with an ID less than (that is, older than) or
equal to the specified ID. [Optional]
include_entities:
If True, each tweet will include a node called "entities".
This node offers a variety of metadata about the tweet in a
discrete structure, including: user_mentions, urls, and
hashtags. [Optional]
Returns:
A sequence of twitter.Status instances, one for each message up to count
'''
url = '%s/statuses/retweeted_by_me.json' % self.base_url
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instance must be authenticated.")
parameters = {}
if count is not None:
try:
if int(count) > 100:
raise TwitterError("'count' may not be greater than 100")
except ValueError:
raise TwitterError("'count' must be an integer")
if count:
parameters['count'] = count
if since_id:
parameters['since_id'] = since_id
if include_entities:
parameters['include_entities'] = True
if max_id:
try:
parameters['max_id'] = long(max_id)
except:
raise TwitterError("max_id must be an integer")
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
return [Status.NewFromJsonDict(x) for x in data]
def GetReplies(self, since=None, since_id=None, page=None):
'''Get a sequence of status messages representing the 20 most
recent replies (status updates prefixed with @twitterID) to the
authenticating user.
Args:
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
page:
Specifies the page of results to retrieve.
Note: there are pagination limits. [Optional]
since:
Narrows the returned results to just those statuses created
after the specified HTTP-formatted date. [Optional]
Returns:
A sequence of twitter.Status instances, one for each reply to the user.
'''
url = '%s/statuses/replies.json' % self.base_url
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instance must be authenticated.")
parameters = {}
if since:
parameters['since'] = since
if since_id:
parameters['since_id'] = since_id
if page:
parameters['page'] = page
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
return [Status.NewFromJsonDict(x) for x in data]
def GetRetweets(self, statusid):
'''Returns up to 100 of the first retweets of the tweet identified
by statusid
Args:
statusid:
The ID of the tweet for which retweets should be searched for
Returns:
A list of twitter.Status instances, which are retweets of statusid
'''
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instsance must be authenticated.")
url = '%s/statuses/retweets/%s.json?include_entities=true&include_rts=true' % (self.base_url, statusid)
parameters = {}
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
return [Status.NewFromJsonDict(s) for s in data]
def GetFriends(self, user=None, cursor=-1):
'''Fetch the sequence of twitter.User instances, one for each friend.
The twitter.Api instance must be authenticated.
Args:
user:
The twitter name or id of the user whose friends you are fetching.
If not specified, defaults to the authenticated user. [Optional]
Returns:
A sequence of twitter.User instances, one for each friend
'''
if not user and not self._oauth_consumer:
raise TwitterError("twitter.Api instance must be authenticated")
if user:
url = '%s/statuses/friends/%s.json' % (self.base_url, user)
else:
url = '%s/statuses/friends.json' % self.base_url
parameters = {}
parameters['cursor'] = cursor
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
return [User.NewFromJsonDict(x) for x in data['users']]
def GetFriendIDs(self, user=None, cursor=-1):
'''Returns a list of twitter user ids for every person
the specified user is following.
Args:
user:
The id or screen_name of the user to retrieve the id list for
[Optional]
Returns:
A list of integers, one for each user id.
'''
if not user and not self._oauth_consumer:
raise TwitterError("twitter.Api instance must be authenticated")
if user:
url = '%s/friends/ids/%s.json' % (self.base_url, user)
else:
url = '%s/friends/ids.json' % self.base_url
parameters = {}
parameters['cursor'] = cursor
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
return data
def GetFollowerIDs(self, userid=None, cursor=-1):
'''Returns the follower ids for the specified user, or for the
authenticated user if no userid is given.
The twitter.Api instance must be authenticated.
Returns:
The follower ids, as returned by the API.
'''
if userid:
url = '%s/followers/ids/%s.json' % (self.base_url, userid)
else:
url = '%s/followers/ids.json' % self.base_url
parameters = {}
parameters['cursor'] = cursor
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
return data
def GetFollowers(self, cursor=-1):
'''Fetch the sequence of twitter.User instances, one for each follower
The twitter.Api instance must be authenticated.
Args:
cursor:
Specifies the Twitter API Cursor location to start at. [Optional]
Note: there are pagination limits.
Returns:
A sequence of twitter.User instances, one for each follower
'''
if not self._oauth_consumer:
raise TwitterError("twitter.Api instance must be authenticated")
url = '%s/statuses/followers.json' % self.base_url
result = []
while True:
parameters = { 'cursor': cursor }
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
result += [User.NewFromJsonDict(x) for x in data['users']]
if 'next_cursor' in data:
if data['next_cursor'] == 0 or data['next_cursor'] == data['previous_cursor']:
break
# advance the cursor, otherwise the next request would fetch the same page again
cursor = data['next_cursor']
else:
break
return result
def GetFeatured(self):
'''Fetch the sequence of twitter.User instances featured on twitter.com
The twitter.Api instance must be authenticated.
Returns:
A sequence of twitter.User instances
'''
url = '%s/statuses/featured.json' % self.base_url
json = self._FetchUrl(url)
data = self._ParseAndCheckTwitter(json)
return [User.NewFromJsonDict(x) for x in data]
def UsersLookup(self, user_id=None, screen_name=None, users=None):
'''Fetch extended information for the specified users.
Users may be specified as lists of user_ids, screen_names,
or twitter.User objects. The list of users that
are queried is the union of all specified parameters.
The twitter.Api instance must be authenticated.
Args:
user_id:
A list of user_ids to retrieve extended information.
[Optional]
screen_name:
A list of screen_names to retrieve extended information.
[Optional]
users:
A list of twitter.User objects to retrieve extended information.
[Optional]
Returns:
A list of twitter.User objects for the requested users
'''
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instance must be authenticated.")
if not user_id and not screen_name and not users:
raise TwitterError("Specify at least on of user_id, screen_name, or users.")
url = '%s/users/lookup.json' % self.base_url
parameters = {}
uids = list()
if user_id:
uids.extend(user_id)
if users:
uids.extend([u.id for u in users])
if len(uids):
parameters['user_id'] = ','.join(["%s" % u for u in uids])
if screen_name:
parameters['screen_name'] = ','.join(screen_name)
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
return [User.NewFromJsonDict(u) for u in data]
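# Usage sketch (illustrative; the ids and screen names below are made up):
#   users = api.UsersLookup(user_id=[12345, 67890],
#                           screen_name=['example_one', 'example_two'])
#   by_id = dict((u.id, u) for u in users)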
def GetUser(self, user):
'''Returns a single user.
The twitter.Api instance must be authenticated.
Args:
user: The twitter name or id of the user to retrieve.
Returns:
A twitter.User instance representing that user
'''
url = '%s/users/show/%s.json' % (self.base_url, user)
json = self._FetchUrl(url)
data = self._ParseAndCheckTwitter(json)
return User.NewFromJsonDict(data)
def GetDirectMessages(self, since=None, since_id=None, page=None):
'''Returns a list of the direct messages sent to the authenticating user.
The twitter.Api instance must be authenticated.
Args:
since:
Narrows the returned results to just those statuses created
after the specified HTTP-formatted date. [Optional]
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
page:
Specifies the page of results to retrieve.
Note: there are pagination limits. [Optional]
Returns:
A sequence of twitter.DirectMessage instances
'''
url = '%s/direct_messages.json' % self.base_url
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instance must be authenticated.")
parameters = {}
if since:
parameters['since'] = since
if since_id:
parameters['since_id'] = since_id
if page:
parameters['page'] = page
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
return [DirectMessage.NewFromJsonDict(x) for x in data]
def PostDirectMessage(self, user, text):
'''Post a twitter direct message from the authenticated user
The twitter.Api instance must be authenticated.
Args:
user: The ID or screen name of the recipient user.
text: The message text to be posted. Must be less than 140 characters.
Returns:
A twitter.DirectMessage instance representing the message posted
'''
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instance must be authenticated.")
url = '%s/direct_messages/new.json' % self.base_url
data = {'text': text, 'user': user}
json = self._FetchUrl(url, post_data=data)
data = self._ParseAndCheckTwitter(json)
return DirectMessage.NewFromJsonDict(data)
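# Usage sketch (illustrative; the recipient and text are placeholders):
#   dm = api.PostDirectMessage('some_screen_name', 'Hello from python-twitter')
#   api.DestroyDirectMessage(dm.id)   # clean up again if this was only a test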
def DestroyDirectMessage(self, id):
'''Destroys the direct message specified in the required ID parameter.
The twitter.Api instance must be authenticated, and the
authenticating user must be the recipient of the specified direct
message.
Args:
id: The id of the direct message to be destroyed
Returns:
A twitter.DirectMessage instance representing the message destroyed
'''
url = '%s/direct_messages/destroy/%s.json' % (self.base_url, id)
json = self._FetchUrl(url, post_data={'id': id})
data = self._ParseAndCheckTwitter(json)
return DirectMessage.NewFromJsonDict(data)
def CreateFriendship(self, user):
'''Befriends the user specified in the user parameter as the authenticating user.
The twitter.Api instance must be authenticated.
Args:
user: The ID or screen name of the user to befriend.
Returns:
A twitter.User instance representing the befriended user.
'''
url = '%s/friendships/create/%s.json' % (self.base_url, user)
json = self._FetchUrl(url, post_data={'user': user})
data = self._ParseAndCheckTwitter(json)
return User.NewFromJsonDict(data)
def DestroyFriendship(self, user):
'''Discontinues friendship with the user specified in the user parameter.
The twitter.Api instance must be authenticated.
Args:
user: The ID or screen name of the user with whom to discontinue friendship.
Returns:
A twitter.User instance representing the discontinued friend.
'''
url = '%s/friendships/destroy/%s.json' % (self.base_url, user)
json = self._FetchUrl(url, post_data={'user': user})
data = self._ParseAndCheckTwitter(json)
return User.NewFromJsonDict(data)
def CreateFavorite(self, status):
'''Favorites the status specified in the status parameter as the authenticating user.
Returns the favorite status when successful.
The twitter.Api instance must be authenticated.
Args:
status: The twitter.Status instance to mark as a favorite.
Returns:
A twitter.Status instance representing the newly-marked favorite.
'''
url = '%s/favorites/create/%s.json' % (self.base_url, status.id)
json = self._FetchUrl(url, post_data={'id': status.id})
data = self._ParseAndCheckTwitter(json)
return Status.NewFromJsonDict(data)
def DestroyFavorite(self, status):
'''Un-favorites the status specified in the ID parameter as the authenticating user.
Returns the un-favorited status in the requested format when successful.
The twitter.Api instance must be authenticated.
Args:
status: The twitter.Status to unmark as a favorite.
Returns:
A twitter.Status instance representing the newly-unmarked favorite.
'''
url = '%s/favorites/destroy/%s.json' % (self.base_url, status.id)
json = self._FetchUrl(url, post_data={'id': status.id})
data = self._ParseAndCheckTwitter(json)
return Status.NewFromJsonDict(data)
def GetFavorites(self,
user=None,
page=None):
'''Return a list of Status objects representing favorited tweets.
By default, returns the (up to) 20 most recent tweets for the
authenticated user.
Args:
user:
The twitter name or id of the user whose favorites you are fetching.
If not specified, defaults to the authenticated user. [Optional]
page:
Specifies the page of results to retrieve.
Note: there are pagination limits. [Optional]
'''
parameters = {}
if page:
parameters['page'] = page
if user:
url = '%s/favorites/%s.json' % (self.base_url, user)
elif not user and not self._oauth_consumer:
raise TwitterError("User must be specified if API is not authenticated.")
else:
url = '%s/favorites.json' % self.base_url
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
return [Status.NewFromJsonDict(x) for x in data]
def GetMentions(self,
since_id=None,
max_id=None,
page=None):
'''Returns the 20 most recent mentions (status containing @twitterID)
for the authenticating user.
Args:
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns only statuses with an ID less than
(that is, older than) the specified ID. [Optional]
page:
Specifies the page of results to retrieve.
Note: there are pagination limits. [Optional]
Returns:
A sequence of twitter.Status instances, one for each mention of the user.
'''
url = '%s/statuses/mentions.json' % self.base_url
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instance must be authenticated.")
parameters = {}
if since_id:
parameters['since_id'] = since_id
if max_id:
try:
parameters['max_id'] = long(max_id)
except:
raise TwitterError("max_id must be an integer")
if page:
parameters['page'] = page
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
return [Status.NewFromJsonDict(x) for x in data]
def CreateList(self, user, name, mode=None, description=None):
'''Creates a new list with the given name.
The twitter.Api instance must be authenticated.
Args:
user:
Twitter name to create the list for
name:
New name for the list
mode:
'public' or 'private'.
Defaults to 'public'. [Optional]
description:
Description of the list. [Optional]
Returns:
A twitter.List instance representing the new list
'''
url = '%s/%s/lists.json' % (self.base_url, user)
parameters = {'name': name}
if mode is not None:
parameters['mode'] = mode
if description is not None:
parameters['description'] = description
json = self._FetchUrl(url, post_data=parameters)
data = self._ParseAndCheckTwitter(json)
return List.NewFromJsonDict(data)
def DestroyList(self, user, id):
'''Destroys the list from the given user
The twitter.Api instance must be authenticated.
Args:
user:
The user to remove the list from.
id:
The slug or id of the list to remove.
Returns:
A twitter.List instance representing the removed list.
'''
url = '%s/%s/lists/%s.json' % (self.base_url, user, id)
json = self._FetchUrl(url, post_data={'_method': 'DELETE'})
data = self._ParseAndCheckTwitter(json)
return List.NewFromJsonDict(data)
def CreateSubscription(self, owner, list):
'''Creates a subscription to a list by the authenticated user
The twitter.Api instance must be authenticated.
Args:
owner:
User name or id of the owner of the list being subscribed to.
list:
The slug or list id to subscribe the user to
Returns:
A twitter.List instance representing the list subscribed to
'''
url = '%s/%s/%s/subscribers.json' % (self.base_url, owner, list)
json = self._FetchUrl(url, post_data={'list_id': list})
data = self._ParseAndCheckTwitter(json)
return List.NewFromJsonDict(data)
def DestroySubscription(self, owner, list):
'''Destroys the subscription to a list for the authenticated user
The twitter.Api instance must be authenticated.
Args:
owner:
The user id or screen name of the user that owns the
list that is to be unsubscribed from
list:
The slug or list id of the list to unsubscribe from
Returns:
A twitter.List instance representing the removed list.
'''
url = '%s/%s/%s/subscribers.json' % (self.base_url, owner, list)
json = self._FetchUrl(url, post_data={'_method': 'DELETE', 'list_id': list})
data = self._ParseAndCheckTwitter(json)
return List.NewFromJsonDict(data)
def GetSubscriptions(self, user, cursor=-1):
'''Fetch the sequence of Lists that the given user is subscribed to
The twitter.Api instance must be authenticated.
Args:
user:
The twitter name or id of the user
cursor:
"page" value that Twitter will use to start building the
list sequence from. -1 to start at the beginning.
Twitter will return in the result the values for next_cursor
and previous_cursor. [Optional]
Returns:
A sequence of twitter.List instances, one for each list
'''
if not self._oauth_consumer:
raise TwitterError("twitter.Api instance must be authenticated")
url = '%s/%s/lists/subscriptions.json' % (self.base_url, user)
parameters = {}
parameters['cursor'] = cursor
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
return [List.NewFromJsonDict(x) for x in data['lists']]
def GetLists(self, user, cursor=-1):
'''Fetch the sequence of lists for a user.
The twitter.Api instance must be authenticated.
Args:
user:
The twitter name or id of the user whose friends you are fetching.
If the passed in user is the same as the authenticated user
then you will also receive private list data.
cursor:
"page" value that Twitter will use to start building the
list sequence from. -1 to start at the beginning.
Twitter will return in the result the values for next_cursor
and previous_cursor. [Optional]
Returns:
A sequence of twitter.List instances, one for each list
'''
if not self._oauth_consumer:
raise TwitterError("twitter.Api instance must be authenticated")
url = '%s/%s/lists.json' % (self.base_url, user)
parameters = {}
parameters['cursor'] = cursor
json = self._FetchUrl(url, parameters=parameters)
data = self._ParseAndCheckTwitter(json)
return [List.NewFromJsonDict(x) for x in data['lists']]
def GetUserByEmail(self, email):
'''Returns a single user by email address.
Args:
email:
The email of the user to retrieve.
Returns:
A twitter.User instance representing that user
'''
url = '%s/users/show.json?email=%s' % (self.base_url, email)
json = self._FetchUrl(url)
data = self._ParseAndCheckTwitter(json)
return User.NewFromJsonDict(data)
def VerifyCredentials(self):
'''Returns a twitter.User instance if the authenticating user is valid.
Returns:
A twitter.User instance representing that user if the
credentials are valid, None otherwise.
'''
if not self._oauth_consumer:
raise TwitterError("Api instance must first be given user credentials.")
url = '%s/account/verify_credentials.json' % self.base_url
try:
json = self._FetchUrl(url, no_cache=True)
except urllib2.HTTPError, http_error:
if http_error.code == httplib.UNAUTHORIZED:
return None
else:
raise http_error
data = self._ParseAndCheckTwitter(json)
return User.NewFromJsonDict(data)
def SetCache(self, cache):
'''Override the default cache. Set to None to prevent caching.
Args:
cache:
An instance that supports the same API as the twitter._FileCache
'''
if cache == DEFAULT_CACHE:
self._cache = _FileCache()
else:
self._cache = cache
def SetUrllib(self, urllib):
'''Override the default urllib implementation.
Args:
urllib:
An instance that supports the same API as the urllib2 module
'''
self._urllib = urllib
def SetCacheTimeout(self, cache_timeout):
'''Override the default cache timeout.
Args:
cache_timeout:
Time, in seconds, that responses should be reused.
'''
self._cache_timeout = cache_timeout
def SetUserAgent(self, user_agent):
'''Override the default user agent
Args:
user_agent:
A string that should be sent to the server as the User-Agent header
'''
self._request_headers['User-Agent'] = user_agent
def SetXTwitterHeaders(self, client, url, version):
'''Set the X-Twitter HTTP headers that will be sent to the server.
Args:
client:
The client name as a string. Will be sent to the server as
the 'X-Twitter-Client' header.
url:
The URL of the meta.xml as a string. Will be sent to the server
as the 'X-Twitter-Client-URL' header.
version:
The client version as a string. Will be sent to the server
as the 'X-Twitter-Client-Version' header.
'''
self._request_headers['X-Twitter-Client'] = client
self._request_headers['X-Twitter-Client-URL'] = url
self._request_headers['X-Twitter-Client-Version'] = version
def SetSource(self, source):
'''Suggest the "from source" value to be displayed on the Twitter web site.
The value of the 'source' parameter must be first recognized by
the Twitter server. New source values are authorized on a case by
case basis by the Twitter development team.
Args:
source:
The source name as a string. Will be sent to the server as
the 'source' parameter.
'''
self._default_params['source'] = source
def GetRateLimitStatus(self):
'''Fetch the rate limit status for the currently authorized user.
Returns:
A dictionary containing the time the limit will reset (reset_time),
the number of remaining hits allowed before the reset (remaining_hits),
the number of hits allowed in a 60-minute period (hourly_limit), and
the time of the reset in seconds since The Epoch (reset_time_in_seconds).
'''
url = '%s/account/rate_limit_status.json' % self.base_url
json = self._FetchUrl(url, no_cache=True)
data = self._ParseAndCheckTwitter(json)
return data
def MaximumHitFrequency(self):
'''Determines the minimum number of seconds that a program must wait
before hitting the server again without exceeding the rate_limit
imposed for the currently authenticated user.
Returns:
The minimum second interval that a program must use so as to not
exceed the rate_limit imposed for the user.
'''
rate_status = self.GetRateLimitStatus()
reset_time = rate_status.get('reset_time', None)
limit = rate_status.get('remaining_hits', None)
if reset_time:
# put the reset time into a datetime object
reset = datetime.datetime(*rfc822.parsedate(reset_time)[:7])
# find the difference in time between now and the reset time + 1 hour
delta = reset + datetime.timedelta(hours=1) - datetime.datetime.utcnow()
if not limit:
return int(delta.seconds)
# determine the minimum number of seconds allowed as a regular interval
max_frequency = int(delta.seconds / limit) + 1
# return the number of seconds
return max_frequency
return 60
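# Usage sketch (illustrative): the returned interval can throttle a simple
# polling loop; 'keep_polling' is a hypothetical flag, not part of this API.
#   interval = api.MaximumHitFrequency()
#   while keep_polling:
#       mentions = api.GetMentions()
#       time.sleep(interval)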
def _BuildUrl(self, url, path_elements=None, extra_params=None):
# Break url into its constituent parts
(scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url)
# Add any additional path elements to the path
if path_elements:
# Filter out the path elements that have a value of None
p = [i for i in path_elements if i]
if not path.endswith('/'):
path += '/'
path += '/'.join(p)
# Add any additional query parameters to the query string
if extra_params and len(extra_params) > 0:
extra_query = self._EncodeParameters(extra_params)
# Add it to the existing query
if query:
query += '&' + extra_query
else:
query = extra_query
# Return the rebuilt URL
return urlparse.urlunparse((scheme, netloc, path, params, query, fragment))
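# Example of the rebuilt URL (values are illustrative, not a real endpoint):
#   self._BuildUrl('https://api.twitter.com/1/statuses',
#                  path_elements=['show', None, '123'],
#                  extra_params={'trim_user': 'true', 'skip': None})
#   -> 'https://api.twitter.com/1/statuses/show/123?trim_user=true'
#   (None path elements and None parameter values are silently dropped.)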
def _InitializeRequestHeaders(self, request_headers):
if request_headers:
self._request_headers = request_headers
else:
self._request_headers = {}
def _InitializeUserAgent(self):
user_agent = 'Python-urllib/%s (python-twitter/%s)' % \
(self._urllib.__version__, __version__)
self.SetUserAgent(user_agent)
def _InitializeDefaultParameters(self):
self._default_params = {}
def _DecompressGzippedResponse(self, response):
raw_data = response.read()
if response.headers.get('content-encoding', None) == 'gzip':
url_data = gzip.GzipFile(fileobj=StringIO.StringIO(raw_data)).read()
else:
url_data = raw_data
return url_data
def _Encode(self, s):
if self._input_encoding:
return unicode(s, self._input_encoding).encode('utf-8')
else:
return unicode(s).encode('utf-8')
def _EncodeParameters(self, parameters):
'''Return a string in key=value&key=value form
Values of None are not included in the output string.
Args:
parameters:
A dict of (key, value) tuples, where value is encoded as
specified by self._encoding
Returns:
A URL-encoded string in "key=value&key=value" form
'''
if parameters is None:
return None
else:
return urllib.urlencode(dict([(k, self._Encode(v)) for k, v in parameters.items() if v is not None]))
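# Example (illustrative): values of None are dropped before urlencoding, e.g.
#   self._EncodeParameters({'cursor': '-1', 'page': None})  ->  'cursor=-1'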
def _EncodePostData(self, post_data):
'''Return a string in key=value&key=value form
Values are assumed to be encoded in the format specified by self._encoding,
and are subsequently URL encoded.
Args:
post_data:
A dict of (key, value) tuples, where value is encoded as
specified by self._encoding
Returns:
A URL-encoded string in "key=value&key=value" form
'''
if post_data is None:
return None
else:
return urllib.urlencode(dict([(k, self._Encode(v)) for k, v in post_data.items()]))
def _ParseAndCheckTwitter(self, json):
"""Try and parse the JSON returned from Twitter and return
an empty dictionary if there is any error. This is a purely
defensive check because during some Twitter network outages
it will return an HTML failwhale page."""
try:
data = simplejson.loads(json)
self._CheckForTwitterError(data)
except ValueError:
if "<title>Twitter / Over capacity</title>" in json:
raise TwitterError("Capacity Error")
if "<title>Twitter / Error</title>" in json:
raise TwitterError("Technical Error")
raise TwitterError("json decoding")
return data
def _CheckForTwitterError(self, data):
"""Raises a TwitterError if twitter returns an error message.
Args:
data:
A python dict created from the Twitter json response
Raises:
TwitterError wrapping the twitter error message if one exists.
"""
# Twitter errors are relatively unlikely, so it is faster
# to check first, rather than try and catch the exception
if 'error' in data:
raise TwitterError(data['error'])
if 'errors' in data:
raise TwitterError(data['errors'])
def _FetchUrl(self,
url,
post_data=None,
parameters=None,
no_cache=None,
use_gzip_compression=None):
'''Fetch a URL, optionally caching for a specified time.
Args:
url:
The URL to retrieve
post_data:
A dict of (str, unicode) key/value pairs.
If set, POST will be used.
parameters:
A dict whose key/value pairs should be encoded and added
to the query string. [Optional]
no_cache:
If true, overrides the cache on the current request
use_gzip_compression:
If True, tells the server to gzip-compress the response.
It does not apply to POST requests.
Defaults to None, which will get the value to use from
the instance variable self._use_gzip [Optional]
Returns:
A string containing the body of the response.
'''
# Build the extra parameters dict
extra_params = {}
if self._default_params:
extra_params.update(self._default_params)
if parameters:
extra_params.update(parameters)
if post_data:
http_method = "POST"
else:
http_method = "GET"
if self._debugHTTP:
_debug = 1
else:
_debug = 0
http_handler = self._urllib.HTTPHandler(debuglevel=_debug)
https_handler = self._urllib.HTTPSHandler(debuglevel=_debug)
opener = self._urllib.OpenerDirector()
opener.add_handler(http_handler)
opener.add_handler(https_handler)
if use_gzip_compression is None:
use_gzip = self._use_gzip
else:
use_gzip = use_gzip_compression
# Set up compression
if use_gzip and not post_data:
opener.addheaders.append(('Accept-Encoding', 'gzip'))
if self._oauth_consumer is not None:
if post_data and http_method == "POST":
parameters = post_data.copy()
req = oauth.Request.from_consumer_and_token(self._oauth_consumer,
token=self._oauth_token,
http_method=http_method,
http_url=url, parameters=parameters)
req.sign_request(self._signature_method_hmac_sha1, self._oauth_consumer, self._oauth_token)
headers = req.to_header()
if http_method == "POST":
encoded_post_data = req.to_postdata()
else:
encoded_post_data = None
url = req.to_url()
else:
url = self._BuildUrl(url, extra_params=extra_params)
encoded_post_data = self._EncodePostData(post_data)
# Open and return the URL immediately if we're not going to cache
if encoded_post_data or no_cache or not self._cache or not self._cache_timeout:
response = opener.open(url, encoded_post_data)
url_data = self._DecompressGzippedResponse(response)
opener.close()
else:
# Unique keys are a combination of the url and the oAuth Consumer Key
if self._consumer_key:
key = self._consumer_key + ':' + url
else:
key = url
# See if it has been cached before
last_cached = self._cache.GetCachedTime(key)
# If the cached version is outdated then fetch another and store it
if not last_cached or time.time() >= last_cached + self._cache_timeout:
try:
response = opener.open(url, encoded_post_data)
url_data = self._DecompressGzippedResponse(response)
self._cache.Set(key, url_data)
except urllib2.HTTPError, e:
print e
opener.close()
else:
url_data = self._cache.Get(key)
# Always return the latest version
return url_data
class _FileCacheError(Exception):
'''Base exception class for FileCache related errors'''
class _FileCache(object):
DEPTH = 3
def __init__(self,root_directory=None):
self._InitializeRootDirectory(root_directory)
def Get(self,key):
path = self._GetPath(key)
if os.path.exists(path):
return open(path).read()
else:
return None
def Set(self,key,data):
path = self._GetPath(key)
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
if not os.path.isdir(directory):
raise _FileCacheError('%s exists but is not a directory' % directory)
temp_fd, temp_path = tempfile.mkstemp()
temp_fp = os.fdopen(temp_fd, 'w')
temp_fp.write(data)
temp_fp.close()
if not path.startswith(self._root_directory):
raise _FileCacheError('%s does not appear to live under %s' %
(path, self._root_directory))
if os.path.exists(path):
os.remove(path)
os.rename(temp_path, path)
def Remove(self,key):
path = self._GetPath(key)
if not path.startswith(self._root_directory):
raise _FileCacheError('%s does not appear to live under %s' %
(path, self._root_directory ))
if os.path.exists(path):
os.remove(path)
def GetCachedTime(self,key):
path = self._GetPath(key)
if os.path.exists(path):
return os.path.getmtime(path)
else:
return None
def _GetUsername(self):
'''Attempt to find the username in a cross-platform fashion.'''
try:
return os.getenv('USER') or \
os.getenv('LOGNAME') or \
os.getenv('USERNAME') or \
os.getlogin() or \
'nobody'
except (AttributeError, IOError, OSError), e:
return 'nobody'
def _GetTmpCachePath(self):
username = self._GetUsername()
cache_directory = 'python.cache_' + username
return os.path.join(tempfile.gettempdir(), cache_directory)
def _InitializeRootDirectory(self, root_directory):
if not root_directory:
root_directory = self._GetTmpCachePath()
root_directory = os.path.abspath(root_directory)
if not os.path.exists(root_directory):
os.mkdir(root_directory)
if not os.path.isdir(root_directory):
raise _FileCacheError('%s exists but is not a directory' %
root_directory)
self._root_directory = root_directory
def _GetPath(self,key):
try:
hashed_key = md5(key).hexdigest()
except TypeError:
hashed_key = md5.new(key).hexdigest()
return os.path.join(self._root_directory,
self._GetPrefix(hashed_key),
hashed_key)
def _GetPrefix(self,hashed_key):
return os.path.sep.join(hashed_key[0:_FileCache.DEPTH])
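# Cache layout sketch (illustrative; the temp directory depends on the platform):
# with DEPTH = 3 the md5 of a key such as '<consumer_key>:https://api.twitter.com/...'
# is split into three one-character directories, e.g.
#   /tmp/python.cache_<user>/a/b/c/abc123...def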
| []
| []
| [
"USER",
"USERNAME",
"LOGNAME"
]
| [] | ["USER", "USERNAME", "LOGNAME"] | python | 3 | 0 | |
cms/tests/test_templatetags.py | from __future__ import with_statement
from copy import deepcopy
import os
from classytags.tests import DummyParser, DummyTokens
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.contrib.sites.models import Site
from django.core import mail
from django.core.exceptions import ImproperlyConfigured
from django.test import RequestFactory
from django.utils.html import escape
from django.utils.timezone import now
from djangocms_text_ckeditor.cms_plugins import TextPlugin
import cms
from cms.api import create_page, create_title, add_plugin
from cms.middleware.toolbar import ToolbarMiddleware
from cms.models import Page, Placeholder
from cms.templatetags.cms_tags import (_get_page_by_untyped_arg,
_show_placeholder_for_page,
_get_placeholder, RenderPlugin)
from cms.templatetags.cms_js_tags import json_filter
from cms.test_utils.fixtures.templatetags import TwoPagesFixture
from cms.test_utils.testcases import CMSTestCase
from cms.toolbar.toolbar import CMSToolbar
from cms.utils import get_cms_setting, get_site_id
from cms.utils.compat import DJANGO_1_7
from cms.utils.placeholder import get_placeholders
from sekizai.context import SekizaiContext
class TemplatetagTests(CMSTestCase):
def test_get_site_id_from_nothing(self):
with self.settings(SITE_ID=10):
self.assertEqual(10, get_site_id(None))
def test_get_site_id_from_int(self):
self.assertEqual(10, get_site_id(10))
def test_get_site_id_from_site(self):
site = Site()
site.id = 10
self.assertEqual(10, get_site_id(site))
def test_get_site_id_from_str_int(self):
self.assertEqual(10, get_site_id('10'))
def test_get_site_id_from_str(self):
with self.settings(SITE_ID=10):
self.assertEqual(10, get_site_id("something"))
def test_unicode_placeholder_name_fails_fast(self):
self.assertRaises(ImproperlyConfigured, get_placeholders, 'unicode_placeholder.html')
def test_page_attribute_tag_escapes_content(self):
script = '<script>alert("XSS");</script>'
class FakePage(object):
def get_page_title(self, *args, **kwargs):
return script
class FakeRequest(object):
current_page = FakePage()
REQUEST = {'language': 'en'}
request = FakeRequest()
template = '{% load cms_tags %}{% page_attribute page_title %}'
output = self.render_template_obj(template, {}, request)
self.assertNotEqual(script, output)
self.assertEqual(escape(script), output)
def test_json_encoder(self):
self.assertEqual(json_filter(True), 'true')
self.assertEqual(json_filter(False), 'false')
self.assertEqual(json_filter([1, 2, 3]), '[1, 2, 3]')
self.assertEqual(json_filter((1, 2, 3)), '[1, 2, 3]')
filtered_dict = json_filter({'item1': 1, 'item2': 2, 'item3': 3})
self.assertTrue('"item1": 1' in filtered_dict)
self.assertTrue('"item2": 2' in filtered_dict)
self.assertTrue('"item3": 3' in filtered_dict)
today = now().today()
self.assertEqual('"%s"' % today.isoformat()[:-3], json_filter(today))
def test_static_with_version(self):
expected = '<script src="/static/cms/css/cms.base.css?%(version)s" type="text/javascript"></script>'
expected = expected % {'version': cms.__version__}
template = (
"""{% load cms_static %}<script src="{% static_with_version "cms/css/cms.base.css" %}" """
"""type="text/javascript"></script>"""
)
output = self.render_template_obj(template, {}, None)
self.assertEqual(expected, output)
class TemplatetagDatabaseTests(TwoPagesFixture, CMSTestCase):
def _getfirst(self):
return Page.objects.public().get(title_set__title='first')
def _getsecond(self):
return Page.objects.public().get(title_set__title='second')
def test_get_page_by_untyped_arg_none(self):
control = self._getfirst()
request = self.get_request('/')
request.current_page = control
page = _get_page_by_untyped_arg(None, request, 1)
self.assertEqual(page, control)
def test_get_page_by_pk_arg_edit_mode(self):
control = self._getfirst()
request = self.get_request('/')
request.GET = {"edit": ''}
user = self._create_user("admin", True, True)
request.current_page = control
request.user = user
middleware = ToolbarMiddleware()
middleware.process_request(request)
page = _get_page_by_untyped_arg(control.pk, request, 1)
self.assertEqual(page, control.publisher_draft)
def test_get_page_by_untyped_arg_page(self):
control = self._getfirst()
request = self.get_request('/')
page = _get_page_by_untyped_arg(control, request, 1)
self.assertEqual(page, control)
def test_get_page_by_untyped_arg_reverse_id(self):
second = self._getsecond()
request = self.get_request('/')
page = _get_page_by_untyped_arg("myreverseid", request, 1)
self.assertEqual(page, second)
def test_get_page_by_untyped_arg_dict(self):
second = self._getsecond()
request = self.get_request('/')
page = _get_page_by_untyped_arg({'pk': second.pk}, request, 1)
self.assertEqual(page, second)
def test_get_page_by_untyped_arg_dict_fail_debug(self):
with self.settings(DEBUG=True):
request = self.get_request('/')
self.assertRaises(Page.DoesNotExist,
_get_page_by_untyped_arg, {'pk': 1003}, request, 1
)
self.assertEqual(len(mail.outbox), 0)
def test_get_page_by_untyped_arg_dict_fail_nodebug_do_email(self):
with self.settings(SEND_BROKEN_LINK_EMAILS=True, DEBUG=False,
MANAGERS=[("Jenkins", "[email protected]")]):
request = self.get_request('/')
page = _get_page_by_untyped_arg({'pk': 1003}, request, 1)
self.assertEqual(page, None)
self.assertEqual(len(mail.outbox), 1)
def test_get_page_by_untyped_arg_dict_fail_nodebug_no_email(self):
with self.settings(SEND_BROKEN_LINK_EMAILS=False, DEBUG=False,
MANAGERS=[("Jenkins", "[email protected]")]):
request = self.get_request('/')
page = _get_page_by_untyped_arg({'pk': 1003}, request, 1)
self.assertEqual(page, None)
self.assertEqual(len(mail.outbox), 0)
def test_get_page_by_untyped_arg_fail(self):
request = self.get_request('/')
self.assertRaises(TypeError, _get_page_by_untyped_arg, [], request, 1)
def test_show_placeholder_for_page_placeholder_does_not_exist(self):
"""
Verify ``show_placeholder`` correctly handles being given an
invalid identifier.
"""
with self.settings(DEBUG=True):
context = self.get_context('/')
self.assertRaises(Placeholder.DoesNotExist, _show_placeholder_for_page,
context, 'does_not_exist', 'myreverseid')
with self.settings(DEBUG=False):
content = _show_placeholder_for_page(context, 'does_not_exist', 'myreverseid')
self.assertEqual(content['content'], '')
def test_untranslated_language_url(self):
""" Tests page_language_url templatetag behavior when used on a page
without the requested translation, both when CMS_HIDE_UNTRANSLATED is
True and False.
When True it should return the root page URL if the current page is
untranslated (PR #1125)
"""
page_1 = create_page('Page 1', 'nav_playground.html', 'en', published=True,
in_navigation=True, reverse_id='page1')
create_title("de", "Seite 1", page_1, slug="seite-1")
page_1.publish('en')
page_1.publish('de')
page_2 = create_page('Page 2', 'nav_playground.html', 'en', page_1, published=True,
in_navigation=True, reverse_id='page2')
create_title("de", "Seite 2", page_2, slug="seite-2")
page_2.publish('en')
page_2.publish('de')
page_3 = create_page('Page 3', 'nav_playground.html', 'en', page_2, published=True,
in_navigation=True, reverse_id='page3')
tpl = "{% load menu_tags %}{% page_language_url 'de' %}"
lang_settings = deepcopy(get_cms_setting('LANGUAGES'))
lang_settings[1][1]['hide_untranslated'] = False
with self.settings(CMS_LANGUAGES=lang_settings):
context = self.get_context(page_2.get_absolute_url())
context['request'].current_page = page_2
res = self.render_template_obj(tpl, context.__dict__, context['request'])
self.assertEqual(res, "/de/seite-2/")
# Default configuration has CMS_HIDE_UNTRANSLATED=False
context = self.get_context(page_2.get_absolute_url())
context['request'].current_page = page_2.publisher_public
res = self.render_template_obj(tpl, context.__dict__, context['request'])
self.assertEqual(res, "/de/seite-2/")
context = self.get_context(page_3.get_absolute_url())
context['request'].current_page = page_3.publisher_public
res = self.render_template_obj(tpl, context.__dict__, context['request'])
self.assertEqual(res, "/de/page-3/")
lang_settings[1][1]['hide_untranslated'] = True
with self.settings(CMS_LANGUAGES=lang_settings):
context = self.get_context(page_2.get_absolute_url())
context['request'].current_page = page_2.publisher_public
res = self.render_template_obj(tpl, context.__dict__, context['request'])
self.assertEqual(res, "/de/seite-2/")
context = self.get_context(page_3.get_absolute_url())
context['request'].current_page = page_3.publisher_public
res = self.render_template_obj(tpl, context.__dict__, context['request'])
self.assertEqual(res, "/de/")
def test_create_placeholder_if_not_exist_in_template(self):
"""
Tests that adding a new placeholder to an existing page's template
creates the placeholder.
"""
page = create_page('Test', 'col_two.html', 'en')
# I need to make it seem like the user added another placeholder to the SAME template.
page._template_cache = 'col_three.html'
class FakeRequest(object):
current_page = page
REQUEST = {'language': 'en'}
placeholder = _get_placeholder(page, page, dict(request=FakeRequest()), 'col_right')
page.placeholders.get(slot='col_right')
self.assertEqual(placeholder.slot, 'col_right')
class NoFixtureDatabaseTemplateTagTests(CMSTestCase):
def test_cached_show_placeholder_sekizai(self):
from django.core.cache import cache
cache.clear()
from cms.test_utils import project
template_dir = os.path.join(os.path.dirname(project.__file__), 'templates', 'alt_plugin_templates',
'show_placeholder')
page = create_page('Test', 'col_two.html', 'en')
placeholder = page.placeholders.all()[0]
add_plugin(placeholder, TextPlugin, 'en', body='HIDDEN')
request = RequestFactory().get('/')
request.user = self.get_staff_user_with_no_permissions()
request.current_page = page
if DJANGO_1_7:
override = {'TEMPLATE_DIRS': [template_dir], 'CMS_TEMPLATES': []}
else:
override = {'TEMPLATES': deepcopy(settings.TEMPLATES)}
override['TEMPLATES'][0]['DIRS'] = [template_dir]
with self.settings(**override):
template = "{% load cms_tags sekizai_tags %}{% show_placeholder slot page 'en' 1 %}{% render_block 'js' %}"
output = self.render_template_obj(template, {'page': page, 'slot': placeholder.slot}, request)
self.assertIn('JAVASCRIPT', output)
def test_show_placeholder_lang_parameter(self):
from django.core.cache import cache
cache.clear()
page = create_page('Test', 'col_two.html', 'en')
create_title('fr', 'Fr Test', page)
placeholder = page.placeholders.all()[0]
add_plugin(placeholder, TextPlugin, 'en', body='<b>En Test</b>')
add_plugin(placeholder, TextPlugin, 'fr', body='<b>Fr Test</b>')
request = RequestFactory().get('/')
request.user = AnonymousUser()
request.current_page = page
template = "{% load cms_tags sekizai_tags %}{% show_placeholder slot page 'en' 1 %}{% render_block 'js' %}"
output = self.render_template_obj(template, {'page': page, 'slot': placeholder.slot}, request)
self.assertIn('<b>En Test</b>', output)
template = "{% load cms_tags sekizai_tags %}{% show_placeholder slot page 'fr' 1 %}{% render_block 'js' %}"
output = self.render_template_obj(template, {'page': page, 'slot': placeholder.slot}, request)
self.assertIn('<b>Fr Test</b>', output)
def test_show_placeholder_for_page_marks_output_safe(self):
from django.core.cache import cache
cache.clear()
page = create_page('Test', 'col_two.html', 'en')
placeholder = page.placeholders.all()[0]
add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
request = RequestFactory().get('/')
request.user = AnonymousUser()
request.current_page = page
template = "{% load cms_tags sekizai_tags %}{% show_placeholder slot page 'en' 1 %}{% render_block 'js' %}"
with self.assertNumQueries(4):
output = self.render_template_obj(template, {'page': page, 'slot': placeholder.slot}, request)
self.assertIn('<b>Test</b>', output)
with self.assertNumQueries(0):
output = self.render_template_obj(template, {'page': page, 'slot': placeholder.slot}, request)
self.assertIn('<b>Test</b>', output)
def test_cached_show_placeholder_preview(self):
from django.core.cache import cache
cache.clear()
page = create_page('Test', 'col_two.html', 'en', published=True)
placeholder = page.placeholders.all()[0]
add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
request = RequestFactory().get('/')
user = self._create_user("admin", True, True)
request.current_page = page.publisher_public
request.user = user
template = "{% load cms_tags %}{% show_placeholder slot page 'en' 1 %}"
with self.assertNumQueries(4):
output = self.render_template_obj(template, {'page': page, 'slot': placeholder.slot}, request)
self.assertIn('<b>Test</b>', output)
add_plugin(placeholder, TextPlugin, 'en', body='<b>Test2</b>')
request = RequestFactory().get('/?preview')
request.current_page = page
request.user = user
with self.assertNumQueries(4):
output = self.render_template_obj(template, {'page': page, 'slot': placeholder.slot}, request)
self.assertIn('<b>Test2</b>', output)
def test_render_plugin(self):
from django.core.cache import cache
cache.clear()
page = create_page('Test', 'col_two.html', 'en', published=True)
placeholder = page.placeholders.all()[0]
plugin = add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
template = "{% load cms_tags %}{% render_plugin plugin %}"
request = RequestFactory().get('/')
user = self._create_user("admin", True, True)
request.user = user
request.current_page = page
request.session = {}
request.toolbar = CMSToolbar(request)
with self.assertNumQueries(0):
output = self.render_template_obj(template, {'plugin': plugin}, request)
self.assertIn('<b>Test</b>', output)
def test_render_plugin_no_context(self):
placeholder = Placeholder.objects.create(slot='test')
plugin = add_plugin(placeholder, TextPlugin, 'en', body='Test')
parser = DummyParser()
tokens = DummyTokens(plugin)
tag = RenderPlugin(parser, tokens)
superuser = self.get_superuser()
request = RequestFactory().get('/')
request.current_page = None
request.user = superuser
request.session = {}
request.toolbar = CMSToolbar(request)
request.toolbar.edit_mode = True
context = SekizaiContext({
'request': request
})
output = tag.render(context)
self.assertEqual(
output,
'<div class="cms-plugin cms-plugin-{0}">Test</div>'.format(
plugin.pk
)
)
def test_render_placeholder_with_no_page(self):
page = create_page('Test', 'col_two.html', 'en', published=True)
template = "{% load cms_tags %}{% placeholder test or %}< --- empty --->{% endplaceholder %}"
request = RequestFactory().get('/asdadsaasd/')
user = self.get_superuser()
request.user = user
request.current_page = page
request.session = {}
request.toolbar = CMSToolbar(request)
request.toolbar.edit_mode = True
request.toolbar.is_staff = True
with self.assertNumQueries(4):
self.render_template_obj(template, {}, request)
def test_render_placeholder_as_var(self):
page = create_page('Test', 'col_two.html', 'en', published=True)
template = "{% load cms_tags %}{% placeholder test or %}< --- empty --->{% endplaceholder %}"
request = RequestFactory().get('/asdadsaasd/')
user = self.get_superuser()
request.user = user
request.current_page = page
request.session = {}
request.toolbar = CMSToolbar(request)
request.toolbar.edit_mode = True
request.toolbar.is_staff = True
with self.assertNumQueries(4):
self.render_template_obj(template, {}, request)
def test_render_model_add(self):
from django.core.cache import cache
from cms.test_utils.project.sampleapp.models import Category
cache.clear()
page = create_page('Test', 'col_two.html', 'en', published=True)
template = "{% load cms_tags %}{% render_model_add category %}"
user = self._create_user("admin", True, True)
request = RequestFactory().get('/')
request.user = user
request.current_page = page
request.session = {}
request.toolbar = CMSToolbar(request)
request.toolbar.edit_mode = True
request.toolbar.is_staff = True
with self.assertNumQueries(0):
output = self.render_template_obj(template, {'category': Category()}, request)
expected = 'cms-plugin cms-plugin-sampleapp-category-add-0 cms-render-model-add'
self.assertIn(expected, output)
# Now test that it does NOT render when not in edit mode
request = RequestFactory().get('/')
request.user = user
request.current_page = page
request.session = {}
request.toolbar = CMSToolbar(request)
with self.assertNumQueries(0):
output = self.render_template_obj(template, {'category': Category()}, request)
expected = ''
self.assertEqual(expected, output)
def test_render_model_add_block(self):
from django.core.cache import cache
from cms.test_utils.project.sampleapp.models import Category
cache.clear()
page = create_page('Test', 'col_two.html', 'en', published=True)
template = "{% load cms_tags %}{% render_model_add_block category %}wrapped{% endrender_model_add_block %}"
user = self._create_user("admin", True, True)
request = RequestFactory().get('/')
request.user = user
request.current_page = page
request.session = {}
request.toolbar = CMSToolbar(request)
request.toolbar.edit_mode = True
request.toolbar.is_staff = True
with self.assertNumQueries(0):
output = self.render_template_obj(template, {'category': Category()}, request)
expected = 'cms-plugin cms-plugin-sampleapp-category-add-0 cms-render-model-add'
self.assertIn(expected, output)
# Now test that it does NOT render when not in edit mode
request = RequestFactory().get('/')
request.user = user
request.current_page = page
request.session = {}
request.toolbar = CMSToolbar(request)
with self.assertNumQueries(0):
output = self.render_template_obj(template, {'category': Category()}, request)
expected = 'wrapped'
self.assertEqual(expected, output)
| []
| []
| []
| [] | [] | python | null | null | null |
test/e2e/e2e_suite_test.go | /*
Copyright The KubeDB Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e_test
import (
"flag"
"os"
"path/filepath"
"testing"
"time"
api "kubedb.dev/apimachinery/apis/kubedb/v1alpha1"
cs "kubedb.dev/apimachinery/client/clientset/versioned"
"kubedb.dev/apimachinery/client/clientset/versioned/scheme"
"kubedb.dev/redis/test/e2e/framework"
"github.com/appscode/go/log"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/reporters"
. "github.com/onsi/gomega"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes"
clientSetScheme "k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/client-go/util/homedir"
ka "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
"kmodules.xyz/client-go/logs"
"kmodules.xyz/client-go/tools/clientcmd"
appcat_cs "kmodules.xyz/custom-resources/client/clientset/versioned/typed/appcatalog/v1alpha1"
)
// To Run E2E tests:
//
// 1. ./hack/make.py test e2e
//
// 2. ./hack/make.py test e2e --v=1 --docker-registry=kubedbci --db-catalog=5.0 --db-version=5.0 --selfhosted-operator=true
type clusterVar struct {
f *framework.Invocation
redis *api.Redis
}
var (
storageClass = "standard"
kubeconfigPath = func() string {
kubecfg := os.Getenv("KUBECONFIG")
if kubecfg != "" {
return kubecfg
}
return filepath.Join(homedir.HomeDir(), ".kube", "config")
}()
kubeContext = ""
)
func init() {
utilruntime.Must(scheme.AddToScheme(clientSetScheme.Scheme))
flag.StringVar(&kubeconfigPath, "kubeconfig", kubeconfigPath, "Path to kubeconfig file with authorization information (the master location is set by the master flag).")
flag.StringVar(&kubeContext, "kube-context", "", "Name of kube context")
flag.StringVar(&storageClass, "storageclass", storageClass, "Kubernetes StorageClass name")
flag.StringVar(&framework.DockerRegistry, "docker-registry", framework.DockerRegistry, "User provided docker repository")
flag.StringVar(&framework.DBCatalogName, "db-catalog", framework.DBCatalogName, "Postgres version")
flag.BoolVar(&framework.Cluster, "cluster", framework.Cluster, "Enable cluster tests")
}
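// Example invocation (illustrative; flag values depend on the local cluster
// and registry):
//   go test ./test/e2e/... -v \
//     --kubeconfig=$HOME/.kube/config \
//     --storageclass=standard \
//     --docker-registry=kubedbci \
//     --db-catalog=5.0 \
//     --cluster=true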
const (
TIMEOUT = 20 * time.Minute
)
var (
root *framework.Framework
cl clusterVar
)
func TestE2e(t *testing.T) {
logs.InitLogs()
defer logs.FlushLogs()
RegisterFailHandler(Fail)
SetDefaultEventuallyTimeout(TIMEOUT)
junitReporter := reporters.NewJUnitReporter("junit.xml")
RunSpecsWithDefaultAndCustomReporters(t, "e2e Suite", []Reporter{junitReporter})
}
var _ = BeforeSuite(func() {
By("Using kubeconfig from " + kubeconfigPath)
config, err := clientcmd.BuildConfigFromContext(kubeconfigPath, kubeContext)
Expect(err).NotTo(HaveOccurred())
// raise throttling time. ref: https://github.com/appscode/voyager/issues/640
config.Burst = 100
config.QPS = 100
// Clients
kubeClient := kubernetes.NewForConfigOrDie(config)
extClient := cs.NewForConfigOrDie(config)
kaClient := ka.NewForConfigOrDie(config)
appCatalogClient, err := appcat_cs.NewForConfig(config)
if err != nil {
log.Fatalln(err)
}
// Framework
root = framework.New(config, kubeClient, extClient, kaClient, appCatalogClient, storageClass)
// Create namespace
By("Using namespace " + root.Namespace())
err = root.CreateNamespace()
Expect(err).NotTo(HaveOccurred())
root.EventuallyCRD().Should(Succeed())
if framework.Cluster {
cl = clusterVar{}
cl.f = root.Invoke()
cl.redis = cl.f.RedisCluster()
createAndWaitForRunning()
}
})
var _ = AfterSuite(func() {
if framework.Cluster {
deleteTestResource()
}
By("Delete left over Redis objects")
root.CleanRedis()
By("Delete Namespace")
err := root.DeleteNamespace()
Expect(err).NotTo(HaveOccurred())
})
| [
"\"KUBECONFIG\""
]
| []
| [
"KUBECONFIG"
]
| [] | ["KUBECONFIG"] | go | 1 | 0 | |
webapp/micro/api.py | import requests
import os
import pickle
from flask import current_app
from micro import db, cache, cache_timeout
from micro.models import GeoIP
# Collect HTTP_X_FORWARDED_FOR if it exists, as it will contain the real
# client IP instead of the proxy or router address. If it does not exist,
# fall back to REMOTE_ADDR.
def GetUserIP(req):
if req.environ.get('HTTP_X_FORWARDED_FOR') is None:
return req.environ['REMOTE_ADDR']
else:
return req.environ['HTTP_X_FORWARDED_FOR']
# API call to collect geolocation information
# based on client IP address
def Geolocation_ApiCall(ip):
url = 'https://api.ipdata.co/'+ip
headers = {'Accept': 'application/json'}
key = os.getenv('GEOLOC_KEY')
payload = {'api-key': key}
r = requests.get(url, headers=headers, params=payload)
if r.status_code == 200:
try:
return r.json()
except ValueError:
return {'ip': ip, 'error': 'No Geolocation data could be decoded'}
#
def Geolocation(ip):
geoip = GeoIP.query.filter_by(ip=ip).first()
if geoip is not None:
update_threat(ip)
return geoip
else:
geo = Geolocation_ApiCall(ip)
if geo is not None:
threat = calculate_threat(geo['threat'])
g = GeoIP(ip=ip,
city=geo['city'],
region=geo['region'],
country_name=geo['country_name'],
country_code=geo['country_code'],
continent=geo['continent_name'],
latitude=geo['latitude'],
longitude=geo['longitude'],
postal=geo['postal'],
flag=geo['flag'],
currency_name=geo['currency']['name'],
currency_code=geo['currency']['code'],
threat=threat)
db.session.add(g)
db.session.commit()
def calculate_threat(threat):
r = False
for _, v in threat.items():
if v:
r = True
break
return r
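# Example (illustrative; the flag names mirror the ipdata 'threat' block):
# a payload like {'is_tor': False, 'is_proxy': True, 'is_known_abuser': False}
# yields True as soon as any single flag is set.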
def update_threat(ip):
geo = Geolocation_ApiCall(ip)
g = GeoIP.query.filter_by(ip=ip).first()
g.threat = calculate_threat(geo['threat'])
db.session.commit()
def Currency_Change_Rate(user_currency):
r_key = f'currency:{user_currency}'
cached = cache.get(r_key)
if cached:
current_app.logger.info('currency is cached')
return pickle.loads(cached)
url = 'https://free.currencyconverterapi.com/api/v6/convert'
headers = {'Accept': 'application/json'}
payload = {}
key = os.getenv('CURRENCY_KEY')
s = f'{user_currency}_USD,{user_currency}_EUR,{user_currency}_JPY,{user_currency}_CAD'
payload = {'q': s, 'compact': 'ultra', 'apiKey': key}
r = requests.get(url, headers=headers, params=payload)
if r.status_code == 200:
try:
j = r.json()
d = {'USD': j[f'{user_currency}_USD'],
'EUR': j[f'{user_currency}_EUR'],
'JPY': j[f'{user_currency}_JPY'],
'CAD': j[f'{user_currency}_CAD']}
cache.setex(name=r_key,
time=cache_timeout,
value=pickle.dumps(d))
return d
except ValueError:
return {'error': 'No Currency data could be decoded'}
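# Usage sketch (illustrative): results are cached in Redis under
# 'currency:<code>' for cache_timeout seconds, e.g.
#   Currency_Change_Rate('GBP')  ->  {'USD': ..., 'EUR': ..., 'JPY': ..., 'CAD': ...}
# so repeated calls only hit the external API after the cached entry expires.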
def GetWeather(data):
r_key = f'weather:{data.ip}'
cached = cache.get(r_key)
if cached:
return pickle.loads(cached)
weather_key = os.getenv('WEATHER_KEY')
url = 'http://api.openweathermap.org/data/2.5/weather'
headers = {'Accept': 'application/json'}
# use the key loaded from the WEATHER_KEY environment variable rather than a hard-coded value
payload = {'q': f'{data.city},{data.country_code}',
'appid': weather_key}
r = requests.get(url, headers=headers, params=payload)
if r.status_code == 200:
try:
t = r.json()
weather = {'cityid': t['id'], 'key': weather_key}
cache.setex(name=r_key,
time=cache_timeout,
value=pickle.dumps(weather))
return weather
except ValueError:
return {'error': 'No Currency data could be decoded'}
elif r.status_code == 404:
payload = {'zip': f'{data.postal},{data.country_code}',
'appid': weather_key}
r = requests.get(url, headers=headers, params=payload)
if r.status_code == 200:
try:
t = r.json()
weather = {'cityid': t['id'], 'key': weather_key}
cache.setex(name=r_key,
time=cache_timeout,
value=pickle.dumps(weather))
return weather
except ValueError:
return {'error': 'No Currency data could be decoded'}
elif r.status_code == 404:
payload = {'lat': int(data.latitude), 'lon': int(data.longitude),
'appid': weather_key}
r = requests.get(url, headers=headers, params=payload)
if r.status_code == 200:
try:
t = r.json()
weather = {'cityid': t['id'], 'key': weather_key}
cache.setex(name=r_key,
time=cache_timeout,
value=pickle.dumps(weather))
return weather
except ValueError:
return {'error': 'No Currency data could be decoded'}
| []
| []
| [
"WEATHER_KEY",
"GEOLOC_KEY",
"CURRENCY_KEY"
]
| [] | ["WEATHER_KEY", "GEOLOC_KEY", "CURRENCY_KEY"] | python | 3 | 0 | |
services/geom2pickle/recQueryFuncReturn.py | # generell imports
import os
import time
import requests
import urllib.request
import json
import string
import random
# pythonOCC imports
import OCC
# FIXME: OCC imports are necessary; fix this!
from OCC import VERSION as OCC_VERSION
from OCC.Core.Standard import Standard_Transient
from OCC.Core import gp
from OCC.Core.gp import gp_Vec, gp_Trsf, gp_Dir, gp_Pnt, gp_Ax2
from OCC.Core.Visualization import Tesselator
from OCC.Extend.TopologyUtils import is_edge, is_wire, discretize_edge, discretize_wire
from multiprocessing import Process, Value
from OCC.Core.BRepPrimAPI import (
BRepPrimAPI_MakeBox,
BRepPrimAPI_MakeTorus,
BRepPrimAPI_MakeCylinder,
)
from OCC.Core.BRepBuilderAPI import BRepBuilderAPI_Transform
from OCC.Core.BRepAlgoAPI import BRepAlgoAPI_Cut
import jsonpickle
# from app import LC_ADDR_fuseki_app
LC_ADDR_fuseki_app = os.environ["LC_ADDR_fuseki_app"]
# LC_ADDR_fuseki_app ="http://localhost:22631"
proxies = {"http": None, "https": None}
def give_me_new_ID():
return "_%s" % "".join(random.choice(string.ascii_letters) for _ in range(8))
def dict_generator(indict, pre=None):
pre = pre[:] if pre else []
if isinstance(indict, dict):
for key, value in indict.items():
if isinstance(value, dict):
for d in dict_generator(value, [key] + pre):
yield d
elif isinstance(value, list) or isinstance(value, tuple):
for v in value:
for d in dict_generator(v, [key] + pre):
yield d
else:
yield pre + [key, value]
else:
yield indict
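# Example (illustrative): flattens nested dicts into path lists; list elements
# that are not dicts are yielded as bare values, e.g.
#   list(dict_generator({'a': {'b': 1}, 'c': [2, 3]}))  ->  [['a', 'b', 1], 2, 3]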
def sparqlSelect(PrefixList, SelectStr, FindStr):
s = time.time()
str1 = ""
for i1 in PrefixList:
str1 += "PREFIX " + i1
qer_string = """ %s """ % str1 + """ SELECT %s WHERE { GRAPH ?g { %s }} """ % (
SelectStr,
FindStr,
)
# print(f"qer_string: {qer_string}")
headers = {"content-type": "application/json", "encoding": "UTF-8"}
url = LC_ADDR_fuseki_app + "/fuseki/sparql"
payload = {"SparqlString": qer_string}
r = requests.post(url, data=json.dumps(payload), headers=headers, proxies=proxies)
jsonResp = r.json()
return jsonResp
def geomQuery(position):
# print(f"position: {position}")
PrefixList = [
"omg: <https://w3id.org/omg#> ",
"rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> ",
]
SelectStr = "?CompGeomDesc ?CompGeomDescType "
FindStr = (
"""<%s> omg:hasComplexGeometryDescription ?CompGeomDesc .
?CompGeomDesc rdf:type ?CompGeomDescType . """
% position
)
qres = sparqlSelect(PrefixList, SelectStr, FindStr)
qresResBin = qres["results"]["bindings"]
if len(qresResBin) != 0:
# find OCC class and className
occName = qresResBin[0]["CompGeomDesc"]["value"]
ResOCC_className = qresResBin[0]["CompGeomDescType"]["value"]
OCC_classname = ResOCC_className.split("#")[-1]
OCC_module = OCC_classname.split("_")[0]
# print(OCC_classname, OCC_module)
occClass = getattr(getattr(OCC.Core, OCC_module), OCC_classname)
# print(occClass)
cparamstrlist = paramQuery(occName)
# print(cparamstrlist)
# TODO: find a better way to convert numbers to floats while keeping class instances
for i in range(len(cparamstrlist)):
try:
cparamstrlist[i] = float(cparamstrlist[i])
except:
pass
objres = occClass(*cparamstrlist)
PrefixList = [
"oop: <https://projekt-scope.de/ontologies/oop#> ",
"rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>",
]
SelectStr = "?Method ?oneMethod "
FindStr = (
"""<%s> oop:hasMethod ?Method .
OPTIONAL { ?Method oop:hasListContent ?o .
?o rdf:rest*/rdf:first ?oneMethod . }"""
% occName
)
qresResBin3 = []
qres = sparqlSelect(PrefixList, SelectStr, FindStr)
qresResBin3 = qres["results"]["bindings"]
methods = []
if len(qresResBin3) != 0:
if len(qresResBin3) > 1:
for entry in qresResBin3:
if entry["oneMethod"]["type"] == "uri":
methods.append(entry["oneMethod"]["value"])
else:
pass
else:
methods.append(qresResBin3[0]["Method"]["value"])
for methodname in methods:
PrefixList = ["rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>"]
SelectStr = "?o "
FindStr = "<%s> rdf:type ?o . " % methodname
qres = sparqlSelect(PrefixList, SelectStr, FindStr)
qresResBin4 = qres["results"]["bindings"]
for row_qMN in qresResBin4:
methodtypestr = row_qMN["o"]["value"].split("#")[-1]
mparams = []
mparams = paramQuery(methodname)
if methodtypestr in ["Shape", "Edge", "Curve", "Wire", "Face"]:
objres = getattr(objres, methodtypestr)(*mparams)
else:
# print(objres, methodtypestr,*mparams)
getattr(objres, methodtypestr)(*mparams)
return objres
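# Usage sketch (illustrative; the graph URI is hypothetical): for a node that
# links an omg:hasComplexGeometryDescription to e.g. a BRepPrimAPI_MakeBox
# description with its oop:hasParameter list,
#   shape = geomQuery("https://example.org/building#Wall_01")
# resolves the OCC class, builds it from the queried parameters, applies any
# oop:hasMethod calls recursively and returns the resulting OCC object.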
def paramQuery(position):
# find all parameters (doesn't matter if list or single)
PrefixList = ["oop: <https://projekt-scope.de/ontologies/oop#> "]
SelectStr = "?o1 ?o2"
FindStr = "<%s> oop:hasParameter ?o1 . OPTIONAL { ?o1 a ?o2 }" % position
qres = sparqlSelect(PrefixList, SelectStr, FindStr)
qresResBin = qres["results"]["bindings"]
paramstrlist = []
if len(qresResBin) != 0:
for row_qP in qresResBin:
if row_qP["o1"]["type"] == "uri":
if "o2" in row_qP and (row_qP["o2"]["value"].split("#")[-1] == "List"):
PrefixList = [
"oop: <https://projekt-scope.de/ontologies/oop#> ",
"rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>",
]
SelectStr = "?item ?type "
FindStr = (
"""<%s> oop:hasParameter ?o1 .
?o1 oop:hasListContent ?o2 .
?o2 rdf:rest*/rdf:first ?item .
BIND(DATATYPE(?item) AS ?type)"""
% position
)
qres = sparqlSelect(PrefixList, SelectStr, FindStr)
qresResBin1 = qres["results"]["bindings"]
for k, row_qPL in enumerate(qresResBin1):
if row_qPL["item"]["type"] == "uri":
paramname = row_qPL["item"]["value"]
paramstrlist.append(geomQuery(paramname))
else:
if (row_qPL["item"]["datatype"].split("#"))[-1] == "double":
paramstrlist.append(float(row_qPL["item"]["value"]))
elif (row_qPL["item"]["datatype"].split("#"))[
-1
] == "integer":
paramstrlist.append(int(row_qPL["item"]["value"]))
else:
paramstrlist.append(row_qPL["item"]["value"])
else:
paramname = row_qP["o1"]["value"]
paramstrlist.append(geomQuery(paramname))
elif row_qP["o"]["type"] == "str":
paramstrlist.append(col_qP) # FIXME undefinde varibale
else:
return ValueError("further coding required for this case")
return paramstrlist
def sparqlInsertList(insertList):
headers = {"content-type": "application/json", "encoding": "UTF-8"}
url = LC_ADDR_fuseki_app + "/fuseki/insert"
insStr = "INSERT DATA {"
for i in insertList:
insStr += (
"GRAPH <%s> { <%s> <https://w3id.org/omg#hasOccPickle> '%s' . } ."
% (i[0], i[1], i[2])
)
insStr += "}"
payload = {"SparqlString": """%s""" % insStr}
requests.post(url, data=json.dumps(payload), headers=headers, proxies=proxies)
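# Shape of the SPARQL update built above, with placeholder values (one GRAPH
# block is emitted per (graph, subject, pickle) entry in insertList):
#
#   INSERT DATA {
#     GRAPH <http://example.org/graph1> {
#       <http://example.org/elem1> <https://w3id.org/omg#hasOccPickle> '...' . } .
#   }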
def sparqlInsert(InsStr):
headers = {"content-type": "application/json", "encoding": "UTF-8"}
url = LC_ADDR_fuseki_app + "/fuseki/insert"
payload = {"SparqlString": """%s""" % InsStr}
r = requests.post(url, data=json.dumps(payload), headers=headers, proxies=proxies)
return r
def upload_handler(elem,ur):
objectOcc = geomQuery(elem)
res = jsonpickle.encode(objectOcc)
return ur, elem, str(res)
def pool_handler(ResObjList):
l_insert = []
ur = "http://" + give_me_new_ID()
for elem in ResObjList:
l_insert.append(upload_handler(elem,ur))
sparqlInsertList(l_insert)
| []
| []
| [
"LC_ADDR_fuseki_app"
]
| [] | ["LC_ADDR_fuseki_app"] | python | 1 | 0 | |
virtual/Scripts/f2py.py | #!h:\zsolt\python\grocerypricecompare\virtual\scripts\python.exe
# See http://cens.ioc.ee/projects/f2py2e/
from __future__ import division, print_function
import os
import sys
for mode in ["g3-numpy", "2e-numeric", "2e-numarray", "2e-numpy"]:
try:
i = sys.argv.index("--" + mode)
del sys.argv[i]
break
except ValueError:
pass
os.environ["NO_SCIPY_IMPORT"] = "f2py"
if mode == "g3-numpy":
    sys.stderr.write("G3 f2py support is not implemented, yet.\n")
sys.exit(1)
elif mode == "2e-numeric":
from f2py2e import main
elif mode == "2e-numarray":
sys.argv.append("-DNUMARRAY")
from f2py2e import main
elif mode == "2e-numpy":
from numpy.f2py import main
else:
    sys.stderr.write("Unknown mode: " + repr(mode) + "\n")
sys.exit(1)
main()
| []
| []
| [
"NO_SCIPY_IMPORT"
]
| [] | ["NO_SCIPY_IMPORT"] | python | 1 | 0 | |
mmf/utils/build.py | # Copyright (c) Facebook, Inc. and its affiliates.
import logging
import os
import warnings
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, Union
import mmf
import pytorch_lightning as pl
import torch
from mmf.common.meter import Meter
from mmf.common.registry import registry
from mmf.datasets.iteration_strategies import (
ConstantIterationStrategy,
IterationStrategy,
SizeProportionalIterationStrategy,
)
from mmf.datasets.processors.processors import Processor
from mmf.utils.configuration import Configuration, get_global_config
from mmf.utils.distributed import is_dist_initialized, is_master, is_xla, synchronize
from mmf.utils.general import get_optimizer_parameters
from omegaconf import DictConfig, OmegaConf
try:
import torch_xla.core.xla_model as xm # noqa
import torch_xla.distributed.parallel_loader as xla_pl # noqa
except ImportError:
xm = None
ProcessorDict = Dict[str, Processor]
logger = logging.getLogger(__name__)
def build_config(configuration: Configuration, *args, **kwargs) -> DictConfig:
"""Builder function for config. Freezes the configuration and registers
configuration object and config DictConfig object to registry.
Args:
configuration (Configuration): Configuration object that will be
used to create the config.
Returns:
(DictConfig): A config which is of type omegaconf.DictConfig
"""
configuration.freeze()
config = configuration.get_config()
registry.register("config", config)
registry.register("configuration", configuration)
return config
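# Minimal usage sketch (assumes a Configuration object built elsewhere, e.g.
# from parsed command-line args):
#
#   configuration = Configuration(args)
#   config = build_config(configuration)
#   # both are now also available via registry.get("config") / registry.get("configuration")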
def build_trainer(config: DictConfig) -> Any:
"""Builder function for creating a trainer class. Trainer class name
is picked from the config.
Args:
config (DictConfig): Configuration that will be used to create
the trainer.
Returns:
(BaseTrainer): A trainer instance
"""
trainer_type = config.training.trainer
trainer_cls = registry.get_trainer_class(trainer_type)
trainer_obj = trainer_cls(config)
return trainer_obj
def build_model(
config: Union[DictConfig, "mmf.models.base_model.BaseModel.Config"]
) -> "mmf.models.base_model.BaseModel":
from mmf.models.base_model import BaseModel
# If it is not an OmegaConf object, create the object
if not isinstance(config, DictConfig) and isinstance(config, BaseModel.Config):
config = OmegaConf.structured(config)
model_name = config.model
model_class = registry.get_model_class(model_name)
if model_class is None:
raise RuntimeError(f"No model registered for name: {model_name}")
model = model_class(config)
if hasattr(model, "build"):
"""Model build involves checkpoint loading
If the checkpoint is not available the underlying
methods try to download it.
Let master build the model (download the checkpoints) while
other ranks wait for the sync message
Once the master has downloaded the checkpoint and built the
model it sends the sync message, completing the synchronization
now other cores can proceed to build the model
using already downloaded checkpoint.
"""
if is_master():
model.load_requirements()
model.build()
synchronize()
else:
synchronize()
model.build()
model.init_losses()
return model
def build_dataset(
dataset_key: str, config=None, dataset_type="train"
) -> torch.utils.data.Dataset:
"""Builder function for creating a dataset. If dataset_key is passed
    without a config, the dataset is created from its default config. Otherwise,
    the passed config is used to build and return an instance of the dataset.
Args:
dataset_key (str): Key of dataset to build.
config (DictConfig, optional): Configuration that will be used to create
the dataset. If not passed, dataset's default config will be used.
Defaults to {}.
dataset_type (str, optional): Type of the dataset to build, train|val|test.
Defaults to "train".
Returns:
(torch.utils.data.Dataset): A dataset instance of type torch Dataset
"""
from mmf.datasets.base_dataset_builder import BaseDatasetBuilder
from mmf.utils.configuration import load_yaml_with_defaults
datamodule_instance = build_datamodule(dataset_key)
# If config is not provided, we take it from default one
if not config:
config_path = datamodule_instance.config_path()
if config_path is None:
# If config path wasn't defined, send an empty config path
# but don't force dataset to define a config
warnings.warn(
f"Config path not defined for {dataset_key}, "
+ "continuing with empty config"
)
config = OmegaConf.create()
else:
config = load_yaml_with_defaults(config_path)
config = OmegaConf.select(config, f"dataset_config.{dataset_key}")
if config is None:
config = OmegaConf.create()
OmegaConf.set_struct(config, True)
elif dataset_key in config:
# Handle Global config
config = config[dataset_key]
datamodule_instance.build_dataset(config)
dataset = datamodule_instance.load_dataset(config, dataset_type)
if hasattr(datamodule_instance, "update_registry_for_model"):
datamodule_instance.update_registry_for_model(config)
return dataset
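# Usage sketch (the dataset key is illustrative; any registered dataset key works):
#
#   train_dataset = build_dataset("vqa2")                      # default config
#   val_dataset = build_dataset("vqa2", dataset_type="val")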
# TODO: move dataset_type enum to typings
def build_datasets(
dataset_list: List[str], dataset_config: DictConfig, dataset_type="train"
) -> List[torch.utils.data.Dataset]:
datasets = []
for dataset in dataset_list:
if dataset in dataset_config:
dataset_config = dataset_config[dataset]
else:
warnings.warn(
f"Dataset {dataset} is missing from dataset_config"
+ " in config. Proceeding with empty config."
)
dataset_config = OmegaConf.create()
dataset_instance = build_dataset(dataset, dataset_config, dataset_type)
if dataset_instance is None:
continue
datasets.append(dataset_instance)
return datasets
def build_datamodule(dataset_key) -> pl.LightningDataModule:
dataset_builder = registry.get_builder_class(dataset_key)
assert dataset_builder, (
f"Key {dataset_key} doesn't have a registered " + "dataset builder"
)
builder_instance: pl.LightningDataModule = dataset_builder()
return builder_instance
def build_multiple_datamodules(
dataset_list: List[str], all_dataset_config: DictConfig
) -> Dict[str, pl.LightningDataModule]:
datamodules: Dict[str, pl.LightningDataModule] = {}
for dataset in dataset_list:
datamodule_instance = build_datamodule(dataset)
if dataset in all_dataset_config:
dataset_config = all_dataset_config[dataset]
else:
warnings.warn(
f"Dataset {dataset} is missing from dataset_config"
+ " in config. Proceeding with empty config."
)
dataset_config = OmegaConf.create()
if is_master():
datamodule_instance.prepare_data(dataset_config)
synchronize()
datamodule_instance.setup(config=dataset_config)
if hasattr(datamodule_instance, "update_registry_for_model"):
datamodule_instance.update_registry_for_model(dataset_config)
datamodules[dataset] = datamodule_instance
return datamodules
def build_dataloader_and_sampler(
dataset_instance: torch.utils.data.Dataset, datamodule_config: DictConfig
) -> Tuple[torch.utils.data.DataLoader, Optional[torch.utils.data.Sampler]]:
"""Builds and returns a dataloader along with its sample
Args:
dataset_instance (torch.utils.data.Dataset): Instance of dataset for which
dataloader has to be created
datamodule_config (omegaconf.DictConfig): Datamodule configuration; required
            for inferring params for dataloader
Returns:
Tuple[torch.utils.data.DataLoader, Optional[torch.utils.data.Sampler]]:
Tuple of Dataloader and Sampler instance
"""
from mmf.common.batch_collator import BatchCollator
training_config = get_global_config("training")
# Support params coming in from dataloader params
other_args = {
"num_workers": datamodule_config.get(
"num_workers", training_config.get("num_workers", 4)
),
"pin_memory": datamodule_config.get(
"pin_memory", training_config.get("pin_memory", False)
),
"shuffle": datamodule_config.get("shuffle", None),
"batch_size": datamodule_config.get("batch_size", None),
}
# IterableDataset returns batches directly, so no need to add Sampler
# or batch size as user is expected to control those. This is a fine
# assumption for now to not support single item based IterableDataset
# as it will add unnecessary complexity and config parameters
# to the codebase
if not isinstance(dataset_instance, torch.utils.data.IterableDataset):
other_args = _add_extra_args_for_dataloader(dataset_instance, other_args)
else:
other_args.pop("shuffle")
loader = torch.utils.data.DataLoader(
dataset=dataset_instance,
collate_fn=BatchCollator(
dataset_instance.dataset_name, dataset_instance.dataset_type
),
drop_last=is_xla(), # see also MultiDatasetLoader.__len__
**other_args,
)
if is_xla():
device = xm.xla_device()
loader = xla_pl.MpDeviceLoader(loader, device)
if other_args["num_workers"] >= 0:
# Suppress leaking semaphore warning
os.environ["PYTHONWARNINGS"] = "ignore:semaphore_tracker:UserWarning"
loader.dataset_type = dataset_instance.dataset_type
return loader, other_args.get("sampler", None)
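# Usage sketch: only the dataloader-related keys read above are consumed, and
# all of them are optional (num_workers/pin_memory fall back to the training
# config, batch_size falls back to get_batch_size()):
#
#   loader_config = OmegaConf.create({"batch_size": 32, "num_workers": 2})
#   train_loader, sampler = build_dataloader_and_sampler(train_dataset, loader_config)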
def build_test_reporter(
datamodules: List[pl.LightningDataModule],
config: DictConfig = None,
dataset_type: str = "train",
):
test_reporter_key = "default"
if config:
test_reporter_key = config.get("type", "default")
test_reporter_class = registry.get_test_rerporter_class(test_reporter_key)
assert (
test_reporter_class
), f"Key {test_reporter_key} doesn't have a registered test_reporter class"
if not config:
warnings.warn(
f"Config not provided for {test_reporter_key}, test_reporter"
+ "continuing with empty config"
)
params_config = OmegaConf.create()
else:
params_config = config.params
return test_reporter_class(datamodules, params_config, dataset_type)
def _add_extra_args_for_dataloader(
dataset_instance: torch.utils.data.Dataset, other_args: Dict[str, Any] = None
) -> Dict[str, Any]:
from mmf.utils.general import get_batch_size
dataset_type = dataset_instance.dataset_type
if other_args["shuffle"] is None:
other_args["shuffle"] = False
if dataset_type != "test":
other_args["shuffle"] = True
# In distributed mode, we use DistributedSampler from PyTorch
if is_dist_initialized():
other_args["sampler"] = torch.utils.data.DistributedSampler(
dataset_instance, shuffle=other_args["shuffle"]
)
# Shuffle is mutually exclusive with sampler, let DistributedSampler
# take care of shuffle and pop from main args
other_args.pop("shuffle")
if is_xla():
other_args["sampler"] = torch.utils.data.DistributedSampler(
dataset_instance,
num_replicas=xm.xrt_world_size(),
rank=xm.get_ordinal(),
shuffle=other_args["shuffle"],
)
other_args.pop("shuffle")
if other_args["batch_size"] is None:
other_args["batch_size"] = get_batch_size()
return other_args
def build_optimizer(model, config):
optimizer_config = config.optimizer
if "type" not in optimizer_config:
raise ValueError(
"Optimizer attributes must have a 'type' key "
"specifying the type of optimizer. "
"(Custom or PyTorch, e.g. 'adam_w' or 'SGD')"
)
optimizer_type = optimizer_config.type
if "params" not in optimizer_config:
warnings.warn("optimizer attributes has no params defined, defaulting to {}.")
params = optimizer_config.get("params", {})
if hasattr(torch.optim, optimizer_type):
optimizer_class = getattr(torch.optim, optimizer_type)
else:
optimizer_class = registry.get_optimizer_class(optimizer_type)
if optimizer_class is None:
        raise ValueError(
            "No optimizer class of type {} present in "
            "either torch or registered to registry".format(optimizer_type)
        )
parameters = get_optimizer_parameters(model, config)
if optimizer_config.get("enable_state_sharding", False):
# TODO(vedanuj): Remove once OSS is moved to PT upstream
try:
from fairscale.optim.oss import OSS
except ImportError:
print(
"Optimizer state sharding requires fairscale. "
+ "Install using pip install fairscale."
)
raise
assert (
is_dist_initialized()
), "Optimizer state sharding can only be used in distributed mode."
is_fp16 = config.get("training", {}).get("fp16", False)
optimizer = OSS(
params=parameters, optim=optimizer_class, broadcast_fp16=is_fp16, **params
)
else:
optimizer = optimizer_class(parameters, **params)
return optimizer
def build_lightning_optimizers(model, config):
optimizer = build_optimizer(model, config)
if config.training.lr_scheduler:
lr_scheduler = build_scheduler(optimizer, config)
return {
"optimizer": optimizer,
"lr_scheduler": {"scheduler": lr_scheduler, "interval": "step"},
}
else:
return optimizer
def build_scheduler(optimizer, config):
scheduler_config = config.get("scheduler", {})
if "type" not in scheduler_config:
warnings.warn(
"No type for scheduler specified even though lr_scheduler is True, "
"setting default to 'Pythia'"
)
scheduler_type = scheduler_config.get("type", "pythia")
if "params" not in scheduler_config:
warnings.warn("scheduler attributes has no params defined, defaulting to {}.")
params = scheduler_config.get("params", {})
scheduler_class = registry.get_scheduler_class(scheduler_type)
scheduler = scheduler_class(optimizer, **params)
return scheduler
def build_classifier_layer(config, *args, **kwargs):
from mmf.modules.layers import ClassifierLayer
classifier = ClassifierLayer(config.type, *args, **config.params, **kwargs)
return classifier.module
def build_text_encoder(config, *args, **kwargs):
"""Deprecated, please do not use"""
try:
from mmf.modules.fb.encoders import TextEncoderFactory
except ImportError:
from mmf.modules.encoders import TextEncoderFactory
text_encoder = TextEncoderFactory(config, *args, **kwargs)
return text_encoder.module
def build_image_encoder(config, direct_features=False, **kwargs):
"""Deprecated, please do not use"""
from mmf.modules.encoders import ImageEncoderFactory, ImageFeatureEncoderFactory
if direct_features:
module = ImageFeatureEncoderFactory(config)
else:
module = ImageEncoderFactory(config)
return module.module
def build_encoder(config: Union[DictConfig, "mmf.modules.encoders.Encoder.Config"]):
from mmf.modules.encoders import Encoder
# If it is not an OmegaConf object, create the object
if not isinstance(config, DictConfig) and isinstance(config, Encoder.Config):
config = OmegaConf.structured(config)
if "type" in config:
# Support config initialization in form of
# encoder:
# type: identity # noqa
# params:
# in_dim: 256
name = config.type
if isinstance(name, Enum):
name = name.value
params = config.get("params", None)
else:
# Structured Config support
name = config.name
params = config
encoder_cls = registry.get_encoder_class(name)
# If params were not passed, try generating them from encoder
# class's default config
if params is None:
params = OmegaConf.structured(getattr(encoder_cls, "Config", {}))
return encoder_cls(params)
def build_processors(
processors_config: DictConfig, registry_key: str = None, *args, **kwargs
) -> ProcessorDict:
"""Given a processor config, builds the processors present and returns back
a dict containing processors mapped to keys as per the config
Args:
processors_config (omegaconf.DictConfig): OmegaConf DictConfig describing
the parameters and type of each processor passed here
registry_key (str, optional): If passed, function would look into registry for
this particular key and return it back. .format with processor_key will
be called on this string. Defaults to None.
Returns:
ProcessorDict: Dictionary containing key to
processor mapping
"""
from mmf.datasets.processors.processors import Processor
processor_dict = {}
for processor_key, processor_params in processors_config.items():
if not processor_params:
continue
processor_instance = None
if registry_key is not None:
full_key = registry_key.format(processor_key)
processor_instance = registry.get(full_key, no_warning=True)
if processor_instance is None:
processor_instance = Processor(processor_params, *args, **kwargs)
# We don't register back here as in case of hub interface, we
# want the processors to be instantiate every time. BaseDataset
# can register at its own end
processor_dict[processor_key] = processor_instance
return processor_dict
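# Usage sketch with an illustrative config (processor name/params are examples):
#
#   processors_config = OmegaConf.create(
#       {"text_processor": {"type": "vocab", "params": {"max_length": 14}}}
#   )
#   processors = build_processors(processors_config)
#   # processors["text_processor"] is a Processor wrapping the registered "vocab" type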
def build_iteration_strategy(
config: DictConfig,
dataloaders: Dict[str, torch.utils.data.DataLoader],
*args,
**kwargs,
) -> IterationStrategy:
if not config.get("enabled", True):
return ConstantIterationStrategy.from_params(dataloaders, *args, **kwargs)
else:
assert (
"type" in config
), "multitasking config must define 'type' attribute if enabled"
# This assumes all dataloaders will have same dataset type
iteration_strategy_class = registry.get_iteration_strategy_class(config.type)
config = config.get("params", {})
dataset_type = dataloaders[list(dataloaders.keys())[0]].dataset.dataset_type
if dataset_type != "train":
logger.info(
f"{iteration_strategy_class.__name__} updated to size "
+ f"proportional for {dataset_type}"
)
return SizeProportionalIterationStrategy.from_params(
dataloaders, *args, **kwargs
)
else:
return iteration_strategy_class(config, dataloaders, *args, **kwargs)
def build_meters(run_type: str) -> List[Meter]:
train_meter, val_meter, test_meter = None, None, None
if "train" in run_type:
train_meter = Meter()
# val_meter used for validation after training loop
val_meter = Meter()
elif "val" in run_type or "inference" in run_type:
val_meter = Meter()
if "test" in run_type:
test_meter = Meter()
return train_meter, val_meter, test_meter
| []
| []
| [
"PYTHONWARNINGS"
]
| [] | ["PYTHONWARNINGS"] | python | 1 | 0 | |
src/tests/google/appengine/ext/go/__init__.py | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A bridge between dev_appserver.py and a Go app."""
import asyncore
import atexit
import datetime
import errno
import getpass
import logging
import os
import re
import shutil
import signal
import socket
import subprocess
import stat
import sys
import tempfile
import time
from google.appengine.ext.remote_api import handler
from google.appengine.ext.remote_api import remote_api_pb
from google.appengine.runtime import apiproxy_errors
from google.appengine.tools import dev_appserver
GAB_WORK_DIR = None
GO_APP = None
GO_APP_NAME = '_go_app'
RAPI_HANDLER = None
SOCKET_HTTP = os.path.join(tempfile.gettempdir(),
'dev_appserver_%s_socket_http')
SOCKET_API = os.path.join(tempfile.gettempdir(), 'dev_appserver_%s_socket_api')
HEALTH_CHECK_PATH = '/_appengine_delegate_health_check'
INTERNAL_SERVER_ERROR = ('Status: 500 Internal Server Error\r\n' +
'Content-Type: text/plain\r\n\r\nInternal Server Error')
MAX_START_TIME = 10
HEADER_MAP = {
'APPLICATION_ID': 'X-AppEngine-Inbound-AppId',
'CONTENT_TYPE': 'Content-Type',
'CURRENT_VERSION_ID': 'X-AppEngine-Inbound-Version-Id',
'REMOTE_ADDR': 'X-AppEngine-Remote-Addr',
'USER_EMAIL': 'X-AppEngine-Inbound-User-Email',
'USER_ID': 'X-AppEngine-Inbound-User-Id',
'USER_IS_ADMIN': 'X-AppEngine-Inbound-User-Is-Admin',
}
ENV_PASSTHROUGH = re.compile(r'^(BACKEND_PORT\..*|INSTANCE_ID)$')
APP_CONFIG = None
def gab_work_dir():
base = os.getenv('XDG_CACHE_HOME')
if not base:
if sys.platform == 'darwin':
base = os.path.join(os.getenv('HOME'), 'Library', 'Caches',
'com.google.GoAppEngine')
else:
base = os.path.join(os.getenv('HOME'), '.cache')
if os.path.islink(base):
try:
os.makedirs(os.path.realpath(base))
except OSError, e:
if e.errno != errno.EEXIST:
raise
return os.path.join(base, 'dev_appserver_%s_go_app_work_dir')
def cleanup():
try:
shutil.rmtree(GAB_WORK_DIR)
except:
pass
for fn in [SOCKET_HTTP, SOCKET_API]:
try:
os.remove(fn)
except:
pass
class DelegateClient(asyncore.dispatcher):
def __init__(self, http_req):
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.connect(SOCKET_HTTP)
self.buffer = http_req
self.result = ''
self.closed = False
def handle_close(self):
self.close()
self.closed = True
def handle_connect(self):
pass
def handle_read(self):
self.result += self.recv(8192)
def handle_write(self):
sent = self.send(self.buffer)
self.buffer = self.buffer[sent:]
def writable(self):
return len(self.buffer) > 0
class DelegateServer(asyncore.dispatcher):
def __init__(self):
asyncore.dispatcher.__init__(self)
self.create_socket(socket.AF_UNIX, socket.SOCK_STREAM)
try:
os.remove(SOCKET_API)
except OSError:
pass
self.bind(SOCKET_API)
self.listen(5)
def handle_accept(self):
pair = self.accept()
if not pair:
return
sock, addr = pair
RemoteAPIHandler(sock)
def writable(self):
return False
class RemoteAPIHandler(asyncore.dispatcher_with_send):
def __init__(self, sock):
asyncore.dispatcher_with_send.__init__(self, sock)
self.n = -1
self.data = ''
def handle_read(self):
self.data += self.recv(8192)
if self.n == -1:
i = self.data.find('\n')
if i == -1:
return
try:
self.n = int(self.data[:i])
except:
self.n = -2
if self.n < 0:
self.n = -2
self.data = ''
return
self.data = self.data[i+1:]
elif self.n == -2:
self.data = ''
return
if len(self.data) < self.n:
return
req = remote_api_pb.Request()
req.ParseFromString(self.data[:self.n])
self.data, self.n = self.data[self.n:], -1
rapi_result = None
rapi_error = 'unknown error'
try:
rapi_result = RAPI_HANDLER.ExecuteRequest(req)
except apiproxy_errors.CallNotFoundError, e:
service_name = req.service_name()
method = req.method()
rapi_error = 'call not found for %s/%s' % (service_name, method)
except Exception, e:
rapi_error = str(e)
res = remote_api_pb.Response()
if rapi_result:
res.set_response(rapi_result.Encode())
else:
ae = res.mutable_application_error()
ae.set_code(1)
ae.set_detail(rapi_error)
res1 = res.Encode()
self.send('%d\n' % len(res1))
self.send(res1)
def find_app_files(basedir):
if not basedir.endswith(os.path.sep):
basedir = basedir + os.path.sep
files, dirs = {}, [basedir]
while dirs:
dname = dirs.pop()
for entry in os.listdir(dname):
ename = os.path.join(dname, entry)
if APP_CONFIG.skip_files.match(ename):
continue
try:
s = os.stat(ename)
except OSError, e:
logging.warn('%s', e)
continue
if stat.S_ISDIR(s[stat.ST_MODE]):
dirs.append(ename)
continue
files[ename[len(basedir):]] = s[stat.ST_MTIME]
return files
def find_go_files_mtime(app_files):
files, mtime = [], 0
for f, mt in app_files.items():
if not f.endswith('.go'):
continue
if APP_CONFIG.nobuild_files.match(f):
continue
files.append(f)
mtime = max(mtime, mt)
return files, mtime
def wait_until_go_app_ready(pid):
deadline = (datetime.datetime.now() +
datetime.timedelta(seconds=MAX_START_TIME))
while datetime.datetime.now() < deadline:
try:
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect(SOCKET_HTTP)
s.send('HEAD %s HTTP/1.0\r\n\r\n' % HEALTH_CHECK_PATH)
s.close()
return
except:
time.sleep(0.1)
os.kill(pid, signal.SIGTERM)
raise Exception('unable to start ' + GO_APP_NAME)
def up(path, n):
"""Return the nth parent directory of the given path."""
for _ in range(n):
path = os.path.dirname(path)
return path
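# For example, up('/a/b/c/d.py', 2) returns '/a/b': each iteration strips one
# path component via os.path.dirname.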
class GoApp:
def __init__(self, root_path):
self.root_path = root_path
self.proc = None
self.proc_start = 0
self.goroot = os.path.join(
up(__file__, 5),
"goroot")
if not os.path.isdir(self.goroot):
raise Exception('no goroot found at ' + self.goroot)
for bin in os.listdir(os.path.join(self.goroot, 'bin')):
if len(bin) == 2 and bin[1] == 'g':
self.arch = bin[0]
break
if not self.arch:
raise Exception('bad goroot: no compiler found')
atexit.register(self.cleanup)
def cleanup(self):
if self.proc:
os.kill(self.proc.pid, signal.SIGTERM)
def make_and_run(self, env):
app_files = find_app_files(self.root_path)
go_files, go_mtime = find_go_files_mtime(app_files)
if not go_files:
      raise Exception('no .go files in %s' % self.root_path)
app_mtime = max(app_files.values())
bin_name, bin_mtime = os.path.join(GAB_WORK_DIR, GO_APP_NAME), 0
try:
bin_mtime = os.stat(bin_name)[stat.ST_MTIME]
except:
pass
rebuild, restart = False, False
if go_mtime >= bin_mtime:
rebuild, restart = True, True
elif app_mtime > self.proc_start:
restart = True
if restart and self.proc:
os.kill(self.proc.pid, signal.SIGTERM)
self.proc.wait()
self.proc = None
if rebuild:
self.build(go_files)
if not self.proc or self.proc.poll() is not None:
logging.info('running ' + GO_APP_NAME)
limited_env = {
'PWD': self.root_path,
'TZ': 'UTC',
}
for k, v in env.items():
if ENV_PASSTHROUGH.match(k):
limited_env[k] = v
self.proc_start = app_mtime
self.proc = subprocess.Popen([bin_name,
'-addr_http', 'unix:' + SOCKET_HTTP,
'-addr_api', 'unix:' + SOCKET_API],
cwd=self.root_path, env=limited_env)
wait_until_go_app_ready(self.proc.pid)
def build(self, go_files):
logging.info('building ' + GO_APP_NAME)
if not os.path.exists(GAB_WORK_DIR):
os.makedirs(GAB_WORK_DIR)
gab_argv = [
os.path.join(self.goroot, 'bin', 'go-app-builder'),
'-app_base', self.root_path,
'-arch', self.arch,
'-binary_name', GO_APP_NAME,
'-dynamic',
'-goroot', self.goroot,
'-unsafe',
'-work_dir', GAB_WORK_DIR] + go_files
try:
p = subprocess.Popen(gab_argv, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, env={})
gab_retcode = p.wait()
except Exception, e:
raise Exception('cannot call go-app-builder', e)
if gab_retcode != 0:
raise dev_appserver.CompileError(p.stdout.read() + '\n' + p.stderr.read())
def execute_go_cgi(root_path, handler_path, cgi_path, env, infile, outfile):
global RAPI_HANDLER, GAB_WORK_DIR, SOCKET_HTTP, SOCKET_API, GO_APP
if not RAPI_HANDLER:
user_port = '%s_%s' % (getpass.getuser(), env['SERVER_PORT'])
GAB_WORK_DIR = gab_work_dir() % user_port
SOCKET_HTTP = SOCKET_HTTP % user_port
SOCKET_API = SOCKET_API % user_port
atexit.register(cleanup)
DelegateServer()
RAPI_HANDLER = handler.ApiCallHandler()
GO_APP = GoApp(root_path)
GO_APP.make_and_run(env)
request_method = env['REQUEST_METHOD']
server_protocol = env['SERVER_PROTOCOL']
request_uri = env['PATH_INFO']
if env.get('QUERY_STRING'):
request_uri += '?' + env['QUERY_STRING']
content = infile.getvalue()
headers = []
for k, v in env.items():
if k in HEADER_MAP:
headers.append('%s: %s' % (HEADER_MAP[k], v))
elif k.startswith('HTTP_'):
hk = k[5:].replace("_", "-")
if hk.title() == 'Connection':
continue
headers.append('%s: %s' % (hk, v))
headers.append('Content-Length: %d' % len(content))
headers.append('Connection: close')
http_req = (request_method + ' ' + request_uri + ' ' + server_protocol +
'\r\n' + '\r\n'.join(headers) + '\r\n\r\n' + content)
old_env = os.environ.copy()
try:
os.environ.clear()
os.environ.update(env)
x = DelegateClient(http_req)
while not x.closed:
asyncore.loop(30.0, False, None, 1)
res = x.result
finally:
os.environ.clear()
os.environ.update(old_env)
if res.startswith('HTTP/1.0 ') or res.startswith('HTTP/1.1 '):
res = 'Status:' + res[8:]
else:
res = INTERNAL_SERVER_ERROR
outfile.write(res)
| []
| []
| [
"HOME",
"XDG_CACHE_HOME"
]
| [] | ["HOME", "XDG_CACHE_HOME"] | python | 2 | 0 | |
main.go | package main
import (
"log"
"net/http"
"os"
"strings"
"time"
"github.com/cloudflare/cloudflare-go"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
func main() {
addr := os.Getenv("EXPORTER_LISTEN_ADDR")
if addr == "" {
addr = ":9299"
}
apiEmail := os.Getenv("CLOUDFLARE_API_EMAIL")
apiKey := os.Getenv("CLOUDFLARE_API_KEY")
apiToken := os.Getenv("CLOUDFLARE_API_TOKEN")
apiUserServiceKey := os.Getenv("CLOUDFLARE_API_USER_SERVICE_KEY")
zoneNames := os.Getenv("CLOUDFLARE_ZONE_NAMES")
numAuthSettings := 0
for _, v := range []string{apiToken, apiKey, apiUserServiceKey} {
if v != "" {
numAuthSettings++
}
}
if numAuthSettings != 1 {
log.Fatal("Must specify exactly one of CLOUDFLARE_API_TOKEN, CLOUDFLARE_API_KEY or CLOUDFLARE_API_USER_SERVICE_KEY.")
}
if apiKey != "" && apiEmail == "" {
log.Fatal("CLOUDFLARE_API_KEY specified without CLOUDFLARE_API_EMAIL. Both must be provided.")
}
if zoneNames == "" {
log.Fatal("A comma-separated list of zone names must be specified in CLOUDFLARE_ZONE_NAMES")
}
var cfapi *cloudflare.API
var lpapi *logpullAPI
var err error
if apiToken != "" {
cfapi, err = cloudflare.NewWithAPIToken(apiToken)
lpapi = newLogpullAPIWithToken(apiToken)
} else if apiKey != "" {
cfapi, err = cloudflare.New(apiKey, apiEmail)
lpapi = newLogpullAPI(apiKey, apiEmail)
} else {
cfapi, err = cloudflare.NewWithUserServiceKey(apiUserServiceKey)
lpapi = newLogpullAPIWithUserServiceKey(apiUserServiceKey)
}
if err != nil {
log.Fatalf("creating cfapi client: %s", err)
}
zoneIDs := make([]string, 0)
for _, zoneName := range strings.Split(zoneNames, ",") {
id, err := cfapi.ZoneIDByName(strings.TrimSpace(zoneName))
if err != nil {
log.Fatalf("zone id lookup: %s", err)
}
zoneIDs = append(zoneIDs, id)
}
collectorErrorHandler := func(err error) {
log.Printf("collector: %s", err)
}
collector, err := newCollector(lpapi, zoneIDs, time.Minute, collectorErrorHandler)
if err != nil {
log.Fatalf("creating collector: %s", err)
}
prometheus.MustRegister(collector)
http.Handle("/metrics", promhttp.Handler())
log.Printf("Listening on %s", addr)
log.Fatal(http.ListenAndServe(addr, nil))
}
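// Example environment for a token-based setup (values are placeholders):
//
//	CLOUDFLARE_API_TOKEN=...            (exactly one auth method must be set)
//	CLOUDFLARE_ZONE_NAMES="example.com, example.org"
//	EXPORTER_LISTEN_ADDR=:9299          (optional; this is the default)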
| [
"\"EXPORTER_LISTEN_ADDR\"",
"\"CLOUDFLARE_API_EMAIL\"",
"\"CLOUDFLARE_API_KEY\"",
"\"CLOUDFLARE_API_TOKEN\"",
"\"CLOUDFLARE_API_USER_SERVICE_KEY\"",
"\"CLOUDFLARE_ZONE_NAMES\""
]
| []
| [
"CLOUDFLARE_API_KEY",
"CLOUDFLARE_API_USER_SERVICE_KEY",
"CLOUDFLARE_ZONE_NAMES",
"EXPORTER_LISTEN_ADDR",
"CLOUDFLARE_API_TOKEN",
"CLOUDFLARE_API_EMAIL"
]
| [] | ["CLOUDFLARE_API_KEY", "CLOUDFLARE_API_USER_SERVICE_KEY", "CLOUDFLARE_ZONE_NAMES", "EXPORTER_LISTEN_ADDR", "CLOUDFLARE_API_TOKEN", "CLOUDFLARE_API_EMAIL"] | go | 6 | 0 | |
experiments/CONFIGURE_ME.py | import datetime
import os
import pathlib
# Put the path to your UCR archive here (best used absolute path to avoid surprises)
# On linux, something like:
# UCR_ARCHIVE_PATH = "/home/user/Univariate_ts"
# !!! HERE !!!
UCR_ARCHIVE_PATH = ""
# Alternatively, you can set an environment variable named "UCR_ARCHIVE_PATH"
# On linux, in your shell configuration file (.bashrc, .zshrc,...) or .profile
# --- --- --- Tooling
def get_ucr_folder():
global UCR_ARCHIVE_PATH
# If empty, check the environment variable
if UCR_ARCHIVE_PATH == "":
try:
UCR_ARCHIVE_PATH = os.environ["UCR_ARCHIVE_PATH"]
except KeyError:
print("It looks like 'UCR_ARCHIVE_PATH' is not set")
print(f"Have a look at {pathlib.Path(__file__).absolute()}")
exit(1)
# We should have a path now. Check it
folder = pathlib.Path(UCR_ARCHIVE_PATH).absolute()
if not (folder.exists() and folder.is_dir()):
print("I could not find your UCR archive folder:")
print(" --> " + str(folder))
exit(1)
return folder
# Tooling for our scripts
def get_timestemp():
return datetime.datetime.now().strftime("%Y-%m-%d-%Hh%Mm%Ss")
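# Typical use from an experiment script (assuming it lives next to this file):
#
#   from CONFIGURE_ME import get_ucr_folder, get_timestemp
#   ucr_folder = get_ucr_folder()                 # pathlib.Path to the archive
#   run_id = get_timestemp()                      # e.g. "2023-01-31-12h00m00s"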
| []
| []
| [
"UCR_ARCHIVE_PATH"
]
| [] | ["UCR_ARCHIVE_PATH"] | python | 1 | 0 | |
HackerRank Solutions/Java/Divisible Sum Pairs.java | import java.io.*;
import java.math.*;
import java.security.*;
import java.text.*;
import java.util.*;
import java.util.concurrent.*;
import java.util.regex.*;
public class Solution {
// Complete the divisibleSumPairs function below.
static int divisibleSumPairs(int n, int k, int[] ar) {
int pairCount = 0;
for (int i = 0; i < ar.length - 1; i++) {
for (int j = i + 1; j < ar.length; j++) {
if ((ar[i] + ar[j]) % k == 0) {
pairCount++;
}
}
}
return pairCount;
}
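    // Worked example: n = 6, k = 3, ar = {1, 3, 2, 6, 1, 2}
    // Pairs (i < j) with (ar[i] + ar[j]) % 3 == 0: (0,2), (0,5), (1,3), (2,4), (4,5) -> 5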
private static final Scanner scanner = new Scanner(System.in);
public static void main(String[] args) throws IOException {
BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH")));
String[] nk = scanner.nextLine().split(" ");
int n = Integer.parseInt(nk[0]);
int k = Integer.parseInt(nk[1]);
int[] ar = new int[n];
String[] arItems = scanner.nextLine().split(" ");
scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");
for (int i = 0; i < n; i++) {
int arItem = Integer.parseInt(arItems[i]);
ar[i] = arItem;
}
int result = divisibleSumPairs(n, k, ar);
bufferedWriter.write(String.valueOf(result));
bufferedWriter.newLine();
bufferedWriter.close();
scanner.close();
}
}
| [
"\"OUTPUT_PATH\""
]
| []
| [
"OUTPUT_PATH"
]
| [] | ["OUTPUT_PATH"] | java | 1 | 0 | |
thetastreak/thetastreak.py | #!/usr/bin/env python
import logging
import os
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
def wait_for_load(driver):
"""Wait for the page to be fully loaded."""
time.sleep(5)
WebDriverWait(driver=driver, timeout=10).until(
lambda x: x.execute_script(
"return document.readyState === 'complete'"
)
)
logging.basicConfig(level=logging.INFO)
TG_USERNAME = os.environ.get("TG_USERNAME")
TG_PASSWORD = os.environ.get("TG_PASSWORD")
chrome_options = Options()
chrome_options.add_argument("headless")
driver = webdriver.Chrome(options=chrome_options)
logging.info("Loading the site...")
driver.get("https://thetagang.com/login")
wait_for_load(driver)
# Clear the modal.
driver.refresh()
wait_for_load(driver)
elem = driver.find_element_by_xpath('//*[@id="Username"]')
elem.clear()
elem.send_keys(TG_USERNAME)
elem = driver.find_element_by_xpath('//*[@id="Password"]')
elem.clear()
elem.send_keys(TG_PASSWORD)
logging.info("Submitting login...")
elem.send_keys(Keys.ENTER)
wait_for_load(driver)
logging.info("Refresh the page...")
driver.refresh()
wait_for_load(driver)
premium_div = driver.find_element_by_xpath(
'//*[@id="root"]/div/div/div/div[3]/div[1]/div[1]/div[3]/div/div[4]/div[2]'
)
print(premium_div.text)
driver.close()
logging.info("Done! 🎉")
| []
| []
| [
"TG_PASSWORD",
"TG_USERNAME"
]
| [] | ["TG_PASSWORD", "TG_USERNAME"] | python | 2 | 0 | |
src/main/java/com/brandonmanson/SwaggerGeneratorApplication.java | package com.brandonmanson;
import io.keen.client.java.JavaKeenClientBuilder;
import io.keen.client.java.KeenClient;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.context.annotation.Bean;
import org.springframework.core.task.TaskExecutor;
import org.springframework.scheduling.annotation.EnableAsync;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
import org.apache.commons.dbcp.BasicDataSource;
import org.springframework.web.servlet.config.annotation.EnableWebMvc;
import javax.activation.DataSource;
import java.net.URI;
import java.net.URISyntaxException;
import java.sql.*;
@SpringBootApplication
@EnableAsync
@EnableWebMvc
public class SwaggerGeneratorApplication {
@Bean
public TaskExecutor taskExecutor() {
ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor();
executor.setCorePoolSize(5);
executor.setMaxPoolSize(10);
executor.setQueueCapacity(25);
return executor;
}
@Bean
public BasicDataSource dataSource() throws URISyntaxException, ClassNotFoundException {
URI dbUri = new URI(System.getenv("DATABASE_URL"));
String userName = dbUri.getUserInfo().split(":")[0];
String password = dbUri.getUserInfo().split(":")[1];
BasicDataSource dataSource = new BasicDataSource();
dataSource.setUsername(userName);
dataSource.setPassword(password);
String dbUrl = "jdbc:postgresql://"
+ dbUri.getHost()
+ ":"
+ dbUri.getPort()
+ dbUri.getPath()
+ "?sslmode=require&user="
+ dataSource.getUsername()
+ "&password="
+ dataSource.getPassword();
dataSource.setUrl(dbUrl);
dataSource.setDriverClassName("org.postgresql.Driver");
return dataSource;
}
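    // The parsing above assumes a Heroku-style URL, for example (placeholder values):
    // DATABASE_URL=postgres://dbuser:dbpass@host.example.com:5432/dbname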
@Bean
public KeenClient keenClient() {
KeenClient client = new JavaKeenClientBuilder().build();
KeenClient.initialize(client);
return client;
}
public static void main(String[] args) {
SpringApplication.run(SwaggerGeneratorApplication.class, args);
}
}
| [
"\"DATABASE_URL\""
]
| []
| [
"DATABASE_URL"
]
| [] | ["DATABASE_URL"] | java | 1 | 0 | |
install/official/onpremise/sentry.conf.py | # This file is just Python, with a touch of Django which means
# you can inherit and tweak settings to your hearts content.
# For Docker, the following environment variables are supported:
# SENTRY_POSTGRES_HOST
# SENTRY_POSTGRES_PORT
# SENTRY_DB_NAME
# SENTRY_DB_USER
# SENTRY_DB_PASSWORD
# SENTRY_RABBITMQ_HOST
# SENTRY_RABBITMQ_USERNAME
# SENTRY_RABBITMQ_PASSWORD
# SENTRY_RABBITMQ_VHOST
# SENTRY_REDIS_HOST
# SENTRY_REDIS_PASSWORD
# SENTRY_REDIS_PORT
# SENTRY_REDIS_DB
# SENTRY_MEMCACHED_HOST
# SENTRY_MEMCACHED_PORT
# SENTRY_FILESTORE_DIR
# SENTRY_SERVER_EMAIL
# SENTRY_EMAIL_HOST
# SENTRY_EMAIL_PORT
# SENTRY_EMAIL_USER
# SENTRY_EMAIL_PASSWORD
# SENTRY_EMAIL_USE_TLS
# SENTRY_ENABLE_EMAIL_REPLIES
# SENTRY_SMTP_HOSTNAME
# SENTRY_MAILGUN_API_KEY
# SENTRY_SINGLE_ORGANIZATION
# SENTRY_SECRET_KEY
# GITHUB_APP_ID
# GITHUB_API_SECRET
# BITBUCKET_CONSUMER_KEY
# BITBUCKET_CONSUMER_SECRET
from sentry.conf.server import * # NOQA
import os
import os.path
# https://docs.sentry.io/server/sso/
# enable SAML2 SSO
SENTRY_FEATURES['organizations:sso'] = True
SENTRY_FEATURES['organizations:sso-saml2'] = True
SENTRY_FEATURES['organizations:sso-rippling'] = False
CONF_ROOT = os.path.dirname(__file__)
postgres = env('SENTRY_POSTGRES_HOST') or (env('POSTGRES_PORT_5432_TCP_ADDR') and 'postgres')
if postgres:
DATABASES = {
'default': {
'ENGINE': 'sentry.db.postgres',
'NAME': (
env('SENTRY_DB_NAME')
or env('POSTGRES_ENV_POSTGRES_USER')
or 'postgres'
),
'USER': (
env('SENTRY_DB_USER')
or env('POSTGRES_ENV_POSTGRES_USER')
or 'postgres'
),
'PASSWORD': (
env('SENTRY_DB_PASSWORD')
or env('POSTGRES_ENV_POSTGRES_PASSWORD')
or ''
),
'HOST': postgres,
'PORT': (
env('SENTRY_POSTGRES_PORT')
or ''
),
'OPTIONS': {
'autocommit': True,
},
},
}
# You should not change this setting after your database has been created
# unless you have altered all schemas first
SENTRY_USE_BIG_INTS = True
# If you're expecting any kind of real traffic on Sentry, we highly recommend
# configuring the CACHES and Redis settings
###########
# General #
###########
# Instruct Sentry that this install intends to be run by a single organization
# and thus various UI optimizations should be enabled.
SENTRY_SINGLE_ORGANIZATION = env('SENTRY_SINGLE_ORGANIZATION', True)
#########
# Redis #
#########
# Generic Redis configuration used as defaults for various things including:
# Buffers, Quotas, TSDB
redis = env('SENTRY_REDIS_HOST') or (env('REDIS_PORT_6379_TCP_ADDR') and 'redis')
if not redis:
raise Exception('Error: REDIS_PORT_6379_TCP_ADDR (or SENTRY_REDIS_HOST) is undefined, did you forget to `--link` a redis container?')
redis_password = env('SENTRY_REDIS_PASSWORD') or ''
redis_port = env('SENTRY_REDIS_PORT') or '6379'
redis_db = env('SENTRY_REDIS_DB') or '0'
SENTRY_OPTIONS.update({
'redis.clusters': {
'default': {
'hosts': {
0: {
'host': redis,
'password': redis_password,
'port': redis_port,
'db': redis_db,
},
},
},
},
})
#########
# Cache #
#########
# Sentry currently utilizes two separate mechanisms. While CACHES is not a
# requirement, it will optimize several high throughput patterns.
memcached = env('SENTRY_MEMCACHED_HOST') or (env('MEMCACHED_PORT_11211_TCP_ADDR') and 'memcached')
if memcached:
memcached_port = (
env('SENTRY_MEMCACHED_PORT')
or '11211'
)
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': [memcached + ':' + memcached_port],
'TIMEOUT': 3600,
}
}
# A primary cache is required for things such as processing events
SENTRY_CACHE = 'sentry.cache.redis.RedisCache'
#########
# Queue #
#########
# See https://docs.getsentry.com/on-premise/server/queue/ for more
# information on configuring your queue broker and workers. Sentry relies
# on a Python framework called Celery to manage queues.
rabbitmq = env('SENTRY_RABBITMQ_HOST') or (env('RABBITMQ_PORT_5672_TCP_ADDR') and 'rabbitmq')
if rabbitmq:
BROKER_URL = (
'amqp://' + (
env('SENTRY_RABBITMQ_USERNAME')
or env('RABBITMQ_ENV_RABBITMQ_DEFAULT_USER')
or 'guest'
) + ':' + (
env('SENTRY_RABBITMQ_PASSWORD')
or env('RABBITMQ_ENV_RABBITMQ_DEFAULT_PASS')
or 'guest'
) + '@' + rabbitmq + '/' + (
env('SENTRY_RABBITMQ_VHOST')
or env('RABBITMQ_ENV_RABBITMQ_DEFAULT_VHOST')
or '/'
)
)
else:
BROKER_URL = 'redis://:' + redis_password + '@' + redis + ':' + redis_port + '/' + redis_db
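# With the default single-node wiring (no RabbitMQ, Redis without a password)
# the fallback above resolves to something like:
#   BROKER_URL = 'redis://:@redis:6379/0'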
###############
# Rate Limits #
###############
# Rate limits apply to notification handlers and are enforced per-project
# automatically.
SENTRY_RATELIMITER = 'sentry.ratelimits.redis.RedisRateLimiter'
##################
# Update Buffers #
##################
# Buffers (combined with queueing) act as an intermediate layer between the
# database and the storage API. They will greatly improve efficiency on large
# numbers of the same events being sent to the API in a short amount of time.
# (read: if you send any kind of real data to Sentry, you should enable buffers)
SENTRY_BUFFER = 'sentry.buffer.redis.RedisBuffer'
##########
# Quotas #
##########
# Quotas allow you to rate limit individual projects or the Sentry install as
# a whole.
SENTRY_QUOTAS = 'sentry.quotas.redis.RedisQuota'
########
# TSDB #
########
# The TSDB is used for building charts as well as making things like per-rate
# alerts possible.
SENTRY_TSDB = 'sentry.tsdb.redis.RedisTSDB'
###########
# Digests #
###########
# The digest backend powers notification summaries.
SENTRY_DIGESTS = 'sentry.digests.backends.redis.RedisBackend'
################
# File storage #
################
# Uploaded media uses these `filestore` settings. The available
# backends are either `filesystem` or `s3`.
SENTRY_OPTIONS['filestore.backend'] = 'filesystem'
SENTRY_OPTIONS['filestore.options'] = {
'location': env('SENTRY_FILESTORE_DIR'),
}
##############
# Web Server #
##############
# If you're using a reverse SSL proxy, you should enable the X-Forwarded-Proto
# header and set `SENTRY_USE_SSL=1`
if env('SENTRY_USE_SSL', False):
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SOCIAL_AUTH_REDIRECT_IS_HTTPS = True
SENTRY_WEB_HOST = '0.0.0.0'
SENTRY_WEB_PORT = 9000
SENTRY_WEB_OPTIONS = {
# 'workers': 3, # the number of web workers
}
###############
# Mail Server #
###############
email = env('SENTRY_EMAIL_HOST') or (env('SMTP_PORT_25_TCP_ADDR') and 'smtp')
if email:
SENTRY_OPTIONS['mail.backend'] = 'smtp'
SENTRY_OPTIONS['mail.host'] = email
SENTRY_OPTIONS['mail.password'] = env('SENTRY_EMAIL_PASSWORD') or ''
SENTRY_OPTIONS['mail.username'] = env('SENTRY_EMAIL_USER') or ''
SENTRY_OPTIONS['mail.port'] = int(env('SENTRY_EMAIL_PORT') or 25)
SENTRY_OPTIONS['mail.use-tls'] = env('SENTRY_EMAIL_USE_TLS', False)
else:
SENTRY_OPTIONS['mail.backend'] = 'dummy'
# The email address to send on behalf of
SENTRY_OPTIONS['mail.from'] = env('SENTRY_SERVER_EMAIL') or 'root@localhost'
# If you're using mailgun for inbound mail, set your API key and configure a
# route to forward to /api/hooks/mailgun/inbound/
SENTRY_OPTIONS['mail.mailgun-api-key'] = env('SENTRY_MAILGUN_API_KEY') or ''
# If you specify a MAILGUN_API_KEY, you definitely want EMAIL_REPLIES
if SENTRY_OPTIONS['mail.mailgun-api-key']:
SENTRY_OPTIONS['mail.enable-replies'] = True
else:
SENTRY_OPTIONS['mail.enable-replies'] = env('SENTRY_ENABLE_EMAIL_REPLIES', False)
if SENTRY_OPTIONS['mail.enable-replies']:
SENTRY_OPTIONS['mail.reply-hostname'] = env('SENTRY_SMTP_HOSTNAME') or ''
# If this value ever becomes compromised, it's important to regenerate your
# SENTRY_SECRET_KEY. Changing this value will result in all current sessions
# being invalidated.
secret_key = env('SENTRY_SECRET_KEY')
if not secret_key:
raise Exception('Error: SENTRY_SECRET_KEY is undefined, run `generate-secret-key` and set to -e SENTRY_SECRET_KEY')
if 'SENTRY_RUNNING_UWSGI' not in os.environ and len(secret_key) < 32:
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
print('!! CAUTION !!')
print('!! Your SENTRY_SECRET_KEY is potentially insecure. !!')
print('!! We recommend at least 32 characters long. !!')
print('!! Regenerate with `generate-secret-key`. !!')
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
SENTRY_OPTIONS['system.secret-key'] = secret_key
if 'GITHUB_APP_ID' in os.environ:
GITHUB_EXTENDED_PERMISSIONS = ['repo']
GITHUB_APP_ID = env('GITHUB_APP_ID')
GITHUB_API_SECRET = env('GITHUB_API_SECRET')
if 'BITBUCKET_CONSUMER_KEY' in os.environ:
BITBUCKET_CONSUMER_KEY = env('BITBUCKET_CONSUMER_KEY')
BITBUCKET_CONSUMER_SECRET = env('BITBUCKET_CONSUMER_SECRET')
| []
| []
| []
| [] | [] | python | 0 | 0 | |
main.go | package main
import (
"log"
"os"
"os/signal"
"strings"
"syscall"
"github.com/bwmarrin/discordgo"
)
func main() {
var err error
token := os.Getenv("DISCORD_TOKEN")
if token == "" {
log.Fatalf("Missing Discord authentication token. Check README on how to resolve this issue.")
}
s, err := discordgo.New("Bot " + token)
if err != nil {
log.Fatalf("Error authenticating with Discord's servers. More information to follow: %v", err)
}
// Open connection to Discord
err = s.Open()
if err != nil {
log.Fatalf("Cannot connect to Discord's servers. More information to follow: %v", err)
}
// Log OK and set status
log.Println("=== === ===")
log.Println("Bot is currently running.")
log.Println("=== === ===")
s.UpdateGameStatus(0, "Use v.help")
s.AddHandler(cmd)
s.AddHandler(reactAdd)
// Gracefully close the Discord session, where possible
stop := make(chan os.Signal, 1)
signal.Notify(stop, syscall.SIGINT, syscall.SIGTERM, os.Interrupt, os.Kill)
<-stop
s.Close()
log.Println("Shutting down bot gracefully...")
}
func cmd(s *discordgo.Session, m *discordgo.MessageCreate) {
if m.Author.ID == s.State.User.ID {
return
}
// Handling for each command there is
if strings.HasPrefix(m.Content, "v.votemute") {
if len(m.Mentions) >= 2 {
// Notify that voting to mute several people isn't here.. yet.
s.ChannelMessageSend(m.ChannelID, "Voting to mute several people isn't here.. yet. Check https://github.com/doamatto/vote-to-mute to see when this is added.")
} else if len(m.Mentions) == 1 {
// Mute only one user
str := "Will we be muting " + m.Mentions[0].Mention() + " ? Vote on it!"
msg, err := s.ChannelMessageSend(m.ChannelID, str)
if err != nil {
log.Panicf("%v", err)
}
// Add reaction
err = s.MessageReactionAdd(m.ChannelID, msg.ID, "👍")
if err != nil {
log.Panicf("%v", err)
}
} else {
// Notify that you must mention who to mute.
s.ChannelMessageSend(m.ChannelID, "Please mention who to be muted (tip: type an @ followed by their name, or shift-click the user)")
}
}
if strings.HasPrefix(m.Content, "v.about") {
s.ChannelMessageSendEmbed(m.ChannelID, &discordgo.MessageEmbed{
Title: "About this bot",
Color: 16724804,
Description: "This was a bot written by [doamatto](https://www.doamatto.xyz) to both experiment with discordgo and help a friend with a moderation issue in a server.",
})
}
if strings.HasPrefix(m.Content, "v.h") || strings.HasPrefix(m.Content, "v.help") {
s.ChannelMessageSendEmbed(m.ChannelID, &discordgo.MessageEmbed{
Title: "Commands",
Color: 16724804,
Fields: []*discordgo.MessageEmbedField{
{Name: "v.about", Value: "What does this bot do and other FAQs", Inline: false},
{Name: "v.votemute", Value: "Vote to mute whatever user you mention. Can't be someone with higher privileges than this bot.", Inline: false},
},
})
}
}
func reactAdd(s *discordgo.Session, r *discordgo.MessageReactionAdd) {
m := r.MessageID
c := r.ChannelID
msg, err := s.ChannelMessage(c, m)
if err != nil {
log.Panicf("%v", err)
}
g := r.GuildID
// TODO: add a vote age limit (a day or so)
// Ignore if message being reacted on isn't one of ours
if msg.Author.ID != s.State.User.ID {
return
}
// Ignore if emoji is not helping pass a vote
if r.Emoji.Name != "👍" {
return
}
// See if threshold is met
if msg.Reactions[0].Emoji.Name == "👍" && msg.Reactions[0].Count >= 8 {
// Fetch ID
id := msg.Mentions[0].ID
// Mute the user, if the role already exists
roles, err := s.GuildRoles(g)
if err != nil {
log.Panicf("%v", err)
}
for _, r := range roles {
if r.Name == "Muted" {
// Give the user the Muted role
s.GuildMemberRoleAdd(g, id, r.ID)
return
}
}
// Create the missing roles and give the role
//
// The role is grey; the user only gets permissions to read channels.
// Servers will have to revoke permissions manually due to Discord not
// giving privileges to allow these kinds of interactions (afaik).
role, err := s.GuildRoleCreate(g)
if err != nil {
log.Panicf("%v", err)
}
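		// With these arguments the role is grey (0x5C5C5C = 6052956) and its
		// permissions 66560 = VIEW_CHANNEL (1024) + READ_MESSAGE_HISTORY (65536).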
s.GuildRoleEdit(g, role.ID, "Muted", 6052956, false, 66560, false)
}
}
| [
"\"DISCORD_TOKEN\""
]
| []
| [
"DISCORD_TOKEN"
]
| [] | ["DISCORD_TOKEN"] | go | 1 | 0 | |
google/appengine/ext/webapp/util.py | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Convience functions for the Webapp framework."""
from __future__ import print_function
import six
__all__ = ['login_required',
'run_wsgi_app',
'add_wsgi_middleware',
'run_bare_wsgi_app',
]
import os
import sys
import wsgiref.util
from google.appengine.api import users
from google.appengine.ext import webapp
def login_required(handler_method):
"""A decorator to require that a user be logged in to access a handler.
To use it, decorate your get() method like this:
@login_required
def get(self):
user = users.get_current_user(self)
self.response.out.write('Hello, ' + user.nickname())
We will redirect to a login page if the user is not logged in. We always
redirect to the request URI, and Google Accounts only redirects back as a GET
request, so this should not be used for POSTs.
"""
def check_login(self, *args):
if self.request.method != 'GET':
raise webapp.Error('The check_login decorator can only be used for GET '
'requests')
user = users.get_current_user()
if not user:
self.redirect(users.create_login_url(self.request.uri))
return
else:
handler_method(self, *args)
return check_login
def run_wsgi_app(application):
"""Runs your WSGI-compliant application object in a CGI environment.
Compared to wsgiref.handlers.CGIHandler().run(application), this
function takes some shortcuts. Those are possible because the
app server makes stronger promises than the CGI standard.
Also, this function may wrap custom WSGI middleware around the
application. (You can use run_bare_wsgi_app() to run an application
without adding WSGI middleware, and add_wsgi_middleware() to wrap
the configured WSGI middleware around an application without running
it. This function is merely a convenient combination of the latter
two.)
To configure custom WSGI middleware, define a function
webapp_add_wsgi_middleware(app) to your appengine_config.py file in
your application root directory:
def webapp_add_wsgi_middleware(app):
app = MiddleWareClassOne(app)
app = MiddleWareClassTwo(app)
return app
You must import the middleware classes elsewhere in the file. If
the function is not found, no WSGI middleware is added.
"""
run_bare_wsgi_app(add_wsgi_middleware(application))
def add_wsgi_middleware(application):
"""Wrap WSGI middleware around a WSGI application object."""
return webapp._config_handle.add_wsgi_middleware(application)
def run_bare_wsgi_app(application):
"""Like run_wsgi_app() but doesn't add WSGI middleware."""
env = dict(os.environ)
env["wsgi.input"] = sys.stdin
env["wsgi.errors"] = sys.stderr
env["wsgi.version"] = (1, 0)
env["wsgi.run_once"] = True
env["wsgi.url_scheme"] = wsgiref.util.guess_scheme(env)
env["wsgi.multithread"] = False
env["wsgi.multiprocess"] = False
result = application(env, _start_response)
try:
if result is not None:
for data in result:
sys.stdout.write(data)
finally:
if hasattr(result, 'close'):
result.close()
def _start_response(status, headers, exc_info=None):
"""A start_response() callable as specified by PEP 333"""
if exc_info is not None:
six.reraise(exc_info[0], exc_info[1], exc_info[2])
print("Status: %s" % status)
for name, val in headers:
print("%s: %s" % (name, val))
print()
return sys.stdout.write
| []
| []
| []
| [] | [] | python | 0 | 0 | |
kart/settings.py | """
Django settings for kart project.
Generated by 'django-admin startproject' using Django 3.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
from .config import *
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = SECRET_KEY_ENV
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['krenv.eba-gri6uaer.us-east-1.elasticbeanstalk.com','127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'category',
'user',
'store',
'cart',
'order',
'admin_honeypot',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django_session_timeout.middleware.SessionTimeoutMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
SESSION_EXPIRE_SECONDS = 3600 # 1 hour
SESSION_EXPIRE_AFTER_LAST_ACTIVITY = True
SESSION_TIMEOUT_REDIRECT = 'user/login/'
ROOT_URLCONF = 'kart.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'category.context_processor.categories_list',
'cart.context_processor.cart_items',
],
},
},
]
WSGI_APPLICATION = 'kart.wsgi.application'
AUTH_USER_MODEL = 'user.User'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
if 'RDS_DB_NAME' in os.environ:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': os.environ['RDS_DB_NAME'],
'USER': os.environ['RDS_USERNAME'],
'PASSWORD': os.environ['RDS_PASSWORD'],
'HOST': os.environ['RDS_HOSTNAME'],
'PORT': os.environ['RDS_PORT'],
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR/'db.sqlite3',
}
}
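# On Elastic Beanstalk the RDS_* variables come from the attached RDS instance,
# e.g. (placeholder values): RDS_DB_NAME=ebdb, RDS_USERNAME=admin,
# RDS_PASSWORD=..., RDS_HOSTNAME=xxxx.rds.amazonaws.com, RDS_PORT=5432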
LD_LIBRARY_PATH="/usr/local/lib"
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = BASE_DIR /'static'
STATICFILES_DIRS = [
'kart/static',
]
MEDIA_URL = '/media/'
MEDIA_ROOT = BASE_DIR /'media'
from django.contrib.messages import constants as messages
MESSAGE_TAGS = {
messages.ERROR : 'danger',
}
# SMTP configuration
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = EMAIL_ADDRESS
EMAIL_HOST_PASSWORD = MY_EMAIL_PASSWORD
EMAIL_USE_TLS = True
| []
| []
| [
"RDS_PASSWORD",
"RDS_DB_NAME",
"RDS_USERNAME",
"RDS_PORT",
"RDS_HOSTNAME"
]
| [] | ["RDS_PASSWORD", "RDS_DB_NAME", "RDS_USERNAME", "RDS_PORT", "RDS_HOSTNAME"] | python | 5 | 0 | |
testlib/helper.go | // Copyright (c) 2017-present Mattermost, Inc. All Rights Reserved.
// See License.txt for license information.
package testlib
import (
"flag"
"fmt"
"os"
"testing"
"github.com/mattermost/mattermost-server/mlog"
"github.com/mattermost/mattermost-server/model"
"github.com/mattermost/mattermost-server/store"
"github.com/mattermost/mattermost-server/store/sqlstore"
"github.com/mattermost/mattermost-server/store/storetest"
"github.com/mattermost/mattermost-server/utils"
)
type MainHelper struct {
Settings *model.SqlSettings
Store store.Store
SqlSupplier *sqlstore.SqlSupplier
ClusterInterface *FakeClusterInterface
status int
testResourcePath string
}
type HelperOptions struct {
EnableStore bool
EnableResources bool
}
func NewMainHelper() *MainHelper {
return NewMainHelperWithOptions(&HelperOptions{
EnableStore: true,
EnableResources: true,
})
}
func NewMainHelperWithOptions(options *HelperOptions) *MainHelper {
var mainHelper MainHelper
flag.Parse()
// Setup a global logger to catch tests logging outside of app context
// The global logger will be stomped by apps initializing but that's fine for testing.
// Ideally this won't happen.
mlog.InitGlobalLogger(mlog.NewLogger(&mlog.LoggerConfiguration{
EnableConsole: true,
ConsoleJson: true,
ConsoleLevel: "error",
EnableFile: false,
}))
utils.TranslationsPreInit()
if options != nil {
if options.EnableStore {
mainHelper.setupStore()
}
if options.EnableResources {
mainHelper.setupResources()
}
}
return &mainHelper
}
func (h *MainHelper) Main(m *testing.M) {
if h.testResourcePath != "" {
prevDir, err := os.Getwd()
if err != nil {
panic("Failed to get current working directory: " + err.Error())
}
err = os.Chdir(h.testResourcePath)
if err != nil {
panic(fmt.Sprintf("Failed to set current working directory to %s: %s", h.testResourcePath, err.Error()))
}
defer func() {
err := os.Chdir(prevDir)
if err != nil {
panic(fmt.Sprintf("Failed to restore current working directory to %s: %s", prevDir, err.Error()))
}
}()
}
h.status = m.Run()
}
func (h *MainHelper) setupStore() {
driverName := os.Getenv("MM_SQLSETTINGS_DRIVERNAME")
if driverName == "" {
driverName = model.DATABASE_DRIVER_MYSQL
}
h.Settings = storetest.MakeSqlSettings(driverName)
h.ClusterInterface = &FakeClusterInterface{}
h.SqlSupplier = sqlstore.NewSqlSupplier(*h.Settings, nil)
h.Store = &TestStore{
store.NewLayeredStore(h.SqlSupplier, nil, h.ClusterInterface),
}
}
func (h *MainHelper) setupResources() {
var err error
h.testResourcePath, err = SetupTestResources()
if err != nil {
panic("failed to setup test resources: " + err.Error())
}
}
func (h *MainHelper) Close() error {
if h.Settings != nil {
storetest.CleanupSqlSettings(h.Settings)
}
if h.testResourcePath != "" {
os.RemoveAll(h.testResourcePath)
}
os.Exit(h.status)
return nil
}
func (h *MainHelper) GetSqlSettings() *model.SqlSettings {
if h.Settings == nil {
panic("MainHelper not initialized with database access.")
}
return h.Settings
}
func (h *MainHelper) GetStore() store.Store {
if h.Store == nil {
panic("MainHelper not initialized with store.")
}
return h.Store
}
func (h *MainHelper) GetSqlSupplier() *sqlstore.SqlSupplier {
if h.SqlSupplier == nil {
panic("MainHelper not initialized with sql supplier.")
}
return h.SqlSupplier
}
func (h *MainHelper) GetClusterInterface() *FakeClusterInterface {
if h.ClusterInterface == nil {
panic("MainHelper not initialized with sql supplier.")
}
return h.ClusterInterface
}
| [
"\"MM_SQLSETTINGS_DRIVERNAME\""
]
| []
| [
"MM_SQLSETTINGS_DRIVERNAME"
]
| [] | ["MM_SQLSETTINGS_DRIVERNAME"] | go | 1 | 0 | |
vendor/github.com/snapcore/snapd/asserts/gpgkeypairmgr.go | // -*- Mode: Go; indent-tabs-mode: t -*-
/*
* Copyright (C) 2016 Canonical Ltd
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 3 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package asserts
import (
"bytes"
"errors"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/snapcore/snapd/osutil"
)
func ensureGPGHomeDirectory() (string, error) {
real, err := osutil.RealUser()
if err != nil {
return "", err
}
uid, gid, err := osutil.UidGid(real)
if err != nil {
return "", err
}
homedir := os.Getenv("SNAP_GNUPG_HOME")
if homedir == "" {
homedir = filepath.Join(real.HomeDir, ".snap", "gnupg")
}
if err := osutil.MkdirAllChown(homedir, 0700, uid, gid); err != nil {
return "", err
}
return homedir, nil
}
// findGPGCommand returns the path to a suitable GnuPG binary to use.
// GnuPG 2 is mainly intended for desktop use, and is hard for us to use
// here: in particular, it's extremely difficult to use it to delete a
// secret key without a pinentry prompt (which would be necessary in our
// test suite). GnuPG 1 is still supported so it's reasonable to continue
// using that for now.
func findGPGCommand() (string, error) {
if path := os.Getenv("SNAP_GNUPG_CMD"); path != "" {
return path, nil
}
path, err := exec.LookPath("gpg1")
if err != nil {
path, err = exec.LookPath("gpg")
}
return path, err
}
func runGPGImpl(input []byte, args ...string) ([]byte, error) {
homedir, err := ensureGPGHomeDirectory()
if err != nil {
return nil, err
}
// Ensure the gpg-agent knows which tty to talk to when asking for
// the passphrase. This is needed because we drive gpg over
// a pipe, and if the agent is not already started it will
// not be able to ask for a password.
if os.Getenv("GPG_TTY") == "" {
tty, err := os.Readlink("/proc/self/fd/0")
if err != nil {
return nil, err
}
os.Setenv("GPG_TTY", tty)
}
general := []string{"--homedir", homedir, "-q", "--no-auto-check-trustdb"}
allArgs := append(general, args...)
path, err := findGPGCommand()
if err != nil {
return nil, err
}
cmd := exec.Command(path, allArgs...)
var outBuf bytes.Buffer
var errBuf bytes.Buffer
if len(input) != 0 {
cmd.Stdin = bytes.NewBuffer(input)
}
cmd.Stdout = &outBuf
cmd.Stderr = &errBuf
if err := cmd.Run(); err != nil {
return nil, fmt.Errorf("%s %s failed: %v (%q)", path, strings.Join(args, " "), err, errBuf.Bytes())
}
return outBuf.Bytes(), nil
}
var runGPG = runGPGImpl
// A key pair manager backed by a local GnuPG setup.
type GPGKeypairManager struct{}
func (gkm *GPGKeypairManager) gpg(input []byte, args ...string) ([]byte, error) {
return runGPG(input, args...)
}
// NewGPGKeypairManager creates a new key pair manager backed by a local GnuPG setup.
// Importing keys through the keypair manager interface is not
// supported.
// Main purpose is allowing signing using keys from a GPG setup.
func NewGPGKeypairManager() *GPGKeypairManager {
return &GPGKeypairManager{}
}
func (gkm *GPGKeypairManager) retrieve(fpr string) (PrivateKey, error) {
out, err := gkm.gpg(nil, "--batch", "--export", "--export-options", "export-minimal,export-clean,no-export-attributes", "0x"+fpr)
if err != nil {
return nil, err
}
if len(out) == 0 {
return nil, fmt.Errorf("cannot retrieve key with fingerprint %q in GPG keyring", fpr)
}
pubKeyBuf := bytes.NewBuffer(out)
privKey, err := newExtPGPPrivateKey(pubKeyBuf, "GPG", func(content []byte) ([]byte, error) {
return gkm.sign(fpr, content)
})
if err != nil {
return nil, fmt.Errorf("cannot load GPG public key with fingerprint %q: %v", fpr, err)
}
gotFingerprint := privKey.fingerprint()
if gotFingerprint != fpr {
return nil, fmt.Errorf("got wrong public key from GPG, expected fingerprint %q: %s", fpr, gotFingerprint)
}
return privKey, nil
}
// Walk iterates over all the RSA private keys in the local GPG setup calling the provided callback until this returns an error
func (gkm *GPGKeypairManager) Walk(consider func(privk PrivateKey, fingerprint string, uid string) error) error {
// see GPG source doc/DETAILS
out, err := gkm.gpg(nil, "--batch", "--list-secret-keys", "--fingerprint", "--with-colons", "--fixed-list-mode")
if err != nil {
return err
}
lines := strings.Split(string(out), "\n")
n := len(lines)
if n > 0 && lines[n-1] == "" {
n--
}
if n == 0 {
return nil
}
lines = lines[:n]
for j := 0; j < n; j++ {
// sec: line
line := lines[j]
if !strings.HasPrefix(line, "sec:") {
continue
}
secFields := strings.Split(line, ":")
if len(secFields) < 5 {
continue
}
if secFields[3] != "1" { // not RSA
continue
}
keyID := secFields[4]
uid := ""
fpr := ""
var privKey PrivateKey
// look for fpr:, uid: lines, order may vary and gpg2.1
// may sprinkle additional lines in (like gpr:)
Loop:
for k := j + 1; k < n && !strings.HasPrefix(lines[k], "sec:"); k++ {
switch {
case strings.HasPrefix(lines[k], "fpr:"):
fprFields := strings.Split(lines[k], ":")
// extract "Field 10 - User-ID"
// A FPR record stores the fingerprint here.
if len(fprFields) < 10 {
break Loop
}
fpr = fprFields[9]
if !strings.HasSuffix(fpr, keyID) {
break // strange, skip
}
privKey, err = gkm.retrieve(fpr)
if err != nil {
return err
}
case strings.HasPrefix(lines[k], "uid:"):
uidFields := strings.Split(lines[k], ":")
// extract "*** Field 10 - User-ID"
if len(uidFields) < 10 {
break Loop
}
uid = uidFields[9]
}
}
// sanity checking
if privKey == nil || uid == "" {
continue
}
// collected it all
err = consider(privKey, fpr, uid)
if err != nil {
return err
}
}
return nil
}
func (gkm *GPGKeypairManager) Put(privKey PrivateKey) error {
// NOTE: we don't need this initially at least and this keypair mgr is not for general arbitrary usage
return fmt.Errorf("cannot import private key into GPG keyring")
}
func (gkm *GPGKeypairManager) Get(keyID string) (PrivateKey, error) {
stop := errors.New("stop marker")
var hit PrivateKey
match := func(privk PrivateKey, fpr string, uid string) error {
if privk.PublicKey().ID() == keyID {
hit = privk
return stop
}
return nil
}
err := gkm.Walk(match)
if err == stop {
return hit, nil
}
if err != nil {
return nil, err
}
return nil, fmt.Errorf("cannot find key %q in GPG keyring", keyID)
}
func (gkm *GPGKeypairManager) sign(fingerprint string, content []byte) ([]byte, error) {
out, err := gkm.gpg(content, "--personal-digest-preferences", "SHA512", "--default-key", "0x"+fingerprint, "--detach-sign")
if err != nil {
return nil, fmt.Errorf("cannot sign using GPG: %v", err)
}
return out, nil
}
type gpgKeypairInfo struct {
privKey PrivateKey
fingerprint string
}
func (gkm *GPGKeypairManager) findByName(name string) (*gpgKeypairInfo, error) {
stop := errors.New("stop marker")
var hit *gpgKeypairInfo
match := func(privk PrivateKey, fpr string, uid string) error {
if uid == name {
hit = &gpgKeypairInfo{
privKey: privk,
fingerprint: fpr,
}
return stop
}
return nil
}
err := gkm.Walk(match)
if err == stop {
return hit, nil
}
if err != nil {
return nil, err
}
return nil, fmt.Errorf("cannot find key named %q in GPG keyring", name)
}
// GetByName looks up a private key by name and returns it.
func (gkm *GPGKeypairManager) GetByName(name string) (PrivateKey, error) {
keyInfo, err := gkm.findByName(name)
if err != nil {
return nil, err
}
return keyInfo.privKey, nil
}
var generateTemplate = `
Key-Type: RSA
Key-Length: 4096
Name-Real: %s
Creation-Date: seconds=%d
Preferences: SHA512
`
func (gkm *GPGKeypairManager) parametersForGenerate(passphrase string, name string) string {
fixedCreationTime := v1FixedTimestamp.Unix()
generateParams := fmt.Sprintf(generateTemplate, name, fixedCreationTime)
if passphrase != "" {
generateParams += "Passphrase: " + passphrase + "\n"
}
return generateParams
}
// Generate creates a new key with the given passphrase and name.
func (gkm *GPGKeypairManager) Generate(passphrase string, name string) error {
_, err := gkm.findByName(name)
if err == nil {
return fmt.Errorf("key named %q already exists in GPG keyring", name)
}
generateParams := gkm.parametersForGenerate(passphrase, name)
_, err = gkm.gpg([]byte(generateParams), "--batch", "--gen-key")
if err != nil {
return err
}
return nil
}
// Export returns the encoded text of the named public key.
func (gkm *GPGKeypairManager) Export(name string) ([]byte, error) {
keyInfo, err := gkm.findByName(name)
if err != nil {
return nil, err
}
return EncodePublicKey(keyInfo.privKey.PublicKey())
}
// Delete removes the named key pair from GnuPG's storage.
func (gkm *GPGKeypairManager) Delete(name string) error {
keyInfo, err := gkm.findByName(name)
if err != nil {
return err
}
_, err = gkm.gpg(nil, "--batch", "--delete-secret-and-public-key", "0x"+keyInfo.fingerprint)
if err != nil {
return err
}
return nil
}
| [
"\"SNAP_GNUPG_HOME\"",
"\"SNAP_GNUPG_CMD\"",
"\"GPG_TTY\""
]
| []
| [
"SNAP_GNUPG_HOME",
"SNAP_GNUPG_CMD",
"GPG_TTY"
]
| [] | ["SNAP_GNUPG_HOME", "SNAP_GNUPG_CMD", "GPG_TTY"] | go | 3 | 0 | |
main.go | //go:generate go install -v github.com/josephspurrier/goversioninfo/cmd/goversioninfo
//go:generate goversioninfo -icon=res/papp.ico -manifest=res/papp.manifest
package main
import (
"fmt"
"os"
"path"
"github.com/portapps/portapps/v3"
"github.com/portapps/portapps/v3/pkg/log"
"github.com/portapps/portapps/v3/pkg/registry"
"github.com/portapps/portapps/v3/pkg/utl"
)
type config struct {
Cleanup bool `yaml:"cleanup" mapstructure:"cleanup"`
}
var (
app *portapps.App
cfg *config
)
func init() {
var err error
// Default config
cfg = &config{
Cleanup: false,
}
// Init app
if app, err = portapps.NewWithCfg("hlsw-portable", "HLSW", cfg); err != nil {
log.Fatal().Err(err).Msg("Cannot initialize application. See log file for more info.")
}
}
func main() {
utl.CreateFolder(app.DataPath)
app.Process = utl.PathJoin(app.AppPath, "hlsw.exe")
app.Args = []string{
fmt.Sprintf("-PATH:%s", app.AppPath),
fmt.Sprintf("-DATADIR:%s", app.DataPath),
}
// Cleanup on exit
if cfg.Cleanup {
defer func() {
utl.Cleanup([]string{
path.Join(os.Getenv("APPDATA"), "HLSW"),
})
}()
}
regFile := utl.PathJoin(utl.CreateFolder(app.RootPath, "reg"), "HLSW.reg")
regKey := registry.Key{
Key: `HKCU\Software\HLSW`,
Arch: "32",
}
if err := regKey.Import(regFile); err != nil {
log.Error().Err(err).Msg("Cannot import registry key")
}
defer func() {
if err := regKey.Export(regFile); err != nil {
log.Error().Err(err).Msg("Cannot export registry key")
}
if cfg.Cleanup {
if err := regKey.Delete(true); err != nil {
log.Error().Err(err).Msg("Cannot remove registry key")
}
}
}()
defer app.Close()
app.Launch(os.Args[1:])
}
| [
"\"APPDATA\""
]
| []
| [
"APPDATA"
]
| [] | ["APPDATA"] | go | 1 | 0 | |
flume-ng-sinks/flume-hdfs-sink/src/test/java/org/apache/flume/sink/hdfs/TestHDFSEventSink.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flume.sink.hdfs;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.ByteBuffer;
import java.nio.charset.CharsetDecoder;
import java.util.Arrays;
import java.util.Calendar;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterators;
import com.google.common.collect.Maps;
import org.apache.avro.file.DataFileStream;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.DatumReader;
import org.apache.commons.lang.StringUtils;
import org.apache.flume.Channel;
import org.apache.flume.ChannelException;
import org.apache.flume.Clock;
import org.apache.flume.Context;
import org.apache.flume.Event;
import org.apache.flume.EventDeliveryException;
import org.apache.flume.Sink.Status;
import org.apache.flume.SystemClock;
import org.apache.flume.Transaction;
import org.apache.flume.channel.BasicTransactionSemantics;
import org.apache.flume.channel.MemoryChannel;
import org.apache.flume.conf.Configurables;
import org.apache.flume.event.EventBuilder;
import org.apache.flume.event.SimpleEvent;
import org.apache.flume.instrumentation.SinkCounter;
import org.apache.flume.lifecycle.LifecycleException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.internal.util.reflection.Whitebox;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Charsets;
import com.google.common.collect.Lists;
public class TestHDFSEventSink {
private HDFSEventSink sink;
private String testPath;
private static final Logger LOG = LoggerFactory
.getLogger(HDFSEventSink.class);
static {
System.setProperty("java.security.krb5.realm", "flume");
System.setProperty("java.security.krb5.kdc", "blah");
}
private void dirCleanup() {
Configuration conf = new Configuration();
try {
FileSystem fs = FileSystem.get(conf);
Path dirPath = new Path(testPath);
if (fs.exists(dirPath)) {
fs.delete(dirPath, true);
}
} catch (IOException eIO) {
LOG.warn("IO Error in test cleanup", eIO);
}
}
// TODO: use System.getProperty("file.separator") instead of hardcoded '/'
@Before
public void setUp() {
LOG.debug("Starting...");
/*
* FIXME: Use a dynamic path to support concurrent test execution. Also,
* beware of the case where this path is used for something or when the
* Hadoop config points at file:/// rather than hdfs://. We need to find a
* better way of testing HDFS related functionality.
*/
testPath = "file:///tmp/flume-test."
+ Calendar.getInstance().getTimeInMillis() + "."
+ Thread.currentThread().getId();
sink = new HDFSEventSink();
sink.setName("HDFSEventSink-" + UUID.randomUUID().toString());
dirCleanup();
}
@After
public void tearDown() {
if (System.getenv("hdfs_keepFiles") == null) dirCleanup();
}
@Test
public void testTextBatchAppend() throws Exception {
doTestTextBatchAppend(false);
}
@Test
public void testTextBatchAppendRawFS() throws Exception {
doTestTextBatchAppend(true);
}
public void doTestTextBatchAppend(boolean useRawLocalFileSystem)
throws Exception {
LOG.debug("Starting...");
final long rollCount = 10;
final long batchSize = 2;
final String fileName = "FlumeData";
String newPath = testPath + "/singleTextBucket";
int totalEvents = 0;
int i = 1, j = 1;
// clear the test directory
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
Path dirPath = new Path(newPath);
fs.delete(dirPath, true);
fs.mkdirs(dirPath);
Context context = new Context();
// context.put("hdfs.path", testPath + "/%Y-%m-%d/%H");
context.put("hdfs.path", newPath);
context.put("hdfs.filePrefix", fileName);
context.put("hdfs.rollCount", String.valueOf(rollCount));
context.put("hdfs.rollInterval", "0");
context.put("hdfs.rollSize", "0");
context.put("hdfs.batchSize", String.valueOf(batchSize));
context.put("hdfs.writeFormat", "Text");
context.put("hdfs.useRawLocalFileSystem",
Boolean.toString(useRawLocalFileSystem));
context.put("hdfs.fileType", "DataStream");
Configurables.configure(sink, context);
Channel channel = new MemoryChannel();
Configurables.configure(channel, context);
sink.setChannel(channel);
sink.start();
Calendar eventDate = Calendar.getInstance();
List<String> bodies = Lists.newArrayList();
// push the event batches into channel to roll twice
for (i = 1; i <= (rollCount * 10) / batchSize; i++) {
Transaction txn = channel.getTransaction();
txn.begin();
for (j = 1; j <= batchSize; j++) {
Event event = new SimpleEvent();
eventDate.clear();
eventDate.set(2011, i, i, i, 0); // yy mm dd
String body = "Test." + i + "." + j;
event.setBody(body.getBytes());
bodies.add(body);
channel.put(event);
totalEvents++;
}
txn.commit();
txn.close();
// execute sink to process the events
sink.process();
}
sink.stop();
// loop through all the files generated and check their contents
FileStatus[] dirStat = fs.listStatus(dirPath);
Path[] fList = FileUtil.stat2Paths(dirStat);
// check that the roll happened correctly for the given data
long expectedFiles = totalEvents / rollCount;
if (totalEvents % rollCount > 0) expectedFiles++;
Assert.assertEquals("num files wrong, found: " +
Lists.newArrayList(fList), expectedFiles, fList.length);
// check the contents of the all files
verifyOutputTextFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
}
@Test
public void testLifecycle() throws InterruptedException, LifecycleException {
LOG.debug("Starting...");
Context context = new Context();
context.put("hdfs.path", testPath);
/*
* context.put("hdfs.rollInterval", String.class);
* context.get("hdfs.rollSize", String.class); context.get("hdfs.rollCount",
* String.class);
*/
Configurables.configure(sink, context);
sink.setChannel(new MemoryChannel());
sink.start();
sink.stop();
}
@Test
public void testEmptyChannelResultsInStatusBackoff()
throws InterruptedException, LifecycleException, EventDeliveryException {
LOG.debug("Starting...");
Context context = new Context();
Channel channel = new MemoryChannel();
context.put("hdfs.path", testPath);
context.put("keep-alive", "0");
Configurables.configure(sink, context);
Configurables.configure(channel, context);
sink.setChannel(channel);
sink.start();
Assert.assertEquals(Status.BACKOFF, sink.process());
sink.stop();
}
@Test
public void testKerbFileAccess() throws InterruptedException,
LifecycleException, EventDeliveryException, IOException {
LOG.debug("Starting testKerbFileAccess() ...");
final String fileName = "FlumeData";
final long rollCount = 5;
final long batchSize = 2;
String newPath = testPath + "/singleBucket";
String kerbConfPrincipal = "user1/[email protected]";
String kerbKeytab = "/usr/lib/flume/nonexistkeytabfile";
//turn security on
Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
"kerberos");
UserGroupInformation.setConfiguration(conf);
Context context = new Context();
context.put("hdfs.path", newPath);
context.put("hdfs.filePrefix", fileName);
context.put("hdfs.rollCount", String.valueOf(rollCount));
context.put("hdfs.batchSize", String.valueOf(batchSize));
context.put("hdfs.kerberosPrincipal", kerbConfPrincipal);
context.put("hdfs.kerberosKeytab", kerbKeytab);
try {
Configurables.configure(sink, context);
Assert.fail("no exception thrown");
} catch (IllegalArgumentException expected) {
Assert.assertTrue(expected.getMessage().contains(
"Keytab is not a readable file"));
} finally {
//turn security off
conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION,
"simple");
UserGroupInformation.setConfiguration(conf);
}
}
@Test
public void testTextAppend() throws InterruptedException, LifecycleException,
EventDeliveryException, IOException {
LOG.debug("Starting...");
final long rollCount = 3;
final long batchSize = 2;
final String fileName = "FlumeData";
String newPath = testPath + "/singleTextBucket";
int totalEvents = 0;
int i = 1, j = 1;
// clear the test directory
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
Path dirPath = new Path(newPath);
fs.delete(dirPath, true);
fs.mkdirs(dirPath);
Context context = new Context();
// context.put("hdfs.path", testPath + "/%Y-%m-%d/%H");
context.put("hdfs.path", newPath);
context.put("hdfs.filePrefix", fileName);
context.put("hdfs.rollCount", String.valueOf(rollCount));
context.put("hdfs.batchSize", String.valueOf(batchSize));
context.put("hdfs.writeFormat", "Text");
context.put("hdfs.fileType", "DataStream");
Configurables.configure(sink, context);
Channel channel = new MemoryChannel();
Configurables.configure(channel, context);
sink.setChannel(channel);
sink.start();
Calendar eventDate = Calendar.getInstance();
List<String> bodies = Lists.newArrayList();
// push the event batches into channel
for (i = 1; i < 4; i++) {
Transaction txn = channel.getTransaction();
txn.begin();
for (j = 1; j <= batchSize; j++) {
Event event = new SimpleEvent();
eventDate.clear();
eventDate.set(2011, i, i, i, 0); // yy mm dd
event.getHeaders().put("timestamp",
String.valueOf(eventDate.getTimeInMillis()));
event.getHeaders().put("hostname", "Host" + i);
String body = "Test." + i + "." + j;
event.setBody(body.getBytes());
bodies.add(body);
channel.put(event);
totalEvents++;
}
txn.commit();
txn.close();
// execute sink to process the events
sink.process();
}
sink.stop();
// loop through all the files generated and check their contents
FileStatus[] dirStat = fs.listStatus(dirPath);
Path[] fList = FileUtil.stat2Paths(dirStat);
// check that the roll happened correctly for the given data
long expectedFiles = totalEvents / rollCount;
if (totalEvents % rollCount > 0) expectedFiles++;
Assert.assertEquals("num files wrong, found: " +
Lists.newArrayList(fList), expectedFiles, fList.length);
verifyOutputTextFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
}
@Test
public void testAvroAppend() throws InterruptedException, LifecycleException,
EventDeliveryException, IOException {
LOG.debug("Starting...");
final long rollCount = 3;
final long batchSize = 2;
final String fileName = "FlumeData";
String newPath = testPath + "/singleTextBucket";
int totalEvents = 0;
int i = 1, j = 1;
// clear the test directory
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
Path dirPath = new Path(newPath);
fs.delete(dirPath, true);
fs.mkdirs(dirPath);
Context context = new Context();
// context.put("hdfs.path", testPath + "/%Y-%m-%d/%H");
context.put("hdfs.path", newPath);
context.put("hdfs.filePrefix", fileName);
context.put("hdfs.rollCount", String.valueOf(rollCount));
context.put("hdfs.batchSize", String.valueOf(batchSize));
context.put("hdfs.writeFormat", "Text");
context.put("hdfs.fileType", "DataStream");
context.put("serializer", "AVRO_EVENT");
Configurables.configure(sink, context);
Channel channel = new MemoryChannel();
Configurables.configure(channel, context);
sink.setChannel(channel);
sink.start();
Calendar eventDate = Calendar.getInstance();
List<String> bodies = Lists.newArrayList();
// push the event batches into channel
for (i = 1; i < 4; i++) {
Transaction txn = channel.getTransaction();
txn.begin();
for (j = 1; j <= batchSize; j++) {
Event event = new SimpleEvent();
eventDate.clear();
eventDate.set(2011, i, i, i, 0); // yy mm dd
event.getHeaders().put("timestamp",
String.valueOf(eventDate.getTimeInMillis()));
event.getHeaders().put("hostname", "Host" + i);
String body = "Test." + i + "." + j;
event.setBody(body.getBytes());
bodies.add(body);
channel.put(event);
totalEvents++;
}
txn.commit();
txn.close();
// execute sink to process the events
sink.process();
}
sink.stop();
// loop through all the files generated and check their contents
FileStatus[] dirStat = fs.listStatus(dirPath);
Path[] fList = FileUtil.stat2Paths(dirStat);
// check that the roll happened correctly for the given data
long expectedFiles = totalEvents / rollCount;
if (totalEvents % rollCount > 0) expectedFiles++;
Assert.assertEquals("num files wrong, found: " +
Lists.newArrayList(fList), expectedFiles, fList.length);
verifyOutputAvroFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
}
@Test
public void testSimpleAppend() throws InterruptedException,
LifecycleException, EventDeliveryException, IOException {
LOG.debug("Starting...");
final String fileName = "FlumeData";
final long rollCount = 5;
final long batchSize = 2;
final int numBatches = 4;
String newPath = testPath + "/singleBucket";
int totalEvents = 0;
int i = 1, j = 1;
// clear the test directory
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
Path dirPath = new Path(newPath);
fs.delete(dirPath, true);
fs.mkdirs(dirPath);
Context context = new Context();
context.put("hdfs.path", newPath);
context.put("hdfs.filePrefix", fileName);
context.put("hdfs.rollCount", String.valueOf(rollCount));
context.put("hdfs.batchSize", String.valueOf(batchSize));
Configurables.configure(sink, context);
Channel channel = new MemoryChannel();
Configurables.configure(channel, context);
sink.setChannel(channel);
sink.start();
Calendar eventDate = Calendar.getInstance();
List<String> bodies = Lists.newArrayList();
// push the event batches into channel
for (i = 1; i < numBatches; i++) {
Transaction txn = channel.getTransaction();
txn.begin();
for (j = 1; j <= batchSize; j++) {
Event event = new SimpleEvent();
eventDate.clear();
eventDate.set(2011, i, i, i, 0); // yy mm dd
event.getHeaders().put("timestamp",
String.valueOf(eventDate.getTimeInMillis()));
event.getHeaders().put("hostname", "Host" + i);
String body = "Test." + i + "." + j;
event.setBody(body.getBytes());
bodies.add(body);
channel.put(event);
totalEvents++;
}
txn.commit();
txn.close();
// execute sink to process the events
sink.process();
}
sink.stop();
// loop through all the files generated and check their contents
FileStatus[] dirStat = fs.listStatus(dirPath);
Path[] fList = FileUtil.stat2Paths(dirStat);
// check that the roll happened correctly for the given data
long expectedFiles = totalEvents / rollCount;
if (totalEvents % rollCount > 0) expectedFiles++;
Assert.assertEquals("num files wrong, found: " +
Lists.newArrayList(fList), expectedFiles, fList.length);
verifyOutputSequenceFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
}
@Test
public void testSimpleAppendLocalTime()
throws InterruptedException, LifecycleException, EventDeliveryException, IOException {
final long currentTime = System.currentTimeMillis();
Clock clk = new Clock() {
@Override
public long currentTimeMillis() {
return currentTime;
}
};
LOG.debug("Starting...");
final String fileName = "FlumeData";
final long rollCount = 5;
final long batchSize = 2;
final int numBatches = 4;
String newPath = testPath + "/singleBucket/%s" ;
String expectedPath = testPath + "/singleBucket/" +
String.valueOf(currentTime / 1000);
int totalEvents = 0;
int i = 1, j = 1;
// clear the test directory
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
Path dirPath = new Path(expectedPath);
fs.delete(dirPath, true);
fs.mkdirs(dirPath);
Context context = new Context();
context.put("hdfs.path", newPath);
context.put("hdfs.filePrefix", fileName);
context.put("hdfs.rollCount", String.valueOf(rollCount));
context.put("hdfs.batchSize", String.valueOf(batchSize));
context.put("hdfs.useLocalTimeStamp", String.valueOf(true));
Configurables.configure(sink, context);
Channel channel = new MemoryChannel();
Configurables.configure(channel, context);
sink.setChannel(channel);
sink.setBucketClock(clk);
sink.start();
Calendar eventDate = Calendar.getInstance();
List<String> bodies = Lists.newArrayList();
// push the event batches into channel
for (i = 1; i < numBatches; i++) {
Transaction txn = channel.getTransaction();
txn.begin();
for (j = 1; j <= batchSize; j++) {
Event event = new SimpleEvent();
eventDate.clear();
eventDate.set(2011, i, i, i, 0); // yy mm dd
event.getHeaders().put("timestamp",
String.valueOf(eventDate.getTimeInMillis()));
event.getHeaders().put("hostname", "Host" + i);
String body = "Test." + i + "." + j;
event.setBody(body.getBytes());
bodies.add(body);
channel.put(event);
totalEvents++;
}
txn.commit();
txn.close();
// execute sink to process the events
sink.process();
}
sink.stop();
// loop through all the files generated and check their contents
FileStatus[] dirStat = fs.listStatus(dirPath);
Path[] fList = FileUtil.stat2Paths(dirStat);
// check that the roll happened correctly for the given data
long expectedFiles = totalEvents / rollCount;
if (totalEvents % rollCount > 0) expectedFiles++;
Assert.assertEquals("num files wrong, found: " +
Lists.newArrayList(fList), expectedFiles, fList.length);
verifyOutputSequenceFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
// The clock in bucketpath is static, so restore the real clock
sink.setBucketClock(new SystemClock());
}
@Test
public void testAppend() throws InterruptedException, LifecycleException,
EventDeliveryException, IOException {
LOG.debug("Starting...");
final long rollCount = 3;
final long batchSize = 2;
final String fileName = "FlumeData";
// clear the test directory
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
Path dirPath = new Path(testPath);
fs.delete(dirPath, true);
fs.mkdirs(dirPath);
Context context = new Context();
context.put("hdfs.path", testPath + "/%Y-%m-%d/%H");
context.put("hdfs.timeZone", "UTC");
context.put("hdfs.filePrefix", fileName);
context.put("hdfs.rollCount", String.valueOf(rollCount));
context.put("hdfs.batchSize", String.valueOf(batchSize));
Configurables.configure(sink, context);
Channel channel = new MemoryChannel();
Configurables.configure(channel, context);
sink.setChannel(channel);
sink.start();
Calendar eventDate = Calendar.getInstance();
List<String> bodies = Lists.newArrayList();
// push the event batches into channel
for (int i = 1; i < 4; i++) {
Transaction txn = channel.getTransaction();
txn.begin();
for (int j = 1; j <= batchSize; j++) {
Event event = new SimpleEvent();
eventDate.clear();
eventDate.set(2011, i, i, i, 0); // yy mm dd
event.getHeaders().put("timestamp",
String.valueOf(eventDate.getTimeInMillis()));
event.getHeaders().put("hostname", "Host" + i);
String body = "Test." + i + "." + j;
event.setBody(body.getBytes());
bodies.add(body);
channel.put(event);
}
txn.commit();
txn.close();
// execute sink to process the events
sink.process();
}
sink.stop();
verifyOutputSequenceFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
}
// inject fault and make sure that the txn is rolled back and retried
@Test
public void testBadSimpleAppend() throws InterruptedException,
LifecycleException, EventDeliveryException, IOException {
LOG.debug("Starting...");
final String fileName = "FlumeData";
final long rollCount = 5;
final long batchSize = 2;
final int numBatches = 4;
String newPath = testPath + "/singleBucket";
int totalEvents = 0;
int i = 1, j = 1;
HDFSTestWriterFactory badWriterFactory = new HDFSTestWriterFactory();
sink = new HDFSEventSink(badWriterFactory);
// clear the test directory
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
Path dirPath = new Path(newPath);
fs.delete(dirPath, true);
fs.mkdirs(dirPath);
Context context = new Context();
context.put("hdfs.path", newPath);
context.put("hdfs.filePrefix", fileName);
context.put("hdfs.rollCount", String.valueOf(rollCount));
context.put("hdfs.batchSize", String.valueOf(batchSize));
context.put("hdfs.fileType", HDFSTestWriterFactory.TestSequenceFileType);
Configurables.configure(sink, context);
Channel channel = new MemoryChannel();
Configurables.configure(channel, context);
sink.setChannel(channel);
sink.start();
Calendar eventDate = Calendar.getInstance();
List<String> bodies = Lists.newArrayList();
// push the event batches into channel
for (i = 1; i < numBatches; i++) {
Transaction txn = channel.getTransaction();
txn.begin();
for (j = 1; j <= batchSize; j++) {
Event event = new SimpleEvent();
eventDate.clear();
eventDate.set(2011, i, i, i, 0); // yy mm dd
event.getHeaders().put("timestamp",
String.valueOf(eventDate.getTimeInMillis()));
event.getHeaders().put("hostname", "Host" + i);
String body = "Test." + i + "." + j;
event.setBody(body.getBytes());
bodies.add(body);
// inject fault
if ((totalEvents % 30) == 1) {
event.getHeaders().put("fault-once", "");
}
channel.put(event);
totalEvents++;
}
txn.commit();
txn.close();
LOG.info("Process events: " + sink.process());
}
LOG.info("Process events to end of transaction max: " + sink.process());
LOG.info("Process events to injected fault: " + sink.process());
LOG.info("Process events remaining events: " + sink.process());
sink.stop();
verifyOutputSequenceFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
SinkCounter sc = (SinkCounter) Whitebox.getInternalState(sink, "sinkCounter");
Assert.assertEquals(1, sc.getEventWriteFail());
}
private List<String> getAllFiles(String input) {
List<String> output = Lists.newArrayList();
File dir = new File(input);
if (dir.isFile()) {
output.add(dir.getAbsolutePath());
} else if (dir.isDirectory()) {
for (String file : dir.list()) {
File subDir = new File(dir, file);
output.addAll(getAllFiles(subDir.getAbsolutePath()));
}
}
return output;
}
private void verifyOutputSequenceFiles(FileSystem fs, Configuration conf, String dir,
String prefix, List<String> bodies) throws IOException {
int found = 0;
int expected = bodies.size();
for (String outputFile : getAllFiles(dir)) {
String name = (new File(outputFile)).getName();
if (name.startsWith(prefix)) {
SequenceFile.Reader reader = new SequenceFile.Reader(fs, new Path(outputFile), conf);
LongWritable key = new LongWritable();
BytesWritable value = new BytesWritable();
while (reader.next(key, value)) {
String body = new String(value.getBytes(), 0, value.getLength());
if (bodies.contains(body)) {
LOG.debug("Found event body: {}", body);
bodies.remove(body);
found++;
}
}
reader.close();
}
}
if (!bodies.isEmpty()) {
for (String body : bodies) {
LOG.error("Never found event body: {}", body);
}
}
Assert.assertTrue("Found = " + found + ", Expected = " +
expected + ", Left = " + bodies.size() + " " + bodies,
bodies.size() == 0);
}
private void verifyOutputTextFiles(FileSystem fs, Configuration conf, String dir, String prefix,
List<String> bodies) throws IOException {
int found = 0;
int expected = bodies.size();
for (String outputFile : getAllFiles(dir)) {
String name = (new File(outputFile)).getName();
if (name.startsWith(prefix)) {
FSDataInputStream input = fs.open(new Path(outputFile));
BufferedReader reader = new BufferedReader(new InputStreamReader(input));
String body = null;
while ((body = reader.readLine()) != null) {
bodies.remove(body);
found++;
}
reader.close();
}
}
Assert.assertTrue("Found = " + found + ", Expected = " +
expected + ", Left = " + bodies.size() + " " + bodies,
bodies.size() == 0);
}
private void verifyOutputAvroFiles(FileSystem fs, Configuration conf, String dir, String prefix,
List<String> bodies) throws IOException {
int found = 0;
int expected = bodies.size();
for (String outputFile : getAllFiles(dir)) {
String name = (new File(outputFile)).getName();
if (name.startsWith(prefix)) {
FSDataInputStream input = fs.open(new Path(outputFile));
DatumReader<GenericRecord> reader = new GenericDatumReader<GenericRecord>();
DataFileStream<GenericRecord> avroStream =
new DataFileStream<GenericRecord>(input, reader);
GenericRecord record = new GenericData.Record(avroStream.getSchema());
while (avroStream.hasNext()) {
avroStream.next(record);
ByteBuffer body = (ByteBuffer) record.get("body");
CharsetDecoder decoder = Charsets.UTF_8.newDecoder();
String bodyStr = decoder.decode(body).toString();
LOG.debug("Removing event: {}", bodyStr);
bodies.remove(bodyStr);
found++;
}
avroStream.close();
input.close();
}
}
Assert.assertTrue("Found = " + found + ", Expected = " +
expected + ", Left = " + bodies.size() + " " + bodies,
bodies.size() == 0);
}
/**
* Ensure that when a write throws an IOException we are
* able to continue to progress in the next process() call.
* This relies on Transactional rollback semantics for durability and
* the behavior of the BucketWriter class, which close()s upon IOException.
*/
@Test
public void testCloseReopen()
throws InterruptedException, LifecycleException, EventDeliveryException, IOException {
LOG.debug("Starting...");
final int numBatches = 4;
final String fileName = "FlumeData";
final long rollCount = 5;
final long batchSize = 2;
String newPath = testPath + "/singleBucket";
int i = 1, j = 1;
HDFSTestWriterFactory badWriterFactory = new HDFSTestWriterFactory();
sink = new HDFSEventSink(badWriterFactory);
// clear the test directory
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
Path dirPath = new Path(newPath);
fs.delete(dirPath, true);
fs.mkdirs(dirPath);
Context context = new Context();
context.put("hdfs.path", newPath);
context.put("hdfs.filePrefix", fileName);
context.put("hdfs.rollCount", String.valueOf(rollCount));
context.put("hdfs.batchSize", String.valueOf(batchSize));
context.put("hdfs.fileType", HDFSTestWriterFactory.TestSequenceFileType);
Configurables.configure(sink, context);
MemoryChannel channel = new MemoryChannel();
Configurables.configure(channel, new Context());
sink.setChannel(channel);
sink.start();
Calendar eventDate = Calendar.getInstance();
List<String> bodies = Lists.newArrayList();
// push the event batches into channel
for (i = 1; i < numBatches; i++) {
channel.getTransaction().begin();
try {
for (j = 1; j <= batchSize; j++) {
Event event = new SimpleEvent();
eventDate.clear();
eventDate.set(2011, i, i, i, 0); // yy mm dd
event.getHeaders().put("timestamp",
String.valueOf(eventDate.getTimeInMillis()));
event.getHeaders().put("hostname", "Host" + i);
String body = "Test." + i + "." + j;
event.setBody(body.getBytes());
bodies.add(body);
// inject fault
event.getHeaders().put("fault-until-reopen", "");
channel.put(event);
}
channel.getTransaction().commit();
} finally {
channel.getTransaction().close();
}
LOG.info("execute sink to process the events: " + sink.process());
}
LOG.info("clear any events pending due to errors: " + sink.process());
sink.stop();
verifyOutputSequenceFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
SinkCounter sc = (SinkCounter) Whitebox.getInternalState(sink, "sinkCounter");
Assert.assertEquals(1, sc.getEventWriteFail());
}
/**
* Test that the old bucket writer is closed at the end of rollInterval and
* a new one is used for the next set of events.
*/
@Test
public void testCloseReopenOnRollTime()
throws InterruptedException, LifecycleException, EventDeliveryException, IOException {
LOG.debug("Starting...");
final int numBatches = 4;
final String fileName = "FlumeData";
final long batchSize = 2;
String newPath = testPath + "/singleBucket";
int i = 1, j = 1;
HDFSTestWriterFactory badWriterFactory = new HDFSTestWriterFactory();
sink = new HDFSEventSink(badWriterFactory);
// clear the test directory
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
Path dirPath = new Path(newPath);
fs.delete(dirPath, true);
fs.mkdirs(dirPath);
Context context = new Context();
context.put("hdfs.path", newPath);
context.put("hdfs.filePrefix", fileName);
context.put("hdfs.rollCount", String.valueOf(0));
context.put("hdfs.rollSize", String.valueOf(0));
context.put("hdfs.rollInterval", String.valueOf(2));
context.put("hdfs.batchSize", String.valueOf(batchSize));
context.put("hdfs.fileType", HDFSTestWriterFactory.TestSequenceFileType);
Configurables.configure(sink, context);
MemoryChannel channel = new MemoryChannel();
Configurables.configure(channel, new Context());
sink.setChannel(channel);
sink.start();
Calendar eventDate = Calendar.getInstance();
List<String> bodies = Lists.newArrayList();
// push the event batches into channel
for (i = 1; i < numBatches; i++) {
channel.getTransaction().begin();
try {
for (j = 1; j <= batchSize; j++) {
Event event = new SimpleEvent();
eventDate.clear();
eventDate.set(2011, i, i, i, 0); // yy mm dd
event.getHeaders().put("timestamp",
String.valueOf(eventDate.getTimeInMillis()));
event.getHeaders().put("hostname", "Host" + i);
String body = "Test." + i + "." + j;
event.setBody(body.getBytes());
bodies.add(body);
// inject fault
event.getHeaders().put("count-check", "");
channel.put(event);
}
channel.getTransaction().commit();
} finally {
channel.getTransaction().close();
}
LOG.info("execute sink to process the events: " + sink.process());
// Make sure the first file gets rolled due to rollTimeout.
if (i == 1) {
Thread.sleep(2001);
}
}
LOG.info("clear any events pending due to errors: " + sink.process());
sink.stop();
Assert.assertTrue(badWriterFactory.openCount.get() >= 2);
LOG.info("Total number of bucket writers opened: {}",
badWriterFactory.openCount.get());
verifyOutputSequenceFiles(fs, conf, dirPath.toUri().getPath(), fileName,
bodies);
}
/**
* Test that a close due to roll interval removes the bucketwriter from
* sfWriters map.
*/
@Test
public void testCloseRemovesFromSFWriters()
throws InterruptedException, LifecycleException, EventDeliveryException, IOException {
LOG.debug("Starting...");
final String fileName = "FlumeData";
final long batchSize = 2;
String newPath = testPath + "/singleBucket";
int i = 1, j = 1;
HDFSTestWriterFactory badWriterFactory = new HDFSTestWriterFactory();
sink = new HDFSEventSink(badWriterFactory);
// clear the test directory
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
Path dirPath = new Path(newPath);
fs.delete(dirPath, true);
fs.mkdirs(dirPath);
Context context = new Context();
context.put("hdfs.path", newPath);
context.put("hdfs.filePrefix", fileName);
context.put("hdfs.rollCount", String.valueOf(0));
context.put("hdfs.rollSize", String.valueOf(0));
context.put("hdfs.rollInterval", String.valueOf(1));
context.put("hdfs.batchSize", String.valueOf(batchSize));
context.put("hdfs.fileType", HDFSTestWriterFactory.TestSequenceFileType);
String expectedLookupPath = newPath + "/FlumeData";
Configurables.configure(sink, context);
MemoryChannel channel = new MemoryChannel();
Configurables.configure(channel, new Context());
sink.setChannel(channel);
sink.start();
Calendar eventDate = Calendar.getInstance();
List<String> bodies = Lists.newArrayList();
// push the event batches into channel
channel.getTransaction().begin();
try {
for (j = 1; j <= 2 * batchSize; j++) {
Event event = new SimpleEvent();
eventDate.clear();
eventDate.set(2011, i, i, i, 0); // yy mm dd
event.getHeaders().put("timestamp",
String.valueOf(eventDate.getTimeInMillis()));
event.getHeaders().put("hostname", "Host" + i);
String body = "Test." + i + "." + j;
event.setBody(body.getBytes());
bodies.add(body);
// inject fault
event.getHeaders().put("count-check", "");
channel.put(event);
}
channel.getTransaction().commit();
} finally {
channel.getTransaction().close();
}
LOG.info("execute sink to process the events: " + sink.process());
Assert.assertTrue(sink.getSfWriters().containsKey(expectedLookupPath));
// Make sure the first file gets rolled due to rollTimeout.
Thread.sleep(2001);
Assert.assertFalse(sink.getSfWriters().containsKey(expectedLookupPath));
LOG.info("execute sink to process the events: " + sink.process());
// A new bucket writer should have been created for this bucket. So
// sfWriters map should not have the same key again.
Assert.assertTrue(sink.getSfWriters().containsKey(expectedLookupPath));
sink.stop();
LOG.info("Total number of bucket writers opened: {}",
badWriterFactory.openCount.get());
verifyOutputSequenceFiles(fs, conf, dirPath.toUri().getPath(), fileName,
bodies);
}
/*
* append using slow sink writer.
* verify that the process returns backoff due to timeout
*/
@Test
public void testSlowAppendFailure() throws InterruptedException,
LifecycleException, EventDeliveryException, IOException {
LOG.debug("Starting...");
final String fileName = "FlumeData";
final long rollCount = 5;
final long batchSize = 2;
final int numBatches = 2;
String newPath = testPath + "/singleBucket";
int i = 1, j = 1;
// clear the test directory
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
Path dirPath = new Path(newPath);
fs.delete(dirPath, true);
fs.mkdirs(dirPath);
// create HDFS sink with slow writer
HDFSTestWriterFactory badWriterFactory = new HDFSTestWriterFactory();
sink = new HDFSEventSink(badWriterFactory);
Context context = new Context();
context.put("hdfs.path", newPath);
context.put("hdfs.filePrefix", fileName);
context.put("hdfs.rollCount", String.valueOf(rollCount));
context.put("hdfs.batchSize", String.valueOf(batchSize));
context.put("hdfs.fileType", HDFSTestWriterFactory.TestSequenceFileType);
context.put("hdfs.callTimeout", Long.toString(1000));
Configurables.configure(sink, context);
Channel channel = new MemoryChannel();
Configurables.configure(channel, context);
sink.setChannel(channel);
sink.start();
Calendar eventDate = Calendar.getInstance();
// push the event batches into channel
for (i = 0; i < numBatches; i++) {
Transaction txn = channel.getTransaction();
txn.begin();
for (j = 1; j <= batchSize; j++) {
Event event = new SimpleEvent();
eventDate.clear();
eventDate.set(2011, i, i, i, 0); // yy mm dd
event.getHeaders().put("timestamp",
String.valueOf(eventDate.getTimeInMillis()));
event.getHeaders().put("hostname", "Host" + i);
event.getHeaders().put("slow", "1500");
event.setBody(("Test." + i + "." + j).getBytes());
channel.put(event);
}
txn.commit();
txn.close();
// execute sink to process the events
Status status = sink.process();
// verify that the append returned backoff due to timeout
Assert.assertEquals(status, Status.BACKOFF);
}
sink.stop();
SinkCounter sc = (SinkCounter) Whitebox.getInternalState(sink, "sinkCounter");
Assert.assertEquals(2, sc.getEventWriteFail());
}
/*
* append using slow sink writer with specified append timeout
* verify that the data is written correctly to files
*/
private void slowAppendTestHelper(long appendTimeout)
throws InterruptedException, IOException, LifecycleException, EventDeliveryException,
IOException {
final String fileName = "FlumeData";
final long rollCount = 5;
final long batchSize = 2;
final int numBatches = 2;
String newPath = testPath + "/singleBucket";
int totalEvents = 0;
int i = 1, j = 1;
// clear the test directory
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
Path dirPath = new Path(newPath);
fs.delete(dirPath, true);
fs.mkdirs(dirPath);
// create HDFS sink with slow writer
HDFSTestWriterFactory badWriterFactory = new HDFSTestWriterFactory();
sink = new HDFSEventSink(badWriterFactory);
Context context = new Context();
context.put("hdfs.path", newPath);
context.put("hdfs.filePrefix", fileName);
context.put("hdfs.rollCount", String.valueOf(rollCount));
context.put("hdfs.batchSize", String.valueOf(batchSize));
context.put("hdfs.fileType", HDFSTestWriterFactory.TestSequenceFileType);
context.put("hdfs.appendTimeout", String.valueOf(appendTimeout));
Configurables.configure(sink, context);
Channel channel = new MemoryChannel();
Configurables.configure(channel, context);
sink.setChannel(channel);
sink.start();
Calendar eventDate = Calendar.getInstance();
List<String> bodies = Lists.newArrayList();
// push the event batches into channel
for (i = 0; i < numBatches; i++) {
Transaction txn = channel.getTransaction();
txn.begin();
for (j = 1; j <= batchSize; j++) {
Event event = new SimpleEvent();
eventDate.clear();
eventDate.set(2011, i, i, i, 0); // yy mm dd
event.getHeaders().put("timestamp",
String.valueOf(eventDate.getTimeInMillis()));
event.getHeaders().put("hostname", "Host" + i);
event.getHeaders().put("slow", "1500");
String body = "Test." + i + "." + j;
event.setBody(body.getBytes());
bodies.add(body);
channel.put(event);
totalEvents++;
}
txn.commit();
txn.close();
// execute sink to process the events
sink.process();
}
sink.stop();
// loop through all the files generated and check their contents
FileStatus[] dirStat = fs.listStatus(dirPath);
Path[] fList = FileUtil.stat2Paths(dirStat);
// check that the roll happened correctly for the given data
// Note that we'll end up with two files with only a header
long expectedFiles = totalEvents / rollCount;
if (totalEvents % rollCount > 0) expectedFiles++;
Assert.assertEquals("num files wrong, found: " +
Lists.newArrayList(fList), expectedFiles, fList.length);
verifyOutputSequenceFiles(fs, conf, dirPath.toUri().getPath(), fileName, bodies);
}
/*
* append using slow sink writer with long append timeout
* verify that the data is written correctly to files
*/
@Test
public void testSlowAppendWithLongTimeout() throws InterruptedException,
LifecycleException, EventDeliveryException, IOException {
LOG.debug("Starting...");
slowAppendTestHelper(3000);
}
/*
* append using slow sink writer with no timeout to make append
* synchronous. Verify that the data is written correctly to files
*/
@Test
public void testSlowAppendWithoutTimeout() throws InterruptedException,
LifecycleException, EventDeliveryException, IOException {
LOG.debug("Starting...");
slowAppendTestHelper(0);
}
@Test
public void testCloseOnIdle() throws IOException, EventDeliveryException, InterruptedException {
String hdfsPath = testPath + "/idleClose";
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
Path dirPath = new Path(hdfsPath);
fs.delete(dirPath, true);
fs.mkdirs(dirPath);
Context context = new Context();
context.put("hdfs.path", hdfsPath);
/*
* All three rolling methods are disabled so the only
* way a file can roll is through the idle timeout.
*/
context.put("hdfs.rollCount", "0");
context.put("hdfs.rollSize", "0");
context.put("hdfs.rollInterval", "0");
context.put("hdfs.batchSize", "2");
context.put("hdfs.idleTimeout", "1");
Configurables.configure(sink, context);
Channel channel = new MemoryChannel();
Configurables.configure(channel, context);
sink.setChannel(channel);
sink.start();
Transaction txn = channel.getTransaction();
txn.begin();
for (int i = 0; i < 10; i++) {
Event event = new SimpleEvent();
event.setBody(("test event " + i).getBytes());
channel.put(event);
}
txn.commit();
txn.close();
sink.process();
sink.process();
Thread.sleep(1001);
// previous file should have timed out now
// this can throw BucketClosedException (from the bucketWriter having
// closed); this is not an issue as the sink will retry and get a fresh
// bucketWriter so long as the onClose handler properly removes
// bucket writers that were closed.
sink.process();
sink.process();
Thread.sleep(500); // shouldn't be enough for a timeout to occur
sink.process();
sink.process();
sink.stop();
FileStatus[] dirStat = fs.listStatus(dirPath);
Path[] fList = FileUtil.stat2Paths(dirStat);
Assert.assertEquals("Incorrect content of the directory " + StringUtils.join(fList, ","),
2, fList.length);
Assert.assertTrue(!fList[0].getName().endsWith(".tmp") &&
!fList[1].getName().endsWith(".tmp"));
fs.close();
}
/**
* This test simulates what happens when a batch of events is written to a compressed sequence
* file (and thus hsync'd to hdfs) but the file is not yet closed.
*
* When this happens, the data that we wrote should still be readable.
*/
@Test
public void testBlockCompressSequenceFileWriterSync() throws IOException, EventDeliveryException {
String hdfsPath = testPath + "/sequenceFileWriterSync";
FileSystem fs = FileSystem.get(new Configuration());
// Since we are reading a partial file we don't want to use checksums
fs.setVerifyChecksum(false);
fs.setWriteChecksum(false);
// Compression codecs that don't require native hadoop libraries
String [] codecs = {"BZip2Codec", "DeflateCodec"};
for (String codec : codecs) {
sequenceFileWriteAndVerifyEvents(fs, hdfsPath, codec, Collections.singletonList(
"single-event"
));
sequenceFileWriteAndVerifyEvents(fs, hdfsPath, codec, Arrays.asList(
"multiple-events-1",
"multiple-events-2",
"multiple-events-3",
"multiple-events-4",
"multiple-events-5"
));
}
fs.close();
}
private void sequenceFileWriteAndVerifyEvents(FileSystem fs, String hdfsPath, String codec,
Collection<String> eventBodies)
throws IOException, EventDeliveryException {
Path dirPath = new Path(hdfsPath);
fs.delete(dirPath, true);
fs.mkdirs(dirPath);
Context context = new Context();
context.put("hdfs.path", hdfsPath);
// Ensure the file isn't closed and rolled
context.put("hdfs.rollCount", String.valueOf(eventBodies.size() + 1));
context.put("hdfs.rollSize", "0");
context.put("hdfs.rollInterval", "0");
context.put("hdfs.batchSize", "1");
context.put("hdfs.fileType", "SequenceFile");
context.put("hdfs.codeC", codec);
context.put("hdfs.writeFormat", "Writable");
Configurables.configure(sink, context);
Channel channel = new MemoryChannel();
Configurables.configure(channel, context);
sink.setChannel(channel);
sink.start();
for (String eventBody : eventBodies) {
Transaction txn = channel.getTransaction();
txn.begin();
Event event = new SimpleEvent();
event.setBody(eventBody.getBytes());
channel.put(event);
txn.commit();
txn.close();
sink.process();
}
// Sink is _not_ closed. The file should remain open but
// the data written should be visible to readers via sync + hflush
FileStatus[] dirStat = fs.listStatus(dirPath);
Path[] paths = FileUtil.stat2Paths(dirStat);
Assert.assertEquals(1, paths.length);
SequenceFile.Reader reader =
new SequenceFile.Reader(fs.getConf(), SequenceFile.Reader.stream(fs.open(paths[0])));
LongWritable key = new LongWritable();
BytesWritable value = new BytesWritable();
for (String eventBody : eventBodies) {
Assert.assertTrue(reader.next(key, value));
Assert.assertArrayEquals(eventBody.getBytes(), value.copyBytes());
}
Assert.assertFalse(reader.next(key, value));
}
private Context getContextForRetryTests() {
Context context = new Context();
context.put("hdfs.path", testPath + "/%{retryHeader}");
context.put("hdfs.filePrefix", "test");
context.put("hdfs.batchSize", String.valueOf(100));
context.put("hdfs.fileType", "DataStream");
context.put("hdfs.serializer", "text");
context.put("hdfs.closeTries","3");
context.put("hdfs.rollCount", "1");
context.put("hdfs.retryInterval", "1");
return context;
}
@Test
public void testBadConfigurationForRetryIntervalZero() throws Exception {
Context context = getContextForRetryTests();
context.put("hdfs.retryInterval", "0");
Configurables.configure(sink, context);
Assert.assertEquals(1, sink.getTryCount());
}
@Test
public void testBadConfigurationForRetryIntervalNegative() throws Exception {
Context context = getContextForRetryTests();
context.put("hdfs.retryInterval", "-1");
Configurables.configure(sink, context);
Assert.assertEquals(1, sink.getTryCount());
}
@Test
public void testBadConfigurationForRetryCountZero() throws Exception {
Context context = getContextForRetryTests();
context.put("hdfs.closeTries" ,"0");
Configurables.configure(sink, context);
Assert.assertEquals(Integer.MAX_VALUE, sink.getTryCount());
}
@Test
public void testBadConfigurationForRetryCountNegative() throws Exception {
Context context = getContextForRetryTests();
context.put("hdfs.closeTries" ,"-4");
Configurables.configure(sink, context);
Assert.assertEquals(Integer.MAX_VALUE, sink.getTryCount());
}
@Test
public void testRetryRename()
throws InterruptedException, LifecycleException, EventDeliveryException, IOException {
testRetryRename(true);
testRetryRename(false);
}
private void testRetryRename(boolean closeSucceed)
throws InterruptedException, LifecycleException, EventDeliveryException, IOException {
LOG.debug("Starting...");
String newPath = testPath + "/retryBucket";
// clear the test directory
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(conf);
Path dirPath = new Path(newPath);
fs.delete(dirPath, true);
fs.mkdirs(dirPath);
MockFileSystem mockFs = new MockFileSystem(fs, 6, closeSucceed);
Context context = getContextForRetryTests();
Configurables.configure(sink, context);
Channel channel = new MemoryChannel();
Configurables.configure(channel, context);
sink.setChannel(channel);
sink.setMockFs(mockFs);
HDFSWriter hdfsWriter = new MockDataStream(mockFs);
hdfsWriter.configure(context);
sink.setMockWriter(hdfsWriter);
sink.start();
// push the event batches into channel
for (int i = 0; i < 2; i++) {
Transaction txn = channel.getTransaction();
txn.begin();
Map<String, String> hdr = Maps.newHashMap();
hdr.put("retryHeader", "v1");
channel.put(EventBuilder.withBody("random".getBytes(), hdr));
txn.commit();
txn.close();
// execute sink to process the events
sink.process();
}
// push the event batches into channel
for (int i = 0; i < 2; i++) {
Transaction txn = channel.getTransaction();
txn.begin();
Map<String, String> hdr = Maps.newHashMap();
hdr.put("retryHeader", "v2");
channel.put(EventBuilder.withBody("random".getBytes(), hdr));
txn.commit();
txn.close();
// execute sink to process the events
sink.process();
}
TimeUnit.SECONDS.sleep(5); //Sleep till all retries are done.
Collection<BucketWriter> writers = sink.getSfWriters().values();
int totalRenameAttempts = 0;
for (BucketWriter writer : writers) {
LOG.info("Rename tries = " + writer.renameTries.get());
totalRenameAttempts += writer.renameTries.get();
}
// stop clears the sfWriters map, so we need to compute the
// close tries count before stopping the sink.
sink.stop();
Assert.assertEquals(6, totalRenameAttempts);
}
/**
* BucketWriter.append() can throw a BucketClosedException when called from
* HDFSEventSink.process() due to a race condition between HDFSEventSink.process() and the
* BucketWriter's close threads.
 * This test verifies that, if this happens, the newly created BucketWriter is still flushed.
* For more details see FLUME-3085
*/
@Test
public void testFlushedIfAppendFailedWithBucketClosedException() throws Exception {
final Set<BucketWriter> bucketWriters = new HashSet<>();
sink = new HDFSEventSink() {
@Override
BucketWriter initializeBucketWriter(String realPath, String realName, String lookupPath,
HDFSWriter hdfsWriter, WriterCallback closeCallback) {
BucketWriter bw = Mockito.spy(super.initializeBucketWriter(realPath, realName, lookupPath,
hdfsWriter, closeCallback));
try {
          // create mock BucketWriters where the first append() succeeds but the
          // second call throws a BucketClosedException
Mockito.doCallRealMethod()
.doThrow(BucketClosedException.class)
.when(bw).append(Mockito.any(Event.class));
} catch (IOException | InterruptedException e) {
Assert.fail("This shouldn't happen, as append() is called during mocking.");
}
bucketWriters.add(bw);
return bw;
}
};
Context context = new Context(ImmutableMap.of("hdfs.path", testPath));
Configurables.configure(sink, context);
Channel channel = Mockito.spy(new MemoryChannel());
Configurables.configure(channel, new Context());
final Iterator<Event> events = Iterators.forArray(
EventBuilder.withBody("test1".getBytes()), EventBuilder.withBody("test2".getBytes()));
Mockito.doAnswer(new Answer() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
return events.hasNext() ? events.next() : null;
}
}).when(channel).take();
sink.setChannel(channel);
sink.start();
sink.process();
    // channel.take() should have been called 3 times (2 events + 1 null)
Mockito.verify(channel, Mockito.times(3)).take();
FileSystem fs = FileSystem.get(new Configuration());
int fileCount = 0;
for (RemoteIterator<LocatedFileStatus> i = fs.listFiles(new Path(testPath), false);
i.hasNext(); i.next()) {
fileCount++;
}
Assert.assertEquals(2, fileCount);
Assert.assertEquals(2, bucketWriters.size());
// It is expected that flush() method was called exactly once for every BucketWriter
for (BucketWriter bw : bucketWriters) {
Mockito.verify(bw, Mockito.times(1)).flush();
}
sink.stop();
}
@Test
public void testChannelException() {
LOG.debug("Starting...");
Context context = new Context();
context.put("hdfs.path", testPath);
context.put("keep-alive", "0");
Configurables.configure(sink, context);
Channel channel = Mockito.mock(Channel.class);
Mockito.when(channel.take()).thenThrow(new ChannelException("dummy"));
Mockito.when(channel.getTransaction())
.thenReturn(Mockito.mock(BasicTransactionSemantics.class));
sink.setChannel(channel);
sink.start();
try {
sink.process();
} catch (EventDeliveryException e) {
//
}
sink.stop();
SinkCounter sc = (SinkCounter) Whitebox.getInternalState(sink, "sinkCounter");
Assert.assertEquals(1, sc.getChannelReadFail());
}
@Test
public void testEmptyInUseSuffix() {
String inUseSuffixConf = "aaaa";
Context context = new Context();
context.put("hdfs.path", testPath);
context.put("hdfs.inUseSuffix", inUseSuffixConf);
    // hdfs.emptyInUseSuffix not defined
Configurables.configure(sink, context);
String inUseSuffix = (String) Whitebox.getInternalState(sink, "inUseSuffix");
Assert.assertEquals(inUseSuffixConf, inUseSuffix);
context.put("hdfs.emptyInUseSuffix", "true");
Configurables.configure(sink, context);
inUseSuffix = (String) Whitebox.getInternalState(sink, "inUseSuffix");
Assert.assertEquals("", inUseSuffix);
context.put("hdfs.emptyInUseSuffix", "false");
Configurables.configure(sink, context);
inUseSuffix = (String) Whitebox.getInternalState(sink, "inUseSuffix");
Assert.assertEquals(inUseSuffixConf, inUseSuffix);
}
}
| [
"\"hdfs_keepFiles\""
]
| []
| [
"hdfs_keepFiles"
]
| [] | ["hdfs_keepFiles"] | java | 1 | 0 | |
enterprise/internal/campaigns/webhooks_test.go | package campaigns
import (
"bytes"
"context"
"crypto/hmac"
"crypto/sha256"
"database/sql"
"encoding/hex"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"path"
"path/filepath"
"strings"
"testing"
"time"
"github.com/dnaeon/go-vcr/cassette"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/sourcegraph/sourcegraph/cmd/repo-updater/repos"
"github.com/sourcegraph/sourcegraph/internal/campaigns"
"github.com/sourcegraph/sourcegraph/internal/httpcli"
"github.com/sourcegraph/sourcegraph/internal/httptestutil"
"github.com/sourcegraph/sourcegraph/internal/rcache"
"github.com/sourcegraph/sourcegraph/schema"
)
var update = flag.Bool("update", false, "update testdata")
// Run from integration_test.go
func testGitHubWebhook(db *sql.DB) func(*testing.T) {
return func(t *testing.T) {
now := time.Now().UTC().Truncate(time.Microsecond)
clock := func() time.Time { return now }
ctx := context.Background()
rcache.SetupForTest(t)
cf, save := newGithubClientFactory(t, "github-webhooks")
defer save()
var userID int32
err := db.QueryRow("INSERT INTO users (username) VALUES ('admin') RETURNING id").Scan(&userID)
if err != nil {
t.Fatal(err)
}
secret := "secret"
repoStore := repos.NewDBStore(db, sql.TxOptions{})
extSvc := &repos.ExternalService{
Kind: "GITHUB",
DisplayName: "GitHub",
Config: marshalJSON(t, &schema.GitHubConnection{
Url: "https://github.com",
Token: os.Getenv("GITHUB_TOKEN"),
Repos: []string{"sourcegraph/sourcegraph"},
Webhooks: []*schema.GitHubWebhook{{Org: "sourcegraph", Secret: secret}},
}),
}
err = repoStore.UpsertExternalServices(ctx, extSvc)
if err != nil {
			t.Fatal(err)
}
githubSrc, err := repos.NewGithubSource(extSvc, cf, nil)
if err != nil {
			t.Fatal(err)
}
githubRepo, err := githubSrc.GetRepo(ctx, "sourcegraph/sourcegraph")
if err != nil {
t.Fatal(err)
}
err = repoStore.UpsertRepos(ctx, githubRepo)
if err != nil {
t.Fatal(err)
}
store := NewStoreWithClock(db, clock)
campaign := &campaigns.Campaign{
Name: "Test campaign",
Description: "Testing THE WEBHOOKS",
AuthorID: userID,
NamespaceUserID: userID,
}
err = store.CreateCampaign(ctx, campaign)
if err != nil {
t.Fatal(err)
}
changesets := []*campaigns.Changeset{
{
RepoID: githubRepo.ID,
ExternalID: "10156",
ExternalServiceType: githubRepo.ExternalRepo.ServiceType,
CampaignIDs: []int64{campaign.ID},
},
}
err = store.CreateChangesets(ctx, changesets...)
if err != nil {
t.Fatal(err)
}
err = SyncChangesets(ctx, repoStore, store, cf, changesets...)
if err != nil {
t.Fatal(err)
}
hook := NewGitHubWebhook(store, repoStore, clock)
fixtureFiles, err := filepath.Glob("testdata/fixtures/webhooks/github/*.json")
if err != nil {
t.Fatal(err)
}
for _, fixtureFile := range fixtureFiles {
_, name := path.Split(fixtureFile)
name = strings.TrimSuffix(name, ".json")
t.Run(name, func(t *testing.T) {
_, err = db.Exec("ALTER SEQUENCE changeset_events_id_seq RESTART")
if err != nil {
t.Fatal(err)
}
_, err = db.Exec("TRUNCATE TABLE changeset_events")
if err != nil {
t.Fatal(err)
}
tc := loadWebhookTestCase(t, fixtureFile)
// Send all events twice to ensure we are idempotent
for i := 0; i < 2; i++ {
for _, event := range tc.Payloads {
req, err := http.NewRequest("POST", "", bytes.NewReader(event.Data))
if err != nil {
t.Fatal(err)
}
req.Header.Set("X-Github-Event", event.PayloadType)
req.Header.Set("X-Hub-Signature", sign(t, event.Data, []byte(secret)))
rec := httptest.NewRecorder()
hook.ServeHTTP(rec, req)
resp := rec.Result()
if resp.StatusCode != http.StatusOK {
t.Fatalf("Non 200 code: %v", resp.StatusCode)
}
}
}
have, _, err := store.ListChangesetEvents(ctx, ListChangesetEventsOpts{Limit: -1})
if err != nil {
t.Fatal(err)
}
// Overwrite and format test case
if *update {
tc.ChangesetEvents = have
data, err := json.MarshalIndent(tc, " ", " ")
if err != nil {
t.Fatal(err)
}
err = ioutil.WriteFile(fixtureFile, data, 0666)
if err != nil {
t.Fatal(err)
}
}
opts := []cmp.Option{
cmpopts.IgnoreFields(campaigns.ChangesetEvent{}, "CreatedAt"),
cmpopts.IgnoreFields(campaigns.ChangesetEvent{}, "UpdatedAt"),
}
if diff := cmp.Diff(tc.ChangesetEvents, have, opts...); diff != "" {
t.Error(diff)
}
})
}
}
}
// Run from integration_test.go
func testBitbucketWebhook(db *sql.DB) func(*testing.T) {
return func(t *testing.T) {
now := time.Now().UTC().Truncate(time.Microsecond)
clock := func() time.Time { return now }
ctx := context.Background()
rcache.SetupForTest(t)
cf, save := newGithubClientFactory(t, "bitbucket-webhooks")
defer save()
var userID int32
err := db.QueryRow("INSERT INTO users (username) VALUES ('admin') RETURNING id").Scan(&userID)
if err != nil {
t.Fatal(err)
}
secret := "secret"
repoStore := repos.NewDBStore(db, sql.TxOptions{})
extSvc := &repos.ExternalService{
Kind: "BITBUCKETSERVER",
DisplayName: "Bitbucket",
Config: marshalJSON(t, &schema.BitbucketServerConnection{
Url: "https://bitbucket.sgdev.org",
Token: os.Getenv("BITBUCKET_SERVER_TOKEN"),
Repos: []string{"SOUR/automation-testing"},
Webhooks: &schema.Webhooks{
Secret: secret,
},
}),
}
err = repoStore.UpsertExternalServices(ctx, extSvc)
if err != nil {
			t.Fatal(err)
}
bitbucketSource, err := repos.NewBitbucketServerSource(extSvc, cf, nil)
if err != nil {
			t.Fatal(err)
}
bitbucketRepo, err := getSingleRepo(ctx, bitbucketSource, "bitbucket.sgdev.org/SOUR/automation-testing")
if err != nil {
t.Fatal(err)
}
if bitbucketRepo == nil {
t.Fatal("repo not found")
}
err = repoStore.UpsertRepos(ctx, bitbucketRepo)
if err != nil {
t.Fatal(err)
}
store := NewStoreWithClock(db, clock)
campaign := &campaigns.Campaign{
Name: "Test campaign",
Description: "Testing THE WEBHOOKS",
AuthorID: userID,
NamespaceUserID: userID,
}
err = store.CreateCampaign(ctx, campaign)
if err != nil {
t.Fatal(err)
}
changesets := []*campaigns.Changeset{
{
RepoID: bitbucketRepo.ID,
ExternalID: "69",
ExternalServiceType: bitbucketRepo.ExternalRepo.ServiceType,
CampaignIDs: []int64{campaign.ID},
},
{
RepoID: bitbucketRepo.ID,
ExternalID: "19",
ExternalServiceType: bitbucketRepo.ExternalRepo.ServiceType,
CampaignIDs: []int64{campaign.ID},
},
}
err = store.CreateChangesets(ctx, changesets...)
if err != nil {
t.Fatal(err)
}
err = SyncChangesets(ctx, repoStore, store, cf, changesets...)
if err != nil {
t.Fatal(err)
}
hook := NewBitbucketServerWebhook(store, repoStore, clock, "testhook")
fixtureFiles, err := filepath.Glob("testdata/fixtures/webhooks/bitbucketserver/*.json")
if err != nil {
t.Fatal(err)
}
for _, fixtureFile := range fixtureFiles {
_, name := path.Split(fixtureFile)
name = strings.TrimSuffix(name, ".json")
t.Run(name, func(t *testing.T) {
_, err = db.Exec("ALTER SEQUENCE changeset_events_id_seq RESTART")
if err != nil {
t.Fatal(err)
}
_, err = db.Exec("TRUNCATE TABLE changeset_events")
if err != nil {
t.Fatal(err)
}
tc := loadWebhookTestCase(t, fixtureFile)
// Send all events twice to ensure we are idempotent
for i := 0; i < 2; i++ {
for _, event := range tc.Payloads {
u := fmt.Sprintf("http://example.com/?%s=%d", externalServiceIDParam, extSvc.ID)
req, err := http.NewRequest("POST", u, bytes.NewReader(event.Data))
if err != nil {
t.Fatal(err)
}
req.Header.Set("X-Event-Key", event.PayloadType)
req.Header.Set("X-Hub-Signature", sign(t, event.Data, []byte(secret)))
rec := httptest.NewRecorder()
hook.ServeHTTP(rec, req)
resp := rec.Result()
if resp.StatusCode != http.StatusOK {
t.Fatalf("Non 200 code: %v", resp.StatusCode)
}
}
}
have, _, err := store.ListChangesetEvents(ctx, ListChangesetEventsOpts{Limit: -1})
if err != nil {
t.Fatal(err)
}
// Overwrite and format test case
if *update {
tc.ChangesetEvents = have
data, err := json.MarshalIndent(tc, " ", " ")
if err != nil {
t.Fatal(err)
}
err = ioutil.WriteFile(fixtureFile, data, 0666)
if err != nil {
t.Fatal(err)
}
}
opts := []cmp.Option{
cmpopts.IgnoreFields(campaigns.ChangesetEvent{}, "CreatedAt"),
cmpopts.IgnoreFields(campaigns.ChangesetEvent{}, "UpdatedAt"),
}
if diff := cmp.Diff(tc.ChangesetEvents, have, opts...); diff != "" {
t.Error(diff)
}
})
}
}
}
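// getSingleRepo streams every repository exposed by the Bitbucket Server source and returns
// the one whose name matches, or nil if no listed repository has that name.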
func getSingleRepo(ctx context.Context, bitbucketSource *repos.BitbucketServerSource, name string) (*repos.Repo, error) {
repoChan := make(chan repos.SourceResult)
go func() {
bitbucketSource.ListRepos(ctx, repoChan)
close(repoChan)
}()
var bitbucketRepo *repos.Repo
for result := range repoChan {
if result.Err != nil {
return nil, result.Err
}
if result.Repo == nil {
continue
}
if result.Repo.Name == name {
bitbucketRepo = result.Repo
}
}
return bitbucketRepo, nil
}
type webhookTestCase struct {
Payloads []struct {
PayloadType string `json:"payload_type"`
Data json.RawMessage `json:"data"`
} `json:"payloads"`
ChangesetEvents []*campaigns.ChangesetEvent `json:"changeset_events"`
}
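// loadWebhookTestCase reads a JSON fixture and re-hydrates each changeset event's Metadata
// into the concrete type for its Kind, by re-marshalling the raw metadata and unmarshalling it
// into the struct returned by campaigns.NewChangesetEventMetadata.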
func loadWebhookTestCase(t testing.TB, path string) webhookTestCase {
t.Helper()
bs, err := ioutil.ReadFile(path)
if err != nil {
t.Fatal(err)
}
var tc webhookTestCase
if err := json.Unmarshal(bs, &tc); err != nil {
t.Fatal(err)
}
for i, ev := range tc.ChangesetEvents {
meta, err := campaigns.NewChangesetEventMetadata(ev.Kind)
if err != nil {
t.Fatal(err)
}
raw, err := json.Marshal(ev.Metadata)
if err != nil {
t.Fatal(err)
}
err = json.Unmarshal(raw, &meta)
if err != nil {
t.Fatal(err)
}
tc.ChangesetEvents[i].Metadata = meta
}
return tc
}
func TestBitbucketWebhookUpsert(t *testing.T) {
testCases := []struct {
name string
con *schema.BitbucketServerConnection
secrets map[int64]string
expect []string
}{
{
name: "No existing secret",
con: &schema.BitbucketServerConnection{
Plugin: &schema.BitbucketServerPlugin{
Permissions: "",
Webhooks: &schema.BitbucketServerPluginWebhooks{
Secret: "secret",
},
},
},
secrets: map[int64]string{},
expect: []string{"POST"},
},
{
name: "existing secret matches",
con: &schema.BitbucketServerConnection{
Plugin: &schema.BitbucketServerPlugin{
Permissions: "",
Webhooks: &schema.BitbucketServerPluginWebhooks{
Secret: "secret",
},
},
},
secrets: map[int64]string{
1: "secret",
},
expect: []string{},
},
{
			name: "existing secret does not match",
con: &schema.BitbucketServerConnection{
Plugin: &schema.BitbucketServerPlugin{
Permissions: "",
Webhooks: &schema.BitbucketServerPluginWebhooks{
Secret: "secret",
},
},
},
secrets: map[int64]string{
1: "old",
},
expect: []string{"POST"},
},
{
name: "secret removed",
con: &schema.BitbucketServerConnection{
Plugin: &schema.BitbucketServerPlugin{
Permissions: "",
Webhooks: &schema.BitbucketServerPluginWebhooks{
Secret: "",
},
},
},
secrets: map[int64]string{
1: "old",
},
expect: []string{"DELETE"},
},
{
name: "secret removed, no history",
con: &schema.BitbucketServerConnection{
Plugin: &schema.BitbucketServerPlugin{
Permissions: "",
Webhooks: &schema.BitbucketServerPluginWebhooks{
Secret: "",
},
},
},
secrets: map[int64]string{},
expect: []string{"DELETE"},
},
{
name: "secret removed, with history",
con: &schema.BitbucketServerConnection{
Plugin: &schema.BitbucketServerPlugin{
Permissions: "",
Webhooks: &schema.BitbucketServerPluginWebhooks{
Secret: "",
},
},
},
secrets: map[int64]string{
1: "",
},
expect: []string{},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
rec := new(requestRecorder)
h := NewBitbucketServerWebhook(nil, nil, time.Now, "testhook")
h.secrets = tc.secrets
h.httpClient = rec
err := h.syncWebhook(1, tc.con, "http://example.com/")
if err != nil {
t.Fatal(err)
}
methods := make([]string, len(rec.requests))
for i := range rec.requests {
methods[i] = rec.requests[i].Method
}
if diff := cmp.Diff(tc.expect, methods); diff != "" {
t.Fatal(diff)
}
})
}
}
type requestRecorder struct {
requests []*http.Request
}
func (r *requestRecorder) Do(req *http.Request) (*http.Response, error) {
r.requests = append(r.requests, req)
return &http.Response{
Status: http.StatusText(http.StatusOK),
StatusCode: http.StatusOK,
Body: ioutil.NopCloser(strings.NewReader("")),
}, nil
}
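// sign computes the webhook signature header value for the given payload: the algorithm
// prefix "sha256=" followed by the lowercase hex HMAC-SHA256 digest keyed with the secret.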
func sign(t *testing.T, message, secret []byte) string {
t.Helper()
mac := hmac.New(sha256.New, secret)
_, err := mac.Write(message)
if err != nil {
t.Fatalf("writing hmac message failed: %s", err)
}
return "sha256=" + hex.EncodeToString(mac.Sum(nil))
}
func marshalJSON(t testing.TB, v interface{}) string {
t.Helper()
bs, err := json.Marshal(v)
if err != nil {
t.Fatal(err)
}
return string(bs)
}
func newGithubClientFactory(t testing.TB, name string) (*httpcli.Factory, func()) {
t.Helper()
	cassettePath := filepath.Join("testdata/vcr/", strings.Replace(name, " ", "-", -1))
	rec, err := httptestutil.NewRecorder(cassettePath, *update, func(i *cassette.Interaction) error {
return nil
})
if err != nil {
t.Fatal(err)
}
mw := httpcli.NewMiddleware(githubProxyRedirectMiddleware)
hc := httpcli.NewFactory(mw, httptestutil.NewRecorderOpt(rec))
return hc, func() {
if err := rec.Stop(); err != nil {
t.Errorf("failed to update test data: %s", err)
}
}
}
func githubProxyRedirectMiddleware(cli httpcli.Doer) httpcli.Doer {
return httpcli.DoerFunc(func(req *http.Request) (*http.Response, error) {
if req.URL.Hostname() == "github-proxy" {
req.URL.Host = "api.github.com"
req.URL.Scheme = "https"
}
return cli.Do(req)
})
}
| [
"\"GITHUB_TOKEN\"",
"\"BITBUCKET_SERVER_TOKEN\""
]
| []
| [
"BITBUCKET_SERVER_TOKEN",
"GITHUB_TOKEN"
]
| [] | ["BITBUCKET_SERVER_TOKEN", "GITHUB_TOKEN"] | go | 2 | 0 | |
e2efold_rt/experiment_rnastralign/e2e_learning_stage1_rnastralign_all_long.py | import os
from e2efold.common.config import process_config
from e2efold.common.utils import get_args
args = get_args()
config_file = args.config
config = process_config(config_file)
print("#####Stage 1#####")
print('Here is the configuration of this run: ')
print(config)
os.environ["CUDA_VISIBLE_DEVICES"]= config.gpu
import torch.optim as optim
from torch.utils import data
from e2efold.models import ContactNetwork, ContactNetwork_test, ContactNetwork_fc
from e2efold.models import ContactAttention, ContactAttention_simple_fix_PE
from e2efold.models import ContactAttention_simple
from e2efold.common.utils import *
from e2efold.common.long_seq_pre_post_process import *
from e2efold.postprocess import postprocess
d = config.u_net_d
BATCH_SIZE = config.batch_size_stage_1
OUT_STEP = config.OUT_STEP
LOAD_MODEL = config.LOAD_MODEL
pp_steps = config.pp_steps
data_type = config.data_type
model_type = config.model_type
model_path = '../models_ckpt/supervised_{}_{}_d{}_l3.pt'.format(model_type, data_type,d)
epoches_first = config.epoches_first
evaluate_epi = config.evaluate_epi_stage_1
steps_done = 0
# if gpu is to be used
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
seed_torch()
# for loading data
# load the RNA secondary-structure data; the data has been preprocessed
# the 5S data is only demo data without pseudoknots; another dataset containing pseudoknots will be generated
from e2efold.data_generator import RNASSDataGenerator, Dataset, Dataset_1800
import collections
RNA_SS_data = collections.namedtuple('RNA_SS_data',
'seq ss_label length name pairs')
train_data = RNASSDataGenerator('../data/{}/'.format(data_type), 'train_600')
val_data = RNASSDataGenerator('../data/{}/'.format(data_type), 'val_600')
if data_type == 'archiveII_all':
test_data = RNASSDataGenerator('../data/{}/'.format(data_type), 'test_600')
if data_type == 'rnastralign_all':
test_data = RNASSDataGenerator('../data/{}/'.format(data_type), 'test_no_redundant_600')
train_data_1800 = RNASSDataGenerator('../data/{}/'.format(data_type), 'train_1800')
val_data_1800 = RNASSDataGenerator('../data/{}/'.format(data_type), 'val_1800')
if data_type == 'archiveII_all':
test_data_1800 = RNASSDataGenerator('../data/{}/'.format(data_type), 'test_1800')
if data_type == 'rnastralign_all':
test_data_1800 = RNASSDataGenerator('../data/{}/'.format(data_type), 'test_no_redundant_1800')
seq_len = train_data.data_y.shape[-2]
print('Max seq length ', seq_len)
# using the pytorch interface to parallel the data generation and model training
params = {'batch_size': BATCH_SIZE,
'shuffle': True,
'num_workers': 6,
'drop_last': True}
train_set = Dataset(train_data)
train_generator = data.DataLoader(train_set, **params)
val_set = Dataset(val_data)
val_generator = data.DataLoader(val_set, **params)
params = {'batch_size': 1,
'shuffle': True,
'num_workers': 6,
'drop_last': False}
train_set_1800 = Dataset_1800(train_data_1800)
train_generator_1800 = data.DataLoader(train_set_1800, **params)
val_set_1800 = Dataset_1800(val_data_1800)
val_generator_1800 = data.DataLoader(val_set_1800, **params)
params = {'batch_size': BATCH_SIZE,
'shuffle': False,
'num_workers': 6,
'drop_last': True}
test_set = Dataset(test_data)
test_generator = data.DataLoader(test_set, **params)
params = {'batch_size': 1,
'shuffle': False,
'num_workers': 6,
'drop_last': False}
test_set_1800 = Dataset_1800(test_data_1800)
test_generator_1800 = data.DataLoader(test_set_1800, **params)
if model_type =='test_lc':
contact_net = ContactNetwork_test(d=d, L=seq_len).to(device)
if model_type == 'att6':
contact_net = ContactAttention(d=d, L=seq_len).to(device)
if model_type == 'att_simple':
contact_net = ContactAttention_simple(d=d, L=seq_len).to(device)
if model_type == 'att_simple_fix':
contact_net = ContactAttention_simple_fix_PE(d=d, L=seq_len,
device=device).to(device)
if model_type == 'fc':
contact_net = ContactNetwork_fc(d=d, L=seq_len).to(device)
if model_type == 'conv2d_fc':
contact_net = ContactNetwork(d=d, L=seq_len).to(device)
if LOAD_MODEL and os.path.isfile(model_path):
print('Loading u net model...')
contact_net.load_state_dict(torch.load(model_path))
u_optimizer = optim.Adam(contact_net.parameters())
# for length as 600
pos_weight = torch.Tensor([300]).to(device)
criterion_bce_weighted = torch.nn.BCEWithLogitsLoss(
pos_weight = pos_weight)
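# pos_weight up-weights the positive (paired) entries of the contact map in the BCE loss,
# since paired positions are far rarer than unpaired ones; 300 is the weight used here for
# the 600-length model.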
# randomly select one batch from the validation set and perform the evaluation
def model_eval():
contact_net.eval()
contacts, seq_embeddings, matrix_reps, seq_lens = next(iter(val_generator))
contacts_batch = torch.Tensor(contacts.float()).to(device)
seq_embedding_batch = torch.Tensor(seq_embeddings.float()).to(device)
# padding the states for supervised training with all 0s
state_pad = torch.zeros(1,2,1).to(device)
PE_batch = get_pe(seq_lens, 600).float().to(device)
with torch.no_grad():
pred_contacts = contact_net(PE_batch,
seq_embedding_batch, state_pad)
u_no_train = postprocess(pred_contacts,
seq_embedding_batch, 0.01, 0.1, 50, 1.0, True)
map_no_train = (u_no_train > 0.5).float()
f1_no_train_tmp = list(map(lambda i: F1_low_tri(map_no_train.cpu()[i],
contacts_batch.cpu()[i]), range(contacts_batch.shape[0])))
print('Average val F1 score for 600 with pure post-processing: ', np.average(f1_no_train_tmp))
seq_embedding_batch, PE_batch, contacts_batch, _, _, _, _ = next(iter(val_generator_1800))
seq_embedding_batch = seq_embedding_batch[0].to(device)
PE_batch = PE_batch[0].to(device)
contacts_batch = contacts_batch[0]
# padding the states for supervised training with all 0s
state_pad = torch.zeros(1,2,2).to(device)
with torch.no_grad():
pred_contacts = contact_net(PE_batch,
seq_embedding_batch, state_pad)
u_no_train = postprocess(pred_contacts,
seq_embedding_batch, 0.01, 0.1, 50, 1.0, True)
map_no_train = (u_no_train > 0.5).float()
f1_no_train_tmp = list(map(lambda i: F1_low_tri(map_no_train.cpu()[i],
contacts_batch.cpu()[i]), range(contacts_batch.shape[0])))
print('Average val F1 score for 1800 with pure post-processing: ', np.average(f1_no_train_tmp))
def model_eval_all_test():
contact_net.eval()
result_no_train = list()
result_no_train_shift = list()
seq_lens_list = list()
batch_n = 0
# for contacts, seq_embeddings, matrix_reps, seq_lens in test_generator:
# if batch_n%10==0:
# print('Batch number: ', batch_n)
# batch_n += 1
# contacts_batch = torch.Tensor(contacts.float()).to(device)
# seq_embedding_batch = torch.Tensor(seq_embeddings.float()).to(device)
# state_pad = torch.zeros(1,2,2).to(device)
# PE_batch = get_pe(seq_lens, 600).float().to(device)
# with torch.no_grad():
# pred_contacts = contact_net(PE_batch,
# seq_embedding_batch, state_pad)
# # only post-processing without learning
# u_no_train = postprocess(pred_contacts,
# seq_embedding_batch, 0.01, 0.1, 50, 1.0, True)
# map_no_train = (u_no_train > 0.5).float()
# result_no_train_tmp = list(map(lambda i: evaluate_exact(map_no_train.cpu()[i],
# contacts_batch.cpu()[i]), range(contacts_batch.shape[0])))
# result_no_train += result_no_train_tmp
# result_no_train_tmp_shift = list(map(lambda i: evaluate_shifted(map_no_train.cpu()[i],
# contacts_batch.cpu()[i]), range(contacts_batch.shape[0])))
# result_no_train_shift += result_no_train_tmp_shift
for seq_embedding_batch, PE_batch, contacts_batch, _, _, _, seq_lens in test_generator_1800:
if batch_n%10==0:
print('Batch number: ', batch_n)
batch_n += 1
seq_embedding_batch = seq_embedding_batch[0].to(device)
PE_batch = PE_batch[0].to(device)
contacts_batch = contacts_batch[0]
# padding the states for supervised training with all 0s
state_pad = torch.zeros(1,2,2).to(device)
with torch.no_grad():
pred_contacts = contact_net(PE_batch, seq_embedding_batch, state_pad)
# only post-processing without learning
u_no_train = postprocess(pred_contacts,
seq_embedding_batch, 0.01, 0.1, 50, 1.0, True)
map_no_train = (u_no_train > 0.5).float()
result_no_train_tmp = list(map(lambda i: evaluate_exact(map_no_train.cpu()[i],
contacts_batch.cpu()[i]), range(contacts_batch.shape[0])))
result_no_train += result_no_train_tmp
result_no_train_tmp_shift = list(map(lambda i: evaluate_shifted(map_no_train.cpu()[i],
contacts_batch.cpu()[i]), range(contacts_batch.shape[0])))
result_no_train_shift += result_no_train_tmp_shift
seq_lens_list += list(seq_lens)
nt_exact_p,nt_exact_r,nt_exact_f1 = zip(*result_no_train)
nt_shift_p,nt_shift_r,nt_shift_f1 = zip(*result_no_train_shift)
nt_exact_p = np.nan_to_num(np.array(nt_exact_p))
nt_exact_r = np.nan_to_num(np.array(nt_exact_r))
nt_exact_f1 = np.nan_to_num(np.array(nt_exact_f1))
nt_shift_p = np.nan_to_num(np.array(nt_shift_p))
nt_shift_r = np.nan_to_num(np.array(nt_shift_r))
nt_shift_f1 = np.nan_to_num(np.array(nt_shift_f1))
print('Average testing F1 score with pure post-processing: ', np.average(nt_exact_f1))
print('Average testing F1 score with pure post-processing allow shift: ', np.average(nt_shift_f1))
print('Average testing precision with pure post-processing: ', np.average(nt_exact_p))
print('Average testing precision with pure post-processing allow shift: ', np.average(nt_shift_p))
print('Average testing recall with pure post-processing: ', np.average(nt_exact_r))
print('Average testing recall with pure post-processing allow shift: ', np.average(nt_shift_r))
nt_exact_f1_agg = list()
nt_shift_f1_agg = list()
for i in range(len(seq_lens_list)):
nt_exact_f1_agg.append(np.average(nt_exact_f1[i*15:(i+1)*15]))
nt_shift_f1_agg.append(np.average(nt_shift_f1[i*15:(i+1)*15]))
result_dict = dict()
result_dict['exact_p'] = nt_exact_p
result_dict['exact_r'] = nt_exact_r
result_dict['exact_f1'] = nt_exact_f1
result_dict['shift_p'] = nt_shift_p
result_dict['shift_r'] = nt_shift_r
result_dict['shift_f1'] = nt_shift_f1
result_dict['seq_lens'] = seq_lens_list
result_dict['exact_weighted_f1'] = np.sum(np.array(nt_exact_f1_agg)*np.array(seq_lens_list)/np.sum(seq_lens_list))
result_dict['shift_weighted_f1'] = np.sum(np.array(nt_shift_f1_agg)*np.array(seq_lens_list)/np.sum(seq_lens_list))
import _pickle as pickle
with open('../results/rnastralign_long_pure_pp_evaluation_dict.pickle', 'wb') as f:
pickle.dump(result_dict, f)
def model_eval_all_test_greedy_sort():
contact_net.eval()
result_no_train = list()
result_no_train_shift = list()
seq_lens_list = list()
batch_n = 0
for seq_embedding_batch, PE_batch, contacts_batch, comb_index, _, contacts, seq_lens in test_generator_1800:
if batch_n%10==0:
print('Batch number: ', batch_n)
batch_n += 1
seq_embedding_batch = seq_embedding_batch[0].to(device)
PE_batch = PE_batch[0].to(device)
contacts_batch = contacts_batch[0]
# padding the states for supervised training with all 0s
state_pad = torch.zeros(1,2,2).to(device)
with torch.no_grad():
pred_contacts = contact_net(PE_batch, seq_embedding_batch, state_pad)
pred_u_map = combine_chunk_u_maps_no_replace(pred_contacts, comb_index, 6)
pred_u_map = pred_u_map.unsqueeze(0)
# only post-processing without learning
map_no_train = conflict_sort(pred_u_map)
result_no_train_tmp = list(map(lambda i: evaluate_exact(map_no_train[i],
contacts.float().cpu()[i]), range(contacts.shape[0])))
result_no_train += result_no_train_tmp
result_no_train_tmp_shift = list(map(lambda i: evaluate_shifted(map_no_train[i],
contacts.float().cpu()[i]), range(contacts.shape[0])))
result_no_train_shift += result_no_train_tmp_shift
seq_lens_list += list(seq_lens)
nt_exact_p,nt_exact_r,nt_exact_f1 = zip(*result_no_train)
nt_shift_p,nt_shift_r,nt_shift_f1 = zip(*result_no_train_shift)
nt_exact_p = np.nan_to_num(np.array(nt_exact_p))
nt_exact_r = np.nan_to_num(np.array(nt_exact_r))
nt_exact_f1 = np.nan_to_num(np.array(nt_exact_f1))
nt_shift_p = np.nan_to_num(np.array(nt_shift_p))
nt_shift_r = np.nan_to_num(np.array(nt_shift_r))
nt_shift_f1 = np.nan_to_num(np.array(nt_shift_f1))
print('Average testing F1 score with pure post-processing: ', np.average(nt_exact_f1))
print('Average testing F1 score with pure post-processing allow shift: ', np.average(nt_shift_f1))
print('Average testing precision with pure post-processing: ', np.average(nt_exact_p))
print('Average testing precision with pure post-processing allow shift: ', np.average(nt_shift_p))
print('Average testing recall with pure post-processing: ', np.average(nt_exact_r))
print('Average testing recall with pure post-processing allow shift: ', np.average(nt_shift_r))
result_dict = dict()
result_dict['exact_p'] = nt_exact_p
result_dict['exact_r'] = nt_exact_r
result_dict['exact_f1'] = nt_exact_f1
result_dict['shift_p'] = nt_shift_p
result_dict['shift_r'] = nt_shift_r
result_dict['shift_f1'] = nt_shift_f1
result_dict['seq_lens'] = seq_lens_list
result_dict['exact_weighted_f1'] = np.sum(np.array(nt_exact_f1)*np.array(seq_lens_list)/np.sum(seq_lens_list))
result_dict['shift_weighted_f1'] = np.sum(np.array(nt_shift_f1)*np.array(seq_lens_list)/np.sum(seq_lens_list))
import _pickle as pickle
with open('../results/rnastralign_long_greedy_sort_evaluation_dict.pickle', 'wb') as f:
pickle.dump(result_dict, f)
def model_eval_all_test_greedy_sampling():
contact_net.eval()
result_no_train = list()
result_no_train_shift = list()
seq_lens_list = list()
batch_n = 0
for seq_embedding_batch, PE_batch, contacts_batch, comb_index, _, contacts, seq_lens in test_generator_1800:
if batch_n%10==0:
print('Batch number: ', batch_n)
batch_n += 1
seq_embedding_batch = seq_embedding_batch[0].to(device)
PE_batch = PE_batch[0].to(device)
contacts_batch = contacts_batch[0]
# padding the states for supervised training with all 0s
state_pad = torch.zeros(1,2,2).to(device)
with torch.no_grad():
pred_contacts = contact_net(PE_batch, seq_embedding_batch, state_pad)
pred_u_map = combine_chunk_u_maps_no_replace(pred_contacts, comb_index, 6)
pred_u_map = pred_u_map.unsqueeze(0)
# only post-processing without learning
map_no_train = conflict_sampling(pred_u_map)
result_no_train_tmp = list(map(lambda i: evaluate_exact(map_no_train[i],
contacts.float().cpu()[i]), range(contacts.shape[0])))
result_no_train += result_no_train_tmp
result_no_train_tmp_shift = list(map(lambda i: evaluate_shifted(map_no_train[i],
contacts.float().cpu()[i]), range(contacts.shape[0])))
result_no_train_shift += result_no_train_tmp_shift
seq_lens_list += list(seq_lens)
nt_exact_p,nt_exact_r,nt_exact_f1 = zip(*result_no_train)
nt_shift_p,nt_shift_r,nt_shift_f1 = zip(*result_no_train_shift)
nt_exact_p = np.nan_to_num(np.array(nt_exact_p))
nt_exact_r = np.nan_to_num(np.array(nt_exact_r))
nt_exact_f1 = np.nan_to_num(np.array(nt_exact_f1))
nt_shift_p = np.nan_to_num(np.array(nt_shift_p))
nt_shift_r = np.nan_to_num(np.array(nt_shift_r))
nt_shift_f1 = np.nan_to_num(np.array(nt_shift_f1))
print('Average testing F1 score with pure post-processing: ', np.average(nt_exact_f1))
print('Average testing F1 score with pure post-processing allow shift: ', np.average(nt_shift_f1))
print('Average testing precision with pure post-processing: ', np.average(nt_exact_p))
print('Average testing precision with pure post-processing allow shift: ', np.average(nt_shift_p))
print('Average testing recall with pure post-processing: ', np.average(nt_exact_r))
print('Average testing recall with pure post-processing allow shift: ', np.average(nt_shift_r))
result_dict = dict()
result_dict['exact_p'] = nt_exact_p
result_dict['exact_r'] = nt_exact_r
result_dict['exact_f1'] = nt_exact_f1
result_dict['shift_p'] = nt_shift_p
result_dict['shift_r'] = nt_shift_r
result_dict['shift_f1'] = nt_shift_f1
result_dict['seq_lens'] = seq_lens_list
result_dict['exact_weighted_f1'] = np.sum(np.array(nt_exact_f1)*np.array(seq_lens_list)/np.sum(seq_lens_list))
result_dict['shift_weighted_f1'] = np.sum(np.array(nt_shift_f1)*np.array(seq_lens_list)/np.sum(seq_lens_list))
import _pickle as pickle
with open('../results/rnastralign_long_greedy_sampling_evaluation_dict.pickle', 'wb') as f:
pickle.dump(result_dict, f)
# There are three steps of training
# step one: train the u net
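# Each epoch below alternates a short-sequence (600) phase with a long-sequence (1800) phase;
# the long phase accumulates gradients and only steps the optimizer on every fifth batch, and
# both phases are capped by the `steps_done` break conditions.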
for epoch in range(epoches_first):
contact_net.train()
print('On short sequence phase:')
for contacts, seq_embeddings, matrix_reps, seq_lens in train_generator:
contacts_batch = torch.Tensor(contacts.float()).to(device)
seq_embedding_batch = torch.Tensor(seq_embeddings.float()).to(device)
# padding the states for supervised training with all 0s
state_pad = torch.zeros(1,2,2).to(device)
PE_batch = get_pe(seq_lens, 600).float().to(device)
contact_masks = torch.Tensor(contact_map_masks(seq_lens, 600)).to(device)
pred_contacts = contact_net(PE_batch,
seq_embedding_batch, state_pad)
# Compute loss
loss_u = criterion_bce_weighted(pred_contacts*contact_masks, contacts_batch)
# print(steps_done)
if steps_done % OUT_STEP ==0:
print('Stage 1, epoch for 600: {}, step: {}, loss: {}'.format(
epoch, steps_done, loss_u))
u_no_train = postprocess(pred_contacts,
seq_embedding_batch, 0.01, 0.1, 50, 1.0, True)
map_no_train = (u_no_train > 0.5).float()
f1_no_train_tmp = list(map(lambda i: F1_low_tri(map_no_train.cpu()[i],
contacts_batch.cpu()[i]), range(contacts_batch.shape[0])))
print('Average train F1 score for 600 with pure post-processing: ', np.average(f1_no_train_tmp))
# Optimize the model
u_optimizer.zero_grad()
loss_u.backward()
u_optimizer.step()
steps_done=steps_done+1
if steps_done % 600 ==0:
break
print('On long sequence phase:')
u_optimizer.zero_grad()
for seq_embedding_batch, PE_batch, contacts_batch, _, _, _, _ in train_generator_1800:
# padding the states for supervised training with all 0s
state_pad = torch.zeros(1,2,2).to(device)
seq_embedding_batch = seq_embedding_batch[0].to(device)
PE_batch = PE_batch[0].to(device)
contacts_batch = contacts_batch[0].to(device)
pred_contacts = contact_net(PE_batch, seq_embedding_batch, state_pad)
# Compute loss
loss_u = criterion_bce_weighted(pred_contacts, contacts_batch)
# print(steps_done)
if steps_done % OUT_STEP ==0:
            print('Stage 1, epoch for 1800: {}, step: {}, loss: {}'.format(
epoch, steps_done, loss_u))
u_no_train = postprocess(pred_contacts,
seq_embedding_batch, 0.01, 0.1, 50, 1.0, True)
map_no_train = (u_no_train > 0.5).float()
f1_no_train_tmp = list(map(lambda i: F1_low_tri(map_no_train.cpu()[i],
contacts_batch.cpu()[i]), range(contacts_batch.shape[0])))
print('Average train F1 score for 1800 with pure post-processing: ', np.average(f1_no_train_tmp))
# Optimize the model
loss_u.backward()
if steps_done % 5 ==0:
u_optimizer.step()
u_optimizer.zero_grad()
steps_done=steps_done+1
if steps_done % 150 ==0:
break
if epoch%evaluate_epi==0:
model_eval()
torch.save(contact_net.state_dict(), model_path)
# model_eval_all_test()
# sys.exit()
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
lojapam/asgi.py | """
ASGI config for lojapam project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'lojapam.settings')
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
perflogging/src/main/java/com/intel/perflogging/BenchmarkHelper.java | // Copyright (C) 2020 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
package com.intel.perflogging;
import java.io.*;
import java.nio.charset.StandardCharsets;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
/**
* Class to record event counts and duration in a general way for micro benchmarking.
*
* To enable benchmarking set the DAI_USE_BENCHMARKING environment variable to "true". It will be disabled by default.
*
* To change the minimum burst threshold from the default of 10 set the DAI_BENCHMARKING_THRESHOLD environment
* variable to the desired minimum size. This is to filter out random bursts.
*/
public class BenchmarkHelper {
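    // Illustrative usage sketch (the names and values below are assumptions, not taken from
    // this code base):
    //   BenchmarkHelper bench = new BenchmarkHelper("ingest", "/tmp/bench-{{host}}.json", 5);
    //   bench.replaceFilenameVariable("host", "node01");
    //   bench.addNamedValue("messages", batchSize);  // inside the busy part of the work loop
    //   bench.tick();                                // on idle loop iterations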
/**
* Create a micro benchmarking object for a process that has bursts of activity. Only one per process is needed.
*
* @param dataSetName The name of the dataset for this benchmarking object.
* @param filename The file to write after a "burst" is finished.
* @param maxBurstSeconds The delay in seconds after a burst that signifies the burst is over.
*/
public BenchmarkHelper(String dataSetName, String filename, long maxBurstSeconds) {
this(dataSetName, new File(filename), maxBurstSeconds);
}
/**
* Create a micro benchmarking object for a process that has bursts of activity. Only one per process is needed.
*
* @param dataSetName The name of the dataset for this benchmarking object.
* @param file The file to write after a "burst" is finished.
* @param maxBurstSeconds The delay in seconds after a burst that signifies the burst is over.
*/
public BenchmarkHelper(String dataSetName, File file, long maxBurstSeconds) {
file_ = file;
maxBurstMicroSeconds_ = maxBurstSeconds * 1_000_000;
dataSetName_ = dataSetName;
if(System.getenv("DAI_USE_BENCHMARKING") != null)
doBenchmarking_ = Boolean.parseBoolean(System.getenv("DAI_USE_BENCHMARKING").trim().toLowerCase());
if(System.getenv("DAI_BENCHMARKING_THRESHOLD") != null)
threshold_ = Long.parseLong(System.getenv("DAI_BENCHMARKING_THRESHOLD").trim());
}
/**
* Change the dataset name after construction.
*
* @param newName The new name.
*/
public void changeDataSetName(String newName) {
if(doBenchmarking_) {
assert newName != null : "Benchmarking: changeDataSetName: new name cannot be null";
assert !newName.trim().isEmpty():"Benchmarking: changeDataSetName: new name cannot be blank";
dataSetName_ = newName;
}
}
/**
* Replace a variable in the filename with a value.
*
* @param variableName The name of the variable to replace.
* @param value The replacement for the variable.
*/
public void replaceFilenameVariable(String variableName, String value) {
if(variableName == null || variableName.trim().isEmpty()) throw new NullPointerException("variableName cannot be null or empty!");
if(value == null || value.trim().isEmpty()) throw new NullPointerException("value cannot be null or empty!");
file_ = new File(file_.toString().replace("{{" + variableName + "}}", value));
}
/**
* Add a count to the default data tracker.
*
* @param value The value to add to the total count for the default data tracker.
*/
public void addDefaultValue(long value) {
if(doBenchmarking_) {
commonAdd();
defaultValue_.addAndGet(value);
}
}
/**
* Add a named count to the named data tracker.
*
* @param name The name of the data tracker to accumulate the count for.
* @param value The value to add to the total count for the named data tracker.
*/
public void addNamedValue(String name, long value) {
if(doBenchmarking_) {
commonAdd();
if(!values_.containsKey(name))
values_.put(name, new AtomicLong(0L));
values_.get(name).addAndGet(value);
}
}
/**
* Called when the loop in the process does not do any work (i.e. no add methods are called).
*/
public void tick() {
if(doBenchmarking_ && (defaultValue_.get() > 0L || values_.size() > 0)) {
long target = lastTs_.get() + maxBurstMicroSeconds_;
if (getMicroSecondTimestamp() > target)
recordAndReset();
}
}
private void commonAdd() {
long ts = getMicroSecondTimestamp();
if (firstTs_.get() == 0L)
firstTs_.set(ts);
lastTs_.set(ts);
}
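    // Appends one JSON object for the finished burst (name, start/finish timestamps in
    // microseconds, duration, and per-tracker counts) when the burst is above the configured
    // threshold, then clears all counters.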
private void recordAndReset() {
if(aboveThreshold()) {
try (OutputStream os = new FileOutputStream(file_, true)) {
try (Writer out = new OutputStreamWriter(os, StandardCharsets.UTF_8)) {
StringBuilder builder = new StringBuilder();
builder.append("{\"name\":\"").append(dataSetName_).append("\",");
builder.append("\"start\":\"").append(firstTs_.get()).append("\",");
builder.append("\"finish\":\"").append(lastTs_.get()).append("\",");
builder.append("\"duration\":\"").append(lastTs_.get() - firstTs_.get()).append("\",");
builder.append("\"counts\":{");
boolean first = true;
if (defaultValue_.get() > 0) {
builder.append("\"DEFAULT\":").append(defaultValue_.get());
first = false;
}
for (Map.Entry<String, AtomicLong> entry : values_.entrySet()) {
if (first)
first = false;
else
builder.append(",");
builder.append("\"").append(entry.getKey()).append("\":").append(entry.getValue().get());
}
builder.append("}}\n");
out.write(builder.toString());
}
} catch (IOException e) {
System.err.println("*** Benchmarking Error: Failed to write result of benchmarking to file: " +
file_.toString());
}
}
firstTs_.set(0L);
lastTs_.set(0L);
defaultValue_.set(0L);
values_.clear();
}
private boolean aboveThreshold() {
long max = defaultValue_.get();
for(Map.Entry<String, AtomicLong> entry : values_.entrySet())
max = Math.max(max, entry.getValue().get());
return max >= threshold_;
}
private Instant getMicroSecondTimeStampAsInstant() {
return Instant.now().truncatedTo(ChronoUnit.MICROS);
}
private long getMicroSecondTimestamp() {
Instant now = getMicroSecondTimeStampAsInstant();
return now.getEpochSecond() * 1_000_000 + now.getNano() / 1_000;
}
private File file_;
private final long maxBurstMicroSeconds_;
private String dataSetName_;
private final Map<String, AtomicLong> values_ = new ConcurrentHashMap<>(32);
private AtomicLong defaultValue_ = new AtomicLong(0L);
private AtomicLong firstTs_ = new AtomicLong(0L);
private AtomicLong lastTs_ = new AtomicLong(0L);
private boolean doBenchmarking_ = false;
private long threshold_ = 10;
}
| [
"\"DAI_USE_BENCHMARKING\"",
"\"DAI_USE_BENCHMARKING\"",
"\"DAI_BENCHMARKING_THRESHOLD\"",
"\"DAI_BENCHMARKING_THRESHOLD\""
]
| []
| [
"DAI_BENCHMARKING_THRESHOLD",
"DAI_USE_BENCHMARKING"
]
| [] | ["DAI_BENCHMARKING_THRESHOLD", "DAI_USE_BENCHMARKING"] | java | 2 | 0 | |
pkg/jx/cmd/step_release.go | package cmd
import (
"fmt"
"io"
"os"
"os/user"
"path/filepath"
"github.com/jenkins-x/jx/pkg/helm"
"github.com/jenkins-x/jx/pkg/kube"
"github.com/jenkins-x/jx/pkg/log"
"github.com/jenkins-x/jx/pkg/util"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// StepReleaseOptions contains the CLI arguments
type StepReleaseOptions struct {
StepOptions
DockerRegistry string
Organisation string
Application string
Version string
GitUsername string
GitEmail string
Dir string
XdgConfigHome string
NoBatch bool
// promote flags
Build string
Timeout string
PullRequestPollTime string
LocalHelmRepoName string
HelmRepositoryURL string
}
// NewCmdStep Steps a command object for the "step" command
func NewCmdStepRelease(f Factory, out io.Writer, errOut io.Writer) *cobra.Command {
options := &StepReleaseOptions{
StepOptions: StepOptions{
CommonOptions: CommonOptions{
Factory: f,
Out: out,
Err: errOut,
},
},
}
cmd := &cobra.Command{
Use: "release",
Short: "performs a release on the current git repository",
Run: func(cmd *cobra.Command, args []string) {
options.Cmd = cmd
options.Args = args
err := options.Run()
CheckErr(err)
},
}
cmd.Flags().StringVarP(&options.DockerRegistry, "docker-registry", "r", "", "the docker registry host or host:port to use. If not specified it is loaded from the `docker-registry` ConfigMap")
cmd.Flags().StringVarP(&options.Organisation, "organisation", "o", "", "the docker organisation for the generated docker image")
cmd.Flags().StringVarP(&options.Application, "application", "a", "", "the docker application image name")
cmd.Flags().StringVarP(&options.GitUsername, "git-username", "u", "", "The git username to configure if there is none already setup")
cmd.Flags().StringVarP(&options.GitEmail, "git-email", "e", "", "The git email address to configure if there is none already setup")
cmd.Flags().StringVarP(&options.XdgConfigHome, "xdg-config-home", "", "/home/jenkins", "The home directory where git config is setup")
cmd.Flags().BoolVarP(&options.NoBatch, "no-batch", "", false, "Whether to disable batch mode")
cmd.Flags().StringVarP(&options.Timeout, optionTimeout, "t", "1h", "The timeout to wait for the promotion to succeed in the underlying Environment. The command fails if the timeout is exceeded or the promotion does not complete")
cmd.Flags().StringVarP(&options.PullRequestPollTime, optionPullRequestPollTime, "", "20s", "Poll time when waiting for a Pull Request to merge")
cmd.Flags().StringVarP(&options.LocalHelmRepoName, "helm-repo-name", "", kube.LocalHelmRepoName, "The name of the helm repository that contains the app")
cmd.Flags().StringVarP(&options.HelmRepositoryURL, "helm-repo-url", "", helm.DefaultHelmRepositoryURL, "The Helm Repository URL to use for the App")
	cmd.Flags().StringVarP(&options.Build, "build", "b", "", "The Build number which is used to update the PipelineActivity. If not specified it defaults to the value of the '$BUILD_NUMBER' environment variable")
return cmd
}
// Run implements this command
func (o *StepReleaseOptions) Run() error {
o.BatchMode = !o.NoBatch
err := o.runCommandVerbose("git", "config", "--global", "credential.helper", "store")
if err != nil {
return err
}
if o.XdgConfigHome != "" {
if os.Getenv("XDG_CONFIG_HOME") == "" {
err = o.Setenv("XDG_CONFIG_HOME", o.XdgConfigHome)
if err != nil {
return err
}
}
}
stepGitCredentialsOptions := &StepGitCredentialsOptions{
StepOptions: o.StepOptions,
}
err = stepGitCredentialsOptions.Run()
if err != nil {
return fmt.Errorf("Failed to setup git credentials: %s", err)
}
dir := o.Dir
gitUser, err := o.Git().Username(dir)
if err != nil || gitUser == "" {
gitUser = o.GitUsername
if gitUser == "" {
user, err := user.Current()
if err == nil && user != nil {
gitUser = user.Username
}
}
if gitUser == "" {
gitUser = "jenkins-x-bot"
}
err = o.Git().SetUsername(dir, gitUser)
if err != nil {
return fmt.Errorf("Failed to set git user %s: %s", gitUser, err)
}
}
gitEmail, err := o.Git().Email(dir)
if err != nil || gitEmail == "" {
gitEmail = o.GitEmail
if gitEmail == "" {
gitEmail = "[email protected]"
}
err = o.Git().SetEmail(dir, gitEmail)
if err != nil {
return fmt.Errorf("Failed to set git email %s: %s", gitEmail, err)
}
}
if o.DockerRegistry == "" {
o.DockerRegistry = os.Getenv("DOCKER_REGISTRY")
}
if o.Organisation == "" {
o.Organisation = os.Getenv("ORG")
}
if o.Application == "" {
o.Application = os.Getenv("APP_NAME")
}
if o.DockerRegistry == "" {
o.DockerRegistry, err = o.loadDockerRegistry()
if err != nil {
return err
}
}
if o.Organisation == "" || o.Application == "" {
gitInfo, err := o.FindGitInfo("")
if err != nil {
return err
}
if o.Organisation == "" {
o.Organisation = gitInfo.Organisation
}
if o.Application == "" {
o.Application = gitInfo.Name
}
}
err = o.Setenv("DOCKER_REGISTRY", o.DockerRegistry)
if err != nil {
return err
}
err = o.Setenv("ORG", o.Organisation)
if err != nil {
return err
}
err = o.Setenv("APP_NAME", o.Application)
if err != nil {
return err
}
stepNextVersionOptions := &StepNextVersionOptions{
StepOptions: o.StepOptions,
}
if o.isNode() {
stepNextVersionOptions.Filename = packagejson
/*
} else if o.isMaven() {
stepNextVersionOptions.Filename = pomxml
*/
} else {
stepNextVersionOptions.UseGitTagOnly = true
}
err = stepNextVersionOptions.Run()
if err != nil {
return fmt.Errorf("Failed to create next version: %s", err)
}
o.Version = stepNextVersionOptions.NewVersion
err = o.Setenv("VERSION", o.Version)
if err != nil {
return err
}
err = o.updateVersionInSource()
if err != nil {
return fmt.Errorf("Failed to update version in source: %s", err)
}
chartsDir := filepath.Join("charts", o.Application)
chartExists, err := util.FileExists(chartsDir)
if err != nil {
return fmt.Errorf("Failed to find chart folder: %s", err)
}
stepTagOptions := &StepTagOptions{
StepOptions: o.StepOptions,
}
if chartExists {
stepTagOptions.Flags.ChartsDir = chartsDir
stepTagOptions.Flags.ChartValueRepository = fmt.Sprintf("%s/%s/%s", o.DockerRegistry, o.Organisation, o.Application)
}
stepTagOptions.Flags.Version = o.Version
err = stepTagOptions.Run()
if err != nil {
return fmt.Errorf("Failed to tag source: %s", err)
}
err = o.buildSource()
if err != nil {
return err
}
err = o.runCommandVerbose("skaffold", "build", "-f", "skaffold.yaml")
if err != nil {
return fmt.Errorf("Failed to run skaffold: %s", err)
}
imageName := fmt.Sprintf("%s/%s/%s:%s", o.DockerRegistry, o.Organisation, o.Application, o.Version)
stepPostBuildOptions := &StepPostBuildOptions{
StepOptions: o.StepOptions,
FullImageName: imageName,
}
err = stepPostBuildOptions.Run()
if err != nil {
return fmt.Errorf("Failed to run post build step: %s", err)
}
// now lets promote from the charts dir...
if chartExists {
err = o.releaseAndPromoteChart(chartsDir)
if err != nil {
return fmt.Errorf("Failed to promote: %s", err)
}
} else {
log.Infof("No charts directory %s so not promoting\n", util.ColorInfo(chartsDir))
}
return nil
}
func (o *StepReleaseOptions) updateVersionInSource() error {
if o.isMaven() {
return o.runCommandVerbose("mvn", "versions:set", "-DnewVersion="+o.Version)
}
return nil
}
func (o *StepReleaseOptions) buildSource() error {
if o.isMaven() {
return o.runCommandVerbose("mvn", "clean", "deploy")
}
return nil
}
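// loadDockerRegistry reads the "docker.registry" entry from the Jenkins docker registry
// ConfigMap in the development namespace and returns it, or an error if it cannot be found.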
func (o *StepReleaseOptions) loadDockerRegistry() (string, error) {
kubeClient, curNs, err := o.KubeClient()
if err != nil {
return "", err
}
ns, _, err := kube.GetDevNamespace(kubeClient, curNs)
if err != nil {
return "", err
}
configMapName := kube.ConfigMapJenkinsDockerRegistry
cm, err := kubeClient.CoreV1().ConfigMaps(ns).Get(configMapName, metav1.GetOptions{})
if err != nil {
return "", fmt.Errorf("Could not find ConfigMap %s in namespace %s: %s", configMapName, ns, err)
}
if cm.Data != nil {
dockerRegistry := cm.Data["docker.registry"]
if dockerRegistry != "" {
return dockerRegistry, nil
}
}
return "", fmt.Errorf("Could not find the docker.registry property in the ConfigMap: %s", configMapName)
}
func (o *StepReleaseOptions) releaseAndPromoteChart(dir string) error {
err := os.Chdir(dir)
if err != nil {
return fmt.Errorf("Failed to change to directory %s: %s", dir, err)
}
stepChangelogOptions := &StepChangelogOptions{
StepOptions: o.StepOptions,
Build: o.Build,
}
err = stepChangelogOptions.Run()
if err != nil {
return fmt.Errorf("Failed to generate changelog: %s", err)
}
stepHelmReleaseOptions := &StepHelmReleaseOptions{
StepHelmOptions: StepHelmOptions{
StepOptions: o.StepOptions,
},
}
err = stepHelmReleaseOptions.Run()
if err != nil {
return fmt.Errorf("Failed to release helm chart: %s", err)
}
promoteOptions := PromoteOptions{
CommonOptions: o.CommonOptions,
AllAutomatic: true,
Timeout: o.Timeout,
PullRequestPollTime: o.PullRequestPollTime,
Version: o.Version,
LocalHelmRepoName: o.LocalHelmRepoName,
HelmRepositoryURL: o.HelmRepositoryURL,
Build: o.Build,
}
promoteOptions.BatchMode = true
return promoteOptions.Run()
}
func (o *StepReleaseOptions) isMaven() bool {
exists, err := util.FileExists("pom.xml")
return exists && err == nil
}
func (o *StepReleaseOptions) isNode() bool {
exists, err := util.FileExists("package.json")
return exists && err == nil
}
func (o *StepReleaseOptions) Setenv(key string, value string) error {
err := os.Setenv(key, value)
if err != nil {
return fmt.Errorf("Failed to set environment variable %s=%s: %s", key, value, err)
}
return nil
}
| [
"\"XDG_CONFIG_HOME\"",
"\"DOCKER_REGISTRY\"",
"\"ORG\"",
"\"APP_NAME\""
]
| []
| [
"DOCKER_REGISTRY",
"APP_NAME",
"ORG",
"XDG_CONFIG_HOME"
]
| [] | ["DOCKER_REGISTRY", "APP_NAME", "ORG", "XDG_CONFIG_HOME"] | go | 4 | 0 | |
integration/integration_suite_test.go | package integration_test
import (
"encoding/json"
"flag"
"fmt"
"net/url"
"os"
"os/exec"
"path/filepath"
"testing"
"time"
"github.com/cloudfoundry/libbuildpack/cutlass"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var bpDir string
var buildpackVersion string
var packagedBuildpack cutlass.VersionedBuildpackPackage
func init() {
flag.StringVar(&buildpackVersion, "version", "", "version to use (builds if empty)")
flag.BoolVar(&cutlass.Cached, "cached", true, "cached buildpack")
flag.StringVar(&cutlass.DefaultMemory, "memory", "128M", "default memory for pushed apps")
flag.StringVar(&cutlass.DefaultDisk, "disk", "256M", "default disk for pushed apps")
flag.Parse()
}
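// The buildpack is packaged once on the first Ginkgo node; its metadata is serialized and shared with every parallel node.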
var _ = SynchronizedBeforeSuite(func() []byte {
// Run once
if buildpackVersion == "" {
packagedBuildpack, err := cutlass.PackageUniquelyVersionedBuildpack(os.Getenv("CF_STACK"), ApiHasStackAssociation())
Expect(err).NotTo(HaveOccurred(), "failed to package buildpack")
data, err := json.Marshal(packagedBuildpack)
Expect(err).NotTo(HaveOccurred())
return data
}
return []byte{}
}, func(data []byte) {
// Run on all nodes
var err error
if len(data) > 0 {
err = json.Unmarshal(data, &packagedBuildpack)
Expect(err).NotTo(HaveOccurred())
buildpackVersion = packagedBuildpack.Version
}
bpDir, err = cutlass.FindRoot()
Expect(err).NotTo(HaveOccurred())
Expect(cutlass.CopyCfHome()).To(Succeed())
cutlass.SeedRandom()
cutlass.DefaultStdoutStderr = GinkgoWriter
})
var _ = SynchronizedAfterSuite(func() {
// Run on all nodes
}, func() {
// Run once
Expect(cutlass.RemovePackagedBuildpack(packagedBuildpack)).To(Succeed())
Expect(cutlass.DeleteOrphanedRoutes()).To(Succeed())
})
func TestIntegration(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Integration Suite")
}
func PushAppAndConfirm(app *cutlass.App) {
Expect(app.Push()).To(Succeed())
Eventually(func() ([]string, error) { return app.InstanceStates() }, 20*time.Second).Should(Equal([]string{"RUNNING"}))
Expect(app.ConfirmBuildpack(buildpackVersion)).To(Succeed())
}
func DestroyApp(app *cutlass.App) *cutlass.App {
if app != nil {
app.Destroy()
}
return nil
}
func ApiHasTask() bool {
supported, err := cutlass.ApiGreaterThan("2.75.0")
Expect(err).NotTo(HaveOccurred())
return supported
}
func ApiHasMultiBuildpack() bool {
supported, err := cutlass.ApiGreaterThan("2.90.0")
Expect(err).NotTo(HaveOccurred(), "the targeted CF does not support multiple buildpacks")
return supported
}
func ApiSupportsSymlinks() bool {
supported, err := cutlass.ApiGreaterThan("2.103.0")
Expect(err).NotTo(HaveOccurred(), "the targeted CF does not support symlinks")
return supported
}
func ApiHasStackAssociation() bool {
supported, err := cutlass.ApiGreaterThan("2.113.0")
Expect(err).NotTo(HaveOccurred(), "the targeted CF does not support stack association")
return supported
}
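// AssertUsesProxyDuringStagingIfPresent registers a spec that stages the fixture behind a local proxy and asserts that all outbound traffic during staging goes through that proxy (uncached buildpacks only).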
func AssertUsesProxyDuringStagingIfPresent(fixtureName string) {
Context("with an uncached buildpack", func() {
BeforeEach(func() {
if cutlass.Cached {
Skip("Running cached tests")
}
})
It("uses a proxy during staging if present", func() {
proxy, err := cutlass.NewProxy()
Expect(err).To(BeNil())
defer proxy.Close()
bpFile := filepath.Join(bpDir, buildpackVersion+"tmp")
cmd := exec.Command("cp", packagedBuildpack.File, bpFile)
err = cmd.Run()
Expect(err).To(BeNil())
defer os.Remove(bpFile)
traffic, _, _, err := cutlass.InternetTraffic(
bpDir,
filepath.Join("testdata", fixtureName),
bpFile,
[]string{"HTTP_PROXY=" + proxy.URL, "HTTPS_PROXY=" + proxy.URL},
)
Expect(err).To(BeNil())
// Expect(built).To(BeTrue())
destUrl, err := url.Parse(proxy.URL)
Expect(err).To(BeNil())
Expect(cutlass.UniqueDestination(
traffic, fmt.Sprintf("%s.%s", destUrl.Hostname(), destUrl.Port()),
)).To(BeNil())
})
})
}
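// AssertNoInternetTraffic registers a spec that stages the fixture with a cached buildpack and asserts that no outbound network traffic is generated.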
func AssertNoInternetTraffic(fixtureName string) {
It("has no traffic", func() {
if !cutlass.Cached {
Skip("Running uncached tests")
}
bpFile := filepath.Join(bpDir, buildpackVersion+"tmp")
cmd := exec.Command("cp", packagedBuildpack.File, bpFile)
err := cmd.Run()
Expect(err).To(BeNil())
defer os.Remove(bpFile)
traffic, _, _, err := cutlass.InternetTraffic(
bpDir,
filepath.Join("testdata", fixtureName),
bpFile,
[]string{},
)
Expect(err).To(BeNil())
// Expect(built).To(BeTrue())
Expect(traffic).To(BeEmpty())
})
}
func RunCF(args ...string) error {
command := exec.Command("cf", args...)
command.Stdout = GinkgoWriter
command.Stderr = GinkgoWriter
return command.Run()
}
| [
"\"CF_STACK\""
]
| []
| [
"CF_STACK"
]
| [] | ["CF_STACK"] | go | 1 | 0 | |
build/lacros/lacros_resource_sizes.py | #!/usr/bin/env python
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Reports binary size metrics for LaCrOS build artifacts.
More information at //docs/speed/binary_size/metrics.md.
"""
import argparse
import collections
import contextlib
import json
import logging
import os
import subprocess
import sys
import tempfile
@contextlib.contextmanager
def _SysPath(path):
"""Library import context that temporarily appends |path| to |sys.path|."""
if path and path not in sys.path:
sys.path.insert(0, path)
else:
path = None # Indicates that |sys.path| is not modified.
try:
yield
finally:
if path:
sys.path.pop(0)
DIR_SOURCE_ROOT = os.environ.get(
'CHECKOUT_SOURCE_ROOT',
os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)))
BUILD_COMMON_PATH = os.path.join(DIR_SOURCE_ROOT, 'build', 'util', 'lib',
'common')
TRACING_PATH = os.path.join(DIR_SOURCE_ROOT, 'third_party', 'catapult',
'tracing')
EU_STRIP_PATH = os.path.join(DIR_SOURCE_ROOT, 'buildtools', 'third_party',
'eu-strip', 'bin', 'eu-strip')
with _SysPath(BUILD_COMMON_PATH):
import perf_tests_results_helper # pylint: disable=import-error
with _SysPath(TRACING_PATH):
from tracing.value import convert_chart_json # pylint: disable=import-error
_BASE_CHART = {
'format_version': '0.1',
'benchmark_name': 'resource_sizes',
'benchmark_description': 'LaCrOS resource size information.',
'trace_rerun_options': [],
'charts': {}
}
_KEY_RAW = 'raw'
_KEY_GZIPPED = 'gzipped'
_KEY_STRIPPED = 'stripped'
_KEY_STRIPPED_GZIPPED = 'stripped_then_gzipped'
class _Group:
"""A group of build artifacts whose file sizes are summed and tracked.
Build artifacts for size tracking fall under these categories:
* File: A single file.
* Group: A collection of files.
* Dir: All files under a directory.
Attributes:
paths: A list of files or directories to be tracked together.
title: The display name of the group.
track_stripped: Whether to also track summed stripped ELF sizes.
track_compressed: Whether to also track summed compressed sizes.
"""
def __init__(self, paths, title, track_stripped=False,
track_compressed=False):
self.paths = paths
self.title = title
self.track_stripped = track_stripped
self.track_compressed = track_compressed
# List of disjoint build artifact groups for size tracking. This list should be
# synced with lacros-amd64-generic-binary-size-rel builder contents (specified
# in //infra/config/subprojects/chromium/ci.star) and
# chromeos-amd64-generic-lacros-internal builder (specified in src-internal).
_TRACKED_GROUPS = [
_Group(paths=['chrome'],
title='File: chrome',
track_stripped=True,
track_compressed=True),
_Group(paths=['chrome_crashpad_handler'],
title='File: chrome_crashpad_handler'),
_Group(paths=['icudtl.dat'], title='File: icudtl.dat'),
_Group(paths=['nacl_helper'], title='File: nacl_helper'),
_Group(paths=['nacl_irt_x86_64.nexe'], title='File: nacl_irt_x86_64.nexe'),
_Group(paths=['resources.pak'], title='File: resources.pak'),
_Group(paths=[
'chrome_100_percent.pak', 'chrome_200_percent.pak', 'headless_lib.pak'
],
title='Group: Other PAKs'),
_Group(paths=['snapshot_blob.bin'], title='Group: Misc'),
_Group(paths=['locales/'], title='Dir: locales'),
_Group(paths=['swiftshader/'], title='Dir: swiftshader'),
_Group(paths=['WidevineCdm/'], title='Dir: WidevineCdm'),
]
def _visit_paths(base_dir, paths):
"""Itemizes files specified by a list of paths.
Args:
base_dir: Base directory for all elements in |paths|.
paths: A list of filenames or directory names specifying files whose sizes
are to be counted. Directories are recursed. There's no de-duping effort.
Non-existing files or directories are ignored (with a warning message).
"""
for path in paths:
full_path = os.path.join(base_dir, path)
if os.path.exists(full_path):
if os.path.isdir(full_path):
for dirpath, _, filenames in os.walk(full_path):
for filename in filenames:
yield os.path.join(dirpath, filename)
else: # Assume it's a file.
yield full_path
else:
logging.critical('Not found: %s', path)
def _is_probably_elf(filename):
"""Heuristically decides whether |filename| is ELF via magic signature."""
with open(filename, 'rb') as fh:
return fh.read(4) == b'\x7FELF'  # bytes literal: the file is opened in binary mode
def _is_unstrippable_elf(filename):
"""Identifies known-unstrippable ELF files to denoise the system."""
return filename.endswith('.nexe') or filename.endswith('libwidevinecdm.so')
def _get_filesize(filename):
"""Returns the size of a file, or 0 if file is not found."""
try:
return os.path.getsize(filename)
except OSError:
logging.critical('Failed to get size: %s', filename)
return 0
def _get_gzipped_filesize(filename):
"""Returns the gzipped size of a file, or 0 if file is not found."""
BUFFER_SIZE = 65536
if not os.path.isfile(filename):
return 0
try:
# Call gzip externally instead of using gzip package since it's > 2x faster.
cmd = ['gzip', '-c', filename]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
# Manually counting bytes instead of using len(p.communicate()[0]) to avoid
# buffering the entire compressed data (can be ~100 MB).
ret = 0
while True:
chunk = len(p.stdout.read(BUFFER_SIZE))
if chunk == 0:
break
ret += chunk
return ret
except OSError:
logging.critical('Failed to get gzipped size: %s', filename)
return 0
def _get_catagorized_filesizes(filename):
"""Measures |filename| sizes under various transforms.
Returns: A Counter (keyed by _KEY_* constants) that stores measured sizes.
"""
sizes = collections.Counter()
sizes[_KEY_RAW] = _get_filesize(filename)
sizes[_KEY_GZIPPED] = _get_gzipped_filesize(filename)
# Pre-assign values for non-ELF, or in case of failure for ELF.
sizes[_KEY_STRIPPED] = sizes[_KEY_RAW]
sizes[_KEY_STRIPPED_GZIPPED] = sizes[_KEY_GZIPPED]
if _is_probably_elf(filename) and not _is_unstrippable_elf(filename):
try:
fd, temp_file = tempfile.mkstemp()
os.close(fd)
cmd = [EU_STRIP_PATH, filename, '-o', temp_file]
subprocess.check_output(cmd)
sizes[_KEY_STRIPPED] = _get_filesize(temp_file)
sizes[_KEY_STRIPPED_GZIPPED] = _get_gzipped_filesize(temp_file)
if sizes[_KEY_STRIPPED] > sizes[_KEY_RAW]:
# This weird case has been observed for libwidevinecdm.so.
logging.critical('Stripping made things worse for %s' % filename)
except subprocess.CalledProcessError:
logging.critical('Failed to strip file: %s' % filename)
finally:
os.unlink(temp_file)
return sizes
def _dump_chart_json(output_dir, chartjson):
"""Writes chart histogram to JSON files.
Output files:
results-chart.json contains the chart JSON.
perf_results.json contains histogram JSON for Catapult.
Args:
output_dir: Directory to place the JSON files.
chartjson: Source JSON data for output files.
"""
results_path = os.path.join(output_dir, 'results-chart.json')
logging.critical('Dumping chartjson to %s', results_path)
with open(results_path, 'w') as json_file:
json.dump(chartjson, json_file, indent=2)
# We would ideally generate a histogram set directly instead of generating
# chartjson then converting. However, perf_tests_results_helper is in
# //build, which doesn't seem to have any precedent for depending on
# anything in Catapult. This can probably be fixed, but since this doesn't
# need to be super fast or anything, converting is a good enough solution
# for the time being.
histogram_result = convert_chart_json.ConvertChartJson(results_path)
if histogram_result.returncode != 0:
raise Exception('chartjson conversion failed with error: ' +
histogram_result.stdout)
histogram_path = os.path.join(output_dir, 'perf_results.json')
logging.critical('Dumping histograms to %s', histogram_path)
with open(histogram_path, 'w') as json_file:
json_file.write(histogram_result.stdout)
def _run_resource_sizes(args):
"""Main flow to extract and output size data."""
chartjson = _BASE_CHART.copy()
report_func = perf_tests_results_helper.ReportPerfResult
total_sizes = collections.Counter()
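# Reports the raw size of a group and, when requested, its stripped and/or gzipped variants into chartjson.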
def report_sizes(sizes, title, track_stripped, track_compressed):
report_func(chart_data=chartjson,
graph_title=title,
trace_title='size',
value=sizes[_KEY_RAW],
units='bytes')
if track_stripped:
report_func(chart_data=chartjson,
graph_title=title + ' (Stripped)',
trace_title='size',
value=sizes[_KEY_STRIPPED],
units='bytes')
if track_compressed:
report_func(chart_data=chartjson,
graph_title=title + ' (Gzipped)',
trace_title='size',
value=sizes[_KEY_GZIPPED],
units='bytes')
if track_stripped and track_compressed:
report_func(chart_data=chartjson,
graph_title=title + ' (Stripped, Gzipped)',
trace_title='size',
value=sizes[_KEY_STRIPPED_GZIPPED],
units='bytes')
for g in _TRACKED_GROUPS:
sizes = sum(
map(_get_catagorized_filesizes, _visit_paths(args.out_dir, g.paths)),
collections.Counter())
report_sizes(sizes, g.title, g.track_stripped, g.track_compressed)
# Total compressed size is summed over individual compressed sizes, instead
# of concatenating first, then compressing everything. This is done for
# simplicity. It also gives a conservative size estimate (assuming file
# metadata and overheads are negligible).
total_sizes += sizes
report_sizes(total_sizes, 'Total', True, True)
_dump_chart_json(args.output_dir, chartjson)
def main():
"""Parses arguments and runs high level flows."""
argparser = argparse.ArgumentParser(description='Writes LaCrOS size metrics.')
argparser.add_argument('--chromium-output-directory',
dest='out_dir',
required=True,
type=os.path.realpath,
help='Location of the build artifacts.')
output_group = argparser.add_mutually_exclusive_group()
output_group.add_argument('--output-dir',
default='.',
help='Directory to save chartjson to.')
# Accepted to conform to the isolated script interface, but ignored.
argparser.add_argument('--isolated-script-test-filter',
help=argparse.SUPPRESS)
argparser.add_argument('--isolated-script-test-perf-output',
type=os.path.realpath,
help=argparse.SUPPRESS)
output_group.add_argument(
'--isolated-script-test-output',
type=os.path.realpath,
help='File to which results will be written in the simplified JSON '
'output format.')
args = argparser.parse_args()
isolated_script_output = {'valid': False, 'failures': []}
if args.isolated_script_test_output:
test_name = 'lacros_resource_sizes'
args.output_dir = os.path.join(
os.path.dirname(args.isolated_script_test_output), test_name)
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
try:
_run_resource_sizes(args)
isolated_script_output = {'valid': True, 'failures': []}
finally:
if args.isolated_script_test_output:
results_path = os.path.join(args.output_dir, 'test_results.json')
with open(results_path, 'w') as output_file:
json.dump(isolated_script_output, output_file)
with open(args.isolated_script_test_output, 'w') as output_file:
json.dump(isolated_script_output, output_file)
if __name__ == '__main__':
main()
| []
| []
| [
"CHECKOUT_SOURCE_ROOT"
]
| [] | ["CHECKOUT_SOURCE_ROOT"] | python | 1 | 0 | |
pkg/controllers/management/node/controller.go | package node
import (
"context"
"crypto/sha256"
"encoding/base32"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"strings"
"time"
"github.com/pkg/errors"
"github.com/rancher/norman/objectclient"
"github.com/rancher/norman/types/convert"
"github.com/rancher/norman/types/values"
"github.com/rancher/rancher/pkg/api/norman/customization/clusterregistrationtokens"
v32 "github.com/rancher/rancher/pkg/apis/management.cattle.io/v3"
"github.com/rancher/rancher/pkg/clustermanager"
"github.com/rancher/rancher/pkg/controllers/management/drivers/nodedriver"
"github.com/rancher/rancher/pkg/encryptedstore"
corev1 "github.com/rancher/rancher/pkg/generated/norman/core/v1"
v3 "github.com/rancher/rancher/pkg/generated/norman/management.cattle.io/v3"
"github.com/rancher/rancher/pkg/jailer"
"github.com/rancher/rancher/pkg/kubectl"
"github.com/rancher/rancher/pkg/namespace"
nodehelper "github.com/rancher/rancher/pkg/node"
"github.com/rancher/rancher/pkg/nodeconfig"
"github.com/rancher/rancher/pkg/ref"
"github.com/rancher/rancher/pkg/systemaccount"
"github.com/rancher/rancher/pkg/taints"
"github.com/rancher/rancher/pkg/types/config"
"github.com/rancher/rancher/pkg/user"
rketypes "github.com/rancher/rke/types"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh"
v1 "k8s.io/api/core/v1"
kerror "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
)
const (
defaultEngineInstallURL = "https://releases.rancher.com/install-docker/17.03.2.sh"
amazonec2 = "amazonec2"
userNodeRemoveCleanupAnnotation = "nodes.management.cattle.io/user-node-remove-cleanup"
)
// aliases maps Schema field => driver field
// The opposite of this lives in pkg/controllers/management/drivers/nodedriver/machine_driver.go
var aliases = map[string]map[string]string{
"aliyunecs": map[string]string{"sshKeyContents": "sshKeypath"},
"amazonec2": map[string]string{"sshKeyContents": "sshKeypath", "userdata": "userdata"},
"azure": map[string]string{"customData": "customData"},
"digitalocean": map[string]string{"sshKeyContents": "sshKeyPath", "userdata": "userdata"},
"exoscale": map[string]string{"sshKey": "sshKey", "userdata": "userdata"},
"openstack": map[string]string{"cacert": "cacert", "privateKeyFile": "privateKeyFile", "userDataFile": "userDataFile"},
"otc": map[string]string{"privateKeyFile": "privateKeyFile"},
"packet": map[string]string{"userdata": "userdata"},
"vmwarevsphere": map[string]string{"cloudConfig": "cloud-config"},
}
func Register(ctx context.Context, management *config.ManagementContext, clusterManager *clustermanager.Manager) {
secretStore, err := nodeconfig.NewStore(management.Core.Namespaces(""), management.Core)
if err != nil {
logrus.Fatal(err)
}
nodeClient := management.Management.Nodes("")
nodeLifecycle := &Lifecycle{
ctx: ctx,
systemAccountManager: systemaccount.NewManager(management),
secretStore: secretStore,
nodeClient: nodeClient,
nodeTemplateClient: management.Management.NodeTemplates(""),
nodePoolLister: management.Management.NodePools("").Controller().Lister(),
nodeTemplateGenericClient: management.Management.NodeTemplates("").ObjectClient().UnstructuredClient(),
configMapGetter: management.K8sClient.CoreV1(),
clusterLister: management.Management.Clusters("").Controller().Lister(),
schemaLister: management.Management.DynamicSchemas("").Controller().Lister(),
credLister: management.Core.Secrets("").Controller().Lister(),
userManager: management.UserManager,
clusterManager: clusterManager,
devMode: os.Getenv("CATTLE_DEV_MODE") != "",
}
nodeClient.AddLifecycle(ctx, "node-controller", nodeLifecycle)
}
type Lifecycle struct {
ctx context.Context
systemAccountManager *systemaccount.Manager
secretStore *encryptedstore.GenericEncryptedStore
nodeTemplateGenericClient objectclient.GenericClient
nodeClient v3.NodeInterface
nodeTemplateClient v3.NodeTemplateInterface
nodePoolLister v3.NodePoolLister
configMapGetter typedv1.ConfigMapsGetter
clusterLister v3.ClusterLister
schemaLister v3.DynamicSchemaLister
credLister corev1.SecretLister
userManager user.Manager
clusterManager *clustermanager.Manager
devMode bool
}
func (m *Lifecycle) setupCustom(obj *v3.Node) {
obj.Status.NodeConfig = &rketypes.RKEConfigNode{
NodeName: obj.Namespace + ":" + obj.Name,
HostnameOverride: obj.Spec.RequestedHostname,
Address: obj.Spec.CustomConfig.Address,
InternalAddress: obj.Spec.CustomConfig.InternalAddress,
User: obj.Spec.CustomConfig.User,
DockerSocket: obj.Spec.CustomConfig.DockerSocket,
SSHKey: obj.Spec.CustomConfig.SSHKey,
Labels: obj.Spec.CustomConfig.Label,
Port: "22",
Role: roles(obj),
Taints: taints.GetRKETaintsFromStrings(obj.Spec.CustomConfig.Taints),
}
if obj.Status.NodeConfig.User == "" {
obj.Status.NodeConfig.User = "root"
}
obj.Status.InternalNodeStatus.Addresses = []v1.NodeAddress{
{
Type: v1.NodeInternalIP,
Address: obj.Status.NodeConfig.Address,
},
}
}
func isCustom(obj *v3.Node) bool {
return obj.Spec.CustomConfig != nil && obj.Spec.CustomConfig.Address != ""
}
func (m *Lifecycle) setWaiting(node *v3.Node) {
v32.NodeConditionRegistered.IsUnknown(node)
v32.NodeConditionRegistered.Message(node, "waiting to register with Kubernetes")
}
func (m *Lifecycle) Create(obj *v3.Node) (runtime.Object, error) {
if isCustom(obj) {
m.setupCustom(obj)
newObj, err := v32.NodeConditionInitialized.Once(obj, func() (runtime.Object, error) {
if err := validateCustomHost(obj); err != nil {
return obj, err
}
m.setWaiting(obj)
return obj, nil
})
return newObj.(*v3.Node), err
}
if obj.Spec.NodeTemplateName == "" {
return obj, nil
}
newObj, err := v32.NodeConditionInitialized.Once(obj, func() (runtime.Object, error) {
logrus.Debugf("Called v3.NodeConditionInitialized.Once for [%s] in namespace [%s]", obj.Name, obj.Namespace)
// Ensure jail is created first, else the function `NewNodeConfig` will create the full jail path (including
// parent jail directory) and CreateJail will remove the directory as it does not contain a done file
if !m.devMode {
err := jailer.CreateJail(obj.Namespace)
if err != nil {
return nil, errors.WithMessage(err, "node create jail error")
}
}
nodeConfig, err := nodeconfig.NewNodeConfig(m.secretStore, obj)
if err != nil {
return obj, errors.WithMessagef(err, "failed to create node driver config for node [%v]", obj.Name)
}
defer nodeConfig.Cleanup()
err = m.refreshNodeConfig(nodeConfig, obj)
if err != nil {
return nil, errors.WithMessagef(err, "unable to create config for node %v", obj.Name)
}
template, err := m.getNodeTemplate(obj.Spec.NodeTemplateName)
if err != nil {
return obj, err
}
obj.Status.NodeTemplateSpec = &template.Spec
if obj.Spec.RequestedHostname == "" {
obj.Spec.RequestedHostname = obj.Name
}
if obj.Status.NodeTemplateSpec.EngineInstallURL == "" {
obj.Status.NodeTemplateSpec.EngineInstallURL = defaultEngineInstallURL
}
return obj, nil
})
return newObj.(*v3.Node), err
}
func (m *Lifecycle) getNodeTemplate(nodeTemplateName string) (*v3.NodeTemplate, error) {
ns, n := ref.Parse(nodeTemplateName)
logrus.Debugf("getNodeTemplate parsed [%s] to ns: [%s] and n: [%s]", nodeTemplateName, ns, n)
return m.nodeTemplateClient.GetNamespaced(ns, n, metav1.GetOptions{})
}
func (m *Lifecycle) getNodePool(nodePoolName string) (*v3.NodePool, error) {
ns, p := ref.Parse(nodePoolName)
return m.nodePoolLister.Get(ns, p)
}
func (m *Lifecycle) Remove(obj *v3.Node) (runtime.Object, error) {
if obj.Status.NodeTemplateSpec == nil {
return obj, nil
}
newObj, err := v32.NodeConditionRemoved.DoUntilTrue(obj, func() (runtime.Object, error) {
found, err := m.isNodeInAppliedSpec(obj)
if err != nil {
return obj, err
}
if found {
return obj, errors.New("waiting for node to be removed from cluster")
}
if !m.devMode {
err := jailer.CreateJail(obj.Namespace)
if err != nil {
return nil, errors.WithMessage(err, "node remove jail error")
}
}
config, err := nodeconfig.NewNodeConfig(m.secretStore, obj)
if err != nil {
return obj, err
}
if err := config.Restore(); err != nil {
return obj, err
}
defer config.Remove()
err = m.refreshNodeConfig(config, obj)
if err != nil {
return nil, errors.WithMessagef(err, "unable to refresh config for node %v", obj.Name)
}
mExists, err := nodeExists(config.Dir(), obj)
if err != nil {
return obj, err
}
if mExists {
logrus.Infof("Removing node %s", obj.Spec.RequestedHostname)
if err := m.drainNode(obj); err != nil {
return obj, err
}
if err := deleteNode(config.Dir(), obj); err != nil {
return obj, err
}
logrus.Infof("Removing node %s done", obj.Spec.RequestedHostname)
}
return obj, nil
})
if err != nil {
return newObj.(*v3.Node), err
}
return m.deleteV1Node(newObj.(*v3.Node))
}
func (m *Lifecycle) provision(driverConfig, nodeDir string, obj *v3.Node) (*v3.Node, error) {
configRawMap := map[string]interface{}{}
if err := json.Unmarshal([]byte(driverConfig), &configRawMap); err != nil {
return obj, errors.Wrap(err, "failed to unmarshal node config")
}
// Since we know this will take a long time persist so user sees status
obj, err := m.nodeClient.Update(obj)
if err != nil {
return obj, err
}
err = aliasToPath(obj.Status.NodeTemplateSpec.Driver, configRawMap, obj.Namespace)
if err != nil {
return obj, err
}
createCommandsArgs := buildCreateCommand(obj, configRawMap)
cmd, err := buildCommand(nodeDir, obj, createCommandsArgs)
if err != nil {
return obj, err
}
logrus.Infof("Provisioning node %s", obj.Spec.RequestedHostname)
stdoutReader, stderrReader, err := startReturnOutput(cmd)
if err != nil {
return obj, err
}
defer stdoutReader.Close()
defer stderrReader.Close()
defer cmd.Wait()
obj, err = m.reportStatus(stdoutReader, stderrReader, obj)
if err != nil {
return obj, err
}
if err := cmd.Wait(); err != nil {
return obj, err
}
if err := m.deployAgent(nodeDir, obj); err != nil {
return obj, err
}
logrus.Infof("Provisioning node %s done", obj.Spec.RequestedHostname)
return obj, nil
}
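// aliasToPath writes file-content driver fields (SSH keys, userdata, cloud-init data) to disk under the node's jail directory and rewrites the config so the driver is given file paths instead of raw contents.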
func aliasToPath(driver string, config map[string]interface{}, ns string) error {
devMode := os.Getenv("CATTLE_DEV_MODE") != ""
baseDir := path.Join("/opt/jail", ns)
if devMode {
baseDir = os.TempDir()
}
// Check if the required driver has aliased fields
if fields, ok := aliases[driver]; ok {
hasher := sha256.New()
for schemaField, driverField := range fields {
if fileRaw, ok := config[schemaField]; ok {
fileContents := fileRaw.(string)
// Delete our aliased fields
delete(config, schemaField)
if fileContents == "" {
continue
}
fileName := driverField
if ok := nodedriver.SSHKeyFields[schemaField]; ok {
fileName = "id_rsa"
}
// The ending newline gets stripped, add em back
if !strings.HasSuffix(fileContents, "\n") {
fileContents = fileContents + "\n"
}
hasher.Reset()
hasher.Write([]byte(fileContents))
sha := base32.StdEncoding.WithPadding(-1).EncodeToString(hasher.Sum(nil))[:10]
fileDir := path.Join(baseDir, sha)
// Delete the fileDir path if it's not a directory
if info, err := os.Stat(fileDir); err == nil && !info.IsDir() {
if err := os.Remove(fileDir); err != nil {
return err
}
}
err := os.MkdirAll(fileDir, 0755)
if err != nil {
return err
}
fullPath := path.Join(fileDir, fileName)
err = ioutil.WriteFile(fullPath, []byte(fileContents), 0600)
if err != nil {
return err
}
// Add the field and path
if devMode {
config[driverField] = fullPath
} else {
config[driverField] = path.Join("/", sha, fileName)
}
}
}
}
return nil
}
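// deployAgent runs the cluster registration node command on the freshly provisioned machine so it registers with the cluster as an agent.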
func (m *Lifecycle) deployAgent(nodeDir string, obj *v3.Node) error {
token, err := m.systemAccountManager.GetOrCreateSystemClusterToken(obj.Namespace)
if err != nil {
return err
}
drun := clusterregistrationtokens.NodeCommand(token, nil)
args := buildAgentCommand(obj, drun)
cmd, err := buildCommand(nodeDir, obj, args)
if err != nil {
return err
}
output, err := cmd.CombinedOutput()
if err != nil {
return errors.Wrap(err, string(output))
}
return nil
}
func (m *Lifecycle) ready(obj *v3.Node) (*v3.Node, error) {
config, err := nodeconfig.NewNodeConfig(m.secretStore, obj)
if err != nil {
return obj, err
}
defer config.Cleanup()
if err := config.Restore(); err != nil {
return obj, err
}
err = m.refreshNodeConfig(config, obj)
if err != nil {
return nil, errors.WithMessagef(err, "unable to refresh config for node %v", obj.Name)
}
driverConfig, err := config.DriverConfig()
if err != nil {
return nil, err
}
// Provision in the background so we can poll and save the config
done := make(chan error)
go func() {
newObj, err := m.provision(driverConfig, config.Dir(), obj)
obj = newObj
done <- err
}()
// Poll and save config
outer:
for {
select {
case err = <-done:
break outer
case <-time.After(5 * time.Second):
config.Save()
}
}
newObj, saveError := v32.NodeConditionConfigSaved.Once(obj, func() (runtime.Object, error) {
return m.saveConfig(config, config.FullDir(), obj)
})
obj = newObj.(*v3.Node)
if err == nil {
return obj, saveError
}
return obj, err
}
func (m *Lifecycle) Updated(obj *v3.Node) (runtime.Object, error) {
if cleanupAnnotation, ok := obj.Annotations[userNodeRemoveCleanupAnnotation]; !ok || cleanupAnnotation != "true" {
// finalizer from user-node-remove has to be checked/cleaned
return m.userNodeRemoveCleanup(obj)
}
newObj, err := v32.NodeConditionProvisioned.Once(obj, func() (runtime.Object, error) {
if obj.Status.NodeTemplateSpec == nil {
m.setWaiting(obj)
return obj, nil
}
if !m.devMode {
logrus.Infof("Creating jail for %v", obj.Namespace)
err := jailer.CreateJail(obj.Namespace)
if err != nil {
return nil, errors.WithMessage(err, "node update jail error")
}
}
obj, err := m.ready(obj)
if err == nil {
m.setWaiting(obj)
}
return obj, err
})
return newObj.(*v3.Node), err
}
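// saveConfig persists the machine config and builds the RKE node config (address, SSH credentials, roles, labels and taints) from the node template and node pool.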
func (m *Lifecycle) saveConfig(config *nodeconfig.NodeConfig, nodeDir string, obj *v3.Node) (*v3.Node, error) {
logrus.Infof("Generating and uploading node config %s", obj.Spec.RequestedHostname)
if err := config.Save(); err != nil {
return obj, err
}
ip, err := config.IP()
if err != nil {
return obj, err
}
internalAddress, err := config.InternalIP()
if err != nil {
return obj, err
}
keyPath, err := config.SSHKeyPath()
if err != nil {
return obj, err
}
sshKey, err := getSSHKey(nodeDir, keyPath, obj)
if err != nil {
return obj, err
}
sshUser, err := config.SSHUser()
if err != nil {
return obj, err
}
if err := config.Save(); err != nil {
return obj, err
}
template, err := m.getNodeTemplate(obj.Spec.NodeTemplateName)
if err != nil {
return obj, err
}
pool, err := m.getNodePool(obj.Spec.NodePoolName)
if err != nil {
return obj, err
}
obj.Status.NodeConfig = &rketypes.RKEConfigNode{
NodeName: obj.Namespace + ":" + obj.Name,
Address: ip,
InternalAddress: internalAddress,
User: sshUser,
Role: roles(obj),
HostnameOverride: obj.Spec.RequestedHostname,
SSHKey: sshKey,
Labels: template.Labels,
}
obj.Status.InternalNodeStatus.Addresses = []v1.NodeAddress{
{
Type: v1.NodeInternalIP,
Address: obj.Status.NodeConfig.Address,
},
}
if len(obj.Status.NodeConfig.Role) == 0 {
obj.Status.NodeConfig.Role = []string{"worker"}
}
templateSet := taints.GetKeyEffectTaintSet(template.Spec.NodeTaints)
nodeSet := taints.GetKeyEffectTaintSet(pool.Spec.NodeTaints)
expectTaints := pool.Spec.NodeTaints
for key, ti := range templateSet {
// the expect taints are based on the node pool. so we don't need to set taints with same key and effect by template because
// the taints from node pool should override the taints from template.
if _, ok := nodeSet[key]; !ok {
expectTaints = append(expectTaints, template.Spec.NodeTaints[ti])
}
}
obj.Status.NodeConfig.Taints = taints.GetRKETaintsFromTaints(expectTaints)
return obj, nil
}
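// refreshNodeConfig re-reads the node template (including any cloud credential fields) and updates the stored driver config if it has changed.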
func (m *Lifecycle) refreshNodeConfig(nc *nodeconfig.NodeConfig, obj *v3.Node) error {
template, err := m.getNodeTemplate(obj.Spec.NodeTemplateName)
if err != nil {
return err
}
rawTemplate, err := m.nodeTemplateGenericClient.GetNamespaced(template.Namespace, template.Name, metav1.GetOptions{})
if err != nil {
return err
}
data := rawTemplate.(*unstructured.Unstructured).Object
rawConfig, ok := values.GetValue(data, template.Spec.Driver+"Config")
if !ok {
return fmt.Errorf("refreshNodeConfig: node config not specified for node %v", obj.Name)
}
if err := m.updateRawConfigFromCredential(data, rawConfig, template); err != nil {
logrus.Debugf("refreshNodeConfig: error calling updateRawConfigFromCredential for [%v]: %v", obj.Name, err)
return err
}
var update bool
if template.Spec.Driver == amazonec2 {
setEc2ClusterIDTag(rawConfig, obj.Namespace)
logrus.Debug("refreshNodeConfig: Updating amazonec2 machine config")
//TODO: Update to not be amazon specific, this needs to be moved to the driver
update, err = nc.UpdateAmazonAuth(rawConfig)
if err != nil {
return err
}
}
bytes, err := json.Marshal(rawConfig)
if err != nil {
return errors.Wrap(err, "failed to marshal node driver config")
}
newConfig := string(bytes)
currentConfig, err := nc.DriverConfig()
if err != nil {
return err
}
if currentConfig != newConfig || update {
err = nc.SetDriverConfig(string(bytes))
if err != nil {
return err
}
return nc.Save()
}
return nil
}
func (m *Lifecycle) isNodeInAppliedSpec(node *v3.Node) (bool, error) {
// worker/controlplane nodes can just be immediately deleted
if !node.Spec.Etcd {
return false, nil
}
cluster, err := m.clusterLister.Get("", node.Namespace)
if err != nil {
if kerror.IsNotFound(err) {
return false, nil
}
return false, err
}
if cluster == nil {
return false, nil
}
if cluster.DeletionTimestamp != nil {
return false, nil
}
if cluster.Status.AppliedSpec.RancherKubernetesEngineConfig == nil {
return false, nil
}
for _, rkeNode := range cluster.Status.AppliedSpec.RancherKubernetesEngineConfig.Nodes {
nodeName := rkeNode.NodeName
if len(nodeName) == 0 {
continue
}
if nodeName == fmt.Sprintf("%s:%s", node.Namespace, node.Name) {
return true, nil
}
}
return false, nil
}
func validateCustomHost(obj *v3.Node) error {
if obj.Spec.Imported {
return nil
}
customConfig := obj.Spec.CustomConfig
signer, err := ssh.ParsePrivateKey([]byte(customConfig.SSHKey))
if err != nil {
return errors.Wrapf(err, "sshKey format is invalid")
}
config := &ssh.ClientConfig{
User: customConfig.User,
Auth: []ssh.AuthMethod{
ssh.PublicKeys(signer),
},
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
}
conn, err := ssh.Dial("tcp", customConfig.Address+":22", config)
if err != nil {
return errors.Wrapf(err, "Failed to validate ssh connection to address [%s]", customConfig.Address)
}
defer conn.Close()
return nil
}
func roles(node *v3.Node) []string {
var roles []string
if node.Spec.Etcd {
roles = append(roles, "etcd")
}
if node.Spec.ControlPlane {
roles = append(roles, "controlplane")
}
if node.Spec.Worker {
roles = append(roles, "worker")
}
if len(roles) == 0 {
return []string{"worker"}
}
return roles
}
func (m *Lifecycle) setCredFields(data interface{}, fields map[string]v32.Field, credID string) error {
splitID := strings.Split(credID, ":")
if len(splitID) != 2 {
return fmt.Errorf("invalid credential id %s", credID)
}
cred, err := m.credLister.Get(namespace.GlobalNamespace, splitID[1])
if err != nil {
return err
}
if ans := convert.ToMapInterface(data); len(ans) > 0 {
for key, val := range cred.Data {
splitKey := strings.Split(key, "-")
if len(splitKey) == 2 && strings.HasSuffix(splitKey[0], "Config") {
if _, ok := fields[splitKey[1]]; ok {
ans[splitKey[1]] = string(val)
}
}
}
}
return nil
}
func (m *Lifecycle) updateRawConfigFromCredential(data map[string]interface{}, rawConfig interface{}, template *v3.NodeTemplate) error {
credID := convert.ToString(values.GetValueN(data, "spec", "cloudCredentialName"))
if credID != "" {
existingSchema, err := m.schemaLister.Get("", template.Spec.Driver+"config")
if err != nil {
return err
}
logrus.Debugf("setCredFields for credentialName %s", credID)
err = m.setCredFields(rawConfig, existingSchema.Spec.ResourceFields, credID)
if err != nil {
return errors.Wrap(err, "failed to set credential fields")
}
}
return nil
}
func (m *Lifecycle) deleteV1Node(node *v3.Node) (runtime.Object, error) {
logrus.Debugf("Deleting v1.node for [%v] node", node.Status.NodeName)
if nodehelper.IgnoreNode(node.Status.NodeName, node.Status.NodeLabels) {
logrus.Debugf("Skipping v1.node removal for [%v] node", node.Status.NodeName)
return node, nil
}
if node.Status.NodeName == "" {
return node, nil
}
userClient, err := m.clusterManager.UserContext(node.Namespace)
if err != nil {
if kerror.IsNotFound(err) {
return node, nil
}
return node, err
}
ctx, cancel := context.WithTimeout(context.TODO(), 45*time.Second)
defer cancel()
err = userClient.K8sClient.CoreV1().Nodes().Delete(
ctx, node.Status.NodeName, metav1.DeleteOptions{})
if !kerror.IsNotFound(err) && ctx.Err() != context.DeadlineExceeded {
return node, err
}
return node, nil
}
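// drainNode drains the corresponding Kubernetes node before deletion when the cluster is configured to do so, retrying up to three times and falling back to plain deletion if the drain keeps failing.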
func (m *Lifecycle) drainNode(node *v3.Node) error {
nodeCopy := node.DeepCopy() // copy for cache protection as we do no updating but need things set for the drain
cluster, err := m.clusterLister.Get("", nodeCopy.Namespace)
if err != nil {
if kerror.IsNotFound(err) {
return nil
}
return err
}
if !nodehelper.DrainBeforeDelete(nodeCopy, cluster) {
return nil
}
logrus.Infof("node [%s] requires draining before delete", nodeCopy.Spec.RequestedHostname)
kubeConfig, err := m.getKubeConfig(cluster)
if err != nil {
return fmt.Errorf("node [%s] error getting kubeConfig", nodeCopy.Spec.RequestedHostname)
}
if nodeCopy.Spec.NodeDrainInput == nil {
logrus.Debugf("node [%s] has no NodeDrainInput, creating one with 60s timeout",
nodeCopy.Spec.RequestedHostname)
nodeCopy.Spec.NodeDrainInput = &rketypes.NodeDrainInput{
Force: false,
DeleteLocalData: false,
GracePeriod: 60,
Timeout: 60,
}
}
backoff := wait.Backoff{
Duration: 2 * time.Second,
Factor: 1,
Jitter: 0,
Steps: 3,
}
logrus.Infof("node [%s] attempting to drain, retrying up to 3 times", nodeCopy.Spec.RequestedHostname)
// purposefully ignoring error, if the drain fails this falls back to deleting the node as usual
wait.ExponentialBackoff(backoff, func() (bool, error) {
ctx, cancel := context.WithTimeout(m.ctx, time.Duration(nodeCopy.Spec.NodeDrainInput.Timeout)*time.Second)
defer cancel()
_, msg, err := kubectl.Drain(ctx, kubeConfig, nodeCopy.Status.NodeName, nodehelper.GetDrainFlags(nodeCopy))
if ctx.Err() != nil {
logrus.Errorf("node [%s] kubectl drain failed, retrying: %s", nodeCopy.Spec.RequestedHostname, ctx.Err())
return false, nil
}
if err != nil {
// kubectl failed continue on with delete any way
logrus.Errorf("node [%s] kubectl drain error, retrying: %s", nodeCopy.Spec.RequestedHostname, err)
return false, nil
}
logrus.Infof("node [%s] kubectl drain response: %s", nodeCopy.Spec.RequestedHostname, msg)
return true, nil
})
return nil
}
func (m *Lifecycle) userNodeRemoveCleanup(obj *v3.Node) (runtime.Object, error) {
copy := obj.DeepCopy()
copy.Annotations[userNodeRemoveCleanupAnnotation] = "true"
if hasFinalizerWithPrefix(copy, "clusterscoped.controller.cattle.io/user-node-remove_") {
// user-node-remove controller functionality is now merged into this controller
logrus.Infof("node [%s] has a finalizer for user-node-remove controller and it will be removed",
copy.Spec.RequestedHostname)
copy = removeFinalizerWithPrefix(copy, "clusterscoped.controller.cattle.io/user-node-remove_")
}
return m.nodeClient.Update(copy)
}
func hasFinalizerWithPrefix(node *v3.Node, prefix string) bool {
for _, finalizer := range node.Finalizers {
if strings.HasPrefix(finalizer, prefix) {
return true
}
}
return false
}
func removeFinalizerWithPrefix(node *v3.Node, prefix string) *v3.Node {
var newFinalizers []string
for _, finalizer := range node.Finalizers {
if strings.HasPrefix(finalizer, prefix) {
continue
}
newFinalizers = append(newFinalizers, finalizer)
}
node.SetFinalizers(newFinalizers)
return node
}
| [
"\"CATTLE_DEV_MODE\"",
"\"CATTLE_DEV_MODE\""
]
| []
| [
"CATTLE_DEV_MODE"
]
| [] | ["CATTLE_DEV_MODE"] | go | 1 | 0 | |
app/main/lib/google_client.py | import os
import json
from google.oauth2 import service_account
def get_credentialed_google_client(client):
default_values = {}
if os.path.exists('./google_credentials.json'):
with open('./google_credentials.json') as f:
default_values = json.load(f)
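# Environment variables take precedence; google_credentials.json only supplies fallback values.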
credentials_dict = {
"type": os.getenv("google_credentials_type", default_values.get("type")),
"project_id": os.getenv("google_credentials_project_id", default_values.get("project_id")),
"private_key_id": os.getenv("google_credentials_private_key_id", default_values.get("private_key_id")),
"private_key": os.getenv("google_credentials_private_key", default_values.get("private_key")).replace('\\n', '\n'),
"client_email": os.getenv("google_credentials_client_email", default_values.get("client_email")),
"client_id": os.getenv("google_credentials_client_id", default_values.get("client_id")),
"auth_uri": os.getenv("google_credentials_auth_uri", default_values.get("auth_uri")),
"token_uri": os.getenv("google_credentials_token_uri", default_values.get("token_uri")),
"auth_provider_x509_cert_url": os.getenv("google_credentials_auth_provider_x509_cert_url", default_values.get("auth_provider_x509_cert_url")),
"client_x509_cert_url": os.getenv("google_credentials_client_x509_cert_url", default_values.get("client_x509_cert_url")),
}
try:
credentials = service_account.Credentials.from_service_account_info(credentials_dict)
return client(credentials=credentials)
except ValueError as e:
print(f"Couldn't authenticate to google client: {str(e)}")
return None
| []
| []
| [
"google_credentials_client_id",
"google_credentials_client_email",
"google_credentials_private_key_id",
"google_credentials_project_id",
"google_credentials_auth_uri",
"google_credentials_client_x509_cert_url",
"google_credentials_auth_provider_x509_cert_url",
"google_credentials_type",
"google_credentials_token_uri",
"google_credentials_private_key"
]
| [] | ["google_credentials_client_id", "google_credentials_client_email", "google_credentials_private_key_id", "google_credentials_project_id", "google_credentials_auth_uri", "google_credentials_client_x509_cert_url", "google_credentials_auth_provider_x509_cert_url", "google_credentials_type", "google_credentials_token_uri", "google_credentials_private_key"] | python | 10 | 0 | |
ml_service/pipelines/sales_forecast_verify_train_pipeline.py | import argparse
import sys
import os
from azureml.core import Run, Experiment, Workspace
from ml_service.util.env_variables import Env
from sales_forecast.util.model_helper import get_latest_model
def main():
run = Run.get_context()
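# An "OfflineRun" id means the script is running outside an AzureML pipeline, so workspace settings are read from environment variables (optionally loaded from a .env file).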
if (run.id.startswith('OfflineRun')):
from dotenv import load_dotenv
load_dotenv()
sources_dir = os.environ.get("SOURCES_DIR_TRAIN")
if (sources_dir is None):
sources_dir = 'sales_forecast'
workspace_name = os.environ.get("WORKSPACE_NAME")
experiment_name = os.environ.get("EXPERIMENT_NAME")
resource_group = os.environ.get("RESOURCE_GROUP")
subscription_id = os.environ.get("SUBSCRIPTION_ID")
build_id = os.environ.get('BUILD_BUILDID')
aml_workspace = Workspace.get(
name=workspace_name,
subscription_id=subscription_id,
resource_group=resource_group
)
ws = aml_workspace
exp = Experiment(ws, experiment_name)
else:
exp = run.experiment
e = Env()
parser = argparse.ArgumentParser("register")
parser.add_argument(
"--build_id",
type=str,
help="The Build ID of the build triggering this pipeline run",
)
parser.add_argument(
"--output_model_version_file",
type=str,
default="model_version.txt",
help="Name of a file to write model version to"
)
args = parser.parse_args()
if (args.build_id is not None):
build_id = args.build_id
model_name = e.model_name
try:
tag_name = 'BuildId'
model = get_latest_model(
model_name, tag_name, build_id, exp.workspace)
if (model is not None):
print("Model was registered for this build.")
if (model is None):
print("Model was not registered for this run.")
sys.exit(1)
except Exception as e:
print(e)
print("Model was not registered for this run.")
sys.exit(1)
# Save the Model Version for other AzDO jobs after script is complete
if args.output_model_version_file is not None:
with open(args.output_model_version_file, "w") as out_file:
out_file.write(str(model.version))
if __name__ == '__main__':
main()
| []
| []
| [
"SOURCES_DIR_TRAIN",
"RESOURCE_GROUP",
"WORKSPACE_NAME",
"EXPERIMENT_NAME",
"SUBSCRIPTION_ID",
"BUILD_BUILDID"
]
| [] | ["SOURCES_DIR_TRAIN", "RESOURCE_GROUP", "WORKSPACE_NAME", "EXPERIMENT_NAME", "SUBSCRIPTION_ID", "BUILD_BUILDID"] | python | 6 | 0 | |
get_alliances_alliance_id_corporations_internal_server_error.go | /*
* EVE Swagger Interface
*
* An OpenAPI for EVE Online
*
* OpenAPI spec version: 0.3.10.dev12
*
* Generated by: https://github.com/swagger-api/swagger-codegen.git
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package evesi
/* Internal server error */
type GetAlliancesAllianceIdCorporationsInternalServerError struct {
/* Internal server error message */
Error_ string `json:"error,omitempty"`
}
| []
| []
| []
| [] | [] | go | null | null | null |
src/main/java/com/google/devtools/build/lib/sandbox/DarwinSandboxedSpawnRunner.java | // Copyright 2014 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.devtools.build.lib.sandbox;
import static java.nio.charset.StandardCharsets.UTF_8;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.io.ByteStreams;
import com.google.devtools.build.lib.actions.ExecException;
import com.google.devtools.build.lib.actions.ExecutionStrategy;
import com.google.devtools.build.lib.actions.Spawn;
import com.google.devtools.build.lib.actions.SpawnActionContext;
import com.google.devtools.build.lib.buildtool.BuildRequest;
import com.google.devtools.build.lib.exec.SpawnResult;
import com.google.devtools.build.lib.exec.apple.XCodeLocalEnvProvider;
import com.google.devtools.build.lib.exec.local.LocalEnvProvider;
import com.google.devtools.build.lib.runtime.CommandEnvironment;
import com.google.devtools.build.lib.shell.Command;
import com.google.devtools.build.lib.shell.CommandException;
import com.google.devtools.build.lib.shell.CommandResult;
import com.google.devtools.build.lib.util.OS;
import com.google.devtools.build.lib.vfs.FileSystem;
import com.google.devtools.build.lib.vfs.Path;
import com.google.devtools.build.lib.vfs.PathFragment;
import java.io.BufferedWriter;
import java.io.File;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
/** Spawn runner that uses Darwin (macOS) sandboxing to execute a process. */
@ExecutionStrategy(
name = {"sandboxed", "darwin-sandbox"},
contextType = SpawnActionContext.class
)
final class DarwinSandboxedSpawnRunner extends AbstractSandboxSpawnRunner {
private static final String SANDBOX_EXEC = "/usr/bin/sandbox-exec";
public static boolean isSupported(CommandEnvironment cmdEnv) {
if (OS.getCurrent() != OS.DARWIN) {
return false;
}
if (!ProcessWrapperRunner.isSupported(cmdEnv)) {
return false;
}
List<String> args = new ArrayList<>();
args.add(SANDBOX_EXEC);
args.add("-p");
args.add("(version 1) (allow default)");
args.add("/usr/bin/true");
ImmutableMap<String, String> env = ImmutableMap.of();
File cwd = new File("/usr/bin");
Command cmd = new Command(args.toArray(new String[0]), env, cwd);
try {
cmd.execute(
/* stdin */ new byte[] {},
Command.NO_OBSERVER,
ByteStreams.nullOutputStream(),
ByteStreams.nullOutputStream(),
/* killSubprocessOnInterrupt */ true);
} catch (CommandException e) {
return false;
}
return true;
}
private final Path execRoot;
private final boolean allowNetwork;
private final String productName;
private final Path processWrapper;
private final int timeoutGraceSeconds;
/**
* The set of directories that always should be writable, independent of the Spawn itself.
*
* <p>We cache this, because creating it involves executing {@code getconf}, which is expensive.
*/
private final ImmutableSet<Path> alwaysWritableDirs;
private final LocalEnvProvider localEnvProvider;
DarwinSandboxedSpawnRunner(
CommandEnvironment cmdEnv,
BuildRequest buildRequest,
Path sandboxBase,
String productName,
int timeoutGraceSeconds)
throws IOException {
super(
cmdEnv,
sandboxBase,
buildRequest.getOptions(SandboxOptions.class));
this.execRoot = cmdEnv.getExecRoot();
this.allowNetwork = SandboxHelpers.shouldAllowNetwork(cmdEnv.getOptions());
this.productName = productName;
this.alwaysWritableDirs = getAlwaysWritableDirs(cmdEnv.getDirectories().getFileSystem());
this.processWrapper = ProcessWrapperRunner.getProcessWrapper(cmdEnv);
this.localEnvProvider = new XCodeLocalEnvProvider();
this.timeoutGraceSeconds = timeoutGraceSeconds;
}
private static void addPathToSetIfExists(FileSystem fs, Set<Path> paths, String path)
throws IOException {
if (path != null) {
addPathToSetIfExists(paths, fs.getPath(path));
}
}
private static void addPathToSetIfExists(Set<Path> paths, Path path) throws IOException {
if (path.exists()) {
paths.add(path.resolveSymbolicLinks());
}
}
private static ImmutableSet<Path> getAlwaysWritableDirs(FileSystem fs) throws IOException {
HashSet<Path> writableDirs = new HashSet<>();
addPathToSetIfExists(fs, writableDirs, "/dev");
addPathToSetIfExists(fs, writableDirs, System.getenv("TMPDIR"));
addPathToSetIfExists(fs, writableDirs, "/tmp");
addPathToSetIfExists(fs, writableDirs, "/private/tmp");
addPathToSetIfExists(fs, writableDirs, "/private/var/tmp");
// On macOS, in addition to what is specified in $TMPDIR, two other temporary directories may be
// written to by processes. We have to get their location by calling "getconf".
addPathToSetIfExists(fs, writableDirs, getConfStr("DARWIN_USER_TEMP_DIR"));
addPathToSetIfExists(fs, writableDirs, getConfStr("DARWIN_USER_CACHE_DIR"));
// ~/Library/Cache and ~/Library/Logs need to be writable (cf. issue #2231).
Path homeDir = fs.getPath(System.getProperty("user.home"));
addPathToSetIfExists(writableDirs, homeDir.getRelative("Library/Cache"));
addPathToSetIfExists(writableDirs, homeDir.getRelative("Library/Logs"));
// Certain Xcode tools expect to be able to write to this path.
addPathToSetIfExists(writableDirs, homeDir.getRelative("Library/Developer"));
return ImmutableSet.copyOf(writableDirs);
}
/**
* Returns the value of a POSIX or X/Open system configuration variable.
*/
private static String getConfStr(String confVar) throws IOException {
String[] commandArr = new String[2];
commandArr[0] = "/usr/bin/getconf";
commandArr[1] = confVar;
Command cmd = new Command(commandArr);
CommandResult res;
try {
res = cmd.execute();
} catch (CommandException e) {
throw new IOException("getconf failed", e);
}
return new String(res.getStdout(), UTF_8).trim();
}
@Override
protected SpawnResult actuallyExec(Spawn spawn, SpawnExecutionPolicy policy)
throws ExecException, IOException, InterruptedException {
// Each invocation of "exec" gets its own sandbox.
Path sandboxPath = getSandboxRoot();
Path sandboxExecRoot = sandboxPath.getRelative("execroot").getRelative(execRoot.getBaseName());
Map<String, String> spawnEnvironment =
localEnvProvider.rewriteLocalEnv(spawn.getEnvironment(), execRoot, productName);
final HashSet<Path> writableDirs = new HashSet<>(alwaysWritableDirs);
ImmutableSet<Path> extraWritableDirs = getWritableDirs(sandboxExecRoot, spawnEnvironment);
writableDirs.addAll(extraWritableDirs);
ImmutableSet<PathFragment> outputs = SandboxHelpers.getOutputFiles(spawn);
final Path sandboxConfigPath = sandboxPath.getRelative("sandbox.sb");
int timeoutSeconds = (int) TimeUnit.MILLISECONDS.toSeconds(policy.getTimeoutMillis());
List<String> arguments =
computeCommandLine(spawn, timeoutSeconds, sandboxConfigPath, timeoutGraceSeconds);
Map<String, String> environment =
localEnvProvider.rewriteLocalEnv(spawn.getEnvironment(), execRoot, productName);
boolean allowNetworkForThisSpawn = allowNetwork || SandboxHelpers.shouldAllowNetwork(spawn);
SandboxedSpawn sandbox = new SymlinkedSandboxedSpawn(
sandboxPath,
sandboxExecRoot,
arguments,
environment,
SandboxHelpers.getInputFiles(spawn, policy, execRoot),
outputs,
writableDirs) {
@Override
public void createFileSystem() throws IOException {
super.createFileSystem();
writeConfig(
sandboxConfigPath, writableDirs, getInaccessiblePaths(), allowNetworkForThisSpawn);
}
};
return runSpawn(spawn, sandbox, policy, execRoot, timeoutSeconds);
}
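// Builds the final command line: sandbox-exec applies the generated profile, then the process wrapper enforces the timeout around the actual spawn arguments.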
private List<String> computeCommandLine(
Spawn spawn, int timeoutSeconds, Path sandboxConfigPath, int timeoutGraceSeconds) {
List<String> commandLineArgs = new ArrayList<>();
commandLineArgs.add(SANDBOX_EXEC);
commandLineArgs.add("-f");
commandLineArgs.add(sandboxConfigPath.getPathString());
commandLineArgs.addAll(
ProcessWrapperRunner.getCommandLine(
processWrapper, spawn.getArguments(), timeoutSeconds, timeoutGraceSeconds));
return commandLineArgs;
}
private void writeConfig(
Path sandboxConfigPath,
Set<Path> writableDirs,
Set<Path> inaccessiblePaths,
boolean allowNetwork) throws IOException {
try (PrintWriter out =
new PrintWriter(
new BufferedWriter(
new OutputStreamWriter(sandboxConfigPath.getOutputStream(), UTF_8)))) {
// Note: In Apple's sandbox configuration language, the *last* matching rule wins.
out.println("(version 1)");
out.println("(debug deny)");
out.println("(allow default)");
if (!allowNetwork) {
out.println("(deny network*)");
out.println("(allow network* (local ip \"localhost:*\"))");
out.println("(allow network* (remote ip \"localhost:*\"))");
out.println("(allow network* (remote unix-socket))");
}
// By default, everything is read-only.
out.println("(deny file-write*)");
out.println("(allow file-write*");
for (Path path : writableDirs) {
out.println(" (subpath \"" + path.getPathString() + "\")");
}
out.println(")");
if (!inaccessiblePaths.isEmpty()) {
out.println("(deny file-read*");
// The sandbox configuration file is not part of a cache key and sandbox-exec doesn't care
// about ordering of paths in expressions, so it's fine if the iteration order is random.
for (Path inaccessiblePath : inaccessiblePaths) {
out.println(" (subpath \"" + inaccessiblePath + "\")");
}
out.println(")");
}
}
}
@Override
protected String getName() {
return "darwin-sandbox";
}
}
| [
"\"TMPDIR\""
]
| []
| [
"TMPDIR"
]
| [] | ["TMPDIR"] | java | 1 | 0 | |
util/util.go | /*
Copyright 2015 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"os"
)
var CPMBASE string
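// GetBase returns the CPM base directory, preferring the CPMBASE environment variable and defaulting to /var/cpm; the result is cached in CPMBASE after the first call.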
func GetBase() string {
if CPMBASE == "" {
CPMBASE = os.Getenv("CPMBASE")
if CPMBASE == "" {
CPMBASE = "/var/cpm"
}
}
return CPMBASE
}
| [
"\"CPMBASE\""
]
| []
| [
"CPMBASE"
]
| [] | ["CPMBASE"] | go | 1 | 0 | |
odoo/tools/config.py | #odoo.loggers.handlers. -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import configparser as ConfigParser
import errno
import logging
import optparse
import glob
import os
import sys
import tempfile
import warnings
import odoo
from os.path import expandvars, expanduser, abspath, realpath
from .. import release, conf, loglevels
from . import appdirs
from passlib.context import CryptContext
crypt_context = CryptContext(schemes=['pbkdf2_sha512', 'plaintext'],
deprecated=['plaintext'])
class MyOption (optparse.Option, object):
""" optparse Option with two additional attributes.
The list of command line options (getopt.Option) is used to create the
list of the configuration file options. When reading the file, and then
reading the command line arguments, we don't want optparse.parse results
to override the configuration file values. But if we provide default
values to optparse, optparse will return them and we can't know if they
were really provided by the user or not. A solution is to not use
optparse's default attribute, but use a custom one (that will be copied
to create the default values of the configuration file).
"""
def __init__(self, *opts, **attrs):
self.my_default = attrs.pop('my_default', None)
super(MyOption, self).__init__(*opts, **attrs)
DEFAULT_LOG_HANDLER = ':INFO'
def _get_default_datadir():
home = os.path.expanduser('~')
if os.path.isdir(home):
func = appdirs.user_data_dir
else:
if sys.platform in ['win32', 'darwin']:
func = appdirs.site_data_dir
else:
func = lambda **kwarg: "/var/lib/%s" % kwarg['appname'].lower()
# No "version" kwarg as session and filestore paths are shared against series
return func(appname=release.product_name, appauthor=release.author)
def _deduplicate_loggers(loggers):
""" Avoid saving multiple logging levels for the same loggers to a save
file, that just takes space and the list can potentially grow unbounded
    if for some odd reason people use :option:`--save` all the time.
"""
# dict(iterable) -> the last item of iterable for any given key wins,
# which is what we want and expect. Output order should not matter as
# there are no duplicates within the output sequence
return (
'{}:{}'.format(logger, level)
for logger, level in dict(it.split(':') for it in loggers).items()
)
class configmanager(object):
def __init__(self, fname=None):
"""Constructor.
:param fname: a shortcut allowing to instantiate :class:`configmanager`
from Python code without resorting to environment
variable
"""
# Options not exposed on the command line. Command line options will be added
# from optparse's parser.
self.options = {
'admin_passwd': 'admin',
'csv_internal_sep': ',',
'publisher_warranty_url': 'http://services.openerp.com/publisher-warranty/',
'reportgz': False,
'root_path': None,
}
# Not exposed in the configuration file.
self.blacklist_for_save = set([
'publisher_warranty_url', 'load_language', 'root_path',
'init', 'save', 'config', 'update', 'stop_after_init', 'dev_mode', 'shell_interface'
])
# dictionary mapping option destination (keys in self.options) to MyOptions.
self.casts = {}
self.misc = {}
self.config_file = fname
self._LOGLEVELS = dict([
(getattr(loglevels, 'LOG_%s' % x), getattr(logging, x))
for x in ('CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET')
])
version = "%s %s" % (release.description, release.version)
self.parser = parser = optparse.OptionParser(version=version, option_class=MyOption)
# Server startup config
group = optparse.OptionGroup(parser, "Common options")
group.add_option("-c", "--config", dest="config", help="specify alternate config file")
group.add_option("-s", "--save", action="store_true", dest="save", default=False,
help="save configuration to ~/.odoorc (or to ~/.openerp_serverrc if it exists)")
group.add_option("-i", "--init", dest="init", help="install one or more modules (comma-separated list, use \"all\" for all modules), requires -d")
group.add_option("-u", "--update", dest="update",
help="update one or more modules (comma-separated list, use \"all\" for all modules). Requires -d.")
group.add_option("--without-demo", dest="without_demo",
help="disable loading demo data for modules to be installed (comma-separated, use \"all\" for all modules). Requires -d and -i. Default is %default",
my_default=False)
group.add_option("-P", "--import-partial", dest="import_partial", my_default='',
help="Use this for big data importation, if it crashes you will be able to continue at the current state. Provide a filename to store intermediate importation states.")
group.add_option("--pidfile", dest="pidfile", help="file where the server pid will be stored")
group.add_option("--addons-path", dest="addons_path",
help="specify additional addons paths (separated by commas).",
action="callback", callback=self._check_addons_path, nargs=1, type="string")
group.add_option("--upgrade-path", dest="upgrade_path",
help="specify an additional upgrade path.",
action="callback", callback=self._check_upgrade_path, nargs=1, type="string")
group.add_option("--load", dest="server_wide_modules", help="Comma-separated list of server-wide modules.", my_default='base,web')
group.add_option("-D", "--data-dir", dest="data_dir", my_default=_get_default_datadir(),
help="Directory where to store Odoo data")
parser.add_option_group(group)
# HTTP
group = optparse.OptionGroup(parser, "HTTP Service Configuration")
group.add_option("--http-interface", dest="http_interface", my_default='',
help="Listen interface address for HTTP services. "
"Keep empty to listen on all interfaces (0.0.0.0)")
group.add_option("-p", "--http-port", dest="http_port", my_default=8069,
help="Listen port for the main HTTP service", type="int", metavar="PORT")
group.add_option("--longpolling-port", dest="longpolling_port", my_default=8072,
help="Listen port for the longpolling HTTP service", type="int", metavar="PORT")
group.add_option("--no-http", dest="http_enable", action="store_false", my_default=True,
help="Disable the HTTP and Longpolling services entirely")
group.add_option("--proxy-mode", dest="proxy_mode", action="store_true", my_default=False,
help="Activate reverse proxy WSGI wrappers (headers rewriting) "
"Only enable this when running behind a trusted web proxy!")
# HTTP: hidden backwards-compatibility for "*xmlrpc*" options
hidden = optparse.SUPPRESS_HELP
group.add_option("--xmlrpc-interface", dest="http_interface", help=hidden)
group.add_option("--xmlrpc-port", dest="http_port", type="int", help=hidden)
group.add_option("--no-xmlrpc", dest="http_enable", action="store_false", help=hidden)
parser.add_option_group(group)
# WEB
group = optparse.OptionGroup(parser, "Web interface Configuration")
group.add_option("--db-filter", dest="dbfilter", my_default='', metavar="REGEXP",
help="Regular expressions for filtering available databases for Web UI. "
"The expression can use %d (domain) and %h (host) placeholders.")
parser.add_option_group(group)
# Testing Group
group = optparse.OptionGroup(parser, "Testing Configuration")
group.add_option("--test-file", dest="test_file", my_default=False,
help="Launch a python test file.")
group.add_option("--test-enable", action="callback", callback=self._test_enable_callback,
dest='test_enable',
help="Enable unit tests.")
group.add_option("--test-tags", dest="test_tags",
help="Comma-separated list of specs to filter which tests to execute. Enable unit tests if set. "
"A filter spec has the format: [-][tag][/module][:class][.method] "
"The '-' specifies if we want to include or exclude tests matching this spec. "
"The tag will match tags added on a class with a @tagged decorator "
"(all Test classes have 'standard' and 'at_install' tags "
"until explicitly removed, see the decorator documentation). "
"'*' will match all tags. "
"If tag is omitted on include mode, its value is 'standard'. "
"If tag is omitted on exclude mode, its value is '*'. "
"The module, class, and method will respectively match the module name, test class name and test method name. "
"Example: --test-tags :TestClass.test_func,/test_module,external "
"Filtering and executing the tests happens twice: right "
"after each module installation/update and at the end "
"of the modules loading. At each stage tests are filtered "
"by --test-tags specs and additionally by dynamic specs "
"'at_install' and 'post_install' correspondingly.")
group.add_option("--screencasts", dest="screencasts", action="store", my_default=None,
metavar='DIR',
help="Screencasts will go in DIR/{db_name}/screencasts.")
temp_tests_dir = os.path.join(tempfile.gettempdir(), 'odoo_tests')
group.add_option("--screenshots", dest="screenshots", action="store", my_default=temp_tests_dir,
metavar='DIR',
help="Screenshots will go in DIR/{db_name}/screenshots. Defaults to %s." % temp_tests_dir)
parser.add_option_group(group)
# Logging Group
group = optparse.OptionGroup(parser, "Logging Configuration")
group.add_option("--logfile", dest="logfile", help="file where the server log will be stored")
group.add_option("--syslog", action="store_true", dest="syslog", my_default=False, help="Send the log to the syslog server")
group.add_option('--log-handler', action="append", default=[], my_default=DEFAULT_LOG_HANDLER, metavar="PREFIX:LEVEL", help='setup a handler at LEVEL for a given PREFIX. An empty PREFIX indicates the root logger. This option can be repeated. Example: "odoo.orm:DEBUG" or "werkzeug:CRITICAL" (default: ":INFO")')
group.add_option('--log-request', action="append_const", dest="log_handler", const="odoo.http.rpc.request:DEBUG", help='shortcut for --log-handler=odoo.http.rpc.request:DEBUG')
group.add_option('--log-response', action="append_const", dest="log_handler", const="odoo.http.rpc.response:DEBUG", help='shortcut for --log-handler=odoo.http.rpc.response:DEBUG')
group.add_option('--log-web', action="append_const", dest="log_handler", const="odoo.http:DEBUG", help='shortcut for --log-handler=odoo.http:DEBUG')
group.add_option('--log-sql', action="append_const", dest="log_handler", const="odoo.sql_db:DEBUG", help='shortcut for --log-handler=odoo.sql_db:DEBUG')
group.add_option('--log-db', dest='log_db', help="Logging database", my_default=False)
group.add_option('--log-db-level', dest='log_db_level', my_default='warning', help="Logging database level")
# For backward-compatibility, map the old log levels to something
# quite close.
levels = [
'info', 'debug_rpc', 'warn', 'test', 'critical', 'runbot',
'debug_sql', 'error', 'debug', 'debug_rpc_answer', 'notset'
]
group.add_option('--log-level', dest='log_level', type='choice',
choices=levels, my_default='info',
help='specify the level of the logging. Accepted values: %s.' % (levels,))
parser.add_option_group(group)
# SMTP Group
group = optparse.OptionGroup(parser, "SMTP Configuration")
group.add_option('--email-from', dest='email_from', my_default=False,
help='specify the SMTP email address for sending email')
group.add_option('--smtp', dest='smtp_server', my_default='localhost',
help='specify the SMTP server for sending email')
group.add_option('--smtp-port', dest='smtp_port', my_default=25,
help='specify the SMTP port', type="int")
group.add_option('--smtp-ssl', dest='smtp_ssl', action='store_true', my_default=False,
help='if passed, SMTP connections will be encrypted with SSL (STARTTLS)')
group.add_option('--smtp-user', dest='smtp_user', my_default=False,
help='specify the SMTP username for sending email')
group.add_option('--smtp-password', dest='smtp_password', my_default=False,
help='specify the SMTP password for sending email')
parser.add_option_group(group)
group = optparse.OptionGroup(parser, "Database related options")
group.add_option("-d", "--database", dest="db_name", my_default=False,
help="specify the database name")
group.add_option("-r", "--db_user", dest="db_user", my_default=False,
help="specify the database user name")
group.add_option("-w", "--db_password", dest="db_password", my_default=False,
help="specify the database password")
group.add_option("--pg_path", dest="pg_path", help="specify the pg executable path")
group.add_option("--db_host", dest="db_host", my_default=False,
help="specify the database host")
group.add_option("--db_port", dest="db_port", my_default=False,
help="specify the database port", type="int")
group.add_option("--db_sslmode", dest="db_sslmode", type="choice", my_default='prefer',
choices=['disable', 'allow', 'prefer', 'require', 'verify-ca', 'verify-full'],
help="specify the database ssl connection mode (see PostgreSQL documentation)")
group.add_option("--db_maxconn", dest="db_maxconn", type='int', my_default=64,
help="specify the maximum number of physical connections to PostgreSQL")
group.add_option("--db-template", dest="db_template", my_default="template0",
help="specify a custom database template to create a new database")
parser.add_option_group(group)
group = optparse.OptionGroup(parser, "Internationalisation options. ",
"Use these options to translate Odoo to another language. "
"See i18n section of the user manual. Option '-d' is mandatory. "
"Option '-l' is mandatory in case of importation"
)
group.add_option('--load-language', dest="load_language",
help="specifies the languages for the translations you want to be loaded")
group.add_option('-l', "--language", dest="language",
help="specify the language of the translation file. Use it with --i18n-export or --i18n-import")
group.add_option("--i18n-export", dest="translate_out",
help="export all sentences to be translated to a CSV file, a PO file or a TGZ archive and exit")
group.add_option("--i18n-import", dest="translate_in",
help="import a CSV or a PO file with translations and exit. The '-l' option is required.")
group.add_option("--i18n-overwrite", dest="overwrite_existing_translations", action="store_true", my_default=False,
help="overwrites existing translation terms on updating a module or importing a CSV or a PO file.")
group.add_option("--modules", dest="translate_modules",
help="specify modules to export. Use in combination with --i18n-export")
parser.add_option_group(group)
security = optparse.OptionGroup(parser, 'Security-related options')
security.add_option('--no-database-list', action="store_false", dest='list_db', my_default=True,
help="Disable the ability to obtain or view the list of databases. "
"Also disable access to the database manager and selector, "
"so be sure to set a proper --database parameter first")
parser.add_option_group(security)
# Advanced options
group = optparse.OptionGroup(parser, "Advanced options")
group.add_option('--dev', dest='dev_mode', type="string",
help="Enable developer mode. Param: List of options separated by comma. "
"Options : all, [pudb|wdb|ipdb|pdb], reload, qweb, werkzeug, xml")
group.add_option('--shell-interface', dest='shell_interface', type="string",
help="Specify a preferred REPL to use in shell mode. Supported REPLs are: "
"[ipython|ptpython|bpython|python]")
group.add_option("--stop-after-init", action="store_true", dest="stop_after_init", my_default=False,
help="stop the server after its initialization")
group.add_option("--osv-memory-count-limit", dest="osv_memory_count_limit", my_default=False,
help="Force a limit on the maximum number of records kept in the virtual "
"osv_memory tables. The default is False, which means no count-based limit.",
type="int")
group.add_option("--transient-age-limit", dest="transient_age_limit", my_default=1.0,
help="Time limit (decimal value in hours) records created with a "
"TransientModel (mosly wizard) are kept in the database. Default to 1 hour.",
type="float")
group.add_option("--osv-memory-age-limit", dest="osv_memory_age_limit", my_default=False,
help="Deprecated alias to the transient-age-limit option",
type="float")
group.add_option("--max-cron-threads", dest="max_cron_threads", my_default=2,
help="Maximum number of threads processing concurrently cron jobs (default 2).",
type="int")
group.add_option("--unaccent", dest="unaccent", my_default=False, action="store_true",
help="Try to enable the unaccent extension when creating new databases.")
group.add_option("--geoip-db", dest="geoip_database", my_default='/usr/share/GeoIP/GeoLite2-City.mmdb',
help="Absolute path to the GeoIP database file.")
parser.add_option_group(group)
if os.name == 'posix':
group = optparse.OptionGroup(parser, "Multiprocessing options")
# TODO sensible default for the three following limits.
group.add_option("--workers", dest="workers", my_default=0,
help="Specify the number of workers, 0 disable prefork mode.",
type="int")
group.add_option("--limit-memory-soft", dest="limit_memory_soft", my_default=2048 * 1024 * 1024,
help="Maximum allowed virtual memory per worker (in bytes), when reached the worker be "
"reset after the current request (default 2048MiB).",
type="int")
group.add_option("--limit-memory-hard", dest="limit_memory_hard", my_default=2560 * 1024 * 1024,
help="Maximum allowed virtual memory per worker (in bytes), when reached, any memory "
"allocation will fail (default 2560MiB).",
type="int")
group.add_option("--limit-time-cpu", dest="limit_time_cpu", my_default=60,
help="Maximum allowed CPU time per request (default 60).",
type="int")
group.add_option("--limit-time-real", dest="limit_time_real", my_default=120,
help="Maximum allowed Real time per request (default 120).",
type="int")
group.add_option("--limit-time-real-cron", dest="limit_time_real_cron", my_default=-1,
help="Maximum allowed Real time per cron job. (default: --limit-time-real). "
"Set to 0 for no limit. ",
type="int")
group.add_option("--limit-request", dest="limit_request", my_default=8192,
help="Maximum number of request to be processed per worker (default 8192).",
type="int")
parser.add_option_group(group)
# Copy all optparse options (i.e. MyOption) into self.options.
for group in parser.option_groups:
for option in group.option_list:
if option.dest not in self.options:
self.options[option.dest] = option.my_default
self.casts[option.dest] = option
# generate default config
self._parse_config()
def parse_config(self, args=None):
""" Parse the configuration file (if any) and the command-line
arguments.
This method initializes odoo.tools.config and openerp.conf (the
former should be removed in the future) with library-wide
configuration values.
This method must be called before proper usage of this library can be
made.
Typical usage of this method:
odoo.tools.config.parse_config(sys.argv[1:])
"""
opt = self._parse_config(args)
odoo.netsvc.init_logger()
self._warn_deprecated_options()
odoo.modules.module.initialize_sys_path()
return opt
def _parse_config(self, args=None):
if args is None:
args = []
opt, args = self.parser.parse_args(args)
def die(cond, msg):
if cond:
self.parser.error(msg)
# Ensures no illegitimate argument is silently discarded (avoids insidious "hyphen to dash" problem)
die(args, "unrecognized parameters: '%s'" % " ".join(args))
die(bool(opt.syslog) and bool(opt.logfile),
"the syslog and logfile options are exclusive")
die(opt.translate_in and (not opt.language or not opt.db_name),
"the i18n-import option cannot be used without the language (-l) and the database (-d) options")
die(opt.overwrite_existing_translations and not (opt.translate_in or opt.update),
"the i18n-overwrite option cannot be used without the i18n-import option or without the update option")
die(opt.translate_out and (not opt.db_name),
"the i18n-export option cannot be used without the database (-d) option")
# Check if the config file exists (-c used, but not -s)
die(not opt.save and opt.config and not os.access(opt.config, os.R_OK),
"The config file '%s' selected with -c/--config doesn't exist or is not readable, "\
"use -s/--save if you want to generate it"% opt.config)
        die(bool(opt.osv_memory_age_limit) and bool(opt.transient_age_limit),
            "the osv-memory-age-limit option cannot be used with the "
            "transient-age-limit option, please only use the latter.")
# place/search the config file on Win32 near the server installation
# (../etc from the server)
        # if the server is run by an unprivileged user, they have to specify the location of a
        # config file they can write to, otherwise they won't be able to save the configuration,
        # or even to start the server...
# TODO use appdirs
if os.name == 'nt':
rcfilepath = os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), 'odoo.conf')
else:
rcfilepath = os.path.expanduser('~/.odoorc')
old_rcfilepath = os.path.expanduser('~/.openerp_serverrc')
die(os.path.isfile(rcfilepath) and os.path.isfile(old_rcfilepath),
"Found '.odoorc' and '.openerp_serverrc' in your path. Please keep only one of "\
"them, preferably '.odoorc'.")
if not os.path.isfile(rcfilepath) and os.path.isfile(old_rcfilepath):
rcfilepath = old_rcfilepath
self.rcfile = os.path.abspath(
self.config_file or opt.config or os.environ.get('ODOO_RC') or os.environ.get('OPENERP_SERVER') or rcfilepath)
self.load()
# Verify that we want to log or not, if not the output will go to stdout
if self.options['logfile'] in ('None', 'False'):
self.options['logfile'] = False
# the same for the pidfile
if self.options['pidfile'] in ('None', 'False'):
self.options['pidfile'] = False
# the same for the test_tags
if self.options['test_tags'] == 'None':
self.options['test_tags'] = None
# and the server_wide_modules
if self.options['server_wide_modules'] in ('', 'None', 'False'):
self.options['server_wide_modules'] = 'base,web'
# if defined do not take the configfile value even if the defined value is None
keys = ['http_interface', 'http_port', 'longpolling_port', 'http_enable',
'db_name', 'db_user', 'db_password', 'db_host', 'db_sslmode',
'db_port', 'db_template', 'logfile', 'pidfile', 'smtp_port',
'email_from', 'smtp_server', 'smtp_user', 'smtp_password',
'db_maxconn', 'import_partial', 'addons_path', 'upgrade_path',
'syslog', 'without_demo', 'screencasts', 'screenshots',
'dbfilter', 'log_level', 'log_db',
'log_db_level', 'geoip_database', 'dev_mode', 'shell_interface'
]
for arg in keys:
# Copy the command-line argument (except the special case for log_handler, due to
# action=append requiring a real default, so we cannot use the my_default workaround)
if getattr(opt, arg, None) is not None:
self.options[arg] = getattr(opt, arg)
# ... or keep, but cast, the config file value.
elif isinstance(self.options[arg], str) and self.casts[arg].type in optparse.Option.TYPE_CHECKER:
self.options[arg] = optparse.Option.TYPE_CHECKER[self.casts[arg].type](self.casts[arg], arg, self.options[arg])
if isinstance(self.options['log_handler'], str):
self.options['log_handler'] = self.options['log_handler'].split(',')
self.options['log_handler'].extend(opt.log_handler)
# if defined but None take the configfile value
keys = [
'language', 'translate_out', 'translate_in', 'overwrite_existing_translations',
'dev_mode', 'shell_interface', 'smtp_ssl', 'load_language',
'stop_after_init', 'without_demo', 'http_enable', 'syslog',
'list_db', 'proxy_mode',
'test_file', 'test_tags',
'osv_memory_count_limit', 'osv_memory_age_limit', 'transient_age_limit', 'max_cron_threads', 'unaccent',
'data_dir',
'server_wide_modules',
]
posix_keys = [
'workers',
'limit_memory_hard', 'limit_memory_soft',
'limit_time_cpu', 'limit_time_real', 'limit_request', 'limit_time_real_cron'
]
if os.name == 'posix':
keys += posix_keys
else:
self.options.update(dict.fromkeys(posix_keys, None))
# Copy the command-line arguments...
for arg in keys:
if getattr(opt, arg) is not None:
self.options[arg] = getattr(opt, arg)
# ... or keep, but cast, the config file value.
elif isinstance(self.options[arg], str) and self.casts[arg].type in optparse.Option.TYPE_CHECKER:
self.options[arg] = optparse.Option.TYPE_CHECKER[self.casts[arg].type](self.casts[arg], arg, self.options[arg])
self.options['root_path'] = self._normalize(os.path.join(os.path.dirname(__file__), '..'))
if not self.options['addons_path'] or self.options['addons_path']=='None':
default_addons = []
base_addons = os.path.join(self.options['root_path'], 'addons')
if os.path.exists(base_addons):
default_addons.append(base_addons)
main_addons = os.path.abspath(os.path.join(self.options['root_path'], '../addons'))
if os.path.exists(main_addons):
default_addons.append(main_addons)
self.options['addons_path'] = ','.join(default_addons)
else:
self.options['addons_path'] = ",".join(
self._normalize(x)
for x in self.options['addons_path'].split(','))
self.options["upgrade_path"] = (
",".join(self._normalize(x)
for x in self.options['upgrade_path'].split(','))
if self.options['upgrade_path']
else ""
)
self.options['init'] = opt.init and dict.fromkeys(opt.init.split(','), 1) or {}
self.options['demo'] = (dict(self.options['init'])
if not self.options['without_demo'] else {})
self.options['update'] = opt.update and dict.fromkeys(opt.update.split(','), 1) or {}
self.options['translate_modules'] = opt.translate_modules and [m.strip() for m in opt.translate_modules.split(',')] or ['all']
self.options['translate_modules'].sort()
dev_split = opt.dev_mode and [s.strip() for s in opt.dev_mode.split(',')] or []
self.options['dev_mode'] = 'all' in dev_split and dev_split + ['pdb', 'reload', 'qweb', 'werkzeug', 'xml'] or dev_split
if opt.pg_path:
self.options['pg_path'] = opt.pg_path
self.options['test_enable'] = bool(self.options['test_tags'])
if opt.save:
self.save()
# normalize path options
for key in ['data_dir', 'logfile', 'pidfile', 'test_file', 'screencasts', 'screenshots', 'pg_path', 'translate_out', 'translate_in', 'geoip_database']:
self.options[key] = self._normalize(self.options[key])
conf.addons_paths = self.options['addons_path'].split(',')
conf.server_wide_modules = [
m.strip() for m in self.options['server_wide_modules'].split(',') if m.strip()
]
return opt
def _warn_deprecated_options(self):
if self.options['osv_memory_age_limit']:
warnings.warn(
"The osv-memory-age-limit is a deprecated alias to "
"the transient-age-limit option, please use the latter.",
DeprecationWarning)
self.options['transient_age_limit'] = self.options.pop('osv_memory_age_limit')
def _is_addons_path(self, path):
from odoo.modules.module import MANIFEST_NAMES
for f in os.listdir(path):
modpath = os.path.join(path, f)
if os.path.isdir(modpath):
def hasfile(filename):
return os.path.isfile(os.path.join(modpath, filename))
if hasfile('__init__.py') and any(hasfile(mname) for mname in MANIFEST_NAMES):
return True
return False
def _check_addons_path(self, option, opt, value, parser):
ad_paths = []
for path in value.split(','):
path = path.strip()
res = os.path.abspath(os.path.expanduser(path))
if not os.path.isdir(res):
raise optparse.OptionValueError("option %s: no such directory: %r" % (opt, path))
if not self._is_addons_path(res):
raise optparse.OptionValueError("option %s: the path %r is not a valid addons directory" % (opt, path))
ad_paths.append(res)
setattr(parser.values, option.dest, ",".join(ad_paths))
def _check_upgrade_path(self, option, opt, value, parser):
upgrade_path = []
for path in value.split(','):
path = path.strip()
res = self._normalize(path)
if not os.path.isdir(res):
raise optparse.OptionValueError("option %s: no such directory: %r" % (opt, path))
if not self._is_upgrades_path(res):
raise optparse.OptionValueError("option %s: the path %r is not a valid upgrade directory" % (opt, path))
if res not in upgrade_path:
upgrade_path.append(res)
setattr(parser.values, option.dest, ",".join(upgrade_path))
def _is_upgrades_path(self, res):
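        # A path qualifies when it holds at least one migration script laid out as
        # <module>/<version>/{pre,post,end}-*.py, e.g. base/13.0.1.3/post-migrate.py
        # (illustrative example, editor's addition).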
return any(
glob.glob(os.path.join(res, f"*/*/{prefix}-*.py"))
for prefix in ["pre", "post", "end"]
)
def _test_enable_callback(self, option, opt, value, parser):
if not parser.values.test_tags:
parser.values.test_tags = "+standard"
def load(self):
outdated_options_map = {
'xmlrpc_port': 'http_port',
'xmlrpc_interface': 'http_interface',
'xmlrpc': 'http_enable',
}
p = ConfigParser.RawConfigParser()
try:
p.read([self.rcfile])
for (name,value) in p.items('options'):
name = outdated_options_map.get(name, name)
if value=='True' or value=='true':
value = True
if value=='False' or value=='false':
value = False
self.options[name] = value
#parse the other sections, as well
for sec in p.sections():
if sec == 'options':
continue
self.misc.setdefault(sec, {})
for (name, value) in p.items(sec):
if value=='True' or value=='true':
value = True
if value=='False' or value=='false':
value = False
self.misc[sec][name] = value
except IOError:
pass
except ConfigParser.NoSectionError:
pass
def save(self):
p = ConfigParser.RawConfigParser()
loglevelnames = dict(zip(self._LOGLEVELS.values(), self._LOGLEVELS))
p.add_section('options')
for opt in sorted(self.options):
if opt in ('version', 'language', 'translate_out', 'translate_in', 'overwrite_existing_translations', 'init', 'update'):
continue
if opt in self.blacklist_for_save:
continue
if opt in ('log_level',):
p.set('options', opt, loglevelnames.get(self.options[opt], self.options[opt]))
elif opt == 'log_handler':
p.set('options', opt, ','.join(_deduplicate_loggers(self.options[opt])))
else:
p.set('options', opt, self.options[opt])
for sec in sorted(self.misc):
p.add_section(sec)
for opt in sorted(self.misc[sec]):
p.set(sec,opt,self.misc[sec][opt])
# try to create the directories and write the file
try:
rc_exists = os.path.exists(self.rcfile)
if not rc_exists and not os.path.exists(os.path.dirname(self.rcfile)):
os.makedirs(os.path.dirname(self.rcfile))
try:
                with open(self.rcfile, 'w') as f:
                    p.write(f)
if not rc_exists:
os.chmod(self.rcfile, 0o600)
except IOError:
sys.stderr.write("ERROR: couldn't write the config file\n")
except OSError:
# what to do if impossible?
sys.stderr.write("ERROR: couldn't create the config directory\n")
def get(self, key, default=None):
return self.options.get(key, default)
def pop(self, key, default=None):
return self.options.pop(key, default)
def get_misc(self, sect, key, default=None):
return self.misc.get(sect,{}).get(key, default)
def __setitem__(self, key, value):
self.options[key] = value
if key in self.options and isinstance(self.options[key], str) and \
key in self.casts and self.casts[key].type in optparse.Option.TYPE_CHECKER:
self.options[key] = optparse.Option.TYPE_CHECKER[self.casts[key].type](self.casts[key], key, self.options[key])
def __getitem__(self, key):
return self.options[key]
@property
def addons_data_dir(self):
add_dir = os.path.join(self['data_dir'], 'addons')
d = os.path.join(add_dir, release.series)
if not os.path.exists(d):
try:
# bootstrap parent dir +rwx
if not os.path.exists(add_dir):
os.makedirs(add_dir, 0o700)
# try to make +rx placeholder dir, will need manual +w to activate it
os.makedirs(d, 0o500)
except OSError:
logging.getLogger(__name__).debug('Failed to create addons data dir %s', d)
return d
@property
def session_dir(self):
d = os.path.join(self['data_dir'], 'sessions')
try:
os.makedirs(d, 0o700)
except OSError as e:
if e.errno != errno.EEXIST:
raise
assert os.access(d, os.W_OK), \
"%s: directory is not writable" % d
return d
def filestore(self, dbname):
return os.path.join(self['data_dir'], 'filestore', dbname)
def set_admin_password(self, new_password):
hash_password = crypt_context.hash if hasattr(crypt_context, 'hash') else crypt_context.encrypt
self.options['admin_passwd'] = hash_password(new_password)
def verify_admin_password(self, password):
"""Verifies the super-admin password, possibly updating the stored hash if needed"""
stored_hash = self.options['admin_passwd']
if not stored_hash:
# empty password/hash => authentication forbidden
return False
result, updated_hash = crypt_context.verify_and_update(password, stored_hash)
if result:
if updated_hash:
self.options['admin_passwd'] = updated_hash
return True
def _normalize(self, path):
if not path:
return ''
return realpath(abspath(expanduser(expandvars(path.strip()))))
config = configmanager()
| []
| []
| [
"OPENERP_SERVER",
"ODOO_RC"
]
| [] | ["OPENERP_SERVER", "ODOO_RC"] | python | 2 | 0 | |
sriov_ci_tests/tests/api/test_sriov_network_one_macvtap_port.py | # Copyright 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ipaddress
import netaddr
import os
import time
#from tempest.pci import pci
from . import pci
from .network_base import ExtendNetworkScenarioTest
from oslo_log import log as logging
from tempest.common.utils.linux import remote_client
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest import test
#Not stable
from tempest.common import waiters
CONF = config.CONF
LOG = logging.getLogger(__name__)
PRIVATE_IP_PATTERN = r'192.168.198.(25[0-5]|2[0-4]\d|1\d\d|[1-9]\d|[2-9])/24'
IFACEPATH = "/opt/stack/tempest/interface"
#hard code
PEER_HOST = os.getenv('PEER_HOST')
PEER_NAME = os.getenv('PEER_NAME')
PEER_PWD = os.getenv('PEER_PWD')
VM_PASSWD = 'cubswin:)'
PRIVATE_CIDR = "192.168.198.0/24"
PRIVATE_IP_START = "192.168.198.128"
PRIVATE_IP_END = "192.168.198.254"
PRIVATE_FIX_IP = "192.168.198.125"
#SRIOV_IP_START = os.getenv('VM_INTERFACE_RANGE_START')
#SRIOV_IPADDR_START = ipaddress.IPv4Address(str(SRIOV_IP_START))
SRIOV_IP_START = "192.168.3.130"
SRIOV_IPADDR_START = ipaddress.IPv4Address(str(SRIOV_IP_START))
SRIOV_CIDR = str(SRIOV_IPADDR_START).rsplit(".", 1)[0] + ".0/24"
SRIOV_IP_END = str(SRIOV_IPADDR_START + 10)
SRIOV_FIX_IP = str(SRIOV_IPADDR_START + 5)
class TestNetworkAdvancedServerOps(ExtendNetworkScenarioTest):
"""
    This test case checks VM connectivity after some advanced
    instance operations are executed:
* Stop/Start an instance
* Reboot an instance
* Rebuild an instance
* Pause/Unpause an instance
* Suspend/Resume an instance
* Resize an instance
"""
@classmethod
def check_preconditions(cls):
super(TestNetworkAdvancedServerOps, cls).check_preconditions()
if not (CONF.network.tenant_networks_reachable
or CONF.network.public_network_id):
msg = ('Either tenant_networks_reachable must be "true", or '
'public_network_id must be defined.')
raise cls.skipException(msg)
def _check_network_connectivity(self, should_connect=True):
username = "cirros"
private_key = self.keypair['private_key']
self._check_tenant_network_connectivity(
self.server, username, private_key,
should_connect=should_connect,
servers_for_debug=[self.server])
print(self.sriov_ip)
for i in range(60):
print("%sth times to ping" % i)
error = False
try:
linux_client = remote_client.RemoteClient(
PEER_HOST, PEER_NAME, PEER_PWD)
linux_client.ping_host(self.sriov_ip)
except Exception as e:
                # we only tolerate up to 20 failed ping attempts.
error = True
if i > 20:
raise Exception("Error: can not connect the instance "
"by the sriov port!")
else:
print(e)
if not error:
print("ping sriov port success!")
break
time.sleep(2)
def _setup_network_and_servers(self):
self.keypair = self.create_keypair()
kwargs = {"name": "private"}
private_network = self.create_network(**kwargs)
cidr = netaddr.IPNetwork(PRIVATE_CIDR)
ranges = {"start": PRIVATE_IP_START, "end": PRIVATE_IP_END}
subnet = self.create_subnet(
private_network, cidr=cidr,
allocation_pools=[ranges], enable_dhcp=True)
self.private_gateway = subnet["gateway_ip"]
kwargs = {"binding:vnic_type": "normal",
"fixed_ips": [{"subnet_id": subnet['id'],
"ip_address": PRIVATE_FIX_IP}]}
private_port = self.create_port(
private_network, name="port-sriov", **kwargs)
port_info = self.ports_client.show_port(private_port['id'])
self.private_ip = port_info['port']['fixed_ips'][0]['ip_address']
print(self.private_ip)
kwargs = {"provider:network_type": "vlan",
"provider:physical_network": "physnet1",
"provider:segmentation_id": "1000"}
sriov_network = self.create_network(**kwargs)
cidr = netaddr.IPNetwork(SRIOV_CIDR)
ranges = {"start": SRIOV_IP_START, "end": SRIOV_IP_END}
subnet = self.create_subnet(
sriov_network, cidr=cidr,
allocation_pools=[ranges], enable_dhcp=False)
kwargs = {"binding:vnic_type": "macvtap",
"fixed_ips": [{"subnet_id": subnet['id'],
"ip_address": SRIOV_IP_START}]}
sriov_port = self.create_port(
sriov_network, name="port-sriov-", **kwargs)
port_info = self.ports_client.show_port(sriov_port['id'])
self.sriov_ip = port_info['port']['fixed_ips'][0]['ip_address']
self.sriov_port_id = sriov_port['id']
cont = pci.gen_rc_local_dict(pci.INTERFACES)
print(cont)
personality = [
{'path': "/etc/network/interfaces",
'contents': cont}]
create_kwargs = {
'networks': [
{'uuid': private_network["id"], 'port': private_port['id']},
{'uuid': sriov_network['id'], 'port': sriov_port['id']}],
'key_name': self.keypair['name'],
'config_drive': True,
}
server_name = data_utils.rand_name('server-sriov')
print(create_kwargs['networks'])
server = self.servers_client.create_server(name=server_name,
imageRef=CONF.compute.image_ref,
flavorRef=CONF.compute.flavor_ref,
# user_data=user_data,
personality=personality,
**create_kwargs)
self.server = server['server']
self.addCleanup(
self.servers_client.delete_server, self.server['id'])
waiters.wait_for_server_status(
self.servers_client, self.server["id"], 'ACTIVE')
@test.services('compute', 'network')
def test_sriov_one_macvtap_port(self):
self._setup_network_and_servers()
time.sleep(30)
port_info = self.ports_client.show_port(self.sriov_port_id)
self.assertEqual('ACTIVE', port_info['port']['status'])
| []
| []
| [
"PEER_NAME",
"PEER_HOST",
"VM_INTERFACE_RANGE_START",
"PEER_PWD"
]
| [] | ["PEER_NAME", "PEER_HOST", "VM_INTERFACE_RANGE_START", "PEER_PWD"] | python | 4 | 0 | |
monitor_django/config/settings/local.py | # -*- coding: utf-8 -*-
"""
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
"""
from .common import * # noqa
import socket
import os
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env('DJANGO_SECRET_KEY', default='kylt1i#%7e*g=x(213_vt%^=q&*k7y&zj!gxs#a6(=3v!^b*e7')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_PORT = 1025
EMAIL_HOST = 'localhost'
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
default='django.core.mail.backends.console.EmailBackend')
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )
INTERNAL_IPS = ['127.0.0.1', '10.0.2.2', ]
# tricks to have debug toolbar when developing with docker
if os.environ.get('USE_DOCKER') == 'yes':
ip = socket.gethostbyname(socket.gethostname())
INTERNAL_IPS += [ip[:-1]+"1"]
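    # Illustrative note (editor's addition): a container address such as 172.18.0.3 yields
    # 172.18.0.1 here, i.e. the Docker bridge gateway that browser requests appear to come
    # from (this simple slicing assumes the last octet is a single digit).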
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
| []
| []
| [
"USE_DOCKER"
]
| [] | ["USE_DOCKER"] | python | 1 | 0 | |
yaTools/yaTools/wsgi.py | """
WSGI config for yaTools project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'yaTools.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
src/django_server/manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_server.settings.local')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
examples/service/conversations/message/create/message_create_example.go | package main
import (
"log"
"os"
"github.com/RJPearson94/twilio-sdk-go"
v1 "github.com/RJPearson94/twilio-sdk-go/service/conversations/v1"
"github.com/RJPearson94/twilio-sdk-go/service/conversations/v1/conversation/messages"
"github.com/RJPearson94/twilio-sdk-go/session/credentials"
"github.com/RJPearson94/twilio-sdk-go/utils"
)
var conversationClient *v1.Conversations
func init() {
creds, err := credentials.New(credentials.Account{
Sid: os.Getenv("TWILIO_ACCOUNT_SID"),
AuthToken: os.Getenv("TWILIO_AUTH_TOKEN"),
})
if err != nil {
log.Panicf("%s", err.Error())
}
conversationClient = twilio.NewWithCredentials(creds).Conversations.V1
}
func main() {
resp, err := conversationClient.
Conversation("CHXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").
Messages.
Create(&messages.CreateMessageInput{
Body: utils.String("Hello World"),
})
if err != nil {
log.Panicf("%s", err.Error())
}
log.Printf("SID: %s", resp.Sid)
}
| [
"\"TWILIO_ACCOUNT_SID\"",
"\"TWILIO_AUTH_TOKEN\""
]
| []
| [
"TWILIO_AUTH_TOKEN",
"TWILIO_ACCOUNT_SID"
]
| [] | ["TWILIO_AUTH_TOKEN", "TWILIO_ACCOUNT_SID"] | go | 2 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # noqa
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
# This allows easy placement of apps within the interior
# fb_wall directory.
current_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(current_path, "fb_wall"))
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
flaskr/Config.py | import os
class Config:
host = 'localhost'
url = os.environ.get('APP_IP', 'http://localhost:3000/')
port = 27017
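    # Illustrative usage (editor's sketch; pymongo is an assumption, the original file only
    # defines these values -- 27017 is MongoDB's default port):
    #   client = pymongo.MongoClient(Config.host, Config.port)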
| []
| []
| [
"APP_IP"
]
| [] | ["APP_IP"] | python | 1 | 0 | |
experiments/aws-ssm/aws-ssm-chaos-by-id/experiment/aws-ssm-chaos-by-id.go | package experiment
import (
"os"
"github.com/litmuschaos/chaos-operator/pkg/apis/litmuschaos/v1alpha1"
litmusLIB "github.com/litmuschaos/litmus-go/chaoslib/litmus/aws-ssm-chaos/lib/ssm"
experimentEnv "github.com/litmuschaos/litmus-go/pkg/aws-ssm/aws-ssm-chaos/environment"
experimentTypes "github.com/litmuschaos/litmus-go/pkg/aws-ssm/aws-ssm-chaos/types"
clients "github.com/litmuschaos/litmus-go/pkg/clients"
ec2 "github.com/litmuschaos/litmus-go/pkg/cloud/aws/ec2"
"github.com/litmuschaos/litmus-go/pkg/cloud/aws/ssm"
"github.com/litmuschaos/litmus-go/pkg/events"
"github.com/litmuschaos/litmus-go/pkg/log"
"github.com/litmuschaos/litmus-go/pkg/probe"
"github.com/litmuschaos/litmus-go/pkg/result"
"github.com/litmuschaos/litmus-go/pkg/status"
"github.com/litmuschaos/litmus-go/pkg/types"
"github.com/litmuschaos/litmus-go/pkg/utils/common"
"github.com/sirupsen/logrus"
)
// AWSSSMChaosByID injects the ssm chaos on an ec2 instance
func AWSSSMChaosByID(clients clients.ClientSets) {
experimentsDetails := experimentTypes.ExperimentDetails{}
resultDetails := types.ResultDetails{}
eventsDetails := types.EventDetails{}
chaosDetails := types.ChaosDetails{}
//Fetching all the ENV passed from the runner pod
log.Infof("[PreReq]: Getting the ENV for the %v experiment", os.Getenv("EXPERIMENT_NAME"))
experimentEnv.GetENV(&experimentsDetails, "aws-ssm-chaos-by-id")
// Initialize the chaos attributes
types.InitialiseChaosVariables(&chaosDetails)
// Initialize Chaos Result Parameters
types.SetResultAttributes(&resultDetails, chaosDetails)
if experimentsDetails.EngineName != "" {
// Initialize the probe details. Bail out upon error, as we haven't entered exp business logic yet
if err := probe.InitializeProbesInChaosResultDetails(&chaosDetails, clients, &resultDetails); err != nil {
log.Errorf("Unable to initialize the probes, err: %v", err)
return
}
}
//Updating the chaos result in the beginning of experiment
log.Infof("[PreReq]: Updating the chaos result of %v experiment (SOT)", experimentsDetails.ExperimentName)
if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "SOT"); err != nil {
log.Errorf("Unable to Create the Chaos Result, err: %v", err)
failStep := "[pre-chaos]: Failed to update the chaos result of ec2 terminate experiment (SOT), err: " + err.Error()
result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
return
}
// Set the chaos result uid
result.SetResultUID(&resultDetails, clients, &chaosDetails)
	// generate the event in chaosresult to mark the verdict as awaited
msg := "experiment: " + experimentsDetails.ExperimentName + ", Result: Awaited"
types.SetResultEventAttributes(&eventsDetails, types.AwaitedVerdict, msg, "Normal", &resultDetails)
events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
// Calling AbortWatcher go routine, it will continuously watch for the abort signal and generate the required events and result
go common.AbortWatcherWithoutExit(experimentsDetails.ExperimentName, clients, &resultDetails, &chaosDetails, &eventsDetails)
//DISPLAY THE INSTANCE INFORMATION
log.InfoWithValues("The instance information is as follows", logrus.Fields{
"Total Chaos Duration": experimentsDetails.ChaosDuration,
"Chaos Namespace": experimentsDetails.ChaosNamespace,
"Instance ID": experimentsDetails.EC2InstanceID,
"Sequence": experimentsDetails.Sequence,
})
//PRE-CHAOS APPLICATION STATUS CHECK
log.Info("[Status]: Verify that the AUT (Application Under Test) is running (pre-chaos)")
if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
log.Errorf("Application status check failed, err: %v", err)
failStep := "[pre-chaos]: Failed to verify that the AUT (Application Under Test) is in running state, err: " + err.Error()
result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
return
}
//PRE-CHAOS AUXILIARY APPLICATION STATUS CHECK
if experimentsDetails.AuxiliaryAppInfo != "" {
log.Info("[Status]: Verify that the Auxiliary Applications are running (pre-chaos)")
if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
log.Errorf("Auxiliary Application status check failed, err: %v", err)
failStep := "[pre-chaos]: Failed to verify that the Auxiliary Applications are in running state, err: " + err.Error()
result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
return
}
}
if experimentsDetails.EngineName != "" {
// marking AUT as running, as we already checked the status of application under test
msg := "AUT: Running"
// run the probes in the pre-chaos check
if len(resultDetails.ProbeDetails) != 0 {
if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PreChaos", &eventsDetails); err != nil {
log.Errorf("Probe Failed, err: %v", err)
failStep := "[pre-chaos]: Failed while running probes, err: " + err.Error()
msg := "AUT: Running, Probes: Unsuccessful"
types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Warning", &chaosDetails)
events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
return
}
msg = "AUT: Running, Probes: Successful"
}
// generating the events for the pre-chaos check
types.SetEngineEventAttributes(&eventsDetails, types.PreChaosCheck, msg, "Normal", &chaosDetails)
events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
}
//Verify that the instance should have permission to perform ssm api calls
if err := ssm.CheckInstanceInformation(&experimentsDetails); err != nil {
log.Errorf("failed perform ssm api calls, err: %v", err)
failStep := "[pre-chaos]: Failed to verify to make SSM api calls, err: " + err.Error()
result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
return
}
//Verify the aws ec2 instance is running (pre chaos)
if err := ec2.InstanceStatusCheckByID(experimentsDetails.EC2InstanceID, experimentsDetails.Region); err != nil {
log.Errorf("failed to get the ec2 instance status, err: %v", err)
failStep := "[pre-chaos]: Failed to verify the AWS ec2 instance status, err: " + err.Error()
result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
return
}
log.Info("[Status]: EC2 instance is in running state")
// Including the litmus lib for aws-ssm-chaos-by-id
switch experimentsDetails.ChaosLib {
case "litmus":
if err := litmusLIB.PrepareAWSSSMChaosByID(&experimentsDetails, clients, &resultDetails, &eventsDetails, &chaosDetails); err != nil {
log.Errorf("Chaos injection failed, err: %v", err)
failStep := "[chaos]: Failed inside the chaoslib, err: " + err.Error()
result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
//Delete the ssm document on the given aws service monitoring docs
if experimentsDetails.IsDocsUploaded {
log.Info("[Recovery]: Delete the uploaded aws ssm docs")
if err := ssm.SSMDeleteDocument(experimentsDetails.DocumentName, experimentsDetails.Region); err != nil {
log.Errorf("fail to delete ssm doc, err: %v", err)
}
}
return
}
default:
log.Error("[Invalid]: Please Provide the correct LIB")
failStep := "[chaos]: no match was found for the specified lib"
result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
return
}
log.Infof("[Confirmation]: %v chaos has been injected successfully", experimentsDetails.ExperimentName)
resultDetails.Verdict = v1alpha1.ResultVerdictPassed
//Verify the aws ec2 instance is running (post chaos)
if err := ec2.InstanceStatusCheckByID(experimentsDetails.EC2InstanceID, experimentsDetails.Region); err != nil {
log.Errorf("failed to get the ec2 instance status, err: %v", err)
failStep := "[post-chaos]: Failed to verify the AWS ec2 instance status, err: " + err.Error()
result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
return
}
log.Info("[Status]: EC2 instance is in running state (post chaos)")
//POST-CHAOS APPLICATION STATUS CHECK
log.Info("[Status]: Verify that the AUT (Application Under Test) is running (post-chaos)")
if err := status.AUTStatusCheck(experimentsDetails.AppNS, experimentsDetails.AppLabel, experimentsDetails.TargetContainer, experimentsDetails.Timeout, experimentsDetails.Delay, clients, &chaosDetails); err != nil {
log.Errorf("Application status check failed, err: %v", err)
failStep := "[post-chaos]: Failed to verify that the AUT (Application Under Test) is running, err: " + err.Error()
result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
return
}
//POST-CHAOS AUXILIARY APPLICATION STATUS CHECK
if experimentsDetails.AuxiliaryAppInfo != "" {
log.Info("[Status]: Verify that the Auxiliary Applications are running (post-chaos)")
if err := status.CheckAuxiliaryApplicationStatus(experimentsDetails.AuxiliaryAppInfo, experimentsDetails.Timeout, experimentsDetails.Delay, clients); err != nil {
log.Errorf("Auxiliary Application status check failed, err: %v", err)
failStep := "[post-chaos]: Failed to verify that the Auxiliary Applications are running, err: " + err.Error()
result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
return
}
}
if experimentsDetails.EngineName != "" {
// marking AUT as running, as we already checked the status of application under test
msg := "AUT: Running"
// run the probes in the post-chaos check
if len(resultDetails.ProbeDetails) != 0 {
if err := probe.RunProbes(&chaosDetails, clients, &resultDetails, "PostChaos", &eventsDetails); err != nil {
log.Errorf("Probes Failed, err: %v", err)
failStep := "[post-chaos]: Failed while running probes, err: " + err.Error()
msg := "AUT: Running, Probes: Unsuccessful"
types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Warning", &chaosDetails)
events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
result.RecordAfterFailure(&chaosDetails, &resultDetails, failStep, clients, &eventsDetails)
return
}
msg = "AUT: Running, Probes: Successful"
}
// generating post chaos event
types.SetEngineEventAttributes(&eventsDetails, types.PostChaosCheck, msg, "Normal", &chaosDetails)
events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
}
//Updating the chaosResult in the end of experiment
log.Infof("[The End]: Updating the chaos result of %v experiment (EOT)", experimentsDetails.ExperimentName)
if err := result.ChaosResult(&chaosDetails, clients, &resultDetails, "EOT"); err != nil {
log.Errorf("Unable to Update the Chaos Result, err: %v", err)
return
}
	// generate the event in chaosresult to mark the verdict as pass/fail
msg = "experiment: " + experimentsDetails.ExperimentName + ", Result: " + string(resultDetails.Verdict)
reason := types.PassVerdict
eventType := "Normal"
if resultDetails.Verdict != "Pass" {
reason = types.FailVerdict
eventType = "Warning"
}
types.SetResultEventAttributes(&eventsDetails, reason, msg, eventType, &resultDetails)
events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosResult")
if experimentsDetails.EngineName != "" {
msg := experimentsDetails.ExperimentName + " experiment has been " + string(resultDetails.Verdict) + "ed"
types.SetEngineEventAttributes(&eventsDetails, types.Summary, msg, "Normal", &chaosDetails)
events.GenerateEvents(&eventsDetails, clients, &chaosDetails, "ChaosEngine")
}
}
| [
"\"EXPERIMENT_NAME\""
]
| []
| [
"EXPERIMENT_NAME"
]
| [] | ["EXPERIMENT_NAME"] | go | 1 | 0 | |
cmd/client/request.go | package main
import (
"io/ioutil"
"log"
"net/http"
"reflect"
"strconv"
)
type RespData []byte
var threadCount int
/**
* HttpRequest executes an HTTP request (crazy, I know)
*/
func HttpRequest(baseUrl string, params *Options) ([]byte, uintptr) {
// Builds request object
client := &http.Client{}
req, err := http.NewRequest("GET", baseUrl+strconv.Itoa(params.offset), nil)
if err != nil {
log.Fatal("httprequest error: " + err.Error())
}
// Builds query string
q := req.URL.Query()
q.Add("format", params.format)
q.Add("count", strconv.Itoa(params.arrayLength))
q.Add("length", strconv.Itoa(params.dataLength))
q.Add("compress", strconv.FormatBool(params.compress))
req.URL.RawQuery = q.Encode()
	// Retrieves response from server
	resp, err := client.Do(req)
	if err != nil {
		log.Fatal("httprequest error: " + err.Error())
	}
	defer resp.Body.Close()
	// Reads response body
	body, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Fatal("error: " + err.Error())
}
// Gets size of payload in bytes
size := uintptr(len(body)) * reflect.TypeOf(body).Elem().Size()
return body, size
}
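// Illustrative call (editor's sketch; the base URL is hypothetical and Options is the
// struct defined elsewhere in this package):
//
//	body, size := HttpRequest("http://localhost:3000/items/", &Options{
//		offset: 0, format: "json", arrayLength: 100, dataLength: 64, compress: false,
//	})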
| []
| []
| []
| [] | [] | go | null | null | null |
venv/lib/python3.8/site-packages/ansible_collections/kubernetes/core/plugins/module_utils/client/discovery.py | # Copyright [2017] [Red Hat, Inc.]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from collections import defaultdict
import hashlib
import tempfile
from functools import partial
import kubernetes.dynamic
import kubernetes.dynamic.discovery
from kubernetes import __version__
from kubernetes.dynamic.exceptions import (ResourceNotFoundError, ResourceNotUniqueError,
ServiceUnavailableError)
from ansible_collections.kubernetes.core.plugins.module_utils.client.resource import ResourceList
class Discoverer(kubernetes.dynamic.discovery.Discoverer):
def __init__(self, client, cache_file):
self.client = client
default_cache_file_name = 'k8srcp-{0}.json'.format(hashlib.sha256(self.__get_default_cache_id()).hexdigest())
self.__cache_file = cache_file or os.path.join(tempfile.gettempdir(), default_cache_file_name)
self.__init_cache()
def __get_default_cache_id(self):
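        # Illustrative example (editor's note): for host "https://k8s.example.com:6443" and
        # user "alice" this returns b"https://k8s.example.com:6443-alice"; its sha256 digest
        # then forms the default cache file name, e.g. /tmp/k8srcp-<digest>.json.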
user = self.__get_user()
if user:
cache_id = "{0}-{1}".format(self.client.configuration.host, user)
else:
cache_id = self.client.configuration.host
return cache_id.encode('utf-8')
def __get_user(self):
# This is intended to provide a portable method for getting a username.
# It could, and maybe should, be replaced by getpass.getuser() but, due
# to a lack of portability testing the original code is being left in
# place.
if hasattr(os, 'getlogin'):
try:
user = os.getlogin()
if user:
return str(user)
except OSError:
pass
if hasattr(os, 'getuid'):
try:
user = os.getuid()
if user:
return str(user)
except OSError:
pass
user = os.environ.get("USERNAME")
if user:
return str(user)
return None
def __init_cache(self, refresh=False):
if refresh or not os.path.exists(self.__cache_file):
self._cache = {'library_version': __version__}
refresh = True
else:
try:
with open(self.__cache_file, 'r') as f:
self._cache = json.load(f, cls=partial(CacheDecoder, self.client))
if self._cache.get('library_version') != __version__:
# Version mismatch, need to refresh cache
self.invalidate_cache()
except Exception:
self.invalidate_cache()
self._load_server_info()
self.discover()
if refresh:
self._write_cache()
def get_resources_for_api_version(self, prefix, group, version, preferred):
""" returns a dictionary of resources associated with provided (prefix, group, version)"""
resources = defaultdict(list)
subresources = defaultdict(dict)
path = '/'.join(filter(None, [prefix, group, version]))
try:
resources_response = self.client.request('GET', path).resources or []
except ServiceUnavailableError:
resources_response = []
resources_raw = list(filter(lambda resource: '/' not in resource['name'], resources_response))
subresources_raw = list(filter(lambda resource: '/' in resource['name'], resources_response))
for subresource in subresources_raw:
resource, name = subresource['name'].split('/')
subresources[resource][name] = subresource
for resource in resources_raw:
# Prevent duplicate keys
for key in ('prefix', 'group', 'api_version', 'client', 'preferred'):
resource.pop(key, None)
resourceobj = kubernetes.dynamic.Resource(
prefix=prefix,
group=group,
api_version=version,
client=self.client,
preferred=preferred,
subresources=subresources.get(resource['name']),
**resource
)
resources[resource['kind']].append(resourceobj)
resource_lookup = {
'prefix': prefix,
'group': group,
'api_version': version,
'kind': resourceobj.kind,
'name': resourceobj.name
}
resource_list = ResourceList(self.client, group=group, api_version=version, base_kind=resource['kind'], base_resource_lookup=resource_lookup)
resources[resource_list.kind].append(resource_list)
return resources
def get(self, **kwargs):
"""
Same as search, but will throw an error if there are multiple or no
results. If there are multiple results and only one is an exact match
on api_version, that resource will be returned.
"""
results = self.search(**kwargs)
# If there are multiple matches, prefer exact matches on api_version
if len(results) > 1 and kwargs.get('api_version'):
results = [
result for result in results if result.group_version == kwargs['api_version']
]
# If there are multiple matches, prefer non-List kinds
if len(results) > 1 and not all(isinstance(x, ResourceList) for x in results):
results = [result for result in results if not isinstance(result, ResourceList)]
# if multiple resources are found that share a GVK, prefer the one with the most supported verbs
if len(results) > 1 and len(set((x.group_version, x.kind) for x in results)) == 1:
if len(set(len(x.verbs) for x in results)) != 1:
results = [max(results, key=lambda x: len(x.verbs))]
if len(results) == 1:
return results[0]
elif not results:
raise ResourceNotFoundError('No matches found for {0}'.format(kwargs))
else:
raise ResourceNotUniqueError('Multiple matches found for {0}: {1}'.format(kwargs, results))
class LazyDiscoverer(Discoverer, kubernetes.dynamic.LazyDiscoverer):
def __init__(self, client, cache_file):
Discoverer.__init__(self, client, cache_file)
self.__update_cache = False
@property
def update_cache(self):
        return self.__update_cache
class CacheDecoder(json.JSONDecoder):
def __init__(self, client, *args, **kwargs):
self.client = client
json.JSONDecoder.__init__(self, object_hook=self.object_hook, *args, **kwargs)
def object_hook(self, obj):
if '_type' not in obj:
return obj
_type = obj.pop('_type')
if _type == 'Resource':
return kubernetes.dynamic.Resource(client=self.client, **obj)
elif _type == 'ResourceList':
return ResourceList(self.client, **obj)
elif _type == 'ResourceGroup':
return kubernetes.dynamic.discovery.ResourceGroup(obj['preferred'], resources=self.object_hook(obj['resources']))
return obj
| []
| []
| [
"USERNAME"
]
| [] | ["USERNAME"] | python | 1 | 0 | |
cmd/gardener-extension-provider-tencentcloud/app/app.go | // Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package app
import (
"context"
"fmt"
tencentcontrolplaneexposure "github.com/gardener/gardener-extension-provider-tencentcloud/pkg/webhook/controlplaneexposure"
"os"
"github.com/gardener/gardener-extension-provider-tencentcloud/pkg/tencent"
tencentinstall "github.com/gardener/gardener-extension-provider-tencentcloud/pkg/apis/tencentcloud/install"
tencentcmd "github.com/gardener/gardener-extension-provider-tencentcloud/pkg/cmd"
tencentbackupbucket "github.com/gardener/gardener-extension-provider-tencentcloud/pkg/controller/backupbucket"
tencentbackupentry "github.com/gardener/gardener-extension-provider-tencentcloud/pkg/controller/backupentry"
tencentcontrolplane "github.com/gardener/gardener-extension-provider-tencentcloud/pkg/controller/controlplane"
"github.com/gardener/gardener-extension-provider-tencentcloud/pkg/controller/healthcheck"
tencentinfrastructure "github.com/gardener/gardener-extension-provider-tencentcloud/pkg/controller/infrastructure"
tencentworker "github.com/gardener/gardener-extension-provider-tencentcloud/pkg/controller/worker"
genericcontrolplaneactuator "github.com/gardener/gardener/extensions/pkg/controller/controlplane/genericactuator"
druidv1alpha1 "github.com/gardener/etcd-druid/api/v1alpha1"
"github.com/gardener/gardener/extensions/pkg/controller"
controllercmd "github.com/gardener/gardener/extensions/pkg/controller/cmd"
"github.com/gardener/gardener/extensions/pkg/controller/worker"
"github.com/gardener/gardener/extensions/pkg/util"
webhookcmd "github.com/gardener/gardener/extensions/pkg/webhook/cmd"
machinev1alpha1 "github.com/gardener/machine-controller-manager/pkg/apis/machine/v1alpha1"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/manager"
)
// NewControllerManagerCommand creates a new command for running the Tencent Cloud provider controller.
func NewControllerManagerCommand(ctx context.Context) *cobra.Command {
var (
restOpts = &controllercmd.RESTOptions{}
mgrOpts = &controllercmd.ManagerOptions{
LeaderElection: true,
LeaderElectionID: controllercmd.LeaderElectionNameID(tencent.Name),
LeaderElectionNamespace: os.Getenv("LEADER_ELECTION_NAMESPACE"),
WebhookServerPort: 443,
WebhookCertDir: "/tmp/gardener-extensions-cert",
}
configFileOpts = &tencentcmd.ConfigOptions{}
// options for the backupbucket controller
backupBucketCtrlOpts = &controllercmd.ControllerOptions{
MaxConcurrentReconciles: 5,
}
// options for the backupentry controller
backupEntryCtrlOpts = &controllercmd.ControllerOptions{
MaxConcurrentReconciles: 5,
}
		// options for the health check controller
healthCheckCtrlOpts = &controllercmd.ControllerOptions{
MaxConcurrentReconciles: 5,
}
// options for the controlplane controller
controlPlaneCtrlOpts = &controllercmd.ControllerOptions{
MaxConcurrentReconciles: 5,
}
// options for the infrastructure controller
infraCtrlOpts = &controllercmd.ControllerOptions{
MaxConcurrentReconciles: 5,
}
reconcileOpts = &controllercmd.ReconcilerOptions{}
// options for the worker controller
workerCtrlOpts = &controllercmd.ControllerOptions{
MaxConcurrentReconciles: 5,
}
workerReconcileOpts = &worker.Options{
DeployCRDs: true,
}
workerCtrlOptsUnprefixed = controllercmd.NewOptionAggregator(workerCtrlOpts, workerReconcileOpts)
// options for the webhook server
webhookServerOptions = &webhookcmd.ServerOptions{
Namespace: os.Getenv("WEBHOOK_CONFIG_NAMESPACE"),
}
controllerSwitches = tencentcmd.ControllerSwitchOptions()
webhookSwitches = tencentcmd.WebhookSwitchOptions()
webhookOptions = webhookcmd.NewAddToManagerOptions(tencent.Name, webhookServerOptions, webhookSwitches)
aggOption = controllercmd.NewOptionAggregator(
restOpts,
mgrOpts,
controllercmd.PrefixOption("backupbucket-", backupBucketCtrlOpts),
controllercmd.PrefixOption("backupentry-", backupEntryCtrlOpts),
controllercmd.PrefixOption("controlplane-", controlPlaneCtrlOpts),
controllercmd.PrefixOption("infrastructure-", infraCtrlOpts),
controllercmd.PrefixOption("worker-", &workerCtrlOptsUnprefixed),
controllercmd.PrefixOption("healthcheck-", healthCheckCtrlOpts),
configFileOpts,
controllerSwitches,
reconcileOpts,
webhookOptions,
)
)
cmd := &cobra.Command{
Use: fmt.Sprintf("%s-controller-manager", tencent.Name),
Run: func(cmd *cobra.Command, args []string) {
if err := aggOption.Complete(); err != nil {
controllercmd.LogErrAndExit(err, "Error completing options")
}
util.ApplyClientConnectionConfigurationToRESTConfig(configFileOpts.Completed().Config.ClientConnection, restOpts.Completed().Config)
if workerReconcileOpts.Completed().DeployCRDs {
if err := worker.ApplyMachineResourcesForConfig(ctx, restOpts.Completed().Config); err != nil {
controllercmd.LogErrAndExit(err, "Error ensuring the machine CRDs")
}
}
mgr, err := manager.New(restOpts.Completed().Config, mgrOpts.Completed().Options())
if err != nil {
controllercmd.LogErrAndExit(err, "Could not instantiate manager")
}
scheme := mgr.GetScheme()
if err := controller.AddToScheme(scheme); err != nil {
controllercmd.LogErrAndExit(err, "Could not update manager scheme")
}
if err := tencentinstall.AddToScheme(scheme); err != nil {
controllercmd.LogErrAndExit(err, "Could not update manager scheme")
}
if err := druidv1alpha1.AddToScheme(scheme); err != nil {
controllercmd.LogErrAndExit(err, "Could not update manager scheme")
}
if err := machinev1alpha1.AddToScheme(scheme); err != nil {
controllercmd.LogErrAndExit(err, "Could not update manager scheme")
}
// add common meta types to schema for controller-runtime to use v1.ListOptions
metav1.AddToGroupVersion(scheme, machinev1alpha1.SchemeGroupVersion)
configFileOpts.Completed().ApplyMachineImageOwnerSecretRef(&tencentinfrastructure.DefaultAddOptions.MachineImageOwnerSecretRef)
configFileOpts.Completed().ApplyWhitelistedImageIDs(&tencentinfrastructure.DefaultAddOptions.WhitelistedImageIDs)
configFileOpts.Completed().ApplyETCDStorage(&tencentcontrolplaneexposure.DefaultAddOptions.ETCDStorage)
configFileOpts.Completed().ApplyKubeAPIServer(&tencentcontrolplaneexposure.DefaultAddOptions.KubeAPIServer)
configFileOpts.Completed().ApplyHealthCheckConfig(&healthcheck.DefaultAddOptions.HealthCheckConfig)
healthCheckCtrlOpts.Completed().Apply(&healthcheck.DefaultAddOptions.Controller)
backupBucketCtrlOpts.Completed().Apply(&tencentbackupbucket.DefaultAddOptions.Controller)
backupEntryCtrlOpts.Completed().Apply(&tencentbackupentry.DefaultAddOptions.Controller)
controlPlaneCtrlOpts.Completed().Apply(&tencentcontrolplane.DefaultAddOptions.Controller)
infraCtrlOpts.Completed().Apply(&tencentinfrastructure.DefaultAddOptions.Controller)
reconcileOpts.Completed().Apply(&tencentinfrastructure.DefaultAddOptions.IgnoreOperationAnnotation)
reconcileOpts.Completed().Apply(&tencentcontrolplane.DefaultAddOptions.IgnoreOperationAnnotation)
reconcileOpts.Completed().Apply(&tencentworker.DefaultAddOptions.IgnoreOperationAnnotation)
workerCtrlOpts.Completed().Apply(&tencentworker.DefaultAddOptions.Controller)
_, shootWebhooks, err := webhookOptions.Completed().AddToManager(mgr)
if err != nil {
controllercmd.LogErrAndExit(err, "Could not add webhooks to manager")
}
tencentcontrolplane.DefaultAddOptions.ShootWebhooks = shootWebhooks
// Update shoot webhook configuration in case the webhook server port has changed.
c, err := client.New(restOpts.Completed().Config, client.Options{})
if err != nil {
controllercmd.LogErrAndExit(err, "Error creating client for startup tasks")
}
if err := genericcontrolplaneactuator.ReconcileShootWebhooksForAllNamespaces(ctx, c, tencent.Name, tencent.Type, mgr.GetWebhookServer().Port, shootWebhooks); err != nil {
controllercmd.LogErrAndExit(err, "Error ensuring shoot webhooks in all namespaces")
}
if err := controllerSwitches.Completed().AddToManager(mgr); err != nil {
controllercmd.LogErrAndExit(err, "Could not add controllers to manager")
}
if err := mgr.Start(ctx.Done()); err != nil {
controllercmd.LogErrAndExit(err, "Error running manager")
}
},
}
aggOption.AddFlags(cmd.Flags())
return cmd
}
| [
"\"LEADER_ELECTION_NAMESPACE\"",
"\"WEBHOOK_CONFIG_NAMESPACE\""
]
| []
| [
"LEADER_ELECTION_NAMESPACE",
"WEBHOOK_CONFIG_NAMESPACE"
]
| [] | ["LEADER_ELECTION_NAMESPACE", "WEBHOOK_CONFIG_NAMESPACE"] | go | 2 | 0 | |
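For context, a minimal main package that wires this command up could look like the sketch below. The import path follows the module path used by the file's own imports; the signal-driven context cancellation is an assumption about how the command is normally run, not something shown in this file.

package main

import (
	"context"
	"os"
	"os/signal"
	"syscall"

	"github.com/gardener/gardener-extension-provider-tencentcloud/cmd/gardener-extension-provider-tencentcloud/app"
)

func main() {
	// Cancel the context on SIGINT/SIGTERM so the controllers can shut down cleanly.
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer stop()

	if err := app.NewControllerManagerCommand(ctx).Execute(); err != nil {
		os.Exit(1)
	}
}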
pkg/loadbalancer/config/config.go | /*
* Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package config
import (
"fmt"
"io"
"os"
"strconv"
"gopkg.in/gcfg.v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/klog"
)
const (
// SizeSmall is the NSX-T load balancer size small
SizeSmall = "SMALL"
// SizeMedium is the NSX-T load balancer size medium
SizeMedium = "MEDIUM"
// SizeLarge is the NSX-T load balancer size large
SizeLarge = "LARGE"
// DefaultMaxRetries is the default value for max retries
DefaultMaxRetries = 30
// DefaultRetryMinDelay is the default value for minimum retry delay
DefaultRetryMinDelay = 500
// DefaultRetryMaxDelay is the default value for maximum retry delay
DefaultRetryMaxDelay = 5000
// DefaultLoadBalancerClass is the default load balancer class
DefaultLoadBalancerClass = "default"
)
// LoadBalancerSizes contains the valid size names
var LoadBalancerSizes = sets.NewString(SizeSmall, SizeMedium, SizeLarge)
// LBConfig is used to read and store information from the cloud configuration file
type LBConfig struct {
LoadBalancer LoadBalancerConfig `gcfg:"LoadBalancer"`
LoadBalancerClasses map[string]*LoadBalancerClassConfig `gcfg:"LoadBalancerClass"`
NSXT NsxtConfig `gcfg:"NSX-T"`
AdditionalTags map[string]string `gcfg:"Tags"`
NSXTSimulation *NsxtSimulation `gcfg:"NSX-T-Simulation"`
}
// LoadBalancerConfig contains the configuration for the load balancer itself
type LoadBalancerConfig struct {
IPPoolName string `gcfg:"ipPoolName"`
IPPoolID string `gcfg:"ipPoolID"`
Size string `gcfg:"size"`
LBServiceID string `gcfg:"lbServiceId"`
LogicalRouterID string `gcfg:"logicalRouterId"`
}
// LoadBalancerClassConfig contains the configuration for a load balancer class
type LoadBalancerClassConfig struct {
IPPoolName string `gcfg:"ipPoolName"`
IPPoolID string `gcfg:"ipPoolID"`
}
// NsxtConfig contains the NSX-T specific configuration
type NsxtConfig struct {
// NSX-T username.
User string `gcfg:"user"`
// NSX-T password in clear text.
Password string `gcfg:"password"`
// NSX-T host.
Host string `gcfg:"host"`
// True if NSX-T uses self-signed cert.
InsecureFlag bool `gcfg:"insecure-flag"`
RemoteAuth bool `gcfg:"remote-auth"`
MaxRetries int `gcfg:"max-retries"`
RetryMinDelay int `gcfg:"retry-min-delay"`
RetryMaxDelay int `gcfg:"retry-max-delay"`
RetryOnStatusCodes []int `gcfg:"retry-on-status-codes"`
ClientAuthCertFile string `gcfg:"client-auth-cert-file"`
ClientAuthKeyFile string `gcfg:"client-auth-key-file"`
CAFile string `gcfg:"ca-file"`
}
// NsxtSimulation is a helper configuration to pass fake data for testing purposes
type NsxtSimulation struct {
SimulatedIPPools []string `gcfg:"simulatedIPPools"`
}
// IsEnabled checks whether the load balancer feature is enabled
// It is enabled if any flavor of the load balancer configuration is given.
func (cfg *LBConfig) IsEnabled() bool {
return len(cfg.LoadBalancerClasses) > 0 || !cfg.LoadBalancer.IsEmpty()
}
func (cfg *LBConfig) validateConfig() error {
if cfg.LoadBalancer.LBServiceID == "" && cfg.LoadBalancer.LogicalRouterID == "" {
		msg := "load balancer service id or logical router id required"
klog.Errorf(msg)
return fmt.Errorf(msg)
}
if !LoadBalancerSizes.Has(cfg.LoadBalancer.Size) {
msg := "load balancer size is invalid"
klog.Errorf(msg)
return fmt.Errorf(msg)
}
if cfg.LoadBalancer.IPPoolID == "" && cfg.LoadBalancer.IPPoolName == "" {
class, ok := cfg.LoadBalancerClasses[DefaultLoadBalancerClass]
if !ok {
msg := "no default load balancer class defined"
klog.Errorf(msg)
return fmt.Errorf(msg)
} else if class.IPPoolName == "" && class.IPPoolID == "" {
msg := "default load balancer class: ipPoolName and ipPoolID is empty"
klog.Errorf(msg)
return fmt.Errorf(msg)
}
} else {
if cfg.LoadBalancer.IPPoolName != "" && cfg.LoadBalancer.IPPoolID != "" {
msg := "either load balancer ipPoolName or ipPoolID can be set"
klog.Errorf(msg)
return fmt.Errorf(msg)
}
}
if cfg.NSXTSimulation == nil {
return cfg.NSXT.validateConfig()
}
return nil
}
// IsEmpty checks whether the load balancer config is empty (no values specified)
func (cfg *LoadBalancerConfig) IsEmpty() bool {
return cfg.Size == "" && cfg.LBServiceID == "" &&
cfg.IPPoolID == "" && cfg.IPPoolName == "" &&
cfg.LogicalRouterID == ""
}
func (cfg *NsxtConfig) validateConfig() error {
if cfg.User == "" {
msg := "user is empty"
klog.Errorf(msg)
return fmt.Errorf(msg)
}
if cfg.Password == "" {
msg := "password is empty"
klog.Errorf(msg)
return fmt.Errorf(msg)
}
if cfg.Host == "" {
msg := "host is empty"
klog.Errorf(msg)
return fmt.Errorf(msg)
}
return nil
}
// FromEnv initializes the provided configuration object with values
// obtained from environment variables. If an environment variable is set
// for a property that's already initialized, the environment variable's value
// takes precedence.
func (cfg *NsxtConfig) FromEnv() error {
if v := os.Getenv("NSXT_MANAGER_HOST"); v != "" {
cfg.Host = v
}
if v := os.Getenv("NSXT_USERNAME"); v != "" {
cfg.User = v
}
if v := os.Getenv("NSXT_PASSWORD"); v != "" {
cfg.Password = v
}
if v := os.Getenv("NSXT_ALLOW_UNVERIFIED_SSL"); v != "" {
InsecureFlag, err := strconv.ParseBool(v)
if err != nil {
klog.Errorf("Failed to parse NSXT_ALLOW_UNVERIFIED_SSL: %s", err)
return fmt.Errorf("Failed to parse NSXT_ALLOW_UNVERIFIED_SSL: %s", err)
}
cfg.InsecureFlag = InsecureFlag
}
if v := os.Getenv("NSXT_MAX_RETRIES"); v != "" {
n, err := strconv.Atoi(v)
if err != nil {
klog.Errorf("Failed to parse NSXT_MAX_RETRIES: %s", err)
return fmt.Errorf("Failed to parse NSXT_MAX_RETRIES: %s", err)
}
cfg.MaxRetries = n
}
if v := os.Getenv("NSXT_RETRY_MIN_DELAY"); v != "" {
n, err := strconv.Atoi(v)
if err != nil {
klog.Errorf("Failed to parse NSXT_RETRY_MIN_DELAY: %s", err)
return fmt.Errorf("Failed to parse NSXT_RETRY_MIN_DELAY: %s", err)
}
cfg.RetryMinDelay = n
}
if v := os.Getenv("NSXT_RETRY_MAX_DELAY"); v != "" {
n, err := strconv.Atoi(v)
if err != nil {
klog.Errorf("Failed to parse NSXT_RETRY_MAX_DELAY: %s", err)
return fmt.Errorf("Failed to parse NSXT_RETRY_MAX_DELAY: %s", err)
}
cfg.RetryMaxDelay = n
}
if v := os.Getenv("NSXT_REMOTE_AUTH"); v != "" {
remoteAuth, err := strconv.ParseBool(v)
if err != nil {
klog.Errorf("Failed to parse NSXT_REMOTE_AUTH: %s", err)
return fmt.Errorf("Failed to parse NSXT_REMOTE_AUTH: %s", err)
}
cfg.RemoteAuth = remoteAuth
}
if v := os.Getenv("NSXT_CLIENT_AUTH_CERT_FILE"); v != "" {
cfg.ClientAuthCertFile = v
}
if v := os.Getenv("NSXT_CLIENT_AUTH_KEY_FILE"); v != "" {
cfg.ClientAuthKeyFile = v
}
if v := os.Getenv("NSXT_CA_FILE"); v != "" {
cfg.CAFile = v
}
err := cfg.validateConfig()
if err != nil {
return err
}
return nil
}
// ReadConfig parses the vSphere cloud provider config file and stores it into LBConfig.
// Environment variables are also checked.
func ReadConfig(config io.Reader) (*LBConfig, error) {
if config == nil {
return nil, fmt.Errorf("no vSphere cloud provider config file given")
}
cfg := &LBConfig{}
if err := gcfg.FatalOnly(gcfg.ReadInto(cfg, config)); err != nil {
return nil, err
}
err := cfg.CompleteAndValidate()
if err != nil {
return nil, err
}
return cfg, nil
}
// CompleteAndValidate sets default values, overrides by env and validates the resulting config
func (cfg *LBConfig) CompleteAndValidate() error {
if !cfg.IsEnabled() {
return nil
}
if cfg.NSXT.MaxRetries == 0 {
cfg.NSXT.MaxRetries = DefaultMaxRetries
}
if cfg.NSXT.RetryMinDelay == 0 {
cfg.NSXT.RetryMinDelay = DefaultRetryMinDelay
}
if cfg.NSXT.RetryMaxDelay == 0 {
cfg.NSXT.RetryMaxDelay = DefaultRetryMaxDelay
}
if cfg.LoadBalancerClasses == nil {
cfg.LoadBalancerClasses = map[string]*LoadBalancerClassConfig{}
}
for _, class := range cfg.LoadBalancerClasses {
if class.IPPoolName == "" && class.IPPoolID == "" {
class.IPPoolID = cfg.LoadBalancer.IPPoolID
class.IPPoolName = cfg.LoadBalancer.IPPoolName
}
}
// Env Vars should override config file entries if present
if err := cfg.NSXT.FromEnv(); err != nil {
return err
}
return cfg.validateConfig()
}
| [
"\"NSXT_MANAGER_HOST\"",
"\"NSXT_USERNAME\"",
"\"NSXT_PASSWORD\"",
"\"NSXT_ALLOW_UNVERIFIED_SSL\"",
"\"NSXT_MAX_RETRIES\"",
"\"NSXT_RETRY_MIN_DELAY\"",
"\"NSXT_RETRY_MAX_DELAY\"",
"\"NSXT_REMOTE_AUTH\"",
"\"NSXT_CLIENT_AUTH_CERT_FILE\"",
"\"NSXT_CLIENT_AUTH_KEY_FILE\"",
"\"NSXT_CA_FILE\""
]
| []
| [
"NSXT_REMOTE_AUTH",
"NSXT_PASSWORD",
"NSXT_CLIENT_AUTH_KEY_FILE",
"NSXT_RETRY_MIN_DELAY",
"NSXT_ALLOW_UNVERIFIED_SSL",
"NSXT_MANAGER_HOST",
"NSXT_MAX_RETRIES",
"NSXT_RETRY_MAX_DELAY",
"NSXT_USERNAME",
"NSXT_CA_FILE",
"NSXT_CLIENT_AUTH_CERT_FILE"
]
| [] | ["NSXT_REMOTE_AUTH", "NSXT_PASSWORD", "NSXT_CLIENT_AUTH_KEY_FILE", "NSXT_RETRY_MIN_DELAY", "NSXT_ALLOW_UNVERIFIED_SSL", "NSXT_MANAGER_HOST", "NSXT_MAX_RETRIES", "NSXT_RETRY_MAX_DELAY", "NSXT_USERNAME", "NSXT_CA_FILE", "NSXT_CLIENT_AUTH_CERT_FILE"] | go | 11 | 0 | |
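A small sketch of how ReadConfig and the NSXT_* environment overrides interact is shown below. The import path is an assumption (the module path is not visible in this file), and all values are placeholders; credentials are supplied through the environment so that FromEnv fills in User and Password.

package main

import (
	"fmt"
	"os"
	"strings"

	lbconfig "example.com/pkg/loadbalancer/config" // assumed import path for the package above
)

func main() {
	// FromEnv lets these override or complete the file-based settings.
	os.Setenv("NSXT_USERNAME", "admin")
	os.Setenv("NSXT_PASSWORD", "secret")

	raw := `
[LoadBalancer]
size = SMALL
lbServiceId = lb-123
ipPoolName = lb-pool

[NSX-T]
host = nsxt.example.local
insecure-flag = true
`
	cfg, err := lbconfig.ReadConfig(strings.NewReader(raw))
	if err != nil {
		fmt.Println("config error:", err)
		return
	}
	fmt.Println("enabled:", cfg.IsEnabled(), "size:", cfg.LoadBalancer.Size)
}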
zendesk/zendesk.go | // Package zendesk provides a client for using the Zendesk Core API.
package zendesk
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"os"
"strconv"
"time"
)
// Client describes a client for the Zendesk Core API.
type Client interface {
WithHeader(name, value string) Client
AddUserTags(int64, []string) ([]string, error)
AutocompleteOrganizations(string) ([]Organization, error)
BatchUpdateManyTickets([]Ticket) error
BulkUpdateManyTickets([]int64, *Ticket) error
CreateIdentity(int64, *UserIdentity) (*UserIdentity, error)
CreateOrganization(*Organization) (*Organization, error)
CreateOrganizationMembership(*OrganizationMembership) (*OrganizationMembership, error)
CreateOrUpdateOrganization(*Organization) (*Organization, error)
CreateOrUpdateUser(*User) (*User, error)
CreateTicket(*Ticket) (*Ticket, error)
CreateUser(*User) (*User, error)
CreateGroup(*Group) (*Group, error)
DeleteIdentity(int64, int64) error
DeleteOrganization(int64) error
DeleteTicket(int64) error
DeleteUser(int64) (*User, error)
DeleteOrganizationMembershipByID(int64) error
DeleteGroup(int64) error
IncrementalUsers(startTime time.Time) ([]User, error)
ListIdentities(int64) ([]UserIdentity, error)
ListLocales() ([]Locale, error)
ListOrganizationMembershipsByUserID(id int64) ([]OrganizationMembership, error)
ListOrganizations(*ListOptions) ([]Organization, error)
ListOrganizationUsers(int64, *ListUsersOptions) ([]User, error)
ListOrganizationTickets(int64, *ListOptions, ...SideLoad) (*ListResponse, error)
ListExternalIDTickets(string, *ListOptions, ...SideLoad) (*ListResponse, error)
ListRequestedTickets(int64) ([]Ticket, error)
ListSchedules() ([]Schedule, error)
ListTickets(*ListOptions, ...SideLoad) (*ListResponse, error)
ListTicketAudits(int64, *ListOptions) (*ListResponse, error)
ListTicketComments(int64) ([]TicketComment, error)
ListTicketCommentsFull(int64, *ListOptions, ...SideLoad) (*ListResponse, error)
ListTicketCollaborators(int64) ([]User, error)
ListTicketFollowers(int64) ([]User, error)
ListTicketEmailCCs(int64) ([]User, error)
ListTicketFields() ([]TicketField, error)
ListTicketIncidents(int64) ([]Ticket, error)
ListUsers(*ListUsersOptions) ([]User, error)
ListGroups() ([]Group, error)
MakeIdentityPrimary(int64, int64) ([]UserIdentity, error)
PermanentlyDeleteTicket(int64) (*JobStatus, error)
PermanentlyDeleteUser(int64) (*User, error)
RedactCommentString(int64, int64, string) (*TicketComment, error)
SearchOrganizationsByExternalID(string) ([]Organization, error)
SearchTickets(string, *ListOptions, ...Filters) (*TicketSearchResults, error)
SearchUsers(string) ([]User, error)
SearchUserByExternalID(string) (*User, error)
ShowComplianceDeletionStatuses(int64) ([]ComplianceDeletionStatus, error)
ShowIdentity(int64, int64) (*UserIdentity, error)
ShowJobStatus(string) (*JobStatus, error)
ShowLocale(int64) (*Locale, error)
ShowLocaleByCode(string) (*Locale, error)
ShowManyOrganizations([]int64) ([]Organization, error)
ShowManyUsers([]int64) ([]User, error)
ShowManyUsersByExternalIDs([]string) ([]User, error)
ShowOrganization(int64) (*Organization, error)
ShowTicket(int64) (*Ticket, error)
ShowUser(int64) (*User, error)
UpdateIdentity(int64, int64, *UserIdentity) (*UserIdentity, error)
UpdateOrganization(int64, *Organization) (*Organization, error)
UpdateTicket(int64, *Ticket) (*Ticket, error)
UpdateUser(int64, *User) (*User, error)
UploadFile(string, *string, io.Reader) (*Upload, error)
UpdateGroup(int64, *Group) (*Group, error)
}
type client struct {
username string
password string
client *http.Client
baseURL *url.URL
userAgent string
headers map[string]string
}
type ClientOption func(*client)
func WithHTTPClient(httpClient *http.Client) ClientOption {
return func(c *client) {
c.client = httpClient
}
}
// NewEnvClient creates a new Client configured via environment variables.
//
// Three environment variables are required: ZENDESK_DOMAIN, ZENDESK_USERNAME and ZENDESK_PASSWORD;
// they provide the parameters passed to the NewClient function.
func NewEnvClient(opts ...ClientOption) (Client, error) {
domain := os.Getenv("ZENDESK_DOMAIN")
if domain == "" {
return nil, errors.New("ZENDESK_DOMAIN not found")
}
username := os.Getenv("ZENDESK_USERNAME")
if username == "" {
return nil, errors.New("ZENDESK_USERNAME not found")
}
password := os.Getenv("ZENDESK_PASSWORD")
if password == "" {
return nil, errors.New("ZENDESK_PASSWORD not found")
}
return NewClient(domain, username, password, opts...)
}
// NewClient creates a new Client.
//
// You can use either a user email/password combination or an API token.
// For the latter, append /token to the email and use the API token as a password
func NewClient(domain, username, password string, opts ...ClientOption) (Client, error) {
return NewURLClient(fmt.Sprintf("https://%s.zendesk.com", domain), username, password, opts...)
}
// NewURLClient is like NewClient but accepts an explicit end point instead of a Zendesk domain.
func NewURLClient(endpoint, username, password string, opts ...ClientOption) (Client, error) {
baseURL, err := url.Parse(endpoint)
if err != nil {
return nil, err
}
c := &client{
baseURL: baseURL,
userAgent: "Go-Zendesk",
username: username,
password: password,
client: http.DefaultClient,
headers: make(map[string]string),
}
for _, opt := range opts {
opt(c)
}
return c, nil
}
// WithHeader returns an updated client that sends the provided header
// with each subsequent request.
func (c *client) WithHeader(name, value string) Client {
newClient := *c
newClient.headers = make(map[string]string)
for k, v := range c.headers {
newClient.headers[k] = v
}
newClient.headers[name] = value
return &newClient
}
func (c *client) request(method, endpoint string, headers map[string]string, body io.Reader) (*http.Response, error) {
rel, err := url.Parse(endpoint)
if err != nil {
return nil, err
}
url := c.baseURL.ResolveReference(rel)
req, err := http.NewRequest(method, url.String(), body)
if err != nil {
return nil, err
}
req.SetBasicAuth(c.username, c.password)
req.Header.Set("User-Agent", c.userAgent)
for key, value := range c.headers {
req.Header.Set(key, value)
}
for key, value := range headers {
req.Header.Set(key, value)
}
return c.client.Do(req)
}
func (c *client) do(method, endpoint string, in, out interface{}) error {
payload, err := marshall(in)
if err != nil {
return err
}
headers := map[string]string{}
if in != nil {
headers["Content-Type"] = "application/json"
}
res, err := c.request(method, endpoint, headers, bytes.NewReader(payload))
if err != nil {
return err
}
defer res.Body.Close()
// Retry the request if the retry after header is present. This can happen when we are
// being rate limited or we failed with a retriable error.
if res.Header.Get("Retry-After") != "" {
after, err := strconv.ParseInt(res.Header.Get("Retry-After"), 10, 64)
if err != nil || after == 0 {
return unmarshall(res, out)
}
time.Sleep(time.Duration(after) * time.Second)
res, err = c.request(method, endpoint, headers, bytes.NewReader(payload))
if err != nil {
return err
}
defer res.Body.Close()
}
return unmarshall(res, out)
}
func (c *client) get(endpoint string, out interface{}) error {
return c.do("GET", endpoint, nil, out)
}
func (c *client) post(endpoint string, in, out interface{}) error {
return c.do("POST", endpoint, in, out)
}
func (c *client) put(endpoint string, in, out interface{}) error {
return c.do("PUT", endpoint, in, out)
}
func (c *client) delete(endpoint string, out interface{}) error {
return c.do("DELETE", endpoint, nil, out)
}
func marshall(in interface{}) ([]byte, error) {
if in == nil {
return nil, nil
}
return json.Marshal(in)
}
func unmarshall(res *http.Response, out interface{}) error {
if res.StatusCode < 200 || res.StatusCode >= 300 {
apierr := new(APIError)
apierr.Response = res
if err := json.NewDecoder(res.Body).Decode(apierr); err != nil {
apierr.Type = String("Unknown")
apierr.Description = String("Oops! Something went wrong when parsing the error response.")
}
return apierr
}
if out != nil {
return json.NewDecoder(res.Body).Decode(out)
}
return nil
}
// APIPayload represents the payload of an API call.
type APIPayload struct {
Attachment *Attachment `json:"attachment"`
Attachments []Attachment `json:"attachments"`
Audits []TicketAudit `json:"audits,omitempty"`
Comment *TicketComment `json:"comment,omitempty"`
Comments []TicketComment `json:"comments,omitempty"`
ComplianceDeletionStatuses []ComplianceDeletionStatus `json:"compliance_deletion_statuses,omitempty"`
Identity *UserIdentity `json:"identity,omitempty"`
Identities []UserIdentity `json:"identities,omitempty"`
JobStatus *JobStatus `json:"job_status,omitempty"`
Locale *Locale `json:"locale,omitempty"`
Locales []Locale `json:"locales,omitempty"`
Organization *Organization `json:"organization,omitempty"`
OrganizationMembership *OrganizationMembership `json:"organization_membership,omitempty"`
OrganizationMemberships []OrganizationMembership `json:"organization_memberships,omitempty"`
Organizations []Organization `json:"organizations,omitempty"`
Schedules []Schedule `json:"schedules,omitempty"`
Tags []string `json:"tags,omitempty"`
Ticket *Ticket `json:"ticket,omitempty"`
TicketField *TicketField `json:"ticket_field,omitempty"`
TicketFields []TicketField `json:"ticket_fields,omitempty"`
Tickets []Ticket `json:"tickets,omitempty"`
Upload *Upload `json:"upload,omitempty"`
User *User `json:"user,omitempty"`
Users []User `json:"users,omitempty"`
Group *Group `json:"group,omitempty"`
Groups []Group `json:"groups,omitempty"`
NextPage *string `json:"next_page,omitempty"`
PreviousPage *string `json:"previous_page,omitempty"`
Count *int64 `json:"count,omitempty"`
}
// TicketSearchResults represents returned results from the unified search api for type:ticket
type TicketSearchResults struct {
Results []Ticket `json:"results"`
NextPage *string `json:"next_page"`
PreviousPage *string `json:"previous_page"`
Count *int64 `json:"count"`
}
// APIError represents an error response returned by the API.
type APIError struct {
Response *http.Response
	Type        *string                       `json:"error,omitempty"`
Description *string `json:"description,omitempty"`
Details *map[string][]*APIErrorDetail `json:"details,omitempty"`
}
func (e *APIError) Error() string {
msg := fmt.Sprintf("%v %v: %d", e.Response.Request.Method, e.Response.Request.URL, e.Response.StatusCode)
if e.Type != nil {
msg = fmt.Sprintf("%s %v", msg, *e.Type)
}
if e.Description != nil {
msg = fmt.Sprintf("%s: %v", msg, *e.Description)
}
if e.Details != nil {
msg = fmt.Sprintf("%s: %+v", msg, *e.Details)
}
return msg
}
// APIErrorDetail represents a detail about an APIError.
type APIErrorDetail struct {
Type *string `json:"error,omitempty"`
Description *string `json:"description,omitempty"`
}
func (e *APIErrorDetail) Error() string {
msg := ""
if e.Type != nil {
msg = *e.Type + ": "
}
if e.Description != nil {
msg += *e.Description
}
return msg
}
// Bool is a helper function that returns a pointer to the bool value b.
func Bool(b bool) *bool {
p := b
return &p
}
// Int is a helper function that returns a pointer to the int value i.
func Int(i int64) *int64 {
p := i
return &p
}
// String is a helper function that returns a pointer to the string value s.
func String(s string) *string {
p := s
return &p
}
// ListResponse is a holder for the various returns from the list apis
type ListResponse struct {
Comments []TicketComment
Tickets []Ticket
Users []User
Groups []Group
Audits []TicketAudit
NextPage *string
PreviousPage *string
Count *int64
}
// ListOptions specifies the optional parameters for the list methods that support pagination.
//
// Zendesk Core API docs: https://developer.zendesk.com/rest_api/docs/core/introduction#pagination
type ListOptions struct {
// Sets the page of results to retrieve.
Page int `url:"page,omitempty"`
// Sets the number of results to include per page.
PerPage int `url:"per_page,omitempty"`
// Sets the field to sort the retrieved results by.
SortBy string `url:"sort_by,omitempty"`
// Sets the sort order of the results. One of asc or desc.
SortOrder string `url:"sort_order,omitempty"`
}
// Side-Loading
//
// Zendesk Core API docs: https://developer.zendesk.com/rest_api/docs/core/side_loading#side-loading
type SideLoadOptions struct {
Include []string
}
// Allows for side loads to be specified on api requests that support it
type SideLoad func(*SideLoadOptions)
// IncludeUsers will include a top level array of users
func IncludeUsers() SideLoad {
return func(c *SideLoadOptions) {
c.Include = append(c.Include, "users")
}
}
// IncludeGroups will include a top level array of groups
func IncludeGroups() SideLoad {
return func(c *SideLoadOptions) {
c.Include = append(c.Include, "groups")
}
}
// IncludeCommentCount will include the comment count for each ticket
func IncludeCommentCount() SideLoad {
return func(c *SideLoadOptions) {
c.Include = append(c.Include, "comment_count")
}
}
| [
"\"ZENDESK_DOMAIN\"",
"\"ZENDESK_USERNAME\"",
"\"ZENDESK_PASSWORD\""
]
| []
| [
"ZENDESK_PASSWORD",
"ZENDESK_DOMAIN",
"ZENDESK_USERNAME"
]
| [] | ["ZENDESK_PASSWORD", "ZENDESK_DOMAIN", "ZENDESK_USERNAME"] | go | 3 | 0 | |
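A minimal usage sketch for this client is shown below. The import path is an assumption (the module path is not visible in this file), and the ticket ID is a placeholder; NewEnvClient reads ZENDESK_DOMAIN, ZENDESK_USERNAME and ZENDESK_PASSWORD from the environment.

package main

import (
	"fmt"
	"log"

	zendesk "example.com/zendesk" // assumed import path for the package above
)

func main() {
	client, err := zendesk.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}

	// WithHeader returns a client copy that adds this header to every request.
	client = client.WithHeader("X-Request-Source", "example")

	ticket, err := client.ShowTicket(12345) // placeholder ticket ID
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", ticket)
}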
Godeps/_workspace/src/github.com/jinzhu/gorm/main_test.go | package gorm_test
import (
"database/sql"
"database/sql/driver"
"fmt"
"strconv"
_ "github.com/denisenkom/go-mssqldb"
testdb "github.com/erikstmartin/go-testdb"
_ "github.com/go-sql-driver/mysql"
"github.com/jinzhu/gorm"
"github.com/jinzhu/now"
_ "github.com/lib/pq"
_ "github.com/mattn/go-sqlite3"
"os"
"testing"
"time"
)
var (
DB gorm.DB
t1, t2, t3, t4, t5 time.Time
)
func init() {
var err error
switch os.Getenv("GORM_DIALECT") {
case "mysql":
// CREATE USER 'gorm'@'localhost' IDENTIFIED BY 'gorm';
// CREATE DATABASE gorm;
// GRANT ALL ON gorm.* TO 'gorm'@'localhost';
fmt.Println("testing mysql...")
DB, err = gorm.Open("mysql", "gorm:gorm@/gorm?charset=utf8&parseTime=True")
case "postgres":
fmt.Println("testing postgres...")
		DB, err = gorm.Open("postgres", "user=gorm dbname=gorm sslmode=disable")
case "mssql":
fmt.Println("testing mssql...")
DB, err = gorm.Open("mssql", "server=SERVER_HERE;database=rogue;user id=USER_HERE;password=PW_HERE;port=1433")
default:
fmt.Println("testing sqlite3...")
DB, err = gorm.Open("sqlite3", "/tmp/gorm.db")
}
// DB.SetLogger(Logger{log.New(os.Stdout, "\r\n", 0)})
// DB.SetLogger(log.New(os.Stdout, "\r\n", 0))
DB.LogMode(true)
DB.LogMode(false)
if err != nil {
panic(fmt.Sprintf("No error should happen when connect database, but got %+v", err))
}
DB.DB().SetMaxIdleConns(10)
runMigration()
}
func TestExceptionsWithInvalidSql(t *testing.T) {
var columns []string
if DB.Where("sdsd.zaaa = ?", "sd;;;aa").Pluck("aaa", &columns).Error == nil {
		t.Errorf("Should get error with invalid SQL")
}
if DB.Model(&User{}).Where("sdsd.zaaa = ?", "sd;;;aa").Pluck("aaa", &columns).Error == nil {
		t.Errorf("Should get error with invalid SQL")
}
if DB.Where("sdsd.zaaa = ?", "sd;;;aa").Find(&User{}).Error == nil {
		t.Errorf("Should get error with invalid SQL")
}
var count1, count2 int64
DB.Model(&User{}).Count(&count1)
if count1 <= 0 {
t.Errorf("Should find some users")
}
if DB.Where("name = ?", "jinzhu; delete * from users").First(&User{}).Error == nil {
		t.Errorf("Should get error with invalid SQL")
}
DB.Model(&User{}).Count(&count2)
if count1 != count2 {
		t.Errorf("No users should be deleted by invalid SQL")
}
}
func TestSetTable(t *testing.T) {
DB.Create(getPreparedUser("pluck_user1", "pluck_user"))
DB.Create(getPreparedUser("pluck_user2", "pluck_user"))
DB.Create(getPreparedUser("pluck_user3", "pluck_user"))
if err := DB.Table("users").Where("role = ?", "pluck_user").Pluck("age", &[]int{}).Error; err != nil {
		t.Errorf("No errors should happen if set table for pluck, but got %v", err)
}
var users []User
if DB.Table("users").Find(&[]User{}).Error != nil {
t.Errorf("No errors should happen if set table for find")
}
if DB.Table("invalid_table").Find(&users).Error == nil {
		t.Errorf("Should get error when table is set to an invalid table")
}
DB.Exec("drop table deleted_users;")
if DB.Table("deleted_users").CreateTable(&User{}).Error != nil {
t.Errorf("Create table with specified table")
}
DB.Table("deleted_users").Save(&User{Name: "DeletedUser"})
var deletedUsers []User
DB.Table("deleted_users").Find(&deletedUsers)
if len(deletedUsers) != 1 {
t.Errorf("Query from specified table")
}
DB.Save(getPreparedUser("normal_user", "reset_table"))
DB.Table("deleted_users").Save(getPreparedUser("deleted_user", "reset_table"))
var user1, user2, user3 User
DB.Where("role = ?", "reset_table").First(&user1).Table("deleted_users").First(&user2).Table("").First(&user3)
if (user1.Name != "normal_user") || (user2.Name != "deleted_user") || (user3.Name != "normal_user") {
t.Errorf("unset specified table with blank string")
}
}
type Order struct {
}
type Cart struct {
}
func (c Cart) TableName() string {
return "shopping_cart"
}
func TestHasTable(t *testing.T) {
type Foo struct {
Id int
Stuff string
}
DB.DropTable(&Foo{})
if ok := DB.HasTable(&Foo{}); ok {
t.Errorf("Table should not exist, but does")
}
if err := DB.CreateTable(&Foo{}).Error; err != nil {
t.Errorf("Table should be created")
}
if ok := DB.HasTable(&Foo{}); !ok {
t.Errorf("Table should exist, but HasTable informs it does not")
}
}
func TestTableName(t *testing.T) {
DB := DB.Model("")
if DB.NewScope(Order{}).TableName() != "orders" {
t.Errorf("Order's table name should be orders")
}
if DB.NewScope(&Order{}).TableName() != "orders" {
t.Errorf("&Order's table name should be orders")
}
if DB.NewScope([]Order{}).TableName() != "orders" {
t.Errorf("[]Order's table name should be orders")
}
if DB.NewScope(&[]Order{}).TableName() != "orders" {
t.Errorf("&[]Order's table name should be orders")
}
DB.SingularTable(true)
if DB.NewScope(Order{}).TableName() != "order" {
t.Errorf("Order's singular table name should be order")
}
if DB.NewScope(&Order{}).TableName() != "order" {
t.Errorf("&Order's singular table name should be order")
}
if DB.NewScope([]Order{}).TableName() != "order" {
t.Errorf("[]Order's singular table name should be order")
}
if DB.NewScope(&[]Order{}).TableName() != "order" {
t.Errorf("&[]Order's singular table name should be order")
}
if DB.NewScope(&Cart{}).TableName() != "shopping_cart" {
t.Errorf("&Cart's singular table name should be shopping_cart")
}
if DB.NewScope(Cart{}).TableName() != "shopping_cart" {
t.Errorf("Cart's singular table name should be shopping_cart")
}
if DB.NewScope(&[]Cart{}).TableName() != "shopping_cart" {
t.Errorf("&[]Cart's singular table name should be shopping_cart")
}
if DB.NewScope([]Cart{}).TableName() != "shopping_cart" {
t.Errorf("[]Cart's singular table name should be shopping_cart")
}
DB.SingularTable(false)
}
func TestSqlNullValue(t *testing.T) {
DB.DropTable(&NullValue{})
DB.AutoMigrate(&NullValue{})
if err := DB.Save(&NullValue{Name: sql.NullString{String: "hello", Valid: true},
Age: sql.NullInt64{Int64: 18, Valid: true},
Male: sql.NullBool{Bool: true, Valid: true},
Height: sql.NullFloat64{Float64: 100.11, Valid: true},
AddedAt: NullTime{Time: time.Now(), Valid: true},
}).Error; err != nil {
		t.Errorf("No error should be raised when testing null values")
}
var nv NullValue
DB.First(&nv, "name = ?", "hello")
if nv.Name.String != "hello" || nv.Age.Int64 != 18 || nv.Male.Bool != true || nv.Height.Float64 != 100.11 || nv.AddedAt.Valid != true {
t.Errorf("Should be able to fetch null value")
}
if err := DB.Save(&NullValue{Name: sql.NullString{String: "hello-2", Valid: true},
Age: sql.NullInt64{Int64: 18, Valid: false},
Male: sql.NullBool{Bool: true, Valid: true},
Height: sql.NullFloat64{Float64: 100.11, Valid: true},
AddedAt: NullTime{Time: time.Now(), Valid: false},
}).Error; err != nil {
		t.Errorf("No error should be raised when testing null values")
}
var nv2 NullValue
DB.First(&nv2, "name = ?", "hello-2")
if nv2.Name.String != "hello-2" || nv2.Age.Int64 != 0 || nv2.Male.Bool != true || nv2.Height.Float64 != 100.11 || nv2.AddedAt.Valid != false {
t.Errorf("Should be able to fetch null value")
}
if err := DB.Save(&NullValue{Name: sql.NullString{String: "hello-3", Valid: false},
Age: sql.NullInt64{Int64: 18, Valid: false},
Male: sql.NullBool{Bool: true, Valid: true},
Height: sql.NullFloat64{Float64: 100.11, Valid: true},
AddedAt: NullTime{Time: time.Now(), Valid: false},
}).Error; err == nil {
		t.Errorf("Save should have failed because name can't be null")
}
}
func TestTransaction(t *testing.T) {
tx := DB.Begin()
u := User{Name: "transcation"}
if err := tx.Save(&u).Error; err != nil {
		t.Errorf("No error should be raised")
}
if err := tx.First(&User{}, "name = ?", "transcation").Error; err != nil {
t.Errorf("Should find saved record")
}
if sqlTx, ok := tx.CommonDB().(*sql.Tx); !ok || sqlTx == nil {
t.Errorf("Should return the underlying sql.Tx")
}
tx.Rollback()
if err := tx.First(&User{}, "name = ?", "transcation").Error; err == nil {
t.Errorf("Should not find record after rollback")
}
tx2 := DB.Begin()
u2 := User{Name: "transcation-2"}
if err := tx2.Save(&u2).Error; err != nil {
		t.Errorf("No error should be raised")
}
if err := tx2.First(&User{}, "name = ?", "transcation-2").Error; err != nil {
t.Errorf("Should find saved record")
}
tx2.Commit()
if err := DB.First(&User{}, "name = ?", "transcation-2").Error; err != nil {
t.Errorf("Should be able to find committed record")
}
}
func TestRow(t *testing.T) {
user1 := User{Name: "RowUser1", Age: 1, Birthday: now.MustParse("2000-1-1")}
user2 := User{Name: "RowUser2", Age: 10, Birthday: now.MustParse("2010-1-1")}
user3 := User{Name: "RowUser3", Age: 20, Birthday: now.MustParse("2020-1-1")}
DB.Save(&user1).Save(&user2).Save(&user3)
row := DB.Table("users").Where("name = ?", user2.Name).Select("age").Row()
var age int64
row.Scan(&age)
if age != 10 {
t.Errorf("Scan with Row")
}
}
func TestRows(t *testing.T) {
user1 := User{Name: "RowsUser1", Age: 1, Birthday: now.MustParse("2000-1-1")}
user2 := User{Name: "RowsUser2", Age: 10, Birthday: now.MustParse("2010-1-1")}
user3 := User{Name: "RowsUser3", Age: 20, Birthday: now.MustParse("2020-1-1")}
DB.Save(&user1).Save(&user2).Save(&user3)
rows, err := DB.Table("users").Where("name = ? or name = ?", user2.Name, user3.Name).Select("name, age").Rows()
if err != nil {
		t.Errorf("No error should happen, but got %v", err)
}
count := 0
for rows.Next() {
var name string
var age int64
rows.Scan(&name, &age)
count++
}
if count != 2 {
		t.Errorf("Should find two records with name 3")
}
}
func TestScan(t *testing.T) {
user1 := User{Name: "ScanUser1", Age: 1, Birthday: now.MustParse("2000-1-1")}
user2 := User{Name: "ScanUser2", Age: 10, Birthday: now.MustParse("2010-1-1")}
user3 := User{Name: "ScanUser3", Age: 20, Birthday: now.MustParse("2020-1-1")}
DB.Save(&user1).Save(&user2).Save(&user3)
type result struct {
Name string
Age int
}
var res result
DB.Table("users").Select("name, age").Where("name = ?", user3.Name).Scan(&res)
if res.Name != user3.Name {
t.Errorf("Scan into struct should work")
}
var doubleAgeRes result
DB.Table("users").Select("age + age as age").Where("name = ?", user3.Name).Scan(&doubleAgeRes)
if doubleAgeRes.Age != res.Age*2 {
t.Errorf("Scan double age as age")
}
var ress []result
DB.Table("users").Select("name, age").Where("name in (?)", []string{user2.Name, user3.Name}).Scan(&ress)
if len(ress) != 2 || ress[0].Name != user2.Name || ress[1].Name != user3.Name {
t.Errorf("Scan into struct map")
}
}
func TestRaw(t *testing.T) {
user1 := User{Name: "ExecRawSqlUser1", Age: 1, Birthday: now.MustParse("2000-1-1")}
user2 := User{Name: "ExecRawSqlUser2", Age: 10, Birthday: now.MustParse("2010-1-1")}
user3 := User{Name: "ExecRawSqlUser3", Age: 20, Birthday: now.MustParse("2020-1-1")}
DB.Save(&user1).Save(&user2).Save(&user3)
type result struct {
Name string
Email string
}
var ress []result
DB.Raw("SELECT name, age FROM users WHERE name = ? or name = ?", user2.Name, user3.Name).Scan(&ress)
if len(ress) != 2 || ress[0].Name != user2.Name || ress[1].Name != user3.Name {
t.Errorf("Raw with scan")
}
rows, _ := DB.Raw("select name, age from users where name = ?", user3.Name).Rows()
count := 0
for rows.Next() {
count++
}
if count != 1 {
t.Errorf("Raw with Rows should find one record with name 3")
}
DB.Exec("update users set name=? where name in (?)", "jinzhu", []string{user1.Name, user2.Name, user3.Name})
if DB.Where("name in (?)", []string{user1.Name, user2.Name, user3.Name}).First(&User{}).Error != gorm.RecordNotFound {
t.Error("Raw sql to update records")
}
}
func TestGroup(t *testing.T) {
rows, err := DB.Select("name").Table("users").Group("name").Rows()
if err == nil {
defer rows.Close()
for rows.Next() {
var name string
rows.Scan(&name)
}
} else {
t.Errorf("Should not raise any error")
}
}
func TestJoins(t *testing.T) {
type result struct {
Name string
Email string
}
user := User{
Name: "joins",
Emails: []Email{{Email: "[email protected]"}, {Email: "[email protected]"}},
}
DB.Save(&user)
var results []result
DB.Table("users").Select("name, email").Joins("left join emails on emails.user_id = users.id").Where("name = ?", "joins").Scan(&results)
if len(results) != 2 || results[0].Email != "[email protected]" || results[1].Email != "[email protected]" {
t.Errorf("Should find all two emails with Join")
}
}
func TestHaving(t *testing.T) {
rows, err := DB.Select("name, count(*) as total").Table("users").Group("name").Having("name IN (?)", []string{"2", "3"}).Rows()
if err == nil {
defer rows.Close()
for rows.Next() {
var name string
var total int64
rows.Scan(&name, &total)
if name == "2" && total != 1 {
t.Errorf("Should have one user having name 2")
}
if name == "3" && total != 2 {
t.Errorf("Should have two users having name 3")
}
}
} else {
t.Errorf("Should not raise any error")
}
}
func TestTimeWithZone(t *testing.T) {
var format = "2006-01-02 15:04:05 -0700"
var times []time.Time
GMT8, _ := time.LoadLocation("Asia/Shanghai")
times = append(times, time.Date(2013, 02, 19, 1, 51, 49, 123456789, GMT8))
times = append(times, time.Date(2013, 02, 18, 17, 51, 49, 123456789, time.UTC))
for index, vtime := range times {
name := "time_with_zone_" + strconv.Itoa(index)
user := User{Name: name, Birthday: vtime}
// TODO mssql does not support time zones
if dialect := os.Getenv("GORM_DIALECT"); dialect == "mssql" {
user.Birthday = vtime.UTC()
}
DB.Save(&user)
if user.Birthday.UTC().Format(format) != "2013-02-18 17:51:49 +0000" {
t.Errorf("User's birthday should not be changed after save")
}
var findUser, findUser2, findUser3 User
DB.First(&findUser, "name = ?", name)
if findUser.Birthday.UTC().Format(format) != "2013-02-18 17:51:49 +0000" {
t.Errorf("User's birthday should not be changed after find")
}
if DB.Where("id = ? AND birthday >= ?", findUser.Id, vtime.Add(-time.Minute)).First(&findUser2).RecordNotFound() {
t.Errorf("User should be found")
}
if !DB.Where("id = ? AND birthday >= ?", findUser.Id, vtime.Add(time.Minute)).First(&findUser3).RecordNotFound() {
t.Errorf("User should not be found")
}
}
}
func TestHstore(t *testing.T) {
type Details struct {
Id int64
Bulk gorm.Hstore
}
if dialect := os.Getenv("GORM_DIALECT"); dialect != "postgres" {
t.Skip()
}
if err := DB.Exec("CREATE EXTENSION IF NOT EXISTS hstore").Error; err != nil {
fmt.Println("\033[31mHINT: Must be superuser to create hstore extension (ALTER USER gorm WITH SUPERUSER;)\033[0m")
		panic(fmt.Sprintf("No error should happen when creating hstore extension, but got %+v", err))
}
DB.Exec("drop table details")
if err := DB.CreateTable(&Details{}).Error; err != nil {
		panic(fmt.Sprintf("No error should happen when creating table, but got %+v", err))
}
bankAccountId, phoneNumber, opinion := "123456", "14151321232", "sharkbait"
bulk := map[string]*string{
"bankAccountId": &bankAccountId,
"phoneNumber": &phoneNumber,
"opinion": &opinion,
}
d := Details{Bulk: bulk}
DB.Save(&d)
var d2 Details
if err := DB.First(&d2).Error; err != nil {
t.Errorf("Got error when tried to fetch details: %+v", err)
}
for k := range bulk {
if r, ok := d2.Bulk[k]; ok {
if res, _ := bulk[k]; *res != *r {
t.Errorf("Details should be equal")
}
} else {
t.Errorf("Details should be existed")
}
}
}
func TestSetAndGet(t *testing.T) {
if value, ok := DB.Set("hello", "world").Get("hello"); !ok {
t.Errorf("Should be able to get setting after set")
} else {
if value.(string) != "world" {
			t.Errorf("Set value should not be changed")
}
}
if _, ok := DB.Get("non_existing"); ok {
t.Errorf("Get non existing key should return error")
}
}
func TestCompatibilityMode(t *testing.T) {
DB, _ := gorm.Open("testdb", "")
testdb.SetQueryFunc(func(query string) (driver.Rows, error) {
columns := []string{"id", "name", "age"}
result := `
1,Tim,20
2,Joe,25
3,Bob,30
`
return testdb.RowsFromCSVString(columns, result), nil
})
var users []User
DB.Find(&users)
if (users[0].Name != "Tim") || len(users) != 3 {
		t.Errorf("Unexpected result returned")
}
}
func TestOpenExistingDB(t *testing.T) {
DB.Save(&User{Name: "jnfeinstein"})
dialect := os.Getenv("GORM_DIALECT")
db, err := gorm.Open(dialect, DB.DB())
if err != nil {
t.Errorf("Should have wrapped the existing DB connection")
}
var user User
if db.Where("name = ?", "jnfeinstein").First(&user).Error == gorm.RecordNotFound {
t.Errorf("Should have found existing record")
}
}
func BenchmarkGorm(b *testing.B) {
b.N = 2000
for x := 0; x < b.N; x++ {
e := strconv.Itoa(x) + "[email protected]"
email := BigEmail{Email: e, UserAgent: "pc", RegisteredAt: time.Now()}
// Insert
DB.Save(&email)
// Query
DB.First(&BigEmail{}, "email = ?", e)
// Update
DB.Model(&email).UpdateColumn("email", "new-"+e)
// Delete
DB.Delete(&email)
}
}
func BenchmarkRawSql(b *testing.B) {
	DB, _ := sql.Open("postgres", "user=gorm dbname=gorm sslmode=disable")
DB.SetMaxIdleConns(10)
insertSql := "INSERT INTO emails (user_id,email,user_agent,registered_at,created_at,updated_at) VALUES ($1,$2,$3,$4,$5,$6) RETURNING id"
querySql := "SELECT * FROM emails WHERE email = $1 ORDER BY id LIMIT 1"
updateSql := "UPDATE emails SET email = $1, updated_at = $2 WHERE id = $3"
	deleteSql := "DELETE FROM emails WHERE id = $1"
b.N = 2000
for x := 0; x < b.N; x++ {
var id int64
e := strconv.Itoa(x) + "[email protected]"
email := BigEmail{Email: e, UserAgent: "pc", RegisteredAt: time.Now()}
// Insert
DB.QueryRow(insertSql, email.UserId, email.Email, email.UserAgent, email.RegisteredAt, time.Now(), time.Now()).Scan(&id)
// Query
rows, _ := DB.Query(querySql, email.Email)
rows.Close()
// Update
DB.Exec(updateSql, "new-"+e, time.Now(), id)
// Delete
DB.Exec(deleteSql, id)
}
}
| [
"\"GORM_DIALECT\"",
"\"GORM_DIALECT\"",
"\"GORM_DIALECT\"",
"\"GORM_DIALECT\""
]
| []
| [
"GORM_DIALECT"
]
| [] | ["GORM_DIALECT"] | go | 1 | 0 | |
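The tests above pick their backend through GORM_DIALECT. A minimal sketch of the same selection pattern outside the test file, reusing the connection strings shown there as placeholders:

package main

import (
	"fmt"
	"os"

	"github.com/jinzhu/gorm"
	_ "github.com/lib/pq"
	_ "github.com/mattn/go-sqlite3"
)

func openTestDB() (gorm.DB, error) {
	// GORM_DIALECT chooses the driver; sqlite3 is the fallback used by the tests.
	switch os.Getenv("GORM_DIALECT") {
	case "postgres":
		return gorm.Open("postgres", "user=gorm dbname=gorm sslmode=disable")
	default:
		return gorm.Open("sqlite3", "/tmp/gorm.db")
	}
}

func main() {
	db, err := openTestDB()
	if err != nil {
		panic(err)
	}
	defer db.Close()
	fmt.Println("opened dialect:", os.Getenv("GORM_DIALECT"))
}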
main.go | package main
import (
"fmt"
"log"
"os"
)
// main function, where it all starts
func main() {
//var s = "bla"
var b = os.Getenv("PROCESSOR_IDENTIFIER")
fmt.Println("GO test>> ", b, "PID=", os.Getpid(), "xx")
log.Println("blue and red")
type User struct {
name string
email string
adminInt bool
}
david := User{name: "David James", adminInt: true}
log.Println("user is", david)
}
| [
"\"PROCESSOR_IDENTIFIER\""
]
| []
| [
"PROCESSOR_IDENTIFIER"
]
| [] | ["PROCESSOR_IDENTIFIER"] | go | 1 | 0 | |
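Since this small program only reads PROCESSOR_IDENTIFIER with os.Getenv, a brief note: os.LookupEnv additionally reports whether the variable is set at all, which Getenv cannot distinguish from an empty value. A minimal sketch:

package main

import (
	"fmt"
	"os"
)

func main() {
	// ok is false when the variable is absent, not merely empty.
	if v, ok := os.LookupEnv("PROCESSOR_IDENTIFIER"); ok {
		fmt.Println("PROCESSOR_IDENTIFIER =", v)
	} else {
		fmt.Println("PROCESSOR_IDENTIFIER is not set")
	}
}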
goinsta.go | package goinsta
import (
"crypto/tls"
"encoding/json"
"io"
"io/ioutil"
"net/http"
"net/http/cookiejar"
neturl "net/url"
"os"
"path/filepath"
"strconv"
"time"
)
// Instagram represent the main API handler
//
// Profiles: Represents instagram's user profile.
// Account: Represents instagram's personal account.
// Search: Represents instagram's search.
// Timeline: Represents instagram's timeline.
// Activity: Represents instagram's user activity.
// Inbox: Represents instagram's messages.
//
// See Scheme section in README.md for more information.
//
// We recommend to use Export and Import functions after first Login.
//
// Also you can use SetProxy and UnsetProxy to set and unset proxy.
// Golang also provides the option to set a proxy using HTTP_PROXY env var.
type Instagram struct {
user string
pass string
// device id: android-1923fjnma8123
dID string
// uuid: 8493-1233-4312312-5123
uuid string
// rankToken
rankToken string
// token
token string
// phone id
pid string
// ads id
adid string
// Instagram objects
// Profiles is the user interaction
Profiles *Profiles
// Account stores all personal data of the user and his/her options.
Account *Account
// Search performs searching of multiple things (users, locations...)
Search *Search
// Timeline allows to receive timeline media.
Timeline *Timeline
// Activity are instagram notifications.
Activity *Activity
// Inbox are instagram message/chat system.
Inbox *Inbox
// Feed for search over feeds
Feed *Feed
// User contacts from mobile address book
Contacts *Contacts
c *http.Client
}
// SetDeviceID sets device id
func (inst *Instagram) SetDeviceID(id string) {
inst.dID = id
}
// SetUUID sets uuid
func (inst *Instagram) SetUUID(uuid string) {
inst.uuid = uuid
}
// SetPhoneID sets phone id
func (inst *Instagram) SetPhoneID(id string) {
inst.pid = id
}
// New creates Instagram structure
func New(username, password string) *Instagram {
// this call never returns error
jar, _ := cookiejar.New(nil)
inst := &Instagram{
user: username,
pass: password,
dID: generateDeviceID(
generateMD5Hash(username + password),
),
		uuid: generateUUID(), // both uuids must be different
pid: generateUUID(),
c: &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
},
Jar: jar,
},
}
inst.init()
return inst
}
func (inst *Instagram) init() {
inst.Profiles = newProfiles(inst)
inst.Activity = newActivity(inst)
inst.Timeline = newTimeline(inst)
inst.Search = newSearch(inst)
inst.Inbox = newInbox(inst)
inst.Feed = newFeed(inst)
inst.Contacts = newContacts(inst)
}
// SetProxy sets proxy for connection.
func (inst *Instagram) SetProxy(url string, insecure bool) error {
uri, err := neturl.Parse(url)
if err == nil {
inst.c.Transport = &http.Transport{
Proxy: http.ProxyURL(uri),
TLSClientConfig: &tls.Config{
InsecureSkipVerify: insecure,
},
}
}
return err
}
// SetHTTPTransport sets a custom HTTP transport (e.g. for a SOCKS5 proxy).
func (inst *Instagram) SetHTTPTransport(transport *http.Transport) {
inst.c.Transport = transport
}
// UnsetProxy unsets proxy for connection.
func (inst *Instagram) UnsetProxy() {
inst.c.Transport = nil
}
// Save exports config to ~/.goinsta
func (inst *Instagram) Save() error {
home := os.Getenv("HOME")
if home == "" {
home = os.Getenv("home") // for plan9
}
return inst.Export(filepath.Join(home, ".goinsta"))
}
// Export exports *Instagram object options
func (inst *Instagram) Export(path string) error {
url, err := neturl.Parse(goInstaAPIUrl)
if err != nil {
return err
}
config := ConfigFile{
ID: inst.Account.ID,
User: inst.user,
DeviceID: inst.dID,
UUID: inst.uuid,
RankToken: inst.rankToken,
Token: inst.token,
PhoneID: inst.pid,
Cookies: inst.c.Jar.Cookies(url),
}
bytes, err := json.Marshal(config)
if err != nil {
return err
}
return ioutil.WriteFile(path, bytes, 0644)
}
// Export exports selected *Instagram object options to an io.Writer
func Export(inst *Instagram, writer io.Writer) error {
url, err := neturl.Parse(goInstaAPIUrl)
if err != nil {
return err
}
config := ConfigFile{
ID: inst.Account.ID,
User: inst.user,
DeviceID: inst.dID,
UUID: inst.uuid,
RankToken: inst.rankToken,
Token: inst.token,
PhoneID: inst.pid,
Cookies: inst.c.Jar.Cookies(url),
}
bytes, err := json.Marshal(config)
if err != nil {
return err
}
_, err = writer.Write(bytes)
return err
}
// ImportReader imports instagram configuration from io.Reader
//
// This function does not set proxy automatically. Use SetProxy after this call.
func ImportReader(r io.Reader) (*Instagram, error) {
url, err := neturl.Parse(goInstaAPIUrl)
if err != nil {
return nil, err
}
bytes, err := ioutil.ReadAll(r)
if err != nil {
return nil, err
}
config := ConfigFile{}
err = json.Unmarshal(bytes, &config)
if err != nil {
return nil, err
}
inst := &Instagram{
user: config.User,
dID: config.DeviceID,
uuid: config.UUID,
rankToken: config.RankToken,
token: config.Token,
pid: config.PhoneID,
c: &http.Client{
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
},
},
}
inst.c.Jar, err = cookiejar.New(nil)
if err != nil {
return inst, err
}
inst.c.Jar.SetCookies(url, config.Cookies)
inst.init()
inst.Account = &Account{inst: inst, ID: config.ID}
inst.Account.Sync()
return inst, nil
}
// Import imports instagram configuration
//
// This function does not set proxy automatically. Use SetProxy after this call.
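//
// A small sketch of the intended flow (the path and proxy URL below are placeholders):
//
//	inst, err := goinsta.Import("/tmp/goinsta-session")
//	if err == nil {
//		_ = inst.SetProxy("http://127.0.0.1:8080", false)
//	}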
func Import(path string) (*Instagram, error) {
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
return ImportReader(f)
}
func (inst *Instagram) readMsisdnHeader() error {
data, err := json.Marshal(
map[string]string{
"device_id": inst.uuid,
},
)
if err != nil {
return err
}
_, err = inst.sendRequest(
&reqOptions{
Endpoint: urlMsisdnHeader,
IsPost: true,
Connection: "keep-alive",
Query: generateSignature(b2s(data)),
},
)
return err
}
func (inst *Instagram) contactPrefill() error {
data, err := json.Marshal(
map[string]string{
"phone_id": inst.pid,
"_csrftoken": inst.token,
"usage": "prefill",
},
)
if err != nil {
return err
}
_, err = inst.sendRequest(
&reqOptions{
Endpoint: urlContactPrefill,
IsPost: true,
Connection: "keep-alive",
Query: generateSignature(b2s(data)),
},
)
return err
}
func (inst *Instagram) zrToken() error {
_, err := inst.sendRequest(
&reqOptions{
Endpoint: urlZrToken,
IsPost: false,
Connection: "keep-alive",
Query: map[string]string{
"device_id": inst.dID,
"token_hash": "",
"custom_device_id": inst.uuid,
"fetch_reason": "token_expired",
},
},
)
return err
}
func (inst *Instagram) sendAdID() error {
data, err := inst.prepareData(
map[string]interface{}{
"adid": inst.adid,
},
)
if err != nil {
return err
}
_, err = inst.sendRequest(
&reqOptions{
Endpoint: urlLogAttribution,
IsPost: true,
Connection: "keep-alive",
Query: generateSignature(data),
},
)
return err
}
// Login performs instagram login.
//
// Password will be deleted after login
func (inst *Instagram) Login() error {
err := inst.readMsisdnHeader()
if err != nil {
return err
}
err = inst.syncFeatures()
if err != nil {
return err
}
err = inst.zrToken()
if err != nil {
return err
}
err = inst.sendAdID()
if err != nil {
return err
}
err = inst.contactPrefill()
if err != nil {
return err
}
result, err := json.Marshal(
map[string]interface{}{
"guid": inst.uuid,
"login_attempt_count": 0,
"_csrftoken": inst.token,
"device_id": inst.dID,
"adid": inst.adid,
"phone_id": inst.pid,
"username": inst.user,
"password": inst.pass,
"google_tokens": "[]",
},
)
if err != nil {
return err
}
body, err := inst.sendRequest(
&reqOptions{
Endpoint: urlLogin,
Query: generateSignature(b2s(result)),
IsPost: true,
Login: true,
},
)
if err != nil {
return err
}
inst.pass = ""
// getting account data
res := accountResp{}
err = json.Unmarshal(body, &res)
if err != nil {
return err
}
inst.Account = &res.Account
inst.Account.inst = inst
inst.rankToken = strconv.FormatInt(inst.Account.ID, 10) + "_" + inst.uuid
inst.zrToken()
return err
}
// Logout closes current session
func (inst *Instagram) Logout() error {
_, err := inst.sendSimpleRequest(urlLogout)
inst.c.Jar = nil
inst.c = nil
return err
}
func (inst *Instagram) syncFeatures() error {
data, err := inst.prepareData(
map[string]interface{}{
"id": inst.uuid,
"experiments": goInstaExperiments,
},
)
if err != nil {
return err
}
_, err = inst.sendRequest(
&reqOptions{
Endpoint: urlQeSync,
Query: generateSignature(data),
IsPost: true,
Login: true,
},
)
return err
}
func (inst *Instagram) megaphoneLog() error {
data, err := inst.prepareData(
map[string]interface{}{
"id": inst.Account.ID,
"type": "feed_aysf",
"action": "seen",
"reason": "",
"device_id": inst.dID,
"uuid": generateMD5Hash(string(time.Now().Unix())),
},
)
if err != nil {
return err
}
_, err = inst.sendRequest(
&reqOptions{
Endpoint: urlMegaphoneLog,
Query: generateSignature(data),
IsPost: true,
Login: true,
},
)
return err
}
func (inst *Instagram) expose() error {
data, err := inst.prepareData(
map[string]interface{}{
"id": inst.Account.ID,
"experiment": "ig_android_profile_contextual_feed",
},
)
if err != nil {
return err
}
_, err = inst.sendRequest(
&reqOptions{
Endpoint: urlExpose,
Query: generateSignature(data),
IsPost: true,
},
)
return err
}
// GetMedia returns media specified by id.
//
// The argument can be int64 or string
//
// See example: examples/media/like.go
func (inst *Instagram) GetMedia(o interface{}) (*FeedMedia, error) {
media := &FeedMedia{
inst: inst,
NextID: o,
}
return media, media.Sync()
}
| [
"\"HOME\"",
"\"home\""
]
| []
| [
"home",
"HOME"
]
| [] | ["home", "HOME"] | go | 2 | 0 | |
hyperopt/tests/test_plotting.py | """
Verify that the plotting routines can at least run.
If environment variable HYPEROPT_SHOW is defined and true,
then the plots actually appear.
"""
from __future__ import print_function
from __future__ import absolute_import
import unittest
import os
try:
import matplotlib
matplotlib.use('svg') # -- prevents trying to connect to X server
except ImportError:
import nose
raise nose.SkipTest()
from hyperopt import Trials
import hyperopt.plotting
from hyperopt import rand, fmin
from .test_domains import many_dists
def get_do_show():
rval = int(os.getenv('HYPEROPT_SHOW', '0'))
print('do_show =', rval)
return rval
class TestPlotting(unittest.TestCase):
def setUp(self):
domain = self.domain = many_dists()
trials = self.trials = Trials()
fmin(lambda x: x,
space=domain.expr,
trials=trials,
algo=rand.suggest,
max_evals=200)
def test_plot_history(self):
hyperopt.plotting.main_plot_history(
self.trials,
do_show=get_do_show())
def test_plot_histogram(self):
hyperopt.plotting.main_plot_histogram(
self.trials,
do_show=get_do_show())
def test_plot_vars(self):
hyperopt.plotting.main_plot_vars(
self.trials,
self.domain)
| []
| []
| [
"HYPEROPT_SHOW"
]
| [] | ["HYPEROPT_SHOW"] | python | 1 | 0 | |
examples/DeepWisdom/Auto_NLP/deepWisdom/autnlp_config.py | # -*- encoding: utf-8 -*-
"""
@Time : 2019-08-17 10:49
@Author : alexanderwu
@Email : [email protected]
@Software: PyCharm
"""
import os
# config for final.
FT_GPU_ENV = "DEV" # DEV or PROD
IF_Sniffer = False # Ture or False
IF_Down_Pretrained_Mode = True
# common
MAX_SEQ_LEN = 64
bs = 16
MAX_VOCAB_SIZE = 30000
# fasttext keras
ngram_range = 1
max_features = 20000
# maxlen = 128
maxlen = 400
batch_size = 32
embedding_dims = 100
epochs = 100
EARLY_STOPPING_EPOCH = 5
# glove
MAX_SEQUENCE_LENGTH = 1000
MAX_NUM_WORDS = 30000
EMBEDDING_DIM = 100
VALIDATION_SPLIT = 0.2
# bert
WITH_TAIL = False
BERT_CHINESE_CHAR = True
USE_ROBERTA = True
mask_padding_with_zero = True
# word segmentation (tokenization)
USE_CPPJIEBA_PY = False  # do not use the C++ version.
ChZh_Wordseg_Method = "jieba_fast" # "cppjieba-py, jieba_fast"
# for autosampling
SVM_MAX_AUTOSAMPLE_NUM = 20000
FINETUNE_MAX_AUTOSAMPLE_NUM = 2500
Min_Sample_Num_Per_Label = 300
Total_Sample_Num_for_SVM = 30000
Lowbound_Fold_for_Binary = 2
# for finetune
FT_MAX_SEQ_LEN = 128
FT_TRAIN_BATCH_SIZE = 32
FT_EVAL_BATCH_SIZE = 128 # 512
# if FT_GPU_ENV == "DEV":
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"
#
# else:
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# for sniffer.
Test_Sniffer_Num = 50
class Config(object):
def __init__(self):
from keras.optimizers import Adam
# input configuration
self.level = 'word'
self.input_level = 'word'
self.word_max_len = 400
self.char_max_len = 200
self.max_len = {'word': self.word_max_len,
'char': self.char_max_len
}
self.han_max_sent = 10
self.word_embed_dim = 100
self.word_embed_type = 'glove'
self.word_embed_trainable = True
self.word_embeddings = None
# model structure configuration
self.exp_name = None
self.model_name = None
self.rnn_units = 300
self.dense_units = 512
# model training configuration
self.batch_size = 128
self.n_epoch = 50
self.learning_rate = 0.01
self.optimizer = Adam(self.learning_rate)
self.dropout = 0.5
self.l2_reg = 0.001
# output configuration
self.n_class = 3
# checkpoint configuration
self.checkpoint_dir = 'ckpt'
self.checkpoint_monitor = 'val_loss'
self.checkpoint_save_best_only = True
self.checkpoint_save_weights_only = True
self.checkpoint_save_weights_mode = 'max'
self.checkpoint_verbose = 1
# early_stopping configuration
self.early_stopping_monitor = 'val_loss'
self.early_stopping_mode = 'max'
self.early_stopping_patience = 5
self.early_stopping_verbose = 1
class AutoNLPPathConfig(object):
AutoNLP_Config_File_Dir = os.path.dirname(__file__)
AutoNLP_Src_Script_Dir = os.path.abspath(os.path.join(AutoNLP_Config_File_Dir, ".."))
    AutoNLP_Pro_Dir = os.path.abspath(os.path.join(AutoNLP_Src_Script_Dir, ".."))  # going up needs abspath; going down does not.
AutoNLP_Pro_Log_Dir = os.path.join(AutoNLP_Pro_Dir, "logs")
AutoNLP_Model_Warehouse_Dir = os.path.join(AutoNLP_Pro_Dir, "models_warehouses")
autonlp_pro_log_dir = AutoNLPPathConfig.AutoNLP_Pro_Log_Dir
models_warehouses_dir = AutoNLPPathConfig.AutoNLP_Model_Warehouse_Dir
class AutoNlpLoggerConfig(object):
LOG_DIR = autonlp_pro_log_dir
AutoNLP_OFFLINE_LOGNAME = "autonlp_offline"
AutoNLP_ONLINE_LOGNAME = "autonlp_online"
AutoNLP_OFFLINE_LOGFILE = os.path.join(LOG_DIR, "autonlp_offline.log")
AutoNLP_ONLINE_LOGFILE = os.path.join(LOG_DIR, "autonlp_online.log")
# OFFLINE_LOG_LEVEL = logging.INFO
# ONLINE_LOG_LEVEL = logging.INFO
class AutoNlpDemoServiceConfig(object):
    autonlp_demo_host = "0.0.0.0"  # host of the top-level external agent_server.
    autonlp_demo_port = 38008  # port of the top-level external agent_server.
    # bot_agent_port = 8080  # port of the top-level external agent_server.
autonlp_demo_route_path = "autonlp_demo"
bot_agent_server_url = "http://{}:{}/{}".format(autonlp_demo_host, autonlp_demo_port, autonlp_demo_route_path)
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
cmd/plugin/purser.go | /*
* Copyright (c) 2018 VMware Inc. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"flag"
"fmt"
"os"
"strings"
log "github.com/Sirupsen/logrus"
"github.com/vmware/purser/pkg/client"
groups_client_v1 "github.com/vmware/purser/pkg/client/clientset/typed/groups/v1"
"github.com/vmware/purser/pkg/plugin"
"github.com/vmware/purser/pkg/utils"
)
const (
pluginVersion = "version v1.0.0"
)
var (
groupClient *groups_client_v1.GroupClient
// Variables used for cmd interface
kubeconfig string
info string
version string
description = fmt.Sprintf("Purser gives cost insights of kubernetes deployments.\n\n")
usage = fmt.Sprintf("Usage:\n kubectl plugin purser [options] <command> <args>\n\n")
supportedCmds = fmt.Sprintf("The supported commands are:\n get Get resource information.\n set Set resource information.\n\n")
optionHelp = fmt.Sprintf("\n --info Show more details about the plugin.")
optionKubeConfig = fmt.Sprintf("\n --kubeconfig Absolute path for the kube config file.")
optionVersion = fmt.Sprintf("\n --version Show plugin version.")
options = fmt.Sprintf("options:%s%s%s\n\n", optionHelp, optionKubeConfig, optionVersion)
kubecltOption = fmt.Sprintf("\nUse \"kubectl options\" for a list of global command-line options (applies to all commands).\n\n")
)
func init() {
flag.StringVar(&kubeconfig, "kubeconfig", os.Getenv("KUBECTL_PLUGINS_GLOBAL_FLAG_KUBECONFIG"), "path to Kubernetes config file")
flag.StringVar(&info, "info", os.Getenv("KUBECTL_PLUGINS_LOCAL_FLAG_INFO"), "Show help documentation")
flag.StringVar(&version, "version", os.Getenv("KUBECTL_PLUGINS_LOCAL_FLAG_VERSION"), "Show version number")
flag.Usage = func() {
_, err := fmt.Fprint(flag.CommandLine.Output(), description)
if err != nil {
log.Fatal(err)
}
_, err = fmt.Fprint(flag.CommandLine.Output(), usage)
if err != nil {
log.Fatal(err)
}
_, err = fmt.Fprint(flag.CommandLine.Output(), supportedCmds)
if err != nil {
log.Fatal(err)
}
_, err = fmt.Fprint(flag.CommandLine.Output(), options)
if err != nil {
log.Fatal(err)
}
_, err = fmt.Fprint(flag.CommandLine.Output(), "Example(s):\n\n")
if err != nil {
log.Fatal(err)
}
printHelp()
_, err = fmt.Fprint(flag.CommandLine.Output(), kubecltOption)
if err != nil {
log.Fatal(err)
}
}
if version != "" {
fmt.Println(pluginVersion)
os.Exit(0)
}
if info != "" {
flag.Usage()
os.Exit(0)
}
config, err := utils.GetKubeconfig(kubeconfig)
if err != nil {
log.Fatal(err)
}
plugin.ProvideClientSetInstance(utils.GetKubeclient(config))
client, clusterConfig := client.GetAPIExtensionClient(kubeconfig)
groupClient = groups_client_v1.NewGroupClient(client, clusterConfig)
}
func main() {
inputs := os.Args[2:] // index 1 is empty
if len(inputs) == 4 && inputs[0] == Get {
computeMetricInsight(inputs)
} else if len(inputs) == 2 {
computeStats(inputs)
} else {
printHelp()
}
}
func computeMetricInsight(inputs []string) {
switch inputs[1] {
case Cost:
computeCost(inputs)
case Resources:
fetchResource(inputs)
}
}
func computeCost(inputs []string) {
switch inputs[2] {
case Label:
plugin.GetPodsCostForLabel(inputs[3])
case Pod:
plugin.GetPodCost(inputs[3])
case Node:
plugin.GetAllNodesCost()
default:
printHelp()
}
}
func fetchResource(inputs []string) {
switch inputs[2] {
case Namespace:
group := plugin.GetGroupByName(groupClient, inputs[3])
if group != nil {
plugin.PrintGroup(group)
} else {
fmt.Printf("Group %s is not present\n", inputs[3])
}
case Label:
if !strings.Contains(inputs[3], "=") {
printHelp()
}
group := plugin.GetGroupByName(groupClient, createGroupNameFromLabel(inputs[3]))
if group != nil {
plugin.PrintGroup(group)
} else {
fmt.Printf("Group %s is not present\n", inputs[3])
}
case Group:
group := plugin.GetGroupByName(groupClient, inputs[3])
if group != nil {
plugin.PrintGroup(group)
} else {
fmt.Printf("No group with name: %s\n", inputs[3])
}
default:
printHelp()
}
}
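// createGroupNameFromLabel converts a "key=value" label selector into a group name;
// for example (derived from the logic below), "App/Tier=Web" maps to "app-tier.web".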
func createGroupNameFromLabel(input string) string {
inp := strings.Split(input, "=")
key, val := inp[0], inp[1]
groupName := key + "." + val
if strings.Contains(groupName, "/") {
groupName = strings.Replace(groupName, "/", "-", -1)
}
return strings.ToLower(groupName)
}
func computeStats(inputs []string) {
switch inputs[0] {
case Get:
getStats(inputs)
case Set:
inputUserCosts(inputs)
default:
printHelp()
}
}
func getStats(inputs []string) {
switch inputs[1] {
case "summary":
plugin.GetClusterSummary()
case "savings":
plugin.GetSavings()
case "user-costs":
price := plugin.GetUserCosts()
fmt.Printf("cpu cost per CPU per hour:\t %f$\nmem cost per GB per hour:\t %f$\nstorage cost per GB per hour:\t %f$\n",
price.CPU,
price.Memory,
price.Storage)
default:
printHelp()
}
}
func inputUserCosts(inputs []string) {
if inputs[1] == "user-costs" {
fmt.Printf("Enter CPU cost per cpu per hour:\t ")
var cpuCostPerCPUPerHour string
_, err := fmt.Scan(&cpuCostPerCPUPerHour)
logError(err)
fmt.Printf("Enter Memory cost per GB per hour:\t ")
var memCostPerGBPerHour string
_, err = fmt.Scan(&memCostPerGBPerHour)
logError(err)
fmt.Printf("Enter Storage cost per GB per hour:\t ")
var storageCostPerGBPerHour string
_, err = fmt.Scan(&storageCostPerGBPerHour)
logError(err)
plugin.SaveUserCosts(cpuCostPerCPUPerHour, memCostPerGBPerHour, storageCostPerGBPerHour)
} else {
printHelp()
}
}
func printHelp() {
pluginExt := "kubectl --kubeconfig=<absolute path to config> plugin purser "
fmt.Println("Try one of the following commands...")
fmt.Println(pluginExt + "get summary")
fmt.Println(pluginExt + "get resources group <group-name>")
fmt.Println(pluginExt + "get cost label <key=val>")
fmt.Println(pluginExt + "get cost pod <pod name>")
fmt.Println(pluginExt + "get cost node all")
fmt.Println(pluginExt + "set user-costs")
fmt.Println(pluginExt + "get user-costs")
fmt.Println(pluginExt + "get savings")
}
func logError(err error) {
if err != nil {
log.Printf("failed to read user input %+v", err)
}
}
| [
"\"KUBECTL_PLUGINS_GLOBAL_FLAG_KUBECONFIG\"",
"\"KUBECTL_PLUGINS_LOCAL_FLAG_INFO\"",
"\"KUBECTL_PLUGINS_LOCAL_FLAG_VERSION\""
]
| []
| [
"KUBECTL_PLUGINS_LOCAL_FLAG_INFO",
"KUBECTL_PLUGINS_GLOBAL_FLAG_KUBECONFIG",
"KUBECTL_PLUGINS_LOCAL_FLAG_VERSION"
]
| [] | ["KUBECTL_PLUGINS_LOCAL_FLAG_INFO", "KUBECTL_PLUGINS_GLOBAL_FLAG_KUBECONFIG", "KUBECTL_PLUGINS_LOCAL_FLAG_VERSION"] | go | 3 | 0 | |
tools/amd_build/build_amd.py | #!/usr/bin/env python3
import os
import argparse
import sys
sys.path.append(os.path.realpath(os.path.join(
__file__,
os.path.pardir,
os.path.pardir,
os.path.pardir,
'torch',
'utils')))
from hipify import hipify_python # type: ignore[import]
parser = argparse.ArgumentParser(description='Top-level script for HIPifying, filling in most common parameters')
parser.add_argument(
'--out-of-place-only',
action='store_true',
help="Whether to only run hipify out-of-place on source files")
parser.add_argument(
'--project-directory',
type=str,
default='',
help="The root of the project.",
required=False)
parser.add_argument(
'--output-directory',
type=str,
default='',
help="The directory to store the hipified project",
required=False)
parser.add_argument(
'--extra-include-dir',
type=str,
default=[],
nargs='+',
help="The list of extra directories in caffe2 to hipify",
required=False)
args = parser.parse_args()
amd_build_dir = os.path.dirname(os.path.realpath(__file__))
proj_dir = os.path.join(os.path.dirname(os.path.dirname(amd_build_dir)))
if args.project_directory:
proj_dir = args.project_directory
out_dir = proj_dir
if args.output_directory:
out_dir = args.output_directory
includes = [
"caffe2/operators/*",
"caffe2/sgd/*",
"caffe2/image/*",
"caffe2/transforms/*",
"caffe2/video/*",
"caffe2/distributed/*",
"caffe2/queue/*",
"caffe2/contrib/aten/*",
"binaries/*",
"caffe2/**/*_test*",
"caffe2/core/*",
"caffe2/db/*",
"caffe2/utils/*",
"caffe2/contrib/gloo/*",
"caffe2/contrib/nccl/*",
"c10/cuda/*",
"c10/cuda/test/CMakeLists.txt",
"modules/*",
# PyTorch paths
# Keep this synchronized with is_pytorch_file in hipify_python.py
"aten/src/ATen/cuda/*",
"aten/src/ATen/native/cuda/*",
"aten/src/ATen/native/cudnn/*",
"aten/src/ATen/native/nested/cuda/*",
"aten/src/ATen/native/sparse/cuda/*",
"aten/src/ATen/native/quantized/cuda/*",
"aten/src/THC/*",
"aten/src/ATen/test/*",
# CMakeLists.txt isn't processed by default, but there are a few
# we do want to handle, so explicitly specify them
"aten/src/THC/CMakeLists.txt",
"torch/*",
"tools/autograd/templates/python_variable_methods.cpp",
]
for new_dir in args.extra_include_dir:
abs_new_dir = os.path.join(proj_dir, new_dir)
if os.path.exists(abs_new_dir):
new_dir = os.path.join(new_dir, '**/*')
includes.append(new_dir)
ignores = [
"caffe2/operators/depthwise_3x3_conv_op_cudnn.cu",
"caffe2/operators/pool_op_cudnn.cu",
'*/hip/*',
# These files are compatible with both cuda and hip
"aten/src/ATen/core/*",
"torch/csrc/jit/codegen/cuda/codegen.cpp",
"torch/csrc/jit/codegen/cuda/runtime/block_reduction.cu",
"torch/csrc/jit/codegen/cuda/runtime/broadcast.cu",
"torch/csrc/jit/codegen/cuda/runtime/grid_reduction.cu",
"torch/csrc/jit/codegen/fuser/cuda/resource_strings.h",
"torch/csrc/jit/tensorexpr/ir_printer.cpp",
# generated files we shouldn't frob
"torch/lib/tmp_install/*",
"torch/include/*",
]
# Check if the compiler is hip-clang.
def is_hip_clang() -> bool:
try:
hip_path = os.getenv('HIP_PATH', '/opt/rocm/hip')
with open(hip_path + '/lib/.hipInfo') as f:
return 'HIP_COMPILER=clang' in f.read()
except IOError:
return False
# TODO Remove once gloo submodule is recent enough to contain upstream fix.
if is_hip_clang():
gloo_cmake_file = "third_party/gloo/cmake/Hip.cmake"
do_write = False
if os.path.exists(gloo_cmake_file):
with open(gloo_cmake_file, "r") as sources:
lines = sources.readlines()
newlines = [line.replace(' hip_hcc ', ' amdhip64 ') for line in lines]
if lines == newlines:
print("%s skipped" % gloo_cmake_file)
else:
with open(gloo_cmake_file, "w") as sources:
for line in newlines:
sources.write(line)
print("%s updated" % gloo_cmake_file)
gloo_cmake_file = "third_party/gloo/cmake/Modules/Findrccl.cmake"
if os.path.exists(gloo_cmake_file):
do_write = False
with open(gloo_cmake_file, "r") as sources:
lines = sources.readlines()
newlines = [line.replace('RCCL_LIBRARY', 'RCCL_LIBRARY_PATH') for line in lines]
if lines == newlines:
print("%s skipped" % gloo_cmake_file)
else:
with open(gloo_cmake_file, "w") as sources:
for line in newlines:
sources.write(line)
print("%s updated" % gloo_cmake_file)
# TODO Remove once gloo submodule is recent enough to contain upstream fix.
if is_hip_clang():
gloo_cmake_file = "third_party/gloo/cmake/Dependencies.cmake"
do_write = False
if os.path.exists(gloo_cmake_file):
with open(gloo_cmake_file, "r") as sources:
lines = sources.readlines()
newlines = [line.replace('HIP_HCC_FLAGS', 'HIP_CLANG_FLAGS') for line in lines]
if lines == newlines:
print("%s skipped" % gloo_cmake_file)
else:
with open(gloo_cmake_file, "w") as sources:
for line in newlines:
sources.write(line)
print("%s updated" % gloo_cmake_file)
hipify_python.hipify(
project_directory=proj_dir,
output_directory=out_dir,
includes=includes,
ignores=ignores,
out_of_place_only=args.out_of_place_only,
hip_clang_launch=is_hip_clang())
| []
| []
| [
"HIP_PATH"
]
| [] | ["HIP_PATH"] | python | 1 | 0 | |
src/tools/reliability/server/main.go | package main
import (
"log"
"net/http"
"os"
"time"
"tools/reliability/server/internal/api"
)
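// The listen port comes from the PORT environment variable; the server exposes
// two endpoints, /tests and /workers, wired up in main below.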
func main() {
port := os.Getenv("PORT")
workerHandler := api.NewWorkerHandler()
http.Handle("/tests", api.NewCreateTestHandler(workerHandler, 5*time.Second))
http.Handle("/workers", workerHandler)
addr := ":" + port
log.Printf("server started on %s", addr)
log.Println(http.ListenAndServe(addr, nil))
}
| [
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
config/wsgi.py | """
WSGI config for reddit_clone project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# reddit directory.
app_path = os.path.abspath(os.path.join(
os.path.dirname(os.path.abspath(__file__)), os.pardir))
sys.path.append(os.path.join(app_path, 'reddit'))
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
from raven.contrib.django.raven_compat.middleware.wsgi import Sentry
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
if os.environ.get('DJANGO_SETTINGS_MODULE') == 'config.settings.production':
application = Sentry(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| []
| []
| [
"DJANGO_SETTINGS_MODULE"
]
| [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
djangodemo/djangodemo/asgi.py | """
ASGI config for djangodemo project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangodemo.settings')
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
tests/src/main/python/TestDriver_MeasureCLI.py | #
# (C) Copyright IBM Corp. 2021, 2022
#
# SPDX-License-Identifier: Apache-2.0
#
#
import unittest
import os
import time
import csv
import json
import re
import subprocess
import pytest
currentDir=os.getcwd()
baseDir = currentDir + '/'
testFile=baseDir + os.environ['TESTS_JSON']
jar = os.environ['JAR']
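# Note: this driver is configured entirely through environment variables -- TESTS_JSON and JAR
# above, plus TRUSTSTORE, TRUSTSTORE_PASSWORD, TRUSTSTORE_TYPE, DATA_FHIR_SERVER_DETAILS and
# TERM_FHIR_SERVER_DETAILS, which are read inside execute().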
def setup():
os.chdir(baseDir)
tests = list()
with open(testFile) as f:
data = json.load(f)
testValues = data['tests']
for testValue in testValues.values():
regEx=False
try:
regEx=testValue['regEx']
except:
regEx=False
tests.append((testValue['jsonMeasureConfigurationFile'], testValue['resource'], testValue['params'], testValue['targets'], testValue['response'], testValue['measureServer'], testValue['filters'], regEx))
return tests
class Test(object):
@pytest.mark.parametrize("jsonMeasureConfigurationFile, resource, params, targets, output, measureServer, filters, regEx", setup())
def test(self, jsonMeasureConfigurationFile, resource, params, targets, output, measureServer, filters, regEx):
self.execute(jsonMeasureConfigurationFile, resource, params, targets, output, measureServer, filters, regEx)
# Execute submits a query and validates the return.
def execute(self, jsonMeasureConfigurationFile, resource, params, targets, output, measureServer, filters, regEx):
expectedOutputs = output.split('\n')
callDetails = ["java", "-Xms1G", "-Xmx1G", "-Djavax.net.ssl.trustStore="+os.environ["TRUSTSTORE"], "-Djavax.net.ssl.trustStorePassword="+os.environ["TRUSTSTORE_PASSWORD"], "-Djavax.net.ssl.trustStoreType="+os.environ["TRUSTSTORE_TYPE"], "-Dorg.jboss.logging.provider=slf4j", "-Dorg.slf4j.simpleLogger.log.org.hibernate.validator.internal.util.Version=off", "-classpath", jar, "com.ibm.cohort.cli.MeasureCLI"]
if os.environ['DATA_FHIR_SERVER_DETAILS']:
callDetails.append("-d")
callDetails.append(os.environ['DATA_FHIR_SERVER_DETAILS'])
if os.environ['TERM_FHIR_SERVER_DETAILS']:
callDetails.append("-t")
callDetails.append(os.environ['TERM_FHIR_SERVER_DETAILS'])
if jsonMeasureConfigurationFile:
callDetails.append("-j")
callDetails.append(jsonMeasureConfigurationFile)
if resource:
callDetails.append("-r")
callDetails.append(resource)
if params:
for val in params:
callDetails.append("-p")
callDetails.append(val)
if filters:
for val in filters:
callDetails.append("--filter")
callDetails.append(val)
if measureServer:
callDetails.append("-m")
callDetails.append(measureServer)
for val in targets:
callDetails.append("-c")
callDetails.append(val)
out = subprocess.Popen(callDetails, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
tmpout=""
for line in out.stdout:
temp=line.decode('utf-8')
if not "[main]" in temp:
tmpout=tmpout+temp
out=tmpout
if regEx:
for line in expectedOutputs:
assert re.search(line, out), 'Did not contain: ' + line + '\nContained: ' + out
else:
respOut = out.splitlines()
error = "\n"
for line in expectedOutputs:
assert line in respOut, 'Did not contain: ' + line + '\nContained: ' + error.join(respOut)
print("In respOut:")
for line in respOut:
assert line in expectedOutputs, 'Did not contain: ' + line + '\nContained: ' + error.join(expectedOutputs)
| []
| []
| [
"TRUSTSTORE",
"DATA_FHIR_SERVER_DETAILS",
"TESTS_JSON",
"TRUSTSTORE_PASSWORD",
"TRUSTSTORE_TYPE",
"TERM_FHIR_SERVER_DETAILS",
"JAR"
]
| [] | ["TRUSTSTORE", "DATA_FHIR_SERVER_DETAILS", "TESTS_JSON", "TRUSTSTORE_PASSWORD", "TRUSTSTORE_TYPE", "TERM_FHIR_SERVER_DETAILS", "JAR"] | python | 7 | 0 | |
scripts/ESIM/main.py | import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
gpus = '1'
import numpy
import tensorflow as tf
import logging
from tensorflow import logging as log
from collections import OrderedDict
from data_iterator import TextIterator
from tensorflow.contrib import rnn
import warnings
import pickle as pkl
import sys
import pprint
import pdb
import os
import copy
import time
logger = logging.getLogger(__name__)
def _s(pp, name):  # add prefix
return '{}_{}'.format(pp, name)
def load_params(path, params):
pp = numpy.load(path)
    for kk, vv in params.items():
if kk not in pp:
warnings.warn('{} is not in the archive'.format(kk))
continue
params[kk] = pp[kk]
return params
def ortho_weight(ndim): # used by norm_weight below
"""
Random orthogonal weights
Used by norm_weights(below), in which case, we
are ensuring that the rows are orthogonal
(i.e W = U \Sigma V, U has the same
# of rows, V has the same # of cols)
"""
W = numpy.random.randn(ndim, ndim)
u, s, v = numpy.linalg.svd(W)
return u.astype('float32')
def norm_weight(nin, nout=None, scale=0.01, ortho=True):
"""
Random weights drawn from a Gaussian
"""
if nout is None:
nout = nin
if nout == nin and ortho:
W = ortho_weight(nin)
else:
# W = numpy.random.uniform(-0.5,0.5,size=(nin,nout))
W = scale * numpy.random.randn(nin, nout)
return W.astype('float32')
def prepare_data(sequence, sequence_d1, sequence_d2, labels, options, maxlen=None, max_word=100):
# length = [len(s) for s in sequence]
length, length_d1, length_d2 = [], [], []
for i, d1, d2 in zip(sequence, sequence_d1, sequence_d2):
dd1, dd2 = list(), list()
length.append(len(i))
for day in d1:
dd1.append(len(day))
length_d1.append(dd1)
for day in d2:
dd2.append(len(day))
length_d2.append(dd2)
if maxlen is not None: # max length is the sentence level
new_sequence = []
new_lengths = []
new_sequence_d1 = []
new_lengths_d1 = []
new_sequence_d2 = []
new_lengths_d2 = []
for l, s, ld1, sd1, ld2, sd2 in zip(length, sequence, length_d1, sequence_d1, length_d2, sequence_d2):
dd1, lld1, dd2, lld2 = list(), list(), list(), list()
if l < maxlen:
new_sequence.append(s)
new_lengths.append(l)
for i, j in zip(ld1, sd1):
if i < maxlen:
dd1.append(j)
lld1.append(i)
new_sequence_d1.append(dd1)
new_lengths_d1.append(lld1)
for i, j in zip(ld2, sd2):
if i < maxlen:
dd2.append(j)
lld2.append(i)
new_sequence_d2.append(dd2)
new_lengths_d2.append(lld2)
        length = new_lengths  # This filters out sentences longer than maxlen:
        sequence = new_sequence  # `length` holds the number of news items per sample,
        # `sequence` holds the token ids of each sentence
length_d1 = new_lengths_d1
sequence_d1 = new_sequence_d1
length_d2 = new_lengths_d2
sequence_d2 = new_sequence_d2
day1 = len(sequence_d1[0])
day2 = len(sequence_d2[0])
    ##TODO be careful here: set the max length larger to avoid a bug
if len(length) < 1:
return None, None, None, None, None, None, None
maxlen_x = numpy.max(length) # max time step
maxlen_xd1 = numpy.max([numpy.max(i) for i in length_d1])
maxlen_xd2 = numpy.max([numpy.max(i) for i in length_d2])
n_samples = len(sequence) # number of samples== batch
max_sequence = max(len(j) for i in sequence for j in i) # find the sequence max length
max_sequence_d1 = max(len(j) for i in sequence_d1 for z in i for j in z)
max_sequence_d2 = max(len(j) for i in sequence_d2 for z in i for j in z)
max_sequence = max_word if max_sequence > max_word else max_sequence # shrink the data size
max_sequence_d1 = max_word if max_sequence_d1 > max_word else max_sequence_d1 # shrink the data size
max_sequence_d2 = max_word if max_sequence_d2 > max_word else max_sequence_d2 # shrink the data size
##TODO for x
x = numpy.zeros((maxlen_x, n_samples, max_sequence)).astype('int64')
x_mask = numpy.zeros((maxlen_x, n_samples)).astype('float32')
##TODO for x_d1
x_d1 = numpy.zeros((day1, maxlen_xd1, n_samples, max_sequence_d1)).astype('int64')
x_d1_mask = numpy.zeros((day1,maxlen_xd1, n_samples)).astype('float32')
##TODO for x_d2
x_d2 = numpy.zeros((day2, maxlen_xd2, n_samples, max_sequence_d2)).astype('int64')
x_d2_mask = numpy.zeros((day2,maxlen_xd2, n_samples)).astype('float32')
# l = numpy.array(labels).astype('int64')
##TODO for label
l = numpy.zeros((n_samples,)).astype('int64')
for index, (i, j, k, ll) in enumerate(zip(sequence, sequence_d1, sequence_d2, labels)): # batch size
l[index] = ll
for idx, ss in enumerate(i): # time step
# x[idx, index, :sequence_length[idx]] = ss
if len(ss) < max_sequence:
x[idx, index, :len(ss)] = ss
else:
x[idx, index, :max_sequence] = ss[:max_sequence]
x_mask[idx, index] = 1.
for jj, day in enumerate(j):
for idx, ss in enumerate(day):
if len(ss) < max_sequence_d1:
x_d1[jj, idx, index, :len(ss)] = ss
else:
x_d1[jj, idx, index, :max_sequence_d1] = ss[:max_sequence_d1]
x_d1_mask[jj, idx, index] = 1.
for jj, day in enumerate(k):
for idx, ss in enumerate(day):
if len(ss) < max_sequence_d2:
x_d2[jj, idx, index, :len(ss)] = ss
else:
x_d2[jj, idx, index, :max_sequence_d2] = ss[:max_sequence_d2]
x_d2_mask[jj, idx, index] = 1.
return x, x_mask, x_d1, x_d1_mask, x_d2, x_d2_mask, l
def old_sequence_lstm(input, sequence_mask, keep_prob, is_training, options):
# input time_step,batch,sequence_step,embedding, 40*32*13*100
# sequence_mask shape is time_step,batch,sequence_step, 40*32*13
def fn(inp):
out = bilstm_filter(tf.transpose(inp[0], [1, 0, 2]), tf.transpose(inp[1], [1, 0]), keep_prob,
prefix='sequence_encode', dim=options['dim'],
is_training=is_training) # output shape: sequence_step,batch,2*lstm_unit(concate) 13*32*600
return tf.transpose(tf.concat(out, axis=2), perm=[1, 0, 2])
outputs = tf.map_fn(fn, (input, sequence_mask), dtype=tf.float32)
print(tf.shape(outputs)) # outputs shape 40*32*13*600
outputs = outputs * tf.expand_dims(sequence_mask, -1) # mask the output
with tf.variable_scope('words_attention'):
hidden = tf.layers.dense(outputs, units=300, activation=tf.nn.tanh, use_bias=False,
kernel_initializer=tf.random_normal_initializer(stddev=0.02), reuse=tf.AUTO_REUSE)
hidden = tf.nn.dropout(hidden, keep_prob)
# hidden 40*32*13*1200 #attention 40*32*13*1
attention = tf.layers.dense(hidden, units=1, use_bias=False,
kernel_initializer=tf.random_normal_initializer(stddev=0.02), activation=None)
padding = tf.fill(tf.shape(attention), float(-1e8)) # float('-inf')
attention = tf.where(tf.equal(tf.expand_dims(sequence_mask, -1), 0.), padding,
attention) # fill 0 with -1e8 for softmax
attention = tf.transpose(tf.nn.softmax(tf.transpose(attention, perm=[0, 1, 3, 2])),
perm=[0, 1, 3, 2]) # attention 40*32*13*r
attention = attention * tf.expand_dims(sequence_mask, -1) # mask the attention
outputs = tf.reduce_sum(outputs * attention, axis=2)
print(tf.shape(outputs))
return outputs
def sequence_lstm(input, sequence_mask, keep_prob, is_training, options):
# input time_step,batch,sequence_step,embedding, 40*32*13*100
time_step = tf.shape(input)[0]
# time_step = input.get_shape().as_list()[0]
output_list = tf.TensorArray(dtype=tf.float32, size=time_step)
# sequence_mask shape is time_step,batch,sequence_step, 40*32*13
t = tf.constant(0, dtype=tf.int32)
def cond(i, *args):
return i < time_step
def body(i, x, mask, out_):
out = bilstm_filter(tf.transpose(x[i], [1, 0, 2]), tf.transpose(mask[i], [1, 0]), keep_prob,
prefix='sequence_encode', dim=options['dim'],
is_training=is_training) # output shape: sequence_step,batch,2*lstm_unit(concate) 13*32*600
'''out = bilstm_filter(tf.concat(out, 2) * tf.expand_dims(tf.transpose(mask[i], [1, 0]), 2), tf.transpose(mask[i], [1, 0]), keep_prob,
prefix='sequence_encode', dim=options['dim'],
is_training=is_training)
'''
out = tf.concat(out, 2) * tf.expand_dims(tf.transpose(mask[i], [1, 0]), -1) # mask the output 13*32*600
att = attention_v1(tf.transpose(out, [1, 0, 2]), mask[i],
name='attention_1', keep=keep_prob) # attention shape 32*600
out_ = out_.write(i, att)
return i + 1, x, mask, out_
_, _, _, result = tf.while_loop(cond, body, [t, input, sequence_mask, output_list])
result = result.stack() # result shape is time_step,batch,hidden units 40*32*600
return result
def attention_v1(input, masks, name='attention', nin=600, keep=1.0):
# input is batch,time_step,hidden_state 32*40*600 mask 32*40
# hidden layer is:batch,hidden_shape,attention_hidden_size 32*40*1200 or 32*40*600
# attention shape after squeeze is 32*40, # batch,time_step,attention_size 32*40*1
    hidden = tf.layers.dense(input, nin // 2, activation=tf.nn.tanh, use_bias=False,
kernel_initializer=tf.random_normal_initializer(stddev=0.02),
name=_s(name, 'hidden'), reuse=tf.AUTO_REUSE)
hidden = tf.nn.dropout(hidden, keep)
attention = tf.layers.dense(hidden, 1 , activation=None, use_bias=False,
kernel_initializer=tf.random_normal_initializer(stddev=0.02), name=_s(name, 'out'),
reuse=tf.AUTO_REUSE)
padding = tf.fill(tf.shape(attention), float('-1e8')) # float('-inf')
attention = tf.where(tf.equal(tf.expand_dims(masks,-1), 0.), padding, attention) # fill 0 with a small number for softmax
attention = tf.nn.softmax(attention, 1) *tf.expand_dims(masks,-1) # 32*40*r #mask the attention here is not really neccesary,
outputs = tf.reduce_sum(input * attention, axis=1)#32*600
#outputs = tf.squeeze(tf.matmul(tf.transpose(attention, [0, 2, 1]), input)) # transpose to batch,hidden,time_step
return outputs
def attention_v2(input, masks, name='attention', nin=600, keep=1.0, r=4, beta=1.):
# input is batch,time_step,hidden_state 32*40*600 mask 32*40
# hidden layer is:batch,hidden_shape,attention_hidden_size 32*40*1200 or 32*40*600
# attention shape after squeeze is 32*40, # batch,time_step,attention_size 32*40*1
masks = tf.stack([masks] * r, -1) # copy r time for filling 32*40*r
iden = tf.eye(tf.shape(input)[1], batch_shape=[tf.shape(input)[0]]) # an identity matrix 32*40*40
    hidden = tf.layers.dense(input, nin // 2, activation=tf.nn.tanh, use_bias=False,
kernel_initializer=tf.random_normal_initializer(stddev=0.02),
name=_s(name, 'hidden'), reuse=tf.AUTO_REUSE)
hidden = tf.nn.dropout(hidden, keep)
attention = tf.layers.dense(hidden, r, activation=None, use_bias=False,
kernel_initializer=tf.random_normal_initializer(stddev=0.02), name=_s(name, 'out'),
reuse=tf.AUTO_REUSE)
padding = tf.fill(tf.shape(attention), float('-1e8')) # float('-inf')
attention = tf.where(tf.equal(masks, 0.), padding, attention) # fill 0 with a small number for softmax
attention = tf.nn.softmax(attention, 1) * masks # 32*40*r #mask the attention here is not really neccesary,
penalty = tf.norm((tf.matmul(attention, tf.transpose(attention, [0, 2, 1])) - iden), ord='fro',
axis=(-2, -1)) # the Frobenius norm penalty 32 dimension
attention = attention + beta * tf.expand_dims(tf.expand_dims(penalty, -1), -1) # expand twice
# outputs = tf.reduce_sum(input * attention, axis=1)#32*600
outputs = tf.matmul(tf.transpose(attention, [0, 2, 1]), input) # transpose to batch,hidden,time_step
outputs = tf.reshape(outputs, [tf.shape(outputs)[0], -1])
if name == 'attention_2':
outputs.set_shape([None, nin * (r ** 2)])
else:
outputs.set_shape([None, nin * r])
return outputs # result shape is batch, hidden_unit 32*600
def fflayer_2D(options, input, name='feed_forward', activation_function=None, nin=None, nout=None):
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
W = tf.get_variable(
_s(name, 'W'),
shape=[nin, nout],
# initializer=tf.random_uniform_initializer(-0.1, 0.1),
initializer=tf.random_normal_initializer(stddev=0.02),
dtype=tf.float32
)
bias = tf.get_variable(
_s(name, 'bias'),
shape=[nout],
initializer=tf.constant_initializer(0.),
dtype=tf.float32
)
# result = tf.nn.bias_add(tf.matmul(input, W), bias)
result = tf.nn.bias_add(tf.tensordot(input, W, [[-1], [0]]), bias)
if activation_function is None:
outputs = result
else:
outputs = activation_function(result)
return outputs
def bilstm_filter(input, mask, keep_prob, prefix='lstm', dim=300, is_training=True):
with tf.variable_scope(name_or_scope=prefix, reuse=tf.AUTO_REUSE):
sequence = tf.cast(tf.reduce_sum(mask, 0), tf.int32)
lstm_fw_cell = rnn.LSTMCell(dim, forget_bias=0.0, initializer=tf.orthogonal_initializer(), state_is_tuple=True)
# back directions
lstm_bw_cell = rnn.LSTMCell(dim, forget_bias=0.0, initializer=tf.orthogonal_initializer(), state_is_tuple=True)
keep_rate = tf.cond(is_training is not False and keep_prob < 1, lambda: 0.8, lambda: 1.0)
cell_dp_fw = rnn.DropoutWrapper(cell=lstm_fw_cell, output_keep_prob=keep_rate)
cell_dp_bw = rnn.DropoutWrapper(cell=lstm_bw_cell, output_keep_prob=keep_rate)
outputs, _ = tf.nn.bidirectional_dynamic_rnn(cell_dp_fw, cell_dp_bw, input, sequence_length=sequence,
dtype=tf.float32,
time_major=True)
return outputs
def init_params(options, worddicts):
params = OrderedDict()
# embedding
params['Wemb'] = norm_weight(options['n_words'], options['dim_word'])
# read embedding from GloVe
if options['embedding']:
with open(options['embedding'], 'r') as f:
for line in f:
tmp = line.split()
word = tmp[0]
vector = tmp[1:]
if word in worddicts and worddicts[word] < options['n_words']:
try:
params['Wemb'][worddicts[word], :] = vector
# encoder: bidirectional RNN
except ValueError as e:
print(str(e))
return params
def word_embedding(options, params):
embeddings = tf.get_variable("embeddings", shape=[options['n_words'], options['dim_word']],
initializer=tf.constant_initializer(numpy.array(params['Wemb'])))
return embeddings
def build_model(embedding, options):
""" Builds the entire computational graph used for training
"""
# description string: #words x #samples
with tf.device('/gpu:1'):
with tf.variable_scope('input'):
x = tf.placeholder(tf.int64, shape=[None, None, None],
name='x') # 3D vector timestep, batch and sequence(before embedding)40*32*13
x_mask = tf.placeholder(tf.float32, shape=[None, None], name='x_mask') # mask time step, batch
y = tf.placeholder(tf.int64, shape=[None], name='y')
##TODO important
keep_prob = tf.placeholder(tf.float32, [], name='keep_prob')
is_training = tf.placeholder(tf.bool, name='is_training')
##TODO important
# n_timesteps = x.get_shape().as_list()[0] # time steps
# n_samples = x.get_shape().as_list()[1] # n samples
sequence_mask = tf.cast(tf.abs(tf.sign(x)), tf.float32) # 3D
n_timesteps = tf.shape(x)[0] # time steps
n_samples = tf.shape(x)[1] # n samples
# # word embedding
##TODO word embedding
emb = tf.nn.embedding_lookup(embedding, x)
with tf.device('/gpu:1'):
# emb = tf.reduce_mean(emb, -2) # average embedding
# fed into the input of BILSTM from the official document
'''if options['use_dropout']:
emb = tf.nn.dropout(emb, keep_prob)'''
emb = sequence_lstm(emb, sequence_mask, keep_prob, is_training, options)
emb = emb * tf.expand_dims(x_mask, -1) # mask before attention
# TODO bilstm layers
# Change the time step and batch
att = attention_v1(tf.transpose(emb, [1, 0, 2]), tf.transpose(x_mask, [1, 0]),
name='attention_2', keep=keep_prob) # already masked after attention
# maxpolling and sum pooling from batch
if options['use_dropout']:
att = tf.nn.dropout(att, keep_prob)
'''conv1 = tf.layers.conv2d(inputs=tf.expand_dims(tf.transpose(emb,[1,0,2])),filters=32,kernel_size=[3, 2400],padding="same",activation=tf.nn.relu)
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)'''
logit = fflayer_2D(options, att, name='ff', activation_function=tf.nn.tanh, nin=2 * options['dim'],
nout=300) # 2 * options['dim']'''
if options['use_dropout']:
logit = tf.nn.dropout(logit, keep_prob)
pred = fflayer_2D(options, logit, name='fout', activation_function=None, nin=300, nout=2)
# with tf.device('/cpu:0'):
logger.info('Building f_cost...')
# todo not same
labels = tf.one_hot(y, depth=2, axis=1)
# labels = y
preds = tf.nn.softmax(pred, 1)
# preds = tf.nn.sigmoid(pred)
# pred=tf.reshape(pred,[-1])
cost = tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred, labels=labels)
# cost = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=labels,logits=pred),1)
# cost = -tf.reduce_sum((tf.cast(labels, tf.float32) * tf.log(preds + 1e-8)),axis=1)
# cost = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=y)
logger.info('Done')
'''
logit1 = tf.reduce_sum(ctx1 * tf.expand_dims(x_mask, 2), 0) / tf.expand_dims(tf.reduce_sum(x_mask, 0), 1)
logit2 = tf.reduce_max(ctx1 * tf.expand_dims(x_mask, 2), 0)
logit = tf.concat([logit1, logit2], 1)
'''
with tf.variable_scope('logging'):
tf.summary.scalar('current_cost', tf.reduce_mean(cost))
tf.summary.histogram('predicted_value', preds)
summary = tf.summary.merge_all()
return is_training, cost, x, x_mask, y, n_timesteps, preds, summary
def predict_pro_acc(sess, cost, prepare_data, model_options, iterator, maxlen, correct_pred, pred, summary, eidx,
is_training, writer=None):
# fo = open(_s(prefix,'pre.txt'), "w")
num = 0
valid_acc = 0
total_cost = 0
loss = 0
result = 0
for x_sent, x_d1_sent, x_d2_sent, y_sent in iterator:
num += len(x_sent)
data_x, data_x_mask, data_x_d1, data_x_d1_mask, data_x_d2, data_x_d2_mask, data_y = prepare_data(x_sent, x_d1_sent, x_d2_sent, y_sent, model_options, maxlen=maxlen)
loss, result, preds = sess.run([cost, correct_pred, pred],
feed_dict={'input/x:0': data_x, 'input/x_mask:0': data_x_mask,
'input/y:0': data_y, 'input/keep_prob:0': 1.,
'input/is_training:0': is_training})
valid_acc += result.sum()
total_cost += loss.sum()
final_acc = 1.0 * valid_acc / num
final_loss = 1.0 * total_cost / num
# if writer is not None:
# writer.add_summary(test_summary, eidx)
# print result,preds,loss,result_
print(preds, result, num)
return final_acc, final_loss
def train(
dim_word=100, # word vector dimensionality
dim=100, # the number of GRU units
encoder='lstm', # encoder model
decoder='lstm', # decoder model
patience=10, # early stopping patience
max_epochs=5000,
finish_after=10000000, # finish after this many updates
decay_c=0., # L2 regularization penalty
clip_c=-1., # gradient clipping threshold
lrate=0.0004, # learning rate
n_words=100000, # vocabulary size
n_words_lemma=100000,
maxlen=100, # maximum length of the description
optimizer='adam',
batch_size=32,
valid_batch_size=32,
save_model='../../models/',
saveto='model.npz',
dispFreq=100,
validFreq=1000,
saveFreq=1000, # save the parameters after every saveFreq updates
use_dropout=False,
reload_=False,
verbose=False, # print verbose information for debug but slow speed
delay1=3,
delay2=7,
types='title',
cut_word=False,
cut_sentence=False,
datasets=[],
valid_datasets=[],
test_datasets=[],
dictionary=[],
kb_dicts=[],
embedding='', # pretrain embedding file, such as word2vec, GLOVE
dim_kb=5,
RUN_NAME="histogram_visualization",
wait_N=10
):
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s: %(name)s: %(levelname)s: %(message)s",
filename='./log_result.txt')
# Model options
model_options = locals().copy()
# tf.set_random_seed(2345)
with open(dictionary, 'rb') as f:
worddicts = pkl.load(f)
logger.info("Loading knowledge base ...")
# reload options
if reload_ and os.path.exists(saveto):
logger.info("Reload options")
with open('%s.pkl' % saveto, 'rb') as f:
model_options = pkl.load(f)
logger.debug(pprint.pformat(model_options))
logger.info("Loading data")
train = TextIterator(datasets[0], datasets[1],
dict=dictionary,
delay1=delay1,
delay2=delay2,
types=types,
n_words=n_words,
batch_size=batch_size,
cut_word=cut_word,
cut_sentence=cut_sentence,
shuffle=True)
train_valid = TextIterator(datasets[0], datasets[1],
dict=dictionary,
delay1=delay1,
delay2=delay2,
types=types,
n_words=n_words,
batch_size=valid_batch_size,
cut_word=cut_word,
cut_sentence=cut_sentence,
shuffle=False)
valid = TextIterator(valid_datasets[0], valid_datasets[1],
dict=dictionary,
delay1=delay1,
delay2=delay2,
types=types,
n_words=n_words,
batch_size=valid_batch_size,
cut_word=cut_word,
cut_sentence=cut_sentence,
shuffle=False)
test = TextIterator(test_datasets[0], test_datasets[1],
dict=dictionary,
delay1=delay1,
delay2=delay2,
types=types,
n_words=n_words,
batch_size=valid_batch_size,
cut_word=cut_word,
cut_sentence=cut_sentence,
shuffle=False)
# Initialize (or reload) the parameters using 'model_options'
# then build the tensorflow graph
logger.info("init_word_embedding")
params = init_params(model_options, worddicts)
embedding = word_embedding(model_options, params)
is_training, cost, x, x_mask, y, n_timesteps, pred, summary = build_model(embedding, model_options)
lr = tf.Variable(0.0, trainable=False)
def assign_lr(session, lr_value):
session.run(tf.assign(lr, lr_value))
logger.info('Building optimizers...')
optimizer = tf.train.AdamOptimizer(learning_rate=lr)
logger.info('Done')
# print all variables
tvars = tf.trainable_variables()
for var in tvars:
print(var.name, var.shape)
    lossL2 = tf.add_n([tf.nn.l2_loss(v) for v in tvars if 'embeddings' not in v.name and 'bias' not in v.name]) * 0.0001  # L2 on weights only, excluding embeddings and biases
cost = cost + lossL2
# regularization_cost = 0.0003 * tf.reduce_sum([tf.nn.l2_loss(v) for v in tvars])
# cost = cost + regularization_cost
grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), model_options['clip_c'])
train_op = optimizer.apply_gradients(zip(grads, tvars))
# train_op = optimizer.minimize(cost)
op_loss = tf.reduce_mean(cost)
logger.info("correct_pred")
correct_pred = tf.equal(tf.argmax(input=pred, axis=1), y) # make prediction
logger.info("Done")
temp_accuracy = tf.cast(correct_pred, tf.float32) # change to float32
logger.info("init variables")
init = tf.global_variables_initializer()
logger.info("Done")
# saver
saver = tf.train.Saver(max_to_keep=15)
config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = 0.4
config.gpu_options.allow_growth = True
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)) as sess:
training_writer = tf.summary.FileWriter("./logs/{}/training".format(RUN_NAME), sess.graph)
validate_writer = tf.summary.FileWriter("./logs/{}/validate".format(RUN_NAME), sess.graph)
testing_writer = tf.summary.FileWriter("./logs/{}/testing".format(RUN_NAME), sess.graph)
sess.run(init)
history_errs = []
# reload history
if reload_ and os.path.exists(saveto):
logger.info("Reload history error")
history_errs = list(numpy.load(saveto)['history_errs'])
bad_counter = 0
if validFreq == -1:
validFreq = len(train[0]) / batch_size
if saveFreq == -1:
saveFreq = len(train[0]) / batch_size
uidx = 0
estop = False
valid_acc_record = []
test_acc_record = []
best_num = -1
best_epoch_num = 0
lr_change_list = []
wait_counter = 0
wait_N = model_options['wait_N']
learning_rate = model_options['lrate']
assign_lr(sess, learning_rate)
for eidx in range(max_epochs):
n_samples = 0
for x, x_d1, x_d2, y in train:
n_samples += len(x)
uidx += 1
keep_prob = 0.5
is_training = True
data_x, data_x_mask, data_x_d1, data_x_d1_mask, data_x_d2, data_x_d2_mask, data_y = prepare_data(x,
x_d1,
x_d2,
y,
model_options,
maxlen=maxlen)
print(data_x.shape, data_x_mask.shape, data_y.shape)
assert data_y.shape[0] == data_x.shape[1], 'Size does not match'
if x is None:
logger.debug('Minibatch with zero sample under length {0}'.format(maxlen))
uidx -= 1
continue
ud_start = time.time()
_, loss = sess.run([train_op, op_loss],
feed_dict={'input/x:0': data_x, 'input/x_mask:0': data_x_mask, 'input/y:0': data_y,
'input/keep_prob:0': keep_prob, 'input/is_training:0': is_training})
ud = time.time() - ud_start
'''train_summary = sess.run(summary, feed_dict={'input/x:0': data_x, 'input/x_mask:0': data_x_mask,
'input/y:0': data_y,'input/keep_prob:0':keep_prob,'input/is_training:0':is_training})
training_writer.add_summary(train_summary, eidx)'''
if numpy.mod(uidx, dispFreq) == 0:
logger.debug('Epoch {0} Update {1} Cost {2} TIME {3}'.format(eidx, uidx, loss, ud))
# validate model on validation set and early stop if necessary
if numpy.mod(uidx, validFreq) == 0:
keep_prob = 1
is_training = False
valid_acc, valid_loss = predict_pro_acc(sess, cost, prepare_data, model_options, valid, maxlen,
correct_pred, pred, summary, eidx, is_training,
validate_writer)
test_acc, test_loss = predict_pro_acc(sess, cost, prepare_data, model_options, test, maxlen,
correct_pred, pred, summary, eidx, is_training,
testing_writer)
valid_err = 1.0 - valid_acc
# valid_err = valid_loss
history_errs.append(valid_err)
logger.debug('Epoch {0}'.format(eidx))
logger.debug('Valid cost {0}'.format(valid_loss))
logger.debug('Valid accuracy {0}'.format(valid_acc))
logger.debug('Test cost {0}'.format(test_loss))
logger.debug('Test accuracy {0}'.format(test_acc))
logger.debug('learning_rate: {0}'.format(learning_rate))
valid_acc_record.append(valid_acc)
test_acc_record.append(test_acc)
                    if uidx == 0 or valid_err <= numpy.array(history_errs).min():
best_num = best_num + 1
best_epoch_num = eidx
wait_counter = 0
logger.info("Saving...")
saver.save(sess, _s(_s(_s(save_model, "epoch"), str(best_num)), "model.ckpt"))
logger.info(_s(_s(_s(save_model, "epoch"), str(best_num)), "model.ckpt"))
numpy.savez(saveto, history_errs=history_errs, **params)
pkl.dump(model_options, open('{}.pkl'.format(saveto), 'wb'))
logger.info("Done")
if valid_err > numpy.array(history_errs).min():
wait_counter += 1
# wait_counter +=1 if valid_err>numpy.array(history_errs).min() else 0
if wait_counter >= wait_N:
logger.info("wait_counter max, need to half the lr")
# print 'wait_counter max, need to half the lr'
bad_counter += 1
wait_counter = 0
logger.debug('bad_counter: {0}'.format(bad_counter))
# TODO change the learining rate
learning_rate = learning_rate * 0.5
# learning_rate = learning_rate
assign_lr(sess, learning_rate)
lr_change_list.append(eidx)
logger.debug('lrate change to: {0}'.format(learning_rate))
# print 'lrate change to: ' + str(lrate)
if bad_counter > patience:
logger.info("Early Stop!")
estop = True
break
if numpy.isnan(valid_err):
pdb.set_trace()
# finish after this many updates
if uidx >= finish_after:
logger.debug('Finishing after iterations! {0}'.format(uidx))
# print 'Finishing after %d iterations!' % uidx
estop = True
break
logger.debug('Seen samples: {0}'.format(n_samples))
# print 'Seen %d samples' % n_samples
if estop:
break
with tf.Session() as sess:
# Restore variables from disk.
saver.restore(sess, _s(_s(_s(save_model, "epoch"), str(best_num)), "model.ckpt"))
keep_prob = 1
is_training = False
logger.info('=' * 80)
logger.info('Final Result')
logger.info('=' * 80)
logger.debug('best epoch {0}'.format(best_epoch_num))
valid_acc, valid_cost = predict_pro_acc(sess, cost, prepare_data, model_options, valid,
maxlen, correct_pred, pred, summary, eidx, is_training, None)
logger.debug('Valid cost {0}'.format(valid_cost))
logger.debug('Valid accuracy {0}'.format(valid_acc))
# print 'Valid cost', valid_cost
# print 'Valid accuracy', valid_acc
test_acc, test_cost = predict_pro_acc(sess, cost, prepare_data, model_options, test,
maxlen, correct_pred, pred, summary, eidx, is_training, None)
logger.debug('Test cost {0}'.format(test_cost))
logger.debug('Test accuracy {0}'.format(test_acc))
# print 'best epoch ', best_epoch_num
train_acc, train_cost = predict_pro_acc(sess, cost, prepare_data, model_options, train_valid,
maxlen, correct_pred, pred, summary, eidx, is_training, None)
logger.debug('Train cost {0}'.format(train_cost))
logger.debug('Train accuracy {0}'.format(train_acc))
# print 'Train cost', train_cost
# print 'Train accuracy', train_acc
# print 'Test cost ', test_cost
# print 'Test accuracy ', test_acc
return None
if __name__ == '__main__':
pass
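# --- Illustrative sketch (not part of the original script): the early-stopping and
# learning-rate-halving policy used in the training loop above, isolated as a plain
# function. wait_N, patience and the 0.5 decay mirror the hyper-parameters referenced
# above; the default values here are assumptions for demonstration only.
def lr_schedule_step(valid_err, history_errs, learning_rate, wait_counter,
                     bad_counter, wait_N=1, patience=7, decay=0.5):
    """Return updated (learning_rate, wait_counter, bad_counter, estop)."""
    estop = False
    if history_errs and valid_err > min(history_errs):
        wait_counter += 1            # validation got worse: keep (or start) waiting
    if wait_counter >= wait_N:
        learning_rate *= decay       # halve the learning rate after wait_N bad checks
        bad_counter += 1
        wait_counter = 0
    if bad_counter > patience:       # too many decays without improvement: early stop
        estop = True
    return learning_rate, wait_counter, bad_counter, estop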
| []
| []
| [
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"] | python | 2 | 0 | |
babyai/arguments.py | """
Common arguments for BabyAI training scripts
"""
import os
import argparse
import numpy as np
class ArgumentParser(argparse.ArgumentParser):
def __init__(self):
super().__init__()
# Base arguments
self.add_argument("--env", default=None,
help="name of the environment to train on (REQUIRED)")
self.add_argument("--model", default=None,
help="name of the model (default: ENV_ALGO_TIME)")
self.add_argument("--pretrained-model", default=None,
help='If you\'re using a pre-trained model and want the fine-tuned one to have a new name')
self.add_argument("--seed", type=int, default=1,
help="random seed; if 0, a random random seed will be used (default: 1)")
self.add_argument("--task-id-seed", action='store_true',
help="use the task id within a Slurm job array as the seed")
self.add_argument("--procs", type=int, default=64,
help="number of processes (default: 64)")
self.add_argument("--tb", action="store_true", default=False,
help="log into Tensorboard")
# Training arguments
self.add_argument("--log-interval", type=int, default=1,
help="number of updates between two logs (default(Mathijs): 1, used to be 10)")
self.add_argument("--save-interval", type=int, default=1000,
help="number of updates between two saves (default: 1000, 0 means no saving)")
self.add_argument("--frames", type=int, default=int(9e10),
help="number of frames of training (default: 9e10)")
self.add_argument("--patience", type=int, default=100,
help="patience for early stopping (default: 100)")
self.add_argument("--epochs", type=int, default=1000000,
help="maximum number of epochs")
self.add_argument("--frames-per-proc", type=int, default=40,
help="number of frames per process before update (default: 40)")
self.add_argument("--lr", type=float, default=1e-4,
help="learning rate (default: 1e-4)")
self.add_argument("--beta1", type=float, default=0.9,
help="beta1 for Adam (default: 0.9)")
self.add_argument("--beta2", type=float, default=0.999,
help="beta2 for Adam (default: 0.999)")
self.add_argument("--recurrence", type=int, default=20,
help="number of timesteps gradient is backpropagated (default: 20)")
self.add_argument("--optim-eps", type=float, default=1e-5,
help="Adam and RMSprop optimizer epsilon (default: 1e-5)")
self.add_argument("--optim-alpha", type=float, default=0.99,
help="RMSprop optimizer apha (default: 0.99)")
self.add_argument("--batch-size", type=int, default=1280,
help="batch size for PPO (default: 1280)")
self.add_argument("--entropy-coef", type=float, default=0.01,
help="entropy term coefficient (default: 0.01)")
self.add_argument("--dropout", type=float, default=0.5,
help="dropout probability for processed corrections (default: 0.5)")
self.add_argument("--save-each-epoch", action="store_true", default=False,
help="store model at each epoch")
self.add_argument("--class-weights", action="store_true", default=False,
help="use class weights in loss function")
self.add_argument("--compute-cic", action="store_true", default=False,
help="compute and log causal influence of communication metric after each epoch")
# Model parameters
self.add_argument("--image-dim", type=int, default=128,
help="dimensionality of the image embedding")
self.add_argument("--memory-dim", type=int, default=128,
help="dimensionality of the memory LSTM")
self.add_argument("--instr-dim", type=int, default=128,
help="dimensionality of the memory LSTM")
self.add_argument("--no-instr", action="store_true", default=False,
help="don't use instructions in the model")
self.add_argument("--instr-arch", default="gru",
help="arch to encode instructions, possible values: gru, bigru, conv, bow (default: gru)")
self.add_argument("--no-mem", action="store_true", default=False,
help="don't use memory in the model")
self.add_argument("--arch", default='expert_filmcnn',
help="image embedding architecture")
self.add_argument("--learner", action="store_true", default=False,
help="use ordinary learner")
# Corrector parameters
self.add_argument("--corrector", action="store_true", default=False,
help="use correction module")
self.add_argument("--corr-length", type=int, default=2,
help="length of correction messages (max length if --var-corr-length true)")
self.add_argument("--corr-own-vocab", action="store_true", default=False,
help="corrector uses its own vocabulary instead of instruction vocabulary")
self.add_argument("--corr-embedding-dim", type=int, default=0,
help="embedding dimensionality for corrector")
self.add_argument("--corr-vocab-size", type=int, default=3,
help="vocabulary size of corrector")
self.add_argument("--pretrained-corrector", type=str, default=None,
help="location of pretrained corrector to use and freeze")
self.add_argument("--show-corrections", action="store_true", default=False,
help="show correction messages")
self.add_argument("--corrector-frozen", action="store_true", default=False,
help="freeze pretrained corrector")
self.add_argument("--random-corrector", action="store_true", default=False,
help="randomize correction messages")
self.add_argument("--var-corr-length", action="store_true", default=False,
help="variable length correction messages with penalty for longer ones")
self.add_argument("--corr-loss-coef", type=float, default=0.1,
help="correction loss coefficient (untested default: 0.1)")
self.add_argument("--weigh-corrections", action="store_true", default=False,
help="weigh corrections depending on entropy of previous timestep")
self.add_argument("--correction-weight-loss-coef", type=float, default=1.0,
help="coefficient for correction weight loss")
# Validation parameters
self.add_argument("--val-seed", type=int, default=0,
help="seed for environment used for validation (default: 0)")
self.add_argument("--val-interval", type=int, default=1,
help="number of epochs between two validation checks (default: 1)")
self.add_argument("--val-episodes", type=int, default=500,
help="number of episodes used to evaluate the agent, and to evaluate validation accuracy")
def parse_args(self):
"""
Parse the arguments and perform some basic validation
"""
args = super().parse_args()
# Set seed for all randomness sources
if args.seed == 0:
args.seed = np.random.randint(10000)
if args.task_id_seed:
args.seed = int(os.environ['SLURM_ARRAY_TASK_ID'])
print('set seed to {}'.format(args.seed))
# TODO: more validation
return args
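# --- Illustrative usage sketch (not part of the original module); the command-line
# values in the comment below are assumptions for demonstration only.
if __name__ == "__main__":
    # e.g. python arguments.py --env BabyAI-GoToLocal-v0 --seed 3 --lr 5e-5
    parser = ArgumentParser()
    args = parser.parse_args()
    print(args.env, args.seed, args.lr, args.procs)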
| []
| []
| [
"SLURM_ARRAY_TASK_ID"
]
| [] | ["SLURM_ARRAY_TASK_ID"] | python | 1 | 0 | |
tools/keep-block-check/keep-block-check_test.go | // Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: AGPL-3.0
package main
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"regexp"
"strings"
"testing"
"time"
"git.arvados.org/arvados.git/sdk/go/arvadosclient"
"git.arvados.org/arvados.git/sdk/go/arvadostest"
"git.arvados.org/arvados.git/sdk/go/keepclient"
. "gopkg.in/check.v1"
)
// Gocheck boilerplate
func Test(t *testing.T) {
TestingT(t)
}
// Gocheck boilerplate
var _ = Suite(&ServerRequiredSuite{})
var _ = Suite(&DoMainTestSuite{})
type ServerRequiredSuite struct{}
type DoMainTestSuite struct{}
var kc *keepclient.KeepClient
var logBuffer bytes.Buffer
var TestHash = "aaaa09c290d0fb1ca068ffaddf22cbd0"
var TestHash2 = "aaaac516f788aec4f30932ffb6395c39"
var blobSignatureTTL = time.Duration(2*7*24) * time.Hour
func (s *ServerRequiredSuite) SetUpSuite(c *C) {
arvadostest.StartAPI()
}
func (s *ServerRequiredSuite) TearDownSuite(c *C) {
arvadostest.StopAPI()
arvadostest.ResetEnv()
}
func (s *ServerRequiredSuite) SetUpTest(c *C) {
logOutput := io.MultiWriter(&logBuffer)
log.SetOutput(logOutput)
}
func (s *ServerRequiredSuite) TearDownTest(c *C) {
arvadostest.StopKeep(2)
log.SetOutput(os.Stdout)
log.Printf("%v", logBuffer.String())
}
func (s *DoMainTestSuite) SetUpSuite(c *C) {
}
func (s *DoMainTestSuite) SetUpTest(c *C) {
logOutput := io.MultiWriter(&logBuffer)
log.SetOutput(logOutput)
keepclient.RefreshServiceDiscovery()
}
func (s *DoMainTestSuite) TearDownTest(c *C) {
log.SetOutput(os.Stdout)
log.Printf("%v", logBuffer.String())
}
func setupKeepBlockCheck(c *C, enforcePermissions bool, keepServicesJSON string) {
setupKeepBlockCheckWithTTL(c, enforcePermissions, keepServicesJSON, blobSignatureTTL)
}
func setupKeepBlockCheckWithTTL(c *C, enforcePermissions bool, keepServicesJSON string, ttl time.Duration) {
var config apiConfig
config.APIHost = os.Getenv("ARVADOS_API_HOST")
config.APIToken = arvadostest.DataManagerToken
config.APIHostInsecure = arvadosclient.StringBool(os.Getenv("ARVADOS_API_HOST_INSECURE"))
// Start Keep servers
arvadostest.StartKeep(2, enforcePermissions)
// setup keepclients
var err error
kc, ttl, err = setupKeepClient(config, keepServicesJSON, ttl)
c.Assert(ttl, Equals, blobSignatureTTL)
c.Check(err, IsNil)
keepclient.RefreshServiceDiscovery()
}
// Setup test data
func setupTestData(c *C) []string {
allLocators := []string{}
// Put a few blocks
for i := 0; i < 5; i++ {
hash, _, err := kc.PutB([]byte(fmt.Sprintf("keep-block-check-test-data-%d", i)))
c.Check(err, IsNil)
allLocators = append(allLocators, strings.Split(hash, "+A")[0])
}
return allLocators
}
func setupConfigFile(c *C, fileName string) string {
// Setup a config file
file, err := ioutil.TempFile(os.TempDir(), fileName)
c.Check(err, IsNil)
// Add config to file. While at it, throw some extra white space
fileContent := "ARVADOS_API_HOST=" + os.Getenv("ARVADOS_API_HOST") + "\n"
fileContent += "ARVADOS_API_TOKEN=" + arvadostest.DataManagerToken + "\n"
fileContent += "\n"
fileContent += "ARVADOS_API_HOST_INSECURE=" + os.Getenv("ARVADOS_API_HOST_INSECURE") + "\n"
fileContent += " ARVADOS_EXTERNAL_CLIENT = false \n"
fileContent += " NotANameValuePairAndShouldGetIgnored \n"
fileContent += "ARVADOS_BLOB_SIGNING_KEY=abcdefg\n"
_, err = file.Write([]byte(fileContent))
c.Check(err, IsNil)
return file.Name()
}
func setupBlockHashFile(c *C, name string, blocks []string) string {
// Setup a block hash file
file, err := ioutil.TempFile(os.TempDir(), name)
c.Check(err, IsNil)
// Add the hashes to the file. While at it, throw some extra white space
fileContent := ""
for _, hash := range blocks {
fileContent += fmt.Sprintf(" %s \n", hash)
}
fileContent += "\n"
_, err = file.Write([]byte(fileContent))
c.Check(err, IsNil)
return file.Name()
}
func checkErrorLog(c *C, blocks []string, prefix, suffix string) {
for _, hash := range blocks {
expected := `(?ms).*` + prefix + `.*` + hash + `.*` + suffix + `.*`
c.Check(logBuffer.String(), Matches, expected)
}
}
func checkNoErrorsLogged(c *C, prefix, suffix string) {
expected := prefix + `.*` + suffix
match, _ := regexp.MatchString(expected, logBuffer.String())
c.Assert(match, Equals, false)
}
func (s *ServerRequiredSuite) TestBlockCheck(c *C) {
setupKeepBlockCheck(c, false, "")
allLocators := setupTestData(c)
err := performKeepBlockCheck(kc, blobSignatureTTL, "", allLocators, true)
c.Check(err, IsNil)
checkNoErrorsLogged(c, "Error verifying block", "Block not found")
}
func (s *ServerRequiredSuite) TestBlockCheckWithBlobSigning(c *C) {
setupKeepBlockCheck(c, true, "")
allLocators := setupTestData(c)
err := performKeepBlockCheck(kc, blobSignatureTTL, arvadostest.BlobSigningKey, allLocators, true)
c.Check(err, IsNil)
checkNoErrorsLogged(c, "Error verifying block", "Block not found")
}
func (s *ServerRequiredSuite) TestBlockCheckWithBlobSigningAndTTLFromDiscovery(c *C) {
setupKeepBlockCheckWithTTL(c, true, "", 0)
allLocators := setupTestData(c)
err := performKeepBlockCheck(kc, blobSignatureTTL, arvadostest.BlobSigningKey, allLocators, true)
c.Check(err, IsNil)
checkNoErrorsLogged(c, "Error verifying block", "Block not found")
}
func (s *ServerRequiredSuite) TestBlockCheck_NoSuchBlock(c *C) {
setupKeepBlockCheck(c, false, "")
allLocators := setupTestData(c)
allLocators = append(allLocators, TestHash)
allLocators = append(allLocators, TestHash2)
err := performKeepBlockCheck(kc, blobSignatureTTL, "", allLocators, true)
c.Check(err, NotNil)
c.Assert(err.Error(), Equals, "Block verification failed for 2 out of 7 blocks with matching prefix.")
checkErrorLog(c, []string{TestHash, TestHash2}, "Error verifying block", "Block not found")
}
func (s *ServerRequiredSuite) TestBlockCheck_NoSuchBlock_WithMatchingPrefix(c *C) {
setupKeepBlockCheck(c, false, "")
allLocators := setupTestData(c)
allLocators = append(allLocators, TestHash)
allLocators = append(allLocators, TestHash2)
locatorFile := setupBlockHashFile(c, "block-hash", allLocators)
defer os.Remove(locatorFile)
locators, err := getBlockLocators(locatorFile, "aaa")
c.Check(err, IsNil)
err = performKeepBlockCheck(kc, blobSignatureTTL, "", locators, true)
c.Check(err, NotNil)
// Of the 7 blocks in allLocators, only two match the prefix and hence only those are checked
c.Assert(err.Error(), Equals, "Block verification failed for 2 out of 2 blocks with matching prefix.")
checkErrorLog(c, []string{TestHash, TestHash2}, "Error verifying block", "Block not found")
}
func (s *ServerRequiredSuite) TestBlockCheck_NoSuchBlock_WithPrefixMismatch(c *C) {
setupKeepBlockCheck(c, false, "")
allLocators := setupTestData(c)
allLocators = append(allLocators, TestHash)
allLocators = append(allLocators, TestHash2)
locatorFile := setupBlockHashFile(c, "block-hash", allLocators)
defer os.Remove(locatorFile)
locators, err := getBlockLocators(locatorFile, "999")
c.Check(err, IsNil)
err = performKeepBlockCheck(kc, blobSignatureTTL, "", locators, true)
c.Check(err, IsNil) // there were no matching locators in file and hence nothing was checked
}
func (s *ServerRequiredSuite) TestBlockCheck_BadSignature(c *C) {
setupKeepBlockCheck(c, true, "")
setupTestData(c)
err := performKeepBlockCheck(kc, blobSignatureTTL, "badblobsigningkey", []string{TestHash, TestHash2}, false)
c.Assert(err.Error(), Equals, "Block verification failed for 2 out of 2 blocks with matching prefix.")
checkErrorLog(c, []string{TestHash, TestHash2}, "Error verifying block", "HTTP 403")
// verbose logging not requested
c.Assert(strings.Contains(logBuffer.String(), "Verifying block 1 of 2"), Equals, false)
}
var testKeepServicesJSON = `{
"kind":"arvados#keepServiceList",
"etag":"",
"self_link":"",
"offset":null, "limit":null,
"items":[
{"href":"/keep_services/zzzzz-bi6l4-123456789012340",
"kind":"arvados#keepService",
"uuid":"zzzzz-bi6l4-123456789012340",
"service_host":"keep0.zzzzz.arvadosapi.com",
"service_port":25107,
"service_ssl_flag":false,
"service_type":"disk",
"read_only":false },
{"href":"/keep_services/zzzzz-bi6l4-123456789012341",
"kind":"arvados#keepService",
"uuid":"zzzzz-bi6l4-123456789012341",
"service_host":"keep0.zzzzz.arvadosapi.com",
"service_port":25108,
"service_ssl_flag":false,
"service_type":"disk",
"read_only":false }
],
"items_available":2 }`
// Setup block-check using keepServicesJSON with fake keepservers.
// Expect error during performKeepBlockCheck due to unreachable keepservers.
func (s *ServerRequiredSuite) TestErrorDuringKeepBlockCheck_FakeKeepservers(c *C) {
setupKeepBlockCheck(c, false, testKeepServicesJSON)
err := performKeepBlockCheck(kc, blobSignatureTTL, "", []string{TestHash, TestHash2}, true)
c.Assert(err.Error(), Equals, "Block verification failed for 2 out of 2 blocks with matching prefix.")
checkErrorLog(c, []string{TestHash, TestHash2}, "Error verifying block", "")
}
// Test keep-block-check initialization with keepServicesJSON
func (s *ServerRequiredSuite) TestKeepBlockCheck_InitializeWithKeepServicesJSON(c *C) {
setupKeepBlockCheck(c, false, testKeepServicesJSON)
found := 0
for k := range kc.LocalRoots() {
if k == "zzzzz-bi6l4-123456789012340" || k == "zzzzz-bi6l4-123456789012341" {
found++
}
}
c.Check(found, Equals, 2)
}
// Test loadConfig func
func (s *ServerRequiredSuite) TestLoadConfig(c *C) {
// Setup config file
configFile := setupConfigFile(c, "config")
defer os.Remove(configFile)
// load configuration from the file
config, blobSigningKey, err := loadConfig(configFile)
c.Check(err, IsNil)
c.Assert(config.APIHost, Equals, os.Getenv("ARVADOS_API_HOST"))
c.Assert(config.APIToken, Equals, arvadostest.DataManagerToken)
c.Assert(config.APIHostInsecure, Equals, arvadosclient.StringBool(os.Getenv("ARVADOS_API_HOST_INSECURE")))
c.Assert(config.ExternalClient, Equals, false)
c.Assert(blobSigningKey, Equals, "abcdefg")
}
func (s *DoMainTestSuite) Test_doMain_WithNoConfig(c *C) {
args := []string{"-prefix", "a"}
err := doMain(args)
c.Check(err, NotNil)
c.Assert(strings.Contains(err.Error(), "config file not specified"), Equals, true)
}
func (s *DoMainTestSuite) Test_doMain_WithNoSuchConfigFile(c *C) {
args := []string{"-config", "no-such-file"}
err := doMain(args)
c.Check(err, NotNil)
c.Assert(strings.Contains(err.Error(), "no such file or directory"), Equals, true)
}
func (s *DoMainTestSuite) Test_doMain_WithNoBlockHashFile(c *C) {
config := setupConfigFile(c, "config")
defer os.Remove(config)
// Start keepservers.
arvadostest.StartKeep(2, false)
defer arvadostest.StopKeep(2)
args := []string{"-config", config}
err := doMain(args)
c.Assert(strings.Contains(err.Error(), "block-hash-file not specified"), Equals, true)
}
func (s *DoMainTestSuite) Test_doMain_WithNoSuchBlockHashFile(c *C) {
config := setupConfigFile(c, "config")
defer os.Remove(config)
arvadostest.StartKeep(2, false)
defer arvadostest.StopKeep(2)
args := []string{"-config", config, "-block-hash-file", "no-such-file"}
err := doMain(args)
c.Assert(strings.Contains(err.Error(), "no such file or directory"), Equals, true)
}
func (s *DoMainTestSuite) Test_doMain(c *C) {
// Start keepservers.
arvadostest.StartKeep(2, false)
defer arvadostest.StopKeep(2)
config := setupConfigFile(c, "config")
defer os.Remove(config)
locatorFile := setupBlockHashFile(c, "block-hash", []string{TestHash, TestHash2})
defer os.Remove(locatorFile)
args := []string{"-config", config, "-block-hash-file", locatorFile, "-v"}
err := doMain(args)
c.Check(err, NotNil)
c.Assert(err.Error(), Equals, "Block verification failed for 2 out of 2 blocks with matching prefix.")
checkErrorLog(c, []string{TestHash, TestHash2}, "Error verifying block", "Block not found")
c.Assert(strings.Contains(logBuffer.String(), "Verifying block 1 of 2"), Equals, true)
}
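// Illustrative invocation sketch (assumption, not part of the original suite): with
// ARVADOS_API_HOST and ARVADOS_API_HOST_INSECURE exported for a test cluster, the
// suite can be run with `go test ./tools/keep-block-check/...`.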
| [
"\"ARVADOS_API_HOST\"",
"\"ARVADOS_API_HOST_INSECURE\"",
"\"ARVADOS_API_HOST\"",
"\"ARVADOS_API_HOST_INSECURE\"",
"\"ARVADOS_API_HOST\"",
"\"ARVADOS_API_HOST_INSECURE\""
]
| []
| [
"ARVADOS_API_HOST",
"ARVADOS_API_HOST_INSECURE"
]
| [] | ["ARVADOS_API_HOST", "ARVADOS_API_HOST_INSECURE"] | go | 2 | 0 | |
samples/java/computer-vision/recognize-text-ocr/src/main/java/RecognizeTextOCR.java | import com.microsoft.azure.cognitiveservices.vision.computervision.*;
import com.microsoft.azure.cognitiveservices.vision.computervision.models.*;
import java.io.File;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.List;
public class RecognizeTextOCR {
public static String subKey = System.getenv("AZURE_COMPUTERVISION_API_KEY");
public static String baseURL = System.getenv("AZURE_ENDPOINT");
public static void main(String[] args) {
try {
RecognizeTextOCRSample.RunSample(baseURL, subKey);
} catch (Exception e) {
System.out.println(e.getMessage());
e.printStackTrace();
}
}
private static class RecognizeTextOCRSample {
public static void RunSample(String url, String key) {
ComputerVisionClient compVisClient = ComputerVisionManager.authenticate(key).withEndpoint(url);
//System.out.println("compVisClient.endpoint(): " + compVisClient.endpoint());
String imgPath = "src\\main\\resources\\printed_text.jpg";
String remotePath = "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/ComputerVision/Images/handwritten_text.jpg";
System.out.println("\nRecognizing printed text with OCR on a local image ...");
RecognizeTextOCRLocal(compVisClient, imgPath);
System.out.println("\nRecognizing handwritten text with OCR on a remote image ...");
RecognizeTextOCRFromUrl(compVisClient, remotePath);
}
private static void RecognizeTextOCRLocal(ComputerVisionClient client, String path) {
try {
File rawImage = new File(path);
byte[] imageBytes = Files.readAllBytes(rawImage.toPath());
OcrResult ocrResult = client.computerVision().recognizePrintedTextInStream()
.withDetectOrientation(true)
.withImage(imageBytes)
.withLanguage(OcrLanguages.EN)
.execute();
DisplayResults(ocrResult);
} catch (Exception e) {
System.out.println(e.getMessage());
e.printStackTrace();
}
}
private static void RecognizeTextOCRFromUrl(ComputerVisionClient client, String path) {
try {
OcrResult ocrResult = client.computerVision().recognizePrintedText()
.withDetectOrientation(true)
.withUrl(path)
.withLanguage(OcrLanguages.EN)
.execute();
DisplayResults(ocrResult);
} catch (Exception e) {
System.out.println(e.getMessage());
e.printStackTrace();
}
}
private static void DisplayResults(OcrResult ocrResult) {
System.out.println("Text: ");
System.out.println("Language: " + ocrResult.language());
System.out.println("Text angle: " + ocrResult.textAngle());
System.out.println("Orientation: " + ocrResult.orientation());
System.out.println("Text regions: ");
for (OcrRegion reg : ocrResult.regions()) {
System.out.println("Region bounding box: " + reg.boundingBox());
for (OcrLine line : reg.lines()) {
System.out.println("Line bounding box: " + line.boundingBox());
for (OcrWord word : line.words()) {
System.out.println("Word bounding box: " + word.boundingBox());
System.out.println("Text: " + word.text() + " ");
}
System.out.println();
}
System.out.println();
}
}
}
}
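// Example (assumed) environment setup before running this sample; the endpoint shape
// is illustrative only:
//   export AZURE_COMPUTERVISION_API_KEY=<your-key>
//   export AZURE_ENDPOINT=https://<your-resource>.cognitiveservices.azure.com/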
| [
"\"AZURE_COMPUTERVISION_API_KEY\"",
"\"AZURE_ENDPOINT\""
]
| []
| [
"AZURE_ENDPOINT",
"AZURE_COMPUTERVISION_API_KEY"
]
| [] | ["AZURE_ENDPOINT", "AZURE_COMPUTERVISION_API_KEY"] | java | 2 | 0 | |
aodh/tests/functional/gabbi/test_gabbi_live.py | #
# Copyright 2015 Red Hat. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A test module to exercise the Gnocchi API with gabbi.
This is designed to run against a real running web server (started by
devstack).
"""
import os
from gabbi import driver
import six.moves.urllib.parse as urlparse
TESTS_DIR = 'gabbits-live'
def load_tests(loader, tests, pattern):
"""Provide a TestSuite to the discovery process."""
aodh_url = os.getenv('AODH_SERVICE_URL')
if aodh_url:
parsed_url = urlparse.urlsplit(aodh_url)
prefix = parsed_url.path.rstrip('/') # turn it into a prefix
# NOTE(chdent): gabbi requires a port be passed or it will
# default to 8001, so we must dance a little dance to get
# the right ports. Probably gabbi needs to change.
# https://github.com/cdent/gabbi/issues/50
port = 443 if parsed_url.scheme == 'https' else 80
if parsed_url.port:
port = parsed_url.port
test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR)
return driver.build_tests(test_dir, loader,
host=parsed_url.hostname,
port=port,
prefix=prefix)
elif os.getenv('GABBI_LIVE_FAIL_IF_NO_TEST'):
raise RuntimeError('AODH_SERVICE_URL is not set')
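# Example invocation (the URL, port, and runner are assumptions for illustration):
#   AODH_SERVICE_URL=http://localhost:8042/ GABBI_LIVE_FAIL_IF_NO_TEST=1 \
#       python -m unittest aodh.tests.functional.gabbi.test_gabbi_live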
| []
| []
| [
"GABBI_LIVE_FAIL_IF_NO_TEST",
"AODH_SERVICE_URL"
]
| [] | ["GABBI_LIVE_FAIL_IF_NO_TEST", "AODH_SERVICE_URL"] | python | 2 | 0 | |
pkg/clicmd/root.go | // Copyright 2017 The kubecfg authors
//
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package clicmd
import (
"fmt"
"os"
"path/filepath"
"github.com/ksonnet/ksonnet/pkg/app"
"github.com/ksonnet/ksonnet/pkg/log"
"github.com/ksonnet/ksonnet/pkg/plugin"
"github.com/ksonnet/ksonnet/pkg/util/strings"
"github.com/pkg/errors"
"github.com/shomron/pflag"
"github.com/spf13/afero"
"github.com/spf13/cobra"
"github.com/spf13/viper"
// Register auth plugins
_ "k8s.io/client-go/plugin/pkg/client/auth"
)
const (
rootLong = `
You can use the ` + "`ks`" + ` commands to write, share, and deploy your Kubernetes
application configuration to remote clusters.
----
`
)
func runPlugin(fs afero.Fs, p plugin.Plugin, args []string) error {
env := []string{
fmt.Sprintf("KS_PLUGIN_DIR=%s", p.RootDir),
fmt.Sprintf("KS_PLUGIN_NAME=%s", p.Config.Name),
fmt.Sprintf("HOME=%s", os.Getenv("HOME")),
}
root, err := appRoot()
if err != nil {
return err
}
appConfig := filepath.Join(root, "app.yaml")
exists, err := afero.Exists(fs, appConfig)
if err != nil {
return err
}
if exists {
env = append(env, fmt.Sprintf("KS_APP_DIR=%s", root))
// TODO: make kube context or something similar available to the plugin
}
cmd := p.BuildRunCmd(env, args)
return cmd.Run()
}
// addEnvCmdFlags adds the flags that are common to the family of commands
// whose form is `[<env>|-f <file-name>]`, e.g., `apply` and `delete`.
func addEnvCmdFlags(cmd *cobra.Command) {
cmd.PersistentFlags().StringSliceP(flagComponent, shortComponent, nil, "Name of a specific component (multiple -c flags accepted, allows YAML, JSON, and Jsonnet)")
}
func appRoot() (string, error) {
return os.Getwd()
}
type earlyParseArgs struct {
command string
help bool
tlsSkipVerify bool
}
// parseCommand does an early parse of the command line and returns what will
// ultimately be recognized as the command by cobra, along with the help and
// TLS-skip-verify flags parsed so far.
func parseCommand(args []string) (earlyParseArgs, error) {
var parsed earlyParseArgs
fset := pflag.NewFlagSet("", pflag.ContinueOnError)
fset.ParseErrorsWhitelist.UnknownFlags = true
fset.BoolVarP(&parsed.help, "help", "h", false, "") // Needed to avoid pflag.ErrHelp
fset.BoolVar(&parsed.tlsSkipVerify, flagTLSSkipVerify, false, "")
if err := fset.Parse(args); err != nil {
return earlyParseArgs{}, err
}
if len(fset.Args()) == 0 {
return earlyParseArgs{}, nil
}
parsed.command = fset.Args()[0]
return parsed, nil
}
// checkUpgrade runs upgrade validations unless the user is running an excluded command.
// If upgrades are found to be necessary, they will be reported to the user.
func checkUpgrade(a app.App, cmd string) error {
skip := map[string]struct{}{
"init": struct{}{},
"upgrade": struct{}{},
"help": struct{}{},
"version": struct{}{},
"": struct{}{},
}
if _, ok := skip[cmd]; ok {
return nil
}
if a == nil {
return errors.Errorf("nil receiver")
}
	_, _ = a.CheckUpgrade() // NOTE we're suppressing any validation errors here
return nil
}
func NewRoot(appFs afero.Fs, wd string, args []string) (*cobra.Command, error) {
if appFs == nil {
appFs = afero.NewOsFs()
}
var a app.App
parsed, err := parseCommand(args)
if err != nil {
return nil, err
}
httpClient := app.NewHTTPClient(parsed.tlsSkipVerify)
cmds := []string{"init", "version", "help"}
switch {
// Commands that do not require a ksonnet application
case strings.InSlice(parsed.command, cmds), parsed.help:
a, err = app.Load(appFs, httpClient, wd, true)
case len(args) > 0:
a, err = app.Load(appFs, httpClient, wd, false)
default:
// noop
}
if err != nil {
return nil, err
}
if err := checkUpgrade(a, parsed.command); err != nil {
return nil, errors.Wrap(err, "checking if app needs upgrade")
}
rootCmd := &cobra.Command{
Use: "ks",
Short: `Configure your application to deploy to a Kubernetes cluster`,
Long: rootLong,
SilenceErrors: true,
SilenceUsage: true,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
flags := cmd.Flags()
verbosity, err := flags.GetCount(flagVerbose)
if err != nil {
return err
}
log.Init(verbosity, cmd.OutOrStderr())
return nil
},
Args: func(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
return cobra.NoArgs(cmd, args)
}
pluginName := args[0]
_, err := plugin.Find(appFs, pluginName)
if err != nil {
return cobra.NoArgs(cmd, args)
}
return nil
},
RunE: func(cmd *cobra.Command, args []string) error {
if len(args) == 0 {
return cmd.Help()
}
pluginName, args := args[0], args[1:]
p, err := plugin.Find(appFs, pluginName)
if err != nil {
return err
}
return runPlugin(appFs, p, args)
},
}
rootCmd.SetArgs(args)
rootCmd.PersistentFlags().CountP(flagVerbose, "v", "Increase verbosity. May be given multiple times.")
rootCmd.PersistentFlags().Set("logtostderr", "true")
rootCmd.PersistentFlags().Bool(flagTLSSkipVerify, false, "Skip verification of TLS server certificates")
viper.BindPFlag(flagTLSSkipVerify, rootCmd.PersistentFlags().Lookup(flagTLSSkipVerify))
rootCmd.AddCommand(newApplyCmd(a))
rootCmd.AddCommand(newComponentCmd(a))
rootCmd.AddCommand(newDeleteCmd(a))
rootCmd.AddCommand(newDiffCmd(a))
rootCmd.AddCommand(newEnvCmd(a))
rootCmd.AddCommand(newGenerateCmd(a))
rootCmd.AddCommand(newImportCmd(a))
rootCmd.AddCommand(newInitCmd(appFs, wd))
rootCmd.AddCommand(newModuleCmd(a))
rootCmd.AddCommand(newParamCmd(a))
rootCmd.AddCommand(newPkgCmd(a))
rootCmd.AddCommand(newPrototypeCmd(a))
rootCmd.AddCommand(newRegistryCmd(a))
rootCmd.AddCommand(newShowCmd(a))
rootCmd.AddCommand(newValidateCmd(a))
rootCmd.AddCommand(newUpgradeCmd(a))
rootCmd.AddCommand(newVersionCmd())
return rootCmd, nil
}
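// Illustrative usage sketch (assumption, not part of this file): a main package might
// wire the root command up roughly as
//
//	wd, _ := os.Getwd()
//	cmd, err := NewRoot(afero.NewOsFs(), wd, os.Args[1:])
//	if err == nil {
//		err = cmd.Execute()
//	}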
| [
"\"HOME\""
]
| []
| [
"HOME"
]
| [] | ["HOME"] | go | 1 | 0 | |
manage.py | #!/usr/bin/env python3
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Core.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
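# Example (assumed) invocations once Django and the project settings are available:
#   python manage.py migrate
#   python manage.py runserver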
| []
| []
| []
| [] | [] | python | 0 | 0 | |
core/dbt/events/functions.py |
from colorama import Style
from datetime import datetime
import dbt.events.functions as this # don't worry I hate it too.
from dbt.events.base_types import Cli, Event, File, ShowException, NodeInfo, Cache
from dbt.events.types import EventBufferFull, T_Event, MainReportVersion, EmptyLine
import dbt.flags as flags
# TODO this will need to move eventually
from dbt.logger import SECRET_ENV_PREFIX, make_log_dir_if_missing, GLOBAL_LOGGER
import json
import io
from io import StringIO, TextIOWrapper
import logbook
import logging
from logging import Logger
import sys
from logging.handlers import RotatingFileHandler
import os
import uuid
import threading
from typing import Any, Callable, Dict, List, Optional, Union
import dataclasses
from collections import deque
# create the global event history buffer with the default max size (10k)
# python 3.7 doesn't support type hints on globals, but mypy requires them. hence the ignore.
# TODO the flags module has not yet been resolved when this is created
global EVENT_HISTORY
EVENT_HISTORY = deque(maxlen=flags.EVENT_BUFFER_SIZE) # type: ignore
# create the global file logger with no configuration
global FILE_LOG
FILE_LOG = logging.getLogger('default_file')
null_handler = logging.NullHandler()
FILE_LOG.addHandler(null_handler)
# set up logger to go to stdout with defaults
# setup_event_logger will be called once args have been parsed
global STDOUT_LOG
STDOUT_LOG = logging.getLogger('default_stdout')
STDOUT_LOG.setLevel(logging.INFO)
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setLevel(logging.INFO)
STDOUT_LOG.addHandler(stdout_handler)
format_color = True
format_json = False
invocation_id: Optional[str] = None
def setup_event_logger(log_path, level_override=None):
# flags have been resolved, and log_path is known
global EVENT_HISTORY
EVENT_HISTORY = deque(maxlen=flags.EVENT_BUFFER_SIZE) # type: ignore
make_log_dir_if_missing(log_path)
this.format_json = flags.LOG_FORMAT == 'json'
# USE_COLORS can be None if the app just started and the cli flags
    # haven't been applied yet
this.format_color = True if flags.USE_COLORS else False
# TODO this default should live somewhere better
log_dest = os.path.join(log_path, 'dbt.log')
level = level_override or (logging.DEBUG if flags.DEBUG else logging.INFO)
# overwrite the STDOUT_LOG logger with the configured one
this.STDOUT_LOG = logging.getLogger('configured_std_out')
this.STDOUT_LOG.setLevel(level)
FORMAT = "%(message)s"
stdout_passthrough_formatter = logging.Formatter(fmt=FORMAT)
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setFormatter(stdout_passthrough_formatter)
stdout_handler.setLevel(level)
# clear existing stdout TextIOWrapper stream handlers
this.STDOUT_LOG.handlers = [
h for h in this.STDOUT_LOG.handlers
if not (hasattr(h, 'stream') and isinstance(h.stream, TextIOWrapper)) # type: ignore
]
this.STDOUT_LOG.addHandler(stdout_handler)
# overwrite the FILE_LOG logger with the configured one
this.FILE_LOG = logging.getLogger('configured_file')
this.FILE_LOG.setLevel(logging.DEBUG) # always debug regardless of user input
file_passthrough_formatter = logging.Formatter(fmt=FORMAT)
file_handler = RotatingFileHandler(
filename=log_dest,
encoding='utf8',
maxBytes=10 * 1024 * 1024, # 10 mb
backupCount=5
)
file_handler.setFormatter(file_passthrough_formatter)
file_handler.setLevel(logging.DEBUG) # always debug regardless of user input
this.FILE_LOG.handlers.clear()
this.FILE_LOG.addHandler(file_handler)
# used for integration tests
def capture_stdout_logs() -> StringIO:
capture_buf = io.StringIO()
stdout_capture_handler = logging.StreamHandler(capture_buf)
    stdout_capture_handler.setLevel(logging.DEBUG)
this.STDOUT_LOG.addHandler(stdout_capture_handler)
return capture_buf
# used for integration tests
def stop_capture_stdout_logs() -> None:
this.STDOUT_LOG.handlers = [
h for h in this.STDOUT_LOG.handlers
if not (hasattr(h, 'stream') and isinstance(h.stream, StringIO)) # type: ignore
]
def env_secrets() -> List[str]:
return [
v for k, v in os.environ.items()
if k.startswith(SECRET_ENV_PREFIX)
]
def scrub_secrets(msg: str, secrets: List[str]) -> str:
scrubbed = msg
for secret in secrets:
scrubbed = scrubbed.replace(secret, "*****")
return scrubbed
# returns a dictionary representation of the event fields. You must specify which of the
# available messages you would like to use (i.e. - e.message, e.cli_msg(), e.file_msg())
# used for constructing json formatted events. includes secrets which must be scrubbed at
# the usage site.
def event_to_serializable_dict(
e: T_Event, ts_fn: Callable[[datetime], str],
msg_fn: Callable[[T_Event], str]
) -> Dict[str, Any]:
data = dict()
node_info = dict()
log_line = dict()
try:
log_line = dataclasses.asdict(e, dict_factory=type(e).asdict)
except AttributeError:
event_type = type(e).__name__
raise Exception( # TODO this may hang async threads
f"type {event_type} is not serializable to json."
f" First make sure that the call sites for {event_type} match the type hints"
f" and if they do, you can override the dataclass method `asdict` in {event_type} in"
" types.py to define your own serialization function to a dictionary of valid json"
" types"
)
if isinstance(e, NodeInfo):
node_info = dataclasses.asdict(e.get_node_info())
for field, value in log_line.items(): # type: ignore[attr-defined]
if field not in ["code", "report_node_data"]:
data[field] = value
event_dict = {
'type': 'log_line',
'log_version': e.log_version,
'ts': ts_fn(e.get_ts()),
'pid': e.get_pid(),
'msg': msg_fn(e),
'level': e.level_tag(),
'data': data,
'invocation_id': e.get_invocation_id(),
'thread_name': e.get_thread_name(),
'node_info': node_info,
'code': e.code
}
return event_dict
# translates an Event to a completely formatted text-based log line
# you have to specify which message you want. (i.e. - e.message, e.cli_msg(), e.file_msg())
# type hinting everything as strings so we don't get any unintentional string conversions via str()
def create_info_text_log_line(e: T_Event, msg_fn: Callable[[T_Event], str]) -> str:
color_tag: str = '' if this.format_color else Style.RESET_ALL
ts: str = e.get_ts().strftime("%H:%M:%S")
scrubbed_msg: str = scrub_secrets(msg_fn(e), env_secrets())
log_line: str = f"{color_tag}{ts} {scrubbed_msg}"
return log_line
def create_debug_text_log_line(e: T_Event, msg_fn: Callable[[T_Event], str]) -> str:
log_line: str = ''
# Create a separator if this is the beginning of an invocation
if type(e) == MainReportVersion:
separator = 30 * '='
log_line = f'\n\n{separator} {e.get_ts()} | {get_invocation_id()} {separator}\n'
color_tag: str = '' if this.format_color else Style.RESET_ALL
ts: str = e.get_ts().strftime("%H:%M:%S.%f")
scrubbed_msg: str = scrub_secrets(msg_fn(e), env_secrets())
level: str = e.level_tag() if len(e.level_tag()) == 5 else f"{e.level_tag()} "
thread = ''
if threading.current_thread().getName():
thread_name = threading.current_thread().getName()
thread_name = thread_name[:10]
thread_name = thread_name.ljust(10, ' ')
thread = f' [{thread_name}]:'
log_line = log_line + f"{color_tag}{ts} [{level}]{thread} {scrubbed_msg}"
return log_line
# translates an Event to a completely formatted json log line
# you have to specify which message you want. (i.e. - e.message(), e.cli_msg(), e.file_msg())
def create_json_log_line(e: T_Event, msg_fn: Callable[[T_Event], str]) -> Optional[str]:
if type(e) == EmptyLine:
return None # will not be sent to logger
# using preformatted string instead of formatting it here to be extra careful about timezone
values = event_to_serializable_dict(e, lambda _: e.get_ts_rfc3339(), lambda x: msg_fn(x))
raw_log_line = json.dumps(values, sort_keys=True)
return scrub_secrets(raw_log_line, env_secrets())
# calls create_stdout_text_log_line() or create_json_log_line() according to logger config
def create_log_line(
e: T_Event,
msg_fn: Callable[[T_Event], str],
file_output=False
) -> Optional[str]:
if this.format_json:
return create_json_log_line(e, msg_fn) # json output, both console and file
elif file_output is True or flags.DEBUG:
return create_debug_text_log_line(e, msg_fn) # default file output
else:
return create_info_text_log_line(e, msg_fn) # console output
# allows for reuse of this obnoxious if else tree.
# do not use for exceptions; it doesn't pass along exc_info, stack_info, or extra
def send_to_logger(l: Union[Logger, logbook.Logger], level_tag: str, log_line: str):
if not log_line:
return
if level_tag == 'test':
        # TODO after implementing #3977 send to new test level
l.debug(log_line)
elif level_tag == 'debug':
l.debug(log_line)
elif level_tag == 'info':
l.info(log_line)
elif level_tag == 'warn':
l.warning(log_line)
elif level_tag == 'error':
l.error(log_line)
else:
raise AssertionError(
f"While attempting to log {log_line}, encountered the unhandled level: {level_tag}"
)
def send_exc_to_logger(
l: Logger,
level_tag: str,
log_line: str,
exc_info=True,
stack_info=False,
extra=False
):
if level_tag == 'test':
        # TODO after implementing #3977 send to new test level
l.debug(
log_line,
exc_info=exc_info,
stack_info=stack_info,
extra=extra
)
elif level_tag == 'debug':
l.debug(
log_line,
exc_info=exc_info,
stack_info=stack_info,
extra=extra
)
elif level_tag == 'info':
l.info(
log_line,
exc_info=exc_info,
stack_info=stack_info,
extra=extra
)
elif level_tag == 'warn':
l.warning(
log_line,
exc_info=exc_info,
stack_info=stack_info,
extra=extra
)
elif level_tag == 'error':
l.error(
log_line,
exc_info=exc_info,
stack_info=stack_info,
extra=extra
)
else:
raise AssertionError(
f"While attempting to log {log_line}, encountered the unhandled level: {level_tag}"
)
# top-level method for accessing the new eventing system
# this is where all the side effects happen branched by event type
# (i.e. - mutating the event history, printing to stdout, logging
# to files, etc.)
def fire_event(e: Event) -> None:
# skip logs when `--log-cache-events` is not passed
if isinstance(e, Cache) and not flags.LOG_CACHE_EVENTS:
return
    # if this event would completely fill the event history deque,
    # fire a warning that old events are now being dropped
global EVENT_HISTORY
if len(EVENT_HISTORY) == (flags.EVENT_BUFFER_SIZE - 1):
EVENT_HISTORY.append(e)
fire_event(EventBufferFull())
else:
EVENT_HISTORY.append(e)
# backwards compatibility for plugins that require old logger (dbt-rpc)
if flags.ENABLE_LEGACY_LOGGER:
# using Event::message because the legacy logger didn't differentiate messages by
# destination
log_line = create_log_line(e, msg_fn=lambda x: x.message())
if log_line:
send_to_logger(GLOBAL_LOGGER, e.level_tag(), log_line)
return # exit the function to avoid using the current logger as well
# always logs debug level regardless of user input
if isinstance(e, File):
log_line = create_log_line(e, msg_fn=lambda x: x.file_msg(), file_output=True)
# doesn't send exceptions to exception logger
if log_line:
send_to_logger(FILE_LOG, level_tag=e.level_tag(), log_line=log_line)
if isinstance(e, Cli):
# explicitly checking the debug flag here so that potentially expensive-to-construct
# log messages are not constructed if debug messages are never shown.
if e.level_tag() == 'debug' and not flags.DEBUG:
return # eat the message in case it was one of the expensive ones
log_line = create_log_line(e, msg_fn=lambda x: x.cli_msg())
if log_line:
if not isinstance(e, ShowException):
send_to_logger(STDOUT_LOG, level_tag=e.level_tag(), log_line=log_line)
# CliEventABC and ShowException
else:
send_exc_to_logger(
STDOUT_LOG,
level_tag=e.level_tag(),
log_line=log_line,
exc_info=e.exc_info,
stack_info=e.stack_info,
extra=e.extra
)
def get_invocation_id() -> str:
global invocation_id
if invocation_id is None:
invocation_id = str(uuid.uuid4())
return invocation_id
def set_invocation_id() -> None:
# This is primarily for setting the invocation_id for separate
# commands in the dbt servers. It shouldn't be necessary for the CLI.
global invocation_id
invocation_id = str(uuid.uuid4())
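# --- Illustrative usage sketch (not part of the original module). In dbt itself
# setup_event_logger is only called after the CLI flags have been resolved; the
# "logs" directory here is an assumption for demonstration only.
if __name__ == "__main__":
    setup_event_logger(log_path="logs")
    fire_event(EmptyLine())        # EmptyLine is skipped by the json formatter
    print(get_invocation_id())     # stable uuid for this process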
| []
| []
| []
| [] | [] | python | 0 | 0 | |
tfx/components/example_gen/base_example_gen_executor_test.py | # Lint as: python2, python3
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.components.example_gen.base_example_gen_executor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
import apache_beam as beam
import tensorflow as tf
from google.protobuf import json_format
from tfx.components.example_gen import base_example_gen_executor
from tfx.proto import example_gen_pb2
from tfx.types import artifact_utils
from tfx.types import standard_artifacts
@beam.ptransform_fn
def _TestInputSourceToExamplePTransform(
pipeline,
input_dict, # pylint: disable=unused-argument
exec_properties, # pylint: disable=unused-argument
split_pattern):
mock_examples = []
size = 0
if split_pattern == 'single/*':
size = 30000
elif split_pattern == 'train/*':
size = 20000
elif split_pattern == 'eval/*':
size = 10000
assert size != 0
for i in range(size):
feature = {}
feature['i'] = tf.train.Feature() if random.randrange(
10) == 0 else tf.train.Feature(
int64_list=tf.train.Int64List(value=[i]))
feature['f'] = tf.train.Feature() if random.randrange(
10) == 0 else tf.train.Feature(
float_list=tf.train.FloatList(value=[float(i)]))
feature['s'] = tf.train.Feature() if random.randrange(
10) == 0 else tf.train.Feature(
bytes_list=tf.train.BytesList(value=[tf.compat.as_bytes(str(i))]))
example_proto = tf.train.Example(
features=tf.train.Features(feature=feature))
mock_examples.append(example_proto)
return pipeline | beam.Create(mock_examples)
class TestExampleGenExecutor(base_example_gen_executor.BaseExampleGenExecutor):
def GetInputSourceToExamplePTransform(self):
return _TestInputSourceToExamplePTransform
class BaseExampleGenExecutorTest(tf.test.TestCase):
def setUp(self):
super(BaseExampleGenExecutorTest, self).setUp()
output_data_dir = os.path.join(
os.environ.get('TEST_UNDECLARED_OUTPUTS_DIR', self.get_temp_dir()),
self._testMethodName)
# Create output dict.
examples = standard_artifacts.Examples()
examples.uri = output_data_dir
examples.split_names = artifact_utils.encode_split_names(['train', 'eval'])
self._output_dict = {'examples': [examples]}
self._train_output_file = os.path.join(examples.uri, 'train',
'data_tfrecord-00000-of-00001.gz')
self._eval_output_file = os.path.join(examples.uri, 'eval',
'data_tfrecord-00000-of-00001.gz')
def testDoInputSplit(self):
# Create exec proterties.
exec_properties = {
'input_config':
json_format.MessageToJson(
example_gen_pb2.Input(splits=[
example_gen_pb2.Input.Split(
name='train', pattern='train/*'),
example_gen_pb2.Input.Split(name='eval', pattern='eval/*')
]),
preserving_proto_field_name=True),
'output_config':
json_format.MessageToJson(
example_gen_pb2.Output(), preserving_proto_field_name=True)
}
# Run executor.
example_gen = TestExampleGenExecutor()
example_gen.Do({}, self._output_dict, exec_properties)
# Check example gen outputs.
self.assertTrue(tf.io.gfile.exists(self._train_output_file))
self.assertTrue(tf.io.gfile.exists(self._eval_output_file))
# Input train split is bigger than eval split.
self.assertGreater(
tf.io.gfile.GFile(self._train_output_file).size(),
tf.io.gfile.GFile(self._eval_output_file).size())
def testDoOutputSplit(self):
# Create exec proterties.
exec_properties = {
'input_config':
json_format.MessageToJson(
example_gen_pb2.Input(splits=[
example_gen_pb2.Input.Split(
name='single', pattern='single/*'),
]),
preserving_proto_field_name=True),
'output_config':
json_format.MessageToJson(
example_gen_pb2.Output(
split_config=example_gen_pb2.SplitConfig(splits=[
example_gen_pb2.SplitConfig.Split(
name='train', hash_buckets=2),
example_gen_pb2.SplitConfig.Split(
name='eval', hash_buckets=1)
])))
}
# Run executor.
example_gen = TestExampleGenExecutor()
example_gen.Do({}, self._output_dict, exec_properties)
# Check example gen outputs.
self.assertTrue(tf.io.gfile.exists(self._train_output_file))
self.assertTrue(tf.io.gfile.exists(self._eval_output_file))
# Output split ratio: train:eval=2:1.
self.assertGreater(
tf.io.gfile.GFile(self._train_output_file).size(),
tf.io.gfile.GFile(self._eval_output_file).size())
if __name__ == '__main__':
tf.test.main()
| []
| []
| [
"TEST_UNDECLARED_OUTPUTS_DIR"
]
| [] | ["TEST_UNDECLARED_OUTPUTS_DIR"] | python | 1 | 0 | |
src/APIs/youtube.py | import os
import httplib2
import google_auth_oauthlib.flow
import googleapiclient.discovery
from typing import Any
from discord import Colour,Embed,utils
class YouTubeAPI(object):
youtube: Any
id_all_videos: list = []
def __init__(self,bot):
self.bot = bot
self.__client_secrets_file = self.bot.config["YOUTUBE"]["client_secrets_file"]
self.__api_service_name = self.bot.config["YOUTUBE"]["api_service_name"]
self.__api_version = self.bot.config["YOUTUBE"]["api_version"]
self.__scopes = [self.bot.config["YOUTUBE"]["scopes"]]
self.__channel_id = self.bot.config["YOUTUBE"]["channel_id"]
def append_videos_ids(self):
r = self.requests(max_r=9999999)
for item in r["items"]:
self.id_all_videos.append(item["contentDetails"]["upload"]["videoId"])
def create(self):
os.environ["OAUTHLIB_INSECURE_TRANSPORT"] = "1"
flow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_secrets_file(self.__client_secrets_file,self.__scopes)
credentials = flow.run_console()
self.youtube = googleapiclient.discovery.build(self.__api_service_name,self.__api_version,credentials=credentials)
def requests(self,max_r=1,p="snippet,contentDetails,id"):
""" request_youtube( ) -> Send a request to youtube for will get a new videos. """
request = self.youtube.activities().list(part=p,channelId=self.__channel_id,maxResults=max_r)
try:
response = request.execute()
except httplib2.error.ServerNotFoundError:
pass
else:
return response
async def video_notification_system(self):
        # YouTube video notification system
response = self.requests()
for _id in self.id_all_videos:
if _id == response["items"][0]["contentDetails"]["upload"]["videoId"]:
                return
await self.send_notification(response)
self.id_all_videos.append(response["items"][0]["contentDetails"]["upload"]["videoId"])
async def send_notification(self,r):
for guild in self.bot.guilds:
try:
self.bot.guilds_data[str(guild.id)]["channels_ID"]["video_notif"]
except KeyError:
pass
else:
videos_role = utils.get(guild.roles,id=self.bot.guilds_data[str(guild.id)]["roles"]["🎬"])
channel_videos = self.bot.get_channel(self.bot.guilds_data[str(guild.id)]["channels_ID"]["video_notif"])
self.last_video = r["items"][0]["snippet"]["title"]
video_message = Embed(title="> Un nouveau `🔴 Live`",colour=Colour.from_rgb(255,0,0))
video_message.add_field(name=f"{r['items'][0]['snippet']['channelTitle']} vien de sortir une vidéo !",value=f"{videos_role.mention}\nhttps://youtu.be/{r['items'][0]['contentDetails']['upload']['videoId']}")
video_message.set_author(name=guild.owner.name,icon_url=guild.owner.avatar_url)
await channel_videos.send(embed=video_message)
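# Illustrative usage sketch (assumption: `bot` is an already-configured Discord client
# exposing the `config` and `guilds_data` attributes referenced above):
#
#     yt = YouTubeAPI(bot)
#     yt.create()               # one-time OAuth console flow
#     yt.append_videos_ids()    # seed the cache of already-published video ids
#     # then schedule `await yt.video_notification_system()` from a periodic task loop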
| []
| []
| [
"OAUTHLIB_INSECURE_TRANSPORT"
]
| [] | ["OAUTHLIB_INSECURE_TRANSPORT"] | python | 1 | 0 | |
test/function_tests/utils/utils.go | // Copyright (c) 2020 Red Hat, Inc.
// Copyright Contributors to the Open Cluster Management project
package utils
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"os/user"
"path"
"path/filepath"
"strconv"
"time"
"github.com/Masterminds/semver/v3"
"github.com/ghodss/yaml"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
var (
// KubeClient ...
KubeClient = NewKubeClient("", "", "")
// DynamicKubeClient ...
DynamicKubeClient = NewKubeClientDynamic("", "", "")
// ImageOverridesCMBadImageName ...
ImageOverridesCMBadImageName = "bad-image-ref"
// GVRCustomResourceDefinition ...
GVRCustomResourceDefinition = schema.GroupVersionResource{
Group: "apiextensions.k8s.io",
Version: "v1",
Resource: "customresourcedefinitions",
}
// GVRClusterManager ...
GVRClusterManager = schema.GroupVersionResource{
Group: "operator.open-cluster-management.io",
Version: "v1",
Resource: "clustermanagers",
}
// GVRObservability ...
GVRObservability = schema.GroupVersionResource{
Group: "observability.open-cluster-management.io",
Version: "v1beta2",
Resource: "multiclusterobservabilities",
}
// GVRMultiClusterEngine ...
GVRMultiClusterEngine = schema.GroupVersionResource{
Group: "multicluster.openshift.io",
Version: "v1alpha1",
Resource: "multiclusterengines",
}
// GVRMultiClusterHub ...
GVRMultiClusterHub = schema.GroupVersionResource{
Group: "operator.open-cluster-management.io",
Version: "v1",
Resource: "multiclusterhubs",
}
// GVRAppSub ...
GVRAppSub = schema.GroupVersionResource{
Group: "apps.open-cluster-management.io",
Version: "v1",
Resource: "subscriptions",
}
// GVRHiveConfig ...
GVRHiveConfig = schema.GroupVersionResource{
Group: "hive.openshift.io",
Version: "v1",
Resource: "hiveconfigs",
}
// GVRSub ...
GVRSub = schema.GroupVersionResource{
Group: "operators.coreos.com",
Version: "v1alpha1",
Resource: "subscriptions",
}
// GVROperatorGroup ...
GVROperatorGroup = schema.GroupVersionResource{
Group: "operators.coreos.com",
Version: "v1",
Resource: "operatorgroups",
}
// GVRCSV ...
GVRCSV = schema.GroupVersionResource{
Group: "operators.coreos.com",
Version: "v1alpha1",
Resource: "clusterserviceversions",
}
// GVRHelmRelease ...
GVRHelmRelease = schema.GroupVersionResource{
Group: "apps.open-cluster-management.io",
Version: "v1",
Resource: "helmreleases",
}
// GVRInstallPlan ...
GVRInstallPlan = schema.GroupVersionResource{
Group: "operators.coreos.com",
Version: "v1alpha1",
Resource: "installplans",
}
// GVRDeployment ...
GVRDeployment = schema.GroupVersionResource{
Group: "apps",
Version: "v1",
Resource: "deployments",
}
// GVRManagedCluster
GVRManagedCluster = schema.GroupVersionResource{
Group: "cluster.open-cluster-management.io",
Version: "v1",
Resource: "managedclusters",
}
// GVRKlusterletAddonConfig
GVRKlusterletAddonConfig = schema.GroupVersionResource{
Group: "agent.open-cluster-management.io",
Version: "v1",
Resource: "klusterletaddonconfigs",
}
// GVRBareMetalAsset
GVRBareMetalAsset = schema.GroupVersionResource{
Group: "inventory.open-cluster-management.io",
Version: "v1alpha1",
Resource: "baremetalassets",
}
// GVRDiscoveryConfig
GVRDiscoveryConfig = schema.GroupVersionResource{
Group: "discovery.open-cluster-management.io",
Version: "v1alpha1",
Resource: "discoveryconfigs",
}
// DefaultImageRegistry ...
DefaultImageRegistry = "quay.io/stolostron"
// DefaultImagePullSecretName ...
DefaultImagePullSecretName = "multiclusterhub-operator-pull-secret"
// MCHName ...
MCHName = "multiclusterhub"
// MCHNamespace ...
MCHNamespace = "open-cluster-management"
// MCHPullSecretName ...
MCHPullSecretName = os.Getenv("pullSecret")
// MCHRepoName ...
MCHRepoName = "multiclusterhub-repo"
// MCHOperatorName ...
MCHOperatorName = "multiclusterhub-operator"
// OCMSubscriptionName ...
OCMSubscriptionName = os.Getenv("name")
// HiveConfigName ...
HiveConfigName = "hive"
// AppSubName console-chart-sub where clusterset pause is set
AppSubName = "console-chart-sub"
// SubList contains the list of subscriptions to delete
SubList = [...]string{
OCMSubscriptionName,
"hive-operator-alpha-community-operators-openshift-marketplace",
"multicluster-operators-subscription-alpha-community-operators-openshift-marketplace",
}
// AppSubSlice ...
AppSubSlice = [...]string{"application-chart-sub", "assisted-service-sub",
"console-chart-sub", "policyreport-sub", "discovery-operator-sub",
"grc-sub", "management-ingress-sub",
"rcm-sub", "search-prod-sub"}
// CSVName ...
CSVName = "advanced-cluster-management"
// WaitInMinutesDefault ...
WaitInMinutesDefault = 22
// DisableHubSelfManagementString ...
DisableHubSelfManagementString = "disableHubSelfManagement"
)
// GetWaitInMinutes...
func GetWaitInMinutes() int {
waitInMinutesAsString := os.Getenv("waitInMinutes")
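	// Example (assumption, not from the original file): exporting waitInMinutes=30
	// before running the suite extends the Eventually timeouts below to 30 minutes.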
if waitInMinutesAsString == "" {
return WaitInMinutesDefault
}
waitInMinutesAsInt, err := strconv.Atoi(waitInMinutesAsString)
if err != nil {
return WaitInMinutesDefault
}
return waitInMinutesAsInt
}
func runCleanUpScript() bool {
runCleanUpScript, _ := strconv.ParseBool(os.Getenv("runCleanUpScript"))
return runCleanUpScript
}
// CreateNewUnstructured creates resources by using gvr & obj, will get object after create.
func CreateNewUnstructured(
clientHubDynamic dynamic.Interface,
gvr schema.GroupVersionResource,
obj *unstructured.Unstructured,
name, namespace string,
) {
ns := clientHubDynamic.Resource(gvr).Namespace(namespace)
Expect(ns.Create(context.TODO(), obj, metav1.CreateOptions{})).NotTo(BeNil())
Expect(ns.Get(context.TODO(), name, metav1.GetOptions{})).NotTo(BeNil())
}
// CreateNewConfigMap ...
func CreateNewConfigMap(cm *corev1.ConfigMap, namespace string) error {
_, err := KubeClient.CoreV1().ConfigMaps(namespace).Create(context.TODO(), cm, metav1.CreateOptions{})
return err
}
// DeleteConfigMapIfExists ...
func DeleteConfigMapIfExists(cmName, namespace string) error {
_, err := KubeClient.CoreV1().ConfigMaps(namespace).Get(context.TODO(), cmName, metav1.GetOptions{})
if err == nil {
return KubeClient.CoreV1().ConfigMaps(namespace).Delete(context.TODO(), cmName, metav1.DeleteOptions{})
}
return nil
}
// DeleteIfExists deletes resources by using gvr, name, and namespace.
// Will wait for deletion to complete by using eventually
func DeleteIfExists(clientHubDynamic dynamic.Interface, gvr schema.GroupVersionResource, name, namespace string, wait bool) {
ns := clientHubDynamic.Resource(gvr).Namespace(namespace)
if _, err := ns.Get(context.TODO(), name, metav1.GetOptions{}); err != nil {
Expect(errors.IsNotFound(err)).To(Equal(true))
return
}
Expect(func() error {
// possibly already got deleted
err := ns.Delete(context.TODO(), name, metav1.DeleteOptions{})
if err != nil && !errors.IsNotFound(err) {
return err
}
return nil
}()).To(BeNil())
By("Wait for deletion")
Eventually(func() error {
var err error
_, err = ns.Get(context.TODO(), name, metav1.GetOptions{})
if err != nil && !errors.IsNotFound(err) {
return err
}
if err == nil {
if wait {
return fmt.Errorf("found object %s in namespace %s after deletion", name, namespace)
}
return nil
}
return nil
}, GetWaitInMinutes()*60, 1).Should(BeNil())
}
// NewKubeClient returns a kube client
func NewKubeClient(url, kubeconfig, context string) kubernetes.Interface {
klog.V(5).Infof("Create kubeclient for url %s using kubeconfig path %s\n", url, kubeconfig)
config, err := LoadConfig(url, kubeconfig, context)
if err != nil {
panic(err)
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
panic(err)
}
return clientset
}
// NewKubeClientDynamic returns a dynamic kube client
func NewKubeClientDynamic(url, kubeconfig, context string) dynamic.Interface {
klog.V(5).Infof(
"Create kubeclient dynamic for url %s using kubeconfig path %s\n",
url,
kubeconfig,
)
config, err := LoadConfig(url, kubeconfig, context)
if err != nil {
panic(err)
}
clientset, err := dynamic.NewForConfig(config)
if err != nil {
panic(err)
}
return clientset
}
// LoadConfig loads kubeconfig
func LoadConfig(url, kubeconfig, context string) (*rest.Config, error) {
if kubeconfig == "" {
kubeconfig = os.Getenv("KUBECONFIG")
}
klog.V(5).Infof("Kubeconfig path %s\n", kubeconfig)
// If we have an explicit indication of where the kubernetes config lives, read that.
if kubeconfig != "" {
if context == "" {
return clientcmd.BuildConfigFromFlags(url, kubeconfig)
}
return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
&clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeconfig},
&clientcmd.ConfigOverrides{
CurrentContext: context,
}).ClientConfig()
}
// If not, try the in-cluster config.
if c, err := rest.InClusterConfig(); err == nil {
return c, nil
}
// If no in-cluster config, try the default location in the user's home directory.
if usr, err := user.Current(); err == nil {
klog.V(5).Infof("clientcmd.BuildConfigFromFlags for url %s using %s\n", url, filepath.Join(usr.HomeDir, ".kube", "config"))
if c, err := clientcmd.BuildConfigFromFlags("", filepath.Join(usr.HomeDir, ".kube", "config")); err == nil {
return c, nil
}
}
return nil, fmt.Errorf("could not create a valid kubeconfig")
}
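
// Illustrative note (a sketch, not an additional code path): LoadConfig resolves the kubeconfig in the
// order explicit path -> KUBECONFIG environment variable -> in-cluster config -> ~/.kube/config.
// A caller that just wants "whatever the environment provides" can therefore pass empty strings:
//
//	cfg, err := LoadConfig("", "", "")
//	if err != nil {
//		panic(err)
//	}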
// IsOwner checks whether obj is owned by owner; obj can be either *unstructured.Unstructured or *metav1.ObjectMeta.
func IsOwner(owner *unstructured.Unstructured, obj interface{}) bool {
if obj == nil {
return false
}
var owners []metav1.OwnerReference
objMeta, ok := obj.(*metav1.ObjectMeta)
if ok {
owners = objMeta.GetOwnerReferences()
} else {
if objUnstructured, ok := obj.(*unstructured.Unstructured); ok {
owners = objUnstructured.GetOwnerReferences()
} else {
klog.Error("Failed to get owners")
return false
}
}
for _, ownerRef := range owners {
if _, ok := owner.Object["metadata"]; !ok {
klog.Error("no meta")
continue
}
meta, ok := owner.Object["metadata"].(map[string]interface{})
if !ok || meta == nil {
klog.Error("no meta map")
continue
}
name, ok := meta["name"].(string)
if !ok || name == "" {
klog.Error("failed to get name")
continue
}
if ownerRef.Kind == owner.Object["kind"] && ownerRef.Name == name {
return true
}
}
return false
}
// CreateMCHNotManaged ...
func CreateMCHNotManaged() *unstructured.Unstructured {
mch := NewMultiClusterHub(MCHName, MCHNamespace, "", true)
CreateNewUnstructured(DynamicKubeClient, GVRMultiClusterHub, mch, MCHName, MCHNamespace)
return mch
}
// CreateMCHImageOverridesAnnotation ...
func CreateMCHImageOverridesAnnotation(imageOverridesConfigmapName string) *unstructured.Unstructured {
mch := NewMultiClusterHub(MCHName, MCHNamespace, imageOverridesConfigmapName, true)
CreateNewUnstructured(DynamicKubeClient, GVRMultiClusterHub, mch, MCHName, MCHNamespace)
return mch
}
// CreateDefaultMCH creates a default MultiClusterHub CR and returns it
func CreateDefaultMCH() *unstructured.Unstructured {
mch := NewMultiClusterHub(MCHName, MCHNamespace, "", false)
CreateNewUnstructured(DynamicKubeClient, GVRMultiClusterHub, mch, MCHName, MCHNamespace)
return mch
}
// GetDeploymentLabels returns the labels on deployment d
func GetDeploymentLabels(d string) (map[string]string, error) {
deploy, err := KubeClient.AppsV1().Deployments(MCHNamespace).Get(context.TODO(), d, metav1.GetOptions{})
if err != nil {
return nil, err
}
return deploy.GetLabels(), nil
}
// BrickMCHRepo modifies the multiclusterhub-repo deployment so it becomes unhealthy
func BrickMCHRepo() error {
By("- Breaking mch repo")
deploy, err := KubeClient.AppsV1().Deployments(MCHNamespace).Get(context.TODO(), MCHRepoName, metav1.GetOptions{})
if err != nil {
return err
}
// Add non-existent nodeSelector so the pod isn't scheduled
deploy.Spec.Template.Spec.NodeSelector = map[string]string{"schedule": "never"}
_, err = KubeClient.AppsV1().Deployments(MCHNamespace).Update(context.TODO(), deploy, metav1.UpdateOptions{})
if err != nil {
return err
}
if err = waitForUnavailable(MCHRepoName, time.Duration(GetWaitInMinutes())*time.Minute); err != nil {
return err
}
return nil
}
// FixMCHRepo deletes the multiclusterhub-repo deployment so it can be recreated by the installer
func FixMCHRepo() error {
By("- Repairing mch-repo")
return KubeClient.AppsV1().Deployments(MCHNamespace).Delete(context.TODO(), MCHRepoName, metav1.DeleteOptions{})
}
// DeleteMCHRepo deletes the multiclusterhub-repo deployment
func DeleteMCHRepo() error {
return KubeClient.AppsV1().Deployments(MCHNamespace).Delete(context.TODO(), MCHRepoName, metav1.DeleteOptions{})
}
// PauseMCH sets the mch-pause=true label on the MultiClusterHub
func PauseMCH() error {
mch, err := DynamicKubeClient.Resource(GVRMultiClusterHub).Namespace(MCHNamespace).Get(context.TODO(), MCHName, metav1.GetOptions{})
if err != nil {
return err
}
labels := mch.GetLabels()
if labels == nil {
labels = map[string]string{"mch-pause": "true"}
} else {
labels["mch-pause"] = "true"
}
mch.SetLabels(labels)
_, err = DynamicKubeClient.Resource(GVRMultiClusterHub).Namespace(MCHNamespace).Update(context.TODO(), mch, metav1.UpdateOptions{})
return err
}
// UnpauseMCH sets the mch-pause=false label on the MultiClusterHub
func UnpauseMCH() error {
mch, err := DynamicKubeClient.Resource(GVRMultiClusterHub).Namespace(MCHNamespace).Get(context.TODO(), MCHName, metav1.GetOptions{})
if err != nil {
return err
}
labels := mch.GetLabels()
if labels == nil {
labels = map[string]string{"mch-pause": "false"}
} else {
labels["mch-pause"] = "false"
}
mch.SetLabels(labels)
_, err = DynamicKubeClient.Resource(GVRMultiClusterHub).Namespace(MCHNamespace).Update(context.TODO(), mch, metav1.UpdateOptions{})
return err
}
// BrickCLC breaks the cluster-lifecycle deployment by pointing it at a bad image so it becomes unhealthy
func BrickCLC() (string, error) {
By("- Breaking cluster-lifecycle")
oldImage, err := UpdateDeploymentImage("cluster-lifecycle", "bad-image")
if err != nil {
return "", err
}
err = waitForUnavailable("cluster-lifecycle", time.Duration(GetWaitInMinutes())*time.Minute)
return oldImage, err
}
// FixCLC restores the cluster-lifecycle deployment image so the deployment can become healthy again
func FixCLC(image string) error {
By("- Repairing cluster-lifecycle")
_, err := UpdateDeploymentImage("cluster-lifecycle", image)
if err != nil {
return err
}
err = waitForAvailable("cluster-lifecycle", time.Duration(GetWaitInMinutes())*time.Minute)
return err
}
// UpdateDeploymentImage updates the deployment image
func UpdateDeploymentImage(dName string, image string) (string, error) {
deploy, err := KubeClient.AppsV1().Deployments(MCHNamespace).Get(context.TODO(), dName, metav1.GetOptions{})
if err != nil {
return "", err
}
originalImage := deploy.Spec.Template.Spec.Containers[0].Image
deploy.Spec.Template.Spec.Containers[0].Image = image
_, err = KubeClient.AppsV1().Deployments(MCHNamespace).Update(context.TODO(), deploy, metav1.UpdateOptions{})
return originalImage, err
}
// waitForUnavailable waits for the deployment to go unready, with timeout
func waitForUnavailable(dName string, timeout time.Duration) error {
deadline := time.Now().Add(timeout)
for time.Now().Before(deadline) {
deploy, err := KubeClient.AppsV1().Deployments(MCHNamespace).Get(context.TODO(), dName, metav1.GetOptions{})
if err != nil {
return err
}
if deploy.Status.UnavailableReplicas > 0 {
time.Sleep(10 * time.Second)
return nil
}
time.Sleep(2 * time.Second)
}
return fmt.Errorf("Deploy failed to become unready after %s", timeout)
}
// waitForAvailable waits for the deployment to be available, with timeout
func waitForAvailable(dName string, timeout time.Duration) error {
deadline := time.Now().Add(timeout)
for time.Now().Before(deadline) {
deploy, err := KubeClient.AppsV1().Deployments(MCHNamespace).Get(context.TODO(), dName, metav1.GetOptions{})
if err != nil {
return err
}
if deploy.Status.UnavailableReplicas == 0 {
return nil
}
time.Sleep(2 * time.Second)
}
return fmt.Errorf("Repo failed to become unready after %s", timeout)
}
// GetMCHStatus gets the mch object and parses its status
func GetMCHStatus() (map[string]interface{}, error) {
mch, err := DynamicKubeClient.Resource(GVRMultiClusterHub).Namespace(MCHNamespace).Get(context.TODO(), MCHName, metav1.GetOptions{})
if err != nil {
return nil, err
}
status, ok := mch.Object["status"].(map[string]interface{})
if !ok || status == nil {
return nil, fmt.Errorf("MultiClusterHub: %s has no 'status' map", mch.GetName())
}
return status, nil
}
// IsMCHSelfManaged returns the opposite of `spec.disableHubSelfManagement`
func IsMCHSelfManaged() (bool, error) {
mch, err := DynamicKubeClient.Resource(GVRMultiClusterHub).Namespace(MCHNamespace).Get(context.TODO(), MCHName, metav1.GetOptions{})
if err != nil {
return true, err
}
spec, ok := mch.Object["spec"].(map[string]interface{})
if !ok || spec == nil {
return true, fmt.Errorf("MultiClusterHub: %s has no 'spec' map", mch.GetName())
}
disableHubSelfManagement, ok := spec[DisableHubSelfManagementString]
if !ok || disableHubSelfManagement == nil {
return true, nil // if spec not set, default to managed
}
selfManaged := !(disableHubSelfManagement.(bool))
return selfManaged, nil
}
// findPhase reports whether the hub status has the desired phase and returns an error if not
func findPhase(status map[string]interface{}, wantPhase string) error {
if _, ok := status["phase"]; !ok {
return fmt.Errorf("MCH status has no 'phase' field")
}
if phase := status["phase"]; phase != wantPhase {
return fmt.Errorf("MCH phase equals `%s`, expected `%s`", phase, wantPhase)
}
return nil
}
// ValidateMCHDegraded validates that the installer operator responds appropriately when the installed
// components go into a degraded state after a successful install
func ValidateMCHDegraded() error {
status, err := GetMCHStatus()
if err != nil {
return err
}
// Ensuring MCH is in 'pending' phase
if err := findPhase(status, "Pending"); err != nil {
return err
}
// Ensuring hub condition shows installation as incomplete
if err := FindCondition(status, "Complete", "False"); err != nil {
return err
}
return nil
}
// ValidateDelete ...
func ValidateDelete(clientHubDynamic dynamic.Interface) error {
By("Validating MCH has been successfully uninstalled.")
labelSelector := fmt.Sprintf("installer.name=%s, installer.namespace=%s", MCHName, MCHNamespace)
listOptions := metav1.ListOptions{
LabelSelector: labelSelector,
Limit: 100,
}
appSubLink := clientHubDynamic.Resource(GVRAppSub)
appSubs, err := appSubLink.List(context.TODO(), listOptions)
if err != nil {
return err
}
helmReleaseLink := clientHubDynamic.Resource(GVRHelmRelease)
helmReleases, err := helmReleaseLink.List(context.TODO(), listOptions)
if err != nil {
return err
}
By("- Ensuring Application Subscriptions have terminated")
if len(appSubs.Items) != 0 {
return fmt.Errorf("%d appsubs left to be uninstalled", len(appSubs.Items))
}
By("- Ensuring HelmReleases have terminated")
if len(helmReleases.Items) != 0 {
By(fmt.Sprintf("%d helmreleases left to be uninstalled", len(helmReleases.Items)))
return fmt.Errorf("%d helmreleases left to be uninstalled", len(helmReleases.Items))
}
By("- Ensuring MCH Repo deployment has been terminated")
deploymentLink := clientHubDynamic.Resource(GVRDeployment).Namespace(MCHNamespace)
_, err = deploymentLink.Get(context.TODO(), "multiclusterhub-repo", metav1.GetOptions{})
if err != nil && !errors.IsNotFound(err) {
return err
}
By("- Ensuring MCH image manifest configmap is terminated")
labelSelector = fmt.Sprintf("ocm-configmap-type=%s", "image-manifest")
listOptions = metav1.ListOptions{
LabelSelector: labelSelector,
Limit: 100,
}
Eventually(func() error {
configmaps, err := KubeClient.CoreV1().ConfigMaps(MCHNamespace).List(context.TODO(), listOptions)
Expect(err).Should(BeNil())
if len(configmaps.Items) != 0 {
return fmt.Errorf("Expecting configmaps to terminate")
}
return nil
}, GetWaitInMinutes()*60, 1).Should(BeNil())
By("- Validating CRDs were deleted")
crds, err := getCRDs()
if err != nil {
return err
}
for _, crd := range crds {
_, err = DynamicKubeClient.Resource(GVRCustomResourceDefinition).Get(context.TODO(), crd, metav1.GetOptions{})
Expect(err).ToNot(BeNil())
}
By("- Validating ClusterManager was deleted")
clusterManagerLink := clientHubDynamic.Resource(GVRClusterManager)
_, err = clusterManagerLink.Get(context.TODO(), "cluster-manager", metav1.GetOptions{})
Expect(err).ShouldNot(BeNil())
By("- Validating HiveConfig was deleted")
hiveConfigLink := clientHubDynamic.Resource(GVRHiveConfig)
_, err = hiveConfigLink.Get(context.TODO(), HiveConfigName, metav1.GetOptions{})
Expect(err).ShouldNot(BeNil())
if runCleanUpScript() {
By("- Running documented clean up script")
workingDir, err := os.Getwd()
if err != nil {
log.Fatalf("failed to get working dir %v", err)
}
cleanupPath := path.Join(path.Dir(workingDir), "clean-up.sh")
err = os.Setenv("ACM_NAMESPACE", MCHNamespace)
if err != nil {
log.Fatal(err)
}
out, err := exec.Command("/bin/sh", cleanupPath).Output()
if err != nil {
log.Fatal(err)
}
err = os.Unsetenv("ACM_NAMESPACE")
if err != nil {
log.Fatal(err)
}
log.Println(fmt.Sprintf("Resources cleaned up by clean-up script:\n %s\n", bytes.NewBuffer(out).String()))
}
return nil
}
// FindCondition reports whether a hub condition of type 't' exists and matches the status 's'
func FindCondition(status map[string]interface{}, t string, s string) error {
conditions, ok := status["conditions"].([]interface{})
if !ok || conditions == nil {
return fmt.Errorf("no hubConditions found")
}
for i := range conditions {
condition := conditions[i]
if condition.(map[string]interface{})["type"].(string) == t {
if got := condition.(map[string]interface{})["status"].(string); got == s {
return nil
} else {
return fmt.Errorf("hubCondition `%s` status equals '%s', expected '%s'", t, got, s)
}
}
}
return fmt.Errorf("MCH does not have a hubcondition with type '%s'", t)
}
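
// Illustrative sketch (assumed shape, not taken from a live cluster): FindCondition expects the status
// map to contain a "conditions" list of maps with string "type" and "status" fields, e.g.
//
//	status := map[string]interface{}{
//		"conditions": []interface{}{
//			map[string]interface{}{"type": "Complete", "status": "True"},
//		},
//	}
//	// FindCondition(status, "Complete", "True") returns nil; any other type/status returns an error.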
// ValidateMCHUnsuccessful ...
func ValidateMCHUnsuccessful() error {
By("Validating MultiClusterHub Unsuccessful")
By(fmt.Sprintf("- Waiting %d minutes", GetWaitInMinutes()), func() {
time.Sleep(time.Duration(GetWaitInMinutes()) * time.Minute)
})
By("- Ensuring MCH is in 'Installing' phase")
status, err := GetMCHStatus()
if err != nil {
return err
}
if err := findPhase(status, "Installing"); err != nil {
return err
}
When("MCH Condition 'type' should be `Progressing` and 'status' should be 'true", func() {
Eventually(func() error {
mch, err := DynamicKubeClient.Resource(GVRMultiClusterHub).Namespace(MCHNamespace).Get(context.TODO(), MCHName, metav1.GetOptions{})
Expect(err).To(BeNil())
status := mch.Object["status"].(map[string]interface{})
return FindCondition(status, "Progressing", "True")
}, 1, 1).Should(BeNil())
})
return nil
}
// ValidateMCH validates MCH CR is running successfully
func ValidateMCH() error {
By("Validating MultiClusterHub")
By(fmt.Sprintf("- Ensuring MCH is in 'running' phase within %d minutes", GetWaitInMinutes()))
When(fmt.Sprintf("Wait for MultiClusterHub to be in running phase (Will take up to %d minutes)", GetWaitInMinutes()), func() {
Eventually(func() error {
status, err := GetMCHStatus()
if err != nil {
return err
}
if err := findPhase(status, "Running"); err != nil {
return err
}
return nil
}, GetWaitInMinutes()*60, 1).Should(BeNil())
})
By("- Ensuring MCH Repo Is available")
var deploy *appsv1.Deployment
deploy, err := KubeClient.AppsV1().Deployments(MCHNamespace).Get(context.TODO(), MCHRepoName, metav1.GetOptions{})
Expect(err).Should(BeNil())
mch, err := DynamicKubeClient.Resource(GVRMultiClusterHub).Namespace(MCHNamespace).Get(context.TODO(), MCHName, metav1.GetOptions{})
Expect(err).To(BeNil())
Expect(deploy.Status.AvailableReplicas).ShouldNot(Equal(0))
Expect(IsOwner(mch, &deploy.ObjectMeta)).To(Equal(true))
By("- Validating CRDs were created successfully")
crds, err := getCRDs()
Expect(err).Should(BeNil())
for _, crd := range crds {
_, err = DynamicKubeClient.Resource(GVRCustomResourceDefinition).Get(context.TODO(), crd, metav1.GetOptions{})
Expect(err).To(BeNil())
}
By("- Ensuring components have status 'true' when MCH is in 'running' phase")
mch, err = DynamicKubeClient.Resource(GVRMultiClusterHub).Namespace(MCHNamespace).Get(context.TODO(), MCHName, metav1.GetOptions{})
Expect(err).To(BeNil())
status := mch.Object["status"].(map[string]interface{})
if findPhase(status, "Running") == nil {
components, ok := mch.Object["status"].(map[string]interface{})["components"]
if !ok || components == nil {
return fmt.Errorf("MultiClusterHub: %s has no 'Components' map despite reporting 'running'", mch.GetName())
}
for k, v := range components.(map[string]interface{}) {
compStatus := v.(map[string]interface{})["status"].(string)
if compStatus != "True" {
return fmt.Errorf("Component: %s does not have status of 'true'", k)
}
}
}
By("- Ensuring condition has status 'true' and type 'complete' when MCH is in 'running' phase")
When("Component statuses should be true", func() {
Eventually(func() error {
mch, err := DynamicKubeClient.Resource(GVRMultiClusterHub).Namespace(MCHNamespace).Get(context.TODO(), MCHName, metav1.GetOptions{})
Expect(err).To(BeNil())
status := mch.Object["status"].(map[string]interface{})
return FindCondition(status, "Complete", "True")
}, 1, 1).Should(BeNil())
})
By("- Checking Appsubs")
unstructuredAppSubs := listByGVR(DynamicKubeClient, GVRAppSub, MCHNamespace, 1, len(AppSubSlice))
for _, appsub := range unstructuredAppSubs.Items {
if _, ok := appsub.Object["status"]; !ok {
return fmt.Errorf("Appsub: %s has no 'status' field", appsub.GetName())
}
status, ok := appsub.Object["status"].(map[string]interface{})
if !ok || status == nil {
return fmt.Errorf("Appsub: %s has no 'status' map", appsub.GetName())
}
klog.V(5).Infof("Checking Appsub - %s", appsub.GetName())
Expect(status["message"]).To(Equal("Active"))
Expect(status["phase"]).To(Equal("Subscribed"))
}
By("- Checking HelmReleases")
unstructuredHelmReleases := listByGVR(DynamicKubeClient, GVRHelmRelease, MCHNamespace, 1, len(AppSubSlice))
for _, helmRelease := range unstructuredHelmReleases.Items {
klog.V(5).Infof("Checking HelmRelease - %s", helmRelease.GetName())
status, ok := helmRelease.Object["status"].(map[string]interface{})
if !ok || status == nil {
return fmt.Errorf("HelmRelease: %s has no 'status' map", helmRelease.GetName())
}
conditions, ok := status["deployedRelease"].(map[string]interface{})
if !ok || conditions == nil {
return fmt.Errorf("HelmRelease: %s has no 'deployedRelease' interface", helmRelease.GetName())
}
}
By("- Checking Imported Hub Cluster")
if os.Getenv("MOCK") != "true" {
selfManaged, err := IsMCHSelfManaged()
Expect(err).Should(BeNil())
err = ValidateManagedCluster(selfManaged)
Expect(err).Should(BeNil())
}
currentVersion, err := GetCurrentVersionFromMCH()
Expect(err).Should(BeNil())
v, err := semver.NewVersion(currentVersion)
Expect(err).Should(BeNil())
c, err := semver.NewConstraint(">= 2.5.0")
Expect(err).Should(BeNil())
if c.Check(v) {
By("- Ensuring image manifest configmap is created")
_, err = KubeClient.CoreV1().ConfigMaps(MCHNamespace).Get(context.TODO(), fmt.Sprintf("mch-image-manifest-%s", currentVersion), metav1.GetOptions{})
Expect(err).Should(BeNil())
}
By("- Checking for Installer Labels on Deployments")
l, err := GetDeploymentLabels("infrastructure-operator")
if err != nil {
return err
}
if l["installer.name"] != MCHName || l["installer.namespace"] != MCHNamespace {
return fmt.Errorf("infrastructure-operator missing installer labels: `%s` != `%s`, `%s` != `%s`", l["installer.name"], MCHName, l["installer.namespace"], MCHNamespace)
}
return nil
}
// ValidateMCHStatusExist check if mch status exists
func ValidateMCHStatusExist() error {
Eventually(func() error {
mch, err := DynamicKubeClient.Resource(GVRMultiClusterHub).Namespace(MCHNamespace).Get(context.TODO(), MCHName, metav1.GetOptions{})
Expect(err).To(BeNil())
status, ok := mch.Object["status"].(map[string]interface{})
if !ok || status == nil {
return fmt.Errorf("MultiClusterHub: %s has no 'status' map", mch.GetName())
}
return nil
}, GetWaitInMinutes()*60, 1).Should(BeNil())
return nil
}
// ValidateComponentStatusExist check if Component statuses exist immediately when MCH is created
func ValidateComponentStatusExist() error {
Eventually(func() error {
mch, err := DynamicKubeClient.Resource(GVRMultiClusterHub).Namespace(MCHNamespace).Get(context.TODO(), MCHName, metav1.GetOptions{})
Expect(err).To(BeNil())
status, ok := mch.Object["status"].(map[string]interface{})
if !ok || status == nil {
return fmt.Errorf("MultiClusterHub: %s has no 'status' map", mch.GetName())
}
if components, ok := status["components"]; !ok || components == nil {
return fmt.Errorf("MultiClusterHub: %s has no 'Components' map in status", mch.GetName())
} else {
for k, v := range components.(map[string]interface{}) {
if _, ok := v.(map[string]interface{})["status"].(string); !ok {
return fmt.Errorf("Component: %s status does not exist", k)
}
}
}
return nil
}, GetWaitInMinutes()*60, 1).Should(BeNil())
return nil
}
// ValidateHubStatusExist checks if hub statuses exist immediately when MCH is created
func ValidateHubStatusExist() error {
Eventually(func() error {
mch, err := DynamicKubeClient.Resource(GVRMultiClusterHub).Namespace(MCHNamespace).Get(context.TODO(), MCHName, metav1.GetOptions{})
Expect(err).To(BeNil())
status, ok := mch.Object["status"].(map[string]interface{})
if !ok || status == nil {
return fmt.Errorf("MultiClusterHub: %s has no 'status' map", mch.GetName())
}
return FindCondition(status, "Progressing", "True")
}, GetWaitInMinutes()*60, 1).Should(BeNil())
return nil
}
// ValidateConditionDuringUninstall checks that the Terminating condition is set to true during MCH uninstall
func ValidateConditionDuringUninstall() error {
By("- Checking HubCondition type")
Eventually(func() error {
mch, err := DynamicKubeClient.Resource(GVRMultiClusterHub).Namespace(MCHNamespace).Get(context.TODO(), MCHName, metav1.GetOptions{})
Expect(err).To(BeNil())
status := mch.Object["status"].(map[string]interface{})
return FindCondition(status, "Terminating", "True")
}, GetWaitInMinutes()*60, 1).Should(BeNil())
return nil
}
// ValidatePhase returns error if MCH phase does not match the provided phase
func ValidatePhase(phase string) error {
By("- Checking HubCondition type")
status, err := GetMCHStatus()
if err != nil {
return err
}
return findPhase(status, phase)
}
// ValidateStatusesExist Confirms existence of both overall MCH and Component statuses immediately after MCH creation
func ValidateStatusesExist() error {
By("Validating Statuses exist")
By("- Ensuring MCH Status exists")
if err := ValidateMCHStatusExist(); err != nil {
return err
}
By("- Ensuring Component Status exist")
if err := ValidateComponentStatusExist(); err != nil {
return err
}
By("- Ensuring Hub Status exist")
if err := ValidateHubStatusExist(); err != nil {
return err
}
return nil
}
// ValidateImportHubResourcesExist confirms the existence of the 3 resources created when importing the hub as a managed cluster
func ValidateImportHubResourcesExist(expected bool) error {
	// check that the created namespace exists
	_, nsErr := KubeClient.CoreV1().Namespaces().Get(context.TODO(), "local-cluster", metav1.GetOptions{})
	// check that the created ManagedCluster exists
	mc, mcErr := DynamicKubeClient.Resource(GVRManagedCluster).Get(context.TODO(), "local-cluster", metav1.GetOptions{})
	// check that the created KlusterletAddonConfig exists
	kac, kacErr := DynamicKubeClient.Resource(GVRKlusterletAddonConfig).Namespace("local-cluster").Get(context.TODO(), "local-cluster", metav1.GetOptions{})
if expected {
if mc != nil {
if nsErr != nil || mcErr != nil || kacErr != nil {
return fmt.Errorf("not all local-cluster resources created")
}
return nil
} else {
return fmt.Errorf("local-cluster resources exist")
}
} else {
if mc != nil || kac != nil {
return fmt.Errorf("local-cluster resources exist")
}
return nil
}
}
// ValidateManagedCluster ...
func ValidateManagedCluster(importResourcesShouldExist bool) error {
By("- Checking imported hub resources exist or not")
By("- Confirming Necessary Resources")
// mc, _ := DynamicKubeClient.Resource(GVRManagedCluster).Get(context.TODO(), "local-cluster", metav1.GetOptions{})
	if err := ValidateImportHubResourcesExist(importResourcesShouldExist); err != nil {
		return fmt.Errorf("imported hub resources are not in the expected state: %v", err)
	}
	if importResourcesShouldExist {
		if err := validateManagedClusterConditions(); err != nil {
			return fmt.Errorf("managed cluster conditions are not all true: %v", err)
		}
		return nil
	}
return nil
}
// validateManagedClusterConditions checks that the local-cluster ManagedCluster conditions are all true
func validateManagedClusterConditions() error {
By("- Checking ManagedClusterConditions type true")
mc, _ := DynamicKubeClient.Resource(GVRManagedCluster).Get(context.TODO(), "local-cluster", metav1.GetOptions{})
status, ok := mc.Object["status"].(map[string]interface{})
if ok {
joinErr := FindCondition(status, "ManagedClusterJoined", "True")
avaiErr := FindCondition(status, "ManagedClusterConditionAvailable", "True")
accpErr := FindCondition(status, "HubAcceptedManagedCluster", "True")
if joinErr != nil || avaiErr != nil || accpErr != nil {
return fmt.Errorf("managedcluster conditions not all true")
}
return nil
} else {
return fmt.Errorf("no status")
}
}
// ValidateDeploymentPolicies confirms that the relevant deployments use the IfNotPresent image pull policy
func ValidateDeploymentPolicies() error {
	unstructuredDeployments := listByGVR(DynamicKubeClient, GVRDeployment, MCHNamespace, 60, 3)
	excludedDeployments := map[string]bool{
		"multicluster-operators-application":             true,
		"hive-operator":                                  true,
		"multicluster-operators-channel":                 true,
		"multicluster-operators-hub-subscription":        true,
		"multicluster-operators-standalone-subscription": true,
	}
	for _, deployment := range unstructuredDeployments.Items {
		deploymentName := deployment.GetName()
		if !excludedDeployments[deploymentName] {
			policy := deployment.Object["spec"].(map[string]interface{})["template"].(map[string]interface{})["spec"].(map[string]interface{})["containers"].([]interface{})[0].(map[string]interface{})["imagePullPolicy"]
			fmt.Println(deploymentName)
			Expect(policy).To(BeEquivalentTo("IfNotPresent"))
		}
	}
return nil
}
// ToggleDisableHubSelfManagement toggles the value of spec.disableHubSelfManagement from true to false or false to true
func ToggleDisableHubSelfManagement(disableHubSelfImport bool) error {
mch, err := DynamicKubeClient.Resource(GVRMultiClusterHub).Namespace(MCHNamespace).Get(context.TODO(), MCHName, metav1.GetOptions{})
Expect(err).To(BeNil())
mch.Object["spec"].(map[string]interface{})[DisableHubSelfManagementString] = disableHubSelfImport
mch, err = DynamicKubeClient.Resource(GVRMultiClusterHub).Namespace(MCHNamespace).Update(context.TODO(), mch, metav1.UpdateOptions{})
Expect(err).To(BeNil())
mch, err = DynamicKubeClient.Resource(GVRMultiClusterHub).Namespace(MCHNamespace).Get(context.TODO(), MCHName, metav1.GetOptions{})
if disableHubSelfManagement := mch.Object["spec"].(map[string]interface{})[DisableHubSelfManagementString].(bool); disableHubSelfManagement != disableHubSelfImport {
return fmt.Errorf("Spec was not updated")
}
return nil
}
// ToggleDisableUpdateClusterImageSets toggles the value of spec.disableUpdateClusterImageSets from true to false or false to true
func ToggleDisableUpdateClusterImageSets(disableUpdateCIS bool) error {
mch, err := DynamicKubeClient.Resource(GVRMultiClusterHub).Namespace(MCHNamespace).Get(context.TODO(), MCHName, metav1.GetOptions{})
Expect(err).To(BeNil())
disableUpdateClusterImageSetsString := "disableUpdateClusterImageSets"
mch.Object["spec"].(map[string]interface{})[disableUpdateClusterImageSetsString] = disableUpdateCIS
mch, err = DynamicKubeClient.Resource(GVRMultiClusterHub).Namespace(MCHNamespace).Update(context.TODO(), mch, metav1.UpdateOptions{})
Expect(err).To(BeNil())
mch, err = DynamicKubeClient.Resource(GVRMultiClusterHub).Namespace(MCHNamespace).Get(context.TODO(), MCHName, metav1.GetOptions{})
if disableUpdateClusterImageSets := mch.Object["spec"].(map[string]interface{})[disableUpdateClusterImageSetsString].(bool); disableUpdateClusterImageSets != disableUpdateCIS {
return fmt.Errorf("Spec was not updated")
}
return nil
}
// UpdateAnnotations replaces the annotations on the MultiClusterHub resource
func UpdateAnnotations(annotations map[string]string) {
mch, err := DynamicKubeClient.Resource(GVRMultiClusterHub).Namespace(MCHNamespace).Get(context.TODO(), MCHName, metav1.GetOptions{})
Expect(err).To(BeNil())
mch.SetAnnotations(annotations)
mch, err = DynamicKubeClient.Resource(GVRMultiClusterHub).Namespace(MCHNamespace).Update(context.TODO(), mch, metav1.UpdateOptions{})
Expect(err).To(BeNil())
}
// ValidateClusterImageSetsSubscriptionPause validates that the ClusterImageSets subscription created by console-chart-sub has subscription-pause set to the expected value
func ValidateClusterImageSetsSubscriptionPause(expected string) error {
appsub, err := DynamicKubeClient.Resource(GVRAppSub).Namespace(MCHNamespace).Get(context.TODO(), AppSubName, metav1.GetOptions{})
Expect(err).To(BeNil())
	spec, ok := appsub.Object["spec"].(map[string]interface{})
	if !ok || spec == nil {
		return fmt.Errorf("AppSub: %s has no 'spec' map", appsub.GetName())
	}
	packageOverridesOuter, ok := spec["packageOverrides"].([]interface{})
	if !ok || packageOverridesOuter == nil {
		return fmt.Errorf("AppSub: %s has no outer 'packageOverrides' list", appsub.GetName())
	}
	packageOverridesOuterFirst := packageOverridesOuter[0].(map[string]interface{})
	packageOverridesInner, ok := packageOverridesOuterFirst["packageOverrides"].([]interface{})
	if !ok || packageOverridesInner == nil {
		return fmt.Errorf("AppSub: %s has no inner 'packageOverrides' list", appsub.GetName())
	}
	packageOverridesInnerFirst := packageOverridesInner[0].(map[string]interface{})
	value, ok := packageOverridesInnerFirst["value"].(map[string]interface{})
	if !ok || value == nil {
		return fmt.Errorf("AppSub: %s has no 'value' map", appsub.GetName())
	}
	clusterImageSets, ok := value["clusterImageSets"].(map[string]interface{})
	if !ok || clusterImageSets == nil {
		return fmt.Errorf("AppSub: %s has no 'clusterImageSets' map", appsub.GetName())
	}
	subscriptionPauseValue, ok := clusterImageSets["subscriptionPause"]
	if !ok || subscriptionPauseValue == nil {
		return fmt.Errorf("AppSub: %s has no 'subscriptionPause' value", appsub.GetName())
	}
	if subscriptionPauseValue != expected {
		return fmt.Errorf("subscriptionPause is '%v', expected '%s'", subscriptionPauseValue, expected)
	}
return nil
}
// listByGVR keeps polling (for up to timeout seconds) until at least expectedTotal objects of the given GVR exist, then returns the list
func listByGVR(clientHubDynamic dynamic.Interface, gvr schema.GroupVersionResource, namespace string, timeout int, expectedTotal int) *unstructured.UnstructuredList {
if timeout < 1 {
timeout = 1
}
var obj *unstructured.UnstructuredList
Eventually(func() error {
var err error
namespace := clientHubDynamic.Resource(gvr).Namespace(namespace)
// labelSelector := fmt.Sprintf("installer.name=%s, installer.namespace=%s", MCHName, MCHNamespace)
// listOptions := metav1.ListOptions{
// LabelSelector: labelSelector,
// Limit: 100,
// }
obj, err = namespace.List(context.TODO(), metav1.ListOptions{})
if err != nil {
return err
}
if len(obj.Items) < expectedTotal {
return fmt.Errorf("not all resources created in time. %d/%d appsubs found", len(obj.Items), expectedTotal)
}
return nil
}, timeout, 1).Should(BeNil())
return obj
}
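
// Illustrative usage (mirrors how the validators above call it; the timeout and count shown here are
// examples only, not values taken from the suite):
//
//	appSubs := listByGVR(DynamicKubeClient, GVRAppSub, MCHNamespace, 60, len(AppSubSlice))
//	for _, item := range appSubs.Items {
//		klog.V(5).Infof("found %s", item.GetName())
//	}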
// GetSubscriptionSpec returns the operator subscription spec; the install plan approval mode depends on TEST_MODE
func GetSubscriptionSpec() map[string]interface{} {
if os.Getenv("TEST_MODE") == "update" {
return map[string]interface{}{
"sourceNamespace": os.Getenv("sourceNamespace"),
"source": os.Getenv("source"),
"channel": os.Getenv("channel"),
"installPlanApproval": "Manual",
"name": os.Getenv("name"),
"startingCSV": fmt.Sprintf("advanced-cluster-management.v%s", os.Getenv("startVersion")),
}
}
return map[string]interface{}{
"sourceNamespace": os.Getenv("sourceNamespace"),
"source": os.Getenv("source"),
"channel": os.Getenv("channel"),
"installPlanApproval": "Automatic",
"name": os.Getenv("name"),
}
}
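
// Illustrative environment for GetSubscriptionSpec (variable names come from the function above;
// the values shown are placeholders, not defaults):
//
//	export TEST_MODE=update            # switches installPlanApproval to Manual and adds startingCSV
//	export sourceNamespace=openshift-marketplace
//	export source=acm-custom-registry
//	export channel=release-2.5
//	export name=advanced-cluster-management
//	export startVersion=2.5.0          # only used when TEST_MODE=update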
// GetInstallPlanNameFromSub ...
func GetInstallPlanNameFromSub(sub *unstructured.Unstructured) (string, error) {
if _, ok := sub.Object["status"]; !ok {
return "", fmt.Errorf("Sub: %s has no 'status' field", sub.GetName())
}
status, ok := sub.Object["status"].(map[string]interface{})
if !ok || status == nil {
return "", fmt.Errorf("Sub: %s has no 'status' map", sub.GetName())
}
	installplan, ok := status["installplan"].(map[string]interface{})
	if !ok || installplan == nil {
		return "", fmt.Errorf("Sub: %s has no 'installplan' map", sub.GetName())
	}
return installplan["name"].(string), nil
}
// MarkInstallPlanAsApproved ...
func MarkInstallPlanAsApproved(ip *unstructured.Unstructured) (*unstructured.Unstructured, error) {
spec, ok := ip.Object["spec"].(map[string]interface{})
if !ok || spec == nil {
return nil, fmt.Errorf("Installplan: %s has no 'spec' map", ip.GetName())
}
spec["approved"] = true
return ip, nil
}
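
// Illustrative manual-approval flow built from the two helpers above. The subscription and install
// plan GVRs (shown here as GVROLMSubscription and GVRInstallPlan) are hypothetical placeholders and
// the update call is a sketch; the real test suite may wire this differently.
//
//	sub, _ := DynamicKubeClient.Resource(GVROLMSubscription).Namespace(MCHNamespace).Get(context.TODO(), OCMSubscriptionName, metav1.GetOptions{})
//	ipName, _ := GetInstallPlanNameFromSub(sub)
//	ip, _ := DynamicKubeClient.Resource(GVRInstallPlan).Namespace(MCHNamespace).Get(context.TODO(), ipName, metav1.GetOptions{})
//	ip, _ = MarkInstallPlanAsApproved(ip)
//	_, _ = DynamicKubeClient.Resource(GVRInstallPlan).Namespace(MCHNamespace).Update(context.TODO(), ip, metav1.UpdateOptions{})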
// ShouldSkipSubscription reports whether subscription operations should be skipped,
// based on the skipSubscription environment variable.
func ShouldSkipSubscription() bool {
	return os.Getenv("skipSubscription") == "true"
}
// GetCurrentVersionFromMCH ...
func GetCurrentVersionFromMCH() (string, error) {
mch, err := DynamicKubeClient.Resource(GVRMultiClusterHub).Namespace(MCHNamespace).Get(context.TODO(), MCHName, metav1.GetOptions{})
Expect(err).To(BeNil())
status, ok := mch.Object["status"].(map[string]interface{})
if !ok || status == nil {
return "", fmt.Errorf("MultiClusterHub: %s has no 'status' map", mch.GetName())
}
version, ok := status["currentVersion"]
if !ok {
return "", fmt.Errorf("MultiClusterHub: %s status has no 'currentVersion' field", mch.GetName())
}
return version.(string), nil
}
// CreateDiscoveryConfig ...
func CreateDiscoveryConfig() {
By("- Creating DiscoveryConfig CR if it does not exist")
_, err := DynamicKubeClient.Resource(GVRDiscoveryConfig).Namespace(MCHNamespace).Get(context.TODO(), "discoveryconfig", metav1.GetOptions{})
if err == nil {
return
}
discoveryConfigByte, err := ioutil.ReadFile("../resources/discoveryconfig.yaml")
Expect(err).To(BeNil())
discoveryConfig := &unstructured.Unstructured{Object: map[string]interface{}{}}
err = yaml.Unmarshal(discoveryConfigByte, &discoveryConfig.Object)
Expect(err).To(BeNil())
_, err = DynamicKubeClient.Resource(GVRDiscoveryConfig).Namespace(MCHNamespace).Create(context.TODO(), discoveryConfig, metav1.CreateOptions{})
Expect(err).To(BeNil())
}
// DeleteDiscoveryConfig ...
func DeleteDiscoveryConfig() {
By("- Deleting DiscoveryConfig CR if it exists")
discoveryConfigByte, err := ioutil.ReadFile("../resources/discoveryconfig.yaml")
Expect(err).To(BeNil())
discoveryConfig := &unstructured.Unstructured{Object: map[string]interface{}{}}
err = yaml.Unmarshal(discoveryConfigByte, &discoveryConfig.Object)
Expect(err).To(BeNil())
err = DynamicKubeClient.Resource(GVRDiscoveryConfig).Namespace(MCHNamespace).Delete(context.TODO(), discoveryConfig.GetName(), metav1.DeleteOptions{})
Expect(err).To(BeNil())
}
// CreateObservabilityCRD ...
func CreateObservabilityCRD() {
By("- Creating Observability CRD if it does not exist")
_, err := DynamicKubeClient.Resource(GVRCustomResourceDefinition).Get(context.TODO(), "multiclusterobservabilities.observability.open-cluster-management.io", metav1.GetOptions{})
if err == nil {
return
}
crd, err := ioutil.ReadFile("../resources/observability-crd.yaml")
Expect(err).To(BeNil())
unstructuredCRD := &unstructured.Unstructured{Object: map[string]interface{}{}}
err = yaml.Unmarshal(crd, &unstructuredCRD.Object)
Expect(err).To(BeNil())
_, err = DynamicKubeClient.Resource(GVRCustomResourceDefinition).Create(context.TODO(), unstructuredCRD, metav1.CreateOptions{})
Expect(err).To(BeNil())
}
// CreateMultiClusterEngineCRD ...
func CreateMultiClusterEngineCRD() {
By("- Creating MultiClusterEngine CRD if it does not exist")
_, err := DynamicKubeClient.Resource(GVRCustomResourceDefinition).Get(context.TODO(), "multiclusterengines.multicluster.openshift.io", metav1.GetOptions{})
if err == nil {
return
}
crd, err := ioutil.ReadFile("../resources/multiclusterengine-crd.yaml")
Expect(err).To(BeNil())
unstructuredCRD := &unstructured.Unstructured{Object: map[string]interface{}{}}
err = yaml.Unmarshal(crd, &unstructuredCRD.Object)
Expect(err).To(BeNil())
_, err = DynamicKubeClient.Resource(GVRCustomResourceDefinition).Create(context.TODO(), unstructuredCRD, metav1.CreateOptions{})
Expect(err).To(BeNil())
}
// CreateMultiClusterEngineCR ...
func CreateMultiClusterEngineCR() {
By("- Creating MultiClusterEngine CR if it does not exist")
_, err := DynamicKubeClient.Resource(GVRMultiClusterEngine).Get(context.TODO(), "multiclusterengine-sample", metav1.GetOptions{})
if err == nil {
return
}
crd, err := ioutil.ReadFile("../resources/multiclusterengine-cr.yaml")
Expect(err).To(BeNil())
unstructuredCRD := &unstructured.Unstructured{Object: map[string]interface{}{}}
err = yaml.Unmarshal(crd, &unstructuredCRD.Object)
Expect(err).To(BeNil())
_, err = DynamicKubeClient.Resource(GVRMultiClusterEngine).Create(context.TODO(), unstructuredCRD, metav1.CreateOptions{})
Expect(err).To(BeNil())
}
// DeleteMultiClusterEngineCR ...
func DeleteMultiClusterEngineCR() {
By("- Deleting MultiClusterEngine CR if it exists")
crd, err := ioutil.ReadFile("../resources/multiclusterengine-cr.yaml")
Expect(err).To(BeNil())
unstructuredCRD := &unstructured.Unstructured{Object: map[string]interface{}{}}
err = yaml.Unmarshal(crd, &unstructuredCRD.Object)
Expect(err).To(BeNil())
err = DynamicKubeClient.Resource(GVRMultiClusterEngine).Delete(context.TODO(), "multiclusterengine-sample", metav1.DeleteOptions{})
Expect(err).To(BeNil())
}
// DeleteMultiClusterEngineCRD ...
func DeleteMultiClusterEngineCRD() {
By("- Deleting MultiClusterEngine CRD if it exists")
crd, err := ioutil.ReadFile("../resources/multiclusterengine-crd.yaml")
Expect(err).To(BeNil())
unstructuredCRD := &unstructured.Unstructured{Object: map[string]interface{}{}}
err = yaml.Unmarshal(crd, &unstructuredCRD.Object)
Expect(err).To(BeNil())
err = DynamicKubeClient.Resource(GVRCustomResourceDefinition).Delete(context.TODO(), "multiclusterengines.multicluster.openshift.io", metav1.DeleteOptions{})
Expect(err).To(BeNil())
}
// CreateObservabilityCR ...
func CreateObservabilityCR() {
By("- Creating Observability CR if it does not exist")
_, err := DynamicKubeClient.Resource(GVRObservability).Get(context.TODO(), "observability", metav1.GetOptions{})
if err == nil {
return
}
crd, err := ioutil.ReadFile("../resources/observability-cr.yaml")
Expect(err).To(BeNil())
unstructuredCRD := &unstructured.Unstructured{Object: map[string]interface{}{}}
err = yaml.Unmarshal(crd, &unstructuredCRD.Object)
Expect(err).To(BeNil())
_, err = DynamicKubeClient.Resource(GVRObservability).Create(context.TODO(), unstructuredCRD, metav1.CreateOptions{})
Expect(err).To(BeNil())
}
// DeleteObservabilityCR ...
func DeleteObservabilityCR() {
By("- Deleting Observability CR if it exists")
crd, err := ioutil.ReadFile("../resources/observability-cr.yaml")
Expect(err).To(BeNil())
unstructuredCRD := &unstructured.Unstructured{Object: map[string]interface{}{}}
err = yaml.Unmarshal(crd, &unstructuredCRD.Object)
Expect(err).To(BeNil())
err = DynamicKubeClient.Resource(GVRObservability).Delete(context.TODO(), "observability", metav1.DeleteOptions{})
Expect(err).To(BeNil())
}
// DeleteObservabilityCRD ...
func DeleteObservabilityCRD() {
By("- Deleting Observability CRD if it exists")
crd, err := ioutil.ReadFile("../resources/observability-crd.yaml")
Expect(err).To(BeNil())
unstructuredCRD := &unstructured.Unstructured{Object: map[string]interface{}{}}
err = yaml.Unmarshal(crd, &unstructuredCRD.Object)
Expect(err).To(BeNil())
err = DynamicKubeClient.Resource(GVRCustomResourceDefinition).Delete(context.TODO(), "multiclusterobservabilities.observability.open-cluster-management.io", metav1.DeleteOptions{})
Expect(err).To(BeNil())
}
// CreateBareMetalAssetsCR ...
func CreateBareMetalAssetsCR() {
By("- Creating BareMetalAsset CR if it does not exist")
_, err := DynamicKubeClient.Resource(GVRBareMetalAsset).Namespace(MCHNamespace).Get(context.TODO(), "mch-test-bma", metav1.GetOptions{})
if err == nil {
return
}
crd, err := ioutil.ReadFile("../resources/baremetalasset-cr.yaml")
Expect(err).To(BeNil())
unstructuredCRD := &unstructured.Unstructured{Object: map[string]interface{}{}}
err = yaml.Unmarshal(crd, &unstructuredCRD.Object)
Expect(err).To(BeNil())
_, err = DynamicKubeClient.Resource(GVRBareMetalAsset).Namespace(MCHNamespace).Create(context.TODO(), unstructuredCRD, metav1.CreateOptions{})
Expect(err).To(BeNil())
}
// DeleteBareMetalAssetsCR ...
func DeleteBareMetalAssetsCR() {
By("- Deleting BareMetal CR if it exists")
_, err := ioutil.ReadFile("../resources/baremetalasset-cr.yaml")
Expect(err).To(BeNil())
err = DynamicKubeClient.Resource(GVRBareMetalAsset).Namespace(MCHNamespace).Delete(context.TODO(), "mch-test-bma", metav1.DeleteOptions{})
Expect(err).To(BeNil())
}
func getCRDs() ([]string, error) {
err := os.Setenv("CRDS_PATH", "../../../bin/crds")
if err != nil {
return nil, err
}
defer os.Unsetenv("CRDS_PATH")
crdDir, found := os.LookupEnv("CRDS_PATH")
if !found {
return nil, fmt.Errorf("CRDS_PATH environment variable is required")
}
var crds []string
files, err := ioutil.ReadDir(crdDir)
Expect(err).To(BeNil())
for _, file := range files {
if filepath.Ext(file.Name()) != ".yaml" {
continue
}
filePath := path.Join(crdDir, file.Name())
src, err := ioutil.ReadFile(filepath.Clean(filePath)) // #nosec G304 (filepath cleaned)
if err != nil {
return nil, err
}
crd := &unstructured.Unstructured{}
err = yaml.Unmarshal(src, crd)
if err != nil {
return nil, err
}
crdName, _, err := unstructured.NestedString(crd.Object, "metadata", "name")
if err != nil {
return nil, err
}
crds = append(crds, crdName)
}
return crds, nil
}
// CoffeeBreak ...
func CoffeeBreak(minutes int) {
log.Println(fmt.Sprintf("Starting coffee break for %d minutes...\n", minutes))
slept_minutes := 0
for slept_minutes < minutes {
time.Sleep(time.Duration(1) * time.Minute)
slept_minutes += 1
log.Println(fmt.Sprintf("... slept %d minutes...\n", slept_minutes))
}
log.Println(fmt.Sprintf("... ending coffee break after %d minutes!\n", slept_minutes))
}
| [
"\"pullSecret\"",
"\"name\"",
"\"waitInMinutes\"",
"\"runCleanUpScript\"",
"\"KUBECONFIG\"",
"\"MOCK\"",
"\"TEST_MODE\"",
"\"sourceNamespace\"",
"\"source\"",
"\"channel\"",
"\"name\"",
"\"startVersion\"",
"\"sourceNamespace\"",
"\"source\"",
"\"channel\"",
"\"name\"",
"\"skipSubscription\""
]
| []
| [
"MOCK",
"name",
"TEST_MODE",
"pullSecret",
"sourceNamespace",
"source",
"startVersion",
"waitInMinutes",
"channel",
"KUBECONFIG",
"runCleanUpScript",
"skipSubscription"
]
| [] | ["MOCK", "name", "TEST_MODE", "pullSecret", "sourceNamespace", "source", "startVersion", "waitInMinutes", "channel", "KUBECONFIG", "runCleanUpScript", "skipSubscription"] | go | 12 | 0 | |
generated/niswitch/niswitch/__init__.py | # -*- coding: utf-8 -*-
# This file was generated
__version__ = '1.3.3.dev0'
from niswitch.enums import * # noqa: F403,F401,H303
from niswitch.errors import DriverWarning # noqa: F401
from niswitch.errors import Error # noqa: F401
from niswitch.session import Session # noqa: F401
def get_diagnostic_information():
'''Get diagnostic information about the system state that is suitable for printing or logging
returns: dict
note: Python bitness may be incorrect when running in a virtual environment
'''
import os
import pkg_resources
import platform
import struct
import sys
def is_python_64bit():
return (struct.calcsize("P") == 8)
def is_os_64bit():
return platform.machine().endswith('64')
def is_venv():
return 'VIRTUAL_ENV' in os.environ
info = {}
info['os'] = {}
info['python'] = {}
info['driver'] = {}
info['module'] = {}
if platform.system() == 'Windows':
try:
import winreg as winreg
except ImportError:
import _winreg as winreg
os_name = 'Windows'
try:
driver_version_key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r"SOFTWARE\National Instruments\NI-SWITCH\CurrentVersion")
driver_version = winreg.QueryValueEx(driver_version_key, "Version")[0]
except WindowsError:
driver_version = 'Unknown'
elif platform.system() == 'Linux':
os_name = 'Linux'
driver_version = 'Unknown'
else:
raise SystemError('Unsupported platform: {}'.format(platform.system()))
installed_packages = pkg_resources.working_set
installed_packages_list = [{'name': i.key, 'version': i.version, } for i in installed_packages]
info['os']['name'] = os_name
info['os']['version'] = platform.version()
info['os']['bits'] = '64' if is_os_64bit() else '32'
info['driver']['name'] = "NI-SWITCH"
info['driver']['version'] = driver_version
info['module']['name'] = 'niswitch'
info['module']['version'] = "1.3.3.dev0"
info['python']['version'] = sys.version
info['python']['bits'] = '64' if is_python_64bit() else '32'
info['python']['is_venv'] = is_venv()
info['python']['packages'] = installed_packages_list
return info
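
# Illustrative usage (interactive session; the values returned vary by machine, except for the
# driver and module names hard-coded above):
#   >>> import niswitch
#   >>> info = niswitch.get_diagnostic_information()
#   >>> info['driver']['name'], info['module']['version']
#   ('NI-SWITCH', '1.3.3.dev0')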
def print_diagnostic_information():
'''Print diagnostic information in a format suitable for issue report
note: Python bitness may be incorrect when running in a virtual environment
'''
info = get_diagnostic_information()
row_format = ' {:<10} {}'
    for section in ['OS', 'Driver', 'Module', 'Python']:
        typename = section.lower()
        print(section + ':')
        for item in info[typename]:
            if item != 'packages':
                print(row_format.format(item.title() + ':', info[typename][item]))
print(' Installed Packages:')
for p in info['python']['packages']:
print((' ' * 8) + p['name'] + '==' + p['version'])
return info
| []
| []
| []
| [] | [] | python | 0 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ace_api.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
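
# Typical invocations (standard Django management commands, shown for illustration only):
#   python manage.py migrate
#   python manage.py runserver 0.0.0.0:8000
#   python manage.py createsuperuser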
| []
| []
| []
| [] | [] | python | 0 | 0 | |
icloudpd/base.py | #!/usr/bin/env python
"""Main script that uses Click to parse command-line arguments"""
from __future__ import print_function
import os
import sys
import time
import datetime
import logging
import itertools
import subprocess
import json
import threading
import multiprocessing
import click
from tqdm import tqdm
from tzlocal import get_localzone
from icloudpd.logger import setup_logger
from icloudpd.authentication import authenticate, TwoStepAuthRequiredError
from icloudpd import download
from icloudpd.email_notifications import send_2sa_notification
from icloudpd.string_helpers import truncate_middle
from icloudpd.autodelete import autodelete_photos
from icloudpd.paths import local_download_path
from icloudpd import exif_datetime
# Must import the constants object so that we can mock values in tests.
from icloudpd import constants
from icloudpd.counter import Counter
try:
import Queue as queue
except ImportError:
import queue
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
@click.command(context_settings=CONTEXT_SETTINGS, options_metavar="<options>")
# @click.argument(
@click.option(
"-d", "--directory",
help="Local directory that should be used for download",
type=click.Path(exists=True),
metavar="<directory>")
@click.option(
"-u", "--username",
help="Your iCloud username or email address",
metavar="<username>",
prompt="iCloud username/email",
)
@click.option(
"-p", "--password",
help="Your iCloud password "
"(default: use PyiCloud keyring or prompt for password)",
metavar="<password>",
)
@click.option(
"--cookie-directory",
help="Directory to store cookies for authentication "
"(default: ~/.pyicloud)",
metavar="</cookie/directory>",
default="~/.pyicloud",
)
@click.option(
"--size",
help="Image size to download (default: original)",
type=click.Choice(["original", "medium", "thumb"]),
default="original",
)
@click.option(
"--live-photo-size",
help="Live Photo video size to download (default: original)",
type=click.Choice(["original", "medium", "thumb"]),
default="original",
)
@click.option(
"--recent",
help="Number of recent photos to download (default: download all photos)",
type=click.IntRange(0),
)
@click.option(
"--until-found",
help="Download most recently added photos until we find x number of "
"previously downloaded consecutive photos (default: download all photos)",
type=click.IntRange(0),
)
@click.option(
"-a", "--album",
help="Album to download (default: All Photos)",
metavar="<album>",
default="All Photos",
)
@click.option(
"-l", "--list-albums",
help="Lists the avaliable albums",
is_flag=True,
)
@click.option(
"--skip-videos",
help="Don't download any videos (default: Download all photos and videos)",
is_flag=True,
)
@click.option(
"--skip-live-photos",
help="Don't download any live photos (default: Download live photos)",
is_flag=True,
)
@click.option(
"--force-size",
help="Only download the requested size "
+ "(default: download original if size is not available)",
is_flag=True,
)
@click.option(
"--auto-delete",
help='Scans the "Recently Deleted" folder and deletes any files found in there. '
+ "(If you restore the photo in iCloud, it will be downloaded again.)",
is_flag=True,
)
@click.option(
"--only-print-filenames",
help="Only prints the filenames of all files that will be downloaded "
"(not including files that are already downloaded.)"
+ "(Does not download or delete any files.)",
is_flag=True,
)
@click.option(
"--folder-structure",
help="Folder structure (default: {:%Y/%m/%d})",
metavar="<folder_structure>",
default="{:%Y/%m/%d}",
)
@click.option(
"--set-exif-datetime",
help="Write the DateTimeOriginal exif tag from file creation date, if it doesn't exist.",
is_flag=True,
)
@click.option(
"--smtp-username",
help="Your SMTP username, for sending email notifications when "
"two-step authentication expires.",
metavar="<smtp_username>",
)
@click.option(
"--smtp-password",
help="Your SMTP password, for sending email notifications when "
"two-step authentication expires.",
metavar="<smtp_password>",
)
@click.option(
"--smtp-host",
help="Your SMTP server host. Defaults to: smtp.gmail.com",
metavar="<smtp_host>",
default="smtp.gmail.com",
)
@click.option(
"--smtp-port",
help="Your SMTP server port. Default: 587 (Gmail)",
metavar="<smtp_port>",
type=click.IntRange(0),
default=587,
)
@click.option(
"--smtp-no-tls",
help="Pass this flag to disable TLS for SMTP (TLS is required for Gmail)",
metavar="<smtp_no_tls>",
is_flag=True,
)
@click.option(
"--notification-email",
help="Email address where you would like to receive email notifications. "
"Default: SMTP username",
metavar="<notification_email>",
)
@click.option(
"--notification-script",
type=click.Path(),
help="Runs an external script when two factor authentication expires. "
"(path required: /path/to/my/script.sh)",
)
@click.option(
"--log-level",
help="Log level (default: debug)",
type=click.Choice(["debug", "info", "error"]),
default="debug",
)
@click.option(
"--no-progress-bar",
help="Disables the one-line progress bar and prints log messages on separate lines "
"(Progress bar is disabled by default if there is no tty attached)",
is_flag=True,
)
@click.option(
"--threads-num",
help="Number of cpu threads(default: cpu count * 5)",
type=click.IntRange(1),
default=multiprocessing.cpu_count() * 5,
)
@click.version_option()
# pylint: disable-msg=too-many-arguments,too-many-statements
# pylint: disable-msg=too-many-branches,too-many-locals
def main(
directory,
username,
password,
cookie_directory,
size,
live_photo_size,
recent,
until_found,
album,
list_albums,
skip_videos,
skip_live_photos,
force_size,
auto_delete,
only_print_filenames,
folder_structure,
set_exif_datetime,
smtp_username,
smtp_password,
smtp_host,
smtp_port,
smtp_no_tls,
notification_email,
log_level,
no_progress_bar,
notification_script,
threads_num,
):
"""Download all iCloud photos to a local directory"""
logger = setup_logger()
if only_print_filenames:
logger.disabled = True
else:
# Need to make sure disabled is reset to the correct value,
# because the logger instance is shared between tests.
logger.disabled = False
if log_level == "debug":
logger.setLevel(logging.DEBUG)
elif log_level == "info":
logger.setLevel(logging.INFO)
elif log_level == "error":
logger.setLevel(logging.ERROR)
raise_error_on_2sa = (
smtp_username is not None
or notification_email is not None
or notification_script is not None
)
try:
icloud = authenticate(
username,
password,
cookie_directory,
raise_error_on_2sa,
client_id=os.environ.get("CLIENT_ID"),
)
except TwoStepAuthRequiredError:
if notification_script is not None:
subprocess.call([notification_script])
if smtp_username is not None or notification_email is not None:
send_2sa_notification(
smtp_username,
smtp_password,
smtp_host,
smtp_port,
smtp_no_tls,
notification_email,
)
sys.exit(1)
# Default album is "All Photos", so this is the same as
# calling `icloud.photos.all`.
photos = icloud.photos.albums[album]
if list_albums:
albums_dict = icloud.photos.albums
# Python2: itervalues, Python3: values()
if sys.version_info[0] >= 3:
albums = albums_dict.values() # pragma: no cover
else:
albums = albums_dict.itervalues() # pragma: no cover
album_titles = [str(a) for a in albums]
print(*album_titles, sep="\n")
sys.exit(0)
# For Python 2.7
if hasattr(directory, "decode"):
directory = directory.decode("utf-8") # pragma: no cover
directory = os.path.normpath(directory)
logger.debug(
"Looking up all photos%s from album %s...",
"" if skip_videos else " and videos",
album)
def photos_exception_handler(ex, retries):
"""Handles session errors in the PhotoAlbum photos iterator"""
if "Invalid global session" in str(ex):
if retries > constants.MAX_RETRIES:
logger.tqdm_write(
"iCloud re-authentication failed! Please try again later."
)
raise ex
logger.tqdm_write(
"Session error, re-authenticating...",
logging.ERROR)
if retries > 1:
# If the first reauthentication attempt failed,
# start waiting a few seconds before retrying in case
# there are some issues with the Apple servers
time.sleep(constants.WAIT_SECONDS)
icloud.authenticate()
photos.exception_handler = photos_exception_handler
photos_count = len(photos)
# Optional: Only download the x most recent photos.
if recent is not None:
photos_count = recent
photos = itertools.islice(photos, recent)
tqdm_kwargs = {"total": photos_count}
if until_found is not None:
del tqdm_kwargs["total"]
photos_count = "???"
# ensure photos iterator doesn't have a known length
photos = (p for p in photos)
plural_suffix = "" if photos_count == 1 else "s"
video_suffix = ""
photos_count_str = "the first" if photos_count == 1 else photos_count
if not skip_videos:
video_suffix = " or video" if photos_count == 1 else " and videos"
logger.info(
"Downloading %s %s photo%s%s to %s/ ...",
photos_count_str,
size,
plural_suffix,
video_suffix,
directory,
)
# Use only ASCII characters in progress bar
tqdm_kwargs["ascii"] = True
# Skip the one-line progress bar if we're only printing the filenames,
    # or if the progress bar is explicitly disabled,
# or if this is not a terminal (e.g. cron or piping output to file)
if not os.environ.get("FORCE_TQDM") and (
only_print_filenames or no_progress_bar or not sys.stdout.isatty()
):
photos_enumerator = photos
logger.set_tqdm(None)
else:
photos_enumerator = tqdm(photos, **tqdm_kwargs)
logger.set_tqdm(photos_enumerator)
def download_photo(counter, photo):
"""internal function for actually downloading the photos"""
if skip_videos and photo.item_type != "image":
logger.set_tqdm_description(
"Skipping %s, only downloading photos." % photo.filename
)
return
if photo.item_type != "image" and photo.item_type != "movie":
logger.set_tqdm_description(
"Skipping %s, only downloading photos and videos. "
"(Item type was: %s)" % (photo.filename, photo.item_type)
)
return
try:
created_date = photo.created.astimezone(get_localzone())
except (ValueError, OSError):
logger.set_tqdm_description(
"Could not convert photo created date to local timezone (%s)" %
photo.created, logging.ERROR)
created_date = photo.created
try:
date_path = folder_structure.format(created_date)
except ValueError: # pragma: no cover
# This error only seems to happen in Python 2
logger.set_tqdm_description(
"Photo created date was not valid (%s)" %
photo.created, logging.ERROR)
# e.g. ValueError: year=5 is before 1900
# (https://github.com/ndbroadbent/icloud_photos_downloader/issues/122)
# Just use the Unix epoch
created_date = datetime.datetime.fromtimestamp(0)
date_path = folder_structure.format(created_date)
download_dir = os.path.join(directory, date_path)
if not os.path.exists(download_dir):
try:
os.makedirs(download_dir)
except OSError: # pragma: no cover
pass # pragma: no cover
download_size = size
try:
versions = photo.versions
except KeyError as ex:
print(
"KeyError: %s attribute was not found in the photo fields!" %
ex)
with open('icloudpd-photo-error.json', 'w') as outfile:
# pylint: disable=protected-access
json.dump({
"master_record": photo._master_record,
"asset_record": photo._asset_record
}, outfile)
# pylint: enable=protected-access
print("icloudpd has saved the photo record to: "
"./icloudpd-photo-error.json")
print("Please create a Gist with the contents of this file: "
"https://gist.github.com")
print(
"Then create an issue on GitHub: "
"https://github.com/ndbroadbent/icloud_photos_downloader/issues")
print(
"Include a link to the Gist in your issue, so that we can "
"see what went wrong.\n")
return
if size not in versions and size != "original":
if force_size:
filename = photo.filename.encode(
"utf-8").decode("ascii", "ignore")
logger.set_tqdm_description(
"%s size does not exist for %s. Skipping..." %
(size, filename), logging.ERROR, )
return
download_size = "original"
download_path = local_download_path(
photo, download_size, download_dir)
file_exists = os.path.isfile(download_path)
if not file_exists and download_size == "original":
# Deprecation - We used to download files like IMG_1234-original.jpg,
# so we need to check for these.
# Now we match the behavior of iCloud for Windows: IMG_1234.jpg
original_download_path = ("-%s." % size).join(
download_path.rsplit(".", 1)
)
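            # e.g. for size "original" this checks "IMG_1234-original.jpg"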
file_exists = os.path.isfile(original_download_path)
if file_exists:
counter.increment()
logger.set_tqdm_description(
"%s already exists." % truncate_middle(download_path, 96)
)
else:
counter.reset()
if only_print_filenames:
print(download_path)
else:
truncated_path = truncate_middle(download_path, 96)
logger.set_tqdm_description(
"Downloading %s" %
truncated_path)
download_result = download.download_media(
icloud, photo, download_path, download_size
)
if download_result and set_exif_datetime:
if photo.filename.lower().endswith((".jpg", ".jpeg")):
if not exif_datetime.get_photo_exif(download_path):
# %Y:%m:%d looks wrong but it's the correct format
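                            # EXIF DateTimeOriginal uses ":" as the date separator,
                            # e.g. "2023:01:15 09:30:00"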
date_str = created_date.strftime(
"%Y:%m:%d %H:%M:%S")
logger.debug(
"Setting EXIF timestamp for %s: %s",
download_path,
date_str,
)
exif_datetime.set_photo_exif(
download_path,
created_date.strftime("%Y:%m:%d %H:%M:%S"),
)
else:
timestamp = time.mktime(created_date.timetuple())
os.utime(download_path, (timestamp, timestamp))
# Also download the live photo if present
if not skip_live_photos:
lp_size = live_photo_size + "Video"
if lp_size in photo.versions:
version = photo.versions[lp_size]
filename = version["filename"]
if live_photo_size != "original":
# Add size to filename if not original
filename = filename.replace(
".MOV", "-%s.MOV" %
live_photo_size)
lp_download_path = os.path.join(download_dir, filename)
if only_print_filenames:
print(lp_download_path)
else:
if os.path.isfile(lp_download_path):
logger.set_tqdm_description(
"%s already exists."
% truncate_middle(lp_download_path, 96)
)
return
truncated_path = truncate_middle(lp_download_path, 96)
logger.set_tqdm_description(
"Downloading %s" % truncated_path)
download.download_media(
icloud, photo, lp_download_path, lp_size
)
def get_threads_count():
"""Disable threads if we have until_found or recent arguments"""
if until_found is None and recent is None:
return threads_num # pragma: no cover
return 1
download_queue = queue.Queue(get_threads_count())
consecutive_files_found = Counter(0)
def should_break(counter):
"""Exit if until_found condition is reached"""
return until_found is not None and counter.value() >= until_found
def worker(counter):
"""Threaded worker"""
while True:
item = download_queue.get()
if item is None:
break
download_photo(counter, item)
download_queue.task_done()
threads = []
for _ in range(get_threads_count()):
thread = threading.Thread(
target=worker, args=(
consecutive_files_found, ))
thread.daemon = True
thread.start()
threads.append(thread)
photos_iterator = iter(photos_enumerator)
while True:
try:
if should_break(consecutive_files_found):
logger.tqdm_write(
"Found %d consecutive previously downloaded photos. Exiting" %
until_found)
break
download_queue.put(next(photos_iterator))
except StopIteration:
break
if not should_break(consecutive_files_found):
download_queue.join()
for _ in threads:
download_queue.put(None)
for thread in threads:
thread.join()
if only_print_filenames:
sys.exit(0)
logger.info("All photos have been downloaded!")
if auto_delete:
autodelete_photos(icloud, folder_structure, directory)
| [] | [] | ["FORCE_TQDM", "CLIENT_ID"] | [] | ["FORCE_TQDM", "CLIENT_ID"] | python | 2 | 0 |
store/tikv/2pc_test.go | // Copyright 2016 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package tikv
import (
"bytes"
"context"
"math"
"math/rand"
"strings"
"sync"
"sync/atomic"
"time"
. "github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/kvproto/pkg/kvrpcpb"
"github.com/pingcap/tidb/kv"
"github.com/pingcap/tidb/store/mockstore/mocktikv"
"github.com/pingcap/tidb/store/tikv/oracle"
"github.com/pingcap/tidb/store/tikv/tikvrpc"
)
type testCommitterSuite struct {
OneByOneSuite
cluster *mocktikv.Cluster
store *tikvStore
mvccStore mocktikv.MVCCStore
}
var _ = SerialSuites(&testCommitterSuite{})
func (s *testCommitterSuite) SetUpSuite(c *C) {
atomic.StoreUint64(&ManagedLockTTL, 3000) // 3s
s.OneByOneSuite.SetUpSuite(c)
}
func (s *testCommitterSuite) SetUpTest(c *C) {
s.cluster = mocktikv.NewCluster()
mocktikv.BootstrapWithMultiRegions(s.cluster, []byte("a"), []byte("b"), []byte("c"))
mvccStore, err := mocktikv.NewMVCCLevelDB("")
c.Assert(err, IsNil)
s.mvccStore = mvccStore
client := mocktikv.NewRPCClient(s.cluster, mvccStore)
pdCli := &codecPDClient{mocktikv.NewPDClient(s.cluster)}
spkv := NewMockSafePointKV()
store, err := newTikvStore("mocktikv-store", pdCli, spkv, client, false, nil)
c.Assert(err, IsNil)
store.EnableTxnLocalLatches(1024000)
s.store = store
CommitMaxBackoff = 1000
}
func (s *testCommitterSuite) TearDownSuite(c *C) {
CommitMaxBackoff = 20000
s.store.Close()
s.OneByOneSuite.TearDownSuite(c)
}
func (s *testCommitterSuite) begin(c *C) *tikvTxn {
txn, err := s.store.Begin()
c.Assert(err, IsNil)
return txn.(*tikvTxn)
}
func (s *testCommitterSuite) checkValues(c *C, m map[string]string) {
txn := s.begin(c)
for k, v := range m {
val, err := txn.Get(context.TODO(), []byte(k))
c.Assert(err, IsNil)
c.Assert(string(val), Equals, v)
}
}
func (s *testCommitterSuite) mustCommit(c *C, m map[string]string) {
txn := s.begin(c)
for k, v := range m {
err := txn.Set([]byte(k), []byte(v))
c.Assert(err, IsNil)
}
err := txn.Commit(context.Background())
c.Assert(err, IsNil)
s.checkValues(c, m)
}
func randKV(keyLen, valLen int) (string, string) {
const letters = "abc"
k, v := make([]byte, keyLen), make([]byte, valLen)
for i := range k {
k[i] = letters[rand.Intn(len(letters))]
}
for i := range v {
v[i] = letters[rand.Intn(len(letters))]
}
return string(k), string(v)
}
func (s *testCommitterSuite) TestCommitRollback(c *C) {
s.mustCommit(c, map[string]string{
"a": "a",
"b": "b",
"c": "c",
})
txn := s.begin(c)
txn.Set([]byte("a"), []byte("a1"))
txn.Set([]byte("b"), []byte("b1"))
txn.Set([]byte("c"), []byte("c1"))
s.mustCommit(c, map[string]string{
"c": "c2",
})
err := txn.Commit(context.Background())
c.Assert(err, NotNil)
s.checkValues(c, map[string]string{
"a": "a",
"b": "b",
"c": "c2",
})
}
func (s *testCommitterSuite) TestPrewriteRollback(c *C) {
s.mustCommit(c, map[string]string{
"a": "a0",
"b": "b0",
})
ctx := context.Background()
txn1 := s.begin(c)
err := txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
committer, err := newTwoPhaseCommitterWithInit(txn1, 0)
c.Assert(err, IsNil)
err = committer.prewriteMutations(NewBackoffer(ctx, PrewriteMaxBackoff), committer.mutations)
c.Assert(err, IsNil)
txn2 := s.begin(c)
v, err := txn2.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a0"))
err = committer.prewriteMutations(NewBackoffer(ctx, PrewriteMaxBackoff), committer.mutations)
if err != nil {
// Retry.
txn1 = s.begin(c)
err = txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
committer, err = newTwoPhaseCommitterWithInit(txn1, 0)
c.Assert(err, IsNil)
err = committer.prewriteMutations(NewBackoffer(ctx, PrewriteMaxBackoff), committer.mutations)
c.Assert(err, IsNil)
}
committer.commitTS, err = s.store.oracle.GetTimestamp(ctx)
c.Assert(err, IsNil)
err = committer.commitMutations(NewBackoffer(ctx, CommitMaxBackoff), committerMutations{keys: [][]byte{[]byte("a")}})
c.Assert(err, IsNil)
txn3 := s.begin(c)
v, err = txn3.Get(context.TODO(), []byte("b"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("b1"))
}
func (s *testCommitterSuite) TestContextCancel(c *C) {
txn1 := s.begin(c)
err := txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
committer, err := newTwoPhaseCommitterWithInit(txn1, 0)
c.Assert(err, IsNil)
bo := NewBackoffer(context.Background(), PrewriteMaxBackoff)
backoffer, cancel := bo.Fork()
cancel() // cancel the context
err = committer.prewriteMutations(backoffer, committer.mutations)
c.Assert(errors.Cause(err), Equals, context.Canceled)
}
func (s *testCommitterSuite) TestContextCancel2(c *C) {
txn := s.begin(c)
err := txn.Set([]byte("a"), []byte("a"))
c.Assert(err, IsNil)
err = txn.Set([]byte("b"), []byte("b"))
c.Assert(err, IsNil)
ctx, cancel := context.WithCancel(context.Background())
err = txn.Commit(ctx)
c.Assert(err, IsNil)
cancel()
// Secondary keys should not be canceled.
time.Sleep(time.Millisecond * 20)
c.Assert(s.isKeyLocked(c, []byte("b")), IsFalse)
}
func (s *testCommitterSuite) TestContextCancelRetryable(c *C) {
txn1, txn2, txn3 := s.begin(c), s.begin(c), s.begin(c)
// txn1 locks "b"
err := txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
committer, err := newTwoPhaseCommitterWithInit(txn1, 0)
c.Assert(err, IsNil)
err = committer.prewriteMutations(NewBackoffer(context.Background(), PrewriteMaxBackoff), committer.mutations)
c.Assert(err, IsNil)
// txn3 writes "c"
err = txn3.Set([]byte("c"), []byte("c3"))
c.Assert(err, IsNil)
err = txn3.Commit(context.Background())
c.Assert(err, IsNil)
// txn2 writes "a"(PK), "b", "c" on different regions.
// "c" will return a retryable error.
// "b" will get a Locked error first, then the context must be canceled after backoff for lock.
err = txn2.Set([]byte("a"), []byte("a2"))
c.Assert(err, IsNil)
err = txn2.Set([]byte("b"), []byte("b2"))
c.Assert(err, IsNil)
err = txn2.Set([]byte("c"), []byte("c2"))
c.Assert(err, IsNil)
err = txn2.Commit(context.Background())
c.Assert(err, NotNil)
c.Assert(kv.ErrWriteConflictInTiDB.Equal(err), IsTrue, Commentf("err: %s", err))
}
func (s *testCommitterSuite) mustGetRegionID(c *C, key []byte) uint64 {
loc, err := s.store.regionCache.LocateKey(NewBackoffer(context.Background(), getMaxBackoff), key)
c.Assert(err, IsNil)
return loc.Region.id
}
func (s *testCommitterSuite) isKeyLocked(c *C, key []byte) bool {
ver, err := s.store.CurrentVersion()
c.Assert(err, IsNil)
bo := NewBackoffer(context.Background(), getMaxBackoff)
req := tikvrpc.NewRequest(tikvrpc.CmdGet, &kvrpcpb.GetRequest{
Key: key,
Version: ver.Ver,
})
loc, err := s.store.regionCache.LocateKey(bo, key)
c.Assert(err, IsNil)
resp, err := s.store.SendReq(bo, req, loc.Region, readTimeoutShort)
c.Assert(err, IsNil)
c.Assert(resp.Resp, NotNil)
keyErr := (resp.Resp.(*kvrpcpb.GetResponse)).GetError()
return keyErr.GetLocked() != nil
}
func (s *testCommitterSuite) TestPrewriteCancel(c *C) {
// Setup region delays for key "b" and "c".
delays := map[uint64]time.Duration{
s.mustGetRegionID(c, []byte("b")): time.Millisecond * 10,
s.mustGetRegionID(c, []byte("c")): time.Millisecond * 20,
}
s.store.client = &slowClient{
Client: s.store.client,
regionDelays: delays,
}
txn1, txn2 := s.begin(c), s.begin(c)
// txn2 writes "b"
err := txn2.Set([]byte("b"), []byte("b2"))
c.Assert(err, IsNil)
err = txn2.Commit(context.Background())
c.Assert(err, IsNil)
// txn1 writes "a"(PK), "b", "c" on different regions.
// "b" will return an error and cancel commit.
err = txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("b"), []byte("b1"))
c.Assert(err, IsNil)
err = txn1.Set([]byte("c"), []byte("c1"))
c.Assert(err, IsNil)
err = txn1.Commit(context.Background())
c.Assert(err, NotNil)
// "c" should be cleaned up in reasonable time.
for i := 0; i < 50; i++ {
if !s.isKeyLocked(c, []byte("c")) {
return
}
time.Sleep(time.Millisecond * 10)
}
c.Fail()
}
// slowClient wraps rpcClient and makes some regions respond with delay.
type slowClient struct {
Client
regionDelays map[uint64]time.Duration
}
func (c *slowClient) SendReq(ctx context.Context, addr string, req *tikvrpc.Request, timeout time.Duration) (*tikvrpc.Response, error) {
for id, delay := range c.regionDelays {
reqCtx := &req.Context
if reqCtx.GetRegionId() == id {
time.Sleep(delay)
}
}
return c.Client.SendRequest(ctx, addr, req, timeout)
}
func (s *testCommitterSuite) TestIllegalTso(c *C) {
txn := s.begin(c)
data := map[string]string{
"name": "aa",
"age": "12",
}
for k, v := range data {
err := txn.Set([]byte(k), []byte(v))
c.Assert(err, IsNil)
}
// make start ts bigger.
txn.startTS = uint64(math.MaxUint64)
err := txn.Commit(context.Background())
c.Assert(err, NotNil)
errMsgMustContain(c, err, "invalid txnStartTS")
}
func errMsgMustContain(c *C, err error, msg string) {
c.Assert(strings.Contains(err.Error(), msg), IsTrue)
}
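// newTwoPhaseCommitterWithInit creates a twoPhaseCommitter for txn and
// initializes its keys and mutations before returning it.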
func newTwoPhaseCommitterWithInit(txn *tikvTxn, connID uint64) (*twoPhaseCommitter, error) {
c, err := newTwoPhaseCommitter(txn, connID)
if err != nil {
return nil, errors.Trace(err)
}
if err = c.initKeysAndMutations(); err != nil {
return nil, errors.Trace(err)
}
return c, nil
}
func (s *testCommitterSuite) TestCommitBeforePrewrite(c *C) {
txn := s.begin(c)
err := txn.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
committer, err := newTwoPhaseCommitterWithInit(txn, 0)
c.Assert(err, IsNil)
ctx := context.Background()
err = committer.cleanupMutations(NewBackoffer(ctx, cleanupMaxBackoff), committer.mutations)
c.Assert(err, IsNil)
err = committer.prewriteMutations(NewBackoffer(ctx, PrewriteMaxBackoff), committer.mutations)
c.Assert(err, NotNil)
errMsgMustContain(c, err, "conflictCommitTS")
}
func (s *testCommitterSuite) TestPrewritePrimaryKeyFailed(c *C) {
// commit (a,a1)
txn1 := s.begin(c)
err := txn1.Set([]byte("a"), []byte("a1"))
c.Assert(err, IsNil)
err = txn1.Commit(context.Background())
c.Assert(err, IsNil)
// check a
txn := s.begin(c)
v, err := txn.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a1"))
// set txn2's startTs before txn1's
txn2 := s.begin(c)
txn2.startTS = txn1.startTS - 1
err = txn2.Set([]byte("a"), []byte("a2"))
c.Assert(err, IsNil)
err = txn2.Set([]byte("b"), []byte("b2"))
c.Assert(err, IsNil)
 // prewrite: primary a fails, b succeeds
err = txn2.Commit(context.Background())
c.Assert(err, NotNil)
// txn2 failed with a rollback for record a.
txn = s.begin(c)
v, err = txn.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a1"))
_, err = txn.Get(context.TODO(), []byte("b"))
errMsgMustContain(c, err, "key not exist")
 // clean again, shouldn't fail when a rollback already exists.
ctx := context.Background()
committer, err := newTwoPhaseCommitterWithInit(txn2, 0)
c.Assert(err, IsNil)
err = committer.cleanupMutations(NewBackoffer(ctx, cleanupMaxBackoff), committer.mutations)
c.Assert(err, IsNil)
// check the data after rollback twice.
txn = s.begin(c)
v, err = txn.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a1"))
// update data in a new txn, should be success.
err = txn.Set([]byte("a"), []byte("a3"))
c.Assert(err, IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
// check value
txn = s.begin(c)
v, err = txn.Get(context.TODO(), []byte("a"))
c.Assert(err, IsNil)
c.Assert(v, BytesEquals, []byte("a3"))
}
func (s *testCommitterSuite) TestWrittenKeysOnConflict(c *C) {
 // This test checks that when there is a write conflict, the written keys are collected,
 // so we can use them to clean up keys.
region, _ := s.cluster.GetRegionByKey([]byte("x"))
newRegionID := s.cluster.AllocID()
newPeerID := s.cluster.AllocID()
s.cluster.Split(region.Id, newRegionID, []byte("y"), []uint64{newPeerID}, newPeerID)
var totalTime time.Duration
for i := 0; i < 10; i++ {
txn1 := s.begin(c)
txn2 := s.begin(c)
txn2.Set([]byte("x1"), []byte("1"))
committer2, err := newTwoPhaseCommitterWithInit(txn2, 2)
c.Assert(err, IsNil)
err = committer2.execute(context.Background())
c.Assert(err, IsNil)
txn1.Set([]byte("x1"), []byte("1"))
txn1.Set([]byte("y1"), []byte("2"))
committer1, err := newTwoPhaseCommitterWithInit(txn1, 2)
c.Assert(err, IsNil)
err = committer1.execute(context.Background())
c.Assert(err, NotNil)
committer1.cleanWg.Wait()
txn3 := s.begin(c)
start := time.Now()
txn3.Get(context.TODO(), []byte("y1"))
totalTime += time.Since(start)
txn3.Commit(context.Background())
}
c.Assert(totalTime, Less, time.Millisecond*200)
}
func (s *testCommitterSuite) TestPrewriteTxnSize(c *C) {
// Prepare two regions first: (, 100) and [100, )
region, _ := s.cluster.GetRegionByKey([]byte{50})
newRegionID := s.cluster.AllocID()
newPeerID := s.cluster.AllocID()
s.cluster.Split(region.Id, newRegionID, []byte{100}, []uint64{newPeerID}, newPeerID)
txn := s.begin(c)
var val [1024]byte
for i := byte(50); i < 120; i++ {
err := txn.Set([]byte{i}, val[:])
c.Assert(err, IsNil)
}
committer, err := newTwoPhaseCommitterWithInit(txn, 1)
c.Assert(err, IsNil)
ctx := context.Background()
err = committer.prewriteMutations(NewBackoffer(ctx, PrewriteMaxBackoff), committer.mutations)
c.Assert(err, IsNil)
// Check the written locks in the first region (50 keys)
for i := byte(50); i < 100; i++ {
lock := s.getLockInfo(c, []byte{i})
c.Assert(int(lock.TxnSize), Equals, 50)
}
// Check the written locks in the second region (20 keys)
for i := byte(100); i < 120; i++ {
lock := s.getLockInfo(c, []byte{i})
c.Assert(int(lock.TxnSize), Equals, 20)
}
}
func (s *testCommitterSuite) TestRejectCommitTS(c *C) {
txn := s.begin(c)
c.Assert(txn.Set([]byte("x"), []byte("v")), IsNil)
committer, err := newTwoPhaseCommitterWithInit(txn, 1)
c.Assert(err, IsNil)
bo := NewBackoffer(context.Background(), getMaxBackoff)
loc, err := s.store.regionCache.LocateKey(bo, []byte("x"))
c.Assert(err, IsNil)
mutations := []*kvrpcpb.Mutation{
{
Op: committer.mutations.ops[0],
Key: committer.mutations.keys[0],
Value: committer.mutations.values[0],
},
}
prewrite := &kvrpcpb.PrewriteRequest{
Mutations: mutations,
PrimaryLock: committer.primary(),
StartVersion: committer.startTS,
LockTtl: committer.lockTTL,
MinCommitTs: committer.startTS + 100, // Set minCommitTS
}
req := tikvrpc.NewRequest(tikvrpc.CmdPrewrite, prewrite)
_, err = s.store.SendReq(bo, req, loc.Region, readTimeoutShort)
c.Assert(err, IsNil)
// Make commitTS less than minCommitTS.
committer.commitTS = committer.startTS + 1
 // Ensure that the new commit ts is greater than minCommitTS when retrying
time.Sleep(3 * time.Millisecond)
err = committer.commitMutations(bo, committer.mutations)
c.Assert(err, IsNil)
// Use startTS+2 to read the data and get nothing.
 // Use math.MaxUint64 to read the data and succeed.
// That means the final commitTS > startTS+2, it's not the one we provide.
 // So we cover the retry commitTS logic.
txn1, err := s.store.BeginWithStartTS(committer.startTS + 2)
c.Assert(err, IsNil)
_, err = txn1.Get(bo.ctx, []byte("x"))
c.Assert(kv.IsErrNotFound(err), IsTrue)
txn2, err := s.store.BeginWithStartTS(math.MaxUint64)
c.Assert(err, IsNil)
val, err := txn2.Get(bo.ctx, []byte("x"))
c.Assert(err, IsNil)
c.Assert(bytes.Equal(val, []byte("v")), IsTrue)
}
func (s *testCommitterSuite) TestPessimisticPrewriteRequest(c *C) {
 // This test checks that the isPessimisticLock field is set in the request even when no keys are pessimistic locks.
txn := s.begin(c)
txn.SetOption(kv.Pessimistic, true)
err := txn.Set([]byte("t1"), []byte("v1"))
c.Assert(err, IsNil)
committer, err := newTwoPhaseCommitterWithInit(txn, 0)
c.Assert(err, IsNil)
committer.forUpdateTS = 100
var batch batchMutations
batch.mutations = committer.mutations.subRange(0, 1)
batch.region = RegionVerID{1, 1, 1}
req := committer.buildPrewriteRequest(batch, 1)
c.Assert(len(req.Prewrite().IsPessimisticLock), Greater, 0)
c.Assert(req.Prewrite().ForUpdateTs, Equals, uint64(100))
}
func (s *testCommitterSuite) TestUnsetPrimaryKey(c *C) {
 // This test checks committing a transaction after its original primary key has been deleted, so another key must serve as the primary.
key := kv.Key("key")
txn := s.begin(c)
c.Assert(txn.Set(key, key), IsNil)
c.Assert(txn.Commit(context.Background()), IsNil)
txn = s.begin(c)
txn.SetOption(kv.Pessimistic, true)
txn.SetOption(kv.PresumeKeyNotExists, nil)
txn.SetOption(kv.PresumeKeyNotExistsError, kv.NewExistErrInfo("name", "value"))
_, _ = txn.us.Get(context.TODO(), key)
c.Assert(txn.Set(key, key), IsNil)
txn.DelOption(kv.PresumeKeyNotExistsError)
txn.DelOption(kv.PresumeKeyNotExists)
lockCtx := &kv.LockCtx{ForUpdateTS: txn.startTS, WaitStartTime: time.Now()}
err := txn.LockKeys(context.Background(), lockCtx, key)
c.Assert(err, NotNil)
c.Assert(txn.Delete(key), IsNil)
key2 := kv.Key("key2")
c.Assert(txn.Set(key2, key2), IsNil)
err = txn.Commit(context.Background())
c.Assert(err, IsNil)
}
func (s *testCommitterSuite) TestPessimisticLockedKeysDedup(c *C) {
txn := s.begin(c)
txn.SetOption(kv.Pessimistic, true)
lockCtx := &kv.LockCtx{ForUpdateTS: 100, WaitStartTime: time.Now()}
err := txn.LockKeys(context.Background(), lockCtx, kv.Key("abc"), kv.Key("def"))
c.Assert(err, IsNil)
lockCtx = &kv.LockCtx{ForUpdateTS: 100, WaitStartTime: time.Now()}
err = txn.LockKeys(context.Background(), lockCtx, kv.Key("abc"), kv.Key("def"))
c.Assert(err, IsNil)
c.Assert(txn.lockKeys, HasLen, 2)
}
func (s *testCommitterSuite) TestPessimisticTTL(c *C) {
key := kv.Key("key")
txn := s.begin(c)
txn.SetOption(kv.Pessimistic, true)
time.Sleep(time.Millisecond * 100)
lockCtx := &kv.LockCtx{ForUpdateTS: txn.startTS, WaitStartTime: time.Now()}
err := txn.LockKeys(context.Background(), lockCtx, key)
c.Assert(err, IsNil)
time.Sleep(time.Millisecond * 100)
key2 := kv.Key("key2")
lockCtx = &kv.LockCtx{ForUpdateTS: txn.startTS, WaitStartTime: time.Now()}
err = txn.LockKeys(context.Background(), lockCtx, key2)
c.Assert(err, IsNil)
lockInfo := s.getLockInfo(c, key)
msBeforeLockExpired := s.store.GetOracle().UntilExpired(txn.StartTS(), lockInfo.LockTtl)
c.Assert(msBeforeLockExpired, GreaterEqual, int64(100))
lr := newLockResolver(s.store)
bo := NewBackoffer(context.Background(), getMaxBackoff)
status, err := lr.getTxnStatus(bo, txn.startTS, key2, 0, txn.startTS, true)
c.Assert(err, IsNil)
c.Assert(status.ttl, GreaterEqual, lockInfo.LockTtl)
// Check primary lock TTL is auto increasing while the pessimistic txn is ongoing.
for i := 0; i < 50; i++ {
lockInfoNew := s.getLockInfo(c, key)
if lockInfoNew.LockTtl > lockInfo.LockTtl {
currentTS, err := lr.store.GetOracle().GetTimestamp(bo.ctx)
c.Assert(err, IsNil)
   // Check that the TTL is updated to a reasonable range.
expire := oracle.ExtractPhysical(txn.startTS) + int64(lockInfoNew.LockTtl)
now := oracle.ExtractPhysical(currentTS)
c.Assert(expire > now, IsTrue)
c.Assert(uint64(expire-now) <= atomic.LoadUint64(&ManagedLockTTL), IsTrue)
return
}
time.Sleep(100 * time.Millisecond)
}
c.Assert(false, IsTrue, Commentf("update pessimistic ttl fail"))
}
func (s *testCommitterSuite) TestPessimisticLockReturnValues(c *C) {
key := kv.Key("key")
key2 := kv.Key("key2")
txn := s.begin(c)
c.Assert(txn.Set(key, key), IsNil)
c.Assert(txn.Set(key2, key2), IsNil)
c.Assert(txn.Commit(context.Background()), IsNil)
txn = s.begin(c)
txn.SetOption(kv.Pessimistic, true)
lockCtx := &kv.LockCtx{ForUpdateTS: txn.startTS, WaitStartTime: time.Now()}
lockCtx.ReturnValues = true
lockCtx.Values = map[string]kv.ReturnedValue{}
c.Assert(txn.LockKeys(context.Background(), lockCtx, key, key2), IsNil)
c.Assert(lockCtx.Values, HasLen, 2)
c.Assert(lockCtx.Values[string(key)].Value, BytesEquals, []byte(key))
c.Assert(lockCtx.Values[string(key2)].Value, BytesEquals, []byte(key2))
}
// TestElapsedTTL tests that elapsed time is correct even if ts physical time is greater than local time.
func (s *testCommitterSuite) TestElapsedTTL(c *C) {
key := kv.Key("key")
txn := s.begin(c)
txn.startTS = oracle.ComposeTS(oracle.GetPhysical(time.Now().Add(time.Second*10)), 1)
txn.SetOption(kv.Pessimistic, true)
time.Sleep(time.Millisecond * 100)
lockCtx := &kv.LockCtx{
ForUpdateTS: oracle.ComposeTS(oracle.ExtractPhysical(txn.startTS)+100, 1),
WaitStartTime: time.Now(),
}
err := txn.LockKeys(context.Background(), lockCtx, key)
c.Assert(err, IsNil)
lockInfo := s.getLockInfo(c, key)
c.Assert(lockInfo.LockTtl-atomic.LoadUint64(&ManagedLockTTL), GreaterEqual, uint64(100))
c.Assert(lockInfo.LockTtl-atomic.LoadUint64(&ManagedLockTTL), Less, uint64(150))
}
// TestAcquireFalseTimeoutLock tests acquiring a key which is a secondary key of another transaction.
// The lock's own TTL is expired but the primary key is still alive due to heartbeats.
func (s *testCommitterSuite) TestAcquireFalseTimeoutLock(c *C) {
atomic.StoreUint64(&ManagedLockTTL, 1000) // 1s
defer atomic.StoreUint64(&ManagedLockTTL, 3000) // restore default test value
// k1 is the primary lock of txn1
k1 := kv.Key("k1")
// k2 is a secondary lock of txn1 and a key txn2 wants to lock
k2 := kv.Key("k2")
txn1 := s.begin(c)
txn1.SetOption(kv.Pessimistic, true)
// lock the primary key
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.startTS, WaitStartTime: time.Now()}
err := txn1.LockKeys(context.Background(), lockCtx, k1)
c.Assert(err, IsNil)
// lock the secondary key
lockCtx = &kv.LockCtx{ForUpdateTS: txn1.startTS, WaitStartTime: time.Now()}
err = txn1.LockKeys(context.Background(), lockCtx, k2)
c.Assert(err, IsNil)
// Heartbeats will increase the TTL of the primary key
// wait until secondary key exceeds its own TTL
time.Sleep(time.Duration(atomic.LoadUint64(&ManagedLockTTL)) * time.Millisecond)
txn2 := s.begin(c)
txn2.SetOption(kv.Pessimistic, true)
// test no wait
lockCtx = &kv.LockCtx{ForUpdateTS: txn2.startTS, LockWaitTime: kv.LockNoWait, WaitStartTime: time.Now()}
startTime := time.Now()
err = txn2.LockKeys(context.Background(), lockCtx, k2)
elapsed := time.Since(startTime)
// cannot acquire lock immediately thus error
c.Assert(err.Error(), Equals, ErrLockAcquireFailAndNoWaitSet.Error())
// it should return immediately
c.Assert(elapsed, Less, 50*time.Millisecond)
// test for wait limited time (200ms)
lockCtx = &kv.LockCtx{ForUpdateTS: txn2.startTS, LockWaitTime: 200, WaitStartTime: time.Now()}
err = txn2.LockKeys(context.Background(), lockCtx, k2)
// cannot acquire lock in time thus error
c.Assert(err.Error(), Equals, ErrLockWaitTimeout.Error())
}
func (s *testCommitterSuite) getLockInfo(c *C, key []byte) *kvrpcpb.LockInfo {
txn := s.begin(c)
err := txn.Set(key, key)
c.Assert(err, IsNil)
committer, err := newTwoPhaseCommitterWithInit(txn, 1)
c.Assert(err, IsNil)
bo := NewBackoffer(context.Background(), getMaxBackoff)
loc, err := s.store.regionCache.LocateKey(bo, key)
c.Assert(err, IsNil)
batch := batchMutations{region: loc.Region, mutations: committer.mutations.subRange(0, 1)}
req := committer.buildPrewriteRequest(batch, 1)
resp, err := s.store.SendReq(bo, req, loc.Region, readTimeoutShort)
c.Assert(err, IsNil)
c.Assert(resp.Resp, NotNil)
keyErrs := (resp.Resp.(*kvrpcpb.PrewriteResponse)).Errors
c.Assert(keyErrs, HasLen, 1)
locked := keyErrs[0].Locked
c.Assert(locked, NotNil)
return locked
}
func (s *testCommitterSuite) TestPkNotFound(c *C) {
atomic.StoreUint64(&ManagedLockTTL, 100) // 100ms
defer atomic.StoreUint64(&ManagedLockTTL, 3000) // restore default value
// k1 is the primary lock of txn1
k1 := kv.Key("k1")
// k2 is a secondary lock of txn1 and a key txn2 wants to lock
k2 := kv.Key("k2")
k3 := kv.Key("k3")
txn1 := s.begin(c)
txn1.SetOption(kv.Pessimistic, true)
// lock the primary key
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.startTS, WaitStartTime: time.Now()}
err := txn1.LockKeys(context.Background(), lockCtx, k1)
c.Assert(err, IsNil)
// lock the secondary key
lockCtx = &kv.LockCtx{ForUpdateTS: txn1.startTS, WaitStartTime: time.Now()}
err = txn1.LockKeys(context.Background(), lockCtx, k2)
c.Assert(err, IsNil)
 // Stop the txn ttl manager and remove the primary key, as if the tidb server crashed and the primary key lock does not actually exist,
 // while the secondary lock operation succeeded
bo := NewBackoffer(context.Background(), pessimisticLockMaxBackoff)
txn1.committer.ttlManager.close()
err = txn1.committer.pessimisticRollbackMutations(bo, committerMutations{keys: [][]byte{k1}})
c.Assert(err, IsNil)
 // Txn2 tries to lock the secondary key k2; it would loop forever if the secondary lock left by txn1 were not resolved
txn2 := s.begin(c)
txn2.SetOption(kv.Pessimistic, true)
lockCtx = &kv.LockCtx{ForUpdateTS: txn2.startTS, WaitStartTime: time.Now()}
err = txn2.LockKeys(context.Background(), lockCtx, k2)
c.Assert(err, IsNil)
 // Using a smaller forUpdateTS cannot roll back this lock; the other lock attempt will fail
lockKey3 := &Lock{
Key: k3,
Primary: k1,
TxnID: txn1.startTS,
TTL: ManagedLockTTL,
TxnSize: txnCommitBatchSize,
LockType: kvrpcpb.Op_PessimisticLock,
LockForUpdateTS: txn1.startTS - 1,
}
cleanTxns := make(map[RegionVerID]struct{})
err = s.store.lockResolver.resolvePessimisticLock(bo, lockKey3, cleanTxns)
c.Assert(err, IsNil)
lockCtx = &kv.LockCtx{ForUpdateTS: txn1.startTS, WaitStartTime: time.Now()}
err = txn1.LockKeys(context.Background(), lockCtx, k3)
c.Assert(err, IsNil)
txn3 := s.begin(c)
txn3.SetOption(kv.Pessimistic, true)
lockCtx = &kv.LockCtx{ForUpdateTS: txn1.startTS - 1, WaitStartTime: time.Now(), LockWaitTime: kv.LockNoWait}
c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/txnNotFoundRetTTL", "return"), IsNil)
err = txn3.LockKeys(context.Background(), lockCtx, k3)
c.Assert(err.Error(), Equals, ErrLockAcquireFailAndNoWaitSet.Error())
c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/txnNotFoundRetTTL"), IsNil)
}
func (s *testCommitterSuite) TestPessimisticLockPrimary(c *C) {
// a is the primary lock of txn1
k1 := kv.Key("a")
// b is a secondary lock of txn1 and a key txn2 wants to lock, b is on another region
k2 := kv.Key("b")
txn1 := s.begin(c)
txn1.SetOption(kv.Pessimistic, true)
// txn1 lock k1
lockCtx := &kv.LockCtx{ForUpdateTS: txn1.startTS, WaitStartTime: time.Now()}
err := txn1.LockKeys(context.Background(), lockCtx, k1)
c.Assert(err, IsNil)
 // txn2 wants to lock k1 and k2; k1 (the pk) is blocked by txn1. pessimisticLockKeys has been changed to
 // lock the primary key first and then the secondary keys concurrently, so k2 should not be locked by txn2
doneCh := make(chan error)
go func() {
txn2 := s.begin(c)
txn2.SetOption(kv.Pessimistic, true)
lockCtx2 := &kv.LockCtx{ForUpdateTS: txn2.startTS, WaitStartTime: time.Now(), LockWaitTime: 200}
waitErr := txn2.LockKeys(context.Background(), lockCtx2, k1, k2)
doneCh <- waitErr
}()
time.Sleep(50 * time.Millisecond)
 // txn3 should lock k2 successfully using no wait
txn3 := s.begin(c)
txn3.SetOption(kv.Pessimistic, true)
lockCtx3 := &kv.LockCtx{ForUpdateTS: txn3.startTS, WaitStartTime: time.Now(), LockWaitTime: kv.LockNoWait}
c.Assert(failpoint.Enable("github.com/pingcap/tidb/store/tikv/txnNotFoundRetTTL", "return"), IsNil)
err = txn3.LockKeys(context.Background(), lockCtx3, k2)
c.Assert(failpoint.Disable("github.com/pingcap/tidb/store/tikv/txnNotFoundRetTTL"), IsNil)
c.Assert(err, IsNil)
waitErr := <-doneCh
c.Assert(ErrLockWaitTimeout.Equal(waitErr), IsTrue)
}
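// mutationsOfKeys returns the subset of the committer's mutations whose keys
// appear in the given keys, preserving the committer's original order.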
func (c *twoPhaseCommitter) mutationsOfKeys(keys [][]byte) committerMutations {
var res committerMutations
for i := range c.mutations.keys {
for _, key := range keys {
if bytes.Equal(c.mutations.keys[i], key) {
res.push(c.mutations.ops[i], c.mutations.keys[i], c.mutations.values[i], c.mutations.isPessimisticLock[i])
break
}
}
}
return res
}
func (s *testCommitterSuite) TestCommitDeadLock(c *C) {
 // Split into two regions so that k1 and k2 fall in different regions.
s.cluster.SplitKeys(s.mvccStore, kv.Key("z"), kv.Key("a"), 2)
k1 := kv.Key("a_deadlock_k1")
k2 := kv.Key("y_deadlock_k2")
region1, _ := s.cluster.GetRegionByKey(k1)
region2, _ := s.cluster.GetRegionByKey(k2)
c.Assert(region1.Id != region2.Id, IsTrue)
txn1 := s.begin(c)
txn1.Set(k1, []byte("t1"))
txn1.Set(k2, []byte("t1"))
commit1, err := newTwoPhaseCommitterWithInit(txn1, 1)
c.Assert(err, IsNil)
commit1.primaryKey = k1
commit1.txnSize = 1000 * 1024 * 1024
commit1.lockTTL = txnLockTTL(txn1.startTime, commit1.txnSize)
txn2 := s.begin(c)
txn2.Set(k1, []byte("t2"))
txn2.Set(k2, []byte("t2"))
commit2, err := newTwoPhaseCommitterWithInit(txn2, 2)
c.Assert(err, IsNil)
commit2.primaryKey = k2
commit2.txnSize = 1000 * 1024 * 1024
commit2.lockTTL = txnLockTTL(txn1.startTime, commit2.txnSize)
s.cluster.ScheduleDelay(txn2.startTS, region1.Id, 5*time.Millisecond)
s.cluster.ScheduleDelay(txn1.startTS, region2.Id, 5*time.Millisecond)
 // Txn1 prewrites k1, k2 and txn2 prewrites k2, k1; the large txn
 // protocol runs ttlManager and updates their TTLs, which can cause a deadlock.
ch := make(chan error, 2)
var wg sync.WaitGroup
wg.Add(1)
go func() {
ch <- commit2.execute(context.Background())
wg.Done()
}()
ch <- commit1.execute(context.Background())
wg.Wait()
close(ch)
res := 0
for e := range ch {
if e != nil {
res++
}
}
c.Assert(res, Equals, 1)
}
| [] | [] | [] | [] | [] | go | null | null |
scripts/testrunner.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2011-2018, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
import sys
import os
import io
import getopt
import unittest
import doctest
import inspect
from multiprocessing import Process, Queue
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors.execute import CellExecutionError
import coverage
VERBOSE = 2
def create_examples_testsuite():
# gather information on examples
# all functions inside the examples starting with 'ex_' or 'recipe_'
# are considered as tests
# find example files in examples directory
root_dir = 'examples/'
files = []
skip = ['__init__.py']
for root, _, filenames in os.walk(root_dir):
for filename in filenames:
if filename in skip or filename[-3:] != '.py':
continue
if 'examples/data' in root:
continue
f = os.path.join(root, filename)
f = f.replace('/', '.')
f = f[:-3]
files.append(f)
# create empty testsuite
suite = unittest.TestSuite()
    # find matching example functions in each module
for idx, module in enumerate(files):
module1, func = module.split('.')
module = __import__(module)
func = getattr(module, func)
funcs = inspect.getmembers(func, inspect.isfunction)
[suite.addTest(unittest.FunctionTestCase(v))
for k, v in funcs if k.startswith(("ex_", "recipe_"))]
return suite
class NotebookTest(unittest.TestCase):
def __init__(self, nbfile, cov):
setattr(self.__class__, nbfile, staticmethod(self._runTest))
super(NotebookTest, self).__init__(nbfile)
self.nbfile = nbfile
self.cov = cov
def _runTest(self):
kernel = 'python%d' % sys.version_info[0]
cur_dir = os.path.dirname(self.nbfile)
with open(self.nbfile) as f:
nb = nbformat.read(f, as_version=4)
if self.cov:
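            # Prepend a hidden cell that starts coverage collection inside the
            # notebook kernel and makes the notebook's directory importable.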
covdict = {'cell_type': 'code', 'execution_count': 1,
'metadata': {'collapsed': True}, 'outputs': [],
'nbsphinx': 'hidden',
'source': 'import coverage\n'
'coverage.process_startup()\n'
'import sys\n'
'sys.path.append("{0}")\n'.format(cur_dir)
}
nb['cells'].insert(0, nbformat.from_dict(covdict))
exproc = ExecutePreprocessor(kernel_name=kernel, timeout=600)
try:
run_dir = os.getenv('WRADLIB_BUILD_DIR', cur_dir)
exproc.preprocess(nb, {'metadata': {'path': run_dir}})
except CellExecutionError as e:
raise e
if self.cov:
nb['cells'].pop(0)
with io.open(self.nbfile, 'wt') as f:
nbformat.write(nb, f)
self.assertTrue(True)
def create_notebooks_testsuite(**kwargs):
# gather information on notebooks
# all notebooks in the notebooks folder
# are considered as tests
# find notebook files in notebooks directory
cov = kwargs.pop('cov')
root_dir = os.getenv('WRADLIB_NOTEBOOKS', 'notebooks')
files = []
skip = []
for root, _, filenames in os.walk(root_dir):
for filename in filenames:
if filename in skip or filename[-6:] != '.ipynb':
continue
# skip checkpoints
if '/.' in root:
continue
f = os.path.join(root, filename)
files.append(f)
    # create one TestSuite per notebook to work around the test runner's
    # memory overconsumption on travis-ci
suites = []
for file in files:
suite = unittest.TestSuite()
suite.addTest(NotebookTest(file, cov))
suites.append(suite)
return suites
def create_doctest_testsuite():
    # gather information on doctests, searching only in the wradlib folder
root_dir = 'wradlib/'
files = []
skip = ['__init__.py', 'version.py', 'bufr.py', 'test_']
for root, _, filenames in os.walk(root_dir):
for filename in filenames:
if filename in skip or filename[-3:] != '.py':
continue
if 'wradlib/tests' in root:
continue
f = os.path.join(root, filename)
f = f.replace('/', '.')
f = f[:-3]
files.append(f)
# put modules in doctest suite
suite = unittest.TestSuite()
for module in files:
suite.addTest(doctest.DocTestSuite(module))
return suite
def create_unittest_testsuite():
# gather information on tests (unittest etc)
root_dir = 'wradlib/tests/'
return unittest.defaultTestLoader.discover(root_dir)
def single_suite_process(queue, test, verbosity, **kwargs):
test_cov = kwargs.pop('coverage', 0)
test_nb = kwargs.pop('notebooks', 0)
if test_cov and not test_nb:
cov = coverage.coverage()
cov.start()
all_success = 1
for ts in test:
if ts.countTestCases() != 0:
res = unittest.TextTestRunner(verbosity=verbosity).run(ts)
all_success = all_success & res.wasSuccessful()
if test_cov and not test_nb:
cov.stop()
cov.save()
queue.put(all_success)
def keep_tests(suite, arg):
newsuite = unittest.TestSuite()
try:
for tc in suite:
try:
if tc.id().find(arg) != -1:
newsuite.addTest(tc)
except AttributeError:
new = keep_tests(tc, arg)
if new.countTestCases() != 0:
newsuite.addTest(new)
except TypeError:
pass
return newsuite
def main():
args = sys.argv[1:]
usage_message = """Usage: python testrunner.py options arg
If run without options, testrunner displays the usage message.
If all tests suites should be run, use the -a option.
If arg is given, only tests containing arg are run.
options:
-a
--all
Run all tests (examples, test, doctest, notebooks)
-m
Run all tests within a single testsuite [default]
-M
Run each suite as separate instance
-e
--example
Run only examples tests
-d
--doc
Run only doctests
-u
--unit
Run only unit test
-n
--notebook
Run only notebook test
-s
--use-subprocess
Run every testsuite in a subprocess.
-c
--coverage
Run notebook tests with code coverage
-v level
Set the level of verbosity.
0 - Silent
    1 - Quiet (produces a dot for each successful test)
2 - Verbose (default - produces a line of output for each test)
-h
Display usage information.
"""
test_all = 0
test_examples = 0
test_docs = 0
test_notebooks = 0
test_units = 0
test_subprocess = 0
test_cov = 0
verbosity = VERBOSE
try:
options, arg = getopt.getopt(args, 'aednuschv:',
['all', 'example', 'doc',
'notebook', 'unit', 'use-subprocess',
'coverage', 'help'])
except getopt.GetoptError as e:
err_exit(e.msg)
if not options:
err_exit(usage_message)
for name, value in options:
if name in ('-a', '--all'):
test_all = 1
elif name in ('-e', '--example'):
test_examples = 1
elif name in ('-d', '--doc'):
test_docs = 1
elif name in ('-n', '--notebook'):
test_notebooks = 1
elif name in ('-u', '--unit'):
test_units = 1
elif name in ('-s', '--use-subprocess'):
test_subprocess = 1
elif name in ('-c', '--coverage'):
test_cov = 1
elif name in ('-h', '--help'):
err_exit(usage_message, 0)
elif name == '-v':
verbosity = int(value)
else:
err_exit(usage_message)
if not (test_all or test_examples or test_docs or
test_notebooks or test_units):
err_exit('must specify one of: -a -e -d -n -u')
testSuite = []
if test_all:
testSuite.append(create_examples_testsuite())
testSuite.append(create_notebooks_testsuite(cov=test_cov))
testSuite.append(create_doctest_testsuite())
testSuite.append(create_unittest_testsuite())
elif test_examples:
testSuite.append(create_examples_testsuite())
elif test_notebooks:
testSuite.append(create_notebooks_testsuite(cov=test_cov))
elif test_docs:
testSuite.append(unittest.TestSuite(create_doctest_testsuite()))
elif test_units:
testSuite.append(create_unittest_testsuite())
all_success = 1
if test_subprocess:
for test in testSuite:
if arg:
test = keep_tests(test, arg[0])
queue = Queue()
keywords = {'coverage': test_cov, 'notebooks': test_notebooks}
proc = Process(target=single_suite_process,
args=(queue, test, verbosity),
kwargs=keywords)
proc.start()
result = queue.get()
proc.join()
            # all_success drops to 0 if any suite fails
all_success = all_success & result
else:
if test_cov and not test_notebooks:
cov = coverage.coverage()
cov.start()
for ts in testSuite:
if arg:
ts = keep_tests(ts, arg[0])
for test in ts:
if test.countTestCases() != 0:
result = unittest.TextTestRunner(verbosity=verbosity).\
run(test)
                    # all_success drops to 0 if any test run fails
all_success = all_success & result.wasSuccessful()
if test_cov and not test_notebooks:
cov.stop()
cov.save()
if all_success:
sys.exit(0)
else:
# This will return exit code 1
sys.exit("At least one test has failed. "
"Please see test report for details.")
def err_exit(message, rc=2):
sys.stderr.write("\n%s\n" % message)
sys.exit(rc)
if __name__ == '__main__':
main()
| [] | [] | ["WRADLIB_NOTEBOOKS", "WRADLIB_BUILD_DIR"] | [] | ["WRADLIB_NOTEBOOKS", "WRADLIB_BUILD_DIR"] | python | 2 | 0 |
cmd/frontend/internal/app/ui/handlers.go | package ui
import (
"context"
"html/template"
"log"
"net/http"
"net/http/httputil"
"net/url"
"os"
"regexp"
"strconv"
"strings"
"github.com/gorilla/mux"
"github.com/inconshreveable/log15"
"github.com/pkg/errors"
"github.com/sourcegraph/sourcegraph/cmd/frontend/auth"
"github.com/sourcegraph/sourcegraph/cmd/frontend/backend"
"github.com/sourcegraph/sourcegraph/cmd/frontend/envvar"
"github.com/sourcegraph/sourcegraph/cmd/frontend/globals"
"github.com/sourcegraph/sourcegraph/cmd/frontend/internal/app/assetsutil"
"github.com/sourcegraph/sourcegraph/cmd/frontend/internal/app/jscontext"
"github.com/sourcegraph/sourcegraph/cmd/frontend/internal/handlerutil"
"github.com/sourcegraph/sourcegraph/cmd/frontend/types"
"github.com/sourcegraph/sourcegraph/internal/actor"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/env"
"github.com/sourcegraph/sourcegraph/internal/errcode"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/repoupdater"
"github.com/sourcegraph/sourcegraph/internal/routevar"
"github.com/sourcegraph/sourcegraph/internal/vcs"
"github.com/sourcegraph/sourcegraph/internal/vcs/git"
)
type InjectedHTML struct {
HeadTop template.HTML
HeadBottom template.HTML
BodyTop template.HTML
BodyBottom template.HTML
}
type Metadata struct {
// Title is the title of the page for Twitter cards, OpenGraph, etc.
// e.g. "Open in Sourcegraph"
Title string
// Description is the description of the page for Twitter cards, OpenGraph,
// etc. e.g. "View this link in Sourcegraph Editor."
Description string
// ShowPreview controls whether or not OpenGraph/Twitter card/etc metadata is rendered.
ShowPreview bool
}
type Common struct {
Injected InjectedHTML
Metadata *Metadata
Context jscontext.JSContext
AssetURL string
Title string
Error *pageError
WebpackDevServer bool // whether the Webpack dev server is running (WEBPACK_DEV_SERVER env var)
// The fields below have zero values when not on a repo page.
Repo *types.Repo
 Rev string // unresolved / user-specified revision (e.g. "@master")
api.CommitID // resolved SHA1 revision
}
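// webpackDevServer reports whether the Webpack dev server is running, as
// indicated by the WEBPACK_DEV_SERVER environment variable.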
var webpackDevServer, _ = strconv.ParseBool(os.Getenv("WEBPACK_DEV_SERVER"))
// repoShortName trims the first path element of the given repo name if it has
// at least two path components.
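// For example, "github.com/gorilla/mux" becomes "gorilla/mux".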
func repoShortName(name api.RepoName) string {
split := strings.Split(string(name), "/")
if len(split) < 2 {
return string(name)
}
return strings.Join(split[1:], "/")
}
// newCommon builds a *Common data structure, returning an error if one occurs.
//
// In the event of the repository having been renamed, the request is handled
// by newCommon and nil, nil is returned. Basic usage looks like:
//
// common, err := newCommon(w, r, serveError)
// if err != nil {
// return err
// }
// if common == nil {
// return nil // request was handled
// }
//
// In the case of a repository that is cloning, a Common data structure is
// returned but it has an incomplete RevSpec.
func newCommon(w http.ResponseWriter, r *http.Request, title string, serveError func(w http.ResponseWriter, r *http.Request, err error, statusCode int)) (*Common, error) {
common := &Common{
Injected: InjectedHTML{
HeadTop: template.HTML(conf.Get().HtmlHeadTop),
HeadBottom: template.HTML(conf.Get().HtmlHeadBottom),
BodyTop: template.HTML(conf.Get().HtmlBodyTop),
BodyBottom: template.HTML(conf.Get().HtmlBodyBottom),
},
Context: jscontext.NewJSContextFromRequest(r),
AssetURL: assetsutil.URL("").String(),
Title: title,
Metadata: &Metadata{
Title: globals.Branding().BrandName,
Description: "Sourcegraph is a web-based code search and navigation tool for dev teams. Search, navigate, and review code. Find answers.",
ShowPreview: r.URL.Path == "/sign-in" && r.URL.RawQuery == "returnTo=%2F",
},
WebpackDevServer: webpackDevServer,
}
if _, ok := mux.Vars(r)["Repo"]; ok {
// Common repo pages (blob, tree, etc).
var err error
common.Repo, common.CommitID, err = handlerutil.GetRepoAndRev(r.Context(), mux.Vars(r))
isRepoEmptyError := routevar.ToRepoRev(mux.Vars(r)).Rev == "" && gitserver.IsRevisionNotFound(errors.Cause(err)) // should reply with HTTP 200
if err != nil && !isRepoEmptyError {
if e, ok := err.(*handlerutil.URLMovedError); ok {
// The repository has been renamed, e.g. "github.com/docker/docker"
// was renamed to "github.com/moby/moby" -> redirect the user now.
err = handlerutil.RedirectToNewRepoName(w, r, e.NewRepo)
if err != nil {
return nil, errors.Wrap(err, "when sending renamed repository redirect response")
}
return nil, nil
}
if e, ok := err.(backend.ErrRepoSeeOther); ok {
// Repo does not exist here, redirect to the recommended location.
u, err := url.Parse(e.RedirectURL)
if err != nil {
return nil, err
}
u.Path, u.RawQuery = r.URL.Path, r.URL.RawQuery
http.Redirect(w, r, u.String(), http.StatusSeeOther)
return nil, nil
}
if gitserver.IsRevisionNotFound(errors.Cause(err)) {
// Revision does not exist.
serveError(w, r, err, http.StatusNotFound)
return nil, nil
}
if _, ok := errors.Cause(err).(*gitserver.RepoNotCloneableErr); ok {
if errcode.IsNotFound(err) {
// Repository is not found.
serveError(w, r, err, http.StatusNotFound)
return nil, nil
}
// Repository is not clonable.
dangerouslyServeError(w, r, errors.New("repository could not be cloned"), http.StatusInternalServerError)
return nil, nil
}
if vcs.IsRepoNotExist(err) {
if vcs.IsCloneInProgress(err) {
// Repo is cloning.
return common, nil
}
// Repo does not exist.
serveError(w, r, err, http.StatusNotFound)
return nil, nil
}
if errcode.IsNotFound(err) {
// Repo does not exist.
serveError(w, r, err, http.StatusNotFound)
return nil, nil
}
if errcode.IsUnauthorized(err) {
// Not authorized to access repository.
serveError(w, r, err, http.StatusUnauthorized)
return nil, nil
}
return nil, err
}
if common.Repo.Name == "github.com/sourcegraphtest/Always500Test" {
return nil, errors.New("error caused by Always500Test repo name")
}
common.Rev = mux.Vars(r)["Rev"]
// Update gitserver contents for a repo whenever it is visited.
go func() {
ctx := context.Background()
gitserverRepo, err := backend.GitRepo(ctx, common.Repo)
if err != nil {
log15.Error("backend.GitRepo", "error", err)
return
}
_, err = repoupdater.DefaultClient.EnqueueRepoUpdate(ctx, gitserverRepo)
if err != nil {
log15.Error("EnqueueRepoUpdate", "error", err)
}
}()
}
return common, nil
}
type handlerFunc func(w http.ResponseWriter, r *http.Request) error
func serveBrandedPageString(titles string, description *string) handlerFunc {
return serveBasicPage(func(c *Common, r *http.Request) string {
return brandNameSubtitle(titles)
}, description)
}
func serveBasicPage(title func(c *Common, r *http.Request) string, description *string) handlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
common, err := newCommon(w, r, "", serveError)
if err != nil {
return err
}
if description != nil {
common.Metadata.Description = *description
}
if common == nil {
return nil // request was handled
}
common.Title = title(common, r)
return renderTemplate(w, "app.html", common)
}
}
func serveHome(w http.ResponseWriter, r *http.Request) error {
common, err := newCommon(w, r, globals.Branding().BrandName, serveError)
if err != nil {
return err
}
if common == nil {
return nil // request was handled
}
if envvar.SourcegraphDotComMode() && !actor.FromContext(r.Context()).IsAuthenticated() && !strings.Contains(r.UserAgent(), "Cookiebot") {
// The user is not signed in and tried to access Sourcegraph.com. Redirect to
// about.sourcegraph.com so they see general info page.
// Don't redirect Cookiebot so it can scan the website without authentication.
http.Redirect(w, r, (&url.URL{Scheme: aboutRedirectScheme, Host: aboutRedirectHost}).String(), http.StatusTemporaryRedirect)
return nil
}
// On non-Sourcegraph.com instances, there is no separate homepage, so redirect to /search.
r.URL.Path = "/search"
http.Redirect(w, r, r.URL.String(), http.StatusTemporaryRedirect)
return nil
}
func serveSignIn(w http.ResponseWriter, r *http.Request) error {
common, err := newCommon(w, r, "", serveError)
if err != nil {
return err
}
if common == nil {
return nil // request was handled
}
common.Title = brandNameSubtitle("Sign in")
return renderTemplate(w, "app.html", common)
}
// redirectTreeOrBlob redirects a blob page to a tree page if the file is actually a directory,
// or a tree page to a blob page if the directory is actually a file.
func redirectTreeOrBlob(routeName, path string, common *Common, w http.ResponseWriter, r *http.Request) (requestHandled bool, err error) {
 // NOTE: It makes no sense for this function to proceed if the commit ID
 // for the repository is empty. Most likely the repository clone is still
 // in progress.
if common.CommitID == "" {
return false, nil
}
if path == "/" || path == "" {
if routeName != routeRepo {
// Redirect to repo route
target := "/" + string(common.Repo.Name) + common.Rev
http.Redirect(w, r, target, http.StatusTemporaryRedirect)
return true, nil
}
return false, nil
}
cachedRepo, err := backend.CachedGitRepo(r.Context(), common.Repo)
if err != nil {
return false, err
}
stat, err := git.Stat(r.Context(), *cachedRepo, common.CommitID, path)
if err != nil {
if os.IsNotExist(err) {
serveError(w, r, err, http.StatusNotFound)
return true, nil
}
return false, err
}
expectedDir := routeName == routeTree
if stat.Mode().IsDir() != expectedDir {
target := "/" + string(common.Repo.Name) + common.Rev + "/-/"
if expectedDir {
target += "blob"
} else {
target += "tree"
}
target += path
http.Redirect(w, r, auth.SafeRedirectURL(target), http.StatusTemporaryRedirect)
return true, nil
}
return false, nil
}
// serveTree serves the tree (directory) pages.
func serveTree(title func(c *Common, r *http.Request) string) handlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
common, err := newCommon(w, r, "", serveError)
if err != nil {
return err
}
if common == nil {
return nil // request was handled
}
handled, err := redirectTreeOrBlob(routeTree, mux.Vars(r)["Path"], common, w, r)
if handled {
return nil
}
if err != nil {
return err
}
common.Title = title(common, r)
return renderTemplate(w, "app.html", common)
}
}
func serveRepoOrBlob(routeName string, title func(c *Common, r *http.Request) string) handlerFunc {
return func(w http.ResponseWriter, r *http.Request) error {
common, err := newCommon(w, r, "", serveError)
if err != nil {
return err
}
if common == nil {
return nil // request was handled
}
handled, err := redirectTreeOrBlob(routeName, mux.Vars(r)["Path"], common, w, r)
if handled {
return nil
}
if err != nil {
return err
}
common.Title = title(common, r)
q := r.URL.Query()
_, isNewQueryUX := q["sq"] // sq URL param is only set by new query UX in SearchNavbarItem.tsx
if search := q.Get("q"); search != "" && !isNewQueryUX {
// Redirect old search URLs:
//
// /github.com/gorilla/mux@24fca303ac6da784b9e8269f724ddeb0b2eea5e7?q=ErrMethodMismatch&utm_source=chrome-extension
// /github.com/gorilla/mux@24fca303ac6da784b9e8269f724ddeb0b2eea5e7/-/blob/mux.go?q=NewRouter
//
// To new ones:
//
// /search?q=repo:^github.com/gorilla/mux$+ErrMethodMismatch
//
// It does not apply the file: filter because that was not the behavior of the
// old blob URLs with a 'q' parameter either.
r.URL.Path = "/search"
q.Set("sq", "repo:^"+regexp.QuoteMeta(string(common.Repo.Name))+"$")
r.URL.RawQuery = q.Encode()
http.Redirect(w, r, r.URL.String(), http.StatusPermanentRedirect)
return nil
}
return renderTemplate(w, "app.html", common)
}
}
// searchBadgeHandler serves the search readme badges from the search-badger service
// https://github.com/sourcegraph/search-badger
var searchBadgeHandler = &httputil.ReverseProxy{
Director: func(r *http.Request) {
r.URL.Scheme = "http"
r.URL.Host = "search-badger"
r.URL.Path = "/"
},
ErrorLog: log.New(env.DebugOut, "search-badger proxy: ", log.LstdFlags),
}
| ["\"WEBPACK_DEV_SERVER\""] | [] | ["WEBPACK_DEV_SERVER"] | [] | ["WEBPACK_DEV_SERVER"] | go | 1 | 0 |
perfkitbenchmarker/providers/aws/aws_container_service.py | # Copyright 2017 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains classes/functions related to AWS container clusters."""
import json
import os
import uuid
from absl import flags
from perfkitbenchmarker import container_service
from perfkitbenchmarker import context
from perfkitbenchmarker import errors
from perfkitbenchmarker import resource
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers import aws
from perfkitbenchmarker.providers.aws import aws_load_balancer
from perfkitbenchmarker.providers.aws import aws_logs
from perfkitbenchmarker.providers.aws import aws_network
from perfkitbenchmarker.providers.aws import s3
from perfkitbenchmarker.providers.aws import util
import requests
import six
import yaml
FLAGS = flags.FLAGS
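# ECS task statuses that indicate the task is not yet running.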
_ECS_NOT_READY = frozenset(['PROVISIONING', 'PENDING'])
class EcrRepository(resource.BaseResource):
"""Class representing an Elastic Container Registry image repository."""
def __init__(self, name, region):
super(EcrRepository, self).__init__()
self.name = name
self.region = region
def _Create(self):
"""Creates the image repository."""
if self._Exists():
self.user_managed = True
return
create_cmd = util.AWS_PREFIX + [
'ecr', 'create-repository', '--region', self.region,
'--repository-name', self.name
]
_, stderr, retcode = vm_util.IssueCommand(
create_cmd, raise_on_failure=False)
if retcode:
if 'InsufficientInstanceCapacity' in stderr:
raise errors.Benchmarks.InsufficientCapacityCloudFailure(stderr)
if 'InstanceLimitExceeded' in stderr or 'VpcLimitExceeded' in stderr:
raise errors.Benchmarks.QuotaFailure(stderr)
raise errors.Resource.CreationError(
          'Failed to create ECR repository: {} return code: {}'.format(
              stderr, retcode))
def _Exists(self):
"""Returns True if the repository exists."""
describe_cmd = util.AWS_PREFIX + [
'ecr', 'describe-repositories', '--region', self.region,
'--repository-names', self.name
]
stdout, _, _ = vm_util.IssueCommand(
describe_cmd, suppress_warning=True, raise_on_failure=False)
if not stdout or not json.loads(stdout)['repositories']:
return False
return True
def _Delete(self):
"""Deletes the repository."""
delete_cmd = util.AWS_PREFIX + [
'ecr', 'delete-repository', '--region', self.region,
'--repository-name', self.name, '--force'
]
vm_util.IssueCommand(delete_cmd, raise_on_failure=False)
class ElasticContainerRegistry(container_service.BaseContainerRegistry):
"""Class for building and storing container images on AWS."""
CLOUD = aws.CLOUD
def __init__(self, registry_spec):
super(ElasticContainerRegistry, self).__init__(registry_spec)
self.account = self.project or util.GetAccount()
self.region = util.GetRegionFromZone(self.zone.split(',')[0])
self.repositories = []
def _Delete(self):
"""Deletes the repositories."""
for repository in self.repositories:
repository.Delete()
def Push(self, image):
"""Push a locally built image to the registry."""
repository_name = '{namespace}/{name}'.format(
namespace=self.name, name=image.name)
repository = EcrRepository(repository_name, self.region)
self.repositories.append(repository)
repository.Create()
super(ElasticContainerRegistry, self).Push(image)
def GetFullRegistryTag(self, image):
"""Gets the full tag of the image."""
tag = '{account}.dkr.ecr.{region}.amazonaws.com/{namespace}/{name}'.format(
account=self.account,
region=self.region,
namespace=self.name,
name=image)
return tag
def Login(self):
"""Logs in to the registry."""
get_login_cmd = util.AWS_PREFIX + [
'--region', self.region, 'ecr', 'get-login', '--no-include-email'
]
stdout, _, _ = vm_util.IssueCommand(get_login_cmd)
login_cmd = stdout.split()
vm_util.IssueCommand(login_cmd)
def RemoteBuild(self, image):
"""Build the image remotely."""
# TODO(ehankland) use AWS codebuild to build the image.
raise NotImplementedError()
class TaskDefinition(resource.BaseResource):
"""Class representing an AWS task definition."""
def __init__(self, name, container_spec, cluster):
super(TaskDefinition, self).__init__()
self.name = name
self.cpus = container_spec.cpus
self.memory = container_spec.memory
self.image = container_spec.image
self.container_port = container_spec.container_port
self.region = cluster.region
self.arn = None
self.log_group = aws_logs.LogGroup(self.region, 'pkb')
def _CreateDependencies(self):
"""Create the log group if it doesn't exist."""
if not self.log_group.Exists():
self.log_group.Create()
def _Create(self):
"""Create the task definition."""
register_cmd = util.AWS_PREFIX + [
'--region', self.region, 'ecs', 'register-task-definition', '--family',
self.name, '--execution-role-arn', 'ecsTaskExecutionRole',
'--network-mode', 'awsvpc', '--requires-compatibilities=FARGATE',
'--cpu',
str(int(1024 * self.cpus)), '--memory',
str(self.memory), '--container-definitions',
self._GetContainerDefinitions()
]
stdout, _, _ = vm_util.IssueCommand(register_cmd)
response = json.loads(stdout)
self.arn = response['taskDefinition']['taskDefinitionArn']
def _Delete(self):
"""Deregister the task definition."""
if self.arn is None:
return
deregister_cmd = util.AWS_PREFIX + [
'--region', self.region, 'ecs', 'deregister-task-definition',
'--task-definition', self.arn
]
vm_util.IssueCommand(deregister_cmd)
def _GetContainerDefinitions(self):
"""Returns a JSON representation of the container definitions."""
definitions = [{
'name': self.name,
'image': self.image,
'essential': True,
'portMappings': [{
'containerPort': self.container_port,
'protocol': 'TCP'
}],
'logConfiguration': {
'logDriver': 'awslogs',
'options': {
'awslogs-group': 'pkb',
'awslogs-region': self.region,
'awslogs-stream-prefix': 'pkb'
}
}
}]
return json.dumps(definitions)
class EcsTask(container_service.BaseContainer):
"""Class representing an ECS/Fargate task."""
def __init__(self, name, container_spec, cluster):
super(EcsTask, self).__init__(container_spec)
self.name = name
self.task_def = cluster.task_defs[name]
self.arn = None
self.region = cluster.region
self.cluster_name = cluster.name
self.subnet_id = cluster.network.subnet.id
self.ip_address = None
self.security_group_id = (
cluster.network.regional_network.vpc.default_security_group_id)
def _GetNetworkConfig(self):
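    """Returns the awsvpc network configuration for the task as JSON."""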
network_config = {
'awsvpcConfiguration': {
'subnets': [self.subnet_id],
'securityGroups': [self.security_group_id],
'assignPublicIp': 'ENABLED',
}
}
return json.dumps(network_config)
def _GetOverrides(self):
"""Returns a JSON representaion of task overrides.
While the container level resources can be overridden, they have no
effect on task level resources for Fargate tasks. This means
that modifying a container spec will only affect the command of any
new containers launched from it and not cpu/memory.
"""
overrides = {
'containerOverrides': [{
'name': self.name,
}]
}
if self.command:
overrides['containerOverrides'][0]['command'] = self.command
return json.dumps(overrides)
def _Create(self):
"""Creates the task."""
run_cmd = util.AWS_PREFIX + [
'--region', self.region, 'ecs', 'run-task', '--cluster',
self.cluster_name, '--task-definition', self.task_def.arn,
'--launch-type', 'FARGATE', '--network-configuration',
self._GetNetworkConfig(), '--overrides',
self._GetOverrides()
]
stdout, _, _ = vm_util.IssueCommand(run_cmd)
response = json.loads(stdout)
self.arn = response['tasks'][0]['taskArn']
def _PostCreate(self):
"""Gets the tasks IP address."""
container = self._GetTask()['containers'][0]
self.ip_address = container['networkInterfaces'][0]['privateIpv4Address']
def _DeleteDependencies(self):
"""Delete the task def."""
self.task_def.Delete()
def _Delete(self):
"""Deletes the task."""
if self.arn is None:
return
stop_cmd = util.AWS_PREFIX + [
'--region', self.region, 'ecs', 'stop-task', '--cluster',
self.cluster_name, '--task', self.arn
]
vm_util.IssueCommand(stop_cmd)
def _GetTask(self):
"""Returns a dictionary representation of the task."""
describe_cmd = util.AWS_PREFIX + [
'--region', self.region, 'ecs', 'describe-tasks', '--cluster',
self.cluster_name, '--tasks', self.arn
]
stdout, _, _ = vm_util.IssueCommand(describe_cmd)
response = json.loads(stdout)
return response['tasks'][0]
def _IsReady(self):
"""Returns true if the task has stopped pending."""
return self._GetTask()['lastStatus'] not in _ECS_NOT_READY
def WaitForExit(self, timeout=None):
"""Waits until the task has finished running."""
@vm_util.Retry(
timeout=timeout,
retryable_exceptions=(container_service.RetriableContainerException,))
def _WaitForExit():
task = self._GetTask()
if task['lastStatus'] != 'STOPPED':
raise container_service.RetriableContainerException(
'Task is not STOPPED.')
return task
return _WaitForExit()
def GetLogs(self):
"""Returns the logs from the container."""
task_id = self.arn.split('/')[-1]
log_stream = 'pkb/{name}/{task_id}'.format(name=self.name, task_id=task_id)
return six.text_type(
aws_logs.GetLogStreamAsString(self.region, log_stream, 'pkb'))
class EcsService(container_service.BaseContainerService):
"""Class representing an ECS/Fargate service."""
def __init__(self, name, container_spec, cluster):
super(EcsService, self).__init__(container_spec)
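    # A random client token (truncated to 32 characters) keeps the create-service call idempotent on retries.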
self.client_token = str(uuid.uuid4())[:32]
self.name = name
self.task_def = cluster.task_defs[name]
self.arn = None
self.region = cluster.region
self.cluster_name = cluster.name
self.subnet_id = cluster.network.subnet.id
self.security_group_id = (
cluster.network.regional_network.vpc.default_security_group_id)
self.load_balancer = aws_load_balancer.LoadBalancer(
[cluster.network.subnet])
self.target_group = aws_load_balancer.TargetGroup(
cluster.network.regional_network.vpc, self.container_port)
self.port = 80
def _CreateDependencies(self):
"""Creates the load balancer for the service."""
self.load_balancer.Create()
self.target_group.Create()
listener = aws_load_balancer.Listener(self.load_balancer, self.target_group,
self.port)
listener.Create()
self.ip_address = self.load_balancer.dns_name
def _DeleteDependencies(self):
"""Deletes the service's load balancer."""
self.task_def.Delete()
self.load_balancer.Delete()
self.target_group.Delete()
# TODO(ferneyhough): Consider supporting the flag container_cluster_version.
def _Create(self):
"""Creates the service."""
create_cmd = util.AWS_PREFIX + [
'--region',
self.region,
'ecs',
'create-service',
'--desired-count',
'1',
'--client-token',
self.client_token,
'--cluster',
self.cluster_name,
'--service-name',
self.name,
'--task-definition',
self.task_def.arn,
'--launch-type',
'FARGATE',
'--network-configuration',
self._GetNetworkConfig(),
'--load-balancers',
self._GetLoadBalancerConfig(),
]
vm_util.IssueCommand(create_cmd)
def _Delete(self):
"""Deletes the service."""
update_cmd = util.AWS_PREFIX + [
'--region', self.region, 'ecs', 'update-service', '--cluster',
self.cluster_name, '--service', self.name, '--desired-count', '0'
]
vm_util.IssueCommand(update_cmd)
delete_cmd = util.AWS_PREFIX + [
'--region', self.region, 'ecs', 'delete-service', '--cluster',
self.cluster_name, '--service', self.name
]
vm_util.IssueCommand(delete_cmd, raise_on_failure=False)
def _GetNetworkConfig(self):
network_config = {
'awsvpcConfiguration': {
'subnets': [self.subnet_id],
'securityGroups': [self.security_group_id],
'assignPublicIp': 'ENABLED',
}
}
return json.dumps(network_config)
def _GetLoadBalancerConfig(self):
"""Returns the JSON representation of the service load balancers."""
load_balancer_config = [{
'targetGroupArn': self.target_group.arn,
'containerName': self.name,
'containerPort': self.container_port,
}]
return json.dumps(load_balancer_config)
def _IsReady(self):
"""Returns True if the Service is ready."""
url = 'http://%s' % self.ip_address
try:
r = requests.get(url)
except requests.ConnectionError:
return False
if r.status_code == 200:
return True
return False
class FargateCluster(container_service.BaseContainerCluster):
"""Class representing an AWS Fargate cluster."""
CLOUD = aws.CLOUD
CLUSTER_TYPE = 'Fargate'
def __init__(self, cluster_spec):
super(FargateCluster, self).__init__(cluster_spec)
self.region = util.GetRegionFromZone(self.zone)
self.network = aws_network.AwsNetwork.GetNetwork(self)
self.firewall = aws_network.AwsFirewall.GetFirewall()
self.name = 'pkb-%s' % FLAGS.run_uri
self.task_defs = {}
self.arn = None
def _Create(self):
"""Creates the cluster."""
create_cmd = util.AWS_PREFIX + [
'--region', self.region, 'ecs', 'create-cluster', '--cluster-name',
self.name
]
stdout, _, _ = vm_util.IssueCommand(create_cmd)
response = json.loads(stdout)
self.arn = response['cluster']['clusterArn']
def _Exists(self):
"""Returns True if the cluster exists."""
if not self.arn:
return False
describe_cmd = util.AWS_PREFIX + [
'--region', self.region, 'ecs', 'describe-clusters', '--clusters',
self.arn
]
stdout, _, _ = vm_util.IssueCommand(describe_cmd)
response = json.loads(stdout)
clusters = response['clusters']
if not clusters or clusters[0]['status'] == 'INACTIVE':
return False
return True
def _Delete(self):
"""Deletes the cluster."""
delete_cmd = util.AWS_PREFIX + [
'--region', self.region, 'ecs', 'delete-cluster', '--cluster', self.name
]
vm_util.IssueCommand(delete_cmd, raise_on_failure=False)
def DeployContainer(self, name, container_spec):
"""Deploys the container according to the spec."""
if name not in self.task_defs:
task_def = TaskDefinition(name, container_spec, self)
self.task_defs[name] = task_def
task_def.Create()
task = EcsTask(name, container_spec, self)
self.containers[name].append(task)
task.Create()
def DeployContainerService(self, name, container_spec):
"""Deploys the container service according to the spec."""
if name not in self.task_defs:
task_def = TaskDefinition(name, container_spec, self)
self.task_defs[name] = task_def
task_def.Create()
service = EcsService(name, container_spec, self)
self.services[name] = service
self.firewall.AllowPortInSecurityGroup(service.region,
service.security_group_id,
service.container_port)
service.Create()
class AwsKopsCluster(container_service.KubernetesCluster):
"""Class representing a kops based Kubernetes cluster."""
CLOUD = aws.CLOUD
CLUSTER_TYPE = 'kops'
def __init__(self, spec):
super(AwsKopsCluster, self).__init__(spec)
self.name += '.k8s.local'
self.config_bucket = 'kops-%s-%s' % (FLAGS.run_uri, str(uuid.uuid4()))
self.region = util.GetRegionFromZone(self.zone)
self.s3_service = s3.S3Service()
self.s3_service.PrepareService(self.region)
def _CreateDependencies(self):
"""Create the bucket to store cluster config."""
self.s3_service.MakeBucket(self.config_bucket)
def _DeleteDependencies(self):
"""Delete the bucket that stores cluster config."""
self.s3_service.DeleteBucket(self.config_bucket)
def _Create(self):
"""Creates the cluster."""
# Create the cluster spec but don't provision any resources.
create_cmd = [
FLAGS.kops, 'create', 'cluster',
'--name=%s' % self.name,
'--zones=%s' % self.zone,
'--node-count=%s' % self.num_nodes,
'--node-size=%s' % self.machine_type
]
env = os.environ.copy()
env['KUBECONFIG'] = FLAGS.kubeconfig
env['KOPS_STATE_STORE'] = 's3://%s' % self.config_bucket
vm_util.IssueCommand(create_cmd, env=env)
# Download the cluster spec and modify it.
get_cmd = [FLAGS.kops, 'get', 'cluster', self.name, '--output=yaml']
stdout, _, _ = vm_util.IssueCommand(get_cmd, env=env)
spec = yaml.safe_load(stdout)
spec['metadata']['creationTimestamp'] = None
spec['spec']['api']['loadBalancer']['idleTimeoutSeconds'] = 3600
benchmark_spec = context.GetThreadBenchmarkSpec()
spec['spec']['cloudLabels'] = {
'owner': FLAGS.owner,
'perfkitbenchmarker-run': FLAGS.run_uri,
'benchmark': benchmark_spec.name,
'perfkit_uuid': benchmark_spec.uuid,
'benchmark_uid': benchmark_spec.uid
}
# Replace the cluster spec.
with vm_util.NamedTemporaryFile() as tf:
yaml.dump(spec, tf)
tf.close()
replace_cmd = [FLAGS.kops, 'replace', '--filename=%s' % tf.name]
vm_util.IssueCommand(replace_cmd, env=env)
# Create the actual cluster.
update_cmd = [FLAGS.kops, 'update', 'cluster', self.name, '--yes']
vm_util.IssueCommand(update_cmd, env=env)
def _Delete(self):
"""Deletes the cluster."""
super()._Delete()
delete_cmd = [
FLAGS.kops, 'delete', 'cluster',
'--name=%s' % self.name,
'--state=s3://%s' % self.config_bucket, '--yes'
]
vm_util.IssueCommand(delete_cmd, raise_on_failure=False)
def _IsReady(self):
"""Returns True if the cluster is ready, else False."""
validate_cmd = [
FLAGS.kops, 'validate', 'cluster',
'--name=%s' % self.name,
'--state=s3://%s' % self.config_bucket
]
env = os.environ.copy()
env['KUBECONFIG'] = FLAGS.kubeconfig
_, _, retcode = vm_util.IssueCommand(
validate_cmd, env=env, suppress_warning=True, raise_on_failure=False)
return not retcode
| []
| []
| []
| [] | [] | python | 0 | 0 | |
main.py | # bigQuery
import bq_helper
from bq_helper import BigQueryHelper
from google.oauth2 import service_account
# Pipeline
from airflow.models import Variable
# General
from datetime import datetime
import os
import pandas as pd
import numpy as np
import json
def run_StackoverflowDatasetEtl():
bq_conn_id = "my_gcp_conn"
dt_now = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
print(f'START {dt_now}, {os.getcwd()}')
    # TODO: Figure out how to load these environment variables through Airflow when running locally.
# Environment Variables
# variable = Variable.get("GENERAL_ENV", deserialize_json=True)
# var_google_credentials = json.load(Variable.get("GOOGLE_APPLICATION_CREDENTIALS", deserialize_json=True))
# var_db_url_tes = Variable.get("GENERAL_ENV", deserialize_json=True)
# google_credentials = var_google_credentials['GOOGLE_APPLICATION_CREDENTIALS']
# db_url_tes = var_db_url_tes['DB_URL_TES']
# google_credentials = variable['GOOGLE_APPLICATION_CREDENTIALS']
# db_url_tes = variable['DB_URL_TES']
google_credentials = os.environ['GOOGLE_APPLICATION_CREDENTIALS']
db_url_tes = os.environ['DB_URL_TES']
# -------------------------------------------------------------------------------------------- #
# Get the GOOGLE_APPLICATION_CREDENTIALS from GCP Service Accounts etl_test > Keys
credentials = service_account.Credentials.from_service_account_file(
google_credentials,
scopes=['https://www.googleapis.com/auth/cloud-platform'])
stackOverflow = bq_helper.BigQueryHelper(active_project="bigquery-public-data", dataset_name="stackoverflow")
# print list of tables
# bq_assistant = BigQueryHelper("bigquery-public-data", "stackoverflow")
# print(bq_assistant.list_tables())
# Get data from kaggle, stack overflow data (bigQuery Dataset)
# https://www.kaggle.com/stackoverflow/stackoverflow?select=users
# Solved error by pip install pyarrow==1.0.1 (correct version)
q = """
SELECT
users.display_name,
users.location,
users.reputation
FROM
`bigquery-public-data.stackoverflow.users` users
WHERE
users.creation_date BETWEEN '2008-07-31' and '2008-08-01 23:59:59.997'
LIMIT 100
"""
df = stackOverflow.query_to_pandas_safe(q)
# In case need for .csv
# df.to_csv("result.csv", index=False)
# df = pd.read_csv('result.csv')
# Group by location, get count of display_name
df_location = df.groupby(['location'])['display_name'].count().reset_index()
df_location.sort_values(by=['display_name'], ascending=False, inplace=True)
df_location.rename(columns={'display_name': 'count_location'}, inplace=True)
# Group by reputation, get max reputation from each location
df_reputation = df.groupby(['location']).agg({'reputation': np.max}).reset_index()
df_reputation.sort_values(by=['reputation'], ascending=False, inplace=True)
# Merge df_reputation & df to get the name of the max_reputation users
df_reputation = df_reputation.merge(
df.loc[:, ['location', 'reputation', 'display_name']],
how='left', left_on=['location', 'reputation'], right_on=['location', 'reputation'])
    # If more than one user shares the same location and max_reputation value, merge their names into a single cell.
df_reputation = df_reputation.groupby(['location', 'reputation'])['display_name'].agg(
lambda x: ', '.join(x.dropna())).reset_index()
df_reputation.rename(columns={'reputation': 'max_reputation'}, inplace=True)
# Merge count of display_name & max_reputation from each location
result = df_location.merge(df_reputation, how='left', left_on=['location'], right_on=['location'])
# Assign created_date
result['created_date'] = dt_now
# Save to database
result.to_sql("users_summary_detail_etl_test", db_url_tes, if_exists="replace", index=False)
print(f'Save to DB Successfully, {dt_now}')
print(f'DONE {dt_now}') | []
| []
| [
"DB_URL_TES",
"GOOGLE_APPLICATION_CREDENTIALS"
]
| [] | ["DB_URL_TES", "GOOGLE_APPLICATION_CREDENTIALS"] | python | 2 | 0 | |
config/wsgi.py | """
WSGI config for findhelp project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from pathlib import Path
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# findhelp directory.
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent
sys.path.append(str(ROOT_DIR / "findhelp"))
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| []
| []
| [
"DJANGO_SETTINGS_MODULE"
]
| [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
metashare/import_xml.py | #!/usr/bin/env python
import os
import sys
# Magic python path, based on http://djangosnippets.org/snippets/281/
from os.path import abspath, dirname, join
parentdir = dirname(dirname(abspath(__file__)))
# Insert our dependencies:
sys.path.insert(0, join(parentdir, 'lib', 'python2.7', 'site-packages'))
# Insert our parent directory (the one containing the folder metashare/):
sys.path.insert(0, parentdir)
try:
import settings # Assumed to be in the same directory.
except ImportError:
sys.stderr.write("Error: Can't find the file 'settings.py' in the " \
"directory containing %r. It appears you've customized things.\n" \
"You'll have to run django-admin.py, passing it your settings " \
"module.\n(If the file settings.py does indeed exist, it's causing" \
" an ImportError somehow.)\n" % __file__)
sys.exit(1)
def print_usage():
print "\n\tusage: {0} [--id-file=idfile] <file.xml|archive.zip> [<file.xml|archive." \
"zip> ...]\n".format(sys.argv[0])
print " --id-file=idfile : print identifier of imported resources in idfile"
return
if __name__ == "__main__":
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
PROJECT_HOME = os.path.normpath(os.getcwd() + "/..")
sys.path.append(PROJECT_HOME)
# Check command line parameters first.
if len(sys.argv) < 2:
print_usage()
sys.exit(-1)
# Check command line options for --id-file
id_filename = None
arg_num=1
if sys.argv[arg_num].startswith("--id-file="):
opt_len = len("--id-file=")
id_filename = sys.argv[arg_num][opt_len:]
arg_num = arg_num + 1
if len(id_filename) == 0:
print "Incorrect option"
print_usage()
sys.exit(-1)
if len(sys.argv) < 3:
print_usage()
sys.exit(-1)
# Check that SOLR is running, or else all resources will stay at status INTERNAL:
from metashare.repository import verify_at_startup
verify_at_startup() # may raise Exception, which we don't want to catch.
# Disable verbose debug output for the import process...
settings.DEBUG = False
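    # Defer search indexing while importing; the index is rebuilt once at the end.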
os.environ['DISABLE_INDEXING_DURING_IMPORT'] = 'True'
successful_imports = []
erroneous_imports = []
from metashare.xml_utils import import_from_file
from metashare.storage.models import PUBLISHED, MASTER
from metashare.repository.supermodel import OBJECT_XML_CACHE
# Clean cache before starting the import process.
OBJECT_XML_CACHE.clear()
for filename in sys.argv[arg_num:]:
temp_file = open(filename, 'rb')
success, failure = import_from_file(temp_file, filename, PUBLISHED, MASTER)
successful_imports += success
erroneous_imports += failure
temp_file.close()
print "Done. Successfully imported {0} files into the database, errors " \
"occurred in {1} cases.".format(len(successful_imports), len(erroneous_imports))
if len(erroneous_imports) > 0:
print "The following files could not be imported:"
for descriptor, exception in erroneous_imports:
if isinstance(exception.args, basestring):
print "\t{}: {}".format(descriptor, ' '.join(exception.args))
else:
print "\t{}: {}".format(descriptor, exception.args)
# Salvatore:
# This is useful for tracking where the resource is stored.
# It is used by some scripts for testing purposes
    if id_filename is not None:
id_file = open(id_filename, 'w')
for resource in successful_imports:
id_file.write('--->RESOURCE_ID:{0};STORAGE_IDENTIFIER:{1}\n'\
.format(resource.id, resource.storage_object.identifier))
id_file.close()
# Be nice and cleanup cache...
_cache_size = sum([len(x) for x in OBJECT_XML_CACHE.values()])
OBJECT_XML_CACHE.clear()
print "Cleared OBJECT_XML_CACHE ({} bytes)".format(_cache_size)
from django.core.management import call_command
call_command('rebuild_index', interactive=False)
| []
| []
| [
"DJANGO_SETTINGS_MODULE",
"DISABLE_INDEXING_DURING_IMPORT"
]
| [] | ["DJANGO_SETTINGS_MODULE", "DISABLE_INDEXING_DURING_IMPORT"] | python | 2 | 0 | |
pmdarima/_config.py | # -*- coding: utf-8 -*-
#
# Private configuration
from __future__ import absolute_import
import os
from os.path import expanduser
import warnings
# TODO: EVENTUALLY MIGRATE TO PMDARIMA_CACHE, etc.
# The directory in which we'll store TimeSeries models from statsmodels
# during the internal ARIMA pickling operation. NOTE: This does not change from
# version 0.9.0, when we moved from 'pyramid' -> 'pmd'!!!
PMDARIMA_CACHE = os.environ.get(
'PMDARIMA_CACHE',
os.environ.get('PYRAMID_ARIMA_CACHE',
# TODO: do we EVER want to change this?
expanduser('~/.pyramid-arima-cache')))
# The size of the pyramid cache above which to warn the user
cwb = os.environ.get('PMDARIMA_CACHE_WARN_SIZE',
os.environ.get('PYRAMID_ARIMA_CACHE_WARN_SIZE', 1e8))
# TODO: WARN
if 'PYRAMID_ARIMA_CACHE_WARN_SIZE' in os.environ:
warnings.warn("The environment variable 'PYRAMID_ARIMA_CACHE_WARN_SIZE' "
"has changed to 'PMDARIMA_CACHE_WARN_SIZE' and will be "
"removed in version 1.2.0 ",
DeprecationWarning)
try:
CACHE_WARN_BYTES = int(cwb)
except ValueError:
warnings.warn('The value of PMDARIMA_CACHE_WARN_SIZE should be '
'an integer, but got "{cache_val}". Defaulting to 1e8.'
.format(cache_val=cwb))
CACHE_WARN_BYTES = 1e8 # 100MB default
def _warn_for_cache_size():
"""Warn for a cache size that is too large.
This is called on the initial import and warns if the size of the cached
statsmodels TS objects exceeds the CACHE_WARN_BYTES value.
"""
from os.path import join, getsize, isfile
try:
cache_size = sum(getsize(join(PMDARIMA_CACHE, f))
for f in os.listdir(PMDARIMA_CACHE)
if isfile(join(PMDARIMA_CACHE, f)))
except OSError as ose:
# If it's OSError no 2, it means the cache doesn't exist yet, which
# is fine. Otherwise it's something else and we need to raise.
if ose.errno != 2:
raise
else:
if cache_size > CACHE_WARN_BYTES:
warnings.warn("The pmdarima cache ({cache_loc}) has grown to "
"{nbytes:,} bytes. Consider cleaning out old ARIMA "
"models or increasing the max cache bytes with "
"'PMDARIMA_CACHE_WARN_SIZE' (currently "
"{current_max:,} bytes) to avoid this warning in "
"the future."
.format(cache_loc=PMDARIMA_CACHE,
nbytes=cache_size,
current_max=int(CACHE_WARN_BYTES)),
UserWarning)
| []
| []
| [
"PYRAMID_ARIMA_CACHE_WARN_SIZE",
"PMDARIMA_CACHE_WARN_SIZE",
"PYRAMID_ARIMA_CACHE",
"PMDARIMA_CACHE"
]
| [] | ["PYRAMID_ARIMA_CACHE_WARN_SIZE", "PMDARIMA_CACHE_WARN_SIZE", "PYRAMID_ARIMA_CACHE", "PMDARIMA_CACHE"] | python | 4 | 0 | |
setup.py | import os
import sys
import logging
from setuptools import setup, find_packages
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
log = logging.getLogger()
# package description and keywords
description = ('Python tools for obtaining and working with elevation data '
'from Spire GNSS grazing angle altimetry')
keywords = 'Spire GNSS, altimetry, grazing angle, surface elevation and change'
# get long_description from README.rst
with open("README.rst", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/x-rst"
# install requirements and dependencies
on_rtd = os.environ.get('READTHEDOCS') == 'True'
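# Read the Docs sets READTHEDOCS=True; skip installing requirements when building docs there.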
if on_rtd:
install_requires = []
dependency_links = []
else:
# get install requirements
with open('requirements.txt') as fh:
install_requires = [line.split().pop(0) for line in fh.read().splitlines()]
dependency_links = []
# get version
with open('version.txt') as fh:
fallback_version = fh.read()
# list of all scripts to be included with package
scripts=[os.path.join('scripts',f) for f in os.listdir('scripts') if f.endswith('.py')]
# semantic version configuration for setuptools-scm
setup_requires = ["setuptools_scm"]
use_scm_version = {
"relative_to": __file__,
"local_scheme": "node-and-date",
"version_scheme": "python-simplified-semver",
"fallback_version":fallback_version,
}
setup(
name='spire-toolkit',
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
url='https://github.com/tsutterley/Spire-GNSS',
author='Tyler Sutterley',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Physics',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
keywords=keywords,
packages=find_packages(),
install_requires=install_requires,
setup_requires=setup_requires,
dependency_links=dependency_links,
use_scm_version=use_scm_version,
scripts=scripts,
include_package_data=True,
)
| []
| []
| [
"READTHEDOCS"
]
| [] | ["READTHEDOCS"] | python | 1 | 0 | |
files/debug.go | // Copyright (C) 2014 Jakob Borg and Contributors (see the CONTRIBUTORS file).
// All rights reserved. Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package files
import (
"os"
"strings"
"github.com/syncthing/syncthing/logger"
)
var (
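	// debug is enabled when the STTRACE environment variable includes "files" or equals "all".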
debug = strings.Contains(os.Getenv("STTRACE"), "files") || os.Getenv("STTRACE") == "all"
l = logger.DefaultLogger
logPrefix = logger.LogPrefix
)
| [
"\"STTRACE\"",
"\"STTRACE\""
]
| []
| [
"STTRACE"
]
| [] | ["STTRACE"] | go | 1 | 0 | |
tests/components/hassio/test_init.py | """The tests for the hassio component."""
from datetime import timedelta
import os
from unittest.mock import patch
import pytest
from homeassistant.auth.const import GROUP_ID_ADMIN
from homeassistant.components import frontend
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR_DOMAIN
from homeassistant.components.hassio import ADDONS_COORDINATOR, DOMAIN, STORAGE_KEY
from homeassistant.components.hassio.handler import HassioAPIError
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.helpers.device_registry import async_get
from homeassistant.setup import async_setup_component
from homeassistant.util import dt as dt_util
from tests.common import MockConfigEntry, async_fire_time_changed
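# Minimal Supervisor environment the hassio integration expects to find.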
MOCK_ENVIRON = {"HASSIO": "127.0.0.1", "HASSIO_TOKEN": "abcdefgh"}
@pytest.fixture()
def os_info():
"""Mock os/info."""
return {
"json": {
"result": "ok",
"data": {"version_latest": "1.0.0", "version": "1.0.0"},
}
}
@pytest.fixture(autouse=True)
def mock_all(aioclient_mock, request, os_info):
"""Mock all setup requests."""
aioclient_mock.post("http://127.0.0.1/homeassistant/options", json={"result": "ok"})
aioclient_mock.get("http://127.0.0.1/supervisor/ping", json={"result": "ok"})
aioclient_mock.post("http://127.0.0.1/supervisor/options", json={"result": "ok"})
aioclient_mock.get(
"http://127.0.0.1/info",
json={
"result": "ok",
"data": {
"supervisor": "222",
"homeassistant": "0.110.0",
"hassos": "1.2.3",
},
},
)
aioclient_mock.get(
"http://127.0.0.1/store",
json={
"result": "ok",
"data": {"addons": [], "repositories": []},
},
)
aioclient_mock.get(
"http://127.0.0.1/host/info",
json={
"result": "ok",
"data": {
"result": "ok",
"data": {
"chassis": "vm",
"operating_system": "Debian GNU/Linux 10 (buster)",
"kernel": "4.19.0-6-amd64",
},
},
},
)
aioclient_mock.get(
"http://127.0.0.1/core/info",
json={"result": "ok", "data": {"version_latest": "1.0.0", "version": "1.0.0"}},
)
aioclient_mock.get(
"http://127.0.0.1/os/info",
**os_info,
)
aioclient_mock.get(
"http://127.0.0.1/supervisor/info",
json={
"result": "ok",
"data": {"version_latest": "1.0.0", "version": "1.0.0"},
"addons": [
{
"name": "test",
"slug": "test",
"installed": True,
"update_available": False,
"version": "1.0.0",
"version_latest": "1.0.0",
"repository": "core",
"url": "https://github.com/home-assistant/addons/test",
},
{
"name": "test2",
"slug": "test2",
"installed": True,
"update_available": False,
"version": "1.0.0",
"version_latest": "1.0.0",
"repository": "core",
"url": "https://github.com",
},
],
},
)
aioclient_mock.get(
"http://127.0.0.1/addons/test/stats",
json={
"result": "ok",
"data": {
"cpu_percent": 0.99,
"memory_usage": 182611968,
"memory_limit": 3977146368,
"memory_percent": 4.59,
"network_rx": 362570232,
"network_tx": 82374138,
"blk_read": 46010945536,
"blk_write": 15051526144,
},
},
)
aioclient_mock.get(
"http://127.0.0.1/addons/test2/stats",
json={
"result": "ok",
"data": {
"cpu_percent": 0.8,
"memory_usage": 51941376,
"memory_limit": 3977146368,
"memory_percent": 1.31,
"network_rx": 31338284,
"network_tx": 15692900,
"blk_read": 740077568,
"blk_write": 6004736,
},
},
)
aioclient_mock.get(
"http://127.0.0.1/addons/test3/stats",
json={
"result": "ok",
"data": {
"cpu_percent": 0.8,
"memory_usage": 51941376,
"memory_limit": 3977146368,
"memory_percent": 1.31,
"network_rx": 31338284,
"network_tx": 15692900,
"blk_read": 740077568,
"blk_write": 6004736,
},
},
)
aioclient_mock.get("http://127.0.0.1/addons/test/changelog", text="")
aioclient_mock.get(
"http://127.0.0.1/addons/test/info",
json={"result": "ok", "data": {"auto_update": True}},
)
aioclient_mock.get("http://127.0.0.1/addons/test2/changelog", text="")
aioclient_mock.get(
"http://127.0.0.1/addons/test2/info",
json={"result": "ok", "data": {"auto_update": False}},
)
aioclient_mock.get(
"http://127.0.0.1/ingress/panels", json={"result": "ok", "data": {"panels": {}}}
)
aioclient_mock.post("http://127.0.0.1/refresh_updates", json={"result": "ok"})
async def test_setup_api_ping(hass, aioclient_mock):
"""Test setup with API ping."""
with patch.dict(os.environ, MOCK_ENVIRON):
result = await async_setup_component(hass, "hassio", {})
assert result
assert aioclient_mock.call_count == 15
assert hass.components.hassio.get_core_info()["version_latest"] == "1.0.0"
assert hass.components.hassio.is_hassio()
async def test_setup_api_panel(hass, aioclient_mock):
"""Test setup with API ping."""
assert await async_setup_component(hass, "frontend", {})
with patch.dict(os.environ, MOCK_ENVIRON):
result = await async_setup_component(hass, "hassio", {})
assert result
panels = hass.data[frontend.DATA_PANELS]
assert panels.get("hassio").to_response() == {
"component_name": "custom",
"icon": None,
"title": None,
"url_path": "hassio",
"require_admin": True,
"config": {
"_panel_custom": {
"embed_iframe": True,
"js_url": "/api/hassio/app/entrypoint.js",
"name": "hassio-main",
"trust_external": False,
}
},
}
async def test_setup_api_push_api_data(hass, aioclient_mock):
"""Test setup with API push."""
with patch.dict(os.environ, MOCK_ENVIRON):
result = await async_setup_component(
hass, "hassio", {"http": {"server_port": 9999}, "hassio": {}}
)
assert result
assert aioclient_mock.call_count == 15
assert not aioclient_mock.mock_calls[1][2]["ssl"]
assert aioclient_mock.mock_calls[1][2]["port"] == 9999
assert aioclient_mock.mock_calls[1][2]["watchdog"]
async def test_setup_api_push_api_data_server_host(hass, aioclient_mock):
"""Test setup with API push with active server host."""
with patch.dict(os.environ, MOCK_ENVIRON):
result = await async_setup_component(
hass,
"hassio",
{"http": {"server_port": 9999, "server_host": "127.0.0.1"}, "hassio": {}},
)
assert result
assert aioclient_mock.call_count == 15
assert not aioclient_mock.mock_calls[1][2]["ssl"]
assert aioclient_mock.mock_calls[1][2]["port"] == 9999
assert not aioclient_mock.mock_calls[1][2]["watchdog"]
async def test_setup_api_push_api_data_default(hass, aioclient_mock, hass_storage):
"""Test setup with API push default data."""
with patch.dict(os.environ, MOCK_ENVIRON):
result = await async_setup_component(hass, "hassio", {"http": {}, "hassio": {}})
assert result
assert aioclient_mock.call_count == 15
assert not aioclient_mock.mock_calls[1][2]["ssl"]
assert aioclient_mock.mock_calls[1][2]["port"] == 8123
refresh_token = aioclient_mock.mock_calls[1][2]["refresh_token"]
hassio_user = await hass.auth.async_get_user(
hass_storage[STORAGE_KEY]["data"]["hassio_user"]
)
assert hassio_user is not None
assert hassio_user.system_generated
assert len(hassio_user.groups) == 1
assert hassio_user.groups[0].id == GROUP_ID_ADMIN
assert hassio_user.name == "Supervisor"
for token in hassio_user.refresh_tokens.values():
if token.token == refresh_token:
break
else:
assert False, "refresh token not found"
async def test_setup_adds_admin_group_to_user(hass, aioclient_mock, hass_storage):
"""Test setup with API push default data."""
# Create user without admin
user = await hass.auth.async_create_system_user("Hass.io")
assert not user.is_admin
await hass.auth.async_create_refresh_token(user)
hass_storage[STORAGE_KEY] = {
"data": {"hassio_user": user.id},
"key": STORAGE_KEY,
"version": 1,
}
with patch.dict(os.environ, MOCK_ENVIRON):
result = await async_setup_component(hass, "hassio", {"http": {}, "hassio": {}})
assert result
assert user.is_admin
async def test_setup_migrate_user_name(hass, aioclient_mock, hass_storage):
"""Test setup with migrating the user name."""
# Create user with old name
user = await hass.auth.async_create_system_user("Hass.io")
await hass.auth.async_create_refresh_token(user)
hass_storage[STORAGE_KEY] = {
"data": {"hassio_user": user.id},
"key": STORAGE_KEY,
"version": 1,
}
with patch.dict(os.environ, MOCK_ENVIRON):
result = await async_setup_component(hass, "hassio", {"http": {}, "hassio": {}})
assert result
assert user.name == "Supervisor"
async def test_setup_api_existing_hassio_user(hass, aioclient_mock, hass_storage):
"""Test setup with API push default data."""
user = await hass.auth.async_create_system_user("Hass.io test")
token = await hass.auth.async_create_refresh_token(user)
hass_storage[STORAGE_KEY] = {"version": 1, "data": {"hassio_user": user.id}}
with patch.dict(os.environ, MOCK_ENVIRON):
result = await async_setup_component(hass, "hassio", {"http": {}, "hassio": {}})
assert result
assert aioclient_mock.call_count == 15
assert not aioclient_mock.mock_calls[1][2]["ssl"]
assert aioclient_mock.mock_calls[1][2]["port"] == 8123
assert aioclient_mock.mock_calls[1][2]["refresh_token"] == token.token
async def test_setup_core_push_timezone(hass, aioclient_mock):
"""Test setup with API push default data."""
hass.config.time_zone = "testzone"
with patch.dict(os.environ, MOCK_ENVIRON):
result = await async_setup_component(hass, "hassio", {"hassio": {}})
assert result
assert aioclient_mock.call_count == 15
assert aioclient_mock.mock_calls[2][2]["timezone"] == "testzone"
with patch("homeassistant.util.dt.set_default_time_zone"):
await hass.config.async_update(time_zone="America/New_York")
await hass.async_block_till_done()
assert aioclient_mock.mock_calls[-1][2]["timezone"] == "America/New_York"
async def test_setup_hassio_no_additional_data(hass, aioclient_mock):
"""Test setup with API push default data."""
with patch.dict(os.environ, MOCK_ENVIRON), patch.dict(
os.environ, {"HASSIO_TOKEN": "123456"}
):
result = await async_setup_component(hass, "hassio", {"hassio": {}})
assert result
assert aioclient_mock.call_count == 15
assert aioclient_mock.mock_calls[-1][3]["X-Hassio-Key"] == "123456"
async def test_fail_setup_without_environ_var(hass):
"""Fail setup if no environ variable set."""
with patch.dict(os.environ, {}, clear=True):
result = await async_setup_component(hass, "hassio", {})
assert not result
async def test_warn_when_cannot_connect(hass, caplog):
"""Fail warn when we cannot connect."""
with patch.dict(os.environ, MOCK_ENVIRON), patch(
"homeassistant.components.hassio.HassIO.is_connected",
return_value=None,
):
result = await async_setup_component(hass, "hassio", {})
assert result
assert hass.components.hassio.is_hassio()
assert "Not connected with the supervisor / system too busy!" in caplog.text
async def test_service_register(hassio_env, hass):
"""Check if service will be setup."""
assert await async_setup_component(hass, "hassio", {})
assert hass.services.has_service("hassio", "addon_start")
assert hass.services.has_service("hassio", "addon_stop")
assert hass.services.has_service("hassio", "addon_restart")
assert hass.services.has_service("hassio", "addon_update")
assert hass.services.has_service("hassio", "addon_stdin")
assert hass.services.has_service("hassio", "host_shutdown")
assert hass.services.has_service("hassio", "host_reboot")
assert hass.services.has_service("hassio", "host_reboot")
assert hass.services.has_service("hassio", "backup_full")
assert hass.services.has_service("hassio", "backup_partial")
assert hass.services.has_service("hassio", "restore_full")
assert hass.services.has_service("hassio", "restore_partial")
async def test_service_calls(hassio_env, hass, aioclient_mock, caplog):
"""Call service and check the API calls behind that."""
assert await async_setup_component(hass, "hassio", {})
aioclient_mock.post("http://127.0.0.1/addons/test/start", json={"result": "ok"})
aioclient_mock.post("http://127.0.0.1/addons/test/stop", json={"result": "ok"})
aioclient_mock.post("http://127.0.0.1/addons/test/restart", json={"result": "ok"})
aioclient_mock.post("http://127.0.0.1/addons/test/update", json={"result": "ok"})
aioclient_mock.post("http://127.0.0.1/addons/test/stdin", json={"result": "ok"})
aioclient_mock.post("http://127.0.0.1/host/shutdown", json={"result": "ok"})
aioclient_mock.post("http://127.0.0.1/host/reboot", json={"result": "ok"})
aioclient_mock.post("http://127.0.0.1/backups/new/full", json={"result": "ok"})
aioclient_mock.post("http://127.0.0.1/backups/new/partial", json={"result": "ok"})
aioclient_mock.post(
"http://127.0.0.1/backups/test/restore/full", json={"result": "ok"}
)
aioclient_mock.post(
"http://127.0.0.1/backups/test/restore/partial", json={"result": "ok"}
)
await hass.services.async_call("hassio", "addon_start", {"addon": "test"})
await hass.services.async_call("hassio", "addon_stop", {"addon": "test"})
await hass.services.async_call("hassio", "addon_restart", {"addon": "test"})
await hass.services.async_call("hassio", "addon_update", {"addon": "test"})
await hass.services.async_call(
"hassio", "addon_stdin", {"addon": "test", "input": "test"}
)
await hass.async_block_till_done()
assert aioclient_mock.call_count == 9
assert aioclient_mock.mock_calls[-1][2] == "test"
await hass.services.async_call("hassio", "host_shutdown", {})
await hass.services.async_call("hassio", "host_reboot", {})
await hass.async_block_till_done()
assert aioclient_mock.call_count == 11
await hass.services.async_call("hassio", "backup_full", {})
await hass.services.async_call(
"hassio",
"backup_partial",
{
"homeassistant": True,
"addons": ["test"],
"folders": ["ssl"],
"password": "123456",
},
)
await hass.async_block_till_done()
assert aioclient_mock.call_count == 13
assert aioclient_mock.mock_calls[-1][2] == {
"homeassistant": True,
"addons": ["test"],
"folders": ["ssl"],
"password": "123456",
}
await hass.services.async_call("hassio", "restore_full", {"slug": "test"})
await hass.async_block_till_done()
await hass.services.async_call(
"hassio",
"restore_partial",
{
"slug": "test",
"homeassistant": False,
"addons": ["test"],
"folders": ["ssl"],
"password": "123456",
},
)
await hass.async_block_till_done()
assert aioclient_mock.call_count == 15
assert aioclient_mock.mock_calls[-1][2] == {
"addons": ["test"],
"folders": ["ssl"],
"homeassistant": False,
"password": "123456",
}
async def test_service_calls_core(hassio_env, hass, aioclient_mock):
"""Call core service and check the API calls behind that."""
assert await async_setup_component(hass, "hassio", {})
aioclient_mock.post("http://127.0.0.1/homeassistant/restart", json={"result": "ok"})
aioclient_mock.post("http://127.0.0.1/homeassistant/stop", json={"result": "ok"})
await hass.services.async_call("homeassistant", "stop")
await hass.async_block_till_done()
assert aioclient_mock.call_count == 5
await hass.services.async_call("homeassistant", "check_config")
await hass.async_block_till_done()
assert aioclient_mock.call_count == 5
with patch(
"homeassistant.config.async_check_ha_config_file", return_value=None
) as mock_check_config:
await hass.services.async_call("homeassistant", "restart")
await hass.async_block_till_done()
assert mock_check_config.called
assert aioclient_mock.call_count == 6
async def test_entry_load_and_unload(hass):
"""Test loading and unloading config entry."""
with patch.dict(os.environ, MOCK_ENVIRON):
config_entry = MockConfigEntry(domain=DOMAIN, data={}, unique_id=DOMAIN)
config_entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert SENSOR_DOMAIN in hass.config.components
assert BINARY_SENSOR_DOMAIN in hass.config.components
assert ADDONS_COORDINATOR in hass.data
assert await hass.config_entries.async_unload(config_entry.entry_id)
await hass.async_block_till_done()
assert ADDONS_COORDINATOR not in hass.data
async def test_migration_off_hassio(hass):
"""Test that when a user moves instance off Hass.io, config entry gets cleaned up."""
config_entry = MockConfigEntry(domain=DOMAIN, data={}, unique_id=DOMAIN)
config_entry.add_to_hass(hass)
assert not await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert hass.config_entries.async_entries(DOMAIN) == []
async def test_device_registry_calls(hass):
"""Test device registry entries for hassio."""
dev_reg = async_get(hass)
supervisor_mock_data = {
"version": "1.0.0",
"version_latest": "1.0.0",
"addons": [
{
"name": "test",
"state": "started",
"slug": "test",
"installed": True,
"icon": False,
"update_available": False,
"version": "1.0.0",
"version_latest": "1.0.0",
"repository": "test",
"url": "https://github.com/home-assistant/addons/test",
},
{
"name": "test2",
"state": "started",
"slug": "test2",
"installed": True,
"icon": False,
"update_available": False,
"version": "1.0.0",
"version_latest": "1.0.0",
"url": "https://github.com",
},
],
}
os_mock_data = {
"board": "odroid-n2",
"boot": "A",
"update_available": False,
"version": "5.12",
"version_latest": "5.12",
}
with patch.dict(os.environ, MOCK_ENVIRON), patch(
"homeassistant.components.hassio.HassIO.get_supervisor_info",
return_value=supervisor_mock_data,
), patch(
"homeassistant.components.hassio.HassIO.get_os_info",
return_value=os_mock_data,
):
config_entry = MockConfigEntry(domain=DOMAIN, data={}, unique_id=DOMAIN)
config_entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert len(dev_reg.devices) == 5
supervisor_mock_data = {
"version": "1.0.0",
"version_latest": "1.0.0",
"addons": [
{
"name": "test2",
"state": "started",
"slug": "test2",
"installed": True,
"icon": False,
"update_available": False,
"version": "1.0.0",
"version_latest": "1.0.0",
"url": "https://github.com",
},
],
}
# Test that when addon is removed, next update will remove the add-on and subsequent updates won't
with patch(
"homeassistant.components.hassio.HassIO.get_supervisor_info",
return_value=supervisor_mock_data,
), patch(
"homeassistant.components.hassio.HassIO.get_os_info",
return_value=os_mock_data,
):
async_fire_time_changed(hass, dt_util.now() + timedelta(hours=1))
await hass.async_block_till_done()
assert len(dev_reg.devices) == 4
async_fire_time_changed(hass, dt_util.now() + timedelta(hours=2))
await hass.async_block_till_done()
assert len(dev_reg.devices) == 4
supervisor_mock_data = {
"version": "1.0.0",
"version_latest": "1.0.0",
"addons": [
{
"name": "test2",
"slug": "test2",
"state": "started",
"installed": True,
"icon": False,
"update_available": False,
"version": "1.0.0",
"version_latest": "1.0.0",
"url": "https://github.com",
},
{
"name": "test3",
"slug": "test3",
"state": "stopped",
"installed": True,
"icon": False,
"update_available": False,
"version": "1.0.0",
"version_latest": "1.0.0",
"url": "https://github.com",
},
],
}
# Test that when addon is added, next update will reload the entry so we register
# a new device
with patch(
"homeassistant.components.hassio.HassIO.get_supervisor_info",
return_value=supervisor_mock_data,
), patch(
"homeassistant.components.hassio.HassIO.get_os_info",
return_value=os_mock_data,
), patch(
"homeassistant.components.hassio.HassIO.get_info",
return_value={
"supervisor": "222",
"homeassistant": "0.110.0",
"hassos": None,
},
):
async_fire_time_changed(hass, dt_util.now() + timedelta(hours=3))
await hass.async_block_till_done()
assert len(dev_reg.devices) == 4
async def test_coordinator_updates(hass, caplog):
"""Test coordinator updates."""
await async_setup_component(hass, "homeassistant", {})
with patch.dict(os.environ, MOCK_ENVIRON), patch(
"homeassistant.components.hassio.HassIO.refresh_updates"
) as refresh_updates_mock:
config_entry = MockConfigEntry(domain=DOMAIN, data={}, unique_id=DOMAIN)
config_entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert refresh_updates_mock.call_count == 1
with patch(
"homeassistant.components.hassio.HassIO.refresh_updates",
) as refresh_updates_mock:
async_fire_time_changed(hass, dt_util.now() + timedelta(minutes=20))
await hass.async_block_till_done()
assert refresh_updates_mock.call_count == 0
with patch(
"homeassistant.components.hassio.HassIO.refresh_updates",
) as refresh_updates_mock:
await hass.services.async_call(
"homeassistant",
"update_entity",
{
"entity_id": [
"update.home_assistant_core_update",
"update.home_assistant_supervisor_update",
]
},
blocking=True,
)
assert refresh_updates_mock.call_count == 1
# There is a 10s cooldown on the debouncer
async_fire_time_changed(hass, dt_util.now() + timedelta(seconds=10))
await hass.async_block_till_done()
with patch(
"homeassistant.components.hassio.HassIO.refresh_updates",
side_effect=HassioAPIError("Unknown"),
) as refresh_updates_mock:
await hass.services.async_call(
"homeassistant",
"update_entity",
{
"entity_id": [
"update.home_assistant_core_update",
"update.home_assistant_supervisor_update",
]
},
blocking=True,
)
assert refresh_updates_mock.call_count == 1
assert "Error on Supervisor API: Unknown" in caplog.text
@pytest.mark.parametrize(
"os_info",
[
{
"json": {
"result": "ok",
"data": {"version_latest": "1.0.0", "version": "1.0.0", "board": "rpi"},
}
}
],
)
async def test_setup_hardware_integration(hass, aioclient_mock):
"""Test setup initiates hardware integration."""
with patch.dict(os.environ, MOCK_ENVIRON), patch(
"homeassistant.components.raspberry_pi.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await async_setup_component(hass, "hassio", {"hassio": {}})
assert result
await hass.async_block_till_done()
assert aioclient_mock.call_count == 15
assert len(mock_setup_entry.mock_calls) == 1
| []
| []
| []
| [] | [] | python | 0 | 0 | |
src/cloclify/client.py | import calendar
import dataclasses
import datetime
import os
from typing import Any, Dict, Iterator, List, Optional, Set
import requests
import rich
import dateutil.tz
from cloclify import utils
@dataclasses.dataclass
class Entry:
start: Optional[datetime.datetime] = None
end: Optional[datetime.datetime] = None
description: Optional[str] = None
billable: bool = False
project: Optional[str] = None
project_color: Optional[str] = None
tags: List[str] = dataclasses.field(default_factory=list)
eid: Optional[str] = None
def serialize(self, *, projects: Dict[str, Any], tags: Dict[str, Any]) -> Any:
if self.start is None:
# for PATCH
assert self.end is not None
return {
"end": utils.to_iso_timestamp(self.end),
}
data: Dict[str, Any] = {}
data["start"] = utils.to_iso_timestamp(self.start)
if self.end is not None:
data["end"] = utils.to_iso_timestamp(self.end)
if self.description is not None:
data["description"] = self.description
data["billable"] = self.billable
if self.project is not None:
data["projectId"] = projects[self.project]["id"]
if self.tags is not None:
data["tagIds"] = [tags[tag]["id"] for tag in self.tags]
return data
@classmethod
def deserialize(
cls, data: Any, *, projects: Dict[str, Any], tags: Dict[str, Any],
user_tz: datetime.tzinfo,
) -> "Entry":
        entry = cls()
entry.start = utils.from_iso_timestamp(
data["timeInterval"]["start"],
timezone=user_tz,
)
if data["timeInterval"]["end"] is not None:
entry.end = utils.from_iso_timestamp(
data["timeInterval"]["end"],
timezone=user_tz,
)
entry.description = data["description"]
entry.billable = data["billable"]
if data["projectId"] is not None:
project = projects[data["projectId"]]
entry.project = project["name"]
entry.project_color = project["color"]
if data["tagIds"] is not None:
for tag_id in data["tagIds"]:
entry.tags.append(tags[tag_id]["name"])
entry.eid = data["id"]
return entry
class ClockifyClient:
API_URL = "https://api.clockify.me/api/v1"
def __init__(self, debug: bool = False, workspace: str = None) -> None:
self._debug = debug
try:
key = os.environ["CLOCKIFY_API_KEY"]
except KeyError as e:
raise utils.UsageError(f"{e} not defined in environment")
if workspace is None:
try:
self.workspace_name = os.environ["CLOCKIFY_WORKSPACE"]
except KeyError as e:
self.workspace_name = None
else:
self.workspace_name = workspace
self._headers = {"X-Api-Key": key}
self._user_id = None
self._workspace_id = None
self._projects_by_name: Dict[str, str] = {}
self._projects_by_id: Dict[str, str] = {}
self._tags_by_name: Dict[str, str] = {}
self._tags_by_id: Dict[str, str] = {}
def _api_call(self, verb: str, path: str, **kwargs: Any) -> Any:
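        """Issues an HTTP request against the Clockify API and returns the decoded JSON response."""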
if self._debug:
rich.print(f"[u]{verb.upper()} {path}[/u]:", kwargs, "\n")
func = getattr(requests, verb.lower())
response = func(f"{self.API_URL}/{path}", headers=self._headers, **kwargs)
if not response.ok:
raise utils.APIError(verb.upper(), path, response.status_code, response.json())
r_data = response.json()
if self._debug:
rich.print(f"[u]Answer[/u]:", r_data, "\n")
return r_data
def _api_get(self, path: str, params: Dict[str, str] = None) -> Any:
return self._api_call("get", path, params=params)
def _api_post(self, path: str, data: Any) -> Any:
return self._api_call("post", path, json=data)
def _api_patch(self, path: str, data: Any) -> Any:
return self._api_call("patch", path, json=data)
def _fetch_workspace_id(self) -> None:
assert self.workspace_name is not None
workspaces = self._api_get("workspaces")
for workspace in workspaces:
if workspace["name"] == self.workspace_name:
self._workspace_id = workspace["id"]
return
names = [workspace["name"] for workspace in workspaces]
raise utils.UsageError(
f"No workspace [yellow]{self.workspace_name}[/yellow] found!\n"
f"Available workspaces: [yellow]{', '.join(names)}[/yellow]"
)
def _fetch_workspace_name(self) -> None:
assert self._workspace_id is not None
workspaces = self._api_get("workspaces")
for workspace in workspaces:
if workspace["id"] == self._workspace_id:
self.workspace_name = workspace["name"]
return
assert False, f"Unknown workspace ID {self._workspace_id}"
def _fetch_user_info(self) -> None:
info = self._api_get("user")
self._user_id = info["id"]
self._user_tz = dateutil.tz.gettz(info["settings"]["timeZone"])
if self.workspace_name is None:
self._workspace_id = info["defaultWorkspace"]
self._fetch_workspace_name()
def _fetch_projects(self) -> None:
projects = self._api_get(f"workspaces/{self._workspace_id}/projects")
for proj in projects:
self._projects_by_name[proj["name"]] = proj
self._projects_by_id[proj["id"]] = proj
def _fetch_tags(self) -> None:
tags = self._api_get(f"workspaces/{self._workspace_id}/tags")
for tag in tags:
self._tags_by_name[tag["name"]] = tag
self._tags_by_id[tag["id"]] = tag
def fetch_info(self) -> None:
if self.workspace_name is not None:
self._fetch_workspace_id()
self._fetch_user_info()
self._fetch_projects()
self._fetch_tags()
def add_entries(self, date: datetime.date, entries: List[Entry]) -> Set[str]:
added_ids = set()
for entry in entries:
data = entry.serialize(
projects=self._projects_by_name,
tags=self._tags_by_name,
)
if entry.start is None:
# Finishing a started entry
endpoint = (
f"workspaces/{self._workspace_id}/user/{self._user_id}/time-entries"
)
r_data = self._api_patch(endpoint, data)
else:
# Adding a new entry
endpoint = f"workspaces/{self._workspace_id}/time-entries"
r_data = self._api_post(endpoint, data)
# XXX Maybe do some sanity checks on the returned data?
added_ids.add(r_data["id"])
return added_ids
def get_entries_day(self, date: datetime.date) -> Iterator[Entry]:
start = datetime.datetime.combine(date, datetime.time())
end = start + datetime.timedelta(days=1)
return self._get_entries(start, end)
def get_entries_month(self, date: datetime.date) -> Iterator[Entry]:
assert date.day == 1, date
first_date = datetime.date(date.year, date.month, 1)
_first_weekday, last_day = calendar.monthrange(date.year, date.month)
last_date = datetime.date(date.year, date.month, last_day)
start = datetime.datetime.combine(first_date, datetime.time())
end = datetime.datetime.combine(last_date, datetime.time.max)
return self._get_entries(start, end)
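    # Query time entries between two datetimes (in the user's timezone) and
    # deserialize each result into an Entry.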
def _get_entries(
self, start: datetime.datetime, end: datetime.datetime
) -> Iterator[Entry]:
endpoint = f"workspaces/{self._workspace_id}/user/{self._user_id}/time-entries"
params = {
"start": utils.to_iso_timestamp(start, timezone=self._user_tz),
"end": utils.to_iso_timestamp(end, timezone=self._user_tz),
}
data = self._api_get(endpoint, params)
for entry in data:
yield Entry.deserialize(
entry,
projects=self._projects_by_id,
tags=self._tags_by_id,
user_tz=self._user_tz,
)
def validate(self, *, tags: List[str], project: Optional[str]) -> None:
for tag in tags:
if tag not in self._tags_by_name:
raise utils.UsageError(f"Unknown tag {tag}")
if project is not None and project not in self._projects_by_name:
raise utils.UsageError(
f"Unknown project {project}\n"
f"Available projects: "
f"[yellow]{', '.join(self._projects_by_name)}[/yellow]")
| []
| []
| [
"CLOCKIFY_WORKSPACE",
"CLOCKIFY_API_KEY"
]
| [] | ["CLOCKIFY_WORKSPACE", "CLOCKIFY_API_KEY"] | python | 2 | 0 | |
pkg/jx/cmd/clients/factory.go | package clients
import (
"flag"
"fmt"
"io"
"net/url"
"os"
"path/filepath"
"runtime/debug"
"github.com/jenkins-x/jx/pkg/builds"
"github.com/jenkins-x/golang-jenkins"
"github.com/jenkins-x/jx/pkg/io/secrets"
"github.com/jenkins-x/jx/pkg/vault"
certmngclient "github.com/jetstack/cert-manager/pkg/client/clientset/versioned"
"github.com/jenkins-x/jx/pkg/helm"
"github.com/jenkins-x/jx/pkg/kube/services"
kubevault "github.com/jenkins-x/jx/pkg/kube/vault"
"github.com/jenkins-x/jx/pkg/log"
"github.com/heptio/sonobuoy/pkg/client"
"github.com/heptio/sonobuoy/pkg/dynamic"
"github.com/jenkins-x/jx/pkg/gits"
"github.com/jenkins-x/jx/pkg/jenkins"
"github.com/jenkins-x/jx/pkg/kube"
"github.com/jenkins-x/jx/pkg/table"
"github.com/pkg/errors"
"gopkg.in/AlecAivazis/survey.v1/terminal"
"github.com/jenkins-x/jx/pkg/auth"
"github.com/jenkins-x/jx/pkg/client/clientset/versioned"
"github.com/jenkins-x/jx/pkg/util"
corev1 "k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
vaultoperatorclient "github.com/banzaicloud/bank-vaults/operator/pkg/client/clientset/versioned"
build "github.com/knative/build/pkg/client/clientset/versioned"
tektonclient "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
metricsclient "k8s.io/metrics/pkg/client/clientset/versioned"
// this is so that we load the auth plugins so we can connect to, say, GCP
_ "k8s.io/client-go/plugin/pkg/client/auth"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type factory struct {
Batch bool
kubeConfig kube.Kuber
impersonateUser string
bearerToken string
secretLocation secrets.SecretLocation
useVault bool
offline bool
}
var _ Factory = (*factory)(nil)
// NewFactory creates a factory with the default Kubernetes resources defined.
func NewFactory() Factory {
f := &factory{}
f.kubeConfig = kube.NewKubeConfig()
return f
}
func (f *factory) SetBatch(batch bool) {
f.Batch = batch
}
func (f *factory) SetOffline(offline bool) {
f.offline = offline
}
// ImpersonateUser returns a new factory impersonating the given user
func (f *factory) ImpersonateUser(user string) Factory {
copy := *f
copy.impersonateUser = user
    return &copy
}
// WithBearerToken returns a new factory with bearer token
func (f *factory) WithBearerToken(token string) Factory {
copy := *f
copy.bearerToken = token
    return &copy
}
// CreateJenkinsClient creates a new Jenkins client
func (f *factory) CreateJenkinsClient(kubeClient kubernetes.Interface, ns string, in terminal.FileReader, out terminal.FileWriter, errOut io.Writer) (gojenkins.JenkinsClient, error) {
svc, err := f.CreateJenkinsAuthConfigService(kubeClient, ns, "")
if err != nil {
return nil, err
}
url, err := f.GetJenkinsURL(kubeClient, ns)
if err != nil {
return nil, fmt.Errorf("%s. Try switching to the Development Tools environment via: jx env dev", err)
}
return jenkins.GetJenkinsClient(url, f.Batch, svc, in, out, errOut)
}
// CreateCustomJenkinsClient creates a new Jenkins client for the given custom Jenkins App
func (f *factory) CreateCustomJenkinsClient(kubeClient kubernetes.Interface, ns string, jenkinsServiceName string, in terminal.FileReader, out terminal.FileWriter, errOut io.Writer) (gojenkins.JenkinsClient, error) {
svc, err := f.CreateJenkinsAuthConfigService(kubeClient, ns, jenkinsServiceName)
if err != nil {
return nil, err
}
url, err := f.GetCustomJenkinsURL(kubeClient, ns, jenkinsServiceName)
if err != nil {
return nil, fmt.Errorf("%s. Try switching to the Development Tools environment via: jx env dev", err)
}
return jenkins.GetJenkinsClient(url, f.Batch, svc, in, out, errOut)
}
// GetJenkinsURL gets the Jenkins URL for the given namespace
func (f *factory) GetJenkinsURL(kubeClient kubernetes.Interface, ns string) (string, error) {
// lets find the Kubernetes service
client, ns, err := f.CreateKubeClient()
if err != nil {
return "", errors.Wrap(err, "failed to create the kube client")
}
url, err := services.FindServiceURL(client, ns, kube.ServiceJenkins)
if err != nil {
// lets try the real environment
realNS, _, err := kube.GetDevNamespace(client, ns)
if err != nil {
return "", errors.Wrapf(err, "failed to get the dev namespace from '%s' namespace", ns)
}
if realNS != ns {
url, err = services.FindServiceURL(client, realNS, kube.ServiceJenkins)
if err != nil {
return "", fmt.Errorf("%s in namespaces %s and %s", err, realNS, ns)
}
return url, nil
}
}
if err != nil {
return "", fmt.Errorf("%s in namespace %s", err, ns)
}
return url, err
}
// GetCustomJenkinsURL gets a custom jenkins App service URL
func (f *factory) GetCustomJenkinsURL(kubeClient kubernetes.Interface, ns string, jenkinsServiceName string) (string, error) {
// lets find the Kubernetes service
client, ns, err := f.CreateKubeClient()
if err != nil {
return "", errors.Wrap(err, "failed to create the kube client")
}
url, err := services.FindServiceURL(client, ns, jenkinsServiceName)
if err != nil {
// lets try the real environment
realNS, _, err := kube.GetDevNamespace(client, ns)
if err != nil {
return "", errors.Wrapf(err, "failed to get the dev namespace from '%s' namespace", ns)
}
if realNS != ns {
url, err = services.FindServiceURL(client, realNS, jenkinsServiceName)
if err != nil {
return "", errors.Wrapf(err, "failed to find service URL for %s in namespaces %s and %s", jenkinsServiceName, realNS, ns)
}
return url, nil
}
}
if err != nil {
return "", fmt.Errorf("%s in namespace %s", err, ns)
}
return url, err
}
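// CreateJenkinsAuthConfigService creates the auth config service for Jenkins,
// defaulting a server entry from the Jenkins secret and service URL in the
// given namespace when no servers are configured yet.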
func (f *factory) CreateJenkinsAuthConfigService(c kubernetes.Interface, ns string, jenkinsServiceName string) (auth.ConfigService, error) {
authConfigSvc, err := f.CreateAuthConfigService(auth.JenkinsAuthConfigFile, ns)
if jenkinsServiceName == "" {
jenkinsServiceName = kube.SecretJenkins
}
if err != nil {
return authConfigSvc, err
}
config, err := authConfigSvc.LoadConfig()
if err != nil {
return authConfigSvc, err
}
customJenkins := jenkinsServiceName != kube.SecretJenkins
if len(config.Servers) == 0 || customJenkins {
secretName := jenkinsServiceName
if customJenkins {
secretName = jenkinsServiceName + "-auth"
}
userAuth := auth.UserAuth{}
s, err := c.CoreV1().Secrets(ns).Get(secretName, metav1.GetOptions{})
if err != nil {
if !customJenkins {
return authConfigSvc, err
}
}
if s != nil {
userAuth.Username = string(s.Data[kube.JenkinsAdminUserField])
userAuth.ApiToken = string(s.Data[kube.JenkinsAdminApiToken])
userAuth.BearerToken = string(s.Data[kube.JenkinsBearTokenField])
}
if customJenkins {
s, err = c.CoreV1().Secrets(ns).Get(jenkinsServiceName, metav1.GetOptions{})
if err == nil {
if userAuth.Username == "" {
userAuth.Username = string(s.Data[kube.JenkinsAdminUserField])
}
userAuth.Password = string(s.Data[kube.JenkinsAdminPasswordField])
}
}
svc, err := c.CoreV1().Services(ns).Get(jenkinsServiceName, metav1.GetOptions{})
if err != nil {
return authConfigSvc, err
}
svcURL := services.GetServiceURL(svc)
if svcURL == "" {
return authConfigSvc, fmt.Errorf("unable to find external URL annotation on service %s in namespace %s", svc.Name, ns)
}
u, err := url.Parse(svcURL)
if err != nil {
return authConfigSvc, err
}
if !userAuth.IsInvalid() || (customJenkins && userAuth.Password != "") {
if len(config.Servers) == 0 {
config.Servers = []*auth.AuthServer{
{
Name: u.Host,
URL: svcURL,
Users: []*auth.UserAuth{&userAuth},
},
}
} else {
server := config.GetOrCreateServer(svcURL)
server.Name = u.Host
server.Users = []*auth.UserAuth{&userAuth}
}
// lets save the file so that if we call LoadConfig() again we still have this defaulted user auth
err = authConfigSvc.SaveConfig()
if err != nil {
return authConfigSvc, err
}
}
}
return authConfigSvc, err
}
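// CreateChartmuseumAuthConfigService creates the auth config service backed by
// the chartmuseum auth config file for the given namespace.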
func (f *factory) CreateChartmuseumAuthConfigService(namespace string) (auth.ConfigService, error) {
authConfigSvc, err := f.CreateAuthConfigService(auth.ChartmuseumAuthConfigFile, namespace)
if err != nil {
return authConfigSvc, err
}
_, err = authConfigSvc.LoadConfig()
if err != nil {
return authConfigSvc, err
}
return authConfigSvc, err
}
func (f *factory) CreateIssueTrackerAuthConfigService(namespace string, secrets *corev1.SecretList) (auth.ConfigService, error) {
authConfigSvc, err := f.CreateAuthConfigService(auth.IssuesAuthConfigFile, namespace)
if err != nil {
return authConfigSvc, err
}
if secrets != nil {
config, err := authConfigSvc.LoadConfig()
if err != nil {
return authConfigSvc, err
}
f.AuthMergePipelineSecrets(config, secrets, kube.ValueKindIssue, f.IsInCDPipeline())
}
return authConfigSvc, err
}
func (f *factory) CreateChatAuthConfigService(namespace string, secrets *corev1.SecretList) (auth.ConfigService, error) {
authConfigSvc, err := f.CreateAuthConfigService(auth.ChatAuthConfigFile, namespace)
if err != nil {
return authConfigSvc, err
}
if secrets != nil {
config, err := authConfigSvc.LoadConfig()
if err != nil {
return authConfigSvc, err
}
f.AuthMergePipelineSecrets(config, secrets, kube.ValueKindChat, f.IsInCDPipeline())
}
return authConfigSvc, err
}
func (f *factory) CreateAddonAuthConfigService(namespace string, secrets *corev1.SecretList) (auth.ConfigService, error) {
authConfigSvc, err := f.CreateAuthConfigService(auth.AddonAuthConfigFile, namespace)
if err != nil {
return authConfigSvc, err
}
if secrets != nil {
config, err := authConfigSvc.LoadConfig()
if err != nil {
return authConfigSvc, err
}
f.AuthMergePipelineSecrets(config, secrets, kube.ValueKindAddon, f.IsInCDPipeline())
}
return authConfigSvc, err
}
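// AuthMergePipelineSecrets merges user credentials from Kubernetes secrets
// labelled with the given kind into the auth configuration.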
func (f *factory) AuthMergePipelineSecrets(config *auth.AuthConfig, secrets *corev1.SecretList, kind string, isCDPipeline bool) error {
if config == nil || secrets == nil {
return nil
}
for _, secret := range secrets.Items {
labels := secret.Labels
annotations := secret.Annotations
data := secret.Data
if labels != nil && labels[kube.LabelKind] == kind && annotations != nil {
u := annotations[kube.AnnotationURL]
name := annotations[kube.AnnotationName]
k := labels[kube.LabelServiceKind]
if u != "" {
server := config.GetOrCreateServer(u)
if server != nil {
// lets use the latest values from the credential
if k != "" {
server.Kind = k
}
if name != "" {
server.Name = name
}
if data != nil {
username := data[kube.SecretDataUsername]
pwd := data[kube.SecretDataPassword]
if len(username) > 0 && isCDPipeline {
userAuth := config.FindUserAuth(u, string(username))
if userAuth == nil {
userAuth = &auth.UserAuth{
Username: string(username),
ApiToken: string(pwd),
}
} else if len(pwd) > 0 {
userAuth.ApiToken = string(pwd)
}
config.SetUserAuth(u, userAuth)
config.UpdatePipelineServer(server, userAuth)
}
}
}
}
}
}
return nil
}
// CreateAuthConfigService creates a new service saving auth config under the provided name. Depending on the
// configured secrets location it will either save the config to the local file system or to Vault.
func (f *factory) CreateAuthConfigService(configName string, namespace string) (auth.ConfigService, error) {
if f.SecretsLocation() == secrets.VaultLocationKind {
vaultClient, err := f.CreateSystemVaultClient(namespace)
authService := auth.NewVaultAuthConfigService(configName, vaultClient)
return authService, err
} else {
return auth.NewFileAuthConfigService(configName)
}
}
// SecretsLocation indicates the location where the secrets are stored
func (f *factory) SecretsLocation() secrets.SecretsLocationKind {
client, namespace, err := f.CreateKubeClient()
if err != nil {
return secrets.FileSystemLocationKind
}
if f.secretLocation == nil {
f.secretLocation = secrets.NewSecretLocation(client, namespace)
}
return f.secretLocation.Location()
}
// SetSecretsLocation configures the secrets location. It will persist the value in a config map
// if the persist flag is set.
func (f *factory) SetSecretsLocation(location secrets.SecretsLocationKind, persist bool) error {
if f.secretLocation == nil {
client, namespace, err := f.CreateKubeClient()
if err != nil {
return errors.Wrap(err, "creating the kube client")
}
f.secretLocation = secrets.NewSecretLocation(client, namespace)
}
err := f.secretLocation.SetLocation(location, persist)
if err != nil {
return errors.Wrapf(err, "setting the secrets location %q", location)
}
return nil
}
// ResetSecretsLocation resets the location of the secrets stored in memory
func (f *factory) ResetSecretsLocation() {
f.secretLocation = nil
}
// CreateSystemVaultClient gets the system vault client for managing the secrets
func (f *factory) CreateSystemVaultClient(namespace string) (vault.Client, error) {
name, err := f.getVaultName(namespace)
if err != nil {
return nil, err
}
return f.CreateVaultClient(name, namespace)
}
func (f *factory) getVaultName(namespace string) (string, error) {
    // look up the vault name from the install values ConfigMap first; if that fails, fall back to deriving it from the kube context
kubeClient, _, err := f.CreateKubeClient()
if err != nil {
return "", err
}
data, err := kube.ReadInstallValues(kubeClient, namespace)
if err != nil {
log.Warnf("cannot find vault name as no ConfigMap %s in dev namespace %s", kube.ConfigMapNameJXInstallConfig, namespace)
}
name := ""
if data != nil {
name = data[kube.SystemVaultName]
if name == "" {
log.Warnf("ConfigMap %s in dev namespace %s does not have key %s", kube.ConfigMapNameJXInstallConfig, namespace, kube.SystemVaultName)
clusterName := data[kube.ClusterName]
if clusterName != "" {
name = kubevault.SystemVaultNameForCluster(clusterName)
}
}
}
if name == "" {
name, err = kubevault.SystemVaultName(f.kubeConfig)
if err != nil {
return name, fmt.Errorf("could not find the system vault namein namespace %s", namespace)
}
}
return name, nil
}
// CreateVaultClient returns the given vault client for managing secrets.
// Empty name and namespace values fall back to sensible defaults.
func (f *factory) CreateVaultClient(name string, namespace string) (vault.Client, error) {
    vopClient, err := f.CreateVaultOperatorClient()
    if err != nil {
        return nil, err
    }
    kubeClient, defaultNamespace, err := f.CreateKubeClient()
    if err != nil {
        return nil, err
    }
// Use defaults if nothing is specified by the user
if namespace == "" {
devNamespace, _, err := kube.GetDevNamespace(kubeClient, defaultNamespace)
if err != nil {
return nil, errors.Wrapf(err, "getting the dev namespace from current namespace %q",
defaultNamespace)
}
namespace = devNamespace
}
if name == "" {
name, err = f.getVaultName(namespace)
if err != nil {
return nil, err
}
}
if !kubevault.FindVault(vopClient, name, namespace) {
name2, err2 := f.getVaultName(namespace)
if err2 != nil {
            return nil, errors.Wrapf(err2, "no '%s' vault found in namespace '%s' and could not find vault name", name, namespace)
}
if name2 != name {
log.Warnf("was using wrong vault name %s which should be %s\n", name, name2)
debug.PrintStack()
name = name2
if !kubevault.FindVault(vopClient, name, namespace) {
return nil, fmt.Errorf("no '%s' vault found in namespace '%s'", name, namespace)
}
} else {
debug.PrintStack()
return nil, fmt.Errorf("no '%s' vault found in namespace '%s' despite it being the vault name from jx-install-config ConfigMap", name, namespace)
}
}
clientFactory, err := kubevault.NewVaultClientFactory(kubeClient, vopClient, namespace)
if err != nil {
return nil, errors.Wrap(err, "creating vault client")
}
vaultClient, err := clientFactory.NewVaultClient(name, namespace)
return vault.NewVaultClient(vaultClient), err
}
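// CreateJXClient creates a new client for the Jenkins X API group and returns
// it together with the current namespace.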
func (f *factory) CreateJXClient() (versioned.Interface, string, error) {
config, err := f.CreateKubeConfig()
if err != nil {
return nil, "", err
}
kubeConfig, _, err := f.kubeConfig.LoadConfig()
if err != nil {
return nil, "", err
}
ns := kube.CurrentNamespace(kubeConfig)
client, err := versioned.NewForConfig(config)
if err != nil {
return nil, ns, err
}
return client, ns, err
}
func (f *factory) CreateKnativeBuildClient() (build.Interface, string, error) {
config, err := f.CreateKubeConfig()
if err != nil {
return nil, "", err
}
kubeConfig, _, err := f.kubeConfig.LoadConfig()
if err != nil {
return nil, "", err
}
ns := kube.CurrentNamespace(kubeConfig)
client, err := build.NewForConfig(config)
if err != nil {
return nil, ns, err
}
return client, ns, err
}
func (f *factory) CreateTektonClient() (tektonclient.Interface, string, error) {
config, err := f.CreateKubeConfig()
if err != nil {
return nil, "", err
}
kubeConfig, _, err := f.kubeConfig.LoadConfig()
if err != nil {
return nil, "", err
}
ns := kube.CurrentNamespace(kubeConfig)
client, err := tektonclient.NewForConfig(config)
if err != nil {
return nil, ns, err
}
return client, ns, err
}
func (f *factory) CreateDynamicClient() (*dynamic.APIHelper, string, error) {
config, err := f.CreateKubeConfig()
if err != nil {
return nil, "", err
}
kubeConfig, _, err := f.kubeConfig.LoadConfig()
if err != nil {
return nil, "", err
}
ns := kube.CurrentNamespace(kubeConfig)
client, err := dynamic.NewAPIHelperFromRESTConfig(config)
if err != nil {
return nil, ns, err
}
return client, ns, err
}
func (f *factory) CreateApiExtensionsClient() (apiextensionsclientset.Interface, error) {
config, err := f.CreateKubeConfig()
if err != nil {
return nil, err
}
return apiextensionsclientset.NewForConfig(config)
}
func (f *factory) CreateMetricsClient() (*metricsclient.Clientset, error) {
config, err := f.CreateKubeConfig()
if err != nil {
return nil, err
}
return metricsclient.NewForConfig(config)
}
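// CreateKubeClient creates a new Kubernetes client and returns it together
// with the namespace of the current context.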
func (f *factory) CreateKubeClient() (kubernetes.Interface, string, error) {
cfg, err := f.CreateKubeConfig()
if err != nil {
return nil, "", err
}
client, err := kubernetes.NewForConfig(cfg)
if err != nil {
return nil, "", err
}
if client == nil {
return nil, "", fmt.Errorf("Failed to create Kubernetes Client")
}
ns := ""
config, _, err := f.kubeConfig.LoadConfig()
if err != nil {
return client, ns, err
}
ns = kube.CurrentNamespace(config)
    // TODO allow namespace to be specified as a CLI argument!
return client, ns, nil
}
func (f *factory) CreateGitProvider(gitURL string, message string, authConfigSvc auth.ConfigService, gitKind string, batchMode bool, gitter gits.Gitter, in terminal.FileReader, out terminal.FileWriter, errOut io.Writer) (gits.GitProvider, error) {
gitInfo, err := gits.ParseGitURL(gitURL)
if err != nil {
return nil, err
}
return gitInfo.CreateProvider(f.IsInCluster(), authConfigSvc, gitKind, gitter, batchMode, in, out, errOut)
}
var kubeConfigCache *string
func createKubeConfig(offline bool) *string {
if offline {
panic("not supposed to be making a network connection")
}
var kubeconfig *string
if kubeConfigCache != nil {
return kubeConfigCache
}
if home := util.HomeDir(); home != "" {
kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
} else {
kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
}
kubeConfigCache = kubeconfig
return kubeconfig
}
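// CreateKubeConfig builds a REST config from $KUBECONFIG, the default
// kubeconfig file or the in-cluster configuration, applying any bearer token
// or impersonation user configured on the factory.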
func (f *factory) CreateKubeConfig() (*rest.Config, error) {
masterURL := ""
kubeConfigEnv := os.Getenv("KUBECONFIG")
if kubeConfigEnv != "" {
pathList := filepath.SplitList(kubeConfigEnv)
return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
&clientcmd.ClientConfigLoadingRules{Precedence: pathList},
&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: masterURL}}).ClientConfig()
}
kubeconfig := createKubeConfig(f.offline)
var config *rest.Config
var err error
if kubeconfig != nil {
exists, err := util.FileExists(*kubeconfig)
if err == nil && exists {
// use the current context in kubeconfig
config, err = clientcmd.BuildConfigFromFlags(masterURL, *kubeconfig)
if err != nil {
return nil, err
}
}
}
if config == nil {
config, err = rest.InClusterConfig()
if err != nil {
return nil, err
}
}
if config != nil && f.bearerToken != "" {
config.BearerToken = f.bearerToken
return config, nil
}
user := f.getImpersonateUser()
if config != nil && user != "" && config.Impersonate.UserName == "" {
config.Impersonate.UserName = user
}
return config, nil
}
func (f *factory) getImpersonateUser() string {
user := f.impersonateUser
if user == "" {
        // this is really only used for testing
user = os.Getenv("JX_IMPERSONATE_USER")
}
return user
}
func (f *factory) CreateTable(out io.Writer) table.Table {
return table.CreateTable(out)
}
// IsInCDPipeline reports whether the current pod is in a pipeline and running as the Jenkins service account;
// we should only load the git / issue tracker API tokens in that case
func (f *factory) IsInCDPipeline() bool {
// TODO should we let RBAC decide if we can see the Secrets in the dev namespace?
// or we should test if we are in the cluster and get the current ServiceAccount name?
buildNumber := builds.GetBuildNumber()
return buildNumber != ""
}
// IsInCluster tells whether we are running in-cluster
func (f *factory) IsInCluster() bool {
_, err := rest.InClusterConfig()
if err != nil {
return false
}
return true
}
// CreateComplianceClient creates a new Sonobuoy compliance client
func (f *factory) CreateComplianceClient() (*client.SonobuoyClient, error) {
config, err := f.CreateKubeConfig()
if err != nil {
return nil, errors.Wrap(err, "compliance client failed to load the Kubernetes configuration")
}
skc, err := dynamic.NewAPIHelperFromRESTConfig(config)
if err != nil {
return nil, errors.Wrap(err, "compliance dynamic client failed to be created")
}
return client.NewSonobuoyClient(config, skc)
}
// CreateVaultOperatorClient creates a new vault operator client
func (f *factory) CreateVaultOperatorClient() (vaultoperatorclient.Interface, error) {
config, err := f.CreateKubeConfig()
if err != nil {
return nil, err
}
return vaultoperatorclient.NewForConfig(config)
}
// CreateHelm creates a new Helm client
func (f *factory) CreateHelm(verbose bool,
helmBinary string,
noTiller bool,
helmTemplate bool) helm.Helmer {
if helmBinary == "" {
helmBinary = "helm"
}
featureFlag := "none"
if helmTemplate {
featureFlag = "template-mode"
} else if noTiller {
featureFlag = "no-tiller-server"
}
if verbose {
log.Infof("Using helmBinary %s with feature flag: %s\n", util.ColorInfo(helmBinary), util.ColorInfo(featureFlag))
}
helmCLI := helm.NewHelmCLI(helmBinary, helm.V2, "", verbose)
var h helm.Helmer = helmCLI
if helmTemplate {
kubeClient, ns, _ := f.CreateKubeClient()
h = helm.NewHelmTemplate(helmCLI, "", kubeClient, ns)
} else {
h = helmCLI
}
if noTiller && !helmTemplate {
h.SetHost(helm.GetTillerAddress())
helm.StartLocalTillerIfNotRunning()
}
return h
}
// CreateCertManagerClient creates a new Kubernetes client for cert-manager resources
func (f *factory) CreateCertManagerClient() (certmngclient.Interface, error) {
config, err := f.CreateKubeConfig()
if err != nil {
return nil, err
}
return certmngclient.NewForConfig(config)
}
| [
"\"KUBECONFIG\"",
"\"JX_IMPERSONATE_USER\""
]
| []
| [
"JX_IMPERSONATE_USER",
"KUBECONFIG"
]
| [] | ["JX_IMPERSONATE_USER", "KUBECONFIG"] | go | 2 | 0 | |
discovery/discovery.go | package discovery
import (
"fmt"
"os"
"strconv"
"strings"
"github.com/google/uuid"
consulapi "github.com/hashicorp/consul/api"
"github.com/kubitre/diplom/config"
log "github.com/sirupsen/logrus"
)
const (
SlavePattern = "slave-executor#"
MasterPattern = "master-executor#"
TagSlave = "slave"
TagMaster = "master"
)
type Discovery struct {
CurrentServiceName string
CurrentServiceType string
ConsulClient *consulapi.Client
ServiceConfig *config.ServiceConfig
}
/*InitializeDiscovery - initialize the current Discovery*/
func InitializeDiscovery(
typeService string,
configService *config.ServiceConfig) *Discovery {
return &Discovery{
CurrentServiceName: typeService + uuid.New().String(),
CurrentServiceType: typeService,
ConsulClient: nil,
ServiceConfig: configService,
}
}
/*NewClientForConsule - initialize the connection to consul*/
func (discovery *Discovery) NewClientForConsule() error {
log.Info("initialize new client for consul")
config := consulapi.Config{
Address: discovery.ServiceConfig.ConsulAddress,
HttpAuth: &consulapi.HttpBasicAuth{
Username: discovery.ServiceConfig.ConsulUsername,
Password: discovery.ServiceConfig.ConsulPassword,
},
}
consul, err := consulapi.NewClient(&config)
if err != nil {
return err
}
discovery.ConsulClient = consul
return nil
}
/*RegisterServiceWithConsul - register the service in consul*/
func (discovery *Discovery) RegisterServiceWithConsul(tags []string) {
log.Info("start registration" + discovery.CurrentServiceName + "in consul")
registration := new(consulapi.AgentServiceRegistration)
registration.ID = discovery.CurrentServiceName
registration.Name = discovery.CurrentServiceType
registration.Tags = tags
log.Info("registration information about out service: ", registration)
address := hostname()
registration.Address = address
registration.Port = discovery.ServiceConfig.APIPORT
registration.Check = new(consulapi.AgentServiceCheck)
registration.Check.HTTP = "http://" + address + ":" + strconv.Itoa(discovery.ServiceConfig.APIPORT) + "/health"
registration.Check.Interval = "5s"
registration.Check.Timeout = "3s"
log.Info("registration information: ", registration.Check.HTTP)
if errRegister := discovery.ConsulClient.Agent().ServiceRegister(registration); errRegister != nil {
log.Error("can not registering in consule: ", errRegister)
os.Exit(1)
}
log.Info("completed registered service in consul")
}
/*UnregisterCurrentService - remove the service from consul*/
func (discovery *Discovery) UnregisterCurrentService() {
log.Info("start de register service in consul")
if err := discovery.ConsulClient.Agent().ServiceDeregister(discovery.CurrentServiceName); err != nil {
log.Error(err)
}
}
/*GetService - get the currently healthy services from consul*/
func (discovery *Discovery) GetService(serviceName, tag string) []*consulapi.ServiceEntry {
log.Info("getting service from consul by service name: ", serviceName)
allHealthServices, _, err2 := discovery.ConsulClient.Health().Service(serviceName, tag, true, nil)
if err2 != nil {
log.Error(err2)
}
return allHealthServices
}
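// getCatalogService returns the catalog entries for the given service name and tag.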
func (discovery *Discovery) getCatalogService(serviceName, tag string) []*consulapi.CatalogService {
allServices, _, err := discovery.ConsulClient.Catalog().Service(serviceName, tag, nil)
if err != nil {
log.Error(err)
}
return allServices
}
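// port returns the API port from the API_PORT environment variable as a ":port"
// string, falling back to ":9997" when it is unset; the argument is ignored.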
func port(port int) string {
p := os.Getenv("API_PORT")
if len(strings.TrimSpace(p)) == 0 {
return ":9997"
}
return fmt.Sprintf(":%s", p)
}
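// hostname returns the host name of the current machine, logging a fatal error on failure.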
func hostname() string {
hn, err := os.Hostname()
if err != nil {
log.Fatalln(err)
}
return hn
}
| [
"\"API_PORT\""
]
| []
| [
"API_PORT"
]
| [] | ["API_PORT"] | go | 1 | 0 | |
daily_report/app.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from boto3.session import Session
from datetime import datetime, timedelta, timezone
from operator import itemgetter
import textwrap
import os
import slackweb
Namespace = "AWS/Billing"
MetricName = "EstimatedCharges"
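# Post the report to Slack via an incoming webhook; all connection settings are
# read from the Slack* environment variables.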
def post_slack(message):
SlackWebhookURL = os.environ['SlackWebhookURL']
SlackChannel = os.environ['SlackChannel']
SlackUsername = os.environ['SlackUsername']
SlackIconEmoji = os.environ['SlackIconEmoji']
SlackColor = os.environ['SlackColor']
SlackTitle = os.environ['SlackTitle']
slack = slackweb.Slack(url=SlackWebhookURL)
attachments = [{
'title': SlackTitle,
'text': message,
'color': SlackColor,
}]
response = slack.notify(
channel=SlackChannel,
username=SlackUsername,
icon_emoji=SlackIconEmoji,
attachments=attachments,
)
return response
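# Fetch the EstimatedCharges metric for the given dimensions and return the
# previous value, the latest value and their (non-negative) difference.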
def cw_get_metric_data_diff(client, dimensions, start_time, end_time, period=86400, stat='Maximum'):
print(f"dimensions: {dimensions}")
response = client.get_metric_data(
MetricDataQueries=[
{
'Id': 'string',
'MetricStat': {
'Metric': {
'Namespace': Namespace,
'MetricName': MetricName,
'Dimensions': dimensions
},
'Period': period,
'Stat': stat,
},
'ReturnData': True,
},
],
StartTime=start_time,
EndTime=end_time,
ScanBy='TimestampAscending',
)
data = response['MetricDataResults'][0]
print(f"values: {data['Values']}")
if len(data['Values']) == 2:
diff = round(data['Values'][1] - data['Values'][0], 2)
if diff < 0:
diff = 0
output = {
'bef': data['Values'][0],
'aft': data['Values'][1],
'diff': diff,
}
else:
output = {
'bef': 0,
'aft': 0,
'diff': 0,
}
return output
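# List every EstimatedCharges metric reported in USD (total, per service and
# per linked account).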
def cw_list_metrics(client):
response = client.list_metrics(
Namespace=Namespace,
MetricName=MetricName,
Dimensions=[
{
'Name': 'Currency',
'Value': 'USD'
},
],
)
return response['Metrics']
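# Render the total and per-service rows as Markdown-style tables inside a code
# block for Slack.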
def format_message(time_range, total, details):
headers = ["ServiceName", "Prev", "Now", "Diff"]
header = "| " + " | ".join(headers) + " |"
table_separate = ("| --- " * len(headers)) + "|"
total_rows = "| " + " | ".join([str(v) for v in total.values()]) + " |"
rows = []
for detail in details:
row = "| " + " | ".join([str(v) for v in detail.values()]) + " |"
rows.append(row)
detail_rows = "\n".join(rows)
message = textwrap.dedent('''
```
* {time_range}
# Total
{header}
{table_separate}
{total_rows}
## Details
{header}
{table_separate}
{detail_rows}
```
''').format(
time_range=time_range,
header=header,
table_separate=table_separate,
total_rows=total_rows,
detail_rows=detail_rows,
)
return message
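# Lambda entry point: compare the last two days of billing data and post a
# per-service cost report to Slack.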
def lambda_handler(event, context):
session = Session(region_name="us-east-1")
cw = session.client('cloudwatch')
until = datetime.now(timezone.utc)
since = until - timedelta(days=2)
until_str = until.strftime("%Y/%m/%d %H:%M:%S %Z")
since_str = since.strftime("%Y/%m/%d %H:%M:%S %Z")
time_range = f"{since_str} - {until_str}"
    total = {}
details = []
for metric in cw_list_metrics(cw):
dimensions = metric['Dimensions']
row = {'service': 'Total'}
for dimension in dimensions:
if dimension['Name'] == 'ServiceName':
row['service'] = dimension['Value']
if dimension['Name'] == 'LinkedAccount':
row['linked_account'] = dimension['Value']
if 'linked_account' in row:
continue
response = cw_get_metric_data_diff(cw, dimensions, since, until)
row.update(response)
if row['service'] == "Total":
total = row
else:
if row['bef'] + row['aft'] > 0:
details.append(row)
details_sorted = sorted(details, key=itemgetter('diff'), reverse=True)
message = format_message(time_range, total, details_sorted)
print(f"message: {message}")
response = post_slack(message)
print(f"response: {response}")
if __name__ == '__main__':
lambda_handler(None, None)
| []
| []
| [
"SlackUsername",
"SlackColor",
"SlackIconEmoji",
"SlackTitle",
"SlackWebhookURL",
"SlackChannel"
]
| [] | ["SlackUsername", "SlackColor", "SlackIconEmoji", "SlackTitle", "SlackWebhookURL", "SlackChannel"] | python | 6 | 0 | |
analytics-layer/analytics.go | package main
import (
"context"
"encoding/json"
"fmt"
"log"
"os"
"time"
"github.com/apache/pulsar-client-go/pulsar"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/exporters/otlp"
"go.opentelemetry.io/otel/exporters/otlp/otlpgrpc"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/global"
"go.opentelemetry.io/otel/propagation"
controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
processor "go.opentelemetry.io/otel/sdk/metric/processor/basic"
"go.opentelemetry.io/otel/sdk/metric/selector/simple"
"go.opentelemetry.io/otel/sdk/resource"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
"go.opentelemetry.io/otel/semconv"
"go.opentelemetry.io/otel/trace"
)
const (
metricPrefix = "custom.metric."
brandCountName = metricPrefix + "brand.name"
brandCountCount = metricPrefix + "brand.count"
brandCountDesc = "Count the number of estimates per brand"
serviceName = "analytics-layer"
serviceVersion = "1.0"
topicName = "estimates"
)
func main() {
/***************************************************/
/****** Creates the background OTel resources ******/
/***************************************************/
ctx := context.Background()
endpoint := os.Getenv("OTEL_EXPORTER_OTLP_ENDPOINT")
driver := otlpgrpc.NewDriver(
otlpgrpc.WithInsecure(),
otlpgrpc.WithEndpoint(endpoint),
)
exporter, err := otlp.NewExporter(ctx, driver)
if err != nil {
log.Fatalf("%s: %v", "failed to create exporter", err)
}
res0urce, err := resource.New(ctx,
resource.WithAttributes(
semconv.TelemetrySDKNameKey.String("opentelemetry"),
semconv.TelemetrySDKLanguageKey.String("go"),
semconv.TelemetrySDKVersionKey.String("v0.16.0")))
if err != nil {
log.Fatalf("%s: %v", "failed to create resource", err)
}
bsp := sdktrace.NewBatchSpanProcessor(exporter)
tracerProvider := sdktrace.NewTracerProvider(
sdktrace.WithSampler(sdktrace.AlwaysSample()),
sdktrace.WithSpanProcessor(bsp),
sdktrace.WithResource(res0urce))
pusher := controller.New(
processor.New(
simple.NewWithExactDistribution(),
exporter,
),
controller.WithResource(res0urce),
controller.WithExporter(exporter),
controller.WithCollectPeriod(5*time.Second),
)
err = pusher.Start(ctx)
if err != nil {
log.Fatalf("%s: %v", "failed to start the controller", err)
}
defer func() { _ = pusher.Stop(ctx) }()
otel.SetTracerProvider(tracerProvider)
global.SetMeterProvider(pusher.MeterProvider())
otel.SetTextMapPropagator(
propagation.NewCompositeTextMapPropagator(
propagation.Baggage{},
propagation.TraceContext{},
),
)
tracer := otel.Tracer(serviceName)
meter := global.Meter(serviceName)
brandCountMetric := metric.Must(meter).
NewInt64Counter(
brandCountCount,
metric.WithDescription(brandCountDesc))
/***************************************************/
/***** Connect with Pulsar to process messages *****/
/***************************************************/
client, err := pulsar.NewClient(pulsar.ClientOptions{
URL: os.Getenv("PULSAR_SERVICE_URL"),
OperationTimeout: 30 * time.Second,
ConnectionTimeout: 30 * time.Second,
})
if err != nil {
log.Fatalf("Could not instantiate Pulsar client: %v", err)
}
defer client.Close()
channel := make(chan pulsar.ConsumerMessage, 100)
options := pulsar.ConsumerOptions{
Topic: topicName,
SubscriptionName: serviceName,
Type: pulsar.Shared,
}
options.MessageChannel = channel
consumer, err := client.Subscribe(options)
if err != nil {
log.Fatal(err)
}
defer consumer.Close()
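    // Consume estimate messages: extract the trace context from the Pulsar
    // message properties and count estimates per car brand.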
brandCount := make(map[string]int)
for consumerMessage := range channel {
message := consumerMessage.Message
extractedContext := otel.GetTextMapPropagator().Extract(ctx, PulsarCarrier{message})
_, receiveSpan := tracer.Start(extractedContext, topicName+" receive",
trace.WithAttributes(
semconv.MessagingSystemKey.String("pulsar"),
semconv.MessagingDestinationKindKey.String("topic"),
semconv.MessagingDestinationKey.String(topicName),
))
var estimate Estimate
err := json.Unmarshal(message.Payload(), &estimate)
if err == nil {
count := brandCount[estimate.Brand]
if count == 0 {
count = 1
} else {
count = count + 1
}
brandCount[estimate.Brand] = count
brandCountMetric.Add(ctx, 1,
[]attribute.KeyValue{
attribute.String(
brandCountName,
estimate.Brand),
attribute.String(
brandCountCount,
brandCountDesc),
}...)
fmt.Printf("Count for brand '%s': %d\n", estimate.Brand, brandCount[estimate.Brand])
consumer.Ack(message)
receiveSpan.End()
}
}
}
// Estimate type
type Estimate struct {
Brand string `json:"brand"`
Price float32 `json:"price"`
}
// PulsarCarrier type
type PulsarCarrier struct {
Message pulsar.Message
}
// Get returns the value associated with the passed key.
func (pulsar PulsarCarrier) Get(key string) string {
return pulsar.Message.Properties()[key]
}
// Set stores the key-value pair.
func (pulsar PulsarCarrier) Set(key string, value string) {
pulsar.Message.Properties()[key] = value
}
// Keys lists the available keys
func (pulsar PulsarCarrier) Keys() []string {
return []string{pulsar.Message.Key()}
}
| [
"\"OTEL_EXPORTER_OTLP_ENDPOINT\"",
"\"PULSAR_SERVICE_URL\""
]
| []
| [
"OTEL_EXPORTER_OTLP_ENDPOINT",
"PULSAR_SERVICE_URL"
]
| [] | ["OTEL_EXPORTER_OTLP_ENDPOINT", "PULSAR_SERVICE_URL"] | go | 2 | 0 | |
01-Login/auth/auth.go | package auth
import (
"context"
"log"
"os"
"golang.org/x/oauth2"
oidc "github.com/coreos/go-oidc"
)
// Authenticator bundles the OIDC provider, OAuth2 configuration and context used for Auth0 logins
type Authenticator struct {
Provider *oidc.Provider
Config oauth2.Config
Ctx context.Context
}
// NewAuthenticator creates an Authenticator configured from the AUTH0_* environment variables
func NewAuthenticator() (*Authenticator, error) {
ctx := context.Background()
provider, err := oidc.NewProvider(ctx, "https://"+os.Getenv("AUTH0_DOMAIN")+"/")
if err != nil {
log.Printf("failed to get provider: %v", err)
return nil, err
}
conf := oauth2.Config{
ClientID: os.Getenv("AUTH0_CLIENT_ID"),
ClientSecret: os.Getenv("AUTH0_CLIENT_SECRET"),
RedirectURL: os.Getenv("AUTH0_CALLBACK_URL"),
Endpoint: provider.Endpoint(),
Scopes: []string{oidc.ScopeOpenID, "profile", "email"},
}
return &Authenticator{
Provider: provider,
Config: conf,
Ctx: ctx,
}, nil
}
| [
"\"AUTH0_DOMAIN\"",
"\"AUTH0_CLIENT_ID\"",
"\"AUTH0_CLIENT_SECRET\"",
"\"AUTH0_CALLBACK_URL\""
]
| []
| [
"AUTH0_DOMAIN",
"AUTH0_CALLBACK_URL",
"AUTH0_CLIENT_SECRET",
"AUTH0_CLIENT_ID"
]
| [] | ["AUTH0_DOMAIN", "AUTH0_CALLBACK_URL", "AUTH0_CLIENT_SECRET", "AUTH0_CLIENT_ID"] | go | 4 | 0 | |
src/toil/test/batchSystems/batchSystemTest.py | # Copyright (C) 2015-2021 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fcntl
import itertools
import logging
import os
import stat
import subprocess
import sys
import tempfile
import time
from abc import ABCMeta, abstractmethod
from fractions import Fraction
from inspect import getsource
from textwrap import dedent
from unittest import skipIf
from toil.batchSystems.abstractBatchSystem import (AbstractBatchSystem,
BatchSystemSupport,
InsufficientSystemResources)
# Don't import any batch systems here that depend on extras
# in order to import properly. Import them later, in tests
# protected by annotations.
from toil.batchSystems.mesos.test import MesosTestSupport
from toil.batchSystems.parasol import ParasolBatchSystem
from toil.batchSystems.registry import (BATCH_SYSTEM_FACTORY_REGISTRY,
BATCH_SYSTEMS,
single_machine_batch_system_factory,
addBatchSystemFactory)
from toil.test.batchSystems.parasolTestSupport import ParasolTestSupport
from toil.batchSystems.singleMachine import SingleMachineBatchSystem
from toil.common import Config, Toil
from toil.job import Job, JobDescription
from toil.lib.threading import cpu_count
from toil.lib.retry import retry_flaky_test
from toil.test import (ToilTest,
needs_aws_s3,
needs_fetchable_appliance,
needs_gridengine,
needs_htcondor,
needs_kubernetes,
needs_lsf,
needs_mesos,
needs_parasol,
needs_slurm,
needs_torque,
slow,
travis_test)
logger = logging.getLogger(__name__)
# How many cores should be utilized by this test. The test will fail if the running system
# doesn't have at least that many cores.
numCores = 2
preemptable = False
defaultRequirements = dict(memory=int(100e6), cores=1, disk=1000, preemptable=preemptable)
class hidden:
"""
Hide abstract base class from unittest's test case loader
http://stackoverflow.com/questions/1323455/python-unit-test-with-base-and-sub-class#answer-25695512
"""
class AbstractBatchSystemTest(ToilTest, metaclass=ABCMeta):
"""
A base test case with generic tests that every batch system should pass.
Cannot assume that the batch system actually executes commands on the local machine/filesystem.
"""
@abstractmethod
def createBatchSystem(self) -> AbstractBatchSystem:
raise NotImplementedError
def supportsWallTime(self):
return False
@classmethod
def createConfig(cls):
"""
Returns a dummy config for the batch system tests. We need a workflowID to be set up
since we are running tests without setting up a jobstore. This is the class version
to be used when an instance is not available.
:rtype: toil.common.Config
"""
config = Config()
from uuid import uuid4
config.workflowID = str(uuid4())
config.cleanWorkDir = 'always'
return config
def _createConfig(self):
"""
Returns a dummy config for the batch system tests. We need a workflowID to be set up
since we are running tests without setting up a jobstore.
:rtype: toil.common.Config
"""
return self.createConfig()
def _mockJobDescription(self, jobStoreID=None, command=None, **kwargs):
"""
Create a mock-up JobDescription with the given ID, command, and other parameters.
"""
# TODO: Use a real unittest.Mock? For now we make a real instance and just hack it up.
desc = JobDescription(**kwargs)
# Normally we can't pass in a command or ID, and the job
# serialization logic takes care of filling them in. We set them
# here.
if command is not None:
desc.command = command
if jobStoreID is not None:
desc.jobStoreID = jobStoreID
return desc
@classmethod
def setUpClass(cls):
super(hidden.AbstractBatchSystemTest, cls).setUpClass()
logging.basicConfig(level=logging.DEBUG)
def setUp(self):
super(hidden.AbstractBatchSystemTest, self).setUp()
self.config = self._createConfig()
self.batchSystem = self.createBatchSystem()
self.tempDir = self._createTempDir('testFiles')
def tearDown(self):
self.batchSystem.shutdown()
super(hidden.AbstractBatchSystemTest, self).tearDown()
def test_available_cores(self):
self.assertTrue(cpu_count() >= numCores)
@retry_flaky_test()
def test_run_jobs(self):
jobDesc1 = self._mockJobDescription(command='sleep 1000', jobName='test1', unitName=None,
jobStoreID='1', requirements=defaultRequirements)
jobDesc2 = self._mockJobDescription(command='sleep 1000', jobName='test2', unitName=None,
jobStoreID='2', requirements=defaultRequirements)
job1 = self.batchSystem.issueBatchJob(jobDesc1)
job2 = self.batchSystem.issueBatchJob(jobDesc2)
issuedIDs = self._waitForJobsToIssue(2)
self.assertEqual(set(issuedIDs), {job1, job2})
# Now at some point we want these jobs to become running
# But since we may be testing against a live cluster (Kubernetes)
# we want to handle weird cases and high cluster load as much as we can.
# Wait a bit for any Dockers to download and for the
# jobs to have a chance to start.
# TODO: We insist on neither of these ever finishing when we test
# getUpdatedBatchJob, and the sleep time is longer than the time we
# should spend waiting for both to start, so if our cluster can
# only run one job at a time, we will fail the test.
runningJobIDs = self._waitForJobsToStart(2, tries=120)
self.assertEqual(set(runningJobIDs), {job1, job2})
# Killing the jobs instead of allowing them to complete means this test can run very
# quickly if the batch system issues and starts the jobs quickly.
self.batchSystem.killBatchJobs([job1, job2])
self.assertEqual({}, self.batchSystem.getRunningBatchJobIDs())
# Issue a job and then allow it to finish by itself, causing it to be added to the
# updated jobs queue.
# We would like to have this touch something on the filesystem and
# then check for it having happened, but we can't guarantee that
# the batch system will run against the same filesystem we are
# looking at.
jobDesc3 = self._mockJobDescription(command="mktemp -d", jobName='test3', unitName=None,
jobStoreID='3', requirements=defaultRequirements)
job3 = self.batchSystem.issueBatchJob(jobDesc3)
jobUpdateInfo = self.batchSystem.getUpdatedBatchJob(maxWait=1000)
jobID, exitStatus, wallTime = jobUpdateInfo.jobID, jobUpdateInfo.exitStatus, jobUpdateInfo.wallTime
logger.info(f'Third job completed: {jobID} {exitStatus} {wallTime}')
# Since the first two jobs were killed, the only job in the updated jobs queue should
# be job 3. If the first two jobs were (incorrectly) added to the queue, this will
# fail with jobID being equal to job1 or job2.
self.assertEqual(jobID, job3)
self.assertEqual(exitStatus, 0)
if self.supportsWallTime():
self.assertTrue(wallTime > 0)
else:
self.assertIsNone(wallTime)
# TODO: Work out a way to check if the job we asked to run actually ran.
# Don't just believe the batch system, but don't assume it ran on this machine either.
self.assertFalse(self.batchSystem.getUpdatedBatchJob(0))
# Make sure killBatchJobs can handle jobs that don't exist
self.batchSystem.killBatchJobs([10])
def test_set_env(self):
# Parasol disobeys shell rules and splits the command at the space
# character into arguments before exec'ing it, whether the space is
# quoted, escaped or not.
# Start with a relatively safe script
script_shell = 'if [ "x${FOO}" == "xbar" ] ; then exit 23 ; else exit 42 ; fi'
# Escape the semicolons
script_protected = script_shell.replace(';', r'\;')
# Turn into a string which convinces bash to take all args and paste them back together and run them
command = "bash -c \"\\${@}\" bash eval " + script_protected
jobDesc4 = self._mockJobDescription(command=command, jobName='test4', unitName=None,
jobStoreID='4', requirements=defaultRequirements)
job4 = self.batchSystem.issueBatchJob(jobDesc4)
jobUpdateInfo = self.batchSystem.getUpdatedBatchJob(maxWait=1000)
jobID, exitStatus, wallTime = jobUpdateInfo.jobID, jobUpdateInfo.exitStatus, jobUpdateInfo.wallTime
self.assertEqual(exitStatus, 42)
self.assertEqual(jobID, job4)
# Now set the variable and ensure that it is present
self.batchSystem.setEnv('FOO', 'bar')
jobDesc5 = self._mockJobDescription(command=command, jobName='test5', unitName=None,
jobStoreID='5', requirements=defaultRequirements)
job5 = self.batchSystem.issueBatchJob(jobDesc5)
jobUpdateInfo = self.batchSystem.getUpdatedBatchJob(maxWait=1000)
self.assertEqual(jobUpdateInfo.exitStatus, 23)
self.assertEqual(jobUpdateInfo.jobID, job5)
def test_set_job_env(self):
""" Test the mechanism for setting per-job environment variables to batch system jobs."""
script = 'if [ "x${FOO}" == "xbar" ] ; then exit 23 ; else exit 42 ; fi'
command = "bash -c \"\\${@}\" bash eval " + script.replace(';', r'\;')
# Issue a job with a job environment variable
job_desc_6 = self._mockJobDescription(command=command, jobName='test6', unitName=None,
jobStoreID='6', requirements=defaultRequirements)
job6 = self.batchSystem.issueBatchJob(job_desc_6, job_environment={
'FOO': 'bar'
})
job_update_info = self.batchSystem.getUpdatedBatchJob(maxWait=1000)
self.assertEqual(job_update_info.exitStatus, 23) # this should succeed
self.assertEqual(job_update_info.jobID, job6)
# Now check that the environment variable doesn't exist for other jobs
job_desc_7 = self._mockJobDescription(command=command, jobName='test7', unitName=None,
jobStoreID='7', requirements=defaultRequirements)
job7 = self.batchSystem.issueBatchJob(job_desc_7)
job_update_info = self.batchSystem.getUpdatedBatchJob(maxWait=1000)
self.assertEqual(job_update_info.exitStatus, 42)
self.assertEqual(job_update_info.jobID, job7)
def testCheckResourceRequest(self):
if isinstance(self.batchSystem, BatchSystemSupport):
checkResourceRequest = self.batchSystem.checkResourceRequest
self.assertRaises(InsufficientSystemResources, checkResourceRequest,
memory=1000, cores=200, disk=1e9)
self.assertRaises(InsufficientSystemResources, checkResourceRequest,
memory=5, cores=200, disk=1e9)
self.assertRaises(InsufficientSystemResources, checkResourceRequest,
memory=1001e9, cores=1, disk=1e9)
self.assertRaises(InsufficientSystemResources, checkResourceRequest,
memory=5, cores=1, disk=2e9)
self.assertRaises(AssertionError, checkResourceRequest,
memory=None, cores=1, disk=1000)
self.assertRaises(AssertionError, checkResourceRequest,
memory=10, cores=None, disk=1000)
checkResourceRequest(memory=10, cores=1, disk=100)
def testScalableBatchSystem(self):
# If instance of scalable batch system
pass
def _waitForJobsToIssue(self, numJobs):
issuedIDs = []
for it in range(20):
issuedIDs = self.batchSystem.getIssuedBatchJobIDs()
if len(issuedIDs) == numJobs:
break
time.sleep(1)
return issuedIDs
def _waitForJobsToStart(self, numJobs, tries=20):
"""
Loop until the given number of distinct jobs are in the
running state, or until the given number of tries is exhausted
(with 1 second polling period).
Returns the list of IDs that are running.
"""
runningIDs = []
# prevent an endless loop, give it a few tries
for it in range(tries):
running = self.batchSystem.getRunningBatchJobIDs()
logger.info(f'Running jobs now: {running}')
runningIDs = list(running.keys())
if len(runningIDs) == numJobs:
break
time.sleep(1)
return runningIDs
def testAddBatchSystemFactory(self):
def test_batch_system_factory():
return SingleMachineBatchSystem
addBatchSystemFactory('testBatchSystem', test_batch_system_factory)
assert ('testBatchSystem', test_batch_system_factory) in BATCH_SYSTEM_FACTORY_REGISTRY.items()
assert 'testBatchSystem' in BATCH_SYSTEMS
class AbstractBatchSystemJobTest(ToilTest, metaclass=ABCMeta):
"""
An abstract base class for batch system tests that use a full Toil workflow rather
than using the batch system directly.
"""
cpuCount = cpu_count()
allocatedCores = sorted({1, 2, cpuCount})
sleepTime = 5
@abstractmethod
def getBatchSystemName(self):
"""
:rtype: (str, AbstractBatchSystem)
"""
raise NotImplementedError
def getOptions(self, tempDir):
"""
Configures options for Toil workflow and makes job store.
:param str tempDir: path to test directory
:return: Toil options object
"""
options = Job.Runner.getDefaultOptions(self._getTestJobStorePath())
options.logLevel = "DEBUG"
options.batchSystem = self.batchSystemName
options.workDir = tempDir
options.maxCores = self.cpuCount
return options
def setUp(self):
self.batchSystemName = self.getBatchSystemName()
super(hidden.AbstractBatchSystemJobTest, self).setUp()
def tearDown(self):
super(hidden.AbstractBatchSystemJobTest, self).tearDown()
@slow
def testJobConcurrency(self):
"""
Tests that the batch system is allocating core resources properly for concurrent tasks.
"""
for coresPerJob in self.allocatedCores:
tempDir = self._createTempDir('testFiles')
options = self.getOptions(tempDir)
counterPath = os.path.join(tempDir, 'counter')
resetCounters(counterPath)
value, maxValue = getCounters(counterPath)
assert (value, maxValue) == (0, 0)
root = Job()
for _ in range(self.cpuCount):
root.addFollowOn(Job.wrapFn(measureConcurrency, counterPath, self.sleepTime,
cores=coresPerJob, memory='1M', disk='1Mi'))
with Toil(options) as toil:
toil.start(root)
_, maxValue = getCounters(counterPath)
self.assertEqual(maxValue, self.cpuCount // coresPerJob)
def test_omp_threads(self):
"""
Test if the OMP_NUM_THREADS env var is set correctly based on jobs.cores.
"""
test_cases = {
# mapping of the number of cores to the OMP_NUM_THREADS value
0.1: "1",
1: "1",
2: "2"
}
temp_dir = self._createTempDir()
options = self.getOptions(temp_dir)
for cores, expected_omp_threads in test_cases.items():
if os.environ.get('OMP_NUM_THREADS'):
expected_omp_threads = os.environ.get('OMP_NUM_THREADS')
logger.info(f"OMP_NUM_THREADS is set. Using OMP_NUM_THREADS={expected_omp_threads} instead.")
with Toil(options) as toil:
output = toil.start(Job.wrapFn(get_omp_threads, memory='1Mi', cores=cores, disk='1Mi'))
self.assertEqual(output, expected_omp_threads)
class AbstractGridEngineBatchSystemTest(AbstractBatchSystemTest):
"""
An abstract class to reduce redundancy between Grid Engine, Slurm, and other similar batch
systems
"""
def _createConfig(self):
config = super(hidden.AbstractGridEngineBatchSystemTest, self)._createConfig()
# can't use _getTestJobStorePath since that method removes the directory
config.jobStore = 'file:' + self._createTempDir('jobStore')
return config
@needs_kubernetes
@needs_aws_s3
@needs_fetchable_appliance
class KubernetesBatchSystemTest(hidden.AbstractBatchSystemTest):
"""
Tests against the Kubernetes batch system
"""
def supportsWallTime(self):
return True
def createBatchSystem(self):
# We know we have Kubernetes so we can import the batch system
from toil.batchSystems.kubernetes import KubernetesBatchSystem
return KubernetesBatchSystem(config=self.config,
maxCores=numCores, maxMemory=1e9, maxDisk=2001)
@slow
@needs_mesos
class MesosBatchSystemTest(hidden.AbstractBatchSystemTest, MesosTestSupport):
"""
Tests against the Mesos batch system
"""
@classmethod
def createConfig(cls):
"""
needs to set mesosMasterAddress to localhost for testing since the default is now the
private IP address
"""
config = super().createConfig()
config.mesosMasterAddress = 'localhost:5050'
return config
def supportsWallTime(self):
return True
def createBatchSystem(self):
# We know we have Mesos so we can import the batch system
from toil.batchSystems.mesos.batchSystem import MesosBatchSystem
self._startMesos(numCores)
return MesosBatchSystem(config=self.config,
maxCores=numCores, maxMemory=1e9, maxDisk=1001)
def tearDown(self):
self._stopMesos()
super().tearDown()
def testIgnoreNode(self):
self.batchSystem.ignoreNode('localhost')
jobDesc = self._mockJobDescription(command='sleep 1000', jobName='test2', unitName=None,
jobStoreID='1', requirements=defaultRequirements)
job = self.batchSystem.issueBatchJob(jobDesc)
issuedID = self._waitForJobsToIssue(1)
self.assertEqual(set(issuedID), {job})
# Wait until a job starts or we go a while without that happening
runningJobIDs = self._waitForJobsToStart(1, tries=20)
# Make sure job is NOT running
self.assertEqual(set(runningJobIDs), set({}))
def write_temp_file(s: str, temp_dir: str) -> str:
"""
Dump a string into a temp file and return its path.
"""
fd, path = tempfile.mkstemp(dir=temp_dir)
try:
encoded = s.encode('utf-8')
assert os.write(fd, encoded) == len(encoded)
except:
os.unlink(path)
raise
else:
return path
finally:
os.close(fd)
@travis_test
class SingleMachineBatchSystemTest(hidden.AbstractBatchSystemTest):
"""
Tests against the single-machine batch system
"""
def supportsWallTime(self) -> bool:
return True
def createBatchSystem(self) -> AbstractBatchSystem:
return SingleMachineBatchSystem(config=self.config,
maxCores=numCores, maxMemory=1e9, maxDisk=2001)
def testProcessEscape(self, hide: bool = False) -> None:
"""
Test to make sure that child processes and their descendants go away
when the Toil workflow stops.
If hide is true, will try and hide the child processes to make them
hard to stop.
"""
def script() -> None:
#!/usr/bin/env python3
import fcntl
import os
import sys
import signal
import time
from typing import Any
def handle_signal(sig: Any, frame: Any) -> None:
sys.stderr.write(f'{os.getpid()} ignoring signal {sig}\n')
if hasattr(signal, 'valid_signals'):
# We can just ask about the signals
all_signals = signal.valid_signals()
else:
# Fish them out by name
all_signals = [getattr(signal, n) for n in dir(signal) if n.startswith('SIG') and not n.startswith('SIG_')]
for sig in all_signals:
# Set up to ignore all signals we can and generally be obstinate
if sig != signal.SIGKILL and sig != signal.SIGSTOP:
signal.signal(sig, handle_signal)
if len(sys.argv) > 2:
# Instructed to hide
if os.fork():
# Try and hide the first process immediately so getting its
# pgid won't work.
sys.exit(0)
for depth in range(3):
# Bush out into a tree of processes
os.fork()
if len(sys.argv) > 1:
fd = os.open(sys.argv[1], os.O_RDONLY)
fcntl.lockf(fd, fcntl.LOCK_SH)
sys.stderr.write(f'{os.getpid()} waiting...\n')
while True:
# Wait around forever
time.sleep(60)
# Get a directory where we can safely dump files.
temp_dir = self._createTempDir()
script_path = write_temp_file(self._getScriptSource(script), temp_dir)
# We will have all the job processes try and lock this file shared while they are alive.
lockable_path = write_temp_file('', temp_dir)
try:
command = f'{sys.executable} {script_path} {lockable_path}'
if hide:
# Tell the children to stop the first child and hide out in the
# process group it made.
command += ' hide'
# Start the job
self.batchSystem.issueBatchJob(self._mockJobDescription(command=command, jobName='fork',
jobStoreID='1', requirements=defaultRequirements))
# Wait
time.sleep(10)
lockfile = open(lockable_path, 'w')
if not hide:
# In hiding mode the job will finish, and the batch system will
# clean up after it promptly. In non-hiding mode the job will
# stick around until shutdown, so make sure we can see it.
# Try to lock the file and make sure it fails
try:
fcntl.lockf(lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
assert False, "Should not be able to lock file while job is running"
except OSError:
pass
# Shut down the batch system
self.batchSystem.shutdown()
# After the batch system shuts down, we should be able to get the
# lock immediately, because all the children should be gone.
fcntl.lockf(lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
# Then we can release it
fcntl.lockf(lockfile, fcntl.LOCK_UN)
finally:
os.unlink(script_path)
os.unlink(lockable_path)
def testHidingProcessEscape(self):
"""
Test to make sure that child processes and their descendants go away
when the Toil workflow stops, even if the job process stops and leaves children.
"""
self.testProcessEscape(hide=True)
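    # How the liveness probe above works, as a non-executing sketch (the path is
    # hypothetical): every job process holds a shared lock on lockable_path, so an
    # exclusive non-blocking lock fails with OSError while any of them is alive and
    # succeeds once they are all gone.
    #
    #     with open('/tmp/lockable', 'w') as lf:
    #         try:
    #             fcntl.lockf(lf, fcntl.LOCK_EX | fcntl.LOCK_NB)
    #             children_gone = True
    #         except OSError:
    #             children_gone = False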
@slow
class MaxCoresSingleMachineBatchSystemTest(ToilTest):
"""
    This test ensures that the single-machine batch system doesn't exceed the configured number
    of cores.
"""
@classmethod
def setUpClass(cls) -> None:
super().setUpClass()
logging.basicConfig(level=logging.DEBUG)
def setUp(self) -> None:
super().setUp()
temp_dir = self._createTempDir()
# Write initial value of counter file containing a tuple of two integers (i, n) where i
# is the number of currently executing tasks and n the maximum observed value of i
self.counterPath = write_temp_file('0,0', temp_dir)
def script() -> None:
import fcntl
import os
import sys
import time
def count(delta: int) -> None:
"""
Adjust the first integer value in a file by the given amount. If the result
exceeds the second integer value, set the second one to the first.
"""
fd = os.open(sys.argv[1], os.O_RDWR)
try:
fcntl.flock(fd, fcntl.LOCK_EX)
try:
s = os.read(fd, 10).decode('utf-8')
value, maxValue = list(map(int, s.split(',')))
value += delta
if value > maxValue: maxValue = value
os.lseek(fd, 0, 0)
os.ftruncate(fd, 0)
os.write(fd, f'{value},{maxValue}'.encode('utf-8'))
finally:
fcntl.flock(fd, fcntl.LOCK_UN)
finally:
os.close(fd)
# Without the second argument, increment counter, sleep one second and decrement.
            # Otherwise, adjust the counter by the given delta, which can be useful for services.
if len(sys.argv) < 3:
count(1)
try:
time.sleep(1)
finally:
count(-1)
else:
count(int(sys.argv[2]))
self.scriptPath = write_temp_file(self._getScriptSource(script), temp_dir)
def tearDown(self) -> None:
os.unlink(self.scriptPath)
os.unlink(self.counterPath)
def scriptCommand(self) -> str:
return ' '.join([sys.executable, self.scriptPath, self.counterPath])
@retry_flaky_test()
def test(self):
# We'll use fractions to avoid rounding errors. Remember that not every fraction can be
# represented as a floating point number.
F = Fraction
# This test isn't general enough to cover every possible value of minCores in
# SingleMachineBatchSystem. Instead we hard-code a value and assert it.
minCores = F(1, 10)
self.assertEqual(float(minCores), SingleMachineBatchSystem.minCores)
for maxCores in {F(minCores), minCores * 10, F(1), F(numCores, 2), F(numCores)}:
for coresPerJob in {F(minCores), F(minCores * 10), F(1), F(maxCores, 2), F(maxCores)}:
for load in (F(1, 10), F(1), F(10)):
jobs = int(maxCores / coresPerJob * load)
if jobs >= 1 and minCores <= coresPerJob < maxCores:
self.assertEqual(maxCores, float(maxCores))
bs = SingleMachineBatchSystem(
config=hidden.AbstractBatchSystemTest.createConfig(),
maxCores=float(maxCores),
# Ensure that memory or disk requirements don't get in the way.
maxMemory=jobs * 10,
maxDisk=jobs * 10)
try:
jobIds = set()
for i in range(0, int(jobs)):
jobIds.add(bs.issueBatchJob(JobDescription(command=self.scriptCommand(),
requirements=dict(
cores=float(coresPerJob),
memory=1, disk=1,
preemptable=preemptable),
jobName=str(i), unitName='')))
self.assertEqual(len(jobIds), jobs)
while jobIds:
job = bs.getUpdatedBatchJob(maxWait=10)
self.assertIsNotNone(job)
jobId, status, wallTime = job.jobID, job.exitStatus, job.wallTime
self.assertEqual(status, 0)
# would raise KeyError on absence
jobIds.remove(jobId)
finally:
bs.shutdown()
concurrentTasks, maxConcurrentTasks = getCounters(self.counterPath)
self.assertEqual(concurrentTasks, 0)
logger.info('maxCores: {maxCores}, '
'coresPerJob: {coresPerJob}, '
'load: {load}'.format(**locals()))
# This is the key assertion:
expectedMaxConcurrentTasks = min(maxCores // coresPerJob, jobs)
self.assertEqual(maxConcurrentTasks, expectedMaxConcurrentTasks)
resetCounters(self.counterPath)
@skipIf(SingleMachineBatchSystem.numCores < 3, 'Need at least three cores to run this test')
def testServices(self):
options = Job.Runner.getDefaultOptions(self._getTestJobStorePath())
options.logDebug = True
options.maxCores = 3
self.assertTrue(options.maxCores <= SingleMachineBatchSystem.numCores)
Job.Runner.startToil(Job.wrapJobFn(parentJob, self.scriptCommand()), options)
with open(self.counterPath, 'r+') as f:
s = f.read()
logger.info('Counter is %s', s)
self.assertEqual(getCounters(self.counterPath), (0, 3))
# Toil can use only top-level functions so we have to add them here:
def parentJob(job, cmd):
job.addChildJobFn(childJob, cmd)
def childJob(job, cmd):
job.addService(Service(cmd))
job.addChildJobFn(grandChildJob, cmd)
subprocess.check_call(cmd, shell=True)
def grandChildJob(job, cmd):
job.addService(Service(cmd))
job.addChildFn(greatGrandChild, cmd)
subprocess.check_call(cmd, shell=True)
def greatGrandChild(cmd):
subprocess.check_call(cmd, shell=True)
class Service(Job.Service):
def __init__(self, cmd):
super().__init__()
self.cmd = cmd
def start(self, fileStore):
subprocess.check_call(self.cmd + ' 1', shell=True)
def check(self):
return True
def stop(self, fileStore):
subprocess.check_call(self.cmd + ' -1', shell=True)
@slow
@needs_parasol
class ParasolBatchSystemTest(hidden.AbstractBatchSystemTest, ParasolTestSupport):
"""
Tests the Parasol batch system
"""
def supportsWallTime(self):
return True
def _createConfig(self):
config = super()._createConfig()
# can't use _getTestJobStorePath since that method removes the directory
config.jobStore = self._createTempDir('jobStore')
return config
def createBatchSystem(self) -> AbstractBatchSystem:
memory = int(3e9)
self._startParasol(numCores=numCores, memory=memory)
return ParasolBatchSystem(config=self.config,
maxCores=numCores,
maxMemory=memory,
maxDisk=1001)
def tearDown(self):
super().tearDown()
self._stopParasol()
def testBatchResourceLimits(self):
jobDesc1 = JobDescription(command="sleep 1000",
requirements=dict(memory=1 << 30, cores=1,
disk=1000, preemptable=preemptable),
jobName='testResourceLimits')
job1 = self.batchSystem.issueBatchJob(jobDesc1)
self.assertIsNotNone(job1)
jobDesc2 = JobDescription(command="sleep 1000",
requirements=dict(memory=2 << 30, cores=1,
disk=1000, preemptable=preemptable),
jobName='testResourceLimits')
job2 = self.batchSystem.issueBatchJob(jobDesc2)
self.assertIsNotNone(job2)
batches = self._getBatchList()
self.assertEqual(len(batches), 2)
# It would be better to directly check that the batches have the correct memory and cpu
# values, but Parasol seems to slightly change the values sometimes.
self.assertNotEqual(batches[0]['ram'], batches[1]['ram'])
# Need to kill one of the jobs because there are only two cores available
self.batchSystem.killBatchJobs([job2])
job3 = self.batchSystem.issueBatchJob(jobDesc1)
self.assertIsNotNone(job3)
batches = self._getBatchList()
self.assertEqual(len(batches), 1)
def _parseBatchString(self, batchString):
import re
batchInfo = dict()
memPattern = re.compile(r"(\d+\.\d+)([kgmbt])")
items = batchString.split()
batchInfo["cores"] = int(items[7])
memMatch = memPattern.match(items[8])
ramValue = float(memMatch.group(1))
ramUnits = memMatch.group(2)
ramConversion = {'b': 1e0, 'k': 1e3, 'm': 1e6, 'g': 1e9, 't': 1e12}
batchInfo["ram"] = ramValue * ramConversion[ramUnits]
return batchInfo
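    # Sketch of the expected `parasol list batches` row (the column layout is an
    # assumption inferred from the indexing above: field 8 is the core count and
    # field 9 is memory with a unit suffix such as "2.0g"):
    #
    #     self._parseBatchString('u n q p t c w 1 2.0g x')
    #     # -> {'cores': 1, 'ram': 2000000000.0}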
def _getBatchList(self):
# noinspection PyUnresolvedReferences
exitStatus, batchLines = self.batchSystem._runParasol(['list', 'batches'])
self.assertEqual(exitStatus, 0)
return [self._parseBatchString(line) for line in batchLines[1:] if line]
@slow
@needs_gridengine
class GridEngineBatchSystemTest(hidden.AbstractGridEngineBatchSystemTest):
"""
Tests against the GridEngine batch system
"""
def createBatchSystem(self) -> AbstractBatchSystem:
from toil.batchSystems.gridengine import GridEngineBatchSystem
return GridEngineBatchSystem(config=self.config, maxCores=numCores, maxMemory=1000e9,
maxDisk=1e9)
def tearDown(self):
super().tearDown()
# Cleanup GridEngine output log file from qsub
from glob import glob
for f in glob('toil_job*.o*'):
os.unlink(f)
@slow
@needs_slurm
class SlurmBatchSystemTest(hidden.AbstractGridEngineBatchSystemTest):
"""
Tests against the Slurm batch system
"""
def createBatchSystem(self) -> AbstractBatchSystem:
from toil.batchSystems.slurm import SlurmBatchSystem
return SlurmBatchSystem(config=self.config, maxCores=numCores, maxMemory=1000e9,
maxDisk=1e9)
def tearDown(self):
super().tearDown()
# Cleanup 'slurm-%j.out' produced by sbatch
from glob import glob
for f in glob('slurm-*.out'):
os.unlink(f)
@slow
@needs_lsf
class LSFBatchSystemTest(hidden.AbstractGridEngineBatchSystemTest):
"""
Tests against the LSF batch system
"""
def createBatchSystem(self) -> AbstractBatchSystem:
from toil.batchSystems.lsf import LSFBatchSystem
return LSFBatchSystem(config=self.config, maxCores=numCores,
maxMemory=1000e9, maxDisk=1e9)
@slow
@needs_torque
class TorqueBatchSystemTest(hidden.AbstractGridEngineBatchSystemTest):
"""
Tests against the Torque batch system
"""
def _createDummyConfig(self):
config = super()._createDummyConfig()
# can't use _getTestJobStorePath since that method removes the directory
config.jobStore = self._createTempDir('jobStore')
return config
def createBatchSystem(self) -> AbstractBatchSystem:
from toil.batchSystems.torque import TorqueBatchSystem
return TorqueBatchSystem(config=self.config, maxCores=numCores, maxMemory=1000e9,
maxDisk=1e9)
def tearDown(self):
super().tearDown()
        # Clean up 'toil_job_*.[oe]*' output files produced by qsub
from glob import glob
for f in glob('toil_job_*.[oe]*'):
os.unlink(f)
@slow
@needs_htcondor
class HTCondorBatchSystemTest(hidden.AbstractGridEngineBatchSystemTest):
"""
Tests against the HTCondor batch system
"""
def createBatchSystem(self) -> AbstractBatchSystem:
from toil.batchSystems.htcondor import HTCondorBatchSystem
return HTCondorBatchSystem(config=self.config, maxCores=numCores, maxMemory=1000e9,
maxDisk=1e9)
def tearDown(self):
super().tearDown()
@travis_test
class SingleMachineBatchSystemJobTest(hidden.AbstractBatchSystemJobTest):
"""
Tests Toil workflow against the SingleMachine batch system
"""
def getBatchSystemName(self):
return "single_machine"
@slow
@retry_flaky_test()
def testConcurrencyWithDisk(self):
"""
Tests that the batch system is allocating disk resources properly
"""
tempDir = self._createTempDir('testFiles')
options = Job.Runner.getDefaultOptions(self._getTestJobStorePath())
options.workDir = tempDir
from toil import physicalDisk
availableDisk = physicalDisk(options.workDir)
logger.info('Testing disk concurrency limits with %s disk space', availableDisk)
# More disk might become available by the time Toil starts, so we limit it here
options.maxDisk = availableDisk
options.batchSystem = self.batchSystemName
counterPath = os.path.join(tempDir, 'counter')
resetCounters(counterPath)
value, maxValue = getCounters(counterPath)
assert (value, maxValue) == (0, 0)
half_disk = availableDisk // 2
more_than_half_disk = half_disk + 500
logger.info('Dividing into parts of %s and %s', half_disk, more_than_half_disk)
root = Job()
        # Physically, we're asking for 50% of disk and 50% of disk + 500 bytes in the two jobs. The
        # batch system should not allow the 2 child jobs to run concurrently.
root.addChild(Job.wrapFn(measureConcurrency, counterPath, self.sleepTime, cores=1,
memory='1M', disk=half_disk))
root.addChild(Job.wrapFn(measureConcurrency, counterPath, self.sleepTime, cores=1,
memory='1M', disk=more_than_half_disk))
Job.Runner.startToil(root, options)
_, maxValue = getCounters(counterPath)
logger.info('After run: %s disk space', physicalDisk(options.workDir))
self.assertEqual(maxValue, 1)
@skipIf(SingleMachineBatchSystem.numCores < 4, 'Need at least four cores to run this test')
@slow
def testNestedResourcesDoNotBlock(self):
"""
Resources are requested in the order Memory > Cpu > Disk.
        Test that unavailability of CPUs for one scheduled job does not block another job
        that can run.
"""
tempDir = self._createTempDir('testFiles')
options = Job.Runner.getDefaultOptions(self._getTestJobStorePath())
options.workDir = tempDir
options.maxCores = 4
from toil import physicalMemory
availableMemory = physicalMemory()
options.batchSystem = self.batchSystemName
outFile = os.path.join(tempDir, 'counter')
open(outFile, 'w').close()
root = Job()
blocker = Job.wrapFn(_resourceBlockTestAuxFn, outFile=outFile, sleepTime=30, writeVal='b',
cores=2, memory='1M', disk='1M')
firstJob = Job.wrapFn(_resourceBlockTestAuxFn, outFile=outFile, sleepTime=5, writeVal='fJ',
cores=1, memory='1M', disk='1M')
secondJob = Job.wrapFn(_resourceBlockTestAuxFn, outFile=outFile, sleepTime=10,
writeVal='sJ', cores=1, memory='1M', disk='1M')
        # Should block off 50% of memory while waiting for its 3 cores
firstJobChild = Job.wrapFn(_resourceBlockTestAuxFn, outFile=outFile, sleepTime=0,
writeVal='fJC', cores=3, memory=int(availableMemory // 2), disk='1M')
        # If firstJobChild held its 50% of memory while waiting for cores, these two
        # (each needing ~66% of memory) could not run until the blocker finished, since
        # only (50% of memory - 1M) would remain available.
secondJobChild = Job.wrapFn(_resourceBlockTestAuxFn, outFile=outFile, sleepTime=5,
writeVal='sJC', cores=2, memory=int(availableMemory // 1.5),
disk='1M')
secondJobGrandChild = Job.wrapFn(_resourceBlockTestAuxFn, outFile=outFile, sleepTime=5,
writeVal='sJGC', cores=2, memory=int(availableMemory // 1.5),
disk='1M')
root.addChild(blocker)
root.addChild(firstJob)
root.addChild(secondJob)
firstJob.addChild(firstJobChild)
secondJob.addChild(secondJobChild)
secondJobChild.addChild(secondJobGrandChild)
"""
        The tree is:
                    root
                   /  |  \
                  b   fJ  sJ
                      |    |
                     fJC  sJC
                           |
                          sJGC
        But the order of execution should be
        root > b, fJ, sJ > sJC > sJGC > fJC
        since fJC cannot run until the blocker (b) finishes, but sJC and sJGC can (fJC is
        blocked by CPUs). If the resource acquisition is written properly, then fJC, which is
        scheduled before sJC and sJGC, should not block them, and should only run after they
        finish.
"""
Job.Runner.startToil(root, options)
with open(outFile) as oFH:
outString = oFH.read()
# The ordering of b, fJ and sJ is non-deterministic since they are scheduled at the same
# time. We look for all possible permutations.
possibleStarts = tuple([''.join(x) for x in itertools.permutations(['b', 'fJ', 'sJ'])])
assert outString.startswith(possibleStarts)
assert outString.endswith('sJCsJGCfJC')
def _resourceBlockTestAuxFn(outFile, sleepTime, writeVal):
"""
Write a value to the out file and then sleep for requested seconds.
:param str outFile: File to write to
:param int sleepTime: Time to sleep for
:param str writeVal: Character to write
"""
with open(outFile, 'a') as oFH:
fcntl.flock(oFH, fcntl.LOCK_EX)
oFH.write(writeVal)
time.sleep(sleepTime)
@slow
@needs_mesos
class MesosBatchSystemJobTest(hidden.AbstractBatchSystemJobTest, MesosTestSupport):
"""
Tests Toil workflow against the Mesos batch system
"""
def getOptions(self, tempDir):
options = super().getOptions(tempDir)
options.mesosMasterAddress = 'localhost:5050'
return options
def getBatchSystemName(self):
self._startMesos(self.cpuCount)
return "mesos"
def tearDown(self):
self._stopMesos()
def measureConcurrency(filepath, sleep_time=3):
"""
    Run in parallel to determine the number of concurrent tasks.
    This code was copied from MaxCoresSingleMachineBatchSystemTest in Toil's batchSystemTest module.
    :param str filepath: path to the counter file
    :param int sleep_time: number of seconds to sleep before counting down
    :return: the maximum concurrency value observed
"""
count(1, filepath)
try:
time.sleep(sleep_time)
finally:
return count(-1, filepath)
def count(delta, file_path):
"""
    Adjust the current-task count in the counter file by delta and return the maximum
    number of concurrent tasks observed so far. Counter data must be in the form:
    "concurrent tasks,max concurrent tasks" (the counter should be initialized to 0,0).
    :param int delta: increment value
    :param str file_path: path to the shared counter file
    :return: the maximum number of concurrent tasks observed
"""
fd = os.open(file_path, os.O_RDWR)
try:
fcntl.flock(fd, fcntl.LOCK_EX)
try:
s = os.read(fd, 10)
value, maxValue = [int(i) for i in s.decode('utf-8').split(',')]
value += delta
if value > maxValue: maxValue = value
os.lseek(fd, 0, 0)
os.ftruncate(fd, 0)
os.write(fd, f'{value},{maxValue}'.encode('utf-8'))
finally:
fcntl.flock(fd, fcntl.LOCK_UN)
finally:
os.close(fd)
return maxValue
def getCounters(path):
with open(path, 'r+') as f:
concurrentTasks, maxConcurrentTasks = [int(i) for i in f.read().split(',')]
return concurrentTasks, maxConcurrentTasks
def resetCounters(path):
with open(path, "w") as f:
f.write("0,0")
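# Counter-file round trip, as a comment-only sketch (the path is hypothetical):
#
#     resetCounters('/tmp/counter')   # file now contains "0,0"
#     count(1, '/tmp/counter')        # returns 1; file becomes "1,1"
#     count(-1, '/tmp/counter')       # returns 1; file becomes "0,1"
#     getCounters('/tmp/counter')     # -> (0, 1)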
def get_omp_threads() -> str:
return os.environ['OMP_NUM_THREADS']
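# Note: get_omp_threads raises KeyError when OMP_NUM_THREADS is unset; a caller
# wanting a fallback could use os.environ.get('OMP_NUM_THREADS', '1') instead
# (the default of '1' is an assumption, not something these tests rely on).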
| [] | [] | ["OMP_NUM_THREADS"] | [] | ["OMP_NUM_THREADS"] | python | 1 | 0 |
kachery/_temporarydirectory.py | import os
import shutil
import tempfile
import time
class TemporaryDirectory():
def __init__(self, remove: bool=True, prefix: str='tmp'):
self._remove = remove
self._prefix = prefix
def __enter__(self) -> str:
if 'KACHERY_STORAGE_DIR' in os.environ:
storage_dir = os.getenv('KACHERY_STORAGE_DIR')
else:
storage_dir = None
if storage_dir is not None:
dirpath = os.path.join(storage_dir, 'tmp')
if not os.path.exists(dirpath):
try:
os.mkdir(dirpath)
except:
# maybe somebody else created this directory
                    if not os.path.exists(dirpath):
raise Exception(f'Unexpected problem creating temporary directory: {dirpath}')
else:
dirpath = None
self._path = str(tempfile.mkdtemp(prefix=self._prefix, dir=dirpath))
return self._path
def __exit__(self, exc_type, exc_val, exc_tb):
if self._remove:
_rmdir_with_retries(self._path, num_retries=5)
def path(self):
return self._path
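# Typical usage, as a non-executing sketch (file and prefix names are hypothetical):
#
#     with TemporaryDirectory(prefix='demo-') as tmpdir:
#         with open(os.path.join(tmpdir, 'data.txt'), 'w') as f:
#             f.write('hello')
#     # tmpdir is removed on exit unless remove=False was passed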
def _rmdir_with_retries(dirname: str, num_retries: int, delay_between_tries: float=1):
for retry_num in range(1, num_retries + 1):
if not os.path.exists(dirname):
return
try:
shutil.rmtree(dirname)
break
except: # pragma: no cover
if retry_num < num_retries:
print('Retrying to remove directory: {}'.format(dirname))
time.sleep(delay_between_tries)
else:
raise Exception('Unable to remove directory after {} tries: {}'.format(num_retries, dirname))
| [] | [] | ["KACHERY_STORAGE_DIR"] | [] | ["KACHERY_STORAGE_DIR"] | python | 1 | 0 |
registry/nats/options_test.go | package nats
import (
"fmt"
"os"
"sync"
"testing"
"time"
"github.com/go-log/log"
"github.com/btccom/go-micro/v2/registry"
"github.com/nats-io/nats.go"
)
var addrTestCases = []struct {
name string
description string
addrs map[string]string // expected address : set address
}{
{
"registryOption",
"set registry addresses through a registry.Option in constructor",
map[string]string{
"nats://192.168.10.1:5222": "192.168.10.1:5222",
"nats://10.20.10.0:4222": "10.20.10.0:4222"},
},
{
"natsOption",
"set registry addresses through the nats.Option in constructor",
map[string]string{
"nats://192.168.10.1:5222": "192.168.10.1:5222",
"nats://10.20.10.0:4222": "10.20.10.0:4222"},
},
{
"default",
"check if default Address is set correctly",
map[string]string{
"nats://localhost:4222": ""},
},
}
func TestInitAddrs(t *testing.T) {
for _, tc := range addrTestCases {
t.Run(fmt.Sprintf("%s: %s", tc.name, tc.description), func(t *testing.T) {
var reg registry.Registry
var addrs []string
for _, addr := range tc.addrs {
addrs = append(addrs, addr)
}
switch tc.name {
case "registryOption":
// we know that there are just two addrs in the dict
reg = NewRegistry(registry.Addrs(addrs[0], addrs[1]))
case "natsOption":
nopts := nats.GetDefaultOptions()
nopts.Servers = addrs
reg = NewRegistry(Options(nopts))
case "default":
reg = NewRegistry()
}
// if err := reg.Register(dummyService); err != nil {
// t.Fatal(err)
// }
natsRegistry, ok := reg.(*natsRegistry)
if !ok {
t.Fatal("Expected registry to be of types *natsRegistry")
}
			// check that the number of addrs actually set matches the number we configured
			if len(natsRegistry.addrs) != len(tc.addrs) {
				t.Errorf("Expected Addr count = %d, Actual Addr count = %d",
					len(tc.addrs), len(natsRegistry.addrs))
}
for _, addr := range natsRegistry.addrs {
_, ok := tc.addrs[addr]
if !ok {
t.Errorf("Expected '%s' has not been set", addr)
}
}
})
}
}
func TestWatchQueryTopic(t *testing.T) {
natsURL := os.Getenv("NATS_URL")
if natsURL == "" {
log.Logf("NATS_URL is undefined - skipping tests")
return
}
watchTopic := "custom.test.watch"
queryTopic := "custom.test.query"
wt := WatchTopic(watchTopic)
qt := QueryTopic(queryTopic)
// connect to NATS and subscribe to the Watch & Query topics where the
// registry will publish a msg
nopts := nats.GetDefaultOptions()
nopts.Servers = setAddrs([]string{natsURL})
conn, err := nopts.Connect()
if err != nil {
t.Fatal(err)
}
wg := sync.WaitGroup{}
wg.Add(2)
okCh := make(chan struct{})
// Wait until we have received something on both topics
go func() {
wg.Wait()
close(okCh)
}()
// handler just calls wg.Done()
rcvdHdlr := func(m *nats.Msg) {
wg.Done()
}
_, err = conn.Subscribe(queryTopic, rcvdHdlr)
if err != nil {
t.Fatal(err)
}
_, err = conn.Subscribe(watchTopic, rcvdHdlr)
if err != nil {
t.Fatal(err)
}
	dummyService := &registry.Service{
Name: "TestInitAddr",
Version: "1.0.0",
}
reg := NewRegistry(qt, wt, registry.Addrs(natsURL))
// trigger registry to send out message on watchTopic
if err := reg.Register(dummyService); err != nil {
t.Fatal(err)
}
// trigger registry to send out message on queryTopic
if _, err := reg.ListServices(); err != nil {
t.Fatal(err)
}
	// make sure that we received something on both topics
select {
case <-okCh:
// fine - we received on both topics a message from the registry
case <-time.After(time.Millisecond * 200):
t.Fatal("timeout - no data received on watch topic")
}
}
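// TestWatchQueryTopic only exercises the registry when NATS_URL points at a
// reachable server, e.g. (address and package path are assumptions):
//
//	NATS_URL=nats://localhost:4222 go test -run TestWatchQueryTopic ./registry/nats/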
| ["\"NATS_URL\""] | [] | ["NATS_URL"] | [] | ["NATS_URL"] | go | 1 | 0 |