filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence
---|---|---|---|---|---|---|---|---|---|---|
vendor/github.com/t3rm1n4l/go-mega/mega_test.go | package mega
import (
"crypto/md5"
"crypto/rand"
"fmt"
"io/ioutil"
"os"
"path"
"testing"
"time"
)
var USER string = os.Getenv("MEGA_USER")
var PASSWORD string = os.Getenv("MEGA_PASSWD")
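// initSession creates a Mega client and logs in with the credentials read from MEGA_USER/MEGA_PASSWD; it exits the process if the login fails.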
func initSession() *Mega {
m := New()
err := m.Login(USER, PASSWORD)
if err == nil {
return m
}
fmt.Println("Unable to initialize session")
os.Exit(1)
return nil
}
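// createFile writes size random bytes to a temporary file and returns the file path together with the MD5 hex digest of its contents.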
func createFile(size int64) (string, string) {
b := make([]byte, size)
rand.Read(b)
file, _ := ioutil.TempFile("/tmp/", "gomega-")
file.Write(b)
h := md5.New()
h.Write(b)
return file.Name(), fmt.Sprintf("%x", h.Sum(nil))
}
func fileMD5(name string) string {
file, _ := os.Open(name)
b, _ := ioutil.ReadAll(file)
h := md5.New()
h.Write(b)
return fmt.Sprintf("%x", h.Sum(nil))
}
func TestLogin(t *testing.T) {
m := New()
err := m.Login(USER, PASSWORD)
if err != nil {
t.Error("Login failed", err)
}
}
func TestGetUser(t *testing.T) {
session := initSession()
_, err := session.GetUser()
if err != nil {
t.Fatal("GetUser failed", err)
}
}
func TestUploadDownload(t *testing.T) {
session := initSession()
name, h1 := createFile(314573)
node, err := session.UploadFile(name, session.FS.root, "", nil)
os.Remove(name)
if err != nil {
t.Fatal("Upload failed", err)
}
if node == nil {
t.Error("Failed to obtain node after upload")
}
phash := session.FS.root.hash
n := session.FS.lookup[node.hash]
if n.parent.hash != phash {
t.Error("Parent of uploaded file mismatch")
}
err = session.DownloadFile(node, name, nil)
if err != nil {
t.Fatal("Download failed", err)
}
h2 := fileMD5(name)
os.Remove(name)
if h1 != h2 {
t.Error("MD5 mismatch for downloaded file")
}
}
func TestMove(t *testing.T) {
session := initSession()
name, _ := createFile(31)
node, err := session.UploadFile(name, session.FS.root, "", nil)
os.Remove(name)
hash := node.hash
phash := session.FS.trash.hash
err = session.Move(node, session.FS.trash)
if err != nil {
t.Fatal("Move failed", err)
}
n := session.FS.lookup[hash]
if n.parent.hash != phash {
t.Error("Move happened to wrong parent", phash, n.parent.hash)
}
}
func TestRename(t *testing.T) {
session := initSession()
name, _ := createFile(31)
node, err := session.UploadFile(name, session.FS.root, "", nil)
os.Remove(name)
err = session.Rename(node, "newname.txt")
if err != nil {
t.Fatal("Rename failed", err)
}
newname := session.FS.lookup[node.hash].name
if newname != "newname.txt" {
t.Error("Renamed to wrong name", newname)
}
}
func TestDelete(t *testing.T) {
session := initSession()
name, _ := createFile(31)
node, _ := session.UploadFile(name, session.FS.root, "", nil)
os.Remove(name)
err := session.Delete(node, false)
if err != nil {
t.Fatal("Soft delete failed", err)
}
node = session.FS.lookup[node.hash]
if node.parent != session.FS.trash {
t.Error("Expects file to be moved to trash")
}
err = session.Delete(node, true)
if err != nil {
t.Fatal("Hard delete failed", err)
}
if _, ok := session.FS.lookup[node.hash]; ok {
t.Error("Expects file to have disappeared")
}
}
func TestCreateDir(t *testing.T) {
session := initSession()
node, err := session.CreateDir("testdir1", session.FS.root)
if err != nil {
t.Fatal("Failed to create directory-1", err)
}
node2, err := session.CreateDir("testdir2", node)
if err != nil {
t.Fatal("Failed to create directory-2", err)
}
nnode2 := session.FS.lookup[node2.hash]
if nnode2.parent.hash != node.hash {
t.Error("Wrong directory parent")
}
}
func TestConfig(t *testing.T) {
m := New()
m.SetAPIUrl("http://invalid.domain")
err := m.Login(USER, PASSWORD)
if err == nil {
t.Error("API Url: Expected failure")
}
err = m.SetDownloadWorkers(100)
if err != EWORKER_LIMIT_EXCEEDED {
t.Error("Download: Expected EWORKER_LIMIT_EXCEEDED error")
}
err = m.SetUploadWorkers(100)
if err != EWORKER_LIMIT_EXCEEDED {
t.Error("Upload: Expected EWORKER_LIMIT_EXCEEDED error")
}
// TODO: Add timeout test cases
}
func TestPathLookup(t *testing.T) {
session := initSession()
rs := randString(5)
node1, err := session.CreateDir("dir-1-"+rs, session.FS.root)
if err != nil {
t.Fatal("Failed to create directory-1", err)
}
node21, err := session.CreateDir("dir-2-1-"+rs, node1)
if err != nil {
t.Fatal("Failed to create directory-2-1", err)
}
node22, err := session.CreateDir("dir-2-2-"+rs, node1)
if err != nil {
t.Fatal("Failed to create directory-2-2", err)
}
node31, err := session.CreateDir("dir-3-1-"+rs, node21)
if err != nil {
t.Fatal("Failed to create directory-3-1", err)
}
node32, err := session.CreateDir("dir-3-2-"+rs, node22)
_ = node32
if err != nil {
t.Fatal("Failed to create directory-3-2", err)
}
name1, _ := createFile(31)
_, err = session.UploadFile(name1, node31, "", nil)
os.Remove(name1)
if err != nil {
t.Fatal("Failed to upload file name1", err)
}
name2, _ := createFile(31)
_, err = session.UploadFile(name2, node31, "", nil)
os.Remove(name2)
if err != nil {
t.Fatal("Failed to upload file name2", err)
}
name3, _ := createFile(31)
_, err = session.UploadFile(name3, node22, "", nil)
os.Remove(name3)
if err != nil {
t.Fatal("Failed to upload file name3", err)
}
testpaths := [][]string{
{"dir-1-" + rs, "dir-2-2-" + rs, path.Base(name3)},
{"dir-1-" + rs, "dir-2-1-" + rs, "dir-3-1-" + rs},
{"dir-1-" + rs, "dir-2-1-" + rs, "dir-3-1-" + rs, path.Base(name1)},
{"dir-1-" + rs, "dir-2-1-" + rs, "none"},
}
results := []error{nil, nil, nil, ENOENT}
for i, tst := range testpaths {
ns, e := session.FS.PathLookup(session.FS.root, tst)
switch {
case e != results[i]:
t.Errorf("Test %d failed: wrong result", i)
default:
if results[i] == nil && len(tst) != len(ns) {
t.Errorf("Test %d failed: result array len (%d) mismatch", i, len(ns))
}
arr := []string{}
for n := range ns {
if tst[n] != ns[n].name {
t.Errorf("Test %d failed: result node mismatches (%v) and (%v)", i, tst, arr)
break
}
arr = append(arr, tst[n])
}
}
}
}
func TestEventNotify(t *testing.T) {
session1 := initSession()
session2 := initSession()
name, _ := createFile(31)
node, _ := session1.UploadFile(name, session1.FS.root, "", nil)
os.Remove(name)
for i := 0; i < 10; i++ {
time.Sleep(time.Second * 10)
node = session2.FS.HashLookup(node.hash)
if node != nil {
break
}
}
if node == nil {
t.Fatal("Expects file to be found in second client's FS")
}
err := session2.Delete(node, true)
if err != nil {
t.Fatal("Delete failed", err)
}
time.Sleep(time.Second * 5)
node = session1.FS.HashLookup(node.hash)
if node != nil {
t.Fatal("Expects file to not be found in first client's FS")
}
}
func TestExportLink(t *testing.T) {
session := initSession()
name, _ := createFile(31)
node, err := session.UploadFile(name, session.FS.root, "", nil)
os.Remove(name)
if err != nil {
t.Fatal("Upload failed", err)
}
if node == nil {
t.Error("Failed to obtain node after upload")
}
// Don't include decryption key
_, err = session.Link(node, false)
if err != nil {
t.Error("Failed to export link (key not included)")
}
// Do include decryption key
_, err = session.Link(node, true)
if err != nil {
t.Error("Failed to export link (key included)")
}
} | ["\"MEGA_USER\"", "\"MEGA_PASSWD\""] | [] | ["MEGA_PASSWD", "MEGA_USER"] | [] | ["MEGA_PASSWD", "MEGA_USER"] | go | 2 | 0 | |
bin/cakechat_server.py | import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from cakechat.utils.env import set_keras_tf_session
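# Fraction of GPU memory the Keras/TensorFlow session may use; read from GPU_MEMORY_FRACTION, defaulting to 0.1.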
gpu_memory_fraction = os.environ.get('GPU_MEMORY_FRACTION', 0.1)
set_keras_tf_session(gpu_memory_fraction)
from cakechat.api.v1.server import app
if __name__ == '__main__':
# runs development server
app.run(host='0.0.0.0', port=8080)
| [] | [] | ["GPU_MEMORY_FRACTION"] | [] | ["GPU_MEMORY_FRACTION"] | python | 1 | 0 | |
Main/PredictESN.py | import sys
from pathlib import Path
from CommonHelper import GVar
from CommonHelper.Common import GetFileName, GetTrainedModelFolder, LoadFileToDict, GetPredictESNFolder
from CommonHelper.GVar import tblFunctionList, tblModel
from keras.models import load_model
from LogHelper.ReadFile import LogFileToTraceList
from LogHelper.SavePredictESN import SavePredictESNFile
from LogHelper.Trace import IntListToTraceSequence, RawTraceToActPer, ActPerToRawTrace
from MachineLearningHelper.trainningHelper import lstm_get_data_from_trace, one_hot_decode, \
multi_decode_with_probability, encode_trace_sequence, multi_decode_with_top_rank
from Main.PredictPerWithAct import LoadModel, PredictPer
from MySQLHelper import Query
from MySQLHelper.Query import GetCellValue, UpdateFinishFuntionInDB
import platform
import os
countTryingFailed, totalResult = 0, 0
resulTraceWithActList, resulTraceWithPerList, resulTraceWithTimeList = [], [], []
def PredictActWithPro_Rank(_model, _inputSeq, _stepIn,
_name_to_int_set, _int_to_name_set, _predictMethod, _predictMethodVal, _maxStep):
global countTryingFailed, totalResult, resulTraceWithActList
if _inputSeq[-1] == "END":
totalResult += 1
print(
"-------------------------------------------------------------------- Generated trace: %d" % totalResult, )
resulTraceWithActList.append(_inputSeq)
else:
if len(_inputSeq) >= _maxStep:
countTryingFailed += 1
print("---------------------------------Cannot reach END activity. Try: ", countTryingFailed)
else:
if len(_inputSeq) >= stepIn:
currentInput = _inputSeq[-_stepIn:]
this_X = encode_trace_sequence(currentInput, _name_to_int_set)
this_yHat = _model.predict(this_X, batch_size=1, verbose=0)
original_this_X = IntListToTraceSequence(one_hot_decode(this_X.reshape(stepIn * feature, -1)),
_int_to_name_set)
if _predictMethod == "Probability":
this_yHat_decoded = multi_decode_with_probability(this_yHat, _predictMethodVal)
else:
this_yHat_decoded = multi_decode_with_top_rank(this_yHat, _predictMethodVal)
potential_List_yhat = IntListToTraceSequence(this_yHat_decoded, _int_to_name_set)
for y in potential_List_yhat.copy():
new_Seq = _inputSeq.copy()
new_Seq.append(y)
PredictActWithPro_Rank(_model, new_Seq, _stepIn,
_name_to_int_set, _int_to_name_set, _predictMethod, _predictMethodVal, _maxStep)
if platform.system() == 'Darwin': # Mac OS
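# Setting KMP_DUPLICATE_LIB_OK works around the duplicate-OpenMP-runtime abort that MKL-backed libraries can raise on macOS.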
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
funcId = GetFileName(__file__)
parameterFrame = GetCellValue('parameter', tblFunctionList, ['id'], [funcId])
para = parameterFrame[0].splitlines()
_keySeparateInside = '!!'
logFile = para[0].split(_keySeparateInside)[-1]
stepIn = (para[1].split(_keySeparateInside)[-1])
maxStep = int(para[2].split(_keySeparateInside)[-1])
predictMethod = (para[3].split(_keySeparateInside)[-1])
predictMethodValue = float(para[4].split(_keySeparateInside)[-1])
fileName = (para[5].split(_keySeparateInside)[-1])
fullFilePath = str(Path(os.getcwd()).parent.parent) + GetPredictESNFolder() + fileName
_keyWordSeparate, _keySeparateInside, _traceList = LogFileToTraceList(fullFilePath)
outputParameterValue = "Trained model does not exist!"
predictType, stepOut, feature = "Activity", "1", "1"
resultdf = GetCellValue('id', tblModel, ['name', 'stepin', 'stepout', 'predicttype', 'feature'],
[logFile, stepIn, stepOut, predictType, feature])
if len(resultdf) == 0:
print(outputParameterValue)
else:
modelName = resultdf[0]
folderPath = str(Path(os.getcwd()).parent) + GetTrainedModelFolder(logFile)
modelFile = folderPath + modelName
if not os.path.exists(modelFile):
print(outputParameterValue)
else:
# Load model
saved_model = load_model(modelFile)
name_to_int_set = LoadFileToDict(
folderPath + GetCellValue('name_to_int_set', tblModel,
['name', 'stepin', 'stepout',
'predicttype', 'feature'],
[logFile, stepIn, stepOut, predictType,
feature])[0])
int_to_name_set = LoadFileToDict(
folderPath + GetCellValue('int_to_name_set', tblModel,
['name', 'stepin', 'stepout',
'predicttype', 'feature'],
[logFile, stepIn, stepOut, predictType,
feature])[0])
stepIn, stepOut, feature = int(stepIn), int(stepOut), int(feature)
subTrace, subActTrace, subPerTrace, combineTrace, inputTest = [], [], [], None, None
# Remove redundant input traces (duplicate traces are removed and not used)
listOfInputAct = []
listOfInputPer = []
dictMapActPer = {}
for inputTrace in _traceList:
currentInputAct = RawTraceToActPer(inputTrace, _keySeparateInside, 0)
currentInputPer = RawTraceToActPer(inputTrace, _keySeparateInside, 1)
dictMapActPer["!@#".join(currentInputAct)] = "!@#".join(currentInputPer)
if not (currentInputAct in listOfInputAct):
listOfInputAct.append(currentInputAct)
if predictMethod == "Probability":
predictMethodValue = predictMethodValue / 100
for traceWithAct in listOfInputAct:
PredictActWithPro_Rank(saved_model, traceWithAct, stepIn,
name_to_int_set, int_to_name_set, predictMethod, predictMethodValue, maxStep)
elif predictMethod == "TopRank":
for traceWithAct in listOfInputAct:
predictMethodValue = int(predictMethodValue)
PredictActWithPro_Rank(saved_model, traceWithAct, stepIn,
name_to_int_set, int_to_name_set, predictMethod, predictMethodValue, maxStep)
# ResultList[]: Only contains activity. => Predict Performer.
# Load saved model of 1F_Act_Per
print("Loading saved model...")
saved_1F_Act_Per_Model, name_to_int_1F, int_to_name_1F = LoadModel(logFile, stepIn)
if saved_1F_Act_Per_Model is not None:
print(saved_1F_Act_Per_Model.summary())
countPredictPer = 0
for currentActList in resulTraceWithActList:
if len(currentActList) <= stepIn:
continue
countPredictPer += 1
print(
"-------Predicting performer for generated trace %d/%d" % (countPredictPer, len(resulTraceWithActList)))
thisPerList, thisTimeList = [], []
currentOriginActInput = currentActList[:stepIn]
thisPerList = dictMapActPer["!@#".join(currentOriginActInput)].split("!@#")
# PredictPer(logFile, _saveModel, actList, stepIn, name_to_int_set, int_to_name_set, perList):
if saved_1F_Act_Per_Model is not None:
thisPerList = PredictPer(saved_1F_Act_Per_Model, currentActList,
stepIn, name_to_int_1F, int_to_name_1F, thisPerList)
else:
[thisPerList.append("CommingSoon") for _ in range(stepIn, len(currentActList))]
[thisTimeList.append("NULL") for _ in range(len(currentActList))]
resulTraceWithPerList.append(thisPerList)
resulTraceWithTimeList.append(thisTimeList)
finalResult = []
# [finalResult.append(act+_keySeparateInside+per+_keySeparateInside+"NULL")
# for act, per in zip(resulTraceWithActList, resulTraceWithPerList)]
for actList, perList, timeList in zip(resulTraceWithActList, resulTraceWithPerList, resulTraceWithTimeList):
finalResult.append(ActPerToRawTrace(actList, perList, timeList, _keySeparateInside))
outputFileName = logFile.replace('.txt', '') + "_stepIn_" + str(stepIn) + "_" + str(predictMethodValue) +"_percent.txt"
outputParameterValue = outputFileName
SavePredictESNFile(outputFileName, _keyWordSeparate, _keySeparateInside, finalResult)
# Update database
UpdateFinishFuntionInDB(funcId, outputParameterValue)
print("finished!")
sys.exit()
| [] | [] | ["KMP_DUPLICATE_LIB_OK"] | [] | ["KMP_DUPLICATE_LIB_OK"] | python | 1 | 0 | |
paymentrequests/paymentrequests_id_pisp_post.py | #-----------------------------------------------------------------------------
# Description: Example of calling the NoFrixion MoneyMoov API
# paymentrequests/{id}/pisp POST method. It submits a payment initiation
# request.
#
# Usage:
# 1. Create a merchant access token in the sandbox portal at:
# https://portal-sandbox.nofrixion.com.
# 2. Set the token as an environment variable in your console:
# set NOFRIXION_MERCHANT_TOKEN=<JWT token from previous step>
# 3. Run the script using the command: python -u "filename"
# 4. If successful, a payment initiation response object containing the payment
# initiation ID and redirect URL will be displayed.
#-----------------------------------------------------------------------------
# The 'requests' library for Python can be used to make calls to the MoneyMoov API in
# popular python frameworks such as Django and Flask.
import requests
import os
# Remember, the JWT access token must be securely stored ('os' module above allows storage in environment variable)
# ... and when dealing with payment requests, use a MERCHANT token!
jwtToken = os.environ['NOFRIXION_MERCHANT_TOKEN']
baseUrl = "https://api-sandbox.nofrixion.com/api/v1/paymentrequests"
paymentRequestID = "e747d05e-3d60-4edb-9886-08d9f65a6611"
# PISP provider id, use the paymentrequests/{id}/pisp/providers GET action to see a list
providerID = "H120000001"
headers = {
"Accept": "application/json",
"Authorization": f"Bearer {jwtToken}"
}
paymentData = {
"providerID": providerID
}
try:
response = requests.request("POST", f"{baseUrl}/{paymentRequestID}/pisp", headers=headers, data=paymentData)
if response.ok:
# On successful update, the API returns the payment initiation response containing
# payment initiation ID and the URL to redirect the payer to their financial institution.
print(response.json())
else:
# If not OK, response contains MoneyMoov problem (https://docs.nofrixion.com/reference/error-messages)
print(response.json())
except Exception as ex:
print(ex) | [] | [] | ["NOFRIXION_MERCHANT_TOKEN"] | [] | ["NOFRIXION_MERCHANT_TOKEN"] | python | 1 | 0 | |
webapp_enable_oc.go | package main
import (
"context"
"encoding/json"
"flag"
"fmt"
"log"
"net/http"
"os"
"time"
"cloud.google.com/go/spanner"
"contrib.go.opencensus.io/exporter/stackdriver"
"github.com/google/uuid"
"google.golang.org/api/iterator"
)
var (
projectID = os.Getenv("PROJECT_ID")
instanceID = os.Getenv("INSTANCE_ID")
databaseID = os.Getenv("DATABASE_ID")
dbPath = fmt.Sprintf("projects/%s/instances/%s/databases/%s", projectID, instanceID, databaseID)
client *spanner.Client
)
type user struct {
ID string `spanner:"id"`
Firstname string `spanner:"first_name"`
Lastname string `spanner:"last_name"`
Email string `spanner:"email"`
}
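// listUsers reads all rows from the users table and returns them JSON-encoded.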
func listUsers() []byte {
ctx := context.Background()
stmt := spanner.Statement{SQL: `
SELECT id, first_name, last_name, email FROM users
`}
iter := client.Single().Query(ctx, stmt)
defer iter.Stop()
users := []user{}
for {
row, err := iter.Next()
if err == iterator.Done {
break
}
if err != nil {
log.Fatal(err)
}
var u user
if err := row.ToStruct(&u); err != nil {
log.Fatal(err)
}
users = append(users, u)
}
// fmt.Println("users", users)
// Convert to json string format.
byteArray, err := json.Marshal(users)
if err != nil {
log.Fatal(err)
}
return byteArray
}
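// writeUser parses the posted form, generates a new UUID, and inserts or updates the user row in Spanner.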
func writeUser(w http.ResponseWriter, req *http.Request) {
if err := req.ParseForm(); err != nil {
fmt.Fprintf(w, "Failed to parse the form.: %v", err)
http.Error(w, "Failed to parse the form.", http.StatusBadRequest)
return
}
fmt.Fprintf(w, "Post form: %v\n", req.PostForm)
id, err := uuid.NewUUID()
if err != nil {
fmt.Fprintf(w, "Failed to generate UUID: %v", err)
http.Error(w, "Failed to generate UUID.", http.StatusBadRequest)
return
}
firstname := req.FormValue("firstname")
lastname := req.FormValue("lastname")
email := req.FormValue("email")
fmt.Fprintf(w, "ID = %s\n", id.String())
fmt.Fprintf(w, "First name = %s\n", firstname)
fmt.Fprintf(w, "Last name = %s\n", lastname)
fmt.Fprintf(w, "Email = %s\n", email)
columns := []string{"id", "email", "first_name", "last_name"}
m := []*spanner.Mutation{
spanner.InsertOrUpdate("users", columns, []interface{}{id.String(), email, firstname, lastname}),
}
_, err = client.Apply(context.Background(), m)
if err != nil {
fmt.Fprintf(w, "Failed to write data: %v", err)
http.Error(w, "Failed to write data.", http.StatusBadRequest)
return
}
}
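// users routes GET requests to listUsers and POST requests to writeUser.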
func users(w http.ResponseWriter, req *http.Request) {
switch req.Method {
case "GET":
w.Write(listUsers())
case "POST":
writeUser(w, req)
default:
fmt.Fprintf(w, "Sorry, only GET and POST methods are supported.")
}
}
func main() {
port := flag.Int("port", 8080, "port to run a testing server.")
flag.Parse()
// Enable all default views.
spanner.EnableStatViews()
// Set up the stackdriver exporter.
sd, err := stackdriver.NewExporter(stackdriver.Options{
ProjectID: projectID,
ReportingInterval: 60 * time.Second,
})
if err != nil {
log.Fatalf("Failed to create the StackDriver exporter: %v", err)
}
defer sd.Flush()
sd.StartMetricsExporter()
defer sd.StopMetricsExporter()
// Initialize a Spanner client.
ctx := context.Background()
client, err = spanner.NewClient(ctx, dbPath)
if err != nil {
log.Fatal(err)
}
defer client.Close()
// Start the server.
http.HandleFunc("/users", users)
fmt.Printf("Starting server for testing metrics. Listening to port %d.\n", *port)
if err := http.ListenAndServe(fmt.Sprintf(":%d", *port), nil); err != nil {
log.Fatal(err)
}
}
| ["\"PROJECT_ID\"", "\"INSTANCE_ID\"", "\"DATABASE_ID\""] | [] | ["INSTANCE_ID", "PROJECT_ID", "DATABASE_ID"] | [] | ["INSTANCE_ID", "PROJECT_ID", "DATABASE_ID"] | go | 3 | 0 | |
config/settings/local.py | # -*- coding: utf-8 -*-
"""
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
"""
import socket
import os
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env('DJANGO_SECRET_KEY', default='ua1k04-wgcowt1lbn1m@stld#9w4#f5xf80=_5#f9=0!%14c*6')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_PORT = 1025
EMAIL_HOST = 'localhost'
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
default='django.core.mail.backends.console.EmailBackend')
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar', )
INTERNAL_IPS = ['127.0.0.1', '10.0.2.2', ]
# trick to make the debug toolbar work when developing with Docker
if os.environ.get('USE_DOCKER') == 'yes':
ip = socket.gethostbyname(socket.gethostname())
INTERNAL_IPS += [ip[:-1] + "1"]
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
ALLOWED_HOSTS = ['127.0.0.1', '192.168.0.100']
| [] | [] | ["USE_DOCKER"] | [] | ["USE_DOCKER"] | python | 1 | 0 | |
examples/dags/spark_example.py | from datetime import datetime
from airflow.models import DAG
from airflow.providers.apache.spark.operators.spark_jdbc import SparkJDBCOperator
from airflow.providers.apache.spark.operators.spark_sql import SparkSqlOperator
from airflow.providers.apache.spark.operators.spark_submit import SparkSubmitOperator
import os
with DAG(
dag_id='spark_test',
schedule_interval=None,
start_date=datetime(2021, 1, 1),
catchup=False,
tags=['FreeUni'],
) as dag:
# [START howto_operator_spark_submit]
submit_job = SparkSubmitOperator(
application="/airflow/jobs/test_job.py", task_id="submit_job"
)
# [END howto_operator_spark_submit]
submit_job_2 = SparkSubmitOperator(
application=f"{os.getenv('SPARK_HOME')}/examples/src/main/python/pi.py", task_id="submit_job_2"
)
submit_job_3 = SparkSubmitOperator(
application=f"/airflow/jobs/breaking_news.py", task_id="breaking_news"
)
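# breaking_news runs only after both submit_job and submit_job_2 have completed.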
[submit_job, submit_job_2] >> submit_job_3 | [] | [] | ["SPARK_HOME"] | [] | ["SPARK_HOME"] | python | 1 | 0 | |
app/bq_service.py | from datetime import datetime, timedelta, timezone
import os
from functools import lru_cache
from pprint import pprint
from dotenv import load_dotenv
from google.cloud import bigquery
from google.cloud.bigquery import QueryJobConfig, ScalarQueryParameter
from app import APP_ENV, seek_confirmation
from app.decorators.number_decorators import fmt_n
load_dotenv()
GOOGLE_APPLICATION_CREDENTIALS = os.getenv("GOOGLE_APPLICATION_CREDENTIALS") # implicit check by google.cloud (and keras)
PROJECT_NAME = os.getenv("BIGQUERY_PROJECT_NAME", default="tweet-collector-py")
DATASET_NAME = os.getenv("BIGQUERY_DATASET_NAME", default="impeachment_development") #> "_test" or "_production"
DESTRUCTIVE_MIGRATIONS = (os.getenv("DESTRUCTIVE_MIGRATIONS", default="false") == "true")
VERBOSE_QUERIES = (os.getenv("VERBOSE_QUERIES", default="false") == "true")
CLEANUP_MODE = (os.getenv("CLEANUP_MODE", default="true") == "true")
DEFAULT_START = "2019-12-02 01:00:00" # @deprecated, the "beginning of time" for the impeachment dataset. todo: allow customization via env var
DEFAULT_END = "2020-03-24 20:00:00" # @deprecated, the "end of time" for the impeachment dataset. todo: allow customization via env var
def generate_timestamp(dt=None):
"""Formats datetime object for storing in BigQuery. Uses current time by default. """
dt = dt or datetime.now()
return dt.strftime("%Y-%m-%d %H:%M:%S")
def generate_temp_table_id():
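"""Returns a timestamp-based identifier used to name temporary destination tables for batch query jobs."""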
return datetime.now().strftime('%Y_%m_%d_%H_%M_%S')
def split_into_batches(my_list, batch_size=9000):
"""Splits a list into evenly sized batches""" # h/t: https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
for i in range(0, len(my_list), batch_size):
yield my_list[i : i + batch_size]
class BigQueryService():
def __init__(self, project_name=PROJECT_NAME, dataset_name=DATASET_NAME,
verbose=VERBOSE_QUERIES, destructive=DESTRUCTIVE_MIGRATIONS, cautious=True):
self.project_name = project_name
self.dataset_name = dataset_name
self.dataset_address = f"{self.project_name}.{self.dataset_name}"
self.verbose = (verbose == True)
self.destructive = (destructive == True)
self.cautious = (cautious == True)
self.client = bigquery.Client()
print("-------------------------")
print("BIGQUERY SERVICE...")
print(" DATASET ADDRESS:", self.dataset_address.upper())
print(" DESTRUCTIVE MIGRATIONS:", self.destructive)
print(" VERBOSE QUERIES:", self.verbose)
if self.cautious:
seek_confirmation()
@property
def metadata(self):
return {"dataset_address": self.dataset_address, "destructive": self.destructive, "verbose": self.verbose}
def execute_query(self, sql):
"""Param: sql (str)"""
if self.verbose:
print(sql)
job = self.client.query(sql)
return job.result()
def execute_query_in_batches(self, sql, temp_table_name=None):
"""Param: sql (str)"""
if self.verbose:
print(sql)
if not temp_table_name:
temp_table_id = generate_temp_table_id()
temp_table_name = f"{self.dataset_address}.temp_{temp_table_id}"
job_config = bigquery.QueryJobConfig(
priority=bigquery.QueryPriority.BATCH,
allow_large_results=True,
destination=temp_table_name
)
job = self.client.query(sql, job_config=job_config)
print("BATCH QUERY JOB:", type(job), job.job_id, job.state, job.location)
return job
def insert_records_in_batches(self, table, records):
"""
Params:
table (table ID string, Table, or TableReference)
records (list of dictionaries)
"""
rows_to_insert = [list(d.values()) for d in records]
#errors = self.client.insert_rows(table, rows_to_insert)
#> ... google.api_core.exceptions.BadRequest: 400 POST https://bigquery.googleapis.com/bigquery/v2/projects/.../tables/daily_bot_probabilities/insertAll:
#> ... too many rows present in the request, limit: 10000 row count: 36092.
#> ... see: https://cloud.google.com/bigquery/quotas#streaming_inserts
errors = []
batches = list(split_into_batches(rows_to_insert, batch_size=5000))
for batch in batches:
errors += self.client.insert_rows(table, batch)
return errors
def delete_temp_tables_older_than(self, days=3):
"""Deletes all tables that:
have "temp_" in their name (product of the batch jobs), and were
created at least X days ago (safely avoid deleting tables being used by in-progress batch jobs)
"""
cutoff_date = datetime.now(tz=timezone.utc) - timedelta(days=days)
print("CUTOFF DATE:", cutoff_date)
tables = list(self.client.list_tables(self.dataset_name)) # API call
tables_to_delete = [t for t in tables if "temp_" in t.table_id and t.created < cutoff_date]
print("TABLES TO DELETE:")
pprint([t.table_id for t in tables_to_delete])
seek_confirmation()
print("DELETING...")
for old_temp_table in tables_to_delete:
print(" ", old_temp_table.table_id)
self.client.delete_table(old_temp_table)
#def get_table(self, table_name):
# return self.client.get_table(f"{self.dataset_address}.{table_name}") # API call. cache it here once.
#
# COLLECTING TWEETS V2
#
def migrate_topics_table(self):
print("MIGRATING TOPICS TABLE...")
sql = ""
if self.destructive:
sql += f"DROP TABLE IF EXISTS `{self.dataset_address}.topics`; "
sql += f"""
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.topics` (
topic STRING NOT NULL,
created_at TIMESTAMP,
);
"""
return list(self.execute_query(sql))
def migrate_tweets_table(self):
print("MIGRATING TWEETS TABLE...")
sql = ""
if self.destructive:
sql += f"DROP TABLE IF EXISTS `{self.dataset_address}.tweets`; "
sql += f"""
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.tweets` (
status_id STRING,
status_text STRING,
truncated BOOLEAN,
retweeted_status_id STRING,
retweeted_user_id STRING,
retweeted_user_screen_name STRING,
reply_status_id STRING,
reply_user_id STRING,
is_quote BOOLEAN,
geo STRING,
created_at TIMESTAMP,
user_id STRING,
user_name STRING,
user_screen_name STRING,
user_description STRING,
user_location STRING,
user_verified BOOLEAN,
user_created_at TIMESTAMP
);
"""
return list(self.execute_query(sql))
@property
@lru_cache(maxsize=None)
def topics_table(self):
return self.client.get_table(f"{self.dataset_address}.topics") # an API call (caches results for subsequent inserts)
@property
@lru_cache(maxsize=None)
def tweets_table(self):
return self.client.get_table(f"{self.dataset_address}.tweets") # an API call (caches results for subsequent inserts)
def fetch_topics(self):
"""Returns a list of topic strings"""
sql = f"""
SELECT topic, created_at
FROM `{self.dataset_address}.topics`
ORDER BY created_at;
"""
return self.execute_query(sql)
def fetch_topic_names(self):
return [row.topic for row in self.fetch_topics()]
def append_topics(self, topics):
"""
Inserts topics unless they already exist.
Param: topics (list of dict)
"""
rows = self.fetch_topics()
existing_topics = [row.topic for row in rows]
new_topics = [topic for topic in topics if topic not in existing_topics]
if new_topics:
rows_to_insert = [[new_topic, generate_timestamp()] for new_topic in new_topics]
errors = self.client.insert_rows(self.topics_table, rows_to_insert)
return errors
else:
print("NO NEW TOPICS...")
return []
def append_tweets(self, tweets):
"""Param: tweets (list of dict)"""
rows_to_insert = [list(d.values()) for d in tweets]
errors = self.client.insert_rows(self.tweets_table, rows_to_insert)
return errors
#
# COLLECTING USER FRIENDS
#
def migrate_populate_users(self):
"""
Resulting table has a row for each user id / screen name combo
(multiple rows per user id if they changed their screen name)
"""
sql = ""
if self.destructive:
sql += f"DROP TABLE IF EXISTS `{self.dataset_address}.users`; "
sql += f"""
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.users` as (
SELECT DISTINCT
user_id
,user_screen_name as screen_name
FROM `{self.dataset_address}.tweets`
WHERE user_id IS NOT NULL AND user_screen_name IS NOT NULL
ORDER BY 1
);
"""
results = self.execute_query(sql)
return list(results)
def migrate_user_friends(self):
sql = ""
if self.destructive:
sql += f"DROP TABLE IF EXISTS `{self.dataset_address}.user_friends`; "
sql += f"""
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.user_friends` (
user_id STRING,
screen_name STRING,
friend_count INT64,
friend_names ARRAY<STRING>,
start_at TIMESTAMP,
end_at TIMESTAMP
);
"""
results = self.execute_query(sql)
return list(results)
def fetch_remaining_users(self, min_id=None, max_id=None, limit=None):
"""Returns a list of table rows"""
sql = f"""
SELECT
u.user_id
,u.screen_name
FROM `{self.dataset_address}.users` u
LEFT JOIN `{self.dataset_address}.user_friends` f ON u.user_id = f.user_id
WHERE f.user_id IS NULL
"""
if min_id and max_id:
sql += f" AND CAST(u.user_id as int64) BETWEEN {int(min_id)} AND {int(max_id)} "
sql += f"ORDER BY u.user_id "
if limit:
sql += f"LIMIT {int(limit)};"
results = self.execute_query(sql)
return list(results)
@property
@lru_cache(maxsize=None)
def user_friends_table(self):
return self.client.get_table(f"{self.dataset_address}.user_friends") # an API call (caches results for subsequent inserts)
def insert_user_friends(self, records):
"""
Param: records (list of dictionaries)
"""
rows_to_insert = [list(d.values()) for d in records]
#rows_to_insert = [list(d.values()) for d in records if any(d["friend_names"])] # doesn't store failed attempts. can try those again later
#if any(rows_to_insert):
errors = self.client.insert_rows(self.user_friends_table, rows_to_insert)
return errors
def user_friend_collection_progress(self):
sql = f"""
SELECT
count(distinct user_id) as user_count
,round(avg(runtime_seconds), 2) as avg_duration
,round(sum(has_friends) / count(distinct user_id), 2) as pct_friendly
,round(avg(CASE WHEN has_friends = 1 THEN runtime_seconds END), 2) as avg_duration_friendly
,round(avg(CASE WHEN has_friends = 1 THEN friend_count END), 2) as avg_friends_friendly
FROM (
SELECT
user_id
,friend_count
,if(friend_count > 0, 1, 0) as has_friends
,start_at
,end_at
,DATETIME_DIFF(CAST(end_at as DATETIME), cast(start_at as DATETIME), SECOND) as runtime_seconds
FROM `{self.dataset_address}.user_friends`
) subq
"""
return self.execute_query(sql)
#
# FRIEND GRAPHS
#
def fetch_user_friends(self, min_id=None, max_id=None, limit=None):
sql = f"""
SELECT user_id, screen_name, friend_count, friend_names, start_at, end_at
FROM `{self.dataset_address}.user_friends`
"""
if min_id and max_id:
sql += f" WHERE CAST(user_id as int64) BETWEEN {int(min_id)} AND {int(max_id)} "
sql += f"ORDER BY user_id "
if limit:
sql += f"LIMIT {int(limit)};"
#return list(self.execute_query(sql))
return self.execute_query(sql) # return the generator so we can avoid storing the results in memory
def fetch_user_friends_in_batches(self, limit=None, min_friends=None):
sql = f"""
SELECT user_id, screen_name, friend_count, friend_names
FROM `{self.dataset_address}.user_friends`
"""
if min_friends:
sql += f" WHERE ARRAY_LENGTH(friend_names) >= {int(min_friends)} "
if limit:
sql += f" LIMIT {int(limit)}; "
return self.execute_query_in_batches(sql)
def partition_user_friends(self, n=10):
"""Param: n (int) the number of partitions; each will be of equal size"""
sql = f"""
SELECT
partition_id
,count(DISTINCT user_id) as user_count
,min(user_id) as min_id
,max(user_id) as max_id
FROM (
SELECT
NTILE({int(n)}) OVER (ORDER BY CAST(user_id as int64)) as partition_id
,CAST(user_id as int64) as user_id
FROM (SELECT DISTINCT user_id FROM `{self.dataset_address}.user_friends`)
) user_partitions
GROUP BY partition_id
"""
results = self.execute_query(sql)
return list(results)
def fetch_random_users(self, limit=1000, topic="impeach", start_at=DEFAULT_START, end_at=DEFAULT_END):
"""
Fetches a random slice of users talking about a given topic during a given timeframe.
Params:
topic (str) the topic they were tweeting about:
to be balanced, choose 'impeach', '#IGHearing', '#SenateHearing', etc.
to be left-leaning, choose '#ImpeachAndConvict', '#ImpeachAndRemove', etc.
to be right-leaning, choose '#ShamTrial', '#AquittedForever', '#MAGA', etc.
limit (int) the max number of users to fetch
start_at (str) a date string for the earliest tweet
end_at (str) a date string for the latest tweet
"""
sql = f"""
SELECT DISTINCT user_id, user_screen_name, user_created_at
FROM `{self.dataset_address}.tweets`
WHERE upper(status_text) LIKE '%{topic.upper()}%' AND (created_at BETWEEN '{start_at}' AND '{end_at}')
ORDER BY rand()
LIMIT {int(limit)};
"""
return self.execute_query(sql)
#
# RETWEET GRAPHS
#
def fetch_retweet_counts_in_batches(self, topic=None, start_at=None, end_at=None):
"""
For each retweeter, includes the number of times they retweeted each other user.
Optionally about a given topic.
Optionally within a given timeframe.
Params:
topic (str) the topic they were tweeting about, like 'impeach', '#MAGA', "@politico", etc.
start_at (str) a date string for the earliest tweet
end_at (str) a date string for the latest tweet
"""
sql = f"""
SELECT
user_id
,user_screen_name
,retweet_user_screen_name
,count(distinct status_id) as retweet_count
FROM `{self.dataset_address}.retweets`
WHERE user_screen_name <> retweet_user_screen_name -- excludes people retweeting themselves
"""
if topic:
sql+=f"""
AND upper(status_text) LIKE '%{topic.upper()}%'
"""
if start_at and end_at:
sql+=f"""
AND (created_at BETWEEN '{start_at}' AND '{end_at}')
"""
sql += """
GROUP BY 1,2,3
"""
return self.execute_query_in_batches(sql)
def fetch_specific_user_friends(self, screen_names):
sql = f"""
SELECT user_id, screen_name, friend_count, friend_names, start_at, end_at
FROM `{self.dataset_address}.user_friends`
WHERE screen_name in {tuple(screen_names)} -- tuple conversion surrounds comma-separated screen_names in parens
"""
return self.execute_query(sql)
def fetch_specific_retweet_counts(self, screen_names):
"""FYI this fetches multiple rows per screen_name, for each screen_name that user retweeted"""
sql = f"""
SELECT user_id, user_screen_name, retweet_user_screen_name, retweet_count
FROM `{self.dataset_address}.retweet_counts`
WHERE user_screen_name in {tuple(screen_names)} -- tuple conversion surrounds comma-separated screen_names in parens
-- AND user_screen_name <> retweet_user_screen_name -- exclude users who have retweeted themselves
ORDER BY 2,3
"""
return self.execute_query(sql)
def fetch_retweet_weeks(self, start_at=None, end_at=None):
"""
Params:
start_at (str) like "2019-12-15 00:00:00"
end_at (str) like "2020-03-21 23:59:59"
"""
sql = f"""
SELECT
CASE
WHEN EXTRACT(week from created_at) = 0 THEN EXTRACT(year from created_at) - 1 -- treat first week of new year as the previous year
ELSE EXTRACT(year from created_at)
END year
,CASE
WHEN EXTRACT(week from created_at) = 0 THEN 52 -- treat first week of new year as the previous week
ELSE EXTRACT(week from created_at)
END week
,count(DISTINCT EXTRACT(day from created_at)) as day_count
,min(created_at) as min_created
,max(created_at) as max_created
,count(DISTINCT status_id) as retweet_count
,count(DISTINCT user_id) as user_count
FROM `{self.dataset_address}.retweets`
"""
if start_at and end_at:
sql += f"""
WHERE created_at BETWEEN '{start_at}' AND '{end_at}'
"""
sql += """
GROUP BY 1,2
ORDER BY 1,2
"""
return self.execute_query(sql)
#
# LOCAL ANALYSIS (PG PIPELINE)
#
def fetch_tweets_in_batches(self, limit=None, start_at=None, end_at=None):
sql = f"""
SELECT
status_id
,status_text
,truncated
,NULL as retweeted_status_id -- restore for version 2
,NULL as retweeted_user_id -- restore for version 2
,NULL as retweeted_user_screen_name -- restore for version 2
,reply_status_id
,reply_user_id
,is_quote
,geo
,created_at
,user_id
,user_name
,user_screen_name
,user_description
,user_location
,user_verified
,user_created_at
FROM `{self.dataset_address}.tweets`
"""
if start_at and end_at:
sql+=f"""
WHERE (created_at BETWEEN '{str(start_at)}' AND '{str(end_at)}')
"""
if limit:
sql += f" LIMIT {int(limit)}; "
return self.execute_query_in_batches(sql)
def fetch_user_details_in_batches(self, limit=None):
sql = f"""
SELECT
user_id
,screen_name
,name
,description
,location
,verified
,created_at
,screen_name_count
,name_count
,description_count
,location_count
,verified_count
,created_at_count
,screen_names
,names
,descriptions
,locations
,verifieds
,created_ats
,friend_count
,status_count
,retweet_count
-- these topics are specific to the impeachment dataset, so will need to generalize if/when working with another topic (leave for future concern)
,impeach_and_convict
,senate_hearing
,ig_hearing
,facts_matter
,sham_trial
,maga
,acquitted_forever
FROM `{self.dataset_address}.user_details`
"""
if limit:
sql += f"LIMIT {int(limit)};"
return self.execute_query_in_batches(sql)
def fetch_retweeter_details_in_batches(self, limit=None):
sql = f"""
SELECT
user_id
,verified
,created_at
,screen_name_count
,name_count
,retweet_count
,ig_report
,ig_hearing
,senate_hearing
,not_above_the_law
,impeach_and_convict
,impeach_and_remove
,facts_matter
,sham_trial
,maga
,acquitted_forever
,country_over_party
FROM `{self.dataset_address}.retweeter_details`
"""
if limit:
sql += f"LIMIT {int(limit)};"
return self.execute_query_in_batches(sql)
def fetch_retweeters_by_topic_exclusive(self, topic):
"""
Get the retweeters talking about topic x and those not talking about it, so we can perform a two-sample KS-test on them.
"""
topic = topic.upper() # do uppercase conversion once here instead of many times inside sql below
sql = f"""
-- TOPIC: '{topic}'
SELECT
rt.user_id
,rt.user_created_at
,count(distinct case when REGEXP_CONTAINS(upper(rt.status_text), '{topic}') then rt.status_id end) as count
FROM {self.dataset_address}.retweets rt
GROUP BY 1,2
"""
return self.execute_query(sql)
def fetch_retweeters_by_topics_exclusive(self, x_topic, y_topic):
"""
Get the retweeters talking about topic x and not y (and vice versa).
For each user, determines how many times they were talking about topic x and y.
Only returns users who were talking about one or the other, so we can perform a two-sample KS-test on them.
"""
x_topic = x_topic.upper() # do uppercase conversion once here instead of many times inside sql below
y_topic = y_topic.upper() # do uppercase conversion once here instead of many times inside sql below
sql = f"""
-- TOPICS: '{x_topic}' | '{y_topic}'
SELECT
rt.user_id
,rt.user_created_at
,count(distinct case when REGEXP_CONTAINS(upper(rt.status_text), '{x_topic}') then rt.status_id end) as x_count
,count(distinct case when REGEXP_CONTAINS(upper(rt.status_text), '{y_topic}') then rt.status_id end) as y_count
FROM {self.dataset_address}.retweets rt
WHERE REGEXP_CONTAINS(upper(rt.status_text), '{x_topic}')
OR REGEXP_CONTAINS(upper(rt.status_text), '{y_topic}')
GROUP BY 1,2
HAVING (x_count > 0 and y_count = 0) OR (x_count = 0 and y_count > 0) -- mutually exclusive populations
"""
return self.execute_query(sql)
#
# RETWEET GRAPHS V2 - USER ID LOOKUPS
#
def fetch_idless_screen_names(self):
sql = f"""
SELECT DISTINCT rt.retweet_user_screen_name as screen_name
FROM {self.dataset_address}.retweets rt
LEFT JOIN {self.dataset_address}.tweets t on t.user_screen_name = rt.retweet_user_screen_name
WHERE t.user_id IS NULL
"""
return self.execute_query(sql)
def migrate_user_id_lookups_table(self):
sql = ""
if self.destructive:
sql += f"DROP TABLE IF EXISTS `{self.dataset_address}.user_id_lookups`; "
sql += f"""
CREATE TABLE `{self.dataset_address}.user_id_lookups` (
lookup_at TIMESTAMP,
counter INT64,
screen_name STRING,
user_id STRING,
message STRING
);
"""
return self.execute_query(sql)
@property
@lru_cache(maxsize=None)
def user_id_lookups_table(self):
return self.client.get_table(f"{self.dataset_address}.user_id_lookups") # an API call (caches results for subsequent inserts)
def upload_user_id_lookups(self, records):
"""
Param: records (list of dictionaries)
"""
rows_to_insert = [list(d.values()) for d in records]
errors = self.client.insert_rows(self.user_id_lookups_table, rows_to_insert)
return errors
def fetch_max_user_id_postlookup(self):
sql = f"""
SELECT max(user_id) as max_user_id -- 999999827600650240
FROM (
SELECT DISTINCT user_id FROM {self.dataset_address}.tweets -- 3,600,545
UNION ALL
SELECT DISTINCT user_id FROM {self.dataset_address}.user_id_lookups WHERE user_id IS NOT NULL -- 14,969
) all_user_ids -- 3,615,409
"""
results = list(self.execute_query(sql))
return int(results[0]["max_user_id"])
def fetch_idless_screen_names_postlookup(self):
sql = f"""
SELECT distinct upper(screen_name) as screen_name
FROM {self.dataset_address}.user_id_lookups
WHERE user_id is NULL
ORDER BY screen_name
"""
return self.execute_query(sql)
def migrate_user_id_assignments_table(self):
sql = ""
if self.destructive:
sql += f"DROP TABLE IF EXISTS `{self.dataset_address}.user_id_assignments`; "
sql += f"""
CREATE TABLE `{self.dataset_address}.user_id_assignments` (
screen_name STRING,
user_id STRING,
);
"""
return self.execute_query(sql)
@property
@lru_cache(maxsize=None)
def user_id_assignments_table(self):
return self.client.get_table(f"{self.dataset_address}.user_id_assignments") # an API call (caches results for subsequent inserts)
def upload_user_id_assignments(self, records):
"""
Param: records (list of dictionaries)
"""
rows_to_insert = [list(d.values()) for d in records]
errors = self.client.insert_rows(self.user_id_assignments_table, rows_to_insert)
return errors
def migrate_populate_user_screen_names_table(self):
sql = ""
if self.destructive:
sql += f"DROP TABLE IF EXISTS `{self.dataset_address}.user_screen_names`; "
sql += f"""
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.user_screen_names` as (
SELECT DISTINCT user_id, upper(screen_name) as screen_name
FROM (
SELECT DISTINCT user_id, user_screen_name as screen_name FROM `{self.dataset_address}.tweets` -- 3,636,492
UNION ALL
SELECT DISTINCT user_id, screen_name FROM `{self.dataset_address}.user_id_lookups` WHERE user_id IS NOT NULL -- 14,969
UNION ALL
SELECT DISTINCT user_id, screen_name FROM `{self.dataset_address}.user_id_assignments` -- 2,224
) all_user_screen_names -- 3,615,409
ORDER BY user_id, screen_name
);
"""
return self.execute_query(sql)
def migrate_populate_user_details_table_v2(self):
sql = ""
if self.destructive:
sql += f"DROP TABLE IF EXISTS `{self.dataset_address}.user_details_v2`; "
sql += f"""
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.user_details_v2` as (
SELECT
user_id
,count(DISTINCT UPPER(screen_name)) as screen_name_count
,ARRAY_AGG(DISTINCT UPPER(screen_name) IGNORE NULLS) as screen_names
-- ,ANY_VALUE(screen_name) as screen_name
FROM `{self.dataset_address}.user_screen_names`
GROUP BY 1
ORDER BY 2 desc
-- LIMIT 100
);
"""
return self.execute_query(sql)
def migrate_populate_retweets_table_v2(self):
sql = ""
if self.destructive:
sql += f"DROP TABLE IF EXISTS `{self.dataset_address}.retweets_v2`; "
sql += f"""
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.retweets_v2` as (
SELECT
cast(rt.user_id as int64) as user_id
,UPPER(rt.user_screen_name) as user_screen_name
,rt.user_created_at
,cast(sn.user_id as int64) as retweeted_user_id
,UPPER(rt.retweet_user_screen_name) as retweeted_user_screen_name
,rt.status_id
,rt.status_text
,rt.created_at
FROM `{self.dataset_address}.retweets` rt
JOIN `{self.dataset_address}.user_screen_names` sn
ON UPPER(rt.retweet_user_screen_name) = UPPER(sn.screen_name)
WHERE rt.user_screen_name <> rt.retweet_user_screen_name -- excludes people retweeting themselves
);
"""
return self.execute_query(sql)
def fetch_retweet_edges_in_batches_v2(self, topic=None, start_at=None, end_at=None):
"""
For each retweeter, includes the number of times they retweeted each other user.
Optionally about a given topic.
Optionally within a given timeframe.
Params:
topic (str) : the topic they were tweeting about, like 'impeach', '#MAGA', "@politico", etc.
start_at (str) : a date string for the earliest tweet
end_at (str) : a date string for the latest tweet
"""
sql = f"""
SELECT
rt.user_id
,rt.retweeted_user_id
,count(distinct rt.status_id) as retweet_count
FROM `{self.dataset_address}.retweets_v2` rt
WHERE rt.user_screen_name <> rt.retweeted_user_screen_name -- excludes people retweeting themselves
"""
if topic:
sql+=f"""
AND upper(rt.status_text) LIKE '%{topic.upper()}%'
"""
if start_at and end_at:
sql+=f"""
AND (rt.created_at BETWEEN '{str(start_at)}' AND '{str(end_at)}')
"""
sql += """
GROUP BY 1,2
"""
return self.execute_query_in_batches(sql)
def migrate_daily_bot_probabilities_table(self):
sql = ""
if self.destructive:
sql += f"DROP TABLE IF EXISTS `{self.dataset_address}.daily_bot_probabilities`; "
sql += f"""
CREATE TABLE `{self.dataset_address}.daily_bot_probabilities` (
start_date STRING,
user_id INT64,
bot_probability FLOAT64,
);
"""
return self.execute_query(sql)
#
# RETWEET GRAPHS V2 - BOT CLASSIFICATIONS
#
@property
@lru_cache(maxsize=None)
def daily_bot_probabilities_table(self):
return self.client.get_table(f"{self.dataset_address}.daily_bot_probabilities") # an API call (caches results for subsequent inserts)
def upload_daily_bot_probabilities(self, records):
return self.insert_records_in_batches(self.daily_bot_probabilities_table, records)
def sql_fetch_bot_ids(self, bot_min=0.8):
sql = f"""
SELECT DISTINCT bp.user_id
FROM `{self.dataset_address}.daily_bot_probabilities` bp
WHERE bp.bot_probability >= {float(bot_min)}
"""
return sql
def fetch_bot_ids(self, bot_min=0.8):
"""Returns any user who has ever had a bot score above the given threshold."""
return self.execute_query(self.sql_fetch_bot_ids(bot_min))
def fetch_bot_retweet_edges_in_batches(self, bot_min=0.8):
"""
For each bot (user with any bot score greater than the specified threshold),
and each user they retweeted, includes the number of times the bot retweeted them.
Params:
bot_min (float) consider users with any score above this threshold as bots
"""
sql = f"""
SELECT
rt.user_id
,rt.retweeted_user_id
,count(distinct rt.status_id) as retweet_count
FROM `{self.dataset_address}.retweets_v2` rt
JOIN (
{self.sql_fetch_bot_ids(bot_min)}
) bp ON bp.user_id = rt.user_id
WHERE rt.user_screen_name <> rt.retweeted_user_screen_name -- excludes people retweeting themselves
GROUP BY 1,2
-- ORDER BY 1,2
"""
return self.execute_query_in_batches(sql)
#
# RETWEET GRAPHS V2 - BOT COMMUNITIES
#
#@property
#@lru_cache(maxsize=None) # don't cache, or cache one for each value of n_communities
def n_bot_communities_table(self, n_communities):
return self.client.get_table(f"{self.dataset_address}.{n_communities}_bot_communities") # an API call (caches results for subsequent inserts)
def destructively_migrate_n_bot_communities_table(self, n_communities):
sql = f"""
DROP TABLE IF EXISTS `{self.dataset_address}.{n_communities}_bot_communities`;
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.{n_communities}_bot_communities` (
user_id INT64,
community_id INT64,
);
"""
return self.execute_query(sql)
def overwrite_n_bot_communities_table(self, n_communities, records):
self.destructively_migrate_n_bot_communities_table(n_communities)
table = self.n_bot_communities_table(n_communities)
return self.insert_records_in_batches(table, records)
def download_n_bot_community_tweets_in_batches(self, n_communities):
sql = f"""
SELECT
bc.community_id
,t.user_id
,t.user_name
,t.user_screen_name
,t.user_description
,t.user_location
,t.user_verified
,t.user_created_at
,t.status_id
,t.status_text
,t.retweet_status_id
,t.reply_user_id
,t.is_quote as status_is_quote
,t.geo as status_geo
,t.created_at as status_created_at
FROM `{self.dataset_address}.{n_communities}_bot_communities` bc -- 681
JOIN `{self.dataset_address}.tweets` t on CAST(t.user_id as int64) = bc.user_id
-- WHERE t.retweet_status_id IS NULL
-- ORDER BY 1,2
"""
return self.execute_query_in_batches(sql)
def download_n_bot_community_retweets_in_batches(self, n_communities):
sql = f"""
SELECT
bc.community_id
,ud.user_id
,ud.screen_name_count as user_screen_name_count
,ARRAY_TO_STRING(ud.screen_names, ' | ') as user_screen_names
,rt.user_created_at
,rt.retweeted_user_id
,rt.retweeted_user_screen_name
,rt.status_id
,rt.status_text
,rt.created_at as status_created_at
FROM `{self.dataset_address}.{n_communities}_bot_communities` bc -- 681
JOIN `{self.dataset_address}.user_details_v2` ud on CAST(ud.user_id as int64) = bc.user_id
JOIN `{self.dataset_address}.retweets_v2` rt on rt.user_id = bc.user_id
-- ORDER BY 1,2
"""
return self.execute_query_in_batches(sql)
def destructively_migrate_token_frequencies_table(self, table_address, records):
print("DESTRUCTIVELY MIGRATING TABLE:", table_address)
sql = f"""
DROP TABLE IF EXISTS `{table_address}`;
CREATE TABLE IF NOT EXISTS `{table_address}` (
token STRING,
rank INT64,
count INT64,
pct FLOAT64,
doc_count INT64,
doc_pct FLOAT64
);
"""
self.execute_query(sql)
table = self.client.get_table(table_address) # API call
print("INSERTING", len(records), "RECORDS...")
return self.insert_records_in_batches(table, records)
def fetch_bot_community_profiles(self, n_communities=2):
sql = f"""
SELECT community_id, bot_id as user_id, user_descriptions
FROM `{self.dataset_address}.{int(n_communities)}_community_profiles`
"""
return self.execute_query(sql)
def upload_bot_community_profile_tokens(self, records, community_id, n_communities=2):
table_address = f"{self.dataset_address}.{n_communities}_community_{community_id}_profile_tokens"
self.destructively_migrate_token_frequencies_table(table_address=table_address, records=records)
def upload_bot_community_profile_tags(self, records, community_id, n_communities=2):
table_address = f"{self.dataset_address}.{n_communities}_community_{community_id}_profile_tags"
self.destructively_migrate_token_frequencies_table(table_address=table_address, records=records)
def fetch_bot_community_statuses(self, n_communities, community_id=None, limit=None):
sql = f"""
SELECT community_id, user_id, status_id, status_text
FROM `{self.dataset_address}.{int(n_communities)}_community_labeled_tweets`
"""
if community_id:
sql += f" WHERE community_id = {int(community_id)}"
if limit:
sql += f" LIMIT {int(limit)}"
return self.execute_query(sql)
def upload_bot_community_status_tokens(self, records, community_id, n_communities=2):
table_address = f"{self.dataset_address}.{n_communities}_community_{community_id}_status_tokens"
self.destructively_migrate_token_frequencies_table(table_address, records=records)
def upload_bot_community_status_tags(self, records, community_id, n_communities=2):
table_address = f"{self.dataset_address}.{n_communities}_community_{community_id}_status_tags"
self.destructively_migrate_token_frequencies_table(table_address, records=records)
#
# BOT FOLLOWER GRAPHS
#
def destructively_migrate_user_friends_flat(self):
sql = f"""
DROP TABLE IF EXISTS `{self.dataset_address}.user_friends_flat`;
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.user_friends_flat` as (
SELECT user_id, upper(screen_name) as screen_name, upper(friend_name) as friend_name
FROM `{self.dataset_address}.user_friends`
CROSS JOIN UNNEST(friend_names) AS friend_name
);
""" # 1,976,670,168 rows WAT
return self.execute_query(sql)
def destructively_migrate_bots_table(self, bot_min=0.8):
bot_min_str = str(int(bot_min * 100)) #> "80"
sql = f"""
DROP TABLE IF EXISTS `{self.dataset_address}.bots_above_{bot_min_str}`;
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.bots_above_{bot_min_str}` as (
SELECT
bp.user_id as bot_id
,sn.screen_name as bot_screen_name
,count(distinct start_date) as day_count
,avg(bot_probability) as avg_daily_score
FROM `{self.dataset_address}.daily_bot_probabilities` bp
JOIN `{self.dataset_address}.user_screen_names` sn ON CAST(sn.user_id as int64) = bp.user_id
WHERE bp.bot_probability >= {float(bot_min)}
GROUP BY 1,2
ORDER BY 3 desc
);
"""
return self.execute_query(sql)
def destructively_migrate_bot_followers_table(self, bot_min=0.8):
bot_min_str = str(int(bot_min * 100)) #> "80"
sql = f"""
DROP TABLE IF EXISTS `{self.dataset_address}.bot_followers_above_{bot_min_str}`;
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.bot_followers_above_{bot_min_str}` as (
SELECT
b.bot_id
,b.bot_screen_name
,uff.user_id as follower_id
,uff.screen_name as follower_screen_name
FROM `{self.dataset_address}.user_friends_flat` uff
JOIN `{self.dataset_address}.bots_above_{bot_min_str}` b ON upper(b.bot_screen_name) = upper(uff.friend_name)
);
""" # 29,861,268 rows WAT
return self.execute_query(sql)
def fetch_bot_followers_in_batches(self, bot_min=0.8):
"""
Returns a row for each bot for each user who follows them.
Params: bot_min (float) consider users with any score above this threshold as bots (uses pre-computed classification scores)
"""
bot_min_str = str(int(bot_min * 100)) #> "80"
sql = f"""
SELECT DISTINCT bot_id, follower_id
FROM `{self.dataset_address}.bot_followers_above_{bot_min_str}`
"""
return self.execute_query_in_batches(sql)
def fetch_bot_follower_lists(self, bot_min=0.8):
"""
Returns a row for each bot, with a list of aggregated follower ids.
Params: bot_min (float) consider users with any score above this threshold as bots (uses pre-computed classification scores)
"""
bot_min_str = str(int(bot_min * 100)) #> "80"
sql = f"""
SELECT bot_id, ARRAY_AGG(distinct follower_id) as follower_ids
FROM `{self.dataset_address}.bot_followers_above_{bot_min_str}`
GROUP BY 1
""" # takes 90 seconds for ~25K rows
return self.execute_query(sql)
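# Usage sketch (illustrative only): `bq` below stands for an already-constructed instance of
# this service and `networkx` is an assumed dependency; neither is provided by this module.
# The follower lists returned above fold naturally into a follower -> bot edge graph.
#
#   import networkx as nx
#   graph = nx.DiGraph()
#   for row in bq.fetch_bot_follower_lists(bot_min=0.8):
#       for follower_id in row.follower_ids:
#           graph.add_edge(follower_id, row.bot_id)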
#
# NLP (BASILICA)
#
@property
@lru_cache(maxsize=None)
def basilica_embeddings_table(self):
return self.client.get_table(f"{self.dataset_address}.basilica_embeddings") # an API call (caches results for subsequent inserts)
def upload_basilica_embeddings(self, records):
return self.insert_records_in_batches(self.basilica_embeddings_table, records)
def fetch_basilica_embedless_partitioned_statuses(self, min_val=0.0, max_val=1.0, limit=None, in_batches=False):
"""Params min_val and max_val reference partition decimal values from 0.0 to 1.0"""
sql = f"""
SELECT ps.status_id, ps.status_text
FROM `{self.dataset_address}.partitioned_statuses` ps
LEFT JOIN `{self.dataset_address}.basilica_embeddings` emb ON ps.status_id = emb.status_id
WHERE emb.status_id IS NULL
AND ps.partition_val BETWEEN {float(min_val)} AND {float(max_val)}
"""
if limit:
sql += f" LIMIT {int(limit)};"
if in_batches:
print("FETCHING STATUSES IN BATCHES...")
return self.execute_query_in_batches(sql)
else:
print("FETCHING STATUSES...")
return self.execute_query(sql)
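# Usage sketch (illustrative only; the worker count and the `bq` service instance are
# assumptions, not part of this module): the partition bounds let parallel embedding workers
# split the 0.0-1.0 range, with only the shared endpoints overlapping because BETWEEN is inclusive.
#
#   n_workers = 4
#   bounds = [(i / n_workers, (i + 1) / n_workers) for i in range(n_workers)]
#   for lo, hi in bounds:
#       rows = bq.fetch_basilica_embedless_partitioned_statuses(min_val=lo, max_val=hi)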
#
# NLP (CUSTOM)
#
def fetch_labeled_tweets_in_batches(self, limit=None):
sql = f"""
SELECT
status_id
,status_text
,community_id
--,community_score
FROM `{self.dataset_address}.2_community_labeled_tweets`
"""
if limit:
sql += f" LIMIT {int(limit)}"
return self.execute_query(sql)
else:
return self.execute_query_in_batches(sql)
def fetch_unlabeled_statuses_in_batches(self, limit=None):
sql = f"""
SELECT s.status_id, s.status_text
FROM `{self.dataset_address}.statuses` s
LEFT JOIN `{self.dataset_address}.2_community_labeled_tweets` l ON l.status_id = s.status_id
WHERE l.status_id IS NULL
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
else:
return self.execute_query_in_batches(sql)
def destructively_migrate_2_community_predictions_table(self):
sql = f"""
DROP TABLE IF EXISTS `{self.dataset_address}.2_community_predictions`;
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.2_community_predictions` (
status_id INT64,
predicted_community_id INT64
);
"""
return self.execute_query(sql)
@property
@lru_cache(maxsize=None)
def community_predictions_table(self):
return self.client.get_table(f"{self.dataset_address}.2_community_predictions") # an API call (caches results for subsequent inserts)
def upload_predictions_in_batches(self, records):
return self.insert_records_in_batches(self.community_predictions_table, records)
def fetch_predictions(self, limit=None):
sql = f"""
SELECT status_id, predicted_community_id
FROM `{self.dataset_address}.2_community_predictions`
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
else:
return self.execute_query_in_batches(sql)
#
# NLP V2
#
def nlp_v2_fetch_statuses(self, limit=None):
sql = f"""
SELECT s.status_id, s.status_text
FROM `{self.dataset_address}.statuses` s
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
def nlp_v2_destructively_migrate_predictions_table(self, model_name):
if model_name.lower() == "bert":
sql = f"""
DROP TABLE IF EXISTS `{self.dataset_address}.nlp_v2_predictions_bert`;
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.nlp_v2_predictions_bert` (
status_id INT64,
logit_0 FLOAT64,
logit_1 FLOAT64,
prediction FLOAT64
);
"""
else:
sql = f"""
DROP TABLE IF EXISTS `{self.dataset_address}.nlp_v2_predictions_{model_name}`;
CREATE TABLE IF NOT EXISTS `{self.dataset_address}.nlp_v2_predictions_{model_name}` (
status_id INT64,
prediction STRING -- todo: convert this D/R label back to 0/1 "score"
);
"""
return self.execute_query(sql)
def nlp_v2_get_predictions_table(self, model_name):
return self.client.get_table(f"{self.dataset_address}.nlp_v2_predictions_{model_name}") # API call.
#
# DAILY ACTIVE FRIEND GRAPHS V4
#
def fetch_daily_statuses(self, date, limit=None):
sql = f"""
SELECT DISTINCT
t.status_id
, t.status_text
, t.created_at
, t.user_id
, UPPER(t.user_screen_name) as screen_name
,CASE WHEN bu.community_id IS NOT NULL THEN TRUE ELSE FALSE END bot
--,bu.community_id
-- ,r.tweet_count as rate
FROM `{self.dataset_address}.tweets` t
LEFT JOIN `{self.dataset_address}.2_bot_communities` bu ON bu.user_id = cast(t.user_id as int64)
WHERE EXTRACT(DATE from created_at) = '{date}'
--LIMIT 10
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
def fetch_daily_active_tweeter_statuses(self, date, tweet_min=None, limit=None):
sql = f"""
SELECT DISTINCT
t.status_id
,t.status_text
,t.created_at
,t.user_id
,UPPER(t.user_screen_name) as screen_name
,CASE WHEN bu.community_id IS NOT NULL THEN TRUE ELSE FALSE END bot
,cast(bu.community_id as int64) as community_id
,r.tweet_count as rate
FROM `{self.dataset_address}.tweets` t
LEFT JOIN `{self.dataset_address}.2_bot_communities` bu ON bu.user_id = cast(t.user_id as int64)
JOIN (
SELECT
cast(user_id as INT64) as user_id, count(distinct status_id) as tweet_count
FROM `{self.dataset_address}.tweets` t
WHERE EXTRACT(DATE from created_at) = '{date}'
GROUP BY 1
-- LIMIT 10
) r ON r.user_id = cast(t.user_id as int64)
WHERE EXTRACT(DATE from created_at) = '{date}'
"""
if tweet_min:
sql += f" AND tweet_count >= {int(tweet_min)};"
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
def fetch_daily_active_tweeter_statuses_for_model_training(self, date, tweet_min=None, limit=None):
sql = f"""
WITH daily_tweets AS (
SELECT
cast(t.user_id as int64) as user_id
,UPPER(t.user_screen_name) as screen_name
,cast(t.status_id as int64) as status_id
,t.status_text
,t.created_at
FROM `{self.dataset_address}.tweets` t
WHERE extract(date from t.created_at) = '{date}'
)
SELECT DISTINCT
t.status_id ,t.status_text ,t.created_at
,t.user_id ,t.screen_name
,CASE WHEN bu.community_id IS NOT NULL THEN TRUE ELSE FALSE END bot
,cast(bu.community_id as int64) as community_id
,r.tweet_count as rate
,st.status_count as status_text_occurrence
FROM daily_tweets t
LEFT JOIN `{self.dataset_address}.2_bot_communities` bu ON bu.user_id = t.user_id
JOIN (
SELECT
CAST(user_id as INT64) as user_id
,count(distinct status_id) as tweet_count
FROM daily_tweets t
GROUP BY 1
) r ON r.user_id = cast(t.user_id as int64)
LEFT JOIN (
SELECT
t.status_text
,count(distinct t.status_id) as status_count
FROM daily_tweets t
GROUP BY 1
) st ON st.status_text = t.status_text
"""
if tweet_min:
sql += f" AND tweet_count >= {int(tweet_min)};"
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
def fetch_daily_active_user_friends(self, date, tweet_min=None, limit=None):
sql = f"""
SELECT dau.user_id, dau.rate, uf.screen_name ,uf.friend_count, uf.friend_names
FROM (
SELECT cast(user_id as INT64) as user_id, count(distinct status_id) as rate
FROM `{self.dataset_address}.tweets` t
WHERE EXTRACT(DATE from t.created_at) = '{date}'
GROUP BY 1
) dau
JOIN `{self.dataset_address}.active_user_friends` uf ON uf.user_id = dau.user_id
"""
if tweet_min:
sql += f" WHERE dau.rate >= {int(tweet_min)};"
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
def fetch_daily_active_edge_friends(self, date, tweet_min=2, limit=None):
sql = f"""
WITH dau AS (
SELECT
cast(user_id as INT64) as user_id
,upper(user_screen_name) as screen_name
,count(distinct status_id) as rate
FROM `{self.dataset_address}.tweets`
WHERE EXTRACT(DATE FROM created_at) = '{date}'
GROUP BY 1,2
HAVING count(distinct status_id) >= {int(tweet_min)}
)
SELECT
dau.user_id
,dau.screen_name
,dau.rate
,ARRAY_AGG(DISTINCT uff.friend_name) as friend_names
,count(DISTINCT uff.friend_name) as friend_count
FROM dau
JOIN `{self.dataset_address}.user_friends_flat` uff ON cast(uff.user_id as int64) = dau.user_id
WHERE uff.friend_name in (SELECT DISTINCT screen_name FROM dau)
GROUP BY 1,2,3
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
def fetch_daily_active_edge_friends_for_csv(self, date, tweet_min=2, limit=None):
sql = f"""
WITH dau AS (
SELECT
cast(user_id as INT64) as user_id
,upper(user_screen_name) as screen_name
,count(distinct status_id) as rate
FROM `{self.dataset_address}.tweets`
WHERE EXTRACT(DATE FROM created_at) = '{date}'
GROUP BY 1,2
HAVING count(distinct status_id) >= {int(tweet_min)}
)
SELECT
dau.user_id
,dau.screen_name
,dau.rate
,STRING_AGG(DISTINCT uff.friend_name) as friend_names -- STRING AGG FOR CSV OUTPUT!
,count(DISTINCT uff.friend_name) as friend_count
FROM dau
JOIN `{self.dataset_address}.user_friends_flat` uff ON cast(uff.user_id as int64) = dau.user_id
WHERE uff.friend_name in (SELECT DISTINCT screen_name FROM dau)
GROUP BY 1,2,3
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
def fetch_daily_statuses_with_opinion_scores(self, date, limit=None):
sql = f"""
WITH daily_tweets as (
SELECT user_id ,screen_name ,status_id ,status_text ,created_at ,score_lr ,score_nb
FROM `{self.dataset_address}.nlp_v2_predictions_combined` p
WHERE extract(date from created_at) = '{date}'
AND score_lr is not null and score_nb is not null -- there are 30,000 total null lr scores. drop for now
)
SELECT
t.user_id
,t.screen_name
,CASE WHEN bu.community_id IS NOT NULL THEN TRUE ELSE FALSE END bot
,cast(bu.community_id as int64) as community_id
,r.status_count as rate
,t.status_id
,t.status_text
,st.status_count as status_text_occurrences
,t.created_at
,t.score_lr
,t.score_nb
FROM daily_tweets t
JOIN (
SELECT user_id, count(distinct status_id) as status_count
FROM daily_tweets
GROUP BY 1
) r ON r.user_id = t.user_id
LEFT JOIN (
SELECT status_text ,count(distinct status_id) as status_count
FROM daily_tweets
GROUP BY 1
) st ON st.status_text = t.status_text
LEFT JOIN `{self.dataset_address}.2_bot_communities` bu ON bu.user_id = t.user_id
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
#
# DAILY ACTIVE FRIEND GRAPHS V5
#
def fetch_daily_nodes_with_active_edges(self, date, limit=None):
sql = f"""
WITH dau AS (
SELECT
user_id
,screen_name
,count(distinct status_id) as rate
FROM `{self.dataset_address}.nlp_v2_predictions_combined` p
WHERE extract(date from created_at) = '{date}'
AND score_lr is not null and score_nb is not null -- there are 30,000 total null lr scores. drop for now
GROUP BY 1,2
)
SELECT
dau.user_id
,dau.screen_name
,dau.rate
,CASE WHEN bu.community_id IS NOT NULL THEN TRUE ELSE FALSE END bot
,cast(bu.community_id as int64) as community_id
,STRING_AGG(DISTINCT uff.friend_name) as friend_names -- STRING AGG FOR CSV OUTPUT!
,count(DISTINCT uff.friend_name) as friend_count
FROM dau
JOIN `{self.dataset_address}.user_friends_flat` uff ON cast(uff.user_id as int64) = dau.user_id
LEFT JOIN `{self.dataset_address}.2_bot_communities` bu ON bu.user_id = dau.user_id
WHERE uff.friend_name in (SELECT DISTINCT screen_name FROM dau)
GROUP BY 1,2,3,4,5
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
#
# ACTIVE FRIEND GRAPHS V6
#
#def migrate_populate_nodes_with_active_edges_v6(self, limit=None):
# sql = f"""
# WITH au AS (
# SELECT
# cast(user_id as int64) as user_id
# ,upper(user_screen_name) as screen_name
# ,count(distinct status_id) as rate
# FROM `{self.dataset_address}.tweets` t
# WHERE created_at BETWEEN '2019-12-20 00:00:00' AND '2020-02-15 23:59:59' -- inclusive (primary collection period)
# GROUP BY 1,2
# )
#
# SELECT
# au.user_id
# ,au.screen_name
# ,au.rate
# ,CASE WHEN bu.community_id IS NOT NULL THEN TRUE ELSE FALSE END bot
# ,cast(bu.community_id as int64) as community_id
# ,STRING_AGG(DISTINCT uff.friend_name) as friend_names -- STRING AGG FOR CSV OUTPUT!
# ,count(DISTINCT uff.friend_name) as friend_count
# FROM au
# JOIN `{self.dataset_address}.user_friends_flat` uff ON cast(uff.user_id as int64) = au.user_id
# LEFT JOIN `{self.dataset_address}.2_bot_communities` bu ON bu.user_id = au.user_id
# WHERE uff.friend_name in (SELECT DISTINCT screen_name FROM au)
# GROUP BY 1,2,3,4,5
# """
# if limit:
# sql += f" LIMIT {int(limit)};"
# return self.execute_query(sql)
def fetch_nodes_with_active_edges_v6(self, limit=None):
sql = f"""
SELECT user_id, screen_name, rate, bot, community_id, friend_names, friend_count
FROM `{self.dataset_address}.nodes_with_active_edges_v6`
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
def fetch_sn_nodes_with_active_edges_v7(self, limit=None):
sql = f"""
SELECT user_id, screen_name, status_count as rate, is_bot as bot, community_id, friend_names, friend_count
FROM `{self.dataset_address}.nodes_with_active_edges_v7_sn`
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
#
# BOT ANALYSIS
#
def fetch_statuses_with_tags(self, limit=None):
sql = f"""
SELECT user_id, is_bot, status_id, status_text
FROM `{self.dataset_address}.statuses_with_tags`
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
def fetch_user_details_vq(self, limit=None):
sql = f"""
SELECT
user_id ,creation_date ,screen_name_count, screen_names
,status_count, rt_count
,is_bot ,bot_community
,mean_opinion ,opinion_community
,q_status_count ,q_status_pct
FROM `{self.dataset_address}.user_details_vq`
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
def fetch_tweet_details_v6(self, limit=None):
sql = f"""
SELECT
status_id
,status_created_at
,is_rt ,rt_user_screen_name
,user_id
,screen_names ,screen_name_count
,created_on ,created_jan17 ,created_inaug
,is_bot ,is_q
,opinion_community ,mean_opinion
FROM `{self.dataset_address}.tweet_details_v6_slim`
"""
if limit:
sql += f" LIMIT {int(limit)};"
return self.execute_query(sql)
#
# API - V0
# ... ALL ENDPOINTS MUST PREVENT SQL INJECTION
def fetch_user_details_api_v0(self, screen_name="politico"):
# TODO: super-charge this with cool stuff, like mention counts, average opinion score, etc.
# TODO: create some temporary tables, to make the query faster
sql = f"""
SELECT
user_id
,user_created_at
,tweet_count
,screen_name_count
,screen_names
,user_names
,user_descriptions
FROM `{self.dataset_address}.user_details_v3`
WHERE UPPER(@screen_name) in UNNEST(SPLIT(screen_names, '|'))
LIMIT 1
"""
job_config = bigquery.QueryJobConfig(query_parameters=[bigquery.ScalarQueryParameter("screen_name", "STRING", screen_name)])
return self.client.query(sql, job_config=job_config)
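# Usage sketch (illustrative only; the screen name below is made up): because the value is
# bound through a query parameter rather than interpolated into the SQL string, a hostile
# input is treated as data, not as executable SQL.
#
#   job = bq.fetch_user_details_api_v0(screen_name="politico'; DROP TABLE users; --")
#   rows = list(job.result())  # returns zero rows instead of running the injected statement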
def fetch_user_tweets_api_v0(self, screen_name="politico"):
# TODO: create some temporary tables maybe, to make the query faster
sql = f"""
SELECT
t.status_id
,t.status_text
,t.created_at
,p.predicted_community_id as opinion_score
FROM `{self.dataset_address}.tweets` t
LEFT JOIN `{self.dataset_address}.2_community_predictions` p ON p.status_id = cast(t.status_id as int64)
WHERE upper(t.user_screen_name) = upper(@screen_name)
"""
job_config = QueryJobConfig(query_parameters=[ScalarQueryParameter("screen_name", "STRING", screen_name)])
return self.client.query(sql, job_config=job_config)
def fetch_users_most_retweeted_api_v0(self, metric=None, limit=None):
"""
Params:
metric : whether to calculate top users based on "retweet_count" or "retweeter_count"
limit : the number of top users to return for each community (max 1,000)
"""
metric = metric or "retweet_count"
limit = limit or 25
sql = f"""
(
SELECT community_id ,retweeted_user_screen_name ,retweeter_count , retweet_count
FROM `{self.dataset_address}.community_0_users_most_retweeted`
ORDER BY CASE @metric WHEN 'retweeter_count' THEN retweeter_count ELSE retweet_count END DESC
LIMIT @limit
)
UNION ALL
(
SELECT community_id ,retweeted_user_screen_name ,retweeter_count , retweet_count
FROM `{self.dataset_address}.community_1_users_most_retweeted`
ORDER BY CASE @metric WHEN 'retweeter_count' THEN retweeter_count ELSE retweet_count END DESC
LIMIT @limit
)
"""
job_config = QueryJobConfig(query_parameters=[
ScalarQueryParameter("metric", "STRING", metric),
ScalarQueryParameter("limit", "INT64", int(limit)),
])
return self.client.query(sql, job_config=job_config)
def fetch_statuses_most_retweeted_api_v0(self, metric=None, limit=None):
"""
Params:
metric : whether to calculate top statuses based on "retweet_count" or "retweeter_count"
limit : the number of top statuses to return for each community (max 1,000)
"""
metric = metric or "retweet_count"
limit = limit or 25
sql = f"""
(
SELECT community_id ,retweeted_user_screen_name ,status_text ,retweeter_count , retweet_count
FROM `{self.dataset_address}.community_0_statuses_most_retweeted`
ORDER BY CASE @metric WHEN 'retweeter_count' THEN retweeter_count ELSE retweet_count END DESC
LIMIT @limit
)
UNION ALL
(
SELECT community_id ,retweeted_user_screen_name ,status_text ,retweeter_count , retweet_count
FROM `{self.dataset_address}.community_1_statuses_most_retweeted`
ORDER BY CASE @metric WHEN 'retweeter_count' THEN retweeter_count ELSE retweet_count END DESC
LIMIT @limit
)
"""
job_config = QueryJobConfig(query_parameters=[
ScalarQueryParameter("metric", "STRING", metric),
ScalarQueryParameter("limit", "INT64", int(limit)),
])
return self.client.query(sql, job_config=job_config)
def fetch_top_profile_tokens_api_v0(self, limit=None):
"""
Params: limit : the number of top tokens to return for each community
"""
limit = limit or 20
sql = f"""
(
SELECT 0 as community_id, token, rank, count, pct
FROM `{self.dataset_address}.2_community_0_profile_tokens`
ORDER BY rank
LIMIT @limit
)
UNION ALL
(
SELECT 1 as community_id, token, rank, count, pct
FROM `{self.dataset_address}.2_community_1_profile_tokens`
ORDER BY rank
LIMIT @limit
)
"""
job_config = QueryJobConfig(query_parameters=[ScalarQueryParameter("limit", "INT64", int(limit))])
return self.client.query(sql, job_config=job_config)
def fetch_top_profile_tags_api_v0(self, limit=None):
"""
Params: limit : the number of top tags to return for each community
"""
limit = limit or 20
sql = f"""
(
SELECT 0 as community_id, token, rank, count, pct
FROM `{self.dataset_address}.2_community_0_profile_tags`
ORDER BY rank
LIMIT @limit
)
UNION ALL
(
SELECT 1 as community_id, token, rank, count, pct
FROM `{self.dataset_address}.2_community_1_profile_tags`
ORDER BY rank
LIMIT @limit
)
"""
job_config = QueryJobConfig(query_parameters=[ScalarQueryParameter("limit", "INT64", int(limit))])
return self.client.query(sql, job_config=job_config)
def fetch_top_status_tokens_api_v0(self, limit=None):
"""
Params: limit : the number of top tokens to return for each community
"""
limit = limit or 50
sql = f"""
(
SELECT 0 as community_id, token, rank, count, pct, doc_count, doc_pct
FROM `{self.dataset_address}.2_community_0_status_tokens`
ORDER BY rank
LIMIT @limit
)
UNION ALL
(
SELECT 1 as community_id, token, rank, count, pct, doc_count, doc_pct
FROM `{self.dataset_address}.2_community_1_status_tokens`
ORDER BY rank
LIMIT @limit
)
"""
job_config = QueryJobConfig(query_parameters=[ScalarQueryParameter("limit", "INT64", int(limit))])
return self.client.query(sql, job_config=job_config)
def fetch_top_status_tags_api_v0(self, limit=None):
"""
Params: limit : the number of top tokens to return for each community
"""
limit = limit or 50
sql = f"""
(
SELECT 0 as community_id, token, rank, count, pct, doc_count, doc_pct
FROM `{self.dataset_address}.2_community_0_status_tags`
ORDER BY rank
LIMIT @limit
)
UNION ALL
(
SELECT 1 as community_id, token, rank, count, pct, doc_count, doc_pct
FROM `{self.dataset_address}.2_community_1_status_tags`
ORDER BY rank
LIMIT @limit
)
"""
job_config = QueryJobConfig(query_parameters=[ScalarQueryParameter("limit", "INT64", int(limit))])
return self.client.query(sql, job_config=job_config)
#
# API - V1
# ... ALL ENDPOINTS MUST PREVENT SQL INJECTION
def fetch_user_tweets_api_v1(self, screen_name="politico"):
sql = f"""
SELECT
status_id
,status_text
,created_at
,score_lr
,score_nb
,score_bert
FROM `{self.dataset_address}.nlp_v2_predictions_combined` p
WHERE upper(screen_name) = upper(@screen_name)
"""
job_config = QueryJobConfig(query_parameters=[ScalarQueryParameter("screen_name", "STRING", screen_name)])
return self.client.query(sql, job_config=job_config)
def fetch_users_most_followed_api_v1(self, limit=None):
limit = limit or 500 # max 1000 based on the size of the precomputed table
sql = f"""
SELECT
screen_name --, user_id, user_created_at
,status_count
,follower_count
,avg_score_lr
,avg_score_nb
,avg_score_bert
,user_category as category
FROM `{self.dataset_address}.nlp_v2_predictions_by_user_most_followed`
ORDER BY follower_count DESC
LIMIT @limit
"""
job_config = QueryJobConfig(query_parameters=[ScalarQueryParameter("limit", "INT64", int(limit))])
return self.client.query(sql, job_config=job_config)
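# Usage sketch (illustrative only; `bq` is an assumed instance of this service): the API
# methods above hand back a google-cloud-bigquery QueryJob, so callers materialize rows
# with .result() before serializing them.
#
#   job = bq.fetch_users_most_followed_api_v1(limit=10)
#   users = [dict(row) for row in job.result()]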
if __name__ == "__main__":
service = BigQueryService()
print(f" CLEANUP MODE: {CLEANUP_MODE}")
if CLEANUP_MODE:
service.delete_temp_tables_older_than(days=3)
seek_confirmation()
print("--------------------")
print("FETCHED TOPICS:")
print([row.topic for row in service.fetch_topics()])
sql = f"SELECT count(distinct status_id) as tweet_count FROM `{service.dataset_address}.tweets`"
results = service.execute_query(sql)
print("--------------------")
tweet_count = list(results)[0].tweet_count
print(f"FETCHED {fmt_n(tweet_count)} TWEETS")
print("--------------------")
sql = f"SELECT count(distinct user_id) as user_count FROM `{service.dataset_address}.tweets`"
results = service.execute_query(sql)
user_count = list(results)[0].user_count
print(f"FETCHED {fmt_n(user_count)} USERS")
results = service.user_friend_collection_progress()
row = list(results)[0]
collected_count = row.user_count
pct = collected_count / user_count
#print("--------------------")
#print("USERS COLLECTED:", collected_count)
#print(" PCT COLLECTED:", f"{(pct * 100):.1f}%")
#print(" AVG DURATION:", row.avg_duration)
if collected_count > 0:
print("--------------------")
print(f"USERS WITH FRIENDS: {row.pct_friendly * 100}%")
print(" AVG FRIENDS:", round(row.avg_friends_friendly))
#print(" AVG DURATION:", row.avg_duration_friendly)
| []
| []
| [
"VERBOSE_QUERIES",
"BIGQUERY_PROJECT_NAME",
"DESTRUCTIVE_MIGRATIONS",
"GOOGLE_APPLICATION_CREDENTIALS",
"BIGQUERY_DATASET_NAME",
"CLEANUP_MODE"
]
| [] | ["VERBOSE_QUERIES", "BIGQUERY_PROJECT_NAME", "DESTRUCTIVE_MIGRATIONS", "GOOGLE_APPLICATION_CREDENTIALS", "BIGQUERY_DATASET_NAME", "CLEANUP_MODE"] | python | 6 | 0 | |
WEB(BE)/drf/drf/asgi.py | """
ASGI config for drf project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'drf.settings')
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
fluid/PaddleCV/image_classification/reader.py | import os
import math
import random
import functools
import numpy as np
import paddle
from PIL import Image, ImageEnhance
random.seed(0)
np.random.seed(0)
DATA_DIM = 224
THREAD = 8
BUF_SIZE = 102400
DATA_DIR = 'data/ILSVRC2012'
img_mean = np.array([0.485, 0.456, 0.406]).reshape((3, 1, 1))
img_std = np.array([0.229, 0.224, 0.225]).reshape((3, 1, 1))
def resize_short(img, target_size):
percent = float(target_size) / min(img.size[0], img.size[1])
resized_width = int(round(img.size[0] * percent))
resized_height = int(round(img.size[1] * percent))
img = img.resize((resized_width, resized_height), Image.LANCZOS)
return img
def crop_image(img, target_size, center):
width, height = img.size
size = target_size
if center:
w_start = (width - size) / 2
h_start = (height - size) / 2
else:
w_start = np.random.randint(0, width - size + 1)
h_start = np.random.randint(0, height - size + 1)
w_end = w_start + size
h_end = h_start + size
img = img.crop((w_start, h_start, w_end, h_end))
return img
def random_crop(img, size, scale=[0.08, 1.0], ratio=[3. / 4., 4. / 3.]):
aspect_ratio = math.sqrt(np.random.uniform(*ratio))
w = 1. * aspect_ratio
h = 1. / aspect_ratio
bound = min((float(img.size[0]) / img.size[1]) / (w**2),
(float(img.size[1]) / img.size[0]) / (h**2))
scale_max = min(scale[1], bound)
scale_min = min(scale[0], bound)
target_area = img.size[0] * img.size[1] * np.random.uniform(scale_min,
scale_max)
target_size = math.sqrt(target_area)
w = int(target_size * w)
h = int(target_size * h)
i = np.random.randint(0, img.size[0] - w + 1)
j = np.random.randint(0, img.size[1] - h + 1)
img = img.crop((i, j, i + w, j + h))
img = img.resize((size, size), Image.LANCZOS)
return img
def rotate_image(img):
angle = np.random.randint(-10, 11)
img = img.rotate(angle)
return img
def distort_color(img):
def random_brightness(img, lower=0.5, upper=1.5):
e = np.random.uniform(lower, upper)
return ImageEnhance.Brightness(img).enhance(e)
def random_contrast(img, lower=0.5, upper=1.5):
e = np.random.uniform(lower, upper)
return ImageEnhance.Contrast(img).enhance(e)
def random_color(img, lower=0.5, upper=1.5):
e = np.random.uniform(lower, upper)
return ImageEnhance.Color(img).enhance(e)
ops = [random_brightness, random_contrast, random_color]
np.random.shuffle(ops)
img = ops[0](img)
img = ops[1](img)
img = ops[2](img)
return img
def process_image(sample, mode, color_jitter, rotate):
img_path = sample[0]
img = Image.open(img_path)
if mode == 'train':
if rotate: img = rotate_image(img)
img = random_crop(img, DATA_DIM)
else:
img = resize_short(img, target_size=256)
img = crop_image(img, target_size=DATA_DIM, center=True)
if mode == 'train':
if color_jitter:
img = distort_color(img)
if np.random.randint(0, 2) == 1:
img = img.transpose(Image.FLIP_LEFT_RIGHT)
if img.mode != 'RGB':
img = img.convert('RGB')
img = np.array(img).astype('float32').transpose((2, 0, 1)) / 255
img -= img_mean
img /= img_std
if mode == 'train' or mode == 'val':
return img, sample[1]
elif mode == 'test':
return [img]
def _reader_creator(file_list,
mode,
shuffle=False,
color_jitter=False,
rotate=False,
data_dir=DATA_DIR,
pass_id_as_seed=0):
def reader():
with open(file_list) as flist:
full_lines = [line.strip() for line in flist]
if shuffle:
if pass_id_as_seed:
np.random.seed(pass_id_as_seed)
np.random.shuffle(full_lines)
if mode == 'train' and os.getenv('PADDLE_TRAINING_ROLE'):
# distributed mode if the env var `PADDLE_TRAINING_ROLE` exists
trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))
trainer_count = int(os.getenv("PADDLE_TRAINERS_NUM", "1"))
per_node_lines = len(full_lines) // trainer_count
lines = full_lines[trainer_id * per_node_lines:(trainer_id + 1)
* per_node_lines]
print(
"read images from %d, length: %d, lines length: %d, total: %d"
% (trainer_id * per_node_lines, per_node_lines, len(lines),
len(full_lines)))
else:
lines = full_lines
for line in lines:
if mode == 'train' or mode == 'val':
img_path, label = line.split()
#img_path = img_path.replace("JPEG", "jpeg")
img_path = os.path.join(data_dir, img_path)
yield img_path, int(label)
elif mode == 'test':
img_path, label = line.split()
#img_path = img_path.replace("JPEG", "jpeg")
img_path = os.path.join(data_dir, img_path)
yield [img_path]
mapper = functools.partial(
process_image, mode=mode, color_jitter=color_jitter, rotate=rotate)
return paddle.reader.xmap_readers(mapper, reader, THREAD, BUF_SIZE)
def train(data_dir=DATA_DIR, pass_id_as_seed=0):
file_list = os.path.join(data_dir, 'train_list.txt')
return _reader_creator(
file_list,
'train',
shuffle=True,
color_jitter=False,
rotate=False,
data_dir=data_dir,
pass_id_as_seed=pass_id_as_seed)
def val(data_dir=DATA_DIR):
file_list = os.path.join(data_dir, 'val_list.txt')
return _reader_creator(file_list, 'val', shuffle=False, data_dir=data_dir)
def test(data_dir=DATA_DIR):
file_list = os.path.join(data_dir, 'val_list.txt')
return _reader_creator(file_list, 'test', shuffle=False, data_dir=data_dir)
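# Usage sketch (illustrative only; the batch size is an assumption): the reader creators above
# are typically wrapped with paddle.batch before being fed to a fluid executor.
#
#   train_reader = paddle.batch(train(data_dir=DATA_DIR), batch_size=32)
#   for batch in train_reader():
#       images = np.array([sample[0] for sample in batch])
#       labels = np.array([sample[1] for sample in batch])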
| []
| []
| [
"PADDLE_TRAINER_ID",
"PADDLE_TRAINING_ROLE",
"PADDLE_TRAINERS_NUM"
]
| [] | ["PADDLE_TRAINER_ID", "PADDLE_TRAINING_ROLE", "PADDLE_TRAINERS_NUM"] | python | 3 | 0 | |
python/ray/tests/test_tempfile.py | import os
import shutil
import sys
import time
import pytest
import ray
from ray.test_utils import check_call_ray, wait_for_condition
def unix_socket_create_path(name):
unix = sys.platform != "win32"
return os.path.join(ray._private.utils.get_user_temp_dir(),
name) if unix else None
def unix_socket_verify(unix_socket):
if sys.platform != "win32":
assert os.path.exists(unix_socket), "Socket not found: " + unix_socket
def unix_socket_delete(unix_socket):
unix = sys.platform != "win32"
return os.remove(unix_socket) if unix else None
def test_tempdir(shutdown_only):
shutil.rmtree(ray._private.utils.get_ray_temp_dir(), ignore_errors=True)
ray.init(
_temp_dir=os.path.join(ray._private.utils.get_user_temp_dir(),
"i_am_a_temp_dir"))
assert os.path.exists(
os.path.join(ray._private.utils.get_user_temp_dir(),
"i_am_a_temp_dir")), "Specified temp dir not found."
assert not os.path.exists(ray._private.utils.get_ray_temp_dir()), (
"Default temp dir should not exist.")
shutil.rmtree(
os.path.join(ray._private.utils.get_user_temp_dir(),
"i_am_a_temp_dir"),
ignore_errors=True)
def test_tempdir_commandline():
shutil.rmtree(ray._private.utils.get_ray_temp_dir(), ignore_errors=True)
check_call_ray([
"start", "--head", "--temp-dir=" + os.path.join(
ray._private.utils.get_user_temp_dir(), "i_am_a_temp_dir2"),
"--port", "0"
])
assert os.path.exists(
os.path.join(ray._private.utils.get_user_temp_dir(),
"i_am_a_temp_dir2")), "Specified temp dir not found."
assert not os.path.exists(ray._private.utils.get_ray_temp_dir()
), "Default temp dir should not exist."
check_call_ray(["stop"])
shutil.rmtree(
os.path.join(ray._private.utils.get_user_temp_dir(),
"i_am_a_temp_dir2"),
ignore_errors=True)
def test_tempdir_long_path():
if sys.platform != "win32":
# Test AF_UNIX limits for sockaddr_un->sun_path on POSIX OSes
maxlen = 104 if sys.platform.startswith("darwin") else 108
temp_dir = os.path.join(ray._private.utils.get_user_temp_dir(),
"z" * maxlen)
with pytest.raises(OSError):
ray.init(_temp_dir=temp_dir) # path should be too long
def test_raylet_tempfiles(shutdown_only):
expected_socket_files = ({"plasma_store", "raylet"}
if sys.platform != "win32" else set())
ray.init(num_cpus=0)
node = ray.worker._global_node
top_levels = set(os.listdir(node.get_session_dir_path()))
assert top_levels.issuperset({"sockets", "logs"})
log_files = set(os.listdir(node.get_logs_dir_path()))
log_files_expected = {
"log_monitor.log", "plasma_store.out", "plasma_store.err",
"monitor.log", "redis-shard_0.out", "redis-shard_0.err", "redis.out",
"redis.err", "raylet.out", "raylet.err", "gcs_server.out",
"gcs_server.err", "dashboard.log", "dashboard_agent.log"
}
def check_all_log_file_exists():
for expected in log_files_expected:
log_files = set(os.listdir(node.get_logs_dir_path()))
if expected not in log_files:
return False
return True
wait_for_condition(check_all_log_file_exists)
# Get the list of log files again since the previous one
# might have the stale information.
log_files = set(os.listdir(node.get_logs_dir_path()))
assert log_files_expected.issubset(log_files)
assert log_files.issuperset(log_files_expected)
socket_files = set(os.listdir(node.get_sockets_dir_path()))
assert socket_files == expected_socket_files
ray.shutdown()
ray.init(num_cpus=2)
node = ray.worker._global_node
top_levels = set(os.listdir(node.get_session_dir_path()))
assert top_levels.issuperset({"sockets", "logs"})
time.sleep(3) # wait workers to start
log_files = set(os.listdir(node.get_logs_dir_path()))
assert log_files.issuperset(log_files_expected)
# Check numbers of worker log file.
assert sum(
1 for filename in log_files if filename.startswith("worker")) == 4
socket_files = set(os.listdir(node.get_sockets_dir_path()))
assert socket_files == expected_socket_files
def test_tempdir_privilege(shutdown_only):
os.chmod(ray._private.utils.get_ray_temp_dir(), 0o000)
ray.init(num_cpus=1)
session_dir = ray.worker._global_node.get_session_dir_path()
assert os.path.exists(session_dir), "Specified socket path not found."
def test_session_dir_uniqueness():
session_dirs = set()
for i in range(2):
ray.init(num_cpus=1)
session_dirs.add(ray.worker._global_node.get_session_dir_path())
ray.shutdown()
assert len(session_dirs) == 2
if __name__ == "__main__":
# Make subprocess happy in bazel.
os.environ["LC_ALL"] = "en_US.UTF-8"
os.environ["LANG"] = "en_US.UTF-8"
sys.exit(pytest.main(["-v", __file__]))
| []
| []
| [
"LC_ALL",
"LANG"
]
| [] | ["LC_ALL", "LANG"] | python | 2 | 0 | |
server/models/user.go | // Copyright 2020 Paingha Joe Alagoa. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package models
import (
"context"
"encoding/base64"
"fmt"
"os"
"time"
"bitbucket.com/church/plugins"
"bitbucket.com/church/protos/email"
"bitbucket.com/church/server/config"
"bitbucket.com/church/server/dialer"
"bitbucket.com/church/server/security"
"bitbucket.com/church/utils"
"github.com/dgrijalva/jwt-go"
"github.com/jinzhu/copier"
//Needed for postgres
_ "github.com/jinzhu/gorm/dialects/postgres"
)
//User - user data struct
type User struct {
ID int32 `json:"id,omitempty" sql:"primary_key"`
IsAdmin bool `gorm:"default:false" json:"isAdmin"`
FirstName string `gorm:"not null" json:"firstName"`
LastName string `gorm:"not null" json:"lastName"`
Email string `gorm:"unique;not null" json:"email"`
PhoneNumber string `json:"phoneNumber"`
Password string `gorm:"not null" json:"password"`
EmailVerified bool `gorm:"default:false" json:"emailVerified"`
VerifyCode string `json:"verifyCode"`
PhoneVerified bool `gorm:"default:false" json:"phoneVerified"`
PhoneVerifyCode string `json:"phoneVerifyCode"`
PhoneVerifySentAt time.Time `json:"phone_verify_sent_at"`
PhoneVerifyExpiresAt time.Time `json:"phone_verify_expires_at"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
DeletedAt *time.Time `json:"deleted_at"`
}
//EmailParam - email service sending structure
type EmailParam struct {
Template string `json:"template"`
To string `json:"to"`
Subject string `json:"subject"`
BodyParam map[string]string `json:"body_param"`
}
//Message - Whatsapp message structure
type Message struct {
Content string
To string
Medium string
}
//TableName - table name in database
func (u *User) TableName() string {
return "users"
}
//SendMail - gRPC client that sends email message to email service
func SendMail(emailData EmailParam) error {
conn, err := dialer.Dial(":9001")
if err != nil {
plugins.LogError("gRPC Server internal Client", "did not connect", err)
return err
}
defer conn.Close() // close the gRPC connection when this call returns
c := email.NewEmailClient(conn)
var data email.SendEmailRequest
copier.Copy(&data, emailData)
if _, err := c.SendEmail(context.Background(), &data); err != nil {
plugins.LogWarning("gRPC Server internal Client", "Error when calling Send Email", err)
return err
}
return nil
}
//GetAllUsers - fetch all users at once
func GetAllUsers(offset int32, limit int32) ([]User, int32, error) {
var user []User
var count int32
if err := config.DB.Model(&User{}).Count(&count).Order("created_at desc").Offset(offset).Limit(limit).Find(&user).Error; err != nil {
return user, count, err
}
return user, count, nil
}
//CreateUser - create a user
func CreateUser(user *User) (User, error) {
var dbUser User
if err := config.DB.Where("email = ?", user.Email).First(&dbUser).Error; err != nil {
if err.Error() == "record not found" {
user.VerifyCode = utils.GenerateRandomString(30)
user.Password = security.HashSaltPassword([]byte(user.Password))
//capitalize the first letter of the User's first and last name
user.FirstName = utils.UppercaseName(user.FirstName)
user.LastName = utils.UppercaseName(user.LastName)
if errs := config.DB.Create(user).First(&dbUser).Error; errs != nil {
return dbUser, errs
}
baseURL := os.Getenv("ENV_BASE_URL")
emailBody := map[string]string{
"first_name": user.FirstName,
"last_name": user.LastName,
"link": fmt.Sprintf("%s/user/0/verify-email?token=%s", baseURL, base64.StdEncoding.EncodeToString([]byte(user.VerifyCode))),
}
emailInfo := EmailParam{
To: user.Email,
Subject: "Verify your email",
BodyParam: emailBody,
Template: "TemplateVerifyEmail",
}
if err := SendMail(emailInfo); err != nil {
return dbUser, err
}
return dbUser, nil
}
return dbUser, err
}
return dbUser, nil
}
//LoginUser - fetch one user
func LoginUser(user *User) (User, string, error) {
var dbUser User
jwtSecretByte := []byte(os.Getenv("JWT_SECRET"))
expiresAt := time.Now().Add(1200 * time.Minute)
if err := config.DB.Model(&user).Where(&User{Email: user.Email}).First(&dbUser).Error; err != nil {
return User{}, "", err
}
//compare db password hash and password provided
resp := security.VerifyHash([]byte(dbUser.Password), []byte(user.Password))
if !resp {
return User{}, "", nil
}
fmt.Println("--------------------------------------")
fmt.Println(dbUser.IsAdmin)
fmt.Println("--------------------------------------")
claims := &security.Claims{
UserID: dbUser.ID,
IsAdmin: dbUser.IsAdmin,
StandardClaims: jwt.StandardClaims{
// In JWT, the expiry time is expressed as unix milliseconds
ExpiresAt: expiresAt.Unix(),
},
}
// Declare the token with the algorithm used for signing, and the claims
tokens := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
// Create the JWT string
tokenString, errs := tokens.SignedString(jwtSecretByte)
if errs != nil {
return User{}, "", errs
}
return dbUser, tokenString, nil
}
//GetUser - fetch one user
func GetUser(id int32) (User, error) {
var user User
if err := config.DB.Where("id = ?", id).First(&user).Error; err != nil {
return user, err
}
return user, nil
}
//VerifyEmailUser - verify user's email
func VerifyEmailUser(token string) error {
if err := config.DB.Model(&User{}).Where(&User{VerifyCode: token}).Updates(map[string]interface{}{"email_verified": true, "verify_code": ""}).Error; err != nil {
return err
}
return nil
}
//SendVerifyPhoneUser - send verification code to user's phone number
func SendVerifyPhoneUser(user *User, id int32, code string, medium string) error {
current := time.Now()
future := current.Add(time.Minute * 30) //expires after 30 minutes of being sent
if err := config.DB.Model(&user).Where(&User{ID: id}).Updates(map[string]interface{}{"phone_number": user.PhoneNumber, "phone_verify_sent_at": current, "phone_verify_expires_at": future, "phone_verify_code": code}).Error; err != nil {
return err
}
/*
message := Message{
Content: "SMS Verification Code: " + code,
To: user.PhoneNumber,
Medium: medium,
}
errs := stuff.SmsService.Send("sms", message) //Sends to sms message queue
if errs != nil {
return errs
}
*/
return nil
}
//VerifyPhoneUser - verifies the verify code and expiry time and then sets phone_verified to true
func VerifyPhoneUser(user *User, id int32, token string) (bool, error) {
var dbUser User
current := time.Now()
if err := config.DB.Where("id = ?", id).First(&dbUser).Error; err != nil {
return false, err
}
if current.Before(dbUser.PhoneVerifyExpiresAt) && token == dbUser.PhoneVerifyCode {
if errs := config.DB.Model(&user).Where(&User{PhoneVerifyCode: token}).Updates(map[string]interface{}{"phone_verified": true, "phone_verify_code": ""}).Error; errs != nil {
return false, errs
}
return true, nil
}
return false, nil
}
//UpdateUser - update a user
func UpdateUser(user *User, id int32) error {
if err := config.DB.Model(&user).Where("id = ?", id).Omit("id", "is_admin", "email_verified", "password", "verify_code", "phone_verified", "phone_verify_code", "created_at", "updated_at", "deleted_at", "phone_verify_sent_at", "phone_verify_expires_at").Updates(user).Error; err != nil {
return err
}
return nil
}
//DeleteUser - delete a user
func DeleteUser(id int32) error {
if err := config.DB.Where("id = ?", id).Unscoped().Delete(User{}).Error; err != nil {
return err
}
return nil
}
//ForgotUser - sends a forgot password email to a user
func ForgotUser(user *User) (bool, error) {
var dbUser User
if err := config.DB.Where("email = ?", user.Email).First(&dbUser).Error; err != nil {
return false, err
}
//Send Verification email to Rabbitmq
jwtSecretByte := []byte(os.Getenv("JWT_SECRET"))
expiresAt := time.Now().Add(30 * time.Minute)
//Create a jwt token here
claims := &security.Claims{
UserID: dbUser.ID,
IsAdmin: dbUser.IsAdmin,
StandardClaims: jwt.StandardClaims{
// In JWT, the expiry time is expressed as unix milliseconds
ExpiresAt: expiresAt.Unix(),
},
}
// Declare the token with the algorithm used for signing, and the claims
tokens := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
tokenString, errs := tokens.SignedString(jwtSecretByte)
if errs != nil {
return false, errs
}
baseURL := os.Getenv("ENV_BASE_URL")
emailBody := map[string]string{
"first_name": dbUser.FirstName,
"last_name": dbUser.LastName,
"link": fmt.Sprintf("%s/user/0/forgot-password?token=%s", baseURL, base64.StdEncoding.EncodeToString([]byte(tokenString))),
}
emailInfo := EmailParam{
To: user.Email,
Subject: "Password Reset",
BodyParam: emailBody,
Template: "TemplateResetEmail",
}
if err := SendMail(emailInfo); err != nil {
return false, err
}
return true, nil
}
| [
"\"ENV_BASE_URL\"",
"\"JWT_SECRET\"",
"\"JWT_SECRET\"",
"\"ENV_BASE_URL\""
]
| []
| [
"JWT_SECRET",
"ENV_BASE_URL"
]
| [] | ["JWT_SECRET", "ENV_BASE_URL"] | go | 2 | 0 | |
orderer/common/server/main.go | /*
Copyright IBM Corp. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package server
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"net"
"net/http"
_ "net/http/pprof" // This is essentially the main package for the orderer
"os"
"os/signal"
"syscall"
"time"
"github.com/golang/protobuf/proto"
"github.com/hyperledger/fabric-lib-go/healthz"
"github.com/hyperledger/fabric/common/channelconfig"
"github.com/hyperledger/fabric/common/crypto"
"github.com/hyperledger/fabric/common/flogging"
floggingmetrics "github.com/hyperledger/fabric/common/flogging/metrics"
"github.com/hyperledger/fabric/common/grpclogging"
"github.com/hyperledger/fabric/common/grpcmetrics"
"github.com/hyperledger/fabric/common/ledger/blockledger"
"github.com/hyperledger/fabric/common/localmsp"
"github.com/hyperledger/fabric/common/metrics"
"github.com/hyperledger/fabric/common/metrics/disabled"
"github.com/hyperledger/fabric/common/tools/configtxgen/encoder"
genesisconfig "github.com/hyperledger/fabric/common/tools/configtxgen/localconfig"
"github.com/hyperledger/fabric/common/tools/protolator"
"github.com/hyperledger/fabric/common/util"
"github.com/hyperledger/fabric/core/comm"
"github.com/hyperledger/fabric/core/operations"
"github.com/hyperledger/fabric/msp"
mspmgmt "github.com/hyperledger/fabric/msp/mgmt"
"github.com/hyperledger/fabric/orderer/common/bootstrap/file"
"github.com/hyperledger/fabric/orderer/common/cluster"
"github.com/hyperledger/fabric/orderer/common/localconfig"
"github.com/hyperledger/fabric/orderer/common/metadata"
"github.com/hyperledger/fabric/orderer/common/multichannel"
"github.com/hyperledger/fabric/orderer/consensus"
"github.com/hyperledger/fabric/orderer/consensus/etcdraft"
"github.com/hyperledger/fabric/orderer/consensus/kafka"
"github.com/hyperledger/fabric/orderer/consensus/solo"
cb "github.com/hyperledger/fabric/protos/common"
ab "github.com/hyperledger/fabric/protos/orderer"
"github.com/hyperledger/fabric/protos/utils"
"go.uber.org/zap/zapcore"
"google.golang.org/grpc"
"gopkg.in/alecthomas/kingpin.v2"
)
var logger = flogging.MustGetLogger("orderer.common.server")
// command line flags
var (
app = kingpin.New("orderer", "Hyperledger Fabric orderer node")
start = app.Command("start", "Start the orderer node").Default()
version = app.Command("version", "Show version information")
benchmark = app.Command("benchmark", "Run orderer in benchmark mode")
clusterTypes = map[string]struct{}{"etcdraft": {}}
)
// Main is the entry point of orderer process
func Main() {
fullCmd := kingpin.MustParse(app.Parse(os.Args[1:]))
// "version" command
if fullCmd == version.FullCommand() {
fmt.Println(metadata.GetVersionInfo())
return
}
conf, err := localconfig.Load()
if err != nil {
logger.Error("failed to parse config: ", err)
os.Exit(1)
}
initializeLogging()
initializeLocalMsp(conf)
prettyPrintStruct(conf)
Start(fullCmd, conf)
}
// Start provides a layer of abstraction for benchmark test
func Start(cmd string, conf *localconfig.TopLevel) {
bootstrapBlock := extractBootstrapBlock(conf)
if err := ValidateBootstrapBlock(bootstrapBlock); err != nil {
logger.Panicf("Failed validating bootstrap block: %v", err)
}
opsSystem := newOperationsSystem(conf.Operations, conf.Metrics)
err := opsSystem.Start()
if err != nil {
logger.Panicf("failed to initialize operations subsystem: %s", err)
}
defer opsSystem.Stop()
metricsProvider := opsSystem.Provider
lf, _ := createLedgerFactory(conf, metricsProvider)
sysChanLastConfigBlock := extractSysChanLastConfig(lf, bootstrapBlock)
clusterBootBlock := selectClusterBootBlock(bootstrapBlock, sysChanLastConfigBlock)
clusterType := isClusterType(clusterBootBlock)
signer := localmsp.NewSigner()
clusterClientConfig := initializeClusterClientConfig(conf, clusterType, bootstrapBlock)
clusterDialer := &cluster.PredicateDialer{
ClientConfig: clusterClientConfig,
}
r := createReplicator(lf, bootstrapBlock, conf, clusterClientConfig.SecOpts, signer)
// Only clusters that are equipped with a recent config block can replicate.
if clusterType && conf.General.GenesisMethod == "file" {
r.replicateIfNeeded(bootstrapBlock)
}
logObserver := floggingmetrics.NewObserver(metricsProvider)
flogging.Global.SetObserver(logObserver)
serverConfig := initializeServerConfig(conf, metricsProvider)
grpcServer := initializeGrpcServer(conf, serverConfig)
caSupport := &comm.CredentialSupport{
AppRootCAsByChain: make(map[string]comm.CertificateBundle),
OrdererRootCAsByChainAndOrg: make(comm.OrgRootCAs),
ClientRootCAs: serverConfig.SecOpts.ClientRootCAs,
}
clusterServerConfig := serverConfig
clusterGRPCServer := grpcServer
if clusterType {
clusterServerConfig, clusterGRPCServer = configureClusterListener(conf, serverConfig, grpcServer, ioutil.ReadFile)
}
var servers = []*comm.GRPCServer{grpcServer}
// If we have a separate gRPC server for the cluster, we need to update its TLS
// CA certificate pool too.
if clusterGRPCServer != grpcServer {
servers = append(servers, clusterGRPCServer)
}
tlsCallback := func(bundle *channelconfig.Bundle) {
// only need to do this if mutual TLS is required or if the orderer node is part of a cluster
if grpcServer.MutualTLSRequired() || clusterType {
logger.Debug("Executing callback to update root CAs")
updateTrustedRoots(caSupport, bundle, servers...)
if clusterType {
updateClusterDialer(caSupport, clusterDialer, clusterClientConfig.SecOpts.ServerRootCAs)
}
}
}
manager := initializeMultichannelRegistrar(clusterBootBlock, r, clusterDialer, clusterServerConfig, clusterGRPCServer, conf, signer, metricsProvider, opsSystem, lf, tlsCallback)
mutualTLS := serverConfig.SecOpts.UseTLS && serverConfig.SecOpts.RequireClientCert
expiration := conf.General.Authentication.NoExpirationChecks
server := NewServer(manager, metricsProvider, &conf.Debug, conf.General.Authentication.TimeWindow, mutualTLS, expiration)
logger.Infof("Starting %s", metadata.GetVersionInfo())
go handleSignals(addPlatformSignals(map[os.Signal]func(){
syscall.SIGTERM: func() {
grpcServer.Stop()
if clusterGRPCServer != grpcServer {
clusterGRPCServer.Stop()
}
},
}))
if clusterGRPCServer != grpcServer {
logger.Info("Starting cluster listener on", clusterGRPCServer.Address())
go clusterGRPCServer.Start()
}
initializeProfilingService(conf)
ab.RegisterAtomicBroadcastServer(grpcServer.Server(), server)
logger.Info("Beginning to serve requests")
grpcServer.Start()
}
// Extract system channel last config block
func extractSysChanLastConfig(lf blockledger.Factory, bootstrapBlock *cb.Block) *cb.Block {
// Are we bootstrapping?
chainCount := len(lf.ChainIDs())
if chainCount == 0 {
logger.Info("Bootstrapping because no existing channels")
return nil
}
logger.Infof("Not bootstrapping because of %d existing channels", chainCount)
systemChannelName, err := utils.GetChainIDFromBlock(bootstrapBlock)
if err != nil {
logger.Panicf("Failed extracting system channel name from bootstrap block: %v", err)
}
systemChannelLedger, err := lf.GetOrCreate(systemChannelName)
if err != nil {
logger.Panicf("Failed getting system channel ledger: %v", err)
}
height := systemChannelLedger.Height()
lastConfigBlock := multichannel.ConfigBlock(systemChannelLedger)
logger.Infof("System channel: name=%s, height=%d, last config block number=%d",
systemChannelName, height, lastConfigBlock.Header.Number)
return lastConfigBlock
}
// Select cluster boot block
func selectClusterBootBlock(bootstrapBlock, sysChanLastConfig *cb.Block) *cb.Block {
if sysChanLastConfig == nil {
logger.Debug("Selected bootstrap block, because system channel last config block is nil")
return bootstrapBlock
}
if sysChanLastConfig.Header.Number > bootstrapBlock.Header.Number {
logger.Infof("Cluster boot block is system channel last config block; Blocks Header.Number system-channel=%d, bootstrap=%d",
sysChanLastConfig.Header.Number, bootstrapBlock.Header.Number)
return sysChanLastConfig
}
logger.Infof("Cluster boot block is bootstrap (genesis) block; Blocks Header.Number system-channel=%d, bootstrap=%d",
sysChanLastConfig.Header.Number, bootstrapBlock.Header.Number)
return bootstrapBlock
}
func createReplicator(
lf blockledger.Factory,
bootstrapBlock *cb.Block,
conf *localconfig.TopLevel,
secOpts *comm.SecureOptions,
signer crypto.LocalSigner,
) *replicationInitiator {
logger := flogging.MustGetLogger("orderer.common.cluster")
vl := &verifierLoader{
verifierFactory: &cluster.BlockVerifierAssembler{Logger: logger},
onFailure: func(block *cb.Block) {
protolator.DeepMarshalJSON(os.Stdout, block)
},
ledgerFactory: lf,
logger: logger,
}
systemChannelName, err := utils.GetChainIDFromBlock(bootstrapBlock)
if err != nil {
logger.Panicf("Failed extracting system channel name from bootstrap block: %v", err)
}
// System channel is not verified because we trust the bootstrap block
// and use backward hash chain verification.
verifiersByChannel := vl.loadVerifiers()
verifiersByChannel[systemChannelName] = &cluster.NoopBlockVerifier{}
vr := &cluster.VerificationRegistry{
LoadVerifier: vl.loadVerifier,
Logger: logger,
VerifiersByChannel: verifiersByChannel,
VerifierFactory: &cluster.BlockVerifierAssembler{Logger: logger},
}
ledgerFactory := &ledgerFactory{
Factory: lf,
onBlockCommit: vr.BlockCommitted,
}
return &replicationInitiator{
registerChain: vr.RegisterVerifier,
verifierRetriever: vr,
logger: logger,
secOpts: secOpts,
conf: conf,
lf: ledgerFactory,
signer: signer,
}
}
func initializeLogging() {
loggingSpec := os.Getenv("FABRIC_LOGGING_SPEC")
loggingFormat := os.Getenv("FABRIC_LOGGING_FORMAT")
flogging.Init(flogging.Config{
Format: loggingFormat,
Writer: os.Stderr,
LogSpec: loggingSpec,
})
}
// Start the profiling service if enabled.
func initializeProfilingService(conf *localconfig.TopLevel) {
if conf.General.Profile.Enabled {
go func() {
logger.Info("Starting Go pprof profiling service on:", conf.General.Profile.Address)
// The ListenAndServe() call does not return unless an error occurs.
logger.Panic("Go pprof service failed:", http.ListenAndServe(conf.General.Profile.Address, nil))
}()
}
}
func handleSignals(handlers map[os.Signal]func()) {
var signals []os.Signal
for sig := range handlers {
signals = append(signals, sig)
}
signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan, signals...)
for sig := range signalChan {
logger.Infof("Received signal: %d (%s)", sig, sig)
handlers[sig]()
}
}
type loadPEMFunc func(string) ([]byte, error)
// configureClusterListener gets a ServerConfig and a GRPCServer, and:
// 1) If the TopLevel configuration states that the cluster configuration for the cluster gRPC service is missing, returns them back.
// 2) Else, returns a new ServerConfig and a new gRPC server (with its own TLS listener on a different port).
func configureClusterListener(conf *localconfig.TopLevel, generalConf comm.ServerConfig, generalSrv *comm.GRPCServer, loadPEM loadPEMFunc) (comm.ServerConfig, *comm.GRPCServer) {
clusterConf := conf.General.Cluster
// If listen address is not configured, or the TLS certificate isn't configured,
// it means we use the general listener of the node.
if clusterConf.ListenPort == 0 && clusterConf.ServerCertificate == "" && clusterConf.ListenAddress == "" && clusterConf.ServerPrivateKey == "" {
logger.Info("Cluster listener is not configured, defaulting to use the general listener on port", conf.General.ListenPort)
return generalConf, generalSrv
}
// Else, one of the above is defined, so all 4 properties should be defined.
if clusterConf.ListenPort == 0 || clusterConf.ServerCertificate == "" || clusterConf.ListenAddress == "" || clusterConf.ServerPrivateKey == "" {
logger.Panic("Options: General.Cluster.ListenPort, General.Cluster.ListenAddress, General.Cluster.ServerCertificate," +
" General.Cluster.ServerPrivateKey, should be defined altogether.")
}
cert, err := loadPEM(clusterConf.ServerCertificate)
if err != nil {
logger.Panicf("Failed to load cluster server certificate from '%s' (%s)", clusterConf.ServerCertificate, err)
}
key, err := loadPEM(clusterConf.ServerPrivateKey)
if err != nil {
logger.Panicf("Failed to load cluster server key from '%s' (%s)", clusterConf.ServerPrivateKey, err)
}
port := fmt.Sprintf("%d", clusterConf.ListenPort)
bindAddr := net.JoinHostPort(clusterConf.ListenAddress, port)
var clientRootCAs [][]byte
for _, serverRoot := range conf.General.Cluster.RootCAs {
rootCACert, err := loadPEM(serverRoot)
if err != nil {
logger.Panicf("Failed to load CA cert file '%s' (%s)",
serverRoot, err)
}
clientRootCAs = append(clientRootCAs, rootCACert)
}
serverConf := comm.ServerConfig{
StreamInterceptors: generalConf.StreamInterceptors,
UnaryInterceptors: generalConf.UnaryInterceptors,
ConnectionTimeout: generalConf.ConnectionTimeout,
MetricsProvider: generalConf.MetricsProvider,
Logger: generalConf.Logger,
KaOpts: generalConf.KaOpts,
SecOpts: &comm.SecureOptions{
TimeShift: conf.General.Cluster.TLSHandshakeTimeShift,
CipherSuites: comm.DefaultTLSCipherSuites,
ClientRootCAs: clientRootCAs,
RequireClientCert: true,
Certificate: cert,
UseTLS: true,
Key: key,
},
}
srv, err := comm.NewGRPCServer(bindAddr, serverConf)
if err != nil {
logger.Panicf("Failed creating gRPC server on %s:%d due to %v", clusterConf.ListenAddress, clusterConf.ListenPort, err)
}
return serverConf, srv
}
func initializeClusterClientConfig(conf *localconfig.TopLevel, clusterType bool, bootstrapBlock *cb.Block) comm.ClientConfig {
if clusterType && !conf.General.TLS.Enabled {
logger.Panicf("TLS is required for running ordering nodes of type %s.", consensusType(bootstrapBlock))
}
cc := comm.ClientConfig{
AsyncConnect: true,
KaOpts: comm.DefaultKeepaliveOptions,
Timeout: conf.General.Cluster.DialTimeout,
SecOpts: &comm.SecureOptions{},
}
if (!conf.General.TLS.Enabled) || conf.General.Cluster.ClientCertificate == "" {
return cc
}
certFile := conf.General.Cluster.ClientCertificate
certBytes, err := ioutil.ReadFile(certFile)
if err != nil {
logger.Fatalf("Failed to load client TLS certificate file '%s' (%s)", certFile, err)
}
keyFile := conf.General.Cluster.ClientPrivateKey
keyBytes, err := ioutil.ReadFile(keyFile)
if err != nil {
logger.Fatalf("Failed to load client TLS key file '%s' (%s)", keyFile, err)
}
var serverRootCAs [][]byte
for _, serverRoot := range conf.General.Cluster.RootCAs {
rootCACert, err := ioutil.ReadFile(serverRoot)
if err != nil {
logger.Fatalf("Failed to load ServerRootCAs file '%s' (%s)",
serverRoot, err)
}
serverRootCAs = append(serverRootCAs, rootCACert)
}
cc.SecOpts = &comm.SecureOptions{
TimeShift: conf.General.Cluster.TLSHandshakeTimeShift,
RequireClientCert: true,
CipherSuites: comm.DefaultTLSCipherSuites,
ServerRootCAs: serverRootCAs,
Certificate: certBytes,
Key: keyBytes,
UseTLS: true,
}
return cc
}
func initializeServerConfig(conf *localconfig.TopLevel, metricsProvider metrics.Provider) comm.ServerConfig {
// secure server config
secureOpts := &comm.SecureOptions{
UseTLS: conf.General.TLS.Enabled,
RequireClientCert: conf.General.TLS.ClientAuthRequired,
}
// check to see if TLS is enabled
if secureOpts.UseTLS {
msg := "TLS"
// load crypto material from files
serverCertificate, err := ioutil.ReadFile(conf.General.TLS.Certificate)
if err != nil {
logger.Fatalf("Failed to load server Certificate file '%s' (%s)",
conf.General.TLS.Certificate, err)
}
serverKey, err := ioutil.ReadFile(conf.General.TLS.PrivateKey)
if err != nil {
logger.Fatalf("Failed to load PrivateKey file '%s' (%s)",
conf.General.TLS.PrivateKey, err)
}
var serverRootCAs, clientRootCAs [][]byte
for _, serverRoot := range conf.General.TLS.RootCAs {
root, err := ioutil.ReadFile(serverRoot)
if err != nil {
logger.Fatalf("Failed to load ServerRootCAs file '%s' (%s)",
serverRoot, err)
}
serverRootCAs = append(serverRootCAs, root)
}
if secureOpts.RequireClientCert {
for _, clientRoot := range conf.General.TLS.ClientRootCAs {
root, err := ioutil.ReadFile(clientRoot)
if err != nil {
logger.Fatalf("Failed to load ClientRootCAs file '%s' (%s)",
clientRoot, err)
}
clientRootCAs = append(clientRootCAs, root)
}
msg = "mutual TLS"
}
secureOpts.Key = serverKey
secureOpts.Certificate = serverCertificate
secureOpts.ClientRootCAs = clientRootCAs
logger.Infof("Starting orderer with %s enabled", msg)
}
kaOpts := comm.DefaultKeepaliveOptions
// keepalive settings
// ServerMinInterval must be greater than 0
if conf.General.Keepalive.ServerMinInterval > time.Duration(0) {
kaOpts.ServerMinInterval = conf.General.Keepalive.ServerMinInterval
}
kaOpts.ServerInterval = conf.General.Keepalive.ServerInterval
kaOpts.ServerTimeout = conf.General.Keepalive.ServerTimeout
commLogger := flogging.MustGetLogger("core.comm").With("server", "Orderer")
if metricsProvider == nil {
metricsProvider = &disabled.Provider{}
}
return comm.ServerConfig{
SecOpts: secureOpts,
KaOpts: kaOpts,
Logger: commLogger,
MetricsProvider: metricsProvider,
ConnectionTimeout: conf.General.ConnectionTimeout,
StreamInterceptors: []grpc.StreamServerInterceptor{
grpcmetrics.StreamServerInterceptor(grpcmetrics.NewStreamMetrics(metricsProvider)),
grpclogging.StreamServerInterceptor(flogging.MustGetLogger("comm.grpc.server").Zap()),
},
UnaryInterceptors: []grpc.UnaryServerInterceptor{
grpcmetrics.UnaryServerInterceptor(grpcmetrics.NewUnaryMetrics(metricsProvider)),
grpclogging.UnaryServerInterceptor(
flogging.MustGetLogger("comm.grpc.server").Zap(),
grpclogging.WithLeveler(grpclogging.LevelerFunc(grpcLeveler)),
),
},
}
}
func grpcLeveler(ctx context.Context, fullMethod string) zapcore.Level {
switch fullMethod {
case "/orderer.Cluster/Step":
return flogging.DisabledLevel
default:
return zapcore.InfoLevel
}
}
func extractBootstrapBlock(conf *localconfig.TopLevel) *cb.Block {
var bootstrapBlock *cb.Block
// Select the bootstrapping mechanism
switch conf.General.GenesisMethod {
case "provisional":
bootstrapBlock = encoder.New(genesisconfig.Load(conf.General.GenesisProfile)).GenesisBlockForChannel(conf.General.SystemChannel)
case "file":
bootstrapBlock = file.New(conf.General.GenesisFile).GenesisBlock()
default:
logger.Panic("Unknown genesis method:", conf.General.GenesisMethod)
}
return bootstrapBlock
}
func initializeBootstrapChannel(genesisBlock *cb.Block, lf blockledger.Factory) {
chainID, err := utils.GetChainIDFromBlock(genesisBlock)
if err != nil {
logger.Fatal("Failed to parse channel ID from genesis block:", err)
}
gl, err := lf.GetOrCreate(chainID)
if err != nil {
logger.Fatal("Failed to create the system channel:", err)
}
if err := gl.Append(genesisBlock); err != nil {
logger.Fatal("Could not write genesis block to ledger:", err)
}
}
func isClusterType(genesisBlock *cb.Block) bool {
_, exists := clusterTypes[consensusType(genesisBlock)]
return exists
}
func consensusType(genesisBlock *cb.Block) string {
if genesisBlock.Data == nil || len(genesisBlock.Data.Data) == 0 {
logger.Fatalf("Empty genesis block")
}
env := &cb.Envelope{}
if err := proto.Unmarshal(genesisBlock.Data.Data[0], env); err != nil {
logger.Fatalf("Failed to unmarshal the genesis block's envelope: %v", err)
}
bundle, err := channelconfig.NewBundleFromEnvelope(env)
if err != nil {
logger.Fatalf("Failed creating bundle from the genesis block: %v", err)
}
ordConf, exists := bundle.OrdererConfig()
if !exists {
logger.Fatalf("Orderer config doesn't exist in bundle derived from genesis block")
}
return ordConf.ConsensusType()
}
func initializeGrpcServer(conf *localconfig.TopLevel, serverConfig comm.ServerConfig) *comm.GRPCServer {
lis, err := net.Listen("tcp", fmt.Sprintf("%s:%d", conf.General.ListenAddress, conf.General.ListenPort))
if err != nil {
logger.Fatal("Failed to listen:", err)
}
// Create GRPC server - return if an error occurs
grpcServer, err := comm.NewGRPCServerFromListener(lis, serverConfig)
if err != nil {
logger.Fatal("Failed to return new GRPC server:", err)
}
return grpcServer
}
func initializeLocalMsp(conf *localconfig.TopLevel) {
// Load local MSP
err := mspmgmt.LoadLocalMsp(conf.General.LocalMSPDir, conf.General.BCCSP, conf.General.LocalMSPID)
if err != nil { // Handle errors reading the config file
logger.Fatal("Failed to initialize local MSP:", err)
}
}
//go:generate counterfeiter -o mocks/health_checker.go -fake-name HealthChecker . healthChecker
// HealthChecker defines the contract for health checker
type healthChecker interface {
RegisterChecker(component string, checker healthz.HealthChecker) error
}
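// The following is an illustrative sketch, not part of the upstream file: a
// minimal healthChecker implementation that simply records which components
// were registered. Something like this could stand in for the operations
// system in tests; the type and field names are assumptions.
type recordingHealthChecker struct {
	components []string
}
// RegisterChecker records the component name and accepts every checker.
func (r *recordingHealthChecker) RegisterChecker(component string, checker healthz.HealthChecker) error {
	r.components = append(r.components, component)
	return nil
}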
func initializeMultichannelRegistrar(
bootstrapBlock *cb.Block,
ri *replicationInitiator,
clusterDialer *cluster.PredicateDialer,
srvConf comm.ServerConfig,
srv *comm.GRPCServer,
conf *localconfig.TopLevel,
signer crypto.LocalSigner,
metricsProvider metrics.Provider,
healthChecker healthChecker,
lf blockledger.Factory,
callbacks ...channelconfig.BundleActor,
) *multichannel.Registrar {
genesisBlock := extractBootstrapBlock(conf)
// Are we bootstrapping?
if len(lf.ChainIDs()) == 0 {
initializeBootstrapChannel(genesisBlock, lf)
} else {
logger.Info("Not bootstrapping because of existing channels")
}
consenters := make(map[string]consensus.Consenter)
registrar := multichannel.NewRegistrar(*conf, lf, signer, metricsProvider, callbacks...)
consenters["solo"] = solo.New()
var kafkaMetrics *kafka.Metrics
consenters["kafka"], kafkaMetrics = kafka.New(conf.Kafka, metricsProvider, healthChecker)
	// Note: we pass a nil channel here; we could pass a channel that
	// closes if we wished to clean up this routine on exit.
go kafkaMetrics.PollGoMetricsUntilStop(time.Minute, nil)
if isClusterType(bootstrapBlock) {
initializeEtcdraftConsenter(consenters, conf, lf, clusterDialer, bootstrapBlock, ri, srvConf, srv, registrar, metricsProvider)
}
registrar.Initialize(consenters)
return registrar
}
func initializeEtcdraftConsenter(
consenters map[string]consensus.Consenter,
conf *localconfig.TopLevel,
lf blockledger.Factory,
clusterDialer *cluster.PredicateDialer,
bootstrapBlock *cb.Block,
ri *replicationInitiator,
srvConf comm.ServerConfig,
srv *comm.GRPCServer,
registrar *multichannel.Registrar,
metricsProvider metrics.Provider,
) {
replicationRefreshInterval := conf.General.Cluster.ReplicationBackgroundRefreshInterval
if replicationRefreshInterval == 0 {
replicationRefreshInterval = defaultReplicationBackgroundRefreshInterval
}
systemChannelName, err := utils.GetChainIDFromBlock(bootstrapBlock)
if err != nil {
ri.logger.Panicf("Failed extracting system channel name from bootstrap block: %v", err)
}
systemLedger, err := lf.GetOrCreate(systemChannelName)
if err != nil {
ri.logger.Panicf("Failed obtaining system channel (%s) ledger: %v", systemChannelName, err)
}
getConfigBlock := func() *cb.Block {
return multichannel.ConfigBlock(systemLedger)
}
exponentialSleep := exponentialDurationSeries(replicationBackgroundInitialRefreshInterval, replicationRefreshInterval)
ticker := newTicker(exponentialSleep)
icr := &inactiveChainReplicator{
logger: logger,
scheduleChan: ticker.C,
quitChan: make(chan struct{}),
replicator: ri,
chains2CreationCallbacks: make(map[string]chainCreation),
retrieveLastSysChannelConfigBlock: getConfigBlock,
registerChain: ri.registerChain,
}
// Use the inactiveChainReplicator as a channel lister, since it has knowledge
// of all inactive chains.
	// This is to prevent us from pulling the entire system chain when attempting to enumerate
// the channels in the system.
ri.channelLister = icr
go icr.run()
raftConsenter := etcdraft.New(clusterDialer, conf, srvConf, srv, registrar, icr, metricsProvider)
consenters["etcdraft"] = raftConsenter
}
func newOperationsSystem(ops localconfig.Operations, metrics localconfig.Metrics) *operations.System {
return operations.NewSystem(operations.Options{
Logger: flogging.MustGetLogger("orderer.operations"),
ListenAddress: ops.ListenAddress,
Metrics: operations.MetricsOptions{
Provider: metrics.Provider,
Statsd: &operations.Statsd{
Network: metrics.Statsd.Network,
Address: metrics.Statsd.Address,
WriteInterval: metrics.Statsd.WriteInterval,
Prefix: metrics.Statsd.Prefix,
},
},
TLS: operations.TLS{
Enabled: ops.TLS.Enabled,
CertFile: ops.TLS.Certificate,
KeyFile: ops.TLS.PrivateKey,
ClientCertRequired: ops.TLS.ClientAuthRequired,
ClientCACertFiles: ops.TLS.ClientRootCAs,
},
Version: metadata.Version,
})
}
func updateTrustedRoots(rootCASupport *comm.CredentialSupport, cm channelconfig.Resources, servers ...*comm.GRPCServer) {
rootCASupport.Lock()
defer rootCASupport.Unlock()
appOrgMSPs := make(map[string]struct{})
ordOrgMSPs := make(map[string]struct{})
if ac, ok := cm.ApplicationConfig(); ok {
// loop through app orgs and build map of MSPIDs
for _, appOrg := range ac.Organizations() {
appOrgMSPs[appOrg.MSPID()] = struct{}{}
}
}
if ac, ok := cm.OrdererConfig(); ok {
// loop through orderer orgs and build map of MSPIDs
for _, ordOrg := range ac.Organizations() {
ordOrgMSPs[ordOrg.MSPID()] = struct{}{}
}
}
if cc, ok := cm.ConsortiumsConfig(); ok {
for _, consortium := range cc.Consortiums() {
// loop through consortium orgs and build map of MSPIDs
for _, consortiumOrg := range consortium.Organizations() {
appOrgMSPs[consortiumOrg.MSPID()] = struct{}{}
}
}
}
cid := cm.ConfigtxValidator().ChainID()
logger.Debugf("updating root CAs for channel [%s]", cid)
msps, err := cm.MSPManager().GetMSPs()
if err != nil {
logger.Errorf("Error getting root CAs for channel %s (%s)", cid, err)
return
}
var appRootCAs comm.CertificateBundle
ordererRootCAsPerOrg := make(map[string]comm.CertificateBundle)
for k, v := range msps {
var ordererRootCAs comm.CertificateBundle
// check to see if this is a FABRIC MSP
if v.GetType() == msp.FABRIC {
for _, root := range v.GetTLSRootCerts() {
				// check to see if this is an app org MSP
if _, ok := appOrgMSPs[k]; ok {
logger.Debugf("adding app root CAs for MSP [%s]", k)
appRootCAs = append(appRootCAs, root)
}
				// check to see if this is an orderer org MSP
if _, ok := ordOrgMSPs[k]; ok {
logger.Debugf("adding orderer root CAs for MSP [%s]", k)
ordererRootCAs = append(ordererRootCAs, root)
}
}
for _, intermediate := range v.GetTLSIntermediateCerts() {
				// check to see if this is an app org MSP
if _, ok := appOrgMSPs[k]; ok {
logger.Debugf("adding app root CAs for MSP [%s]", k)
appRootCAs = append(appRootCAs, intermediate)
}
				// check to see if this is an orderer org MSP
if _, ok := ordOrgMSPs[k]; ok {
logger.Debugf("adding orderer root CAs for MSP [%s]", k)
ordererRootCAs = append(ordererRootCAs, intermediate)
}
}
ordererRootCAsPerOrg[k] = ordererRootCAs
}
}
rootCASupport.AppRootCAsByChain[cid] = appRootCAs
rootCASupport.OrdererRootCAsByChainAndOrg[cid] = ordererRootCAsPerOrg
// now iterate over all roots for all app and orderer chains
trustedRoots := [][]byte{}
for _, roots := range rootCASupport.AppRootCAsByChain {
trustedRoots = append(trustedRoots, roots...)
}
// add all root CAs from all channels to the trusted roots
for _, orgRootCAs := range rootCASupport.OrdererRootCAsByChainAndOrg {
for _, roots := range orgRootCAs {
trustedRoots = append(trustedRoots, roots...)
}
}
// also need to append statically configured root certs
if len(rootCASupport.ClientRootCAs) > 0 {
trustedRoots = append(trustedRoots, rootCASupport.ClientRootCAs...)
}
// now update the client roots for the gRPC server
for _, srv := range servers {
err = srv.SetClientRootCAs(trustedRoots)
if err != nil {
msg := "Failed to update trusted roots for orderer from latest config " +
"block. This orderer may not be able to communicate " +
"with members of channel %s (%s)"
logger.Warningf(msg, cm.ConfigtxValidator().ChainID(), err)
}
}
}
func updateClusterDialer(rootCASupport *comm.CredentialSupport, clusterDialer *cluster.PredicateDialer, localClusterRootCAs [][]byte) {
rootCASupport.Lock()
defer rootCASupport.Unlock()
// Iterate over all orderer root CAs for all chains and add them
// to the root CAs
var clusterRootCAs [][]byte
for _, orgRootCAs := range rootCASupport.OrdererRootCAsByChainAndOrg {
for _, roots := range orgRootCAs {
clusterRootCAs = append(clusterRootCAs, roots...)
}
}
// Add the local root CAs too
clusterRootCAs = append(clusterRootCAs, localClusterRootCAs...)
// Update the cluster config with the new root CAs
clusterDialer.UpdateRootCAs(clusterRootCAs)
}
func prettyPrintStruct(i interface{}) {
params := util.Flatten(i)
var buffer bytes.Buffer
for i := range params {
buffer.WriteString("\n\t")
buffer.WriteString(params[i])
}
logger.Infof("Orderer config values:%s\n", buffer.String())
}
| [
"\"FABRIC_LOGGING_SPEC\"",
"\"FABRIC_LOGGING_FORMAT\""
]
| []
| [
"FABRIC_LOGGING_SPEC",
"FABRIC_LOGGING_FORMAT"
]
| [] | ["FABRIC_LOGGING_SPEC", "FABRIC_LOGGING_FORMAT"] | go | 2 | 0 | |
providers/heroku/heroku_test.go | package heroku_test
import (
"github.com/markbates/goth"
"github.com/markbates/goth/providers/heroku"
"github.com/stretchr/testify/assert"
"os"
"testing"
)
func Test_New(t *testing.T) {
t.Parallel()
a := assert.New(t)
p := provider()
a.Equal(p.ClientKey, os.Getenv("HEROKU_KEY"))
a.Equal(p.Secret, os.Getenv("HEROKU_SECRET"))
a.Equal(p.CallbackURL, "/foo")
}
func Test_Implements_Provider(t *testing.T) {
t.Parallel()
a := assert.New(t)
a.Implements((*goth.Provider)(nil), provider())
}
func Test_BeginAuth(t *testing.T) {
t.Parallel()
a := assert.New(t)
p := provider()
session, err := p.BeginAuth("test_state")
s := session.(*heroku.Session)
a.NoError(err)
a.Contains(s.AuthURL, "id.heroku.com/oauth/authorize")
}
func Test_SessionFromJSON(t *testing.T) {
t.Parallel()
a := assert.New(t)
p := provider()
session, err := p.UnmarshalSession(`{"AuthURL":"https://id.heroku.com/oauth/authorize","AccessToken":"1234567890"}`)
a.NoError(err)
s := session.(*heroku.Session)
a.Equal(s.AuthURL, "https://id.heroku.com/oauth/authorize")
a.Equal(s.AccessToken, "1234567890")
}
func provider() *heroku.Provider {
return heroku.New(os.Getenv("HEROKU_KEY"), os.Getenv("HEROKU_SECRET"), "/foo")
}
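// Note (illustrative, not part of the upstream file): these tests read the
// HEROKU_KEY and HEROKU_SECRET environment variables via provider() above.
// One way to run them locally is with placeholder values, e.g.:
//	HEROKU_KEY=dummy-key HEROKU_SECRET=dummy-secret go test ./providers/heroku
// The key values shown are assumptions for illustration only.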
| [
"\"HEROKU_KEY\"",
"\"HEROKU_SECRET\"",
"\"HEROKU_KEY\"",
"\"HEROKU_SECRET\""
]
| []
| [
"HEROKU_KEY",
"HEROKU_SECRET"
]
| [] | ["HEROKU_KEY", "HEROKU_SECRET"] | go | 2 | 0 | |
examples/chat/invite/inviteAUser/main.go | package main
import (
"fmt"
"os"
"go.m3o.com/chat"
)
// Invite a user to a chat room
func main() {
chatService := chat.NewChatService(os.Getenv("M3O_API_TOKEN"))
rsp, err := chatService.Invite(&chat.InviteRequest{})
fmt.Println(rsp, err)
}
| [
"\"M3O_API_TOKEN\""
]
| []
| [
"M3O_API_TOKEN"
]
| [] | ["M3O_API_TOKEN"] | go | 1 | 0 | |
cmd/kubelet/app/server.go | /*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package app makes it easy to create a kubelet server for various contexts.
package app
import (
"crypto/tls"
"errors"
"fmt"
"math/rand"
"net"
"net/http"
_ "net/http/pprof"
"net/url"
"os"
"path"
"strconv"
"time"
"github.com/golang/glog"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"k8s.io/api/core/v1"
clientv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
utilnet "k8s.io/apimachinery/pkg/util/net"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/server/healthz"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientgoclientset "k8s.io/client-go/kubernetes"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
restclient "k8s.io/client-go/rest"
clientauth "k8s.io/client-go/tools/auth"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/client-go/tools/record"
certutil "k8s.io/client-go/util/cert"
"k8s.io/kubernetes/cmd/kubelet/app/options"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/componentconfig"
componentconfigv1alpha1 "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1"
"k8s.io/kubernetes/pkg/capabilities"
"k8s.io/kubernetes/pkg/client/chaosclient"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/credentialprovider"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet"
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
"k8s.io/kubernetes/pkg/kubelet/certificate"
"k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/pkg/kubelet/config"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/dockershim"
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
dockerremote "k8s.io/kubernetes/pkg/kubelet/dockershim/remote"
"k8s.io/kubernetes/pkg/kubelet/eviction"
evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
"k8s.io/kubernetes/pkg/kubelet/server"
"k8s.io/kubernetes/pkg/kubelet/server/streaming"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/util/configz"
"k8s.io/kubernetes/pkg/util/flock"
kubeio "k8s.io/kubernetes/pkg/util/io"
"k8s.io/kubernetes/pkg/util/mount"
nodeutil "k8s.io/kubernetes/pkg/util/node"
"k8s.io/kubernetes/pkg/util/oom"
"k8s.io/kubernetes/pkg/util/rlimit"
"k8s.io/kubernetes/pkg/version"
)
const (
// Kubelet component name
componentKubelet = "kubelet"
)
// NewKubeletCommand creates a *cobra.Command object with default parameters
func NewKubeletCommand() *cobra.Command {
s := options.NewKubeletServer()
s.AddFlags(pflag.CommandLine)
cmd := &cobra.Command{
Use: componentKubelet,
Long: `The kubelet is the primary "node agent" that runs on each
node. The kubelet works in terms of a PodSpec. A PodSpec is a YAML or JSON object
that describes a pod. The kubelet takes a set of PodSpecs that are provided through
various mechanisms (primarily through the apiserver) and ensures that the containers
described in those PodSpecs are running and healthy. The kubelet doesn't manage
containers which were not created by Kubernetes.
Other than from a PodSpec from the apiserver, there are three ways that a container
manifest can be provided to the Kubelet.
File: Path passed as a flag on the command line. Files under this path will be monitored
periodically for updates. The monitoring period is 20s by default and is configurable
via a flag.
HTTP endpoint: HTTP endpoint passed as a parameter on the command line. This endpoint
is checked every 20 seconds (also configurable with a flag).
HTTP server: The kubelet can also listen for HTTP and respond to a simple API
(underspec'd currently) to submit a new manifest.`,
Run: func(cmd *cobra.Command, args []string) {
},
}
return cmd
}
// UnsecuredKubeletDeps returns a KubeletDeps suitable for being run, or an error if the server setup
// is not valid. It will not start any background processes, and does not include authentication/authorization
func UnsecuredKubeletDeps(s *options.KubeletServer) (*kubelet.KubeletDeps, error) {
// Initialize the TLS Options
tlsOptions, err := InitializeTLS(&s.KubeletFlags, &s.KubeletConfiguration)
if err != nil {
return nil, err
}
mounter := mount.New(s.ExperimentalMounterPath)
var writer kubeio.Writer = &kubeio.StdWriter{}
if s.Containerized {
glog.V(2).Info("Running kubelet in containerized mode (experimental)")
mounter = mount.NewNsenterMounter()
writer = &kubeio.NsenterWriter{}
}
var dockerClient libdocker.Interface
if s.ContainerRuntime == kubetypes.DockerContainerRuntime {
dockerClient = libdocker.ConnectToDockerOrDie(s.DockerEndpoint, s.RuntimeRequestTimeout.Duration,
s.ImagePullProgressDeadline.Duration)
} else {
dockerClient = nil
}
return &kubelet.KubeletDeps{
Auth: nil, // default does not enforce auth[nz]
CAdvisorInterface: nil, // cadvisor.New launches background processes (bg http.ListenAndServe, and some bg cleaners), not set here
Cloud: nil, // cloud provider might start background processes
ContainerManager: nil,
DockerClient: dockerClient,
KubeClient: nil,
ExternalKubeClient: nil,
Mounter: mounter,
NetworkPlugins: ProbeNetworkPlugins(s.NetworkPluginDir, s.CNIConfDir, s.CNIBinDir),
OOMAdjuster: oom.NewOOMAdjuster(),
OSInterface: kubecontainer.RealOS{},
Writer: writer,
VolumePlugins: ProbeVolumePlugins(s.VolumePluginDir),
TLSOptions: tlsOptions,
}, nil
}
func getKubeClient(s *options.KubeletServer) (*clientset.Clientset, error) {
clientConfig, err := CreateAPIServerClientConfig(s)
if err == nil {
kubeClient, err := clientset.NewForConfig(clientConfig)
if err != nil {
return nil, err
}
return kubeClient, nil
}
return nil, err
}
// Tries to download the kubelet-<node-name> configmap from "kube-system" namespace via the API server and returns a JSON string or error
func getRemoteKubeletConfig(s *options.KubeletServer, kubeDeps *kubelet.KubeletDeps) (string, error) {
// TODO(mtaufen): should probably cache clientset and pass into this function rather than regenerate on every request
kubeClient, err := getKubeClient(s)
if err != nil {
return "", err
}
configmap, err := func() (*v1.ConfigMap, error) {
var nodename types.NodeName
hostname := nodeutil.GetHostname(s.HostnameOverride)
if kubeDeps != nil && kubeDeps.Cloud != nil {
instances, ok := kubeDeps.Cloud.Instances()
if !ok {
err = fmt.Errorf("failed to get instances from cloud provider, can't determine nodename")
return nil, err
}
nodename, err = instances.CurrentNodeName(hostname)
if err != nil {
err = fmt.Errorf("error fetching current instance name from cloud provider: %v", err)
return nil, err
}
// look for kubelet-<node-name> configmap from "kube-system"
configmap, err := kubeClient.CoreV1Client.ConfigMaps("kube-system").Get(fmt.Sprintf("kubelet-%s", nodename), metav1.GetOptions{})
if err != nil {
return nil, err
}
return configmap, nil
}
// No cloud provider yet, so can't get the nodename via Cloud.Instances().CurrentNodeName(hostname), try just using the hostname
configmap, err := kubeClient.CoreV1Client.ConfigMaps("kube-system").Get(fmt.Sprintf("kubelet-%s", hostname), metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("cloud provider was nil, and attempt to use hostname to find config resulted in: %v", err)
}
return configmap, nil
}()
if err != nil {
return "", err
}
// When we create the KubeletConfiguration configmap, we put a json string
// representation of the config in a `kubelet.config` key.
jsonstr, ok := configmap.Data["kubelet.config"]
if !ok {
return "", fmt.Errorf("KubeletConfiguration configmap did not contain a value with key `kubelet.config`")
}
return jsonstr, nil
}
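// exampleKubeletConfigMap is an illustrative sketch, not part of the upstream
// file: it shows the ConfigMap shape getRemoteKubeletConfig looks for -- a
// "kubelet-<node-name>" ConfigMap in "kube-system" whose "kubelet.config" key
// holds a JSON-serialized KubeletConfiguration. The helper name and the JSON
// payload passed in are assumptions for illustration only.
func exampleKubeletConfigMap(nodeName types.NodeName, kubeletConfigJSON string) *v1.ConfigMap {
	return &v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("kubelet-%s", nodeName),
			Namespace: "kube-system",
		},
		Data: map[string]string{
			"kubelet.config": kubeletConfigJSON,
		},
	}
}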
func startKubeletConfigSyncLoop(s *options.KubeletServer, currentKC string) {
glog.Infof("Starting Kubelet configuration sync loop")
go func() {
wait.PollInfinite(30*time.Second, func() (bool, error) {
glog.Infof("Checking API server for new Kubelet configuration.")
remoteKC, err := getRemoteKubeletConfig(s, nil)
if err == nil {
// Detect new config by comparing with the last JSON string we extracted.
if remoteKC != currentKC {
glog.Info("Found new Kubelet configuration via API server, restarting!")
os.Exit(0)
}
} else {
glog.Infof("Did not find a configuration for this Kubelet via API server: %v", err)
}
return false, nil // Always return (false, nil) so we poll forever.
})
}()
}
// Try to check for config on the API server, return that config if we get it, and start
// a background thread that checks for updates to configs.
func initKubeletConfigSync(s *options.KubeletServer) (*componentconfig.KubeletConfiguration, error) {
jsonstr, err := getRemoteKubeletConfig(s, nil)
if err == nil {
// We will compare future API server config against the config we just got (jsonstr):
startKubeletConfigSyncLoop(s, jsonstr)
// Convert json from API server to external type struct, and convert that to internal type struct
extKC := componentconfigv1alpha1.KubeletConfiguration{}
err := runtime.DecodeInto(api.Codecs.UniversalDecoder(), []byte(jsonstr), &extKC)
if err != nil {
return nil, err
}
api.Scheme.Default(&extKC)
kc := componentconfig.KubeletConfiguration{}
err = api.Scheme.Convert(&extKC, &kc, nil)
if err != nil {
return nil, err
}
return &kc, nil
} else {
// Couldn't get a configuration from the API server yet.
// Restart as soon as anything comes back from the API server.
startKubeletConfigSyncLoop(s, "")
return nil, err
}
}
// Run runs the specified KubeletServer with the given KubeletDeps. This should never exit.
// The kubeDeps argument may be nil - if so, it is initialized from the settings on KubeletServer.
// Otherwise, the caller is assumed to have set up the KubeletDeps object and a default one will
// not be generated.
func Run(s *options.KubeletServer, kubeDeps *kubelet.KubeletDeps) error {
if err := run(s, kubeDeps); err != nil {
return fmt.Errorf("failed to run Kubelet: %v", err)
}
return nil
}
func checkPermissions() error {
if uid := os.Getuid(); uid != 0 {
return fmt.Errorf("Kubelet needs to run as uid `0`. It is being run as %d", uid)
}
// TODO: Check if kubelet is running in the `initial` user namespace.
// http://man7.org/linux/man-pages/man7/user_namespaces.7.html
return nil
}
func setConfigz(cz *configz.Config, kc *componentconfig.KubeletConfiguration) {
tmp := componentconfigv1alpha1.KubeletConfiguration{}
api.Scheme.Convert(kc, &tmp, nil)
cz.Set(tmp)
}
func initConfigz(kc *componentconfig.KubeletConfiguration) (*configz.Config, error) {
cz, err := configz.New("componentconfig")
if err == nil {
setConfigz(cz, kc)
} else {
glog.Errorf("unable to register configz: %s", err)
}
return cz, err
}
// validateConfig validates configuration of Kubelet and returns an error if the input configuration is invalid.
func validateConfig(s *options.KubeletServer) error {
if !s.CgroupsPerQOS && len(s.EnforceNodeAllocatable) > 0 {
return fmt.Errorf("Node Allocatable enforcement is not supported unless Cgroups Per QOS feature is turned on")
}
if s.SystemCgroups != "" && s.CgroupRoot == "" {
return fmt.Errorf("invalid configuration: system container was specified and cgroup root was not specified")
}
for _, val := range s.EnforceNodeAllocatable {
switch val {
case cm.NodeAllocatableEnforcementKey:
case cm.SystemReservedEnforcementKey:
case cm.KubeReservedEnforcementKey:
continue
default:
return fmt.Errorf("invalid option %q specified for EnforceNodeAllocatable setting. Valid options are %q, %q or %q", val, cm.NodeAllocatableEnforcementKey, cm.SystemReservedEnforcementKey, cm.KubeReservedEnforcementKey)
}
}
return nil
}
// makeEventRecorder sets up kubeDeps.Recorder if it's nil. It's a no-op otherwise.
func makeEventRecorder(s *componentconfig.KubeletConfiguration, kubeDeps *kubelet.KubeletDeps, nodeName types.NodeName) {
if kubeDeps.Recorder != nil {
return
}
eventBroadcaster := record.NewBroadcaster()
kubeDeps.Recorder = eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: componentKubelet, Host: string(nodeName)})
eventBroadcaster.StartLogging(glog.V(3).Infof)
if kubeDeps.EventClient != nil {
glog.V(4).Infof("Sending events to api server.")
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeDeps.EventClient.Events("")})
} else {
glog.Warning("No api server defined - no events will be sent to API server.")
}
}
func run(s *options.KubeletServer, kubeDeps *kubelet.KubeletDeps) (err error) {
// TODO: this should be replaced by a --standalone flag
standaloneMode := (len(s.APIServerList) == 0 && !s.RequireKubeConfig)
if s.ExitOnLockContention && s.LockFilePath == "" {
return errors.New("cannot exit on lock file contention: no lock file specified")
}
done := make(chan struct{})
if s.LockFilePath != "" {
glog.Infof("acquiring file lock on %q", s.LockFilePath)
if err := flock.Acquire(s.LockFilePath); err != nil {
return fmt.Errorf("unable to acquire file lock on %q: %v", s.LockFilePath, err)
}
if s.ExitOnLockContention {
glog.Infof("watching for inotify events for: %v", s.LockFilePath)
if err := watchForLockfileContention(s.LockFilePath, done); err != nil {
return err
}
}
}
// Set feature gates based on the value in KubeletConfiguration
err = utilfeature.DefaultFeatureGate.Set(s.KubeletConfiguration.FeatureGates)
if err != nil {
return err
}
// Register current configuration with /configz endpoint
cfgz, cfgzErr := initConfigz(&s.KubeletConfiguration)
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) {
// Look for config on the API server. If it exists, replace s.KubeletConfiguration
// with it and continue. initKubeletConfigSync also starts the background thread that checks for new config.
// Don't do dynamic Kubelet configuration in runonce mode
if s.RunOnce == false {
remoteKC, err := initKubeletConfigSync(s)
if err == nil {
// Update s (KubeletServer) with new config from API server
s.KubeletConfiguration = *remoteKC
// Ensure that /configz is up to date with the new config
if cfgzErr != nil {
glog.Errorf("was unable to register configz before due to %s, will not be able to set now", cfgzErr)
} else {
setConfigz(cfgz, &s.KubeletConfiguration)
}
// Update feature gates from the new config
err = utilfeature.DefaultFeatureGate.Set(s.KubeletConfiguration.FeatureGates)
if err != nil {
return err
}
} else {
glog.Errorf("failed to init dynamic Kubelet configuration sync: %v", err)
}
}
}
// Validate configuration.
if err := validateConfig(s); err != nil {
return err
}
if kubeDeps == nil {
kubeDeps, err = UnsecuredKubeletDeps(s)
if err != nil {
return err
}
}
if kubeDeps.Cloud == nil {
if !cloudprovider.IsExternal(s.CloudProvider) && s.CloudProvider != componentconfigv1alpha1.AutoDetectCloudProvider {
cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
if err != nil {
return err
}
if cloud == nil {
glog.V(2).Infof("No cloud provider specified: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)
} else {
glog.V(2).Infof("Successfully initialized cloud provider: %q from the config file: %q\n", s.CloudProvider, s.CloudConfigFile)
}
kubeDeps.Cloud = cloud
}
}
nodeName, err := getNodeName(kubeDeps.Cloud, nodeutil.GetHostname(s.HostnameOverride))
if err != nil {
return err
}
if s.BootstrapKubeconfig != "" {
if err := bootstrapClientCert(s.KubeConfig.Value(), s.BootstrapKubeconfig, s.CertDirectory, nodeName); err != nil {
return err
}
}
// initialize clients if any of the clients are not provided
if kubeDeps.KubeClient == nil || kubeDeps.ExternalKubeClient == nil || kubeDeps.EventClient == nil {
var kubeClient clientset.Interface
var eventClient v1core.EventsGetter
var externalKubeClient clientgoclientset.Interface
clientConfig, err := CreateAPIServerClientConfig(s)
var clientCertificateManager certificate.Manager
if err == nil {
if utilfeature.DefaultFeatureGate.Enabled(features.RotateKubeletClientCertificate) {
clientCertificateManager, err = certificate.NewKubeletClientCertificateManager(s.CertDirectory, nodeName, clientConfig.CertData, clientConfig.KeyData, clientConfig.CertFile, clientConfig.KeyFile)
if err != nil {
return err
}
if err := updateTransport(clientConfig, clientCertificateManager); err != nil {
return err
}
}
kubeClient, err = clientset.NewForConfig(clientConfig)
if err != nil {
glog.Warningf("New kubeClient from clientConfig error: %v", err)
} else if kubeClient.Certificates() != nil && clientCertificateManager != nil {
glog.V(2).Info("Starting client certificate rotation.")
clientCertificateManager.SetCertificateSigningRequestClient(kubeClient.Certificates().CertificateSigningRequests())
clientCertificateManager.Start()
}
externalKubeClient, err = clientgoclientset.NewForConfig(clientConfig)
if err != nil {
glog.Warningf("New kubeClient from clientConfig error: %v", err)
}
// make a separate client for events
eventClientConfig := *clientConfig
eventClientConfig.QPS = float32(s.EventRecordQPS)
eventClientConfig.Burst = int(s.EventBurst)
eventClient, err = clientgoclientset.NewForConfig(&eventClientConfig)
if err != nil {
glog.Warningf("Failed to create API Server client: %v", err)
}
} else {
switch {
case s.RequireKubeConfig:
return fmt.Errorf("invalid kubeconfig: %v", err)
case standaloneMode:
glog.Warningf("No API client: %v", err)
case s.KubeConfig.Provided():
glog.Warningf("Invalid kubeconfig: %v", err)
}
}
kubeDeps.KubeClient = kubeClient
kubeDeps.ExternalKubeClient = externalKubeClient
kubeDeps.EventClient = eventClient
}
if kubeDeps.Auth == nil {
auth, err := BuildAuth(nodeName, kubeDeps.ExternalKubeClient, s.KubeletConfiguration)
if err != nil {
return err
}
kubeDeps.Auth = auth
}
if kubeDeps.CAdvisorInterface == nil {
kubeDeps.CAdvisorInterface, err = cadvisor.New(s.Address, uint(s.CAdvisorPort), s.ContainerRuntime, s.RootDirectory)
if err != nil {
return err
}
}
// Setup event recorder if required.
makeEventRecorder(&s.KubeletConfiguration, kubeDeps, nodeName)
if kubeDeps.ContainerManager == nil {
if s.CgroupsPerQOS && s.CgroupRoot == "" {
glog.Infof("--cgroups-per-qos enabled, but --cgroup-root was not specified. defaulting to /")
s.CgroupRoot = "/"
}
kubeReserved, err := parseResourceList(s.KubeReserved)
if err != nil {
return err
}
systemReserved, err := parseResourceList(s.SystemReserved)
if err != nil {
return err
}
var hardEvictionThresholds []evictionapi.Threshold
// If the user requested to ignore eviction thresholds, then do not set valid values for hardEvictionThresholds here.
if !s.ExperimentalNodeAllocatableIgnoreEvictionThreshold {
hardEvictionThresholds, err = eviction.ParseThresholdConfig([]string{}, s.EvictionHard, "", "", "")
if err != nil {
return err
}
}
experimentalQOSReserved, err := cm.ParseQOSReserved(s.ExperimentalQOSReserved)
if err != nil {
return err
}
kubeDeps.ContainerManager, err = cm.NewContainerManager(
kubeDeps.Mounter,
kubeDeps.CAdvisorInterface,
cm.NodeConfig{
RuntimeCgroupsName: s.RuntimeCgroups,
SystemCgroupsName: s.SystemCgroups,
KubeletCgroupsName: s.KubeletCgroups,
ContainerRuntime: s.ContainerRuntime,
CgroupsPerQOS: s.CgroupsPerQOS,
CgroupRoot: s.CgroupRoot,
CgroupDriver: s.CgroupDriver,
ProtectKernelDefaults: s.ProtectKernelDefaults,
NodeAllocatableConfig: cm.NodeAllocatableConfig{
KubeReservedCgroupName: s.KubeReservedCgroup,
SystemReservedCgroupName: s.SystemReservedCgroup,
EnforceNodeAllocatable: sets.NewString(s.EnforceNodeAllocatable...),
KubeReserved: kubeReserved,
SystemReserved: systemReserved,
HardEvictionThresholds: hardEvictionThresholds,
},
ExperimentalQOSReserved: *experimentalQOSReserved,
},
s.ExperimentalFailSwapOn,
kubeDeps.Recorder)
if err != nil {
return err
}
}
if err := checkPermissions(); err != nil {
glog.Error(err)
}
utilruntime.ReallyCrash = s.ReallyCrashForTesting
rand.Seed(time.Now().UTC().UnixNano())
// TODO(vmarmol): Do this through container config.
oomAdjuster := kubeDeps.OOMAdjuster
if err := oomAdjuster.ApplyOOMScoreAdj(0, int(s.OOMScoreAdj)); err != nil {
glog.Warning(err)
}
if err := RunKubelet(&s.KubeletFlags, &s.KubeletConfiguration, kubeDeps, s.RunOnce, standaloneMode); err != nil {
return err
}
if s.HealthzPort > 0 {
healthz.DefaultHealthz()
go wait.Until(func() {
err := http.ListenAndServe(net.JoinHostPort(s.HealthzBindAddress, strconv.Itoa(int(s.HealthzPort))), nil)
if err != nil {
glog.Errorf("Starting health server failed: %v", err)
}
}, 5*time.Second, wait.NeverStop)
}
if s.RunOnce {
return nil
}
<-done
return nil
}
func updateTransport(clientConfig *restclient.Config, clientCertificateManager certificate.Manager) error {
if clientConfig.Transport != nil {
return fmt.Errorf("there is already a transport configured")
}
tlsConfig, err := restclient.TLSConfigFor(clientConfig)
if err != nil {
return fmt.Errorf("unable to configure TLS for the rest client: %v", err)
}
if tlsConfig == nil {
tlsConfig = &tls.Config{}
}
tlsConfig.Certificates = nil
tlsConfig.GetClientCertificate = func(requestInfo *tls.CertificateRequestInfo) (*tls.Certificate, error) {
cert := clientCertificateManager.Current()
if cert == nil {
return &tls.Certificate{Certificate: nil}, nil
}
return cert, nil
}
clientConfig.Transport = utilnet.SetTransportDefaults(&http.Transport{
Proxy: http.ProxyFromEnvironment,
TLSHandshakeTimeout: 10 * time.Second,
TLSClientConfig: tlsConfig,
MaxIdleConnsPerHost: 25,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
})
clientConfig.CertData = nil
clientConfig.KeyData = nil
clientConfig.CertFile = ""
clientConfig.KeyFile = ""
clientConfig.CAData = nil
clientConfig.CAFile = ""
return nil
}
// getNodeName returns the node name according to the cloud provider
// if cloud provider is specified. Otherwise, returns the hostname of the node.
func getNodeName(cloud cloudprovider.Interface, hostname string) (types.NodeName, error) {
if cloud == nil {
return types.NodeName(hostname), nil
}
instances, ok := cloud.Instances()
if !ok {
return "", fmt.Errorf("failed to get instances from cloud provider")
}
nodeName, err := instances.CurrentNodeName(hostname)
if err != nil {
return "", fmt.Errorf("error fetching current node name from cloud provider: %v", err)
}
glog.V(2).Infof("cloud provider determined current node name to be %s", nodeName)
return nodeName, nil
}
// InitializeTLS checks for a configured TLSCertFile and TLSPrivateKeyFile: if unspecified a new self-signed
// certificate and key file are generated. Returns a configured server.TLSOptions object.
func InitializeTLS(kf *options.KubeletFlags, kc *componentconfig.KubeletConfiguration) (*server.TLSOptions, error) {
if !utilfeature.DefaultFeatureGate.Enabled(features.RotateKubeletServerCertificate) && kc.TLSCertFile == "" && kc.TLSPrivateKeyFile == "" {
kc.TLSCertFile = path.Join(kc.CertDirectory, "kubelet.crt")
kc.TLSPrivateKeyFile = path.Join(kc.CertDirectory, "kubelet.key")
canReadCertAndKey, err := certutil.CanReadCertAndKey(kc.TLSCertFile, kc.TLSPrivateKeyFile)
if err != nil {
return nil, err
}
if !canReadCertAndKey {
cert, key, err := certutil.GenerateSelfSignedCertKey(nodeutil.GetHostname(kf.HostnameOverride), nil, nil)
if err != nil {
return nil, fmt.Errorf("unable to generate self signed cert: %v", err)
}
if err := certutil.WriteCert(kc.TLSCertFile, cert); err != nil {
return nil, err
}
if err := certutil.WriteKey(kc.TLSPrivateKeyFile, key); err != nil {
return nil, err
}
glog.V(4).Infof("Using self-signed cert (%s, %s)", kc.TLSCertFile, kc.TLSPrivateKeyFile)
}
}
tlsOptions := &server.TLSOptions{
Config: &tls.Config{
// Can't use SSLv3 because of POODLE and BEAST
// Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher
// Can't use TLSv1.1 because of RC4 cipher usage
MinVersion: tls.VersionTLS12,
},
CertFile: kc.TLSCertFile,
KeyFile: kc.TLSPrivateKeyFile,
}
if len(kc.Authentication.X509.ClientCAFile) > 0 {
clientCAs, err := certutil.NewPool(kc.Authentication.X509.ClientCAFile)
if err != nil {
return nil, fmt.Errorf("unable to load client CA file %s: %v", kc.Authentication.X509.ClientCAFile, err)
}
// Specify allowed CAs for client certificates
tlsOptions.Config.ClientCAs = clientCAs
// Populate PeerCertificates in requests, but don't reject connections without verified certificates
tlsOptions.Config.ClientAuth = tls.RequestClientCert
}
return tlsOptions, nil
}
func kubeconfigClientConfig(s *options.KubeletServer) (*restclient.Config, error) {
if s.RequireKubeConfig {
// Ignores the values of s.APIServerList
return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.KubeConfig.Value()},
&clientcmd.ConfigOverrides{},
).ClientConfig()
}
return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
&clientcmd.ClientConfigLoadingRules{ExplicitPath: s.KubeConfig.Value()},
&clientcmd.ConfigOverrides{ClusterInfo: clientcmdapi.Cluster{Server: s.APIServerList[0]}},
).ClientConfig()
}
// createClientConfig creates a client configuration from the command line
// arguments. If --kubeconfig is explicitly set, it will be used. If it is
// not set, we attempt to load the default kubeconfig file, and if we cannot,
// we fall back to the default client with no auth - this fallback does not, in
// and of itself, constitute an error.
func createClientConfig(s *options.KubeletServer) (*restclient.Config, error) {
if s.RequireKubeConfig {
return kubeconfigClientConfig(s)
}
// TODO: handle a new --standalone flag that bypasses kubeconfig loading and returns no error.
// DEPRECATED: all subsequent code is deprecated
if len(s.APIServerList) == 0 {
return nil, fmt.Errorf("no api servers specified")
}
// TODO: adapt Kube client to support LB over several servers
if len(s.APIServerList) > 1 {
glog.Infof("Multiple api servers specified. Picking first one")
}
if s.KubeConfig.Provided() {
return kubeconfigClientConfig(s)
}
// If KubeConfig was not provided, try to load the default file, then fall back
// to a default auth config.
clientConfig, err := kubeconfigClientConfig(s)
if err != nil {
glog.Warningf("Could not load kubeconfig file %s: %v. Using default client config instead.", s.KubeConfig, err)
authInfo := &clientauth.Info{}
authConfig, err := authInfo.MergeWithConfig(restclient.Config{})
if err != nil {
return nil, err
}
authConfig.Host = s.APIServerList[0]
clientConfig = &authConfig
}
return clientConfig, nil
}
// CreateAPIServerClientConfig generates a client.Config from command line flags,
// including api-server-list, via createClientConfig and then injects chaos into
// the configuration via addChaosToClientConfig. This func is exported to support
// integration with third party kubelet extensions (e.g. kubernetes-mesos).
func CreateAPIServerClientConfig(s *options.KubeletServer) (*restclient.Config, error) {
clientConfig, err := createClientConfig(s)
if err != nil {
return nil, err
}
clientConfig.ContentType = s.ContentType
// Override kubeconfig qps/burst settings from flags
clientConfig.QPS = float32(s.KubeAPIQPS)
clientConfig.Burst = int(s.KubeAPIBurst)
addChaosToClientConfig(s, clientConfig)
return clientConfig, nil
}
// addChaosToClientConfig injects random errors into client connections if configured.
func addChaosToClientConfig(s *options.KubeletServer, config *restclient.Config) {
if s.ChaosChance != 0.0 {
config.WrapTransport = func(rt http.RoundTripper) http.RoundTripper {
seed := chaosclient.NewSeed(1)
// TODO: introduce a standard chaos package with more tunables - this is just a proof of concept
// TODO: introduce random latency and stalls
return chaosclient.NewChaosRoundTripper(rt, chaosclient.LogChaos, seed.P(s.ChaosChance, chaosclient.ErrSimulatedConnectionResetByPeer))
}
}
}
// RunKubelet is responsible for setting up and running a kubelet. It is used in three different applications:
// 1 Integration tests
// 2 Kubelet binary
// 3 Standalone 'kubernetes' binary
// Eventually, #2 will be replaced with instances of #3
func RunKubelet(kubeFlags *options.KubeletFlags, kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *kubelet.KubeletDeps, runOnce bool, standaloneMode bool) error {
hostname := nodeutil.GetHostname(kubeFlags.HostnameOverride)
// Query the cloud provider for our node name, default to hostname if kcfg.Cloud == nil
nodeName, err := getNodeName(kubeDeps.Cloud, hostname)
if err != nil {
return err
}
// Setup event recorder if required.
makeEventRecorder(kubeCfg, kubeDeps, nodeName)
// TODO(mtaufen): I moved the validation of these fields here, from UnsecuredKubeletConfig,
// so that I could remove the associated fields from KubeletConfig. I would
// prefer this to be done as part of an independent validation step on the
// KubeletConfiguration. But as far as I can tell, we don't have an explicit
// place for validation of the KubeletConfiguration yet.
hostNetworkSources, err := kubetypes.GetValidatedSources(kubeCfg.HostNetworkSources)
if err != nil {
return err
}
hostPIDSources, err := kubetypes.GetValidatedSources(kubeCfg.HostPIDSources)
if err != nil {
return err
}
hostIPCSources, err := kubetypes.GetValidatedSources(kubeCfg.HostIPCSources)
if err != nil {
return err
}
privilegedSources := capabilities.PrivilegedSources{
HostNetworkSources: hostNetworkSources,
HostPIDSources: hostPIDSources,
HostIPCSources: hostIPCSources,
}
capabilities.Setup(kubeCfg.AllowPrivileged, privilegedSources, 0)
credentialprovider.SetPreferredDockercfgPath(kubeCfg.RootDirectory)
glog.V(2).Infof("Using root directory: %v", kubeCfg.RootDirectory)
builder := kubeDeps.Builder
if builder == nil {
builder = CreateAndInitKubelet
}
if kubeDeps.OSInterface == nil {
kubeDeps.OSInterface = kubecontainer.RealOS{}
}
k, err := builder(kubeCfg, kubeDeps, &kubeFlags.ContainerRuntimeOptions, standaloneMode, kubeFlags.HostnameOverride, kubeFlags.NodeIP, kubeFlags.ProviderID)
if err != nil {
return fmt.Errorf("failed to create kubelet: %v", err)
}
// NewMainKubelet should have set up a pod source config if one didn't exist
// when the builder was run. This is just a precaution.
if kubeDeps.PodConfig == nil {
return fmt.Errorf("failed to create kubelet, pod source config was nil")
}
podCfg := kubeDeps.PodConfig
rlimit.RlimitNumFiles(uint64(kubeCfg.MaxOpenFiles))
// process pods and exit.
if runOnce {
if _, err := k.RunOnce(podCfg.Updates()); err != nil {
return fmt.Errorf("runonce failed: %v", err)
}
glog.Infof("Started kubelet %s as runonce", version.Get().String())
} else {
startKubelet(k, podCfg, kubeCfg, kubeDeps)
glog.Infof("Started kubelet %s", version.Get().String())
}
return nil
}
func startKubelet(k kubelet.KubeletBootstrap, podCfg *config.PodConfig, kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *kubelet.KubeletDeps) {
// start the kubelet
go wait.Until(func() { k.Run(podCfg.Updates()) }, 0, wait.NeverStop)
// start the kubelet server
if kubeCfg.EnableServer {
go wait.Until(func() {
k.ListenAndServe(net.ParseIP(kubeCfg.Address), uint(kubeCfg.Port), kubeDeps.TLSOptions, kubeDeps.Auth, kubeCfg.EnableDebuggingHandlers, kubeCfg.EnableContentionProfiling)
}, 0, wait.NeverStop)
}
if kubeCfg.ReadOnlyPort > 0 {
go wait.Until(func() {
k.ListenAndServeReadOnly(net.ParseIP(kubeCfg.Address), uint(kubeCfg.ReadOnlyPort))
}, 0, wait.NeverStop)
}
}
func CreateAndInitKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *kubelet.KubeletDeps, crOptions *options.ContainerRuntimeOptions, standaloneMode bool, hostnameOverride, nodeIP, providerID string) (k kubelet.KubeletBootstrap, err error) {
// TODO: block until all sources have delivered at least one update to the channel, or break the sync loop
// up into "per source" synchronizations
k, err = kubelet.NewMainKubelet(kubeCfg, kubeDeps, crOptions, standaloneMode, hostnameOverride, nodeIP, providerID)
if err != nil {
return nil, err
}
k.BirthCry()
k.StartGarbageCollection()
return k, nil
}
// parseResourceList parses the given configuration map into an API
// ResourceList or returns an error.
func parseResourceList(m componentconfig.ConfigurationMap) (v1.ResourceList, error) {
if len(m) == 0 {
return nil, nil
}
rl := make(v1.ResourceList)
for k, v := range m {
switch v1.ResourceName(k) {
// CPU, memory and local storage resources are supported.
case v1.ResourceCPU, v1.ResourceMemory, v1.ResourceStorage:
q, err := resource.ParseQuantity(v)
if err != nil {
return nil, err
}
if q.Sign() == -1 {
return nil, fmt.Errorf("resource quantity for %q cannot be negative: %v", k, v)
}
rl[v1.ResourceName(k)] = q
default:
return nil, fmt.Errorf("cannot reserve %q resource", k)
}
}
return rl, nil
}
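// exampleParseReservations is an illustrative sketch, not part of the upstream
// file: it shows the kind of input parseResourceList handles, e.g. the map
// built from a --kube-reserved or --system-reserved flag. The quantity values
// below are arbitrary examples.
func exampleParseReservations() (v1.ResourceList, error) {
	reserved := componentconfig.ConfigurationMap{
		"cpu":    "200m",
		"memory": "250Mi",
	}
	// Unknown resource names or negative quantities are rejected.
	return parseResourceList(reserved)
}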
// RunDockershim only starts the dockershim in current process. This is only used for cri validate testing purpose
// TODO(random-liu): Move this to a separate binary.
func RunDockershim(c *componentconfig.KubeletConfiguration, r *options.ContainerRuntimeOptions) error {
// Create docker client.
dockerClient := libdocker.ConnectToDockerOrDie(r.DockerEndpoint, c.RuntimeRequestTimeout.Duration,
r.ImagePullProgressDeadline.Duration)
// Initialize network plugin settings.
binDir := r.CNIBinDir
if binDir == "" {
binDir = r.NetworkPluginDir
}
nh := &kubelet.NoOpLegacyHost{}
pluginSettings := dockershim.NetworkPluginSettings{
HairpinMode: componentconfig.HairpinMode(c.HairpinMode),
NonMasqueradeCIDR: c.NonMasqueradeCIDR,
PluginName: r.NetworkPluginName,
PluginConfDir: r.CNIConfDir,
PluginBinDir: binDir,
MTU: int(r.NetworkPluginMTU),
LegacyRuntimeHost: nh,
}
// Initialize streaming configuration. (Not using TLS now)
streamingConfig := &streaming.Config{
// Use a relative redirect (no scheme or host).
BaseURL: &url.URL{Path: "/cri/"},
StreamIdleTimeout: c.StreamingConnectionIdleTimeout.Duration,
StreamCreationTimeout: streaming.DefaultConfig.StreamCreationTimeout,
SupportedRemoteCommandProtocols: streaming.DefaultConfig.SupportedRemoteCommandProtocols,
SupportedPortForwardProtocols: streaming.DefaultConfig.SupportedPortForwardProtocols,
}
ds, err := dockershim.NewDockerService(dockerClient, c.SeccompProfileRoot, r.PodSandboxImage,
streamingConfig, &pluginSettings, c.RuntimeCgroups, c.CgroupDriver, r.DockerExecHandlerName, r.DockershimRootDirectory,
r.DockerDisableSharedPID)
if err != nil {
return err
}
if err := ds.Start(); err != nil {
return err
}
glog.V(2).Infof("Starting the GRPC server for the docker CRI shim.")
server := dockerremote.NewDockerServer(c.RemoteRuntimeEndpoint, ds)
if err := server.Start(); err != nil {
return err
}
// Start the streaming server
addr := net.JoinHostPort(c.Address, strconv.Itoa(int(c.Port)))
return http.ListenAndServe(addr, ds)
}
| []
| []
| []
| [] | [] | go | null | null | null |
cmd/callgraph/main.go | // Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// callgraph: a tool for reporting the call graph of a Go program.
// See Usage for details, or run with -help.
package main // import "golang.org/x/tools/cmd/callgraph"
// TODO(adonovan):
//
// Features:
// - restrict graph to a single package
// - output
// - functions reachable from root (use digraph tool?)
// - unreachable functions (use digraph tool?)
// - dynamic (runtime) types
// - indexed output (numbered nodes)
// - JSON output
// - additional template fields:
// callee file/line/col
import (
"bufio"
"bytes"
"flag"
"fmt"
"go/build"
"go/token"
"io"
"log"
"os"
"runtime"
"text/template"
"golang.org/x/tools/go/buildutil"
"golang.org/x/tools/go/callgraph"
"golang.org/x/tools/go/callgraph/cha"
"golang.org/x/tools/go/callgraph/rta"
"golang.org/x/tools/go/callgraph/static"
"golang.org/x/tools/go/packages"
"golang.org/x/tools/go/pointer"
"golang.org/x/tools/go/ssa"
"golang.org/x/tools/go/ssa/ssautil"
)
// flags
var (
algoFlag = flag.String("algo", "rta",
`Call graph construction algorithm (static, cha, rta, pta)`)
testFlag = flag.Bool("test", false,
"Loads test code (*_test.go) for imported packages")
formatFlag = flag.String("format",
"{{.Caller}}\t--{{.Dynamic}}-{{.Line}}:{{.Column}}-->\t{{.Callee}}",
"A template expression specifying how to format an edge")
ptalogFlag = flag.String("ptalog", "",
"Location of the points-to analysis log file, or empty to disable logging.")
)
func init() {
flag.Var((*buildutil.TagsFlag)(&build.Default.BuildTags), "tags", buildutil.TagsFlagDoc)
}
const Usage = `callgraph: display the call graph of a Go program.
Usage:
callgraph [-algo=static|cha|rta|pta] [-test] [-format=...] package...
Flags:
-algo Specifies the call-graph construction algorithm, one of:
static static calls only (unsound)
cha Class Hierarchy Analysis
rta Rapid Type Analysis
pta inclusion-based Points-To Analysis
The algorithms are ordered by increasing precision in their
treatment of dynamic calls (and thus also computational cost).
RTA and PTA require a whole program (main or test), and
include only functions reachable from main.
-test Include the package's tests in the analysis.
-format Specifies the format in which each call graph edge is displayed.
One of:
digraph output suitable for input to
golang.org/x/tools/cmd/digraph.
graphviz output in AT&T GraphViz (.dot) format.
All other values are interpreted using text/template syntax.
The default value is:
{{.Caller}}\t--{{.Dynamic}}-{{.Line}}:{{.Column}}-->\t{{.Callee}}
The structure passed to the template is (effectively):
type Edge struct {
Caller *ssa.Function // calling function
Callee *ssa.Function // called function
// Call site:
Filename string // containing file
Offset int // offset within file of '('
Line int // line number
Column int // column number of call
Dynamic string // "static" or "dynamic"
Description string // e.g. "static method call"
}
Caller and Callee are *ssa.Function values, which print as
"(*sync/atomic.Mutex).Lock", but other attributes may be
derived from them, e.g. Caller.Pkg.Pkg.Path yields the
import path of the enclosing package. Consult the go/ssa
API documentation for details.
Examples:
Show the call graph of the trivial web server application:
callgraph -format digraph $GOROOT/src/net/http/triv.go
Same, but show only the packages of each function:
callgraph -format '{{.Caller.Pkg.Pkg.Path}} -> {{.Callee.Pkg.Pkg.Path}}' \
$GOROOT/src/net/http/triv.go | sort | uniq
Show functions that make dynamic calls into the 'fmt' test package,
using the pointer analysis algorithm:
callgraph -format='{{.Caller}} -{{.Dynamic}}-> {{.Callee}}' -test -algo=pta fmt |
sed -ne 's/-dynamic-/--/p' |
sed -ne 's/-->.*fmt_test.*$//p' | sort | uniq
Show all functions directly called by the callgraph tool's main function:
callgraph -format=digraph golang.org/x/tools/cmd/callgraph |
digraph succs golang.org/x/tools/cmd/callgraph.main
`
func init() {
// If $GOMAXPROCS isn't set, use the full capacity of the machine.
// For small machines, use at least 4 threads.
if os.Getenv("GOMAXPROCS") == "" {
n := runtime.NumCPU()
if n < 4 {
n = 4
}
runtime.GOMAXPROCS(n)
}
}
func main() {
flag.Parse()
if err := doCallgraph("", "", *algoFlag, *formatFlag, *testFlag, flag.Args()); err != nil {
fmt.Fprintf(os.Stderr, "callgraph: %s\n", err)
os.Exit(1)
}
}
var stdout io.Writer = os.Stdout
func doCallgraph(dir, gopath, algo, format string, tests bool, args []string) error {
if len(args) == 0 {
fmt.Fprintln(os.Stderr, Usage)
return nil
}
cfg := &packages.Config{
Mode: packages.LoadAllSyntax,
Tests: tests,
Dir: dir,
}
if gopath != "" {
cfg.Env = append(os.Environ(), "GOPATH="+gopath) // to enable testing
}
initial, err := packages.Load(cfg, args...)
if err != nil {
return err
}
if packages.PrintErrors(initial) > 0 {
return fmt.Errorf("packages contain errors")
}
// Create and build SSA-form program representation.
prog, pkgs := ssautil.AllPackages(initial, 0)
prog.Build()
// -- call graph construction ------------------------------------------
var cg *callgraph.Graph
switch algo {
case "static":
cg = static.CallGraph(prog)
case "cha":
cg = cha.CallGraph(prog)
case "pta":
// Set up points-to analysis log file.
var ptalog io.Writer
if *ptalogFlag != "" {
if f, err := os.Create(*ptalogFlag); err != nil {
log.Fatalf("Failed to create PTA log file: %s", err)
} else {
buf := bufio.NewWriter(f)
ptalog = buf
defer func() {
if err := buf.Flush(); err != nil {
log.Printf("flush: %s", err)
}
if err := f.Close(); err != nil {
log.Printf("close: %s", err)
}
}()
}
}
mains, err := mainPackages(pkgs)
if err != nil {
return err
}
config := &pointer.Config{
Mains: mains,
BuildCallGraph: true,
Log: ptalog,
}
ptares, err := pointer.Analyze(config)
if err != nil {
return err // internal error in pointer analysis
}
cg = ptares.CallGraph
case "rta":
mains, err := mainPackages(pkgs)
if err != nil {
return err
}
var roots []*ssa.Function
for _, main := range mains {
roots = append(roots, main.Func("init"), main.Func("main"))
}
rtares := rta.Analyze(roots, true)
cg = rtares.CallGraph
// NB: RTA gives us Reachable and RuntimeTypes too.
default:
return fmt.Errorf("unknown algorithm: %s", algo)
}
cg.DeleteSyntheticNodes()
// -- output------------------------------------------------------------
var before, after string
// Pre-canned formats.
switch format {
case "digraph":
format = `{{printf "%q %q" .Caller .Callee}}`
case "graphviz":
before = "digraph callgraph {\n"
after = "}\n"
format = ` {{printf "%q" .Caller}} -> {{printf "%q" .Callee}}`
}
tmpl, err := template.New("-format").Parse(format)
if err != nil {
return fmt.Errorf("invalid -format template: %v", err)
}
// Allocate these once, outside the traversal.
var buf bytes.Buffer
data := Edge{fset: prog.Fset}
fmt.Fprint(stdout, before)
if err := callgraph.GraphVisitEdges(cg, func(edge *callgraph.Edge) error {
data.position.Offset = -1
data.edge = edge
data.Caller = edge.Caller.Func
data.Callee = edge.Callee.Func
buf.Reset()
if err := tmpl.Execute(&buf, &data); err != nil {
return err
}
stdout.Write(buf.Bytes())
if len := buf.Len(); len == 0 || buf.Bytes()[len-1] != '\n' {
fmt.Fprintln(stdout)
}
return nil
}); err != nil {
return err
}
fmt.Fprint(stdout, after)
return nil
}
// mainPackages returns the main packages to analyze.
// Each resulting package is named "main" and has a main function.
func mainPackages(pkgs []*ssa.Package) ([]*ssa.Package, error) {
var mains []*ssa.Package
for _, p := range pkgs {
if p != nil && p.Pkg.Name() == "main" && p.Func("main") != nil {
mains = append(mains, p)
}
}
if len(mains) == 0 {
return nil, fmt.Errorf("no main packages")
}
return mains, nil
}
type Edge struct {
Caller *ssa.Function
Callee *ssa.Function
edge *callgraph.Edge
fset *token.FileSet
position token.Position // initialized lazily
}
func (e *Edge) pos() *token.Position {
if e.position.Offset == -1 {
e.position = e.fset.Position(e.edge.Pos()) // called lazily
}
return &e.position
}
func (e *Edge) Filename() string { return e.pos().Filename }
func (e *Edge) Column() int { return e.pos().Column }
func (e *Edge) Line() int { return e.pos().Line }
func (e *Edge) Offset() int { return e.pos().Offset }
func (e *Edge) Dynamic() string {
if e.edge.Site != nil && e.edge.Site.Common().StaticCallee() == nil {
return "dynamic"
}
return "static"
}
func (e *Edge) Description() string { return e.edge.Description() }
| ["\"GOMAXPROCS\""] | [] | ["GOMAXPROCS"] | [] | ["GOMAXPROCS"] | go | 1 | 0 |
server/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djangobackend.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [] | [] | [] | [] | [] | python | 0 | 0 |
modules/spartan/utils/utils.py | __author__ = 'manuelli'
import numpy as np
import collections
import yaml
from yaml import CLoader
import os
import datetime
import time
# director
from director import transformUtils
import spartan.utils.transformations as transformations
def getSpartanSourceDir():
return os.getenv("SPARTAN_SOURCE_DIR")
def get_sandbox_dir():
return os.getenv("SPARTAN_SANDBOX_DIR")
def get_data_dir():
return os.getenv("DATA_DIR")
def getDictFromYamlFilename(filename):
"""
Read data from a YAML files
"""
return yaml.load(file(filename), Loader=CLoader)
def saveToYaml(data, filename):
with open(filename, 'w') as outfile:
yaml.dump(data, outfile, default_flow_style=False)
def poseFromTransform(transform):
pos, quat = transformUtils.poseFromTransform(transform)
pos = pos.tolist()
quat = quat.tolist()
d = dict()
d['translation'] = dict()
d['translation']['x'] = pos[0]
d['translation']['y'] = pos[1]
d['translation']['z'] = pos[2]
d['quaternion'] = dict()
d['quaternion']['w'] = quat[0]
d['quaternion']['x'] = quat[1]
d['quaternion']['y'] = quat[2]
d['quaternion']['z'] = quat[3]
return d
def dictFromPosQuat(pos, quat):
d = dict()
d['translation'] = dict()
d['translation']['x'] = pos[0]
d['translation']['y'] = pos[1]
d['translation']['z'] = pos[2]
d['quaternion'] = dict()
d['quaternion']['w'] = quat[0]
d['quaternion']['x'] = quat[1]
d['quaternion']['y'] = quat[2]
d['quaternion']['z'] = quat[3]
return d
def transformFromPose(d):
pos = [0]*3
pos[0] = d['translation']['x']
pos[1] = d['translation']['y']
pos[2] = d['translation']['z']
quatDict = getQuaternionFromDict(d)
quat = [0]*4
quat[0] = quatDict['w']
quat[1] = quatDict['x']
quat[2] = quatDict['y']
quat[3] = quatDict['z']
return transformUtils.transformFromPose(pos, quat)
def homogenous_transform_from_dict(d):
"""
Returns a transform from a standard encoding in dict format
:param d:
:return:
"""
pos = [0]*3
pos[0] = d['translation']['x']
pos[1] = d['translation']['y']
pos[2] = d['translation']['z']
quatDict = getQuaternionFromDict(d)
quat = [0]*4
quat[0] = quatDict['w']
quat[1] = quatDict['x']
quat[2] = quatDict['y']
quat[3] = quatDict['z']
transform_matrix = transformations.quaternion_matrix(quat)
transform_matrix[0:3,3] = np.array(pos)
return transform_matrix
"""
msg: geometry_msgs/Pose
"""
def transformFromROSPoseMsg(msg):
pos = [msg.position.x, msg.position.y, msg.position.z]
quat = [msg.orientation.w, msg.orientation.x, msg.orientation.y, msg.orientation.z]
return transformUtils.transformFromPose(pos,quat)
def transformFromROSTransformMsg(msg):
pos = [msg.translation.x, msg.translation.y, msg.translation.z]
quat = [msg.rotation.w, msg.rotation.x, msg.rotation.y, msg.rotation.z]
return transformUtils.transformFromPose(pos,quat)
def getQuaternionFromDict(d):
quat = None
quatNames = ['orientation', 'rotation', 'quaternion']
for name in quatNames:
if name in d:
quat = d[name]
if quat is None:
raise ValueError("Error when trying to extract quaternion from dict, your dict doesn't contain a key in ['orientation', 'rotation', 'quaternion']")
return quat
def get_current_time_unique_name():
"""
Converts current date to a unique name
    Note: with the "%Y%m%d-%H%M%S" format used here, the returned string has a fixed length.
:return:
:rtype: str
"""
unique_name = time.strftime("%Y%m%d-%H%M%S")
return unique_name
def dict_from_homogenous_transform(tf):
"""
Returns standard encoding in dict format from 4x4 transform matrix
:param tf:
:return:
"""
return dictFromPosQuat(tf[:3, 3], transformations.quaternion_from_matrix(tf))
def apply_homogenous_transform_to_points(tf, pts):
''' Given a homogenous tf matrix and a 3xN NP array
of points, apply the tf to the points to produce
a new 3xN array of points.
:param tf: 4x4 numpy array of matching dtype to pts
:param pts: 3xN numpy array of matching dtype to tf
:return: 3xN numpy array of matching dtype to tf and pts'''
return ((tf[:3, :3].dot(pts).T) + tf[:3, 3]).T
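# A minimal usage sketch (values are illustrative, not from the original code):
# build a 4x4 transform from a pose dict, then move a 3xN point array with it.
#   tf = homogenous_transform_from_dict({'translation': {'x': 1.0, 'y': 0.0, 'z': 0.0},
#                                        'quaternion': {'w': 1.0, 'x': 0.0, 'y': 0.0, 'z': 0.0}})
#   pts = np.zeros((3, 4))
#   moved = apply_homogenous_transform_to_points(tf, pts)  # every point shifted by +1 in x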
def get_current_YYYY_MM_DD_hh_mm_ss():
"""
Returns a string identifying the current:
- year, month, day, hour, minute, second
Using this format:
YYYY-MM-DD-hh-mm-ss
For example:
2018-04-07-19-02-50
Note: this function will always return strings of the same length.
:return: current time formatted as a string
:rtype: string
"""
now = datetime.datetime.now()
string = "%0.4d-%0.2d-%0.2d-%0.2d-%0.2d-%0.2d" % (now.year, now.month, now.day, now.hour, now.minute, now.second)
return string
def compute_angle_between_quaternions(q, r):
"""
Computes the angle between two quaternions.
theta = arccos(2 * <q1, q2>^2 - 1)
See https://math.stackexchange.com/questions/90081/quaternion-distance
:param q: numpy array in form [w,x,y,z]. As long as both q,r are consistent it doesn't matter
:type q:
:param r:
:type r:
:return: angle between the quaternions, in radians
:rtype:
"""
    theta = np.arccos(2 * np.dot(q, r)**2 - 1)
return theta
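# Worked example (illustrative): for identical unit quaternions the dot product is 1,
# so theta = arccos(2*1 - 1) = 0. For a 90 degree rotation about z,
# q = [w, x, y, z] = [cos(pi/4), 0, 0, sin(pi/4)] compared against the identity
# [1, 0, 0, 0], the dot product is cos(pi/4), giving theta = arccos(2*0.5 - 1) = pi/2.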
def compute_translation_distance_between_poses(pose_a, pose_b):
"""
Computes the linear difference between pose_a and pose_b
:param pose_a: 4 x 4 homogeneous transform
:type pose_a:
:param pose_b:
:type pose_b:
:return: Distance between translation component of the poses
:rtype:
"""
pos_a = pose_a[0:3,3]
pos_b = pose_b[0:3,3]
return np.linalg.norm(pos_a - pos_b)
def compute_angle_between_poses(pose_a, pose_b):
"""
Computes the angle distance in radians between two homogenous transforms
:param pose_a: 4 x 4 homogeneous transform
:type pose_a:
:param pose_b:
:type pose_b:
:return: Angle between poses in radians
:rtype:
"""
quat_a = transformations.quaternion_from_matrix(pose_a)
quat_b = transformations.quaternion_from_matrix(pose_b)
return compute_angle_between_quaternions(quat_a, quat_b)
def get_kuka_joint_names():
return [
'iiwa_joint_1', 'iiwa_joint_2', 'iiwa_joint_3',
'iiwa_joint_4', 'iiwa_joint_5', 'iiwa_joint_6',
'iiwa_joint_7']
| [] | [] | ["DATA_DIR", "SPARTAN_SANDBOX_DIR", "SPARTAN_SOURCE_DIR"] | [] | ["DATA_DIR", "SPARTAN_SANDBOX_DIR", "SPARTAN_SOURCE_DIR"] | python | 3 | 0 |
internal/utils/jwt_utils.go | package utils
import (
"errors"
"os"
"time"
log "github.com/sirupsen/logrus"
"github.com/dgrijalva/jwt-go"
"github.com/joho/godotenv"
)
// JwtWrapper wraps the signing key and the issuer
type JwtWrapper struct {
SecretKey string
Issuer string
ExpirationHours int64
}
// JwtClaim adds userid as a claim to the token
type JwtClaim struct {
UserID uint
jwt.StandardClaims
}
func GetJWTSecretKey() string {
err := godotenv.Load(".env")
if err != nil {
log.Fatalf("Error loading .env file")
}
return os.Getenv("JWT_SECRET_KEY")
}
func (j *JwtWrapper) SignToken(userid uint) (signedToken string, err error) {
claims := &JwtClaim{
UserID: userid,
StandardClaims: jwt.StandardClaims{
ExpiresAt: time.Now().Local().Add(time.Hour * time.Duration(j.ExpirationHours)).Unix(),
Issuer: j.Issuer,
},
}
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
signedToken, err = token.SignedString([]byte(GetJWTSecretKey()))
if err != nil {
return
}
return
}
func GenerateToken(userid uint) string {
jwtWrapper := JwtWrapper{
SecretKey: GetJWTSecretKey(),
Issuer: "AuthService",
ExpirationHours: 24,
}
generatedToken, err := jwtWrapper.SignToken(userid)
if err != nil {
log.Println(err)
}
return generatedToken
}
//ValidateToken validates the jwt token
func (j *JwtWrapper) ValidateToken(signedToken string) (claims *JwtClaim, err error) {
token, err := jwt.ParseWithClaims(
signedToken,
&JwtClaim{},
func(token *jwt.Token) (interface{}, error) {
return []byte(GetJWTSecretKey()), nil
},
)
if err != nil {
return
}
claims, ok := token.Claims.(*JwtClaim)
if !ok {
err = errors.New("Couldn't parse claims")
return
}
if claims.ExpiresAt < time.Now().Local().Unix() {
err = errors.New("JWT is expired")
return
}
return
}
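// Example round trip (a sketch; assumes a .env file providing JWT_SECRET_KEY and
// elides error handling):
//   token := GenerateToken(42)
//   wrapper := JwtWrapper{SecretKey: GetJWTSecretKey(), Issuer: "AuthService", ExpirationHours: 24}
//   claims, err := wrapper.ValidateToken(token) // claims.UserID == 42 when err == nil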
| ["\"JWT_SECRET_KEY\""] | [] | ["JWT_SECRET_KEY"] | [] | ["JWT_SECRET_KEY"] | go | 1 | 0 |
vendor/github.com/openshift/installer/pkg/asset/installconfig/ibmcloud/client.go | package ibmcloud
import (
"context"
"fmt"
"net/http"
"os"
"time"
"github.com/IBM/go-sdk-core/v5/core"
"github.com/IBM/networking-go-sdk/dnsrecordsv1"
"github.com/IBM/networking-go-sdk/zonesv1"
"github.com/IBM/platform-services-go-sdk/iamidentityv1"
"github.com/IBM/platform-services-go-sdk/resourcecontrollerv2"
"github.com/IBM/platform-services-go-sdk/resourcemanagerv2"
"github.com/IBM/vpc-go-sdk/vpcv1"
"github.com/pkg/errors"
)
//go:generate mockgen -source=./client.go -destination=./mock/ibmcloudclient_generated.go -package=mock
// API represents the calls made to the API.
type API interface {
GetAuthenticatorAPIKeyDetails(ctx context.Context) (*iamidentityv1.APIKey, error)
GetCISInstance(ctx context.Context, crnstr string) (*resourcecontrollerv2.ResourceInstance, error)
GetDedicatedHostByName(ctx context.Context, name string, region string) (*vpcv1.DedicatedHost, error)
GetDedicatedHostProfiles(ctx context.Context, region string) ([]vpcv1.DedicatedHostProfile, error)
GetDNSRecordsByName(ctx context.Context, crnstr string, zoneID string, recordName string) ([]dnsrecordsv1.DnsrecordDetails, error)
GetDNSZoneIDByName(ctx context.Context, name string) (string, error)
GetDNSZones(ctx context.Context) ([]DNSZoneResponse, error)
GetEncryptionKey(ctx context.Context, keyCRN string) (*EncryptionKeyResponse, error)
GetResourceGroups(ctx context.Context) ([]resourcemanagerv2.ResourceGroup, error)
GetResourceGroup(ctx context.Context, nameOrID string) (*resourcemanagerv2.ResourceGroup, error)
GetSubnet(ctx context.Context, subnetID string) (*vpcv1.Subnet, error)
GetVSIProfiles(ctx context.Context) ([]vpcv1.InstanceProfile, error)
GetVPC(ctx context.Context, vpcID string) (*vpcv1.VPC, error)
GetVPCZonesForRegion(ctx context.Context, region string) ([]string, error)
}
// Client makes calls to the IBM Cloud API.
type Client struct {
managementAPI *resourcemanagerv2.ResourceManagerV2
controllerAPI *resourcecontrollerv2.ResourceControllerV2
vpcAPI *vpcv1.VpcV1
Authenticator *core.IamAuthenticator
}
// cisServiceID is the Cloud Internet Services' catalog service ID.
const cisServiceID = "75874a60-cb12-11e7-948e-37ac098eb1b9"
// VPCResourceNotFoundError represents an error for a VPC resoruce that is not found.
type VPCResourceNotFoundError struct{}
// Error returns the error message for the VPCResourceNotFoundError error type.
func (e *VPCResourceNotFoundError) Error() string {
return "Not Found"
}
// DNSZoneResponse represents a DNS zone response.
type DNSZoneResponse struct {
// Name is the domain name of the zone.
Name string
// ID is the zone's ID.
ID string
// CISInstanceCRN is the IBM Cloud Resource Name for the CIS instance where
// the DNS zone is managed.
CISInstanceCRN string
// CISInstanceName is the display name of the CIS instance where the DNS zone
// is managed.
CISInstanceName string
// ResourceGroupID is the resource group ID of the CIS instance.
ResourceGroupID string
}
// EncryptionKeyResponse represents an encryption key response.
type EncryptionKeyResponse struct{}
// NewClient initializes a client with a session.
func NewClient() (*Client, error) {
apiKey := os.Getenv("IC_API_KEY")
authenticator := &core.IamAuthenticator{
ApiKey: apiKey,
}
client := &Client{
Authenticator: authenticator,
}
if err := client.loadSDKServices(); err != nil {
return nil, errors.Wrap(err, "failed to load IBM SDK services")
}
return client, nil
}
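// Usage sketch (assumes the IC_API_KEY environment variable holds a valid IBM Cloud
// API key; error handling elided):
//   client, err := NewClient()
//   groups, err := client.GetResourceGroups(context.TODO())
//   zones, err := client.GetDNSZones(context.TODO())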
func (c *Client) loadSDKServices() error {
servicesToLoad := []func() error{
c.loadResourceManagementAPI,
c.loadResourceControllerAPI,
c.loadVPCV1API,
}
// Call all the load functions.
for _, fn := range servicesToLoad {
if err := fn(); err != nil {
return err
}
}
return nil
}
// GetAuthenticatorAPIKeyDetails gets detailed information on the API key used
// for authentication to the IBM Cloud APIs
func (c *Client) GetAuthenticatorAPIKeyDetails(ctx context.Context) (*iamidentityv1.APIKey, error) {
iamIdentityService, err := iamidentityv1.NewIamIdentityV1(&iamidentityv1.IamIdentityV1Options{
Authenticator: c.Authenticator,
})
if err != nil {
return nil, err
}
options := iamIdentityService.NewGetAPIKeysDetailsOptions()
options.SetIamAPIKey(c.Authenticator.ApiKey)
details, _, err := iamIdentityService.GetAPIKeysDetailsWithContext(ctx, options)
if err != nil {
return nil, err
}
return details, nil
}
// GetCISInstance gets a specific Cloud Internet Services instance by its CRN.
func (c *Client) GetCISInstance(ctx context.Context, crnstr string) (*resourcecontrollerv2.ResourceInstance, error) {
_, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
options := c.controllerAPI.NewGetResourceInstanceOptions(crnstr)
resourceInstance, _, err := c.controllerAPI.GetResourceInstance(options)
if err != nil {
return nil, errors.Wrap(err, "failed to get cis instances")
}
return resourceInstance, nil
}
// GetDedicatedHostByName gets dedicated host by name.
func (c *Client) GetDedicatedHostByName(ctx context.Context, name string, region string) (*vpcv1.DedicatedHost, error) {
err := c.setVPCServiceURLForRegion(ctx, region)
if err != nil {
return nil, err
}
options := c.vpcAPI.NewListDedicatedHostsOptions()
dhosts, _, err := c.vpcAPI.ListDedicatedHostsWithContext(ctx, options)
if err != nil {
return nil, errors.Wrap(err, "failed to list dedicated hosts")
}
for _, dhost := range dhosts.DedicatedHosts {
if *dhost.Name == name {
return &dhost, nil
}
}
return nil, fmt.Errorf("dedicated host %q not found", name)
}
// GetDedicatedHostProfiles gets a list of profiles supported in a region.
func (c *Client) GetDedicatedHostProfiles(ctx context.Context, region string) ([]vpcv1.DedicatedHostProfile, error) {
err := c.setVPCServiceURLForRegion(ctx, region)
if err != nil {
return nil, err
}
profilesOptions := c.vpcAPI.NewListDedicatedHostProfilesOptions()
profiles, _, err := c.vpcAPI.ListDedicatedHostProfilesWithContext(ctx, profilesOptions)
if err != nil {
return nil, err
}
return profiles.Profiles, nil
}
// GetDNSRecordsByName gets DNS records in specific Cloud Internet Services instance
// by its CRN, zone ID, and DNS record name.
func (c *Client) GetDNSRecordsByName(ctx context.Context, crnstr string, zoneID string, recordName string) ([]dnsrecordsv1.DnsrecordDetails, error) {
// Set CIS DNS record service
dnsService, err := dnsrecordsv1.NewDnsRecordsV1(&dnsrecordsv1.DnsRecordsV1Options{
Authenticator: c.Authenticator,
Crn: core.StringPtr(crnstr),
ZoneIdentifier: core.StringPtr(zoneID),
})
if err != nil {
return nil, err
}
// Get CIS DNS records by name
records, _, err := dnsService.ListAllDnsRecordsWithContext(ctx, &dnsrecordsv1.ListAllDnsRecordsOptions{
Name: core.StringPtr(recordName),
})
if err != nil {
return nil, errors.Wrap(err, "could not retrieve DNS records")
}
return records.Result, nil
}
// GetDNSZoneIDByName gets the CIS zone ID from its domain name.
func (c *Client) GetDNSZoneIDByName(ctx context.Context, name string) (string, error) {
zones, err := c.GetDNSZones(ctx)
if err != nil {
return "", err
}
for _, z := range zones {
if z.Name == name {
return z.ID, nil
}
}
return "", fmt.Errorf("DNS zone %q not found", name)
}
// GetDNSZones returns all of the active DNS zones managed by CIS.
func (c *Client) GetDNSZones(ctx context.Context) ([]DNSZoneResponse, error) {
_, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
options := c.controllerAPI.NewListResourceInstancesOptions()
options.SetResourceID(cisServiceID)
listResourceInstancesResponse, _, err := c.controllerAPI.ListResourceInstances(options)
if err != nil {
return nil, errors.Wrap(err, "failed to get cis instance")
}
var allZones []DNSZoneResponse
for _, instance := range listResourceInstancesResponse.Resources {
crnstr := instance.CRN
zonesService, err := zonesv1.NewZonesV1(&zonesv1.ZonesV1Options{
Authenticator: c.Authenticator,
Crn: crnstr,
})
if err != nil {
return nil, errors.Wrap(err, "failed to list DNS zones")
}
options := zonesService.NewListZonesOptions()
listZonesResponse, _, err := zonesService.ListZones(options)
if listZonesResponse == nil {
return nil, err
}
for _, zone := range listZonesResponse.Result {
if *zone.Status == "active" {
zoneStruct := DNSZoneResponse{
Name: *zone.Name,
ID: *zone.ID,
CISInstanceCRN: *instance.CRN,
CISInstanceName: *instance.Name,
ResourceGroupID: *instance.ResourceGroupID,
}
allZones = append(allZones, zoneStruct)
}
}
}
return allZones, nil
}
// GetEncryptionKey gets data for an encryption key
func (c *Client) GetEncryptionKey(ctx context.Context, keyCRN string) (*EncryptionKeyResponse, error) {
// TODO: IBM: Call KMS / Hyperprotect Crpyto APIs.
return &EncryptionKeyResponse{}, nil
}
// GetResourceGroup gets a resource group by its name or ID.
func (c *Client) GetResourceGroup(ctx context.Context, nameOrID string) (*resourcemanagerv2.ResourceGroup, error) {
_, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
groups, err := c.GetResourceGroups(ctx)
if err != nil {
return nil, err
}
for idx, rg := range groups {
if *rg.ID == nameOrID || *rg.Name == nameOrID {
return &groups[idx], nil
}
}
return nil, fmt.Errorf("resource group %q not found", nameOrID)
}
// GetResourceGroups gets the list of resource groups.
func (c *Client) GetResourceGroups(ctx context.Context) ([]resourcemanagerv2.ResourceGroup, error) {
_, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
apikey, err := c.GetAuthenticatorAPIKeyDetails(ctx)
if err != nil {
return nil, err
}
options := c.managementAPI.NewListResourceGroupsOptions()
options.SetAccountID(*apikey.AccountID)
listResourceGroupsResponse, _, err := c.managementAPI.ListResourceGroupsWithContext(ctx, options)
if err != nil {
return nil, err
}
return listResourceGroupsResponse.Resources, nil
}
// GetSubnet gets a subnet by its ID.
func (c *Client) GetSubnet(ctx context.Context, subnetID string) (*vpcv1.Subnet, error) {
_, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
subnet, detailedResponse, err := c.vpcAPI.GetSubnet(&vpcv1.GetSubnetOptions{ID: &subnetID})
if detailedResponse.GetStatusCode() == http.StatusNotFound {
return nil, &VPCResourceNotFoundError{}
}
return subnet, err
}
// GetVSIProfiles gets a list of all VSI profiles.
func (c *Client) GetVSIProfiles(ctx context.Context) ([]vpcv1.InstanceProfile, error) {
listInstanceProfilesOptions := c.vpcAPI.NewListInstanceProfilesOptions()
profiles, _, err := c.vpcAPI.ListInstanceProfilesWithContext(ctx, listInstanceProfilesOptions)
if err != nil {
return nil, errors.Wrap(err, "failed to list vpc vsi profiles")
}
return profiles.Profiles, nil
}
// GetVPC gets a VPC by its ID.
func (c *Client) GetVPC(ctx context.Context, vpcID string) (*vpcv1.VPC, error) {
_, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
regions, err := c.getVPCRegions(ctx)
if err != nil {
return nil, err
}
for _, region := range regions {
err := c.vpcAPI.SetServiceURL(fmt.Sprintf("%s/v1", *region.Endpoint))
if err != nil {
return nil, errors.Wrap(err, "failed to set vpc api service url")
}
if vpc, detailedResponse, err := c.vpcAPI.GetVPC(c.vpcAPI.NewGetVPCOptions(vpcID)); err != nil {
if detailedResponse.GetStatusCode() != http.StatusNotFound {
return nil, err
}
} else if vpc != nil {
return vpc, nil
}
}
return nil, &VPCResourceNotFoundError{}
}
// GetVPCZonesForRegion gets the supported zones for a VPC region.
func (c *Client) GetVPCZonesForRegion(ctx context.Context, region string) ([]string, error) {
_, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
regionZonesOptions := c.vpcAPI.NewListRegionZonesOptions(region)
zones, _, err := c.vpcAPI.ListRegionZonesWithContext(ctx, regionZonesOptions)
if err != nil {
return nil, err
}
response := make([]string, len(zones.Zones))
for idx, zone := range zones.Zones {
response[idx] = *zone.Name
}
return response, err
}
func (c *Client) getVPCRegions(ctx context.Context) ([]vpcv1.Region, error) {
listRegionsOptions := c.vpcAPI.NewListRegionsOptions()
listRegionsResponse, _, err := c.vpcAPI.ListRegionsWithContext(ctx, listRegionsOptions)
if err != nil {
return nil, errors.Wrap(err, "failed to list vpc regions")
}
return listRegionsResponse.Regions, nil
}
func (c *Client) loadResourceManagementAPI() error {
options := &resourcemanagerv2.ResourceManagerV2Options{
Authenticator: c.Authenticator,
}
resourceManagerV2Service, err := resourcemanagerv2.NewResourceManagerV2(options)
if err != nil {
return err
}
c.managementAPI = resourceManagerV2Service
return nil
}
func (c *Client) loadResourceControllerAPI() error {
options := &resourcecontrollerv2.ResourceControllerV2Options{
Authenticator: c.Authenticator,
}
resourceControllerV2Service, err := resourcecontrollerv2.NewResourceControllerV2(options)
if err != nil {
return err
}
c.controllerAPI = resourceControllerV2Service
return nil
}
func (c *Client) loadVPCV1API() error {
vpcService, err := vpcv1.NewVpcV1(&vpcv1.VpcV1Options{
Authenticator: c.Authenticator,
})
if err != nil {
return err
}
c.vpcAPI = vpcService
return nil
}
func (c *Client) setVPCServiceURLForRegion(ctx context.Context, region string) error {
regionOptions := c.vpcAPI.NewGetRegionOptions(region)
vpcRegion, _, err := c.vpcAPI.GetRegionWithContext(ctx, regionOptions)
if err != nil {
return err
}
err = c.vpcAPI.SetServiceURL(fmt.Sprintf("%s/v1", *vpcRegion.Endpoint))
if err != nil {
return err
}
return nil
}
| ["\"IC_API_KEY\""] | [] | ["IC_API_KEY"] | [] | ["IC_API_KEY"] | go | 1 | 0 |
keras/training/train_resnet50.py | #!/usr/bin/env python3
# import matplotlib
# matplotlib.use("TkAgg") # use for OSX
import math, json, os, pickle, sys
import keras
# import matplotlib.pyplot as plt
from keras.callbacks import CSVLogger, EarlyStopping, ModelCheckpoint
from keras.layers import Dense
from keras.models import Model
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
do_log = True
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
DIRLOG = "/storage/plzen1/home/radekj/vmmr/results/sample5"
csv_logger = CSVLogger(os.path.join(DIRLOG, "model_history_log.csv"), append=True)
SIZE = (224, 224)
BATCH_SIZE = 64
EPOCH = 20
def train_cnn(folder):
DATA_DIR = folder
TRAIN_DIR = os.path.join(DATA_DIR, 'train')
VALID_DIR = os.path.join(DATA_DIR, 'valid')
TEST_DIR = os.path.join(DATA_DIR, 'test')
save_aug = os.path.join(DATA_DIR, 'tmp')
num_train_samples = sum([len(files) for r, d, files in os.walk(TRAIN_DIR)])
num_valid_samples = sum([len(files) for r, d, files in os.walk(VALID_DIR)])
num_train_steps = math.floor(num_train_samples/BATCH_SIZE)
num_valid_steps = math.floor(num_valid_samples/BATCH_SIZE)
shift = 0.05
# gen = ImageDataGenerator(zca_whitening=True,
# width_shift_range=shift,
# height_shift_range=shift,
# horizontal_flip=True,
# vertical_flip=False,
# rotation_range=8,
# zoom_range=0.1,
# featurewise_center=True,
# featurewise_std_normalization=True)
gen = ImageDataGenerator()
val_gen = ImageDataGenerator()
batches = gen.flow_from_directory(TRAIN_DIR, target_size=SIZE, class_mode='categorical', shuffle=True, batch_size=BATCH_SIZE)
val_batches = val_gen.flow_from_directory(VALID_DIR, target_size=SIZE, class_mode='categorical', shuffle=True, batch_size=BATCH_SIZE)
model = keras.applications.resnet50.ResNet50()
classes = list(iter(batches.class_indices))
model.layers.pop()
for layer in model.layers:
layer.trainable = False
last = model.layers[-1].output
x = Dense(len(classes), activation="softmax")(last)
finetuned_model = Model(model.input, x)
finetuned_model.compile(optimizer=Adam(lr=0.0001), loss='categorical_crossentropy', metrics=['accuracy'])
for c in batches.class_indices:
classes[batches.class_indices[c]] = c
finetuned_model.classes = classes
early_stopping = EarlyStopping(patience=5)
checkpointer = ModelCheckpoint('resnet50X_best.h5', verbose=1, save_best_only=True)
history = finetuned_model.fit_generator(batches, steps_per_epoch=num_train_steps, epochs=EPOCH,
callbacks=[csv_logger, early_stopping, checkpointer],
validation_data=val_batches,
validation_steps=num_valid_steps)
finetuned_model.save('resnet50X_final.h5')
# plot_history(history)
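# Expected dataset layout for the generators above (a sketch; the class folder
# names are placeholders, not part of the original script):
#   <dataset_path>/train/<class_name>/*.jpg
#   <dataset_path>/valid/<class_name>/*.jpg
# flow_from_directory infers one label per immediate subdirectory of train/ and valid/.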
if __name__ == '__main__':
"""
dataset_path: /Users/radekj/devroot/vmmr/datasets/sample5
/storage/plzen1/home/radekj/vmmr"
"""
print(len(sys.argv))
if len(sys.argv) < 2:
print("Need param: python train_resnet50.py dataset_path")
exit(1)
folder = str(sys.argv[1])
exists = os.path.isdir(folder)
if not exists:
print("Folder '{}' not found.".format(folder))
exit(1)
train_cnn(folder)
print("===== end.")
| [] | [] | ["TF_CPP_MIN_LOG_LEVEL"] | [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 |
setup.py | # -*- coding: utf-8 -*-
"""
install mythril and deploy source-dist and wheel to pypi.python.org
deps (requires up2date version):
*) pip install --upgrade pip wheel setuptools twine
publish to pypi w/o having to convert Readme.md to RST:
1) #> python setup.py sdist bdist_wheel
2) #> twine upload dist/* #<specify bdist_wheel version to upload>; #optional --repository <testpypi> or --repository-url <testpypi-url>
"""
from setuptools import setup, find_packages
from setuptools.command.install import install
from pathlib import Path
import sys
import os
# To make lint checkers happy we set VERSION here, but
# it is redefined by the exec below
VERSION = None
# Package version (vX.Y.Z). It must match git tag being used for CircleCI
# deployment; otherwise the build will failed.
version_path = (Path(__file__).parent / "mythril" / "version.py").absolute()
exec(open(str(version_path), "r").read())
class VerifyVersionCommand(install):
"""Custom command to verify that the git tag matches our version"""
description = "verify that the git tag matches our version"
def run(self):
tag = os.getenv("CIRCLE_TAG")
if tag != VERSION:
info = "Git tag: {0} does not match the version of this app: {1}".format(
tag, VERSION
)
sys.exit(info)
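# Registered below via cmdclass, so `python setup.py verify` runs this check;
# it is typically invoked from CI after a tagged build, so a mismatched CIRCLE_TAG
# fails the release step early.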
def read_file(fname):
"""
return file contents
:param fname: path relative to setup.py
:return: file contents
"""
with open(os.path.join(os.path.dirname(__file__), fname), "r") as fd:
return fd.read()
setup(
name="mythril",
version=VERSION[1:],
description="Security analysis tool for Ethereum smart contracts",
long_description=read_file("README.md") if os.path.isfile("README.md") else "",
long_description_content_type="text/markdown", # requires twine and recent setuptools
url="https://github.com/b-mueller/mythril",
author="Bernhard Mueller",
author_email="[email protected]",
license="MIT",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Science/Research",
"Topic :: Software Development :: Disassemblers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
keywords="hacking disassembler security ethereum",
packages=find_packages(exclude=["contrib", "docs", "tests"]),
install_requires=[
"coloredlogs>=10.0",
"ethereum>=2.3.2",
"z3-solver>=4.8.0.0",
"requests",
"py-solc",
"plyvel",
"eth_abi>=1.0.0",
"eth-utils>=1.0.1",
"eth-account>=0.1.0a2",
"eth-hash>=0.1.0",
"eth-keyfile>=0.5.1",
"eth-keys>=0.2.0b3",
"eth-rlp>=0.1.0",
"eth-tester>=0.1.0b21",
"eth-typing>=1.3.0,<2.0.0",
"coverage",
"jinja2>=2.9",
"rlp>=1.0.1",
"transaction>=2.2.1",
"py-flags",
"mock",
"configparser>=3.5.0",
"persistent>=4.2.0",
"ethereum-input-decoder>=0.2.2",
],
tests_require=["pytest>=3.6.0", "pytest_mock", "pytest-cov"],
python_requires=">=3.5",
extras_require={},
package_data={"mythril.analysis.templates": ["*"]},
include_package_data=True,
entry_points={"console_scripts": ["myth=mythril.interfaces.cli:main"]},
cmdclass={"verify": VerifyVersionCommand},
)
| [] | [] | ["CIRCLE_TAG"] | [] | ["CIRCLE_TAG"] | python | 1 | 0 |
configure.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""configure script to get build parameters from user."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import errno
import os
import platform
import re
import subprocess
import sys
# pylint: disable=g-import-not-at-top
try:
from shutil import which
except ImportError:
from distutils.spawn import find_executable as which
# pylint: enable=g-import-not-at-top
_DEFAULT_CUDA_VERSION = '9.0'
_DEFAULT_CUDNN_VERSION = '7'
_DEFAULT_NCCL_VERSION = '2.2'
_DEFAULT_CUDA_COMPUTE_CAPABILITIES = '3.5,7.0'
_DEFAULT_CUDA_PATH = '/usr/local/cuda'
_DEFAULT_CUDA_PATH_LINUX = '/opt/cuda'
_DEFAULT_CUDA_PATH_WIN = ('C:/Program Files/NVIDIA GPU Computing '
'Toolkit/CUDA/v%s' % _DEFAULT_CUDA_VERSION)
_TF_OPENCL_VERSION = '1.2'
_DEFAULT_COMPUTECPP_TOOLKIT_PATH = '/usr/local/computecpp'
_DEFAULT_TRISYCL_INCLUDE_DIR = '/usr/local/triSYCL/include'
_SUPPORTED_ANDROID_NDK_VERSIONS = [10, 11, 12, 13, 14, 15, 16]
_DEFAULT_PROMPT_ASK_ATTEMPTS = 10
_TF_WORKSPACE_ROOT = os.path.abspath(os.path.dirname(__file__))
_TF_BAZELRC_FILENAME = '.tf_configure.bazelrc'
_TF_BAZELRC = os.path.join(_TF_WORKSPACE_ROOT, _TF_BAZELRC_FILENAME)
_TF_WORKSPACE = os.path.join(_TF_WORKSPACE_ROOT, 'WORKSPACE')
if platform.machine() == 'ppc64le':
_DEFAULT_TENSORRT_PATH_LINUX = '/usr/lib/powerpc64le-linux-gnu/'
else:
_DEFAULT_TENSORRT_PATH_LINUX = '/usr/lib/%s-linux-gnu' % platform.machine()
class UserInputError(Exception):
pass
def is_windows():
return platform.system() == 'Windows'
def is_linux():
return platform.system() == 'Linux'
def is_macos():
return platform.system() == 'Darwin'
def is_ppc64le():
return platform.machine() == 'ppc64le'
def is_cygwin():
return platform.system().startswith('CYGWIN_NT')
def get_input(question):
try:
try:
answer = raw_input(question)
except NameError:
answer = input(question) # pylint: disable=bad-builtin
except EOFError:
answer = ''
return answer
def symlink_force(target, link_name):
"""Force symlink, equivalent of 'ln -sf'.
Args:
target: items to link to.
link_name: name of the link.
"""
try:
os.symlink(target, link_name)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(link_name)
os.symlink(target, link_name)
else:
raise e
def sed_in_place(filename, old, new):
"""Replace old string with new string in file.
Args:
filename: string for filename.
old: string to replace.
new: new string to replace to.
"""
with open(filename, 'r') as f:
filedata = f.read()
newdata = filedata.replace(old, new)
with open(filename, 'w') as f:
f.write(newdata)
def write_to_bazelrc(line):
with open(_TF_BAZELRC, 'a') as f:
f.write(line + '\n')
def write_action_env_to_bazelrc(var_name, var):
write_to_bazelrc('build --action_env %s="%s"' % (var_name, str(var)))
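# For example, write_action_env_to_bazelrc('TF_NEED_OPENCL', 0) appends the line
#   build --action_env TF_NEED_OPENCL="0"
# to the generated .tf_configure.bazelrc file (the variable name is illustrative).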
def run_shell(cmd, allow_non_zero=False):
if allow_non_zero:
try:
output = subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
output = e.output
else:
output = subprocess.check_output(cmd)
return output.decode('UTF-8').strip()
def cygpath(path):
"""Convert path from posix to windows."""
return os.path.abspath(path).replace('\\', '/')
def get_python_path(environ_cp, python_bin_path):
"""Get the python site package paths."""
python_paths = []
if environ_cp.get('PYTHONPATH'):
python_paths = environ_cp.get('PYTHONPATH').split(':')
try:
library_paths = run_shell([
python_bin_path, '-c',
'import site; print("\\n".join(site.getsitepackages()))'
]).split('\n')
except subprocess.CalledProcessError:
library_paths = [
run_shell([
python_bin_path, '-c',
'from distutils.sysconfig import get_python_lib;'
'print(get_python_lib())'
])
]
all_paths = set(python_paths + library_paths)
paths = []
for path in all_paths:
if os.path.isdir(path):
paths.append(path)
return paths
def get_python_major_version(python_bin_path):
"""Get the python major version."""
return run_shell([python_bin_path, '-c', 'import sys; print(sys.version[0])'])
def setup_python(environ_cp):
"""Setup python related env variables."""
# Get PYTHON_BIN_PATH, default is the current running python.
default_python_bin_path = sys.executable
ask_python_bin_path = ('Please specify the location of python. [Default is '
'%s]: ') % default_python_bin_path
while True:
python_bin_path = get_from_env_or_user_or_default(
environ_cp, 'PYTHON_BIN_PATH', ask_python_bin_path,
default_python_bin_path)
# Check if the path is valid
if os.path.isfile(python_bin_path) and os.access(python_bin_path, os.X_OK):
break
elif not os.path.exists(python_bin_path):
print('Invalid python path: %s cannot be found.' % python_bin_path)
else:
print('%s is not executable. Is it the python binary?' % python_bin_path)
environ_cp['PYTHON_BIN_PATH'] = ''
# Convert python path to Windows style before checking lib and version
if is_windows() or is_cygwin():
python_bin_path = cygpath(python_bin_path)
# Get PYTHON_LIB_PATH
python_lib_path = environ_cp.get('PYTHON_LIB_PATH')
if not python_lib_path:
python_lib_paths = get_python_path(environ_cp, python_bin_path)
if environ_cp.get('USE_DEFAULT_PYTHON_LIB_PATH') == '1':
python_lib_path = python_lib_paths[0]
else:
print('Found possible Python library paths:\n %s' %
'\n '.join(python_lib_paths))
default_python_lib_path = python_lib_paths[0]
python_lib_path = get_input(
'Please input the desired Python library path to use. '
'Default is [%s]\n' % python_lib_paths[0])
if not python_lib_path:
python_lib_path = default_python_lib_path
environ_cp['PYTHON_LIB_PATH'] = python_lib_path
python_major_version = get_python_major_version(python_bin_path)
# Convert python path to Windows style before writing into bazel.rc
if is_windows() or is_cygwin():
python_lib_path = cygpath(python_lib_path)
# Set-up env variables used by python_configure.bzl
write_action_env_to_bazelrc('PYTHON_BIN_PATH', python_bin_path)
write_action_env_to_bazelrc('PYTHON_LIB_PATH', python_lib_path)
write_to_bazelrc('build --python_path=\"%s"' % python_bin_path)
environ_cp['PYTHON_BIN_PATH'] = python_bin_path
# Write tools/python_bin_path.sh
with open(
os.path.join(_TF_WORKSPACE_ROOT, 'tools', 'python_bin_path.sh'),
'w') as f:
f.write('export PYTHON_BIN_PATH="%s"' % python_bin_path)
def reset_tf_configure_bazelrc(workspace_path):
"""Reset file that contains customized config settings."""
open(_TF_BAZELRC, 'w').close()
bazelrc_path = os.path.join(workspace_path, '.bazelrc')
data = []
if os.path.exists(bazelrc_path):
with open(bazelrc_path, 'r') as f:
data = f.read().splitlines()
with open(bazelrc_path, 'w') as f:
for l in data:
if _TF_BAZELRC_FILENAME in l:
continue
f.write('%s\n' % l)
if is_windows():
tf_bazelrc_path = _TF_BAZELRC.replace('\\', '/')
else:
tf_bazelrc_path = _TF_BAZELRC
f.write('import %s\n' % tf_bazelrc_path)
def cleanup_makefile():
"""Delete any leftover BUILD files from the Makefile build.
These files could interfere with Bazel parsing.
"""
makefile_download_dir = os.path.join(_TF_WORKSPACE_ROOT, 'tensorflow',
'contrib', 'makefile', 'downloads')
if os.path.isdir(makefile_download_dir):
for root, _, filenames in os.walk(makefile_download_dir):
for f in filenames:
if f.endswith('BUILD'):
os.remove(os.path.join(root, f))
def get_var(environ_cp,
var_name,
query_item,
enabled_by_default,
question=None,
yes_reply=None,
no_reply=None):
"""Get boolean input from user.
If var_name is not set in env, ask user to enable query_item or not. If the
response is empty, use the default.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
query_item: string for feature related to the variable, e.g. "Hadoop File
System".
enabled_by_default: boolean for default behavior.
question: optional string for how to ask for user input.
yes_reply: optional string for reply when feature is enabled.
no_reply: optional string for reply when feature is disabled.
Returns:
boolean value of the variable.
Raises:
UserInputError: if an environment variable is set, but it cannot be
interpreted as a boolean indicator, assume that the user has made a
scripting error, and will continue to provide invalid input.
Raise the error to avoid infinitely looping.
"""
if not question:
question = 'Do you wish to build TensorFlow with %s support?' % query_item
if not yes_reply:
yes_reply = '%s support will be enabled for TensorFlow.' % query_item
if not no_reply:
no_reply = 'No %s' % yes_reply
yes_reply += '\n'
no_reply += '\n'
if enabled_by_default:
question += ' [Y/n]: '
else:
question += ' [y/N]: '
var = environ_cp.get(var_name)
if var is not None:
var_content = var.strip().lower()
true_strings = ('1', 't', 'true', 'y', 'yes')
false_strings = ('0', 'f', 'false', 'n', 'no')
if var_content in true_strings:
var = True
elif var_content in false_strings:
var = False
else:
raise UserInputError(
'Environment variable %s must be set as a boolean indicator.\n'
'The following are accepted as TRUE : %s.\n'
'The following are accepted as FALSE: %s.\n'
'Current value is %s.' % (var_name, ', '.join(true_strings),
', '.join(false_strings), var))
while var is None:
user_input_origin = get_input(question)
user_input = user_input_origin.strip().lower()
if user_input == 'y':
print(yes_reply)
var = True
elif user_input == 'n':
print(no_reply)
var = False
elif not user_input:
if enabled_by_default:
print(yes_reply)
var = True
else:
print(no_reply)
var = False
else:
print('Invalid selection: %s' % user_input_origin)
return var
def set_build_var(environ_cp,
var_name,
query_item,
option_name,
enabled_by_default,
bazel_config_name=None):
"""Set if query_item will be enabled for the build.
Ask user if query_item will be enabled. Default is used if no input is given.
Set subprocess environment variable and write to .bazelrc if enabled.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
query_item: string for feature related to the variable, e.g. "Hadoop File
System".
option_name: string for option to define in .bazelrc.
enabled_by_default: boolean for default behavior.
bazel_config_name: Name for Bazel --config argument to enable build feature.
"""
var = str(int(get_var(environ_cp, var_name, query_item, enabled_by_default)))
environ_cp[var_name] = var
if var == '1':
write_to_bazelrc('build --define %s=true' % option_name)
elif bazel_config_name is not None:
# TODO(mikecase): Migrate all users of configure.py to use --config Bazel
# options and not to set build configs through environment variables.
write_to_bazelrc(
'build:%s --define %s=true' % (bazel_config_name, option_name))
def set_action_env_var(environ_cp,
var_name,
query_item,
enabled_by_default,
question=None,
yes_reply=None,
no_reply=None):
"""Set boolean action_env variable.
Ask user if query_item will be enabled. Default is used if no input is given.
Set environment variable and write to .bazelrc.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
query_item: string for feature related to the variable, e.g. "Hadoop File
System".
enabled_by_default: boolean for default behavior.
question: optional string for how to ask for user input.
yes_reply: optional string for reply when feature is enabled.
no_reply: optional string for reply when feature is disabled.
"""
var = int(
get_var(environ_cp, var_name, query_item, enabled_by_default, question,
yes_reply, no_reply))
write_action_env_to_bazelrc(var_name, var)
environ_cp[var_name] = str(var)
def convert_version_to_int(version):
"""Convert a version number to a integer that can be used to compare.
Version strings of the form X.YZ and X.Y.Z-xxxxx are supported. The
'xxxxx' part, for instance 'homebrew' on OS/X, is ignored.
Args:
version: a version to be converted
Returns:
An integer if converted successfully, otherwise return None.
"""
version = version.split('-')[0]
version_segments = version.split('.')
for seg in version_segments:
if not seg.isdigit():
return None
version_str = ''.join(['%03d' % int(seg) for seg in version_segments])
return int(version_str)
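# For example (derived from the rules above): convert_version_to_int('0.15.2') == 15002,
# convert_version_to_int('1.2.3-homebrew') == 1002003, and a non-numeric segment such
# as '0.15rc2' returns None.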
def check_bazel_version(min_version):
"""Check installed bazel version is at least min_version.
Args:
min_version: string for minimum bazel version.
Returns:
The bazel version detected.
"""
if which('bazel') is None:
print('Cannot find bazel. Please install bazel.')
sys.exit(0)
curr_version = run_shell(
['bazel', '--batch', '--bazelrc=/dev/null', 'version'])
for line in curr_version.split('\n'):
if 'Build label: ' in line:
curr_version = line.split('Build label: ')[1]
break
min_version_int = convert_version_to_int(min_version)
curr_version_int = convert_version_to_int(curr_version)
# Check if current bazel version can be detected properly.
if not curr_version_int:
print('WARNING: current bazel installation is not a release version.')
print('Make sure you are running at least bazel %s' % min_version)
return curr_version
print('You have bazel %s installed.' % curr_version)
if curr_version_int < min_version_int:
print('Please upgrade your bazel installation to version %s or higher to '
'build TensorFlow!' % min_version)
sys.exit(0)
return curr_version
def set_cc_opt_flags(environ_cp):
"""Set up architecture-dependent optimization flags.
Also append CC optimization flags to bazel.rc..
Args:
environ_cp: copy of the os.environ.
"""
if is_ppc64le():
# gcc on ppc64le does not support -march, use mcpu instead
default_cc_opt_flags = '-mcpu=native'
elif is_windows():
default_cc_opt_flags = '/arch:AVX'
else:
default_cc_opt_flags = '-march=native'
question = ('Please specify optimization flags to use during compilation when'
' bazel option "--config=opt" is specified [Default is %s]: '
) % default_cc_opt_flags
cc_opt_flags = get_from_env_or_user_or_default(environ_cp, 'CC_OPT_FLAGS',
question, default_cc_opt_flags)
for opt in cc_opt_flags.split():
write_to_bazelrc('build:opt --copt=%s' % opt)
# It should be safe on the same build host.
if not is_ppc64le() and not is_windows():
write_to_bazelrc('build:opt --host_copt=-march=native')
write_to_bazelrc('build:opt --define with_default_optimizations=true')
def set_tf_cuda_clang(environ_cp):
"""set TF_CUDA_CLANG action_env.
Args:
environ_cp: copy of the os.environ.
"""
question = 'Do you want to use clang as CUDA compiler?'
yes_reply = 'Clang will be used as CUDA compiler.'
no_reply = 'nvcc will be used as CUDA compiler.'
set_action_env_var(
environ_cp,
'TF_CUDA_CLANG',
None,
False,
question=question,
yes_reply=yes_reply,
no_reply=no_reply)
def set_tf_download_clang(environ_cp):
"""Set TF_DOWNLOAD_CLANG action_env."""
question = 'Do you wish to download a fresh release of clang? (Experimental)'
yes_reply = 'Clang will be downloaded and used to compile tensorflow.'
no_reply = 'Clang will not be downloaded.'
set_action_env_var(
environ_cp,
'TF_DOWNLOAD_CLANG',
None,
False,
question=question,
yes_reply=yes_reply,
no_reply=no_reply)
def get_from_env_or_user_or_default(environ_cp, var_name, ask_for_var,
var_default):
"""Get var_name either from env, or user or default.
If var_name has been set as environment variable, use the preset value, else
ask for user input. If no input is provided, the default is used.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_HDFS".
ask_for_var: string for how to ask for user input.
var_default: default value string.
Returns:
string value for var_name
"""
var = environ_cp.get(var_name)
if not var:
var = get_input(ask_for_var)
print('\n')
if not var:
var = var_default
return var
def set_clang_cuda_compiler_path(environ_cp):
"""Set CLANG_CUDA_COMPILER_PATH."""
default_clang_path = which('clang') or ''
ask_clang_path = ('Please specify which clang should be used as device and '
'host compiler. [Default is %s]: ') % default_clang_path
while True:
clang_cuda_compiler_path = get_from_env_or_user_or_default(
environ_cp, 'CLANG_CUDA_COMPILER_PATH', ask_clang_path,
default_clang_path)
if os.path.exists(clang_cuda_compiler_path):
break
# Reset and retry
print('Invalid clang path: %s cannot be found.' % clang_cuda_compiler_path)
environ_cp['CLANG_CUDA_COMPILER_PATH'] = ''
# Set CLANG_CUDA_COMPILER_PATH
environ_cp['CLANG_CUDA_COMPILER_PATH'] = clang_cuda_compiler_path
write_action_env_to_bazelrc('CLANG_CUDA_COMPILER_PATH',
clang_cuda_compiler_path)
def prompt_loop_or_load_from_env(environ_cp,
var_name,
var_default,
ask_for_var,
check_success,
error_msg,
suppress_default_error=False,
n_ask_attempts=_DEFAULT_PROMPT_ASK_ATTEMPTS):
"""Loop over user prompts for an ENV param until receiving a valid response.
For the env param var_name, read from the environment or verify user input
until receiving valid input. When done, set var_name in the environ_cp to its
new value.
Args:
environ_cp: (Dict) copy of the os.environ.
var_name: (String) string for name of environment variable, e.g. "TF_MYVAR".
var_default: (String) default value string.
ask_for_var: (String) string for how to ask for user input.
check_success: (Function) function that takes one argument and returns a
boolean. Should return True if the value provided is considered valid. May
contain a complex error message if error_msg does not provide enough
information. In that case, set suppress_default_error to True.
error_msg: (String) String with one and only one '%s'. Formatted with each
invalid response upon check_success(input) failure.
suppress_default_error: (Bool) Suppress the above error message in favor of
one from the check_success function.
n_ask_attempts: (Integer) Number of times to query for valid input before
raising an error and quitting.
Returns:
[String] The value of var_name after querying for input.
Raises:
UserInputError: if a query has been attempted n_ask_attempts times without
success, assume that the user has made a scripting error, and will
continue to provide invalid input. Raise the error to avoid infinitely
looping.
"""
default = environ_cp.get(var_name) or var_default
full_query = '%s [Default is %s]: ' % (
ask_for_var,
default,
)
for _ in range(n_ask_attempts):
val = get_from_env_or_user_or_default(environ_cp, var_name, full_query,
default)
if check_success(val):
break
if not suppress_default_error:
print(error_msg % val)
environ_cp[var_name] = ''
else:
raise UserInputError(
'Invalid %s setting was provided %d times in a row. '
'Assuming to be a scripting mistake.' % (var_name, n_ask_attempts))
environ_cp[var_name] = val
return val
def create_android_ndk_rule(environ_cp):
"""Set ANDROID_NDK_HOME and write Android NDK WORKSPACE rule."""
if is_windows() or is_cygwin():
default_ndk_path = cygpath(
'%s/Android/Sdk/ndk-bundle' % environ_cp['APPDATA'])
elif is_macos():
default_ndk_path = '%s/library/Android/Sdk/ndk-bundle' % environ_cp['HOME']
else:
default_ndk_path = '%s/Android/Sdk/ndk-bundle' % environ_cp['HOME']
def valid_ndk_path(path):
return (os.path.exists(path) and
os.path.exists(os.path.join(path, 'source.properties')))
android_ndk_home_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_NDK_HOME',
var_default=default_ndk_path,
ask_for_var='Please specify the home path of the Android NDK to use.',
check_success=valid_ndk_path,
error_msg=('The path %s or its child file "source.properties" '
'does not exist.'))
write_action_env_to_bazelrc('ANDROID_NDK_HOME', android_ndk_home_path)
write_action_env_to_bazelrc('ANDROID_NDK_API_LEVEL',
check_ndk_level(android_ndk_home_path))
def create_android_sdk_rule(environ_cp):
"""Set Android variables and write Android SDK WORKSPACE rule."""
if is_windows() or is_cygwin():
default_sdk_path = cygpath('%s/Android/Sdk' % environ_cp['APPDATA'])
elif is_macos():
default_sdk_path = '%s/library/Android/Sdk' % environ_cp['HOME']
else:
default_sdk_path = '%s/Android/Sdk' % environ_cp['HOME']
def valid_sdk_path(path):
return (os.path.exists(path) and
os.path.exists(os.path.join(path, 'platforms')) and
os.path.exists(os.path.join(path, 'build-tools')))
android_sdk_home_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_SDK_HOME',
var_default=default_sdk_path,
ask_for_var='Please specify the home path of the Android SDK to use.',
check_success=valid_sdk_path,
error_msg=('Either %s does not exist, or it does not contain the '
'subdirectories "platforms" and "build-tools".'))
platforms = os.path.join(android_sdk_home_path, 'platforms')
api_levels = sorted(os.listdir(platforms))
api_levels = [x.replace('android-', '') for x in api_levels]
def valid_api_level(api_level):
return os.path.exists(
os.path.join(android_sdk_home_path, 'platforms',
'android-' + api_level))
android_api_level = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_API_LEVEL',
var_default=api_levels[-1],
ask_for_var=('Please specify the Android SDK API level to use. '
'[Available levels: %s]') % api_levels,
check_success=valid_api_level,
error_msg='Android-%s is not present in the SDK path.')
build_tools = os.path.join(android_sdk_home_path, 'build-tools')
versions = sorted(os.listdir(build_tools))
def valid_build_tools(version):
return os.path.exists(
os.path.join(android_sdk_home_path, 'build-tools', version))
android_build_tools_version = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_BUILD_TOOLS_VERSION',
var_default=versions[-1],
ask_for_var=('Please specify an Android build tools version to use. '
'[Available versions: %s]') % versions,
check_success=valid_build_tools,
error_msg=('The selected SDK does not have build-tools version %s '
'available.'))
write_action_env_to_bazelrc('ANDROID_BUILD_TOOLS_VERSION',
android_build_tools_version)
write_action_env_to_bazelrc('ANDROID_SDK_API_LEVEL', android_api_level)
write_action_env_to_bazelrc('ANDROID_SDK_HOME', android_sdk_home_path)
def check_ndk_level(android_ndk_home_path):
"""Check the revision number of an Android NDK path."""
properties_path = '%s/source.properties' % android_ndk_home_path
if is_windows() or is_cygwin():
properties_path = cygpath(properties_path)
with open(properties_path, 'r') as f:
filedata = f.read()
revision = re.search(r'Pkg.Revision = (\d+)', filedata)
if revision:
ndk_api_level = revision.group(1)
else:
raise Exception('Unable to parse NDK revision.')
if int(ndk_api_level) not in _SUPPORTED_ANDROID_NDK_VERSIONS:
print('WARNING: The API level of the NDK in %s is %s, which is not '
'supported by Bazel (officially supported versions: %s). Please use '
'another version. Compiling Android targets may result in confusing '
'errors.\n' % (android_ndk_home_path, ndk_api_level,
_SUPPORTED_ANDROID_NDK_VERSIONS))
return ndk_api_level
def set_gcc_host_compiler_path(environ_cp):
"""Set GCC_HOST_COMPILER_PATH."""
default_gcc_host_compiler_path = which('gcc') or ''
cuda_bin_symlink = '%s/bin/gcc' % environ_cp.get('CUDA_TOOLKIT_PATH')
if os.path.islink(cuda_bin_symlink):
# os.readlink is only available in linux
default_gcc_host_compiler_path = os.path.realpath(cuda_bin_symlink)
gcc_host_compiler_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='GCC_HOST_COMPILER_PATH',
var_default=default_gcc_host_compiler_path,
ask_for_var=
'Please specify which gcc should be used by nvcc as the host compiler.',
check_success=os.path.exists,
error_msg='Invalid gcc path. %s cannot be found.',
)
write_action_env_to_bazelrc('GCC_HOST_COMPILER_PATH', gcc_host_compiler_path)
def reformat_version_sequence(version_str, sequence_count):
"""Reformat the version string to have the given number of sequences.
For example:
Given (7, 2) -> 7.0
(7.0.1, 2) -> 7.0
(5, 1) -> 5
(5.0.3.2, 1) -> 5
Args:
version_str: String, the version string.
sequence_count: int, an integer.
Returns:
string, reformatted version string.
"""
v = version_str.split('.')
if len(v) < sequence_count:
v = v + (['0'] * (sequence_count - len(v)))
return '.'.join(v[:sequence_count])
def set_tf_cuda_version(environ_cp):
"""Set CUDA_TOOLKIT_PATH and TF_CUDA_VERSION."""
ask_cuda_version = (
'Please specify the CUDA SDK version you want to use. '
'[Leave empty to default to CUDA %s]: ') % _DEFAULT_CUDA_VERSION
for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
# Configure the Cuda SDK version to use.
tf_cuda_version = get_from_env_or_user_or_default(
environ_cp, 'TF_CUDA_VERSION', ask_cuda_version, _DEFAULT_CUDA_VERSION)
tf_cuda_version = reformat_version_sequence(str(tf_cuda_version), 2)
# Find out where the CUDA toolkit is installed
default_cuda_path = _DEFAULT_CUDA_PATH
if is_windows() or is_cygwin():
default_cuda_path = cygpath(
environ_cp.get('CUDA_PATH', _DEFAULT_CUDA_PATH_WIN))
elif is_linux():
# If the default doesn't exist, try an alternative default.
if (not os.path.exists(default_cuda_path)
) and os.path.exists(_DEFAULT_CUDA_PATH_LINUX):
default_cuda_path = _DEFAULT_CUDA_PATH_LINUX
ask_cuda_path = ('Please specify the location where CUDA %s toolkit is'
' installed. Refer to README.md for more details. '
'[Default is %s]: ') % (tf_cuda_version, default_cuda_path)
cuda_toolkit_path = get_from_env_or_user_or_default(
environ_cp, 'CUDA_TOOLKIT_PATH', ask_cuda_path, default_cuda_path)
if is_windows() or is_cygwin():
cuda_toolkit_path = cygpath(cuda_toolkit_path)
if is_windows():
cuda_rt_lib_paths = ['lib/x64/cudart.lib']
elif is_linux():
cuda_rt_lib_paths = [
'%s/libcudart.so.%s' % (x, tf_cuda_version) for x in [
'lib64',
'lib/powerpc64le-linux-gnu',
'lib/x86_64-linux-gnu',
]
]
elif is_macos():
cuda_rt_lib_paths = ['lib/libcudart.%s.dylib' % tf_cuda_version]
cuda_toolkit_paths_full = [
os.path.join(cuda_toolkit_path, x) for x in cuda_rt_lib_paths
]
if any([os.path.exists(x) for x in cuda_toolkit_paths_full]):
break
# Reset and retry
print('Invalid path to CUDA %s toolkit. %s cannot be found' %
(tf_cuda_version, cuda_toolkit_paths_full))
environ_cp['TF_CUDA_VERSION'] = ''
environ_cp['CUDA_TOOLKIT_PATH'] = ''
else:
raise UserInputError('Invalid TF_CUDA_SETTING setting was provided %d '
'times in a row. Assuming to be a scripting mistake.' %
_DEFAULT_PROMPT_ASK_ATTEMPTS)
# Set CUDA_TOOLKIT_PATH and TF_CUDA_VERSION
environ_cp['CUDA_TOOLKIT_PATH'] = cuda_toolkit_path
write_action_env_to_bazelrc('CUDA_TOOLKIT_PATH', cuda_toolkit_path)
environ_cp['TF_CUDA_VERSION'] = tf_cuda_version
write_action_env_to_bazelrc('TF_CUDA_VERSION', tf_cuda_version)
def set_tf_cudnn_version(environ_cp):
"""Set CUDNN_INSTALL_PATH and TF_CUDNN_VERSION."""
ask_cudnn_version = (
'Please specify the cuDNN version you want to use. '
'[Leave empty to default to cuDNN %s.0]: ') % _DEFAULT_CUDNN_VERSION
for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
tf_cudnn_version = get_from_env_or_user_or_default(
environ_cp, 'TF_CUDNN_VERSION', ask_cudnn_version,
_DEFAULT_CUDNN_VERSION)
tf_cudnn_version = reformat_version_sequence(str(tf_cudnn_version), 1)
default_cudnn_path = environ_cp.get('CUDA_TOOLKIT_PATH')
ask_cudnn_path = (r'Please specify the location where cuDNN %s library is '
'installed. Refer to README.md for more details. [Default'
' is %s]: ') % (tf_cudnn_version, default_cudnn_path)
cudnn_install_path = get_from_env_or_user_or_default(
environ_cp, 'CUDNN_INSTALL_PATH', ask_cudnn_path, default_cudnn_path)
# Result returned from "read" will be used unexpanded. That make "~"
# unusable. Going through one more level of expansion to handle that.
cudnn_install_path = os.path.realpath(
os.path.expanduser(cudnn_install_path))
if is_windows() or is_cygwin():
cudnn_install_path = cygpath(cudnn_install_path)
if is_windows():
cuda_dnn_lib_path = 'lib/x64/cudnn.lib'
cuda_dnn_lib_alt_path = 'lib/x64/cudnn.lib'
elif is_linux():
cuda_dnn_lib_path = 'lib64/libcudnn.so.%s' % tf_cudnn_version
cuda_dnn_lib_alt_path = 'libcudnn.so.%s' % tf_cudnn_version
elif is_macos():
cuda_dnn_lib_path = 'lib/libcudnn.%s.dylib' % tf_cudnn_version
cuda_dnn_lib_alt_path = 'libcudnn.%s.dylib' % tf_cudnn_version
cuda_dnn_lib_path_full = os.path.join(cudnn_install_path, cuda_dnn_lib_path)
cuda_dnn_lib_alt_path_full = os.path.join(cudnn_install_path,
cuda_dnn_lib_alt_path)
if os.path.exists(cuda_dnn_lib_path_full) or os.path.exists(
cuda_dnn_lib_alt_path_full):
break
# Try another alternative for Linux
if is_linux():
ldconfig_bin = which('ldconfig') or '/sbin/ldconfig'
cudnn_path_from_ldconfig = run_shell([ldconfig_bin, '-p'])
cudnn_path_from_ldconfig = re.search('.*libcudnn.so .* => (.*)',
cudnn_path_from_ldconfig)
if cudnn_path_from_ldconfig:
cudnn_path_from_ldconfig = cudnn_path_from_ldconfig.group(1)
if os.path.exists(
'%s.%s' % (cudnn_path_from_ldconfig, tf_cudnn_version)):
cudnn_install_path = os.path.dirname(cudnn_path_from_ldconfig)
break
# Reset and Retry
print(
'Invalid path to cuDNN %s toolkit. None of the following files can be '
'found:' % tf_cudnn_version)
print(cuda_dnn_lib_path_full)
print(cuda_dnn_lib_alt_path_full)
if is_linux():
print('%s.%s' % (cudnn_path_from_ldconfig, tf_cudnn_version))
environ_cp['TF_CUDNN_VERSION'] = ''
else:
raise UserInputError('Invalid TF_CUDNN setting was provided %d '
'times in a row. Assuming to be a scripting mistake.' %
_DEFAULT_PROMPT_ASK_ATTEMPTS)
# Set CUDNN_INSTALL_PATH and TF_CUDNN_VERSION
environ_cp['CUDNN_INSTALL_PATH'] = cudnn_install_path
write_action_env_to_bazelrc('CUDNN_INSTALL_PATH', cudnn_install_path)
environ_cp['TF_CUDNN_VERSION'] = tf_cudnn_version
write_action_env_to_bazelrc('TF_CUDNN_VERSION', tf_cudnn_version)
def is_cuda_compatible(lib, cuda_ver, cudnn_ver):
"""Check compatibility between given library and cudnn/cudart libraries."""
ldd_bin = which('ldd') or '/usr/bin/ldd'
ldd_out = run_shell([ldd_bin, lib], True)
ldd_out = ldd_out.split(os.linesep)
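  # Illustrative ldd output lines (paths and versions are examples only) that
  # the patterns below are meant to match:
  #   libcudnn.so.7 => /usr/lib/x86_64-linux-gnu/libcudnn.so.7 (0x00007f...)
  #   libcudart.so.9.0 => /usr/local/cuda/lib64/libcudart.so.9.0 (0x00007f...)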
cudnn_pattern = re.compile('.*libcudnn.so\\.?(.*) =>.*$')
cuda_pattern = re.compile('.*libcudart.so\\.?(.*) =>.*$')
cudnn = None
cudart = None
cudnn_ok = True # assume no cudnn dependency by default
cuda_ok = True # assume no cuda dependency by default
for line in ldd_out:
if 'libcudnn.so' in line:
cudnn = cudnn_pattern.search(line)
cudnn_ok = False
elif 'libcudart.so' in line:
cudart = cuda_pattern.search(line)
cuda_ok = False
if cudnn and len(cudnn.group(1)):
cudnn = convert_version_to_int(cudnn.group(1))
if cudart and len(cudart.group(1)):
cudart = convert_version_to_int(cudart.group(1))
if cudnn is not None:
cudnn_ok = (cudnn == cudnn_ver)
if cudart is not None:
cuda_ok = (cudart == cuda_ver)
return cudnn_ok and cuda_ok
def set_tf_tensorrt_install_path(environ_cp):
"""Set TENSORRT_INSTALL_PATH and TF_TENSORRT_VERSION.
Adapted from code contributed by Sami Kama (https://github.com/samikama).
Args:
environ_cp: copy of the os.environ.
Raises:
ValueError: if this method was called under non-Linux platform.
UserInputError: if user has provided invalid input multiple times.
"""
if not is_linux():
    raise ValueError('Currently TensorRT is only supported on the Linux platform.')
# Ask user whether to add TensorRT support.
if str(int(get_var(environ_cp, 'TF_NEED_TENSORRT', 'TensorRT',
False))) != '1':
return
for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
ask_tensorrt_path = (r'Please specify the location where TensorRT is '
'installed. [Default is %s]:') % (
_DEFAULT_TENSORRT_PATH_LINUX)
trt_install_path = get_from_env_or_user_or_default(
environ_cp, 'TENSORRT_INSTALL_PATH', ask_tensorrt_path,
_DEFAULT_TENSORRT_PATH_LINUX)
    # Result returned from "read" will be used unexpanded. That makes "~"
    # unusable. Going through one more level of expansion to handle that.
trt_install_path = os.path.realpath(os.path.expanduser(trt_install_path))
def find_libs(search_path):
"""Search for libnvinfer.so in "search_path"."""
fl = set()
if os.path.exists(search_path) and os.path.isdir(search_path):
fl.update([
os.path.realpath(os.path.join(search_path, x))
for x in os.listdir(search_path)
if 'libnvinfer.so' in x
])
return fl
possible_files = find_libs(trt_install_path)
possible_files.update(find_libs(os.path.join(trt_install_path, 'lib')))
possible_files.update(find_libs(os.path.join(trt_install_path, 'lib64')))
cuda_ver = convert_version_to_int(environ_cp['TF_CUDA_VERSION'])
cudnn_ver = convert_version_to_int(environ_cp['TF_CUDNN_VERSION'])
nvinfer_pattern = re.compile('.*libnvinfer.so.?(.*)$')
highest_ver = [0, None, None]
for lib_file in possible_files:
if is_cuda_compatible(lib_file, cuda_ver, cudnn_ver):
matches = nvinfer_pattern.search(lib_file)
if len(matches.groups()) == 0:
continue
ver_str = matches.group(1)
ver = convert_version_to_int(ver_str) if len(ver_str) else 0
if ver > highest_ver[0]:
highest_ver = [ver, ver_str, lib_file]
if highest_ver[1] is not None:
trt_install_path = os.path.dirname(highest_ver[2])
tf_tensorrt_version = highest_ver[1]
break
# Try another alternative from ldconfig.
ldconfig_bin = which('ldconfig') or '/sbin/ldconfig'
ldconfig_output = run_shell([ldconfig_bin, '-p'])
search_result = re.search('.*libnvinfer.so\\.?([0-9.]*).* => (.*)',
ldconfig_output)
if search_result:
libnvinfer_path_from_ldconfig = search_result.group(2)
if os.path.exists(libnvinfer_path_from_ldconfig):
if is_cuda_compatible(libnvinfer_path_from_ldconfig, cuda_ver,
cudnn_ver):
trt_install_path = os.path.dirname(libnvinfer_path_from_ldconfig)
tf_tensorrt_version = search_result.group(1)
break
# Reset and Retry
if possible_files:
      print('TensorRT libraries found in one of the following directories',
            'are not compatible with the selected cuda and cudnn installations')
print(trt_install_path)
print(os.path.join(trt_install_path, 'lib'))
print(os.path.join(trt_install_path, 'lib64'))
if search_result:
print(libnvinfer_path_from_ldconfig)
else:
print(
'Invalid path to TensorRT. None of the following files can be found:')
print(trt_install_path)
print(os.path.join(trt_install_path, 'lib'))
print(os.path.join(trt_install_path, 'lib64'))
if search_result:
print(libnvinfer_path_from_ldconfig)
else:
raise UserInputError('Invalid TF_TENSORRT setting was provided %d '
'times in a row. Assuming to be a scripting mistake.' %
_DEFAULT_PROMPT_ASK_ATTEMPTS)
# Set TENSORRT_INSTALL_PATH and TF_TENSORRT_VERSION
environ_cp['TENSORRT_INSTALL_PATH'] = trt_install_path
write_action_env_to_bazelrc('TENSORRT_INSTALL_PATH', trt_install_path)
environ_cp['TF_TENSORRT_VERSION'] = tf_tensorrt_version
write_action_env_to_bazelrc('TF_TENSORRT_VERSION', tf_tensorrt_version)
def set_tf_nccl_install_path(environ_cp):
"""Set NCCL_INSTALL_PATH and TF_NCCL_VERSION.
Args:
environ_cp: copy of the os.environ.
Raises:
ValueError: if this method was called under non-Linux platform.
UserInputError: if user has provided invalid input multiple times.
"""
if not is_linux():
raise ValueError('Currently NCCL is only supported on Linux platforms.')
ask_nccl_version = (
'Please specify the NCCL version you want to use. If NCCL %s is not '
'installed, then you can use version 1.3 that can be fetched '
'automatically but it may have worse performance with multiple GPUs. '
'[Default is %s]: ') % (_DEFAULT_NCCL_VERSION, _DEFAULT_NCCL_VERSION)
for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
tf_nccl_version = get_from_env_or_user_or_default(
environ_cp, 'TF_NCCL_VERSION', ask_nccl_version, _DEFAULT_NCCL_VERSION)
tf_nccl_version = reformat_version_sequence(str(tf_nccl_version), 1)
if tf_nccl_version == '1':
break # No need to get install path, NCCL 1 is a GitHub repo.
# TODO(csigg): Look with ldconfig first if we can find the library in paths
# like /usr/lib/x86_64-linux-gnu and the header file in the corresponding
# include directory. This is where the NCCL .deb packages install them.
# Then ask the user if we should use that. Instead of a single
# NCCL_INSTALL_PATH, pass separate NCCL_LIB_PATH and NCCL_HDR_PATH to
# nccl_configure.bzl
default_nccl_path = environ_cp.get('CUDA_TOOLKIT_PATH')
ask_nccl_path = (r'Please specify the location where NCCL %s library is '
'installed. Refer to README.md for more details. [Default '
'is %s]:') % (tf_nccl_version, default_nccl_path)
nccl_install_path = get_from_env_or_user_or_default(
environ_cp, 'NCCL_INSTALL_PATH', ask_nccl_path, default_nccl_path)
    # Result returned from "read" will be used unexpanded. That makes "~"
    # unusable. Going through one more level of expansion to handle that.
nccl_install_path = os.path.realpath(os.path.expanduser(nccl_install_path))
if is_windows() or is_cygwin():
nccl_install_path = cygpath(nccl_install_path)
if is_windows():
nccl_lib_path = 'lib/x64/nccl.lib'
elif is_linux():
nccl_lib_path = 'lib/libnccl.so.%s' % tf_nccl_version
elif is_macos():
nccl_lib_path = 'lib/libnccl.%s.dylib' % tf_nccl_version
nccl_lib_path = os.path.join(nccl_install_path, nccl_lib_path)
nccl_hdr_path = os.path.join(nccl_install_path, 'include/nccl.h')
if os.path.exists(nccl_lib_path) and os.path.exists(nccl_hdr_path):
# Set NCCL_INSTALL_PATH
environ_cp['NCCL_INSTALL_PATH'] = nccl_install_path
write_action_env_to_bazelrc('NCCL_INSTALL_PATH', nccl_install_path)
break
# Reset and Retry
print('Invalid path to NCCL %s toolkit, %s or %s not found. Please use the '
'O/S agnostic package of NCCL 2' % (tf_nccl_version, nccl_lib_path,
nccl_hdr_path))
environ_cp['TF_NCCL_VERSION'] = ''
else:
raise UserInputError('Invalid TF_NCCL setting was provided %d '
'times in a row. Assuming to be a scripting mistake.' %
_DEFAULT_PROMPT_ASK_ATTEMPTS)
# Set TF_NCCL_VERSION
environ_cp['TF_NCCL_VERSION'] = tf_nccl_version
write_action_env_to_bazelrc('TF_NCCL_VERSION', tf_nccl_version)
def get_native_cuda_compute_capabilities(environ_cp):
"""Get native cuda compute capabilities.
Args:
environ_cp: copy of the os.environ.
Returns:
string of native cuda compute capabilities, separated by comma.
"""
device_query_bin = os.path.join(
environ_cp.get('CUDA_TOOLKIT_PATH'), 'extras/demo_suite/deviceQuery')
if os.path.isfile(device_query_bin) and os.access(device_query_bin, os.X_OK):
try:
output = run_shell(device_query_bin).split('\n')
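      # deviceQuery prints lines such as (illustrative):
      #   CUDA Capability Major/Minor version number:    7.0
      # The pattern below pulls out the "7.0" part of each such line.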
pattern = re.compile('[0-9]*\\.[0-9]*')
output = [pattern.search(x) for x in output if 'Capability' in x]
output = ','.join(x.group() for x in output if x is not None)
except subprocess.CalledProcessError:
output = ''
else:
output = ''
return output
def set_tf_cuda_compute_capabilities(environ_cp):
"""Set TF_CUDA_COMPUTE_CAPABILITIES."""
while True:
native_cuda_compute_capabilities = get_native_cuda_compute_capabilities(
environ_cp)
if not native_cuda_compute_capabilities:
default_cuda_compute_capabilities = _DEFAULT_CUDA_COMPUTE_CAPABILITIES
else:
default_cuda_compute_capabilities = native_cuda_compute_capabilities
ask_cuda_compute_capabilities = (
'Please specify a list of comma-separated '
'Cuda compute capabilities you want to '
'build with.\nYou can find the compute '
'capability of your device at: '
'https://developer.nvidia.com/cuda-gpus.\nPlease'
' note that each additional compute '
'capability significantly increases your '
'build time and binary size. [Default is: %s]: ' %
default_cuda_compute_capabilities)
tf_cuda_compute_capabilities = get_from_env_or_user_or_default(
environ_cp, 'TF_CUDA_COMPUTE_CAPABILITIES',
ask_cuda_compute_capabilities, default_cuda_compute_capabilities)
    # Check whether all capabilities from the input are valid
all_valid = True
    # Remove all whitespace characters that users may insert by accident before
    # splitting the string, as stray whitespace would otherwise cause an error.
tf_cuda_compute_capabilities = ''.join(tf_cuda_compute_capabilities.split())
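    # e.g. "3.5, 7.0" becomes "3.5,7.0" before the per-capability checks below.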
for compute_capability in tf_cuda_compute_capabilities.split(','):
      m = re.match('[0-9]+\\.[0-9]+', compute_capability)
      if not m:
        print('Invalid compute capability: %s' % compute_capability)
all_valid = False
else:
ver = int(m.group(0).split('.')[0])
if ver < 3:
print('Only compute capabilities 3.0 or higher are supported.')
all_valid = False
if all_valid:
break
# Reset and Retry
environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = ''
# Set TF_CUDA_COMPUTE_CAPABILITIES
environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = tf_cuda_compute_capabilities
write_action_env_to_bazelrc('TF_CUDA_COMPUTE_CAPABILITIES',
tf_cuda_compute_capabilities)
def set_other_cuda_vars(environ_cp):
"""Set other CUDA related variables."""
# If CUDA is enabled, always use GPU during build and test.
if environ_cp.get('TF_CUDA_CLANG') == '1':
write_to_bazelrc('build --config=cuda_clang')
write_to_bazelrc('test --config=cuda_clang')
else:
write_to_bazelrc('build --config=cuda')
write_to_bazelrc('test --config=cuda')
def set_host_cxx_compiler(environ_cp):
"""Set HOST_CXX_COMPILER."""
default_cxx_host_compiler = which('g++') or ''
host_cxx_compiler = prompt_loop_or_load_from_env(
environ_cp,
var_name='HOST_CXX_COMPILER',
var_default=default_cxx_host_compiler,
ask_for_var=('Please specify which C++ compiler should be used as the '
'host C++ compiler.'),
check_success=os.path.exists,
error_msg='Invalid C++ compiler path. %s cannot be found.',
)
write_action_env_to_bazelrc('HOST_CXX_COMPILER', host_cxx_compiler)
def set_host_c_compiler(environ_cp):
"""Set HOST_C_COMPILER."""
default_c_host_compiler = which('gcc') or ''
host_c_compiler = prompt_loop_or_load_from_env(
environ_cp,
var_name='HOST_C_COMPILER',
var_default=default_c_host_compiler,
ask_for_var=('Please specify which C compiler should be used as the host '
'C compiler.'),
check_success=os.path.exists,
error_msg='Invalid C compiler path. %s cannot be found.',
)
write_action_env_to_bazelrc('HOST_C_COMPILER', host_c_compiler)
def set_computecpp_toolkit_path(environ_cp):
"""Set COMPUTECPP_TOOLKIT_PATH."""
def toolkit_exists(toolkit_path):
"""Check if a computecpp toolkit path is valid."""
if is_linux():
sycl_rt_lib_path = 'lib/libComputeCpp.so'
else:
sycl_rt_lib_path = ''
sycl_rt_lib_path_full = os.path.join(toolkit_path, sycl_rt_lib_path)
exists = os.path.exists(sycl_rt_lib_path_full)
if not exists:
print('Invalid SYCL %s library path. %s cannot be found' %
(_TF_OPENCL_VERSION, sycl_rt_lib_path_full))
return exists
computecpp_toolkit_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='COMPUTECPP_TOOLKIT_PATH',
var_default=_DEFAULT_COMPUTECPP_TOOLKIT_PATH,
ask_for_var=(
'Please specify the location where ComputeCpp for SYCL %s is '
'installed.' % _TF_OPENCL_VERSION),
check_success=toolkit_exists,
error_msg='Invalid SYCL compiler path. %s cannot be found.',
suppress_default_error=True)
write_action_env_to_bazelrc('COMPUTECPP_TOOLKIT_PATH',
computecpp_toolkit_path)
def set_trisycl_include_dir(environ_cp):
"""Set TRISYCL_INCLUDE_DIR."""
ask_trisycl_include_dir = ('Please specify the location of the triSYCL '
'include directory. (Use --config=sycl_trisycl '
'when building with Bazel) '
'[Default is %s]: ') % (
_DEFAULT_TRISYCL_INCLUDE_DIR)
while True:
trisycl_include_dir = get_from_env_or_user_or_default(
environ_cp, 'TRISYCL_INCLUDE_DIR', ask_trisycl_include_dir,
_DEFAULT_TRISYCL_INCLUDE_DIR)
if os.path.exists(trisycl_include_dir):
break
print('Invalid triSYCL include directory, %s cannot be found' %
(trisycl_include_dir))
# Set TRISYCL_INCLUDE_DIR
environ_cp['TRISYCL_INCLUDE_DIR'] = trisycl_include_dir
write_action_env_to_bazelrc('TRISYCL_INCLUDE_DIR', trisycl_include_dir)
def set_mpi_home(environ_cp):
"""Set MPI_HOME."""
default_mpi_home = which('mpirun') or which('mpiexec') or ''
default_mpi_home = os.path.dirname(os.path.dirname(default_mpi_home))
def valid_mpi_path(mpi_home):
exists = (
os.path.exists(os.path.join(mpi_home, 'include')) and
os.path.exists(os.path.join(mpi_home, 'lib')))
if not exists:
      print('Invalid path to the MPI Toolkit. %s or %s cannot be found' %
            (os.path.join(mpi_home, 'include'),
             os.path.join(mpi_home, 'lib')))
return exists
_ = prompt_loop_or_load_from_env(
environ_cp,
var_name='MPI_HOME',
var_default=default_mpi_home,
ask_for_var='Please specify the MPI toolkit folder.',
check_success=valid_mpi_path,
error_msg='',
suppress_default_error=True)
def set_other_mpi_vars(environ_cp):
"""Set other MPI related variables."""
# Link the MPI header files
mpi_home = environ_cp.get('MPI_HOME')
symlink_force('%s/include/mpi.h' % mpi_home, 'third_party/mpi/mpi.h')
  # Determine if we use OpenMPI or MVAPICH; these require different header
  # files to be included here to keep the bazel dependency checker happy.
if os.path.exists(os.path.join(mpi_home, 'include/mpi_portable_platform.h')):
symlink_force(
os.path.join(mpi_home, 'include/mpi_portable_platform.h'),
'third_party/mpi/mpi_portable_platform.h')
# TODO(gunan): avoid editing files in configure
sed_in_place('third_party/mpi/mpi.bzl', 'MPI_LIB_IS_OPENMPI=False',
'MPI_LIB_IS_OPENMPI=True')
else:
# MVAPICH / MPICH
symlink_force(
os.path.join(mpi_home, 'include/mpio.h'), 'third_party/mpi/mpio.h')
symlink_force(
os.path.join(mpi_home, 'include/mpicxx.h'), 'third_party/mpi/mpicxx.h')
# TODO(gunan): avoid editing files in configure
sed_in_place('third_party/mpi/mpi.bzl', 'MPI_LIB_IS_OPENMPI=True',
'MPI_LIB_IS_OPENMPI=False')
if os.path.exists(os.path.join(mpi_home, 'lib/libmpi.so')):
symlink_force(
os.path.join(mpi_home, 'lib/libmpi.so'), 'third_party/mpi/libmpi.so')
else:
raise ValueError('Cannot find the MPI library file in %s/lib' % mpi_home)
def set_system_libs_flag(environ_cp):
syslibs = environ_cp.get('TF_SYSTEM_LIBS', '')
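  # Normalize TF_SYSTEM_LIBS into a sorted, comma-separated list, accepting
  # either comma- or whitespace-separated input.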
if syslibs and syslibs != '':
if ',' in syslibs:
syslibs = ','.join(sorted(syslibs.split(',')))
else:
syslibs = ','.join(sorted(syslibs.split()))
write_action_env_to_bazelrc('TF_SYSTEM_LIBS', syslibs)
if 'PREFIX' in environ_cp:
write_to_bazelrc('build --define=PREFIX=%s' % environ_cp['PREFIX'])
if 'LIBDIR' in environ_cp:
write_to_bazelrc('build --define=LIBDIR=%s' % environ_cp['LIBDIR'])
if 'INCLUDEDIR' in environ_cp:
write_to_bazelrc('build --define=INCLUDEDIR=%s' % environ_cp['INCLUDEDIR'])
def set_windows_build_flags(environ_cp):
"""Set Windows specific build options."""
# The non-monolithic build is not supported yet
write_to_bazelrc('build --config monolithic')
# Suppress warning messages
write_to_bazelrc('build --copt=-w --host_copt=-w')
# Output more verbose information when something goes wrong
write_to_bazelrc('build --verbose_failures')
  # The host and target platforms are the same in a Windows build, so we don't
  # have to distinguish them. This avoids building the same targets twice.
write_to_bazelrc('build --distinct_host_configuration=false')
# Enable short object file path to avoid long path issue on Windows.
# TODO(pcloudy): Remove this flag when upgrading Bazel to 0.16.0
# Short object file path will be enabled by default.
write_to_bazelrc('build --experimental_shortened_obj_file_path=true')
# When building zip file for some py_binary and py_test targets, don't
# include its dependencies. This is for:
# 1. Running python tests against the system installed TF pip package.
# 2. Avoiding redundant files in
# //tensorflow/tools/pip_package:simple_console_windows,
# which is a py_binary used during creating TF pip package.
# See https://github.com/tensorflow/tensorflow/issues/22390
write_to_bazelrc('build --define=no_tensorflow_py_deps=true')
if get_var(
environ_cp, 'TF_OVERRIDE_EIGEN_STRONG_INLINE', 'Eigen strong inline',
True, ('Would you like to override eigen strong inline for some C++ '
'compilation to reduce the compilation time?'),
'Eigen strong inline overridden.', 'Not overriding eigen strong inline, '
'some compilations could take more than 20 mins.'):
# Due to a known MSVC compiler issue
# https://github.com/tensorflow/tensorflow/issues/10521
# Overriding eigen strong inline speeds up the compiling of
# conv_grad_ops_3d.cc and conv_ops_3d.cc by 20 minutes,
# but this also hurts the performance. Let users decide what they want.
write_to_bazelrc('build --define=override_eigen_strong_inline=true')
def config_info_line(name, help_text):
"""Helper function to print formatted help text for Bazel config options."""
print('\t--config=%-12s\t# %s' % (name, help_text))
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--workspace',
type=str,
default=_TF_WORKSPACE_ROOT,
help='The absolute path to your active Bazel workspace.')
args = parser.parse_args()
  # Make a copy of os.environ so it is clear when functions are getting and
  # setting environment variables.
environ_cp = dict(os.environ)
check_bazel_version('0.15.0')
reset_tf_configure_bazelrc(args.workspace)
cleanup_makefile()
setup_python(environ_cp)
if is_windows():
environ_cp['TF_NEED_AWS'] = '0'
environ_cp['TF_NEED_GCP'] = '0'
environ_cp['TF_NEED_HDFS'] = '0'
environ_cp['TF_NEED_JEMALLOC'] = '0'
environ_cp['TF_NEED_KAFKA'] = '0'
environ_cp['TF_NEED_OPENCL_SYCL'] = '0'
environ_cp['TF_NEED_COMPUTECPP'] = '0'
environ_cp['TF_NEED_OPENCL'] = '0'
environ_cp['TF_CUDA_CLANG'] = '0'
environ_cp['TF_NEED_TENSORRT'] = '0'
# TODO(ibiryukov): Investigate using clang as a cpu or cuda compiler on
# Windows.
environ_cp['TF_DOWNLOAD_CLANG'] = '0'
environ_cp['TF_ENABLE_XLA'] = '0'
environ_cp['TF_NEED_MPI'] = '0'
environ_cp['TF_SET_ANDROID_WORKSPACE'] = '0'
if is_macos():
environ_cp['TF_NEED_JEMALLOC'] = '0'
environ_cp['TF_NEED_TENSORRT'] = '0'
# The numpy package on ppc64le uses OpenBLAS which has multi-threading
# issues that lead to incorrect answers. Set OMP_NUM_THREADS=1 at
# runtime to allow the Tensorflow testcases which compare numpy
# results to Tensorflow results to succeed.
if is_ppc64le():
write_action_env_to_bazelrc('OMP_NUM_THREADS', 1)
set_build_var(environ_cp, 'TF_NEED_JEMALLOC', 'jemalloc as malloc',
'with_jemalloc', True)
set_build_var(environ_cp, 'TF_NEED_GCP', 'Google Cloud Platform',
'with_gcp_support', True, 'gcp')
set_build_var(environ_cp, 'TF_NEED_HDFS', 'Hadoop File System',
'with_hdfs_support', True, 'hdfs')
set_build_var(environ_cp, 'TF_NEED_AWS', 'Amazon AWS Platform',
'with_aws_support', True, 'aws')
set_build_var(environ_cp, 'TF_NEED_KAFKA', 'Apache Kafka Platform',
'with_kafka_support', True, 'kafka')
set_build_var(environ_cp, 'TF_ENABLE_XLA', 'XLA JIT', 'with_xla_support',
False, 'xla')
set_action_env_var(environ_cp, 'TF_NEED_OPENCL_SYCL', 'OpenCL SYCL', False)
if environ_cp.get('TF_NEED_OPENCL_SYCL') == '1':
set_host_cxx_compiler(environ_cp)
set_host_c_compiler(environ_cp)
set_action_env_var(environ_cp, 'TF_NEED_COMPUTECPP', 'ComputeCPP', True)
if environ_cp.get('TF_NEED_COMPUTECPP') == '1':
set_computecpp_toolkit_path(environ_cp)
else:
set_trisycl_include_dir(environ_cp)
set_action_env_var(environ_cp, 'TF_NEED_ROCM', 'ROCm', False)
if (environ_cp.get('TF_NEED_ROCM') == '1' and
'LD_LIBRARY_PATH' in environ_cp and
environ_cp.get('LD_LIBRARY_PATH') != '1'):
write_action_env_to_bazelrc('LD_LIBRARY_PATH',
environ_cp.get('LD_LIBRARY_PATH'))
set_action_env_var(environ_cp, 'TF_NEED_CUDA', 'CUDA', False)
if (environ_cp.get('TF_NEED_CUDA') == '1' and
'TF_CUDA_CONFIG_REPO' not in environ_cp):
set_tf_cuda_version(environ_cp)
set_tf_cudnn_version(environ_cp)
if is_linux():
set_tf_tensorrt_install_path(environ_cp)
set_tf_nccl_install_path(environ_cp)
set_tf_cuda_compute_capabilities(environ_cp)
if 'LD_LIBRARY_PATH' in environ_cp and environ_cp.get(
'LD_LIBRARY_PATH') != '1':
write_action_env_to_bazelrc('LD_LIBRARY_PATH',
environ_cp.get('LD_LIBRARY_PATH'))
set_tf_cuda_clang(environ_cp)
if environ_cp.get('TF_CUDA_CLANG') == '1':
# Ask whether we should download the clang toolchain.
set_tf_download_clang(environ_cp)
if environ_cp.get('TF_DOWNLOAD_CLANG') != '1':
# Set up which clang we should use as the cuda / host compiler.
set_clang_cuda_compiler_path(environ_cp)
else:
# Use downloaded LLD for linking.
write_to_bazelrc('build:cuda_clang --config=download_clang_use_lld')
write_to_bazelrc('test:cuda_clang --config=download_clang_use_lld')
else:
# Set up which gcc nvcc should use as the host compiler
# No need to set this on Windows
if not is_windows():
set_gcc_host_compiler_path(environ_cp)
set_other_cuda_vars(environ_cp)
else:
# CUDA not required. Ask whether we should download the clang toolchain and
# use it for the CPU build.
set_tf_download_clang(environ_cp)
if environ_cp.get('TF_DOWNLOAD_CLANG') == '1':
write_to_bazelrc('build --config=download_clang')
write_to_bazelrc('test --config=download_clang')
# SYCL / ROCm / CUDA are mutually exclusive.
# At most 1 GPU platform can be configured.
gpu_platform_count = 0
if environ_cp.get('TF_NEED_OPENCL_SYCL') == '1':
gpu_platform_count += 1
if environ_cp.get('TF_NEED_ROCM') == '1':
gpu_platform_count += 1
if environ_cp.get('TF_NEED_CUDA') == '1':
gpu_platform_count += 1
if gpu_platform_count >= 2:
raise UserInputError('SYCL / CUDA / ROCm are mututally exclusive. '
'At most 1 GPU platform can be configured.')
set_build_var(environ_cp, 'TF_NEED_MPI', 'MPI', 'with_mpi_support', False)
if environ_cp.get('TF_NEED_MPI') == '1':
set_mpi_home(environ_cp)
set_other_mpi_vars(environ_cp)
set_cc_opt_flags(environ_cp)
set_system_libs_flag(environ_cp)
if is_windows():
set_windows_build_flags(environ_cp)
# Add a config option to build TensorFlow 2.0 API.
write_to_bazelrc('build:v2 --define=tf_api_version=2')
if get_var(environ_cp, 'TF_SET_ANDROID_WORKSPACE', 'android workspace', False,
('Would you like to interactively configure ./WORKSPACE for '
'Android builds?'), 'Searching for NDK and SDK installations.',
'Not configuring the WORKSPACE for Android builds.'):
create_android_ndk_rule(environ_cp)
create_android_sdk_rule(environ_cp)
# On Windows, we don't have MKL support and the build is always monolithic.
# So no need to print the following message.
# TODO(pcloudy): remove the following if check when they make sense on Windows
if not is_windows():
print('Preconfigured Bazel build configs. You can use any of the below by '
'adding "--config=<>" to your build command. See tools/bazel.rc for '
'more details.')
config_info_line('mkl', 'Build with MKL support.')
config_info_line('monolithic', 'Config for mostly static monolithic build.')
config_info_line('gdr', 'Build with GDR support.')
config_info_line('verbs', 'Build with libverbs support.')
config_info_line('ngraph', 'Build with Intel nGraph support.')
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
nodewatcher/plugins/sge.py | # Copyright 2013-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with the
# License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import shlex
import subprocess
log = logging.getLogger(__name__)
def hasJobs(hostname):
# Checking for running jobs on the node, with parallel job view expanded (-g t)
_command = ['/opt/sge/bin/lx-amd64/qstat', '-g', 't', '-l', 'hostname=%s' % hostname, '-u', '*']
# Command output
# job-ID prior name user state submit/start at queue master ja-task-ID
# ------------------------------------------------------------------------------------------------------------------
# 16 0.6 0500 job.sh ec2-user r 02/06/2019 11:06:30 [email protected] SLAVE
# [email protected] SLAVE
# [email protected] SLAVE
# [email protected] SLAVE
# 17 0.50500 STDIN ec2-user r 02/06/2019 11:06:30 [email protected] MASTER 1
# 17 0.50500 STDIN ec2-user r 02/06/2019 11:06:30 [email protected] MASTER 2
try:
_output = subprocess.Popen(_command,
stdout=subprocess.PIPE,
env=dict(
os.environ,
SGE_ROOT='/opt/sge',
PATH='/opt/sge/bin:/opt/sge/bin/lx-amd64:/bin:/usr/bin',
),
).communicate()[0]
except subprocess.CalledProcessError:
print ("Failed to run %s\n" % _command)
_output = ""
if _output == "":
_jobs = False
else:
_jobs = True
return _jobs
def hasPendingJobs():
command = "/opt/sge/bin/lx-amd64/qstat -g d -s p -u '*'"
# Command outputs the pending jobs in the queue in the following format
# job-ID prior name user state submit/start at queue slots ja-task-ID
# -----------------------------------------------------------------------------------------------------------------
# 70 0.55500 job.sh ec2-user qw 08/08/2018 22:37:24 1
# 71 0.55500 job.sh ec2-user qw 08/08/2018 22:37:24 1
# 72 0.55500 job.sh ec2-user qw 08/08/2018 22:37:25 1
# 73 0.55500 job.sh ec2-user qw 08/08/2018 22:37:25 1
_command = shlex.split(command)
error = False
has_pending = False
try:
process = subprocess.Popen(_command, env=dict(os.environ),
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
log.error("Failed to run %s\n" % command)
error = True
    if not error:
        output = process.communicate()[0]
        lines = filter(None, output.split("\n"))
        if len(lines) > 1:
            has_pending = True
return has_pending, error
def lockHost(hostname, unlock=False):
_mod = unlock and '-e' or '-d'
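    # qmod -d disables the all.q queue instance on this host (lock); -e re-enables it (unlock).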
command = ['/opt/sge/bin/lx-amd64/qmod', _mod, 'all.q@%s' % hostname]
try:
subprocess.check_call(
command,
env=dict(os.environ, SGE_ROOT='/opt/sge',
PATH='/opt/sge/bin:/opt/sge/bin/lx-amd64:/bin:/usr/bin'))
except subprocess.CalledProcessError:
log.error("Failed to run %s\n" % command)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
run_x_distill.py | import torch.utils.data
from torch.utils.tensorboard import SummaryWriter
from Dx_losses import Dx_cross_entropy
from config import make_parser
from utils.data_utils import load_dataset, make_transforms, make_dataloader, split_dataset
from utils.model_utils import make_model as _make_model
from core.ffgb_distill import FFGB_D
from core.fed_avg_distill import FEDAVG_D
from utils.logger_utils import make_evaluate_fn, make_monitor_fn, Logger
import json
import time
import os
LEARNERS = {
'ffgb_d': FFGB_D,
'fedavg_d': FEDAVG_D
}
if __name__ == '__main__':
args = make_parser()
learner = LEARNERS[args.learner]
print("#" * 30)
print("Run FFGB-D")
device_ids = [int(a) for a in args.device_ids.split(",")]
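    # Only restrict CUDA_VISIBLE_DEVICES when a real device id was given; a
    # leading -1 means "do not pin specific GPUs".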
if device_ids[0] != -1:
os.environ["CUDA_VISIBLE_DEVICES"] = f"{args.device_ids}"
device = torch.device(args.device if torch.cuda.is_available() else "cpu")
print(args)
Dx_loss = Dx_cross_entropy
loss = torch.nn.functional.cross_entropy
# 1. set saving directory
print("#"*30)
print("making saving directory")
level = args.homo_ratio if args.heterogeneity == "mix" else args.dir_level
if args.learner == "fedavg_d":
algo_config = f"_{args.fedavg_d_local_lr}_{args.fedavg_d_local_epoch}_{args.fedavg_d_weight_decay}"
elif args.learner == "ffgb_d":
algo_config = f"_{args.local_steps}_{args.functional_lr}_{args.f_l2_reg}_{args.weak_learner_epoch}_{args.weak_learner_lr}_{args.weak_learner_weight_decay}"
else:
raise NotImplementedError
experiment_setup = f"FFL_{args.heterogeneity}_{level}_{args.n_workers}_{args.n_workers_per_round}_{args.dataset}_{args.model}"
hyperparameter_setup = f"{args.learner}_{args.local_dataloader_batch_size}_{args.distill_dataloader_batch_size}" + algo_config
args.save_dir = 'output/%s/%s' % (experiment_setup, hyperparameter_setup)
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
with open(args.save_dir + '/config.json', 'w') as f:
json.dump(vars(args), f)
tb_file = args.save_dir + f'/{time.time()}'
print(f"writing to {tb_file}")
writer = SummaryWriter(tb_file)
# 2. create dataloaders
print("#" * 30)
print("making dataloders")
dataset_trn, dataset_tst, n_classes, n_channels, img_size = load_dataset(args.dataset)
dataset_distill, _, _, _, _ = load_dataset(args.dataset_distill)
transforms = make_transforms(args.dataset, train=True) # transforms for data augmentation and normalization
local_datasets = split_dataset(args, dataset_trn, transforms)
client_dataloaders = [make_dataloader(args, "train", local_dataset) for local_dataset in local_datasets]
transforms_test = make_transforms(args.dataset, train=False)
dataset_tst.transform = transforms_test
test_dataloader = make_dataloader(args, "test", dataset_tst)
transforms_distill = make_transforms(args.dataset_distill, train=True, is_distill=True)
dataset_distill.transform = transforms_distill
distill_dataloader = make_dataloader(args, "distill", dataset_distill)
# 3. create loggers
test_fn_accuracy = make_evaluate_fn(test_dataloader, device, eval_type='accuracy', n_classes=n_classes, loss_fn=loss)
statistics_monitor_fn = make_monitor_fn()
logger_accuracy = Logger(writer, test_fn_accuracy, test_metric='accuracy')
logger_monitor = Logger(writer, statistics_monitor_fn, test_metric='model_monitor')
loggers = [logger_accuracy, logger_monitor]
# 4. create model and trainer
print("#" * 30)
print("creating model and trainer")
make_model = lambda: _make_model(args, n_classes, n_channels, img_size, device)
model_init = make_model()
ffgb_d = learner(model_init, make_model, client_dataloaders, distill_dataloader, Dx_loss, loggers, args, device)
# 5. train
print("#" * 30)
print("start training")
ffgb_d.fit()
print("done training")
# 6. save model
if args.save_model:
model_file = f"./model_{args.dataset}.pth"
torch.save(ffgb_d.server_state.model.state_dict(), model_file) | []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
module/apmgormv2/apmgorm_test.go | // Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// +build go1.14
package apmgormv2_test
import (
"context"
"os"
"strings"
"testing"
"gorm.io/gorm/logger"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gorm.io/gorm"
"go.elastic.co/apm/apmtest"
mysql "go.elastic.co/apm/module/apmgormv2/driver/mysql"
sqlite "go.elastic.co/apm/module/apmgormv2/driver/sqlite"
"go.elastic.co/apm/module/apmsql"
)
type Product struct {
gorm.Model
Code string
Price uint
}
func TestWithContext(t *testing.T) {
t.Run("sqlite3", func(t *testing.T) {
testWithContext(t,
apmsql.DSNInfo{Database: ":memory:"},
sqlite.Open(":memory:"), &gorm.Config{},
)
})
if mysqlHost := os.Getenv("MYSQL_HOST"); mysqlHost == "" {
t.Logf("MYSQL_HOST not specified, skipping")
} else {
t.Run("mysql", func(t *testing.T) {
testWithContext(t,
apmsql.DSNInfo{
Address: mysqlHost,
Port: 3306,
Database: "test_db",
User: "root",
},
mysql.Open("root:hunter2@tcp("+mysqlHost+")/test_db?parseTime=true"), &gorm.Config{},
)
})
}
}
func testWithContext(t *testing.T, dsnInfo apmsql.DSNInfo, dialect gorm.Dialector, config *gorm.Config) {
_, spans, errors := apmtest.WithTransaction(func(ctx context.Context) {
db, err := gorm.Open(dialect, config)
require.NoError(t, err)
ddb, _ := db.DB()
defer ddb.Close()
db = db.WithContext(ctx)
if db.Migrator().HasTable(&Product{}) {
db.Migrator().DropTable(&Product{})
}
db.AutoMigrate(&Product{})
db.Create(&Product{Code: "L1212", Price: 1000})
var product Product
var count int64
assert.NoError(t, db.Model(&product).Count(&count).Error)
assert.Equal(t, int64(1), count)
assert.NoError(t, db.First(&product, "code = ?", "L1212").Error)
assert.NoError(t, db.Model(&product).Update("Price", 2000).Error)
assert.NoError(t, db.Delete(&product).Error) // soft
assert.NoError(t, db.Unscoped().Delete(&product).Error) // hard
})
require.NotEmpty(t, spans)
assert.Empty(t, errors)
var spanNames []string
for _, span := range spans {
if strings.Contains(span.Name, "products") && span.Action != "prepare" {
spanNames = append(spanNames, span.Name)
}
require.NotNil(t, span.Context)
require.NotNil(t, span.Context.Database)
assert.Equal(t, dsnInfo.Database, span.Context.Database.Instance)
assert.NotEmpty(t, span.Context.Database.Statement)
assert.Equal(t, "sql", span.Context.Database.Type)
assert.Equal(t, dsnInfo.User, span.Context.Database.User)
if dsnInfo.Address == "" {
assert.Nil(t, span.Context.Destination)
} else {
assert.Equal(t, dsnInfo.Address, span.Context.Destination.Address)
assert.Equal(t, dsnInfo.Port, span.Context.Destination.Port)
}
}
assert.Equal(t, []string{
"INSERT INTO products",
"SELECT FROM products", // count
"SELECT FROM products",
"UPDATE products",
"UPDATE products", // soft delete
"DELETE FROM products",
}, spanNames)
}
// TestWithContextNoTransaction checks that using WithContext without
// a transaction won't cause any issues.
func TestWithContextNoTransaction(t *testing.T) {
db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
require.NoError(t, err)
ddb, _ := db.DB()
defer ddb.Close()
db = db.WithContext(context.Background())
if db.Migrator().HasTable(&Product{}) {
db.Migrator().DropTable(&Product{})
}
db.AutoMigrate(&Product{})
db.Create(&Product{Code: "L1212", Price: 1000})
var product Product
assert.NoError(t, db.Where("code=?", "L1212").First(&product).Error)
}
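// TestWithContextNonSampled checks that no spans are reported when the
// transaction sample rate is forced to zero.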
func TestWithContextNonSampled(t *testing.T) {
os.Setenv("ELASTIC_APM_TRANSACTION_SAMPLE_RATE", "0")
defer os.Unsetenv("ELASTIC_APM_TRANSACTION_SAMPLE_RATE")
db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
require.NoError(t, err)
ddb, _ := db.DB()
defer ddb.Close()
if db.Migrator().HasTable(&Product{}) {
db.Migrator().DropTable(&Product{})
}
db.AutoMigrate(&Product{})
_, spans, _ := apmtest.WithTransaction(func(ctx context.Context) {
db = db.WithContext(ctx)
db.Create(&Product{Code: "L1212", Price: 1000})
})
require.Empty(t, spans)
}
func TestCaptureErrors(t *testing.T) {
t.Run("sqlite3", func(t *testing.T) {
db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
require.NoError(t, err)
ddb, _ := db.DB()
defer ddb.Close()
testCaptureErrors(t, db)
})
}
func testCaptureErrors(t *testing.T, db *gorm.DB) {
db.Config.Logger = logger.Default.LogMode(logger.Silent)
if db.Migrator().HasTable(&Product{}) {
db.Migrator().DropTable(&Product{})
}
db.AutoMigrate(&Product{})
_, spans, errors := apmtest.WithTransaction(func(ctx context.Context) {
db = db.WithContext(ctx)
// gorm.ErrRecordNotFound should not cause an error
db.Where("code=?", "L1212").First(&Product{})
product := Product{
Model: gorm.Model{
ID: 1001,
},
Code: "1001",
Price: 1001,
}
require.NoError(t, db.Create(&product).Error)
// invalid SQL should
db.Where("bananas").First(&Product{})
})
assert.Len(t, spans, 3)
require.Len(t, errors, 1)
assert.Regexp(t, `.*bananas.*`, errors[0].Exception.Message)
}
func TestOpenWithDriver(t *testing.T) {
db, err := gorm.Open(sqlite.Open(":memory:"), &gorm.Config{})
require.NoError(t, err)
ddb, _ := db.DB()
defer ddb.Close()
if db.Migrator().HasTable(&Product{}) {
db.Migrator().DropTable(&Product{})
}
db.AutoMigrate(&Product{})
_, spans, _ := apmtest.WithTransaction(func(ctx context.Context) {
db = db.WithContext(ctx)
db.Create(&Product{Code: "L1212", Price: 1000})
})
require.Len(t, spans, 1)
assert.Equal(t, ":memory:", spans[0].Context.Database.Instance)
}
| [
"\"MYSQL_HOST\""
]
| []
| [
"MYSQL_HOST"
]
| [] | ["MYSQL_HOST"] | go | 1 | 0 | |
socialNetwork/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'socialNetwork.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
login.py | #!/usr/bin/env python3
import os, cgi, cgitb
import secret
from http.cookies import SimpleCookie
# Python 3.7 versus Python 3.8
try:
from cgi import escape #v3.7
except:
from html import escape #v3.8
def secret_page(username=None, password=None):
"""
Returns the HTML for the page visited after the user has logged-in.
"""
if username is None or password is None:
raise ValueError("You need to pass both username and password!")
return _wrapper("""
<h1> Welcome, {username}! </h1>
<p> <small> Pst! I know your password is
<span class="spoilers"> {password}</span>.
</small>
</p>
""".format(username=escape(username.capitalize()),
password=escape(password)))
def after_login_incorrect():
"""
Returns the HTML for the page when the login credentials were typed
incorrectly.
"""
return _wrapper(r"""
<h1> Login incorrect :c </h1>
<p> Incorrect username or password (hint: <span class="spoilers"> Check
<code>secret.py</code>!</span>)
<p> <a href="login.py"> Try again. </a>
""")
def login_page():
"""
Returns the HTML for the login page.
"""
return _wrapper(r"""
<h1> Welcome! </h1>
<form method="POST" action="login.py">
<label> <span>Username:</span> <input autofocus type="text" name="username"></label> <br>
<label> <span>Password:</span> <input type="password" name="password"></label>
<button type="submit"> Login! </button>
</form>
""")
def _wrapper(page):
"""
Wraps some text in common HTML.
"""
return ("""
<!DOCTYPE HTML>
<html>
<head>
<meta charset="utf-8">
<style>
body {
font-family: -apple-system, BlinkMacSystemFont, sans-serif;
max-width: 24em;
margin: auto;
color: #333;
background-color: #fdfdfd
}
.spoilers {
color: rgba(0,0,0,0); border-bottom: 1px dashed #ccc
}
.spoilers:hover {
transition: color 250ms;
color: rgba(36, 36, 36, 1)
}
label {
display: flex;
flex-direction: row;
}
label > span {
flex: 0;
}
label> input {
flex: 1;
}
button {
font-size: larger;
float: right;
margin-top: 6px;
}
</style>
</head>
<body>
""" + page + """
</body>
</html>
""")
form = cgi.FieldStorage()
username = form.getvalue('username')
password = form.getvalue('password')
# This use of SimpleCookie was inspired by Zoe Riell; see the
# http.cookies.SimpleCookie([input]) class documented at
# https://docs.python.org/3/library/http.cookies.html
c = SimpleCookie(os.environ.get("HTTP_COOKIE", ""))
if c.get("username"):
cookies_username = c.get("username").value
if c.get("password"):
cookies_password = c.get("password").value
got_pass_from_cookie = False
try:
if (cookies_password == secret.password and cookies_username == secret.username):
print(secret_page(cookies_username,cookies_password))
got_pass_from_cookie = True
except NameError:
    # No credentials were stored in the cookie; fall through to the normal login flow.
    pass
if (username == secret.username and password == secret.password):
print("Set-Cookie:username = "+username+";/r/n")
print("Set-Cookie:password = "+password+";/r/n")
print(secret_page(username,password))
else:
if (got_pass_from_cookie == False):
print(login_page())
"""
print("Content-type:text/html\r\n\r\n")
print("<html>")
print("<head>")
print("<Title>Test CGI</Title>")
print("</head>")
print("<body>")
if (username):
if (username == secret.username):
print("Set-Cookie:User_id = "+username+";/r/n")
if (password):
if (password == secret.password):
print("Set-Cookie:User_id = "+username+";/r/n")
print(username)
print(password)
print("<p>test</p>")
print("</body>")
print("</html>")
""" | []
| []
| [
"HTTP_COOKIE"
]
| [] | ["HTTP_COOKIE"] | python | 1 | 0 | |
native_client_sdk/src/build_tools/nacl-mono-buildbot.py | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import hashlib
import json
import os
import sys
import buildbot_common
import build_utils
GS_MANIFEST_PATH = 'gs://nativeclient-mirror/nacl/nacl_sdk/'
SDK_MANIFEST = 'naclsdk_manifest2.json'
MONO_MANIFEST = 'naclmono_manifest.json'
def build_and_upload_mono(sdk_revision, pepper_revision, sdk_url,
upload_path, args):
install_dir = 'naclmono'
buildbot_common.RemoveDir(install_dir)
revision_opt = ['--sdk-revision', sdk_revision] if sdk_revision else []
url_opt = ['--sdk-url', sdk_url] if sdk_url else []
buildbot_common.Run([sys.executable, 'nacl-mono-builder.py',
'--arch', 'x86-32', '--install-dir', install_dir] +
revision_opt + url_opt + args)
buildbot_common.Run([sys.executable, 'nacl-mono-builder.py',
'--arch', 'x86-64', '--install-dir', install_dir] +
revision_opt + url_opt + args)
buildbot_common.Run([sys.executable, 'nacl-mono-archive.py',
'--upload-path', upload_path,
'--pepper-revision', pepper_revision,
'--install-dir', install_dir] + args)
def get_sdk_build_info():
'''Returns a list of dictionaries for versions of NaCl Mono to build which are
out of date compared to the SDKs available to naclsdk'''
# Get a copy of the naclsdk manifest file
buildbot_common.Run([buildbot_common.GetGsutil(), 'cp',
GS_MANIFEST_PATH + SDK_MANIFEST, '.'])
manifest_file = open(SDK_MANIFEST, 'r')
sdk_manifest = json.loads(manifest_file.read())
manifest_file.close()
pepper_infos = []
for key, value in sdk_manifest.items():
if key == 'bundles':
stabilities = ['stable', 'beta', 'dev', 'post_stable']
# Pick pepper_* bundles, need pepper_19 or greater to build Mono
bundles = filter(lambda b: (b['stability'] in stabilities
and 'pepper_' in b['name'])
and b['version'] >= 19, value)
for b in bundles:
newdict = {}
newdict['pepper_revision'] = str(b['version'])
linux_arch = filter(lambda u: u['host_os'] == 'linux', b['archives'])
newdict['sdk_url'] = linux_arch[0]['url']
newdict['sdk_revision'] = b['revision']
newdict['stability'] = b['stability']
newdict['naclmono_name'] = 'naclmono_' + newdict['pepper_revision']
pepper_infos.append(newdict)
# Get a copy of the naclmono manifest file
buildbot_common.Run([buildbot_common.GetGsutil(), 'cp',
GS_MANIFEST_PATH + MONO_MANIFEST, '.'])
manifest_file = open(MONO_MANIFEST, 'r')
mono_manifest = json.loads(manifest_file.read())
manifest_file.close()
ret = []
mono_manifest_dirty = False
# Check to see if we need to rebuild mono based on sdk revision
for key, value in mono_manifest.items():
if key == 'bundles':
for info in pepper_infos:
bundle = filter(lambda b: b['name'] == info['naclmono_name'], value)
if len(bundle) == 0:
info['naclmono_rev'] = '1'
ret.append(info)
else:
if info['sdk_revision'] != bundle[0]['sdk_revision']:
# This bundle exists in the mono manifest, bump the revision
# for the new build we're about to make.
info['naclmono_rev'] = str(bundle[0]['revision'] + 1)
ret.append(info)
elif info['stability'] != bundle[0]['stability']:
# If all that happened was the SDK bundle was promoted in stability,
# change only that and re-write the manifest
mono_manifest_dirty = True
bundle[0]['stability'] = info['stability']
# re-write the manifest here because there are no bundles to build but
# the manifest has changed
if mono_manifest_dirty and len(ret) == 0:
manifest_file = open(MONO_MANIFEST, 'w')
manifest_file.write(json.dumps(mono_manifest, sort_keys=False, indent=2))
manifest_file.close()
buildbot_common.Run([buildbot_common.GetGsutil(), 'cp', '-a', 'public-read',
MONO_MANIFEST, GS_MANIFEST_PATH + MONO_MANIFEST])
return ret
def update_mono_sdk_json(infos):
'''Update the naclmono manifest with the newly built packages'''
if len(infos) == 0:
return
manifest_file = open(MONO_MANIFEST, 'r')
mono_manifest = json.loads(manifest_file.read())
manifest_file.close()
for info in infos:
bundle = {}
bundle['name'] = info['naclmono_name']
bundle['description'] = 'Mono for Native Client'
bundle['stability'] = info['stability']
bundle['recommended'] = 'no'
bundle['version'] = 'experimental'
archive = {}
sha1_hash = hashlib.sha1()
f = open(info['naclmono_name'] + '.bz2', 'rb')
sha1_hash.update(f.read())
archive['size'] = f.tell()
f.close()
archive['checksum'] = { 'sha1': sha1_hash.hexdigest() }
archive['host_os'] = 'all'
archive['url'] = ('https://commondatastorage.googleapis.com/'
'nativeclient-mirror/nacl/nacl_sdk/%s/%s/%s.bz2'
% (info['naclmono_name'], info['naclmono_rev'],
info['naclmono_name']))
bundle['archives'] = [archive]
bundle['revision'] = int(info['naclmono_rev'])
bundle['sdk_revision'] = int(info['sdk_revision'])
# Insert this new bundle into the manifest,
# probably overwriting an existing bundle.
for key, value in mono_manifest.items():
if key == 'bundles':
existing = filter(lambda b: b['name'] == info['naclmono_name'], value)
if len(existing) > 0:
loc = value.index(existing[0])
value[loc] = bundle
else:
value.append(bundle)
# Write out the file locally, then upload to its known location.
manifest_file = open(MONO_MANIFEST, 'w')
manifest_file.write(json.dumps(mono_manifest, sort_keys=False, indent=2))
manifest_file.close()
buildbot_common.Run([buildbot_common.GetGsutil(), 'cp', '-a', 'public-read',
MONO_MANIFEST, GS_MANIFEST_PATH + MONO_MANIFEST])
def main(args):
args = args[1:]
buildbot_revision = os.environ.get('BUILDBOT_REVISION', '')
buildername = os.environ.get('BUILDBOT_BUILDERNAME', '')
os.chdir(buildbot_common.SCRIPT_DIR)
if buildername == 'linux-sdk-mono32':
assert buildbot_revision
sdk_revision = buildbot_revision.split(':')[0]
pepper_revision = build_utils.ChromeMajorVersion()
build_and_upload_mono(sdk_revision, pepper_revision, None,
'trunk.' + sdk_revision, args)
elif buildername == 'linux-sdk-mono64':
infos = get_sdk_build_info()
for info in infos:
# This will put the file in naclmono_19/1/naclmono_19.bz2 for example.
upload_path = info['naclmono_name'] + '/' + info['naclmono_rev']
build_and_upload_mono(None, info['pepper_revision'], info['sdk_url'],
upload_path, args)
update_mono_sdk_json(infos)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| []
| []
| [
"BUILDBOT_REVISION",
"BUILDBOT_BUILDERNAME"
]
| [] | ["BUILDBOT_REVISION", "BUILDBOT_BUILDERNAME"] | python | 2 | 0 | |
e2etest/pkg/executor/executor.go | // SPDX-License-Identifier:Apache-2.0
package executor
import (
"os"
"os/exec"
"k8s.io/kubernetes/test/e2e/framework"
)
type Executor interface {
Exec(cmd string, args ...string) (string, error)
}
type hostExecutor struct{}
var (
Host hostExecutor
ContainerRuntime = "docker"
)
func init() {
if cr := os.Getenv("CONTAINER_RUNTIME"); len(cr) != 0 {
ContainerRuntime = cr
}
}
func (hostExecutor) Exec(cmd string, args ...string) (string, error) {
out, err := exec.Command(cmd, args...).CombinedOutput()
return string(out), err
}
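// ForContainer returns an Executor that runs commands inside the named
// container using the configured container runtime.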
func ForContainer(containerName string) Executor {
return &containerExecutor{container: containerName}
}
type containerExecutor struct {
container string
}
func (e *containerExecutor) Exec(cmd string, args ...string) (string, error) {
newArgs := append([]string{"exec", e.container, cmd}, args...)
out, err := exec.Command(ContainerRuntime, newArgs...).CombinedOutput()
return string(out), err
}
type podExecutor struct {
namespace string
name string
container string
}
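// ForPod returns an Executor that runs commands in the given container of a
// pod via kubectl exec.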
func ForPod(namespace, name, container string) *podExecutor {
return &podExecutor{
namespace: namespace,
name: name,
container: container,
}
}
func (p *podExecutor) Exec(cmd string, args ...string) (string, error) {
fullArgs := append([]string{"exec", p.name, "-c", p.container, "--", cmd}, args...)
res, err := framework.RunKubectl(p.namespace, fullArgs...)
if err != nil {
return "", err
}
return res, nil
}
| [
"\"CONTAINER_RUNTIME\""
]
| []
| [
"CONTAINER_RUNTIME"
]
| [] | ["CONTAINER_RUNTIME"] | go | 1 | 0 | |
3_Inference/Detector.py | import os
import sys
def get_parent_dir(n=1):
"""returns the n-th parent dicrectory of the current
working directory"""
current_path = os.path.dirname(os.path.abspath(__file__))
for _ in range(n):
current_path = os.path.dirname(current_path)
return current_path
src_path = os.path.join(get_parent_dir(1), "2_Training", "src")
utils_path = os.path.join(get_parent_dir(1), "Utils")
sys.path.append(src_path)
sys.path.append(utils_path)
import argparse
from keras_yolo3.yolo import YOLO, detect_video, detect_webcam
from PIL import Image
from timeit import default_timer as timer
from utils import load_extractor_model, load_features, parse_input, detect_object
import test
import utils
import pandas as pd
import numpy as np
from Get_File_Paths import GetFileList
import random
from Train_Utils import get_anchors
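# Suppress TensorFlow C++ log output (3 = show only fatal errors).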
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
# Set up folder names for default values
data_folder = os.path.join(get_parent_dir(n=1), "Data")
image_folder = os.path.join(data_folder, "Source_Images")
image_test_folder = os.path.join(image_folder, "Test_Images")
detection_results_folder = os.path.join(image_folder, "Test_Image_Detection_Results")
detection_results_file = os.path.join(detection_results_folder, "Detection_Results.csv")
model_folder = os.path.join(data_folder, "Model_Weights")
model_weights = os.path.join(model_folder, "trained_weights_final.h5")
model_classes = os.path.join(model_folder, "data_classes.txt")
anchors_path = os.path.join(src_path, "keras_yolo3", "model_data", "yolo_anchors.txt")
FLAGS = None
if __name__ == "__main__":
# Delete all default flags
parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
"""
Command line options
"""
parser.add_argument(
"--input_path",
type=str,
default=image_test_folder,
help="Path to image/video directory. All subdirectories will be included. Default is "
+ image_test_folder,
)
parser.add_argument(
"--output",
type=str,
default=detection_results_folder,
help="Output path for detection results. Default is "
+ detection_results_folder,
)
parser.add_argument(
"--no_save_img",
default=False,
action="store_true",
help="Only save bounding box coordinates but do not save output images with annotated boxes. Default is False.",
)
parser.add_argument(
"--file_types",
"--names-list",
nargs="*",
default=[],
help="Specify list of file types to include. Default is --file_types .jpg .jpeg .png .mp4",
)
parser.add_argument(
"--yolo_model",
type=str,
dest="model_path",
default=model_weights,
help="Path to pre-trained weight files. Default is " + model_weights,
)
parser.add_argument(
"--anchors",
type=str,
dest="anchors_path",
default=anchors_path,
help="Path to YOLO anchors. Default is " + anchors_path,
)
parser.add_argument(
"--classes",
type=str,
dest="classes_path",
default=model_classes,
help="Path to YOLO class specifications. Default is " + model_classes,
)
parser.add_argument(
"--gpu_num", type=int, default=1, help="Number of GPU to use. Default is 1"
)
parser.add_argument(
"--confidence",
type=float,
dest="score",
default=0.25,
help="Threshold for YOLO object confidence score to show predictions. Default is 0.25.",
)
parser.add_argument(
"--box_file",
type=str,
dest="box",
default=detection_results_file,
help="File to save bounding box results to. Default is "
+ detection_results_file,
)
parser.add_argument(
"--postfix",
type=str,
dest="postfix",
default="_labeled_img",
help='Specify the postfix for images with bounding boxes. Default is "_labeled_img"',
)
parser.add_argument(
"--is_tiny",
default=False,
action="store_true",
help="Use the tiny Yolo version for better performance and less accuracy. Default is False.",
)
parser.add_argument(
"--webcam",
default=False,
action="store_true",
help="Use webcam for real-time detection. Default is False.",
)
FLAGS = parser.parse_args()
save_img = not FLAGS.no_save_img
file_types = FLAGS.file_types
webcam_active = FLAGS.webcam
if file_types:
input_paths = GetFileList(FLAGS.input_path, endings=file_types)
else:
input_paths = GetFileList(FLAGS.input_path)
# Split images and videos
img_endings = (".jpg", ".jpeg", ".png")
vid_endings = (".mp4", ".mpeg", ".mpg", ".avi")
input_image_paths = []
input_video_paths = []
for item in input_paths:
if item.endswith(img_endings):
input_image_paths.append(item)
elif item.endswith(vid_endings):
input_video_paths.append(item)
output_path = FLAGS.output
if not os.path.exists(output_path):
os.makedirs(output_path)
if FLAGS.is_tiny and FLAGS.anchors_path == anchors_path:
anchors_path = os.path.join(
os.path.dirname(FLAGS.anchors_path), "yolo-tiny_anchors.txt"
)
anchors = get_anchors(anchors_path)
# define YOLO detector
yolo = YOLO(
**{
"model_path": FLAGS.model_path,
"anchors_path": anchors_path,
"classes_path": FLAGS.classes_path,
"score": FLAGS.score,
"gpu_num": FLAGS.gpu_num,
"model_image_size": (416, 416),
}
)
# Make a dataframe for the prediction outputs
out_df = pd.DataFrame(
columns=[
"image",
"image_path",
"xmin",
"ymin",
"xmax",
"ymax",
"label",
"confidence",
"x_size",
"y_size",
]
)
# labels to draw on images
class_file = open(FLAGS.classes_path, "r")
input_labels = [line.rstrip("\n") for line in class_file.readlines()]
print("Found {} input labels: {} ...".format(len(input_labels), input_labels))
if input_image_paths and not webcam_active:
print(
"Found {} input images: {} ...".format(
len(input_image_paths),
[os.path.basename(f) for f in input_image_paths[:5]],
)
)
start = timer()
text_out = ""
# This is for images
for i, img_path in enumerate(input_image_paths):
print(img_path)
prediction, image = detect_object(
yolo,
img_path,
save_img=save_img,
save_img_path=FLAGS.output,
postfix=FLAGS.postfix,
)
y_size, x_size, _ = np.array(image).shape
for single_prediction in prediction:
out_df = out_df.append(
pd.DataFrame(
[
[
os.path.basename(img_path.rstrip("\n")),
img_path.rstrip("\n"),
]
+ single_prediction
+ [x_size, y_size]
],
columns=[
"image",
"image_path",
"xmin",
"ymin",
"xmax",
"ymax",
"label",
"confidence",
"x_size",
"y_size",
],
)
)
end = timer()
print(
"Processed {} images in {:.1f}sec - {:.1f}FPS".format(
len(input_image_paths),
end - start,
len(input_image_paths) / (end - start),
)
)
out_df.to_csv(FLAGS.box, index=False)
# This is for videos
# for pre-recorded videos present in the Test_Images folder
if input_video_paths and not webcam_active:
print(
"Found {} input videos: {} ...".format(
len(input_video_paths),
[os.path.basename(f) for f in input_video_paths[:5]],
)
)
start = timer()
for i, vid_path in enumerate(input_video_paths):
output_path = os.path.join(
FLAGS.output,
os.path.basename(vid_path).replace(".", FLAGS.postfix + "."),
)
detect_video(yolo, vid_path, output_path=output_path)
end = timer()
print(
"Processed {} videos in {:.1f}sec".format(
len(input_video_paths), end - start
)
)
# for Webcam
if webcam_active:
start = timer()
detect_webcam(yolo)
end = timer()
print("Processed from webcam for {:.1f}sec".format(end - start))
# Close the current yolo session
yolo.close_session()
| []
| []
| [
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["TF_CPP_MIN_LOG_LEVEL"] | python | 1 | 0 | |
packages/cli/internal/pkg/aws/cdk/execute_cdk_test.go | package cdk
import (
"fmt"
"os"
"os/exec"
"testing"
iomocks "github.com/aws/amazon-genomics-cli/internal/pkg/mocks/io"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
)
const (
testExecuteCommandSuccessArg = "test-execute-command-success-arg"
testExecuteCommandFailureArg = "test-execute-command-failure-arg"
testExecuteCommandProgressLine = " 3/10 |4:56:17 PM | CREATE_COMPLETE | AWS::IAM::Policy | TaskBatch/BatchRole/DefaultPolicy (TaskBatchBatchRoleDefaultPolicyB9AAE3A1)"
)
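// fakeExecCommand stands in for execCommand during tests: instead of running the
// real "npm run cdk" process it re-invokes the current test binary with
// -test.run=TestHelperProcess, letting TestHelperProcess emulate the child
// process's output and exit code.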
func fakeExecCommand(command string, args ...string) *exec.Cmd {
cs := []string{"-test.run=TestHelperProcess", "--", command}
cs = append(cs, args...)
cmd := exec.Command(os.Args[0], cs...)
cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
return cmd
}
type ExecuteCdkCommandTestSuite struct {
suite.Suite
osRemoveAllOrig func(string) error
execCommandOrig func(command string, args ...string) *exec.Cmd
ctrl *gomock.Controller
mockOs *iomocks.MockOS
appDir string
tmpDir string
}
func TestExecuteCdkCommandTestSuite(t *testing.T) {
suite.Run(t, new(ExecuteCdkCommandTestSuite))
}
func (s *ExecuteCdkCommandTestSuite) SetupTest() {
s.ctrl = gomock.NewController(s.T())
s.mockOs = iomocks.NewMockOS(s.ctrl)
s.osRemoveAllOrig = osRemoveAll
s.execCommandOrig = execCommand
osRemoveAll = s.mockOs.RemoveAll
execCommand = fakeExecCommand
s.appDir = s.T().TempDir()
s.tmpDir = "/test/tmp/dir"
}
func (s *ExecuteCdkCommandTestSuite) AfterTest(_, _ string) {
s.ctrl.Finish()
}
func (s *ExecuteCdkCommandTestSuite) TearDownTest() {
osRemoveAll = s.osRemoveAllOrig
execCommand = s.execCommandOrig
}
func (s *ExecuteCdkCommandTestSuite) TestExecuteCdkCommand_Success() {
s.mockOs.EXPECT().RemoveAll(gomock.Any()).Return(nil).Times(0)
progressStream, err := executeCdkCommand(s.appDir, []string{testExecuteCommandSuccessArg})
s.Require().NoError(err)
event1 := <-progressStream
s.Assert().Equal(3, event1.CurrentStep)
s.Assert().Equal(10, event1.TotalSteps)
s.Assert().Equal(testExecuteCommandProgressLine, event1.Outputs[0])
event2 := <-progressStream
s.Assert().NoError(event2.Err)
waitForChanToClose(progressStream)
}
func (s *ExecuteCdkCommandTestSuite) TestExecuteCdkCommandAndCleanupDirectory_Success() {
s.mockOs.EXPECT().RemoveAll(s.tmpDir).Return(nil).Times(1)
progressStream, err := executeCdkCommandAndCleanupDirectory(s.appDir, []string{testExecuteCommandSuccessArg}, s.tmpDir)
s.Require().NoError(err)
event1 := <-progressStream
s.Assert().Equal(3, event1.CurrentStep)
s.Assert().Equal(10, event1.TotalSteps)
s.Assert().Equal(testExecuteCommandProgressLine, event1.Outputs[0])
event2 := <-progressStream
s.Assert().NoError(event2.Err)
waitForChanToClose(progressStream)
}
func (s *ExecuteCdkCommandTestSuite) TestExecuteCdkCommandAndCleanupDirectory_Failure() {
s.mockOs.EXPECT().RemoveAll(s.tmpDir).Return(nil).Times(1)
progressStream, err := executeCdkCommandAndCleanupDirectory(s.appDir, []string{testExecuteCommandFailureArg}, s.tmpDir)
s.Require().NoError(err)
event1 := <-progressStream
s.Assert().Equal(testExecuteCommandFailureArg, event1.Outputs[0])
event2 := <-progressStream
s.Assert().Error(event2.Err)
waitForChanToClose(progressStream)
}
func (s *ExecuteCdkCommandTestSuite) TestExecuteCdkCommandAndCleanupDirectory_FailToExecute() {
s.mockOs.EXPECT().RemoveAll(s.tmpDir).Return(nil).Times(1)
progressStream, err := executeCdkCommandAndCleanupDirectory("foo/bar", []string{testExecuteCommandFailureArg}, s.tmpDir)
s.Assert().Error(err)
s.Assert().Nil(progressStream)
}
func (s *ExecuteCdkCommandTestSuite) TestExecuteCdkCommand_Failure() {
s.mockOs.EXPECT().RemoveAll(gomock.Any()).Return(nil).Times(0)
progressStream, err := executeCdkCommand(s.appDir, []string{testExecuteCommandFailureArg})
s.Require().NoError(err)
event1 := <-progressStream
s.Assert().Equal(testExecuteCommandFailureArg, event1.Outputs[0])
event2 := <-progressStream
s.Assert().Error(event2.Err)
waitForChanToClose(progressStream)
}
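// TestHelperProcess is not a regular test: it only does anything when re-invoked
// by fakeExecCommand (guarded by GO_WANT_HELPER_PROCESS) and plays the role of
// the cdk child process, emitting canned stdout/stderr and exiting with the
// appropriate status code.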
func TestHelperProcess(t *testing.T) {
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
args := os.Args
for len(args) > 0 {
if args[0] == "--" {
args = args[1:]
break
}
args = args[1:]
}
require.GreaterOrEqual(t, len(args), 5)
assert.Equal(t, "npm", args[0])
assert.Equal(t, "run", args[1])
assert.Equal(t, "cdk", args[2])
assert.Equal(t, "--", args[3])
testArg := args[4]
switch testArg {
case testExecuteCommandSuccessArg:
fmt.Fprint(os.Stdout, "some line")
fmt.Fprint(os.Stderr, testExecuteCommandProgressLine)
os.Exit(0)
case testExecuteCommandFailureArg:
fmt.Fprint(os.Stdout, "some line")
fmt.Fprint(os.Stderr, testExecuteCommandFailureArg)
os.Exit(1)
default:
fmt.Fprint(os.Stderr, "Unknown failure")
os.Exit(1)
}
}
func waitForChanToClose(channel ProgressStream) {
for range channel {
}
}
| [
"\"GO_WANT_HELPER_PROCESS\""
]
| []
| [
"GO_WANT_HELPER_PROCESS"
]
| [] | ["GO_WANT_HELPER_PROCESS"] | go | 1 | 0 | |
python/ambassador/ir/irhost.py | from typing import Optional, TYPE_CHECKING
import os
from ..utils import SavedSecret
from ..config import Config
from .irresource import IRResource
from .irtlscontext import IRTLSContext
if TYPE_CHECKING:
from .ir import IR
class IRHost(IRResource):
AllowedKeys = {
'acmeProvider',
'hostname',
'matchLabels',
'requestPolicy',
'selector',
'tlsSecret',
}
def __init__(self, ir: 'IR', aconf: Config,
rkey: str, # REQUIRED
name: str, # REQUIRED
location: str, # REQUIRED
namespace: Optional[str]=None,
kind: str="IRHost",
apiVersion: str="getambassador.io/v2", # Not a typo! See below.
**kwargs) -> None:
new_args = {
x: kwargs[x] for x in kwargs.keys()
if x in IRHost.AllowedKeys
}
self.context: Optional[IRTLSContext] = None
super().__init__(
ir=ir, aconf=aconf, rkey=rkey, location=location,
kind=kind, name=name, namespace=namespace, apiVersion=apiVersion,
**new_args
)
def setup(self, ir: 'IR', aconf: Config) -> bool:
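        # Resolve this Host's TLS secret (creating a matching TLSContext if one
        # doesn't already exist) and, when an ACME provider is configured, make
        # sure insecure.additionalPort is set, since ACME requires it.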
ir.logger.debug(f"Host {self.name} setting up")
tls_ss: Optional[SavedSecret] = None
pkey_ss: Optional[SavedSecret] = None
if self.get('tlsSecret', None):
tls_secret = self.tlsSecret
tls_name = tls_secret.get('name', None)
if tls_name:
ir.logger.debug(f"Host {self.name}: TLS secret name is {tls_name}")
tls_ss = self.resolve(ir, tls_name)
if tls_ss:
# OK, we have a TLS secret! Fire up a TLS context for it, if one doesn't
# already exist.
ctx_name = f"{self.name}-context"
if ir.has_tls_context(ctx_name):
ir.logger.debug(f"Host {self.name}: TLSContext {ctx_name} already exists")
else:
ir.logger.debug(f"Host {self.name}: creating TLSContext {ctx_name}")
new_ctx = dict(
rkey=self.rkey,
name=ctx_name,
namespace=self.namespace,
location=self.location,
hosts=[ self.hostname or self.name ],
secret=tls_name
)
ctx = IRTLSContext(ir, aconf, **new_ctx)
match_labels = self.get('matchLabels')
if not match_labels:
match_labels = self.get('match_labels')
if match_labels:
ctx['metadata_labels'] = match_labels
if ctx.is_active():
self.context = ctx
ctx.referenced_by(self)
ctx.sourced_by(self)
ir.save_tls_context(ctx)
else:
ir.logger.error(f"Host {self.name}: new TLSContext {ctx_name} is not valid")
else:
ir.logger.error(f"Host {self.name}: continuing with invalid TLS secret {tls_name}")
return False
if self.get('acmeProvider', None):
acme = self.acmeProvider
# The ACME client is disabled if we're running as an intercept agent.
if ir.edge_stack_allowed and not ir.agent_active:
authority = acme.get('authority', None)
if authority and (authority.lower() != 'none'):
# ACME is active. Are they trying to not set insecure.additionalPort?
request_policy = self.get('requestPolicy', {})
insecure_policy = request_policy.get('insecure', {})
# Default the additionalPort to 8080. This can be overridden by the user
# explicitly setting it to -1.
insecure_addl_port = insecure_policy.get('additionalPort', 8080)
if insecure_addl_port < 0:
# Bzzzt.
self.post_error("ACME requires insecure.additionalPort to function; forcing to 8080")
insecure_policy['additionalPort'] = 8080
if 'action' not in insecure_policy:
# No action when we're overriding the additionalPort already means that we
# default the action to Reject (the hole-puncher will do the right thing).
insecure_policy['action'] = 'Reject'
request_policy['insecure'] = insecure_policy
self['requestPolicy'] = request_policy
pkey_secret = acme.get('privateKeySecret', None)
if pkey_secret:
pkey_name = pkey_secret.get('name', None)
if pkey_name:
ir.logger.debug(f"Host {self.name}: ACME private key name is {pkey_name}")
pkey_ss = self.resolve(ir, pkey_name)
if not pkey_ss:
ir.logger.error(f"Host {self.name}: continuing with invalid private key secret {pkey_name}")
ir.logger.debug(f"Host setup OK: {self.pretty()}")
return True
def pretty(self) -> str:
request_policy = self.get('requestPolicy', {})
insecure_policy = request_policy.get('insecure', {})
insecure_action = insecure_policy.get('action', 'Redirect')
insecure_addl_port = insecure_policy.get('additionalPort', None)
ctx_name = self.context.name if self.context else "-none-"
return "<Host %s for %s ctx %s ia %s iap %s>" % (self.name, self.hostname or '*', ctx_name,
insecure_action, insecure_addl_port)
def resolve(self, ir: 'IR', secret_name: str) -> SavedSecret:
# Try to use our namespace for secret resolution. If we somehow have no
# namespace, fall back to the Ambassador's namespace.
namespace = self.namespace or ir.ambassador_namespace
return ir.resolve_secret(self, secret_name, namespace)
class HostFactory:
@classmethod
def load_all(cls, ir: 'IR', aconf: Config) -> None:
assert ir
hosts = aconf.get_config('hosts')
if hosts:
for config in hosts.values():
ir.logger.debug("HostFactory: creating host for %s" % repr(config.as_dict()))
host = IRHost(ir, aconf, **config)
if host.is_active():
host.referenced_by(config)
host.sourced_by(config)
ir.logger.debug(f"HostFactory: saving host {host.pretty()}")
ir.save_host(host)
else:
ir.logger.debug(f"HostFactory: not saving inactive host {host.pretty()}")
@classmethod
def finalize(cls, ir: 'IR', aconf: Config) -> None:
if ir.edge_stack_allowed:
# We're running Edge Stack. Figure out how many hosts we have, and whether
# we have any termination contexts.
#
# If we're running as an intercept agent, there should be a Host in all cases.
host_count = len(ir.get_hosts() or [])
contexts = ir.get_tls_contexts() or []
found_termination_context = False
for ctx in contexts:
if ctx.get('hosts'): # not None and not the empty list
found_termination_context = True
ir.logger.debug(f"HostFactory: FTC {found_termination_context}, host_count {host_count}")
if (host_count == 0) and not found_termination_context:
# We have no Hosts and no termination contexts, so we know that this is an unconfigured
# installation. Set up the fallback TLSContext so we can redirect people to the UI.
ir.logger.debug("Creating fallback context")
ctx_name = "fallback-self-signed-context"
tls_name = "fallback-self-signed-cert"
new_ctx = dict(
rkey=f"{ctx_name}.99999",
name=ctx_name,
location="-internal-",
hosts=["*"],
secret=tls_name,
is_fallback=True
)
if not os.environ.get('AMBASSADOR_NO_TLS_REDIRECT', None):
new_ctx['redirect_cleartext_from'] = 8080
ctx = IRTLSContext(ir, aconf, **new_ctx)
assert ctx.is_active()
if ctx.resolve_secret(tls_name):
ir.save_tls_context(ctx)
| []
| []
| [
"AMBASSADOR_NO_TLS_REDIRECT"
]
| [] | ["AMBASSADOR_NO_TLS_REDIRECT"] | python | 1 | 0 | |
pkg/platform/api/headchef/headchef_models/v1_build_request.go | // Code generated by go-swagger; DO NOT EDIT.
package headchef_models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"encoding/json"
"strconv"
strfmt "github.com/go-openapi/strfmt"
"github.com/go-openapi/errors"
"github.com/go-openapi/swag"
"github.com/go-openapi/validate"
)
// V1BuildRequest Build Request V1
//
// A build request (v1) which is submitted to the Head Chef REST API. A build request may contain either a full recipe or just the ID of a recipe stored in the inventory API.
// swagger:model v1BuildRequest
type V1BuildRequest struct {
// Additional metadata about the build which should be included in any metrics about the build. This field has no schema validation and is passed through without parsing by Head Chef.
Annotations interface{} `json:"annotations,omitempty"`
// The version of camel to use when running setup-builds.pl. NOTE: this is temporary until the camel version is included in the recipe.
CamelCommit string `json:"camel_commit,omitempty"`
// OBSOLETE. Head Chef still accepts this field to not break old clients but does nothing with these values. Use the camel_flags field in the recipe.
// Unique: true
CamelFlags []string `json:"camel_flags"`
// NOTE: This field is deprecated and will be removed in the future.
// Enum: [7zip dmg msi raw tarball zip]
Format *string `json:"format,omitempty"`
// recipe
Recipe *V1BuildRequestRecipe `json:"recipe,omitempty"`
// The ID of a recipe solved using the inventory API solutions endpoint
// Format: uuid
RecipeID strfmt.UUID `json:"recipe_id,omitempty"`
// requester
Requester *V1BuildRequestRequester `json:"requester,omitempty"`
}
// Validate validates this v1 build request
func (m *V1BuildRequest) Validate(formats strfmt.Registry) error {
var res []error
if err := m.validateCamelFlags(formats); err != nil {
res = append(res, err)
}
if err := m.validateFormat(formats); err != nil {
res = append(res, err)
}
if err := m.validateRecipe(formats); err != nil {
res = append(res, err)
}
if err := m.validateRecipeID(formats); err != nil {
res = append(res, err)
}
if err := m.validateRequester(formats); err != nil {
res = append(res, err)
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
var v1BuildRequestCamelFlagsItemsEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["--dynamic-core","--python-debug","--tcl-debug","--tcl-disable-threads"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
v1BuildRequestCamelFlagsItemsEnum = append(v1BuildRequestCamelFlagsItemsEnum, v)
}
}
func (m *V1BuildRequest) validateCamelFlagsItemsEnum(path, location string, value string) error {
if err := validate.Enum(path, location, value, v1BuildRequestCamelFlagsItemsEnum); err != nil {
return err
}
return nil
}
func (m *V1BuildRequest) validateCamelFlags(formats strfmt.Registry) error {
if swag.IsZero(m.CamelFlags) { // not required
return nil
}
if err := validate.UniqueItems("camel_flags", "body", m.CamelFlags); err != nil {
return err
}
for i := 0; i < len(m.CamelFlags); i++ {
// value enum
if err := m.validateCamelFlagsItemsEnum("camel_flags"+"."+strconv.Itoa(i), "body", m.CamelFlags[i]); err != nil {
return err
}
}
return nil
}
var v1BuildRequestTypeFormatPropEnum []interface{}
func init() {
var res []string
if err := json.Unmarshal([]byte(`["7zip","dmg","msi","raw","tarball","zip"]`), &res); err != nil {
panic(err)
}
for _, v := range res {
v1BuildRequestTypeFormatPropEnum = append(v1BuildRequestTypeFormatPropEnum, v)
}
}
const (
// V1BuildRequestFormatNr7zip captures enum value "7zip"
V1BuildRequestFormatNr7zip string = "7zip"
// V1BuildRequestFormatDmg captures enum value "dmg"
V1BuildRequestFormatDmg string = "dmg"
// V1BuildRequestFormatMsi captures enum value "msi"
V1BuildRequestFormatMsi string = "msi"
// V1BuildRequestFormatRaw captures enum value "raw"
V1BuildRequestFormatRaw string = "raw"
// V1BuildRequestFormatTarball captures enum value "tarball"
V1BuildRequestFormatTarball string = "tarball"
// V1BuildRequestFormatZip captures enum value "zip"
V1BuildRequestFormatZip string = "zip"
)
// prop value enum
func (m *V1BuildRequest) validateFormatEnum(path, location string, value string) error {
if err := validate.Enum(path, location, value, v1BuildRequestTypeFormatPropEnum); err != nil {
return err
}
return nil
}
func (m *V1BuildRequest) validateFormat(formats strfmt.Registry) error {
if swag.IsZero(m.Format) { // not required
return nil
}
// value enum
if err := m.validateFormatEnum("format", "body", *m.Format); err != nil {
return err
}
return nil
}
func (m *V1BuildRequest) validateRecipe(formats strfmt.Registry) error {
if swag.IsZero(m.Recipe) { // not required
return nil
}
if m.Recipe != nil {
if err := m.Recipe.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("recipe")
}
return err
}
}
return nil
}
func (m *V1BuildRequest) validateRecipeID(formats strfmt.Registry) error {
if swag.IsZero(m.RecipeID) { // not required
return nil
}
if err := validate.FormatOf("recipe_id", "body", "uuid", m.RecipeID.String(), formats); err != nil {
return err
}
return nil
}
func (m *V1BuildRequest) validateRequester(formats strfmt.Registry) error {
if swag.IsZero(m.Requester) { // not required
return nil
}
if m.Requester != nil {
if err := m.Requester.Validate(formats); err != nil {
if ve, ok := err.(*errors.Validation); ok {
return ve.ValidateName("requester")
}
return err
}
}
return nil
}
// MarshalBinary interface implementation
func (m *V1BuildRequest) MarshalBinary() ([]byte, error) {
if m == nil {
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *V1BuildRequest) UnmarshalBinary(b []byte) error {
var res V1BuildRequest
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
| []
| []
| []
| [] | [] | go | null | null | null |
megatron/arguments.py | # coding=utf-8
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Megatron arguments."""
import argparse
import collections
import os
import re
import time
import torch
import deepspeed
from megatron.enums import PositionEmbeddingType
import megatron
from megatron.logging import log_levels
def parse_args(extra_args_provider=None, defaults={},
ignore_unknown_args=False):
"""Parse all arguments."""
parser = argparse.ArgumentParser(description='Megatron-LM Arguments',
allow_abbrev=False)
# Standard arguments.
parser = _add_network_size_args(parser)
parser = _add_regularization_args(parser)
parser = _add_training_args(parser)
parser = _add_initialization_args(parser)
parser = _add_learning_rate_args(parser)
parser = _add_checkpointing_args(parser)
parser = _add_mixed_precision_args(parser)
parser = _add_distributed_args(parser)
parser = _add_validation_args(parser)
parser = _add_data_args(parser)
parser = _add_autoresume_args(parser)
parser = _add_biencoder_args(parser)
parser = _add_vit_args(parser)
parser = _add_logging_args(parser)
parser = _add_zero_args(parser)
parser = _add_memoryopt_args(parser)
parser = _add_activation_checkpoint_args(parser)
# Custom arguments.
if extra_args_provider is not None:
parser = extra_args_provider(parser)
parser = deepspeed.add_config_arguments(parser)
# Parse.
if ignore_unknown_args:
args, _ = parser.parse_known_args()
else:
args = parser.parse_args()
# Distributed args.
args.rank = int(os.getenv('RANK', '0'))
args.world_size = int(os.getenv("WORLD_SIZE", '1'))
# Tensor model parallel size.
args.tensor_model_parallel_size = min(
args.tensor_model_parallel_size, args.world_size)
assert args.world_size % args.tensor_model_parallel_size == 0, 'world size'\
' ({}) is not divisible by tensor model parallel size ({})'.format(
args.world_size, args.tensor_model_parallel_size)
# Pipeline model parallel size.
args.pipeline_model_parallel_size = min(
args.pipeline_model_parallel_size,
(args.world_size // args.tensor_model_parallel_size))
# Checks.
model_parallel_size = args.pipeline_model_parallel_size * \
args.tensor_model_parallel_size
assert args.world_size % model_parallel_size == 0, 'world size is not'\
' divisible by tensor parallel size ({}) times pipeline parallel ' \
'size ({})'.format(args.world_size, args.tensor_model_parallel_size,
args.pipeline_model_parallel_size)
args.data_parallel_size = args.world_size // model_parallel_size
if args.rank == 0:
print('using world size: {}, data-parallel-size: {}, '
'tensor-model-parallel size: {}, '
'pipeline-model-parallel size: {} '.format(
args.world_size, args.data_parallel_size,
args.tensor_model_parallel_size,
args.pipeline_model_parallel_size), flush=True)
# --data-path and --train-weighted-splits-paths
message = "Data loading Mode 1: --data-path and --split "\
"and Mode 2: --(train|valid|test)-weighted-split-paths"\
"are mutually exclusive i.e. cannot be set together."
if args.data_path:
assert args.train_weighted_split_paths is None, message
setattr(args, "valid_weighted_split_names", None)
setattr(args, "valid_weighted_split_weights", None)
setattr(args, "valid_weighted_split_splits", None)
setattr(args, "test_weighted_split_names", None)
setattr(args, "test_weighted_split_weights", None)
setattr(args, "test_weighted_split_splits", None)
        # args.split's default value is None; it is set here so that we can check
        # that it does not overlap with the 2nd mode of data loading
if args.split is None:
args.split = "969, 30, 1"
if args.train_weighted_split_paths or args.valid_weighted_split_paths or \
args.test_weighted_split_paths:
assert args.data_path is None and args.split is None, message
# Deprecated arguments
assert args.batch_size is None, '--batch-size argument is no longer ' \
'valid, use --micro-batch-size instead'
del args.batch_size
assert args.warmup is None, '--warmup argument is no longer valid, use ' \
'--lr-warmup-fraction instead'
del args.warmup
assert args.model_parallel_size is None, '--model-parallel-size is no ' \
'longer valid, use --tensor-model-parallel-size instead'
del args.model_parallel_size
# Set input defaults.
for key in defaults:
# For default to be valid, it should not be provided in the
# arguments that are passed to the program. We check this by
# ensuring the arg is set to None.
if getattr(args, key) is not None:
if args.rank == 0:
print('WARNING: overriding default arguments for {key}:{v} \
with {key}:{v2}'.format(key=key, v=defaults[key],
v2=getattr(args, key)),
flush=True)
else:
setattr(args, key, defaults[key])
# Batch size.
assert args.micro_batch_size is not None
assert args.micro_batch_size > 0
if args.global_batch_size is None:
args.global_batch_size = args.micro_batch_size * args.data_parallel_size
if args.rank == 0:
print('setting global batch size to {}'.format(
args.global_batch_size), flush=True)
assert args.global_batch_size > 0
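    # Interleaved (virtual) pipeline schedule: each pipeline stage is split into
    # several virtual stages of num_layers_per_virtual_pipeline_stage layers each.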
if args.num_layers_per_virtual_pipeline_stage is not None:
assert args.pipeline_model_parallel_size > 2, \
'pipeline-model-parallel size should be greater than 2 with ' \
'interleaved schedule'
assert args.num_layers % args.num_layers_per_virtual_pipeline_stage == 0, \
'number of layers is not divisible by number of layers per virtual ' \
'pipeline stage'
args.virtual_pipeline_model_parallel_size = \
(args.num_layers // args.pipeline_model_parallel_size) // \
args.num_layers_per_virtual_pipeline_stage
else:
args.virtual_pipeline_model_parallel_size = None
# Parameters dtype.
args.params_dtype = torch.float
if args.fp16:
assert not args.bf16
args.params_dtype = torch.half
if args.bf16:
assert not args.fp16
args.params_dtype = torch.bfloat16
# bfloat16 requires gradient accumulation and all-reduce to
# be done in fp32.
if not args.accumulate_allreduce_grads_in_fp32:
args.accumulate_allreduce_grads_in_fp32 = True
if args.rank == 0:
print('accumulate and all-reduce gradients in fp32 for '
'bfloat16 data type.', flush=True)
if args.rank == 0:
print('using {} for parameters ...'.format(args.params_dtype),
flush=True)
# If we do accumulation and all-reduces in fp32, we need to have
# local DDP and we should set the use-contiguous-buffers-in-ddp.
if args.accumulate_allreduce_grads_in_fp32:
assert args.DDP_impl == 'local'
args.use_contiguous_buffers_in_ddp = True
if args.dataloader_type is None:
args.dataloader_type = 'single'
# Consumed tokens.
args.consumed_train_samples = 0
args.consumed_valid_samples = 0
args.consumed_train_tokens = 0
args.gigaflos_no_embeds = 0
# Iteration-based training.
if args.train_iters:
# If we use iteration-based training, make sure the
# sample-based options are off.
assert args.train_samples is None, \
'expected iteration-based training'
assert args.lr_decay_samples is None, \
'expected iteration-based learning rate decay'
assert args.lr_warmup_samples == 0, \
'expected iteration-based learning rate warmup'
assert args.rampup_batch_size is None, \
'expected no batch-size rampup for iteration-based training'
if args.lr_warmup_fraction is not None:
assert args.lr_warmup_iters == 0, \
'can only specify one of lr-warmup-fraction and lr-warmup-iters'
# Sample-based training.
if args.train_samples:
# If we use sample-based training, make sure the
# iteration-based options are off.
assert args.train_iters is None, \
'expected sample-based training'
assert args.lr_decay_iters is None, \
'expected sample-based learning rate decay'
assert args.lr_warmup_iters == 0, \
            'expected sample-based learning rate warmup'
if args.lr_warmup_fraction is not None:
assert args.lr_warmup_samples == 0, \
'can only specify one of lr-warmup-fraction ' \
'and lr-warmup-samples'
# Check required arguments.
required_args = ['num_layers', 'hidden_size', 'num_attention_heads']
for req_arg in required_args:
_check_arg_is_not_none(args, req_arg)
# Checks.
if args.ffn_hidden_size is None:
args.ffn_hidden_size = 4 * args.hidden_size
if args.kv_channels is None:
assert args.hidden_size % args.num_attention_heads == 0
args.kv_channels = args.hidden_size // args.num_attention_heads
if args.seq_length is not None:
assert args.encoder_seq_length is None
args.encoder_seq_length = args.seq_length
else:
assert args.encoder_seq_length is not None
args.seq_length = args.encoder_seq_length
if args.position_embedding_type == PositionEmbeddingType.absolute or args.position_embedding_type == PositionEmbeddingType.alibi:
assert args.max_position_embeddings is not None
if args.seq_length is not None:
assert args.max_position_embeddings >= args.seq_length
if args.decoder_seq_length is not None:
assert args.max_position_embeddings >= args.decoder_seq_length
else:
assert args.max_position_embeddings is None
if args.lr is not None:
assert args.min_lr <= args.lr
if args.save is not None:
assert args.save_interval is not None
# Mixed precision checks.
if args.fp16_lm_cross_entropy:
assert args.fp16, 'lm cross entropy in fp16 only support in fp16 mode.'
if args.fp32_residual_connection:
assert args.fp16 or args.bf16, \
'residual connection in fp32 only supported when using fp16 or bf16.'
# Activation checkpointing.
if args.distribute_checkpointed_activations:
assert args.checkpoint_activations, \
'for distribute-checkpointed-activations to work you '\
'need to enable checkpoint-activations'
args.curriculum_learning = False
# Activation function
if args.glu_activation is not None and args.bias_gelu_fusion:
raise ValueError("if glu-activation is used, please set --no-bias-gelu-fusion")
# Skip train iterations
if args.skip_train_iteration_range is not None:
args.skip_train_iteration_range = [
list(map(int, range_.split("-"))) for range_ in args.skip_train_iteration_range
]
args.skip_train_iteration_range.sort()
skip_train_iteration_range = collections.deque()
for range_ in args.skip_train_iteration_range:
if len(range_) == 2:
start, end = range_
assert end >= start, \
"end of skip range cannot be smaller than start of skip range"
# merge overlapping intervals (e.g. 1-5 2-6 -> 1-6)
if not skip_train_iteration_range:
skip_train_iteration_range.append([start, end])
elif skip_train_iteration_range[-1][1] >= start:
skip_train_iteration_range[-1][1] = max(end, skip_train_iteration_range[-1][1])
else:
skip_train_iteration_range.append([start, end])
else:
raise ValueError(
"skip train iterations should be specified as two numbers, i.e. start-end"
)
args.skip_train_iteration_range = skip_train_iteration_range
if args.use_bnb_optimizer:
try:
import bitsandbytes as bnb
except ModuleNotFoundError:
raise ModuleNotFoundError("Please install bitsandbytes from https://github.com/facebookresearch/bitsandbytes.")
_print_args(args)
return args
def _print_args(args):
"""Print arguments."""
if args.rank == 0:
print('------------------------ arguments ------------------------',
flush=True)
str_list = []
for arg in vars(args):
dots = '.' * (48 - len(arg))
str_list.append(' {} {} {}'.format(arg, dots, getattr(args, arg)))
if args.log_path is not None:
with open(os.path.join(args.log_path,f'args_{time.strftime("%Y-%m-%dT%H:%M:%S")}.txt'), 'w') as f:
for arg in sorted(str_list, key=lambda x: x.lower()):
f.write(arg+"\n")
print(arg, flush=True)
else:
for arg in sorted(str_list, key=lambda x: x.lower()):
print(arg, flush=True)
print('-------------------- end of arguments ---------------------',
flush=True)
def _check_arg_is_not_none(args, arg):
assert getattr(args, arg) is not None, '{} argument is None'.format(arg)
def _add_network_size_args(parser):
group = parser.add_argument_group(title='network size')
group.add_argument('--num-layers', type=int, default=None,
help='Number of transformer layers.')
group.add_argument('--hidden-size', type=int, default=None,
                       help='Transformer hidden size.')
group.add_argument('--ffn-hidden-size', type=int, default=None,
help='Transformer Feed-Forward Network hidden size. '
'This is set to 4*hidden-size if not provided')
group.add_argument('--num-attention-heads', type=int, default=None,
help='Number of transformer attention heads.')
group.add_argument('--kv-channels', type=int, default=None,
help='Projection weights dimension in multi-head '
'attention. This is set to '
' args.hidden_size // args.num_attention_heads '
'if not provided.')
group.add_argument('--max-position-embeddings', type=int, default=None,
help='Maximum number of position embeddings to use. '
'This is the size of position embedding.')
group.add_argument('--make-vocab-size-divisible-by', type=int, default=128,
                       help='Pad the vocab size to be divisible by this value. '
                       'This is added for computational efficiency reasons.')
group.add_argument('--layernorm-epsilon', type=float, default=1e-5,
help='Layer norm epsilon.')
group.add_argument('--apply-residual-connection-post-layernorm',
action='store_true',
                       help='If set, use original BERT residual connection '
'ordering.')
group.add_argument('--embed-layernorm', action='store_true',
help='use layernorm for embedding')
group.add_argument('--openai-gelu', action='store_true',
                       help="Use OpenAI's GeLU implementation. This option "
                       'should not be used unless for backward compatibility '
                       'reasons.')
group.add_argument('--onnx-safe', type=bool, required=False,
help='Use workarounds for known problems with '
'Torch ONNX exporter')
group.add_argument('--bert-no-binary-head', action='store_false',
help='Disable BERT binary head.',
dest='bert_binary_head')
group.add_argument('--position-embedding-type', type=lambda x: PositionEmbeddingType[x],
choices=list(PositionEmbeddingType),
default=PositionEmbeddingType.absolute,
help='Define position embedding type ("absolute" | "rotary" | "alibi"). "absolute" by default.'
)
group.add_argument('--glu-activation', type=str,
choices=megatron.model.glu_activations.GLU_ACTIVATIONS.keys(),
help='GLU activations to use.'
)
group.add_argument('--kill-switch-path', type=str,
help='path to look for a kill switch, which if found will automatically exit the program'
)
group.add_argument('--log-level', type=str, choices=list(log_levels.keys()),
help="Logger log level to use on the main process. Possible choices are the log levels as strings: 'debug', "
"'info', 'warning', 'error' and 'critical', plus a 'passive' level which doesn't set anything and lets the "
"application set the level."
)
group.add_argument('--log-level-replica', type=str, choices=list(log_levels.keys()),
help="Logger log level to use on replicas. Same choices as ``log_level``"
)
return parser
def _add_logging_args(parser):
group = parser.add_argument_group(title='logging')
group.add_argument('--log-params-norm', action='store_true',
help='If set, calculate and log parameters norm.')
group.add_argument('--log-num-zeros-in-grad', action='store_true',
help='If set, calculate and log the number of zeros in gradient.')
group.add_argument('--tensorboard-log-interval', type=int, default=1,
help='Report to tensorboard interval.')
group.add_argument('--tensorboard-queue-size', type=int, default=1000,
help='Size of the tensorboard queue for pending events '
'and summaries before one of the ‘add’ calls forces a '
'flush to disk.')
group.add_argument('--log-timers-to-tensorboard', action='store_true',
help='If set, write timers to tensorboard.')
group.add_argument('--log-batch-size-to-tensorboard', action='store_true',
help='If set, write batch-size to tensorboard.')
group.add_argument('--no-log-learnig-rate-to-tensorboard',
action='store_false',
help='Disable learning rate logging to tensorboard.',
dest='log_learning_rate_to_tensorboard')
group.add_argument('--no-log-loss-scale-to-tensorboard',
action='store_false',
help='Disable loss-scale logging to tensorboard.',
dest='log_loss_scale_to_tensorboard')
group.add_argument('--log-validation-ppl-to-tensorboard',
action='store_true',
help='If set, write validation perplexity to '
'tensorboard.')
return parser
def _add_regularization_args(parser):
group = parser.add_argument_group(title='regularization')
group.add_argument('--attention-dropout', type=float, default=0.1,
help='Post attention dropout probability.')
group.add_argument('--hidden-dropout', type=float, default=0.1,
help='Dropout probability for hidden state transformer.')
group.add_argument('--weight-decay', type=float, default=0.01,
help='Weight decay coefficient for L2 regularization.')
group.add_argument('--clip-grad', type=float, default=1.0,
help='Gradient clipping based on global L2 norm.')
group.add_argument('--adam-beta1', type=float, default=0.9,
help='First coefficient for computing running averages '
'of gradient and its square')
group.add_argument('--adam-beta2', type=float, default=0.999,
help='Second coefficient for computing running averages '
'of gradient and its square')
group.add_argument('--adam-eps', type=float, default=1e-08,
help='Term added to the denominator to improve'
'numerical stability')
group.add_argument('--sgd-momentum', type=float, default=0.9,
help='Momentum factor for sgd')
return parser
def _add_training_args(parser):
group = parser.add_argument_group(title='training')
group.add_argument('--micro-batch-size', type=int, default=None,
help='Batch size per model instance (local batch size). '
'Global batch size is local batch size times data '
'parallel size times number of micro batches.')
group.add_argument('--batch-size', type=int, default=None,
help='Old batch size parameter, do not use. '
'Use --micro-batch-size instead')
group.add_argument('--global-batch-size', type=int, default=None,
help='Training batch size. If set, it should be a '
'multiple of micro-batch-size times data-parallel-size. '
'If this value is None, then '
'use micro-batch-size * data-parallel-size as the '
'global batch size. This choice will result in 1 for '
'number of micro-batches.')
group.add_argument('--rampup-batch-size', nargs='*', default=None,
help='Batch size ramp up with the following values:'
' --rampup-batch-size <start batch size> '
' <batch size increment> '
' <ramp-up samples> '
'For example: '
' --rampup-batch-size 16 8 300000 '
' --global-batch-size 1024 '
'will start with global batch size 16 and over '
' (1024 - 16) / 8 = 126 intervals will increase '
'the batch size linearly to 1024. In each interval '
'we will use approximately 300000 / 126 = 2380 samples.')
group.add_argument('--checkpoint-activations', action='store_true',
help='Checkpoint activation to allow for training '
'with larger models, sequences, and batch sizes.')
group.add_argument('--distribute-checkpointed-activations',
action='store_true',
help='If set, distribute checkpointed activations '
'across model parallel group.')
group.add_argument('--checkpoint-num-layers', type=int, default=1,
help='chunk size (number of layers) for checkpointing.')
group.add_argument('--train-iters', type=int, default=None,
help='Total number of iterations to train over all '
'training runs. Note that either train-iters or '
'train-samples should be provided.')
group.add_argument('--train-samples', type=int, default=None,
help='Total number of samples to train over all '
'training runs. Note that either train-iters or '
'train-samples should be provided.')
group.add_argument('--train-tokens', type=int, default=None,
help='Total number of tokens to train over all '
'training runs.')
group.add_argument('--log-interval', type=int, default=100,
help='Report loss and timing interval.')
group.add_argument('--exit-interval', type=int, default=None,
help='Exit the program after the iteration is divisible '
'by this value.')
group.add_argument('--exit-duration-in-mins', type=int, default=None,
help='Exit the program after this many minutes.')
group.add_argument('--tensorboard-dir', type=str, default=None,
help='Write TensorBoard logs to this directory.')
group.add_argument('--no-masked-softmax-fusion',
action='store_false',
help='Disable fusion of query_key_value scaling, '
'masking, and softmax.',
dest='masked_softmax_fusion')
group.add_argument('--no-bias-gelu-fusion', action='store_false',
help='Disable bias and gelu fusion.',
dest='bias_gelu_fusion')
group.add_argument('--no-bias-dropout-fusion', action='store_false',
help='Disable bias and dropout fusion.',
dest='bias_dropout_fusion')
group.add_argument('--optimizer', type=str, default='adam',
choices=['adam', 'sgd'],
help='Optimizer function')
group.add_argument('--use-bnb-optimizer', action='store_true',
help='Use bitsandbytes optimizer for efficient training,'
'please refer https://github.com/facebookresearch/bitsandbytes.',
dest='use_bnb_optimizer')
group.add_argument('--dataloader-type', type=str, default=None,
choices=['single', 'cyclic'],
help='Single pass vs multiple pass data loader')
group.add_argument('--cpu-optimizer', action='store_true',
help='Run optimizer on CPU')
group.add_argument('--cpu_torch_adam', action='store_true',
help='Use Torch Adam as optimizer on CPU.')
group.add_argument('--codecarbon-dir', type=str, default=None,
help='Write CodeCarbon logs to this directory.')
group.add_argument('--eval-only', type=bool, required=False,
                       help='If set to True, no train step will be performed, '
                       'and only the evaluation on the `valid` and `test` sets '
'will be performed' )
group.add_argument('--skip-train-iteration-range', type=str, nargs='+', default=None,
help='Iteration ranges to skip. The values are one or more dash-separated ranges. e.g., 101-200 251-300.')
group.add_argument('--abort-on-unmet-fused-kernel-constraints', action='store_true',
help="If set to True, the program will abort if the constraints for loading a fused kernel aren't met")
return parser
def _add_initialization_args(parser):
group = parser.add_argument_group(title='initialization')
group.add_argument('--seed', type=int, default=1234,
help='Random seed used for python, numpy, '
'pytorch, and cuda.')
group.add_argument('--init-method-std', type=float, default=0.02,
help='Standard deviation of the zero mean normal '
'distribution used for weight initialization.')
group.add_argument('--init-method-xavier-uniform', action='store_true',
help='Enable Xavier uniform parameter initialization')
return parser
def _add_learning_rate_args(parser):
group = parser.add_argument_group(title='learning rate')
group.add_argument('--lr', type=float, default=None,
help='Initial learning rate. Depending on decay style '
                       'and initial warmup, the learning rate at each '
'iteration would be different.')
group.add_argument('--lr-decay-style', type=str, default='linear',
choices=['constant', 'linear', 'cosine'],
help='Learning rate decay function.')
group.add_argument('--lr-decay-iters', type=int, default=None,
help='number of iterations to decay learning rate over,'
' If None defaults to `--train-iters`')
group.add_argument('--lr-decay-samples', type=int, default=None,
help='number of samples to decay learning rate over,'
' If None defaults to `--train-samples`')
group.add_argument('--lr-decay-tokens', type=int, default=None,
help='number of tokens to decay learning rate over,'
' If not None will override iter/sample-based decay')
group.add_argument('--lr-warmup-fraction', type=float, default=None,
help='fraction of lr-warmup-(iters/samples) to use '
'for warmup (as a float)')
group.add_argument('--lr-warmup-iters', type=int, default=0,
help='number of iterations to linearly warmup '
'learning rate over.')
group.add_argument('--lr-warmup-samples', type=int, default=0,
help='number of samples to linearly warmup '
'learning rate over.')
group.add_argument('--warmup', type=int, default=None,
help='Old lr warmup argument, do not use. Use one of the'
'--lr-warmup-* arguments above')
group.add_argument('--min-lr', type=float, default=0.0,
                       help='Minimum value for learning rate. The scheduler '
                       'clips values below this threshold.')
group.add_argument('--override-lr-scheduler', action='store_true',
help='Reset the values of the scheduler (learning rate,'
'warmup iterations, minimum learning rate, maximum '
'number of iterations, and decay style from input '
'arguments and ignore values from checkpoints. Note'
'that all the above values will be reset.')
group.add_argument('--use-checkpoint-lr-scheduler', action='store_true',
help='Use checkpoint to set the values of the scheduler '
'(learning rate, warmup iterations, minimum learning '
'rate, maximum number of iterations, and decay style '
'from checkpoint and ignore input arguments.')
return parser
def _add_checkpointing_args(parser):
group = parser.add_argument_group(title='checkpointing')
group.add_argument('--save', type=str, default=None,
help='Output directory to save checkpoints to.')
group.add_argument('--save-interval', type=int, default=None,
help='Number of iterations between checkpoint saves.')
group.add_argument('--no-save-optim', action='store_true', default=None,
help='Do not save current optimizer.')
group.add_argument('--no-save-rng', action='store_true', default=None,
help='Do not save current rng state.')
group.add_argument('--load', type=str, default=None,
help='Directory containing a model checkpoint.')
group.add_argument('--no-load-optim', action='store_true', default=None,
help='Do not load optimizer when loading checkpoint.')
group.add_argument('--no-load-rng', action='store_true', default=None,
help='Do not load rng state when loading checkpoint.')
group.add_argument('--finetune', action='store_true',
help='Load model for finetuning. Do not load optimizer '
'or rng state from checkpoint and set iteration to 0. '
'Assumed when loading a release checkpoint.')
return parser
def _add_mixed_precision_args(parser):
group = parser.add_argument_group(title='mixed precision')
group.add_argument('--fp16', action='store_true',
help='Run model in fp16 mode.')
group.add_argument('--bf16', action='store_true',
help='Run model in bfloat16 mode.')
group.add_argument('--loss-scale', type=float, default=None,
help='Static loss scaling, positive power of 2 '
'values can improve fp16 convergence. If None, dynamic'
'loss scaling is used.')
group.add_argument('--initial-loss-scale', type=float, default=2**32,
help='Initial loss-scale for dynamic loss scaling.')
group.add_argument('--min-loss-scale', type=float, default=1.0,
help='Minimum loss scale for dynamic loss scale.')
group.add_argument('--loss-scale-window', type=float, default=1000,
help='Window over which to raise/lower dynamic scale.')
group.add_argument('--hysteresis', type=int, default=2,
help='hysteresis for dynamic loss scaling')
group.add_argument('--fp32-residual-connection', action='store_true',
help='Move residual connections to fp32.')
group.add_argument('--no-query-key-layer-scaling', action='store_false',
help='Do not scale Q * K^T by 1 / layer-number.',
dest='apply_query_key_layer_scaling')
group.add_argument('--attention-softmax-in-fp32', action='store_true',
help='Run attention masking and softmax in fp32. '
'This flag is ignored unless '
'--no-query-key-layer-scaling is specified.')
group.add_argument('--accumulate-allreduce-grads-in-fp32',
action='store_true',
help='Gradient accumulation and all-reduce in fp32.')
group.add_argument('--fp16-lm-cross-entropy', action='store_true',
help='Move the cross entropy unreduced loss calculation'
'for lm head to fp16.')
return parser
def _add_distributed_args(parser):
group = parser.add_argument_group(title='distributed')
group.add_argument('--tensor-model-parallel-size', type=int, default=1,
help='Degree of tensor model parallelism.')
group.add_argument('--pipeline-model-parallel-size', type=int, default=1,
help='Degree of pipeline model parallelism.')
group.add_argument('--model-parallel-size', type=int, default=None,
help='Old model parallel argument, do not use. Use '
'--tensor-model-parallel-size instead.')
group.add_argument('--num-layers-per-virtual-pipeline-stage', type=int, default=None,
help='Number of layers per virtual pipeline stage')
group.add_argument('--distributed-backend', default='nccl',
choices=['nccl', 'gloo'],
help='Which backend to use for distributed training.')
group.add_argument('--DDP-impl', default='local',
choices=['local', 'torch'],
help='which DistributedDataParallel implementation '
'to use.')
group.add_argument('--use-contiguous-buffers-in-ddp', action='store_true',
help='If set, use contiguous buffer in DDP. Note that '
                       'this option only works with local DDP.' )
group.add_argument('--no-scatter-gather-tensors-in-pipeline', action='store_false',
help='Use scatter/gather to optimize communication of tensors in pipeline',
dest='scatter_gather_tensors_in_pipeline')
group.add_argument('--local_rank', type=int, default=None,
help='local rank passed from distributed launcher.')
group.add_argument('--lazy-mpu-init', type=bool, required=False,
help='If set to True, initialize_megatron() '
'skips DDP initialization and returns function to '
                       'complete it instead. Also turns on '
'--use-cpu-initialization flag. This is for '
'external DDP manager.' )
group.add_argument('--use-cpu-initialization', action='store_true',
default=None, help='If set, affine parallel weights '
'initialization uses CPU' )
return parser
def _add_validation_args(parser):
group = parser.add_argument_group(title='validation')
group.add_argument('--eval-iters', type=int, default=100,
help='Number of iterations to run for evaluation'
'validation/test for.')
group.add_argument('--eval-interval', type=int, default=1000,
help='Interval between running evaluation on '
'validation set.')
return parser
def _add_data_args(parser):
group = parser.add_argument_group(title='data and dataloader')
# option 1 for data loading (mutually exclusive with option2)
group.add_argument('--data-path', nargs='*', default=None,
help='Path to the training dataset. Accepted format:'
'1) a single data path, 2) multiple datasets in the'
'form: dataset1-weight dataset1-path dataset2-weight '
'dataset2-path ...')
group.add_argument('--split', type=str, default=None,
help='Comma-separated list of proportions for training,'
' validation, and test split. For example the split '
'`90,5,5` will use 90%% of data for training, 5%% for '
'validation and 5%% for test.')
# option 2 for data loading (mutually exclusive with option1)
# helper class to parse the --xxx-weighted-split-paths
# note here two args are set: extra valid dataset paths and names
class parse_data_paths(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if option_string == "--train-weighted-split-paths":
                assert len(values) == 1, ('Only 1 dataset group is allowed to '
                    'be passed for the argument --train-weighted-split-paths')
# make sure string given in the correct format
            err_message = ('Each data group should be input on the following format '
                '"GIVEN_NAME WEIGHT1 START:END PATH1, WEIGHT2 START:END PATH2" '
                'where START < END')
for v in values:
# each prefix consists several datasets separated by commas
prefix = ":".join(v.split(":")[1:]) # remove GIVEN_NAME
datasets = prefix.split(",")
# check if each dataset is formatted like `WEIGHT START:END PATH`
for d in datasets:
assert len(d.split()) == 3, err_message
start, end = d.split()[1].split(":")
assert float(start) < float(end), err_message
names = [v.split(":")[0] for v in values]
prefixes = [":".join(v.split(":")[1:]).strip() for v in values]
weights = [[d.split()[0] for d in p.split(",")] for p in prefixes]
splits = [[d.split()[1] for d in p.split(",")] for p in prefixes]
paths = [[d.split()[2] for d in p.split(",")] for p in prefixes]
# # to keep consistency with Option 1 of data loading (through --data-path)
# # paths will contain strings on the following form
# # "WEIGHTS1 PATH1 WEIGHTS2 PATH2 WEIGHTS3 PATH3" for each dataset group
# # while data will be parsed in additional arguments below
# paths_option1_style = []
# for p, w in zip(paths, weights):
# paths_option1_style.append(" ".join([f"{w_i} {p_i}" for p_i, w_i in zip(p,w)]))
# setattr(args, self.dest, paths_option1_style)
setattr(args, self.dest, paths)
setattr(args, self.dest.replace("paths", "weights"), weights)
setattr(args, self.dest.replace("paths", "splits"), splits)
setattr(args, self.dest.replace("paths","names"), names)
group.add_argument('--train-weighted-split-paths', nargs='*', default=None,
help='Weights, splits and paths to groups of datasets'
'Accepted format: ONE dataset groups could be'
'submitted in the following form between double quotes'
'"GIVEN_NAME WEIGHT1 START:END PATH1, WEIGHT2 START:END PATH2"'
'e.g.: "NAME_ABC: 0.6 0:0.6 A, 0.3 0:1 B, 0.1 0:1 C" '
'WEIGHT is used to up and down sample each dataset A,B,C in the group'
'START:END indicates the split portion of the dataset',
action=parse_data_paths)
group.add_argument('--valid-weighted-split-paths', nargs='*', default=None,
help='Weights, splits and paths to groups of datasets'
'Accepted format: one or many dataset groups could be'
'submitted in the following form each between double quotes'
'"GIVEN_NAME WEIGHT1 START:END PATH1, WEIGHT2 START:END PATH2"'
'e.g.: "NAME_ABC: 0.6 0.6:0.8 A, 0.3 0:1 B, 0.1 0:1 C" '
'"NAME_CDE: 0.6 0.6:0.8 C, 0.3 0:1 D, 0.1 0:1 E" '
'validation will be run on each of those groups independently',
action=parse_data_paths)
group.add_argument('--test-weighted-split-paths', nargs='*', default=None,
help='Weights, splits and paths to groups of datasets'
'Accepted format: one or many dataset groups could be'
'submitted in the following form each between double quotes'
'"GIVEN_NAME WEIGHT1 START:END PATH1, WEIGHT2 START:END PATH2"'
'e.g.: "NAME_ABC: 0.6 0.6:0.8 A, 0.3 0:1 B, 0.1 0:1 C" '
'"NAME_CDE: 0.6 0.6:0.8 C, 0.3 0:1 D, 0.1 0:1 E" '
'test will be run on each of those groups independently',
action=parse_data_paths)
group.add_argument('--log-path', type=str, default=None,
help='Path to the save arguments file.')
group.add_argument('--vocab-file', type=str, default=None,
help='Path to the vocab file.')
group.add_argument('--merge-file', type=str, default=None,
help='Path to the BPE merge file.')
group.add_argument('--vocab-extra-ids', type=int, default=0,
help='Number of additional vocabulary tokens. '
'They are used for span masking in the T5 model')
group.add_argument('--seq-length', type=int, default=None,
help='Maximum sequence length to process.')
group.add_argument('--encoder-seq-length', type=int, default=None,
help='Maximum encoder sequence length to process.'
'This should be exclusive of --seq-length')
group.add_argument('--decoder-seq-length', type=int, default=None,
help="Maximum decoder sequence length to process.")
group.add_argument('--retriever-seq-length', type=int, default=256,
help='Maximum sequence length for the biencoder model '
' for retriever')
group.add_argument('--sample-rate', type=float, default=1.0,
help='sample rate for training data. Supposed to be 0 '
' < sample_rate < 1')
group.add_argument('--mask-prob', type=float, default=0.15,
help='Probability of replacing a token with mask.')
group.add_argument('--short-seq-prob', type=float, default=0.1,
help='Probability of producing a short sequence.')
group.add_argument('--mmap-warmup', action='store_true',
help='Warm up mmap files.')
group.add_argument('--num-workers', type=int, default=2,
help="Dataloader number of workers.")
group.add_argument('--tokenizer-type', type=str,
default=None,
choices=['BertWordPieceLowerCase',
'BertWordPieceCase',
'GPT2BPETokenizer',
'PretrainedFromHF'],
help='What type of tokenizer to use.')
group.add_argument("--tokenizer-name-or-path", type=str, default=None,
help="Name or path of the huggingface tokenizer.")
group.add_argument('--data-impl', type=str, default='infer',
choices=['lazy', 'cached', 'mmap', 'infer'],
help='Implementation of indexed datasets.')
group.add_argument('--reset-position-ids', action='store_true',
                       help='Reset position ids after end-of-document token.')
group.add_argument('--reset-attention-mask', action='store_true',
                       help='Reset self attention mask after '
'end-of-document token. Attention between tokens from different documents is null.')
group.add_argument('--eod-mask-loss', action='store_true',
help='Mask loss for the end of document tokens.')
group.add_argument('--loss-on-targets-only', action='store_true',
help='Mask loss on input sequence.')
group.add_argument('--reweight-loss-based-on-position-frequency', action="store_true",
help='Some objectives require us to sample loss_mask. This might introduce bias towards '
'specific positions. This option tries to un-bias the loss by reweighting loss on specific '
'positions based on how frequently we train on that position.'
'This is mostly used for prefix_lm training')
return parser
def _add_autoresume_args(parser):
group = parser.add_argument_group(title='autoresume')
group.add_argument('--adlr-autoresume', action='store_true',
help='Enable autoresume on adlr cluster.')
group.add_argument('--adlr-autoresume-interval', type=int, default=1000,
help='Intervals over which check for autoresume'
'termination signal')
return parser
def _add_biencoder_args(parser):
group = parser.add_argument_group(title='biencoder')
# network size
group.add_argument('--ict-head-size', type=int, default=None,
help='Size of block embeddings to be used in ICT and '
'REALM (paper default: 128)')
group.add_argument('--biencoder-projection-dim', type=int, default=0,
help='Size of projection head used in biencoder (paper'
' default: 128)')
group.add_argument('--biencoder-shared-query-context-model', action='store_true',
help='Whether to share the parameters of the query '
'and context models or not')
# checkpointing
group.add_argument('--ict-load', type=str, default=None,
help='Directory containing an ICTBertModel checkpoint')
group.add_argument('--bert-load', type=str, default=None,
help='Directory containing an BertModel checkpoint '
'(needed to start ICT and REALM)')
# data
group.add_argument('--titles-data-path', type=str, default=None,
help='Path to titles dataset used for ICT')
group.add_argument('--query-in-block-prob', type=float, default=0.1,
help='Probability of keeping query in block for '
'ICT dataset')
group.add_argument('--use-one-sent-docs', action='store_true',
help='Whether to use one sentence documents in ICT')
group.add_argument('--evidence-data-path', type=str, default=None,
help='Path to Wikipedia Evidence from DPR paper')
# training
group.add_argument('--retriever-report-topk-accuracies', nargs='+', type=int,
default=[], help="Which top-k accuracies to report "
"(e.g. '1 5 20')")
group.add_argument('--retriever-score-scaling', action='store_true',
help='Whether to scale retriever scores by inverse '
'square root of hidden size')
# faiss index
group.add_argument('--block-data-path', type=str, default=None,
help='Where to save/load BlockData to/from')
group.add_argument('--embedding-path', type=str, default=None,
help='Where to save/load Open-Retrieval Embedding'
' data to/from')
# indexer
group.add_argument('--indexer-batch-size', type=int, default=128,
help='How large of batches to use when doing indexing '
'jobs')
group.add_argument('--indexer-log-interval', type=int, default=1000,
help='After how many batches should the indexer '
'report progress')
return parser
def _add_vit_args(parser):
group = parser.add_argument_group(title="vit")
group.add_argument('--num-classes', type=int, default=1000,
help='Number of classes in the vision classification task')
group.add_argument('--img-dim', type=int, default=224,
help='Image size for vision classification task')
group.add_argument('--num-channels', type=int, default=3,
help='Number of channels in input image data')
group.add_argument('--patch-dim', type=int, default=16,
help='Patch dimension used in ViT')
return parser
def _add_zero_args(parser):
"""Text generate arguments."""
group = parser.add_argument_group('ZeRO configurations', 'configurations')
group.add_argument("--zero-stage", type=int, default=1.0)
group.add_argument('--zero-reduce-scatter', action='store_true',
help='Use reduce scatter if specified')
group.add_argument('--zero-contigious-gradients', action='store_true',
help='Use contiguous memory optimization if specified')
group.add_argument("--zero-reduce-bucket-size", type=int, default=0.0)
group.add_argument("--zero-allgather-bucket-size", type=int, default=0.0)
group.add_argument('--remote-device', type=str, default='none', choices=['none', 'cpu', 'nvme'],
help='Remote device for ZeRO-3 initialized parameters.')
group.add_argument('--use-pin-memory', action='store_true',
help='Use pinned CPU memory for ZeRO-3 initialized model parameters.')
return parser
def _add_memoryopt_args(parser):
"""Memory optimization arguments."""
group = parser.add_argument_group('Memory optimizations', 'configurations')
group.add_argument("--scattered-embeddings", action='store_true',
help='Save memory by scattering embedding activations. '
'Introduces dropout differences across MP configurations.')
group.add_argument("--split-transformers", action='store_true',
help='Save memory by splitting transformer layers into two parts, '
'allowing for more frequent activation checkpoint savings.')
group.add_argument("--memory-centric-tiled-linear", action="store_true",
help='Save memory by tiling with deepspeed.zero.TiledLinear.')
group.add_argument("--tile-factor", type=int, default=1,
help='Make all linear layers the same size of [hidden/tile_factor, hidden/tile_factor]. '
'Must be enabled with --memory-centric-tiled-linear. '
'Example A: if tile_factor=1, the qkv layer [hidden, 3* hidden] would be converted into [1,3] tiles of size [hidden,hidden]. '
'Example B: if tile_factor=2, the intermediate layer [4*hidden, hidden] will be converted into [8, 2] tiles of size [hidden/2, hidden/2]. '
'Default is 1.')
return parser
def _add_activation_checkpoint_args(parser):
group = parser.add_argument_group('Activation Checkpointing',
'Checkpointing Configurations')
group.add_argument('--deepspeed-activation-checkpointing', action='store_true',
help='Uses activation checkpointing from DeepSpeed')
group.add_argument('--partition-activations', action='store_true',
help='Partition activations across GPUs before checkpointing.')
group.add_argument('--contigious-checkpointing', action='store_true',
help='Contiguous memory checkpointing for activations.')
group.add_argument('--checkpoint-in-cpu', action='store_true',
help='Move the activation checkpoints to CPU.')
group.add_argument('--synchronize-each-layer', action='store_true',
help='Does a synchronize at the beginning and end of each checkpointed layer.')
group.add_argument('--profile-backward', action='store_true',
help='Enables backward pass profiling for checkpointed layers.')
return parser
| []
| []
| [
"WORLD_SIZE",
"RANK"
]
| [] | ["WORLD_SIZE", "RANK"] | python | 2 | 0 | |
tests/runner/kube_testplatform.go | // ------------------------------------------------------------
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
// ------------------------------------------------------------
package runner
import (
"fmt"
"os"
"strconv"
"log"
kube "github.com/dapr/dapr/tests/platforms/kubernetes"
)
const (
defaultImageRegistry = "docker.io/dapriotest"
defaultImageTag = "latest"
disableTelemetryConfig = "disable-telemetry"
defaultSidecarCPULimit = "4.0"
defaultSidecarMemoryLimit = "512Mi"
defaultSidecarCPURequest = "0.1"
defaultSidecarMemoryRequest = "250Mi"
defaultAppCPULimit = "4.0"
defaultAppMemoryLimit = "800Mi"
defaultAppCPURequest = "0.1"
defaultAppMemoryRequest = "250Mi"
)
// KubeTestPlatform includes K8s client for testing cluster and kubernetes testing apps.
type KubeTestPlatform struct {
AppResources *TestResources
ComponentResources *TestResources
KubeClient *kube.KubeClient
}
// NewKubeTestPlatform creates KubeTestPlatform instance.
func NewKubeTestPlatform() *KubeTestPlatform {
return &KubeTestPlatform{
AppResources: new(TestResources),
ComponentResources: new(TestResources),
}
}
func (c *KubeTestPlatform) setup() (err error) {
// TODO: KubeClient will be properly configured by go test arguments
c.KubeClient, err = kube.NewKubeClient("", "")
return
}
func (c *KubeTestPlatform) tearDown() error {
if err := c.AppResources.tearDown(); err != nil {
fmt.Fprintf(os.Stderr, "failed to tear down AppResources. got: %q", err)
}
if err := c.ComponentResources.tearDown(); err != nil {
fmt.Fprintf(os.Stderr, "failed to tear down ComponentResources. got: %q", err)
}
// TODO: clean up kube cluster
return nil
}
// addComponents adds component to disposable Resource queues.
func (c *KubeTestPlatform) addComponents(comps []kube.ComponentDescription) error {
if c.KubeClient == nil {
return fmt.Errorf("kubernetes cluster needs to be setup")
}
for _, comp := range comps {
c.ComponentResources.Add(kube.NewDaprComponent(c.KubeClient, kube.DaprTestNamespace, comp))
}
// setup component resources
if err := c.ComponentResources.setup(); err != nil {
return err
}
return nil
}
// addApps adds test apps to disposable App Resource queues.
func (c *KubeTestPlatform) addApps(apps []kube.AppDescription) error {
if c.KubeClient == nil {
return fmt.Errorf("kubernetes cluster needs to be setup before calling BuildAppResources")
}
dt := c.disableTelemetry()
for _, app := range apps {
if app.RegistryName == "" {
app.RegistryName = c.imageRegistry()
}
if app.ImageName == "" {
return fmt.Errorf("%s app doesn't have imagename property", app.AppName)
}
app.ImageName = fmt.Sprintf("%s:%s", app.ImageName, c.imageTag())
if dt {
app.Config = disableTelemetryConfig
}
app.DaprCPULimit = c.sidecarCPULimit()
app.DaprCPURequest = c.sidecarCPURequest()
app.DaprMemoryLimit = c.sidecarMemoryLimit()
app.DaprMemoryRequest = c.sidecarMemoryRequest()
app.AppCPULimit = c.appCPULimit()
app.AppCPURequest = c.appCPURequest()
app.AppMemoryLimit = c.appMemoryLimit()
app.AppMemoryRequest = c.appMemoryRequest()
log.Printf("Adding app %v", app)
c.AppResources.Add(kube.NewAppManager(c.KubeClient, kube.DaprTestNamespace, app))
}
// installApps installs the apps in AppResource queue sequentially
if err := c.AppResources.setup(); err != nil {
return err
}
return nil
}
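// The helpers below read optional overrides from DAPR_* environment
// variables and fall back to the package defaults when a value is not set.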
func (c *KubeTestPlatform) imageRegistry() string {
reg := os.Getenv("DAPR_TEST_REGISTRY")
if reg == "" {
return defaultImageRegistry
}
return reg
}
func (c *KubeTestPlatform) imageTag() string {
tag := os.Getenv("DAPR_TEST_TAG")
if tag == "" {
return defaultImageTag
}
return tag
}
func (c *KubeTestPlatform) disableTelemetry() bool {
disableVal := os.Getenv("DAPR_DISABLE_TELEMETRY")
disable, err := strconv.ParseBool(disableVal)
if err != nil {
return false
}
return disable
}
func (c *KubeTestPlatform) sidecarCPULimit() string {
cpu := os.Getenv("DAPR_SIDECAR_CPU_LIMIT")
if cpu != "" {
return cpu
}
return defaultSidecarCPULimit
}
func (c *KubeTestPlatform) sidecarCPURequest() string {
cpu := os.Getenv("DAPR_SIDECAR_CPU_REQUEST")
if cpu != "" {
return cpu
}
return defaultSidecarCPURequest
}
func (c *KubeTestPlatform) sidecarMemoryRequest() string {
mem := os.Getenv("DAPR_SIDECAR_MEMORY_REQUEST")
if mem != "" {
return mem
}
return defaultSidecarMemoryRequest
}
func (c *KubeTestPlatform) sidecarMemoryLimit() string {
mem := os.Getenv("DAPR_SIDECAR_MEMORY_LIMIT")
if mem != "" {
return mem
}
return defaultSidecarMemoryLimit
}
func (c *KubeTestPlatform) appCPULimit() string {
cpu := os.Getenv("DAPR_APP_CPU_LIMIT")
if cpu != "" {
return cpu
}
return defaultAppCPULimit
}
func (c *KubeTestPlatform) appCPURequest() string {
cpu := os.Getenv("DAPR_APP_CPU_REQUEST")
if cpu != "" {
return cpu
}
return defaultAppCPURequest
}
func (c *KubeTestPlatform) appMemoryRequest() string {
mem := os.Getenv("DAPR_APP_MEMORY_REQUEST")
if mem != "" {
return mem
}
return defaultAppMemoryRequest
}
func (c *KubeTestPlatform) appMemoryLimit() string {
mem := os.Getenv("DAPR_APP_MEMORY_LIMIT")
if mem != "" {
return mem
}
return defaultAppMemoryLimit
}
// AcquireAppExternalURL returns the external url for 'name'.
func (c *KubeTestPlatform) AcquireAppExternalURL(name string) string {
app := c.AppResources.FindActiveResource(name)
return app.(*kube.AppManager).AcquireExternalURL()
}
// GetAppHostDetails returns the name and IP address of the host(pod) running 'name'
func (c *KubeTestPlatform) GetAppHostDetails(name string) (string, string, error) {
app := c.AppResources.FindActiveResource(name)
pods, err := app.(*kube.AppManager).GetHostDetails()
if err != nil {
return "", "", err
}
if len(pods) == 0 {
return "", "", fmt.Errorf("no pods found for app: %v", name)
}
return pods[0].Name, pods[0].IP, nil
}
// Scale changes the number of replicas of the app
func (c *KubeTestPlatform) Scale(name string, replicas int32) error {
app := c.AppResources.FindActiveResource(name)
appManager := app.(*kube.AppManager)
if err := appManager.ScaleDeploymentReplica(replicas); err != nil {
return err
}
_, err := appManager.WaitUntilDeploymentState(appManager.IsDeploymentDone)
return err
}
// Restart restarts all instances for the app.
func (c *KubeTestPlatform) Restart(name string) error {
// To mimic the restart behavior, scale to 0 and then scale back to the original replica count.
app := c.AppResources.FindActiveResource(name)
originalReplicas := app.(*kube.AppManager).App().Replicas
if err := c.Scale(name, 0); err != nil {
return err
}
return c.Scale(name, originalReplicas)
}
// PortForwardToApp opens a new connection to the app on the target ports and returns the local ports or an error.
func (c *KubeTestPlatform) PortForwardToApp(appName string, targetPorts ...int) ([]int, error) {
app := c.AppResources.FindActiveResource(appName)
appManager := app.(*kube.AppManager)
_, err := appManager.WaitUntilDeploymentState(appManager.IsDeploymentDone)
if err != nil {
return nil, err
}
if targetPorts == nil {
return nil, fmt.Errorf("cannot open connection with no target ports")
}
return appManager.DoPortForwarding("", targetPorts...)
}
// GetAppUsage returns the Cpu and Memory usage for the app container for a given app
func (c *KubeTestPlatform) GetAppUsage(appName string) (*AppUsage, error) {
app := c.AppResources.FindActiveResource(appName)
appManager := app.(*kube.AppManager)
cpu, mem, err := appManager.GetCPUAndMemory(false)
if err != nil {
return nil, err
}
return &AppUsage{
CPUm: cpu,
MemoryMb: mem,
}, nil
}
// GetTotalRestarts returns the total of restarts across all pods and containers for an app.
func (c *KubeTestPlatform) GetTotalRestarts(appName string) (int, error) {
app := c.AppResources.FindActiveResource(appName)
appManager := app.(*kube.AppManager)
return appManager.GetTotalRestarts()
}
// GetSidecarUsage returns the Cpu and Memory usage for the dapr container for a given app
func (c *KubeTestPlatform) GetSidecarUsage(appName string) (*AppUsage, error) {
app := c.AppResources.FindActiveResource(appName)
appManager := app.(*kube.AppManager)
cpu, mem, err := appManager.GetCPUAndMemory(true)
if err != nil {
return nil, err
}
return &AppUsage{
CPUm: cpu,
MemoryMb: mem,
}, nil
}
| [
"\"DAPR_TEST_REGISTRY\"",
"\"DAPR_TEST_TAG\"",
"\"DAPR_DISABLE_TELEMETRY\"",
"\"DAPR_SIDECAR_CPU_LIMIT\"",
"\"DAPR_SIDECAR_CPU_REQUEST\"",
"\"DAPR_SIDECAR_MEMORY_REQUEST\"",
"\"DAPR_SIDECAR_MEMORY_LIMIT\"",
"\"DAPR_APP_CPU_LIMIT\"",
"\"DAPR_APP_CPU_REQUEST\"",
"\"DAPR_APP_MEMORY_REQUEST\"",
"\"DAPR_APP_MEMORY_LIMIT\""
]
| []
| [
"DAPR_APP_CPU_REQUEST",
"DAPR_SIDECAR_CPU_REQUEST",
"DAPR_TEST_REGISTRY",
"DAPR_SIDECAR_CPU_LIMIT",
"DAPR_DISABLE_TELEMETRY",
"DAPR_SIDECAR_MEMORY_REQUEST",
"DAPR_APP_CPU_LIMIT",
"DAPR_APP_MEMORY_LIMIT",
"DAPR_TEST_TAG",
"DAPR_SIDECAR_MEMORY_LIMIT",
"DAPR_APP_MEMORY_REQUEST"
]
| [] | ["DAPR_APP_CPU_REQUEST", "DAPR_SIDECAR_CPU_REQUEST", "DAPR_TEST_REGISTRY", "DAPR_SIDECAR_CPU_LIMIT", "DAPR_DISABLE_TELEMETRY", "DAPR_SIDECAR_MEMORY_REQUEST", "DAPR_APP_CPU_LIMIT", "DAPR_APP_MEMORY_LIMIT", "DAPR_TEST_TAG", "DAPR_SIDECAR_MEMORY_LIMIT", "DAPR_APP_MEMORY_REQUEST"] | go | 11 | 0 | |
iwrok_test/wsgi.py | """WSGI config for IWrok Test project."""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "iwrok_test.settings")
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
upup/pkg/fi/cloudup/containerd.go | /*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloudup
import (
"fmt"
"net/url"
"os"
"github.com/blang/semver/v4"
"k8s.io/klog/v2"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/assets"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/util/pkg/architectures"
"k8s.io/kops/util/pkg/hashing"
)
const (
// containerd packages URLs for v1.4.x+
containerdVersionFileAmd64 = "cri-containerd-cni-%s-linux-amd64.tar.gz"
containerdVersionBaseUrlAmd64 = "https://github.com/containerd/containerd/releases/download/v%s/"
// containerd legacy packages URLs for v1.2.x and v1.3.x
containerdLegacyFileAmd64 = "cri-containerd-%s.linux-amd64.tar.gz"
containerdLegacyBaseUrlAmd64 = "https://storage.googleapis.com/cri-containerd-release/"
// containerd version that is available for both AMD64 and ARM64, used in case the selected version is not available for ARM64
containerdFallbackVersion = "1.4.11"
)
func findContainerdAsset(c *kops.Cluster, assetBuilder *assets.AssetBuilder, arch architectures.Architecture) (*url.URL, *hashing.Hash, error) {
if c.Spec.Containerd == nil {
return nil, nil, fmt.Errorf("unable to find containerd config")
}
containerd := c.Spec.Containerd
if containerd.Packages != nil {
if arch == architectures.ArchitectureAmd64 && containerd.Packages.UrlAmd64 != nil && containerd.Packages.HashAmd64 != nil {
assetUrl := fi.StringValue(containerd.Packages.UrlAmd64)
assetHash := fi.StringValue(containerd.Packages.HashAmd64)
return findAssetsUrlHash(assetBuilder, assetUrl, assetHash)
}
if arch == architectures.ArchitectureArm64 && containerd.Packages.UrlArm64 != nil && containerd.Packages.HashArm64 != nil {
assetUrl := fi.StringValue(containerd.Packages.UrlArm64)
assetHash := fi.StringValue(containerd.Packages.HashArm64)
return findAssetsUrlHash(assetBuilder, assetUrl, assetHash)
}
}
version := fi.StringValue(containerd.Version)
if version == "" {
return nil, nil, fmt.Errorf("unable to find containerd version")
}
assetUrl, assetHash, err := findContainerdVersionUrlHash(arch, version)
if err != nil {
return nil, nil, err
}
return findAssetsUrlHash(assetBuilder, assetUrl, assetHash)
}
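// findContainerdVersionUrlHash resolves the download URL and SHA256 hash for the
// requested containerd version, preferring official containerd release packages and
// falling back to the matching Docker package when no containerd hash is known.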
func findContainerdVersionUrlHash(arch architectures.Architecture, version string) (u string, h string, e error) {
var containerdAssetUrl, containerdAssetHash string
if findAllContainerdHashesAmd64()[version] != "" {
var err error
containerdAssetUrl, err = findContainerdVersionUrl(arch, version)
if err != nil {
return "", "", err
}
containerdAssetHash, err = findContainerdVersionHash(arch, version)
if err != nil {
return "", "", err
}
} else {
// Fall back to Docker packages
dv := findAllContainerdDockerMappings()[version]
if dv != "" {
var err error
containerdAssetUrl, err = findDockerVersionUrl(arch, dv)
if err != nil {
return "", "", err
}
containerdAssetHash, err = findDockerVersionHash(arch, dv)
if err != nil {
return "", "", err
}
println(dv)
} else {
return "", "", fmt.Errorf("unknown url and hash for containerd version: %s - %s", arch, version)
}
}
return containerdAssetUrl, containerdAssetHash, nil
}
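// findContainerdVersionUrl builds the package download URL for the given architecture
// and version. The base URL can be overridden with the CONTAINERD_BASE_URL (v1.3.8+)
// or CONTAINERD_LEGACY_BASE_URL (older releases) environment variables.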
func findContainerdVersionUrl(arch architectures.Architecture, version string) (string, error) {
sv, err := semver.ParseTolerant(version)
if err != nil {
return "", fmt.Errorf("unable to parse version string: %q", version)
}
if sv.LT(semver.MustParse("1.3.4")) {
return "", fmt.Errorf("unsupported legacy containerd version: %q", version)
}
var u string
switch arch {
case architectures.ArchitectureAmd64:
if sv.GTE(semver.MustParse("1.3.8")) {
baseUrl := os.Getenv("CONTAINERD_BASE_URL")
if baseUrl == "" {
baseUrl = fmt.Sprintf(containerdVersionBaseUrlAmd64, version)
} else {
klog.Warningf("Using custom base url from CONTAINERD_BASE_URL environment variable: %q", baseUrl)
}
u = fmt.Sprintf(baseUrl+containerdVersionFileAmd64, version)
} else {
baseUrl := os.Getenv("CONTAINERD_LEGACY_BASE_URL")
if baseUrl == "" {
baseUrl = containerdLegacyBaseUrlAmd64
} else {
klog.Warningf("Using custom base url from CONTAINERD_LEGACY_BASE_URL enironmnent variable: %q", baseUrl)
}
u = fmt.Sprintf(baseUrl+containerdLegacyFileAmd64, version)
}
case architectures.ArchitectureArm64:
// For now there are only official AMD64 builds, always using fallback Docker version instead
if findAllContainerdHashesAmd64()[version] != "" {
if findAllContainerdDockerMappings()[version] != "" {
u = fmt.Sprintf(dockerVersionUrlArm64, findAllContainerdDockerMappings()[version])
} else {
u = fmt.Sprintf(dockerVersionUrlArm64, findAllContainerdDockerMappings()[containerdFallbackVersion])
}
}
default:
return "", fmt.Errorf("unknown arch: %q", arch)
}
if u == "" {
return "", fmt.Errorf("unknown url for containerd version: %s - %s", arch, version)
}
return u, nil
}
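// findContainerdVersionHash returns the expected SHA256 hash of the package for the
// given architecture and version; for ARM64 it falls back to the hash of the matching
// Docker package, since no official containerd ARM64 packages are published.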
func findContainerdVersionHash(arch architectures.Architecture, version string) (string, error) {
sv, err := semver.ParseTolerant(version)
if err != nil {
return "", fmt.Errorf("unable to parse version string: %q", version)
}
if sv.LT(semver.MustParse("1.3.4")) {
return "", fmt.Errorf("unsupported legacy containerd version: %q", version)
}
var h string
switch arch {
case architectures.ArchitectureAmd64:
h = findAllContainerdHashesAmd64()[version]
case architectures.ArchitectureArm64:
// For now there are only official AMD64 builds, always using fallback Docker version instead
if findAllContainerdHashesAmd64()[version] != "" {
if findAllContainerdDockerMappings()[version] != "" {
h = findAllDockerHashesArm64()[findAllContainerdDockerMappings()[version]]
} else {
h = findAllDockerHashesArm64()[findAllContainerdDockerMappings()[containerdFallbackVersion]]
}
}
default:
return "", fmt.Errorf("unknown arch: %q", arch)
}
if h == "" {
return "", fmt.Errorf("unknown hash for containerd version: %s - %s", arch, version)
}
return h, nil
}
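// findAllContainerdHashesAmd64 maps known containerd versions to the SHA256 hashes
// of their official AMD64 release packages.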
func findAllContainerdHashesAmd64() map[string]string {
hashes := map[string]string{
"1.3.4": "4616971c3ad21c24f2f2320fa1c085577a91032a068dd56a41c7c4b71a458087",
"1.3.9": "96663699e0f888fbf232ae6629a367aa7421f6b95044e7ee5d4d4e02841fac75",
"1.3.10": "69e23e49cdf1232d475a77bf7ecd7145ff4a80295154e190125c4d8a20e241da",
"1.4.0": "b379f29417efd583f77e095173d4d0bd6bb001f0081b2a63d152ee7aef653ce1",
"1.4.1": "757efb93a4f3161efc447a943317503d8a7ded5cb4cc0cba3f3318d7ce1542ed",
"1.4.2": "9d0fd5f4d2bc58b345728432b7daac75fc99c1da91afa4f41e6103f618e74012",
"1.4.3": "2697a342e3477c211ab48313e259fd7e32ad1f5ded19320e6a559f50a82bff3d",
"1.4.4": "96641849cb78a0a119223a427dfdc1ade88412ef791a14193212c8c8e29d447b",
"1.4.5": "f8155278fd256526ca9804219e1ee46f5db11c6ddf455086b04c0887c868822a",
"1.4.6": "6ae4763598c9583f8b50605f19d6c7e9ef93c216706465e73dfc84ee6b63a238",
"1.4.7": "daa14638344fe0772f645e190e4d8eb9549b52743364cb8000521490f9e410b8",
"1.4.8": "96e815c9ab664a02dd5be35e31d15890ea6bef04dfaa39f99f14676c3d6561e8",
"1.4.9": "9911479f86012d6eab7e0f532da8f807a8b0f555ee09ef89367d8c31243073bb",
"1.4.10": "5b256b372a02fd37c84939c87a6a81cae06058a7881a60d682525c11f6dea7d1",
"1.4.11": "a4a4af4776316833cad5996c66d59f8b4a2af4da716b7902b7a2d5f5ac362dcc",
"1.5.0": "aee7b553ab88842fdafe43955757abe746b8e9995b2be55c603f0a236186ff9b",
"1.5.1": "2fd97916b24396c13849cfcd89805170e1ef0265a2f7fce8e74ae044a6a6a169",
"1.5.2": "e7adbb6c6f6e67639460579a8aa991e9ce4de2062ed36d3261e6e4865574d947",
"1.5.3": "32a9bf1b7ab2adbd9d2a16b17bf1aa6e61592938655adfb5114c40d527aa9be7",
"1.5.4": "591e4e087ea2f5007e6c64deb382df58d419b7b6922eab45a1923d843d57615f",
"1.5.5": "45f02cfc65db47cf088c95555906e1dcba7baf5a3fbad3d947dd6b9af476a144",
"1.5.6": "afc51718ebe46cb9b985edac816e63fe86c07e37d28cdd21b2c0302dec6fa7ae",
"1.5.7": "7fce75bab43a39d6f9efb3c370de2da49723f0e1dbaa9732d68fa7f620d720c8",
}
return hashes
}
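// findAllContainerdDockerMappings maps containerd versions to the Docker releases that
// bundle them; these are used as a fallback package source, notably for ARM64.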
func findAllContainerdDockerMappings() map[string]string {
versions := map[string]string{
"1.3.7": "19.03.13",
"1.3.9": "19.03.14",
"1.4.3": "20.10.0",
"1.4.4": "20.10.6",
"1.4.6": "20.10.7",
"1.4.9": "20.10.8",
"1.4.11": "20.10.9",
}
return versions
}
| [
"\"CONTAINERD_BASE_URL\"",
"\"CONTAINERD_LEGACY_BASE_URL\""
]
| []
| [
"CONTAINERD_LEGACY_BASE_URL",
"CONTAINERD_BASE_URL"
]
| [] | ["CONTAINERD_LEGACY_BASE_URL", "CONTAINERD_BASE_URL"] | go | 2 | 0 | |
container_linux_test.go | // +build linux
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package containerd
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"sync"
"syscall"
"testing"
"time"
"github.com/containerd/cgroups"
"github.com/containerd/containerd/cio"
"github.com/containerd/containerd/containers"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/oci"
"github.com/containerd/containerd/plugin"
"github.com/containerd/containerd/runtime/linux/runctypes"
"github.com/containerd/containerd/runtime/v2/runc/options"
"github.com/containerd/containerd/sys"
specs "github.com/opencontainers/runtime-spec/specs-go"
"golang.org/x/sys/unix"
)
func TestTaskUpdate(t *testing.T) {
t.Parallel()
client, err := newClient(t, address)
if err != nil {
t.Fatal(err)
}
defer client.Close()
var (
ctx, cancel = testContext(t)
id = t.Name()
)
defer cancel()
image, err := client.GetImage(ctx, testImage)
if err != nil {
t.Fatal(err)
}
limit := int64(32 * 1024 * 1024)
memory := func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error {
s.Linux.Resources.Memory = &specs.LinuxMemory{
Limit: &limit,
}
return nil
}
container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image),
WithNewSpec(oci.WithImageConfig(image), withProcessArgs("sleep", "30"), memory))
if err != nil {
t.Fatal(err)
}
defer container.Delete(ctx, WithSnapshotCleanup)
task, err := container.NewTask(ctx, empty())
if err != nil {
t.Fatal(err)
}
defer task.Delete(ctx)
statusC, err := task.Wait(ctx)
if err != nil {
t.Fatal(err)
}
// check that the task has a limit of 32mb
cgroup, err := cgroups.Load(cgroups.V1, cgroups.PidPath(int(task.Pid())))
if err != nil {
t.Fatal(err)
}
stat, err := cgroup.Stat(cgroups.IgnoreNotExist)
if err != nil {
t.Fatal(err)
}
if int64(stat.Memory.Usage.Limit) != limit {
t.Fatalf("expected memory limit to be set to %d but received %d", limit, stat.Memory.Usage.Limit)
}
limit = 64 * 1024 * 1024
if err := task.Update(ctx, WithResources(&specs.LinuxResources{
Memory: &specs.LinuxMemory{
Limit: &limit,
},
})); err != nil {
t.Error(err)
}
// check that the task has a limit of 64mb
if stat, err = cgroup.Stat(cgroups.IgnoreNotExist); err != nil {
t.Fatal(err)
}
if int64(stat.Memory.Usage.Limit) != limit {
t.Errorf("expected memory limit to be set to %d but received %d", limit, stat.Memory.Usage.Limit)
}
if err := task.Kill(ctx, unix.SIGKILL); err != nil {
t.Fatal(err)
}
<-statusC
}
func TestShimInCgroup(t *testing.T) {
t.Parallel()
client, err := newClient(t, address)
if err != nil {
t.Fatal(err)
}
defer client.Close()
var (
ctx, cancel = testContext(t)
id = t.Name()
)
defer cancel()
image, err := client.GetImage(ctx, testImage)
if err != nil {
t.Fatal(err)
}
container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), oci.WithProcessArgs("sleep", "30")))
if err != nil {
t.Fatal(err)
}
defer container.Delete(ctx, WithSnapshotCleanup)
// create a cgroup for the shim to use
path := "/containerd/shim"
cg, err := cgroups.New(cgroups.V1, cgroups.StaticPath(path), &specs.LinuxResources{})
if err != nil {
t.Fatal(err)
}
defer cg.Delete()
task, err := container.NewTask(ctx, empty(), WithShimCgroup(path))
if err != nil {
t.Fatal(err)
}
defer task.Delete(ctx)
statusC, err := task.Wait(ctx)
if err != nil {
t.Fatal(err)
}
// check to see if the shim is inside the cgroup
processes, err := cg.Processes(cgroups.Devices, false)
if err != nil {
t.Fatal(err)
}
if len(processes) == 0 {
t.Errorf("created cgroup should have at least one process inside: %d", len(processes))
}
if err := task.Kill(ctx, unix.SIGKILL); err != nil {
t.Fatal(err)
}
<-statusC
}
func TestDaemonRestart(t *testing.T) {
client, err := newClient(t, address)
if err != nil {
t.Fatal(err)
}
defer client.Close()
var (
image Image
ctx, cancel = testContext(t)
id = t.Name()
)
defer cancel()
image, err = client.GetImage(ctx, testImage)
if err != nil {
t.Fatal(err)
}
container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withProcessArgs("sleep", "30")))
if err != nil {
t.Fatal(err)
}
defer container.Delete(ctx, WithSnapshotCleanup)
task, err := container.NewTask(ctx, empty())
if err != nil {
t.Fatal(err)
}
defer task.Delete(ctx)
statusC, err := task.Wait(ctx)
if err != nil {
t.Fatal(err)
}
if err := task.Start(ctx); err != nil {
t.Fatal(err)
}
var exitStatus ExitStatus
if err := ctrd.Restart(func() {
exitStatus = <-statusC
}); err != nil {
t.Fatal(err)
}
if exitStatus.Error() == nil {
t.Errorf(`first task.Wait() should have failed with "transport is closing"`)
}
waitCtx, waitCancel := context.WithTimeout(ctx, 2*time.Second)
serving, err := client.IsServing(waitCtx)
waitCancel()
if !serving {
t.Fatalf("containerd did not start within 2s: %v", err)
}
statusC, err = task.Wait(ctx)
if err != nil {
t.Fatal(err)
}
if err := task.Kill(ctx, syscall.SIGKILL); err != nil {
t.Fatal(err)
}
<-statusC
}
func TestShimDoesNotLeakPipes(t *testing.T) {
containerdPid := ctrd.cmd.Process.Pid
initialPipes, err := numPipes(containerdPid)
if err != nil {
t.Fatal(err)
}
client, err := newClient(t, address)
if err != nil {
t.Fatal(err)
}
defer client.Close()
var (
image Image
ctx, cancel = testContext(t)
id = t.Name()
)
defer cancel()
image, err = client.GetImage(ctx, testImage)
if err != nil {
t.Fatal(err)
}
container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withProcessArgs("sleep", "30")))
if err != nil {
t.Fatal(err)
}
task, err := container.NewTask(ctx, empty())
if err != nil {
t.Fatal(err)
}
exitChannel, err := task.Wait(ctx)
if err != nil {
t.Fatal(err)
}
if err := task.Start(ctx); err != nil {
t.Fatal(err)
}
if err := task.Kill(ctx, syscall.SIGKILL); err != nil {
t.Fatal(err)
}
<-exitChannel
if _, err := task.Delete(ctx); err != nil {
t.Fatal(err)
}
if err := container.Delete(ctx, WithSnapshotCleanup); err != nil {
t.Fatal(err)
}
currentPipes, err := numPipes(containerdPid)
if err != nil {
t.Fatal(err)
}
if initialPipes != currentPipes {
t.Errorf("Pipes have leaked after container has been deleted. Initially there were %d pipes, after container deletion there were %d pipes", initialPipes, currentPipes)
}
}
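// numPipes counts the FIFO entries held open by the given pid, as reported by lsof.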
func numPipes(pid int) (int, error) {
cmd := exec.Command("sh", "-c", fmt.Sprintf("lsof -p %d | grep FIFO", pid))
var stdout bytes.Buffer
cmd.Stdout = &stdout
if err := cmd.Run(); err != nil {
return 0, err
}
return strings.Count(stdout.String(), "\n"), nil
}
func TestDaemonReconnectsToShimIOPipesOnRestart(t *testing.T) {
client, err := newClient(t, address)
if err != nil {
t.Fatal(err)
}
defer client.Close()
var (
image Image
ctx, cancel = testContext(t)
id = t.Name()
)
defer cancel()
image, err = client.GetImage(ctx, testImage)
if err != nil {
t.Fatal(err)
}
container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withProcessArgs("sleep", "30")))
if err != nil {
t.Fatal(err)
}
defer container.Delete(ctx, WithSnapshotCleanup)
task, err := container.NewTask(ctx, empty())
if err != nil {
t.Fatal(err)
}
defer task.Delete(ctx)
_, err = task.Wait(ctx)
if err != nil {
t.Fatal(err)
}
if err := task.Start(ctx); err != nil {
t.Fatal(err)
}
if err := ctrd.Restart(nil); err != nil {
t.Fatal(err)
}
waitCtx, waitCancel := context.WithTimeout(ctx, 2*time.Second)
serving, err := client.IsServing(waitCtx)
waitCancel()
if !serving {
t.Fatalf("containerd did not start within 2s: %v", err)
}
// After we restarted containerd we write some messages to the log pipes, simulating the shim writing to them.
// Then we make sure that these messages are available on the containerd log thus proving that the server reconnected to the log pipes
runtimeVersion := getRuntimeVersion()
logDirPath := getLogDirPath(runtimeVersion, id)
switch runtimeVersion {
case "v1":
writeToFile(t, filepath.Join(logDirPath, "shim.stdout.log"), fmt.Sprintf("%s writing to stdout\n", id))
writeToFile(t, filepath.Join(logDirPath, "shim.stderr.log"), fmt.Sprintf("%s writing to stderr\n", id))
case "v2":
writeToFile(t, filepath.Join(logDirPath, "log"), fmt.Sprintf("%s writing to log\n", id))
}
statusC, err := task.Wait(ctx)
if err != nil {
t.Fatal(err)
}
if err := task.Kill(ctx, syscall.SIGKILL); err != nil {
t.Fatal(err)
}
<-statusC
stdioContents, err := ioutil.ReadFile(ctrdStdioFilePath)
if err != nil {
t.Fatal(err)
}
switch runtimeVersion {
case "v1":
if !strings.Contains(string(stdioContents), fmt.Sprintf("%s writing to stdout", id)) {
t.Fatal("containerd did not connect to the shim stdout pipe")
}
if !strings.Contains(string(stdioContents), fmt.Sprintf("%s writing to stderr", id)) {
t.Fatal("containerd did not connect to the shim stderr pipe")
}
case "v2":
if !strings.Contains(string(stdioContents), fmt.Sprintf("%s writing to log", id)) {
t.Fatal("containerd did not connect to the shim log pipe")
}
}
}
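// writeToFile writes message to the file at filePath and fails the test on any error.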
func writeToFile(t *testing.T, filePath, message string) {
writer, err := os.OpenFile(filePath, os.O_WRONLY, 0600)
if err != nil {
t.Fatal(err)
}
if _, err := writer.WriteString(message); err != nil {
t.Fatal(err)
}
if err := writer.Close(); err != nil {
t.Fatal(err)
}
}
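// getLogDirPath returns the shim log directory for the given runtime version and container id.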
func getLogDirPath(runtimeVersion, id string) string {
switch runtimeVersion {
case "v1":
return filepath.Join(defaultRoot, plugin.RuntimeLinuxV1, testNamespace, id)
case "v2":
return filepath.Join(defaultState, "io.containerd.runtime.v2.task", testNamespace, id)
default:
panic(fmt.Errorf("Unsupported runtime version %s", runtimeVersion))
}
}
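// getRuntimeVersion infers the shim API version (v1 or v2) from the TEST_RUNTIME environment variable.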
func getRuntimeVersion() string {
switch rt := os.Getenv("TEST_RUNTIME"); rt {
case plugin.RuntimeRuncV1, plugin.RuntimeRuncV2:
return "v2"
default:
return "v1"
}
}
func TestContainerPTY(t *testing.T) {
t.Parallel()
client, err := newClient(t, address)
if err != nil {
t.Fatal(err)
}
defer client.Close()
var (
image Image
ctx, cancel = testContext(t)
id = t.Name()
)
defer cancel()
image, err = client.GetImage(ctx, testImage)
if err != nil {
t.Fatal(err)
}
container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), oci.WithTTY, withProcessArgs("echo", "hello")))
if err != nil {
t.Fatal(err)
}
defer container.Delete(ctx, WithSnapshotCleanup)
direct, err := newDirectIO(ctx, true)
if err != nil {
t.Fatal(err)
}
defer direct.Delete()
var (
wg sync.WaitGroup
buf = bytes.NewBuffer(nil)
)
wg.Add(1)
go func() {
defer wg.Done()
io.Copy(buf, direct.Stdout)
}()
task, err := container.NewTask(ctx, direct.IOCreate)
if err != nil {
t.Fatal(err)
}
defer task.Delete(ctx)
status, err := task.Wait(ctx)
if err != nil {
t.Error(err)
}
if err := task.Start(ctx); err != nil {
t.Fatal(err)
}
<-status
wg.Wait()
if err := direct.Close(); err != nil {
t.Error(err)
}
out := buf.String()
if !strings.ContainsAny(fmt.Sprintf("%#q", out), `\x00`) {
t.Fatal(`expected \x00 in output`)
}
}
func TestContainerAttach(t *testing.T) {
t.Parallel()
if runtime.GOOS == "windows" {
// On windows, closing the write side of the pipe closes the read
// side, sending an EOF to it and preventing reopening it.
// Hence this test will always fails on windows
t.Skip("invalid logic on windows")
}
client, err := newClient(t, address)
if err != nil {
t.Fatal(err)
}
defer client.Close()
var (
image Image
ctx, cancel = testContext(t)
id = t.Name()
)
defer cancel()
image, err = client.GetImage(ctx, testImage)
if err != nil {
t.Fatal(err)
}
container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withCat()))
if err != nil {
t.Fatal(err)
}
defer container.Delete(ctx, WithSnapshotCleanup)
expected := "hello" + newLine
direct, err := newDirectIO(ctx, false)
if err != nil {
t.Fatal(err)
}
defer direct.Delete()
var (
wg sync.WaitGroup
buf = bytes.NewBuffer(nil)
)
wg.Add(1)
go func() {
defer wg.Done()
io.Copy(buf, direct.Stdout)
}()
task, err := container.NewTask(ctx, direct.IOCreate)
if err != nil {
t.Fatal(err)
}
defer task.Delete(ctx)
status, err := task.Wait(ctx)
if err != nil {
t.Error(err)
}
if err := task.Start(ctx); err != nil {
t.Fatal(err)
}
if _, err := fmt.Fprint(direct.Stdin, expected); err != nil {
t.Error(err)
}
// load the container and re-load the task
if container, err = client.LoadContainer(ctx, id); err != nil {
t.Fatal(err)
}
if task, err = container.Task(ctx, direct.IOAttach); err != nil {
t.Fatal(err)
}
if _, err := fmt.Fprint(direct.Stdin, expected); err != nil {
t.Error(err)
}
direct.Stdin.Close()
if err := task.CloseIO(ctx, WithStdinCloser); err != nil {
t.Error(err)
}
<-status
wg.Wait()
if _, err := task.Delete(ctx); err != nil {
t.Error(err)
}
output := buf.String()
// we wrote the same thing after attach
expected = expected + expected
if output != expected {
t.Errorf("expected output %q but received %q", expected, output)
}
}
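// newDirectIO creates a fresh set of FIFOs for driving a task's stdio directly from the test.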
func newDirectIO(ctx context.Context, terminal bool) (*directIO, error) {
fifos, err := cio.NewFIFOSetInDir("", "", terminal)
if err != nil {
return nil, err
}
dio, err := cio.NewDirectIO(ctx, fifos)
if err != nil {
return nil, err
}
return &directIO{DirectIO: *dio}, nil
}
type directIO struct {
cio.DirectIO
}
// IOCreate returns IO available for use with task creation
func (f *directIO) IOCreate(id string) (cio.IO, error) {
return f, nil
}
// IOAttach returns IO available for use with task attachment
func (f *directIO) IOAttach(set *cio.FIFOSet) (cio.IO, error) {
return f, nil
}
func (f *directIO) Cancel() {
// nothing to cancel as all operations are handled externally
}
// Close closes all open fds
func (f *directIO) Close() error {
err := f.Stdin.Close()
if err2 := f.Stdout.Close(); err == nil {
err = err2
}
if err2 := f.Stderr.Close(); err == nil {
err = err2
}
return err
}
// Delete removes the underlying directory containing fifos
func (f *directIO) Delete() error {
return f.DirectIO.Close()
}
func TestContainerUsername(t *testing.T) {
t.Parallel()
client, err := newClient(t, address)
if err != nil {
t.Fatal(err)
}
defer client.Close()
var (
image Image
ctx, cancel = testContext(t)
id = t.Name()
)
defer cancel()
image, err = client.GetImage(ctx, testImage)
if err != nil {
t.Fatal(err)
}
direct, err := newDirectIO(ctx, false)
if err != nil {
t.Fatal(err)
}
defer direct.Delete()
var (
wg sync.WaitGroup
buf = bytes.NewBuffer(nil)
)
wg.Add(1)
go func() {
defer wg.Done()
io.Copy(buf, direct.Stdout)
}()
// squid user in the alpine image has a uid of 31
container, err := client.NewContainer(ctx, id,
WithNewSnapshot(id, image),
WithNewSpec(oci.WithImageConfig(image), oci.WithUsername("squid"), oci.WithProcessArgs("id", "-u")),
)
if err != nil {
t.Fatal(err)
}
defer container.Delete(ctx, WithSnapshotCleanup)
task, err := container.NewTask(ctx, direct.IOCreate)
if err != nil {
t.Fatal(err)
}
defer task.Delete(ctx)
statusC, err := task.Wait(ctx)
if err != nil {
t.Fatal(err)
}
if err := task.Start(ctx); err != nil {
t.Fatal(err)
}
<-statusC
wg.Wait()
output := strings.TrimSuffix(buf.String(), "\n")
if output != "31" {
t.Errorf("expected squid uid to be 31 but received %q", output)
}
}
func TestContainerUser(t *testing.T) {
t.Parallel()
t.Run("UserNameAndGroupName", func(t *testing.T) { testContainerUser(t, "squid:squid", "31:31") })
t.Run("UserIDAndGroupName", func(t *testing.T) { testContainerUser(t, "1001:squid", "1001:31") })
t.Run("UserNameAndGroupID", func(t *testing.T) { testContainerUser(t, "squid:1002", "31:1002") })
t.Run("UserIDAndGroupID", func(t *testing.T) { testContainerUser(t, "1001:1002", "1001:1002") })
}
func testContainerUser(t *testing.T, userstr, expectedOutput string) {
client, err := newClient(t, address)
if err != nil {
t.Fatal(err)
}
defer client.Close()
var (
image Image
ctx, cancel = testContext(t)
id = strings.Replace(t.Name(), "/", "_", -1)
)
defer cancel()
image, err = client.GetImage(ctx, testImage)
if err != nil {
t.Fatal(err)
}
direct, err := newDirectIO(ctx, false)
if err != nil {
t.Fatal(err)
}
defer direct.Delete()
var (
wg sync.WaitGroup
buf = bytes.NewBuffer(nil)
)
wg.Add(1)
go func() {
defer wg.Done()
io.Copy(buf, direct.Stdout)
}()
container, err := client.NewContainer(ctx, id,
WithNewSnapshot(id, image),
WithNewSpec(oci.WithImageConfig(image), oci.WithUser(userstr), oci.WithProcessArgs("sh", "-c", "echo $(id -u):$(id -g)")),
)
if err != nil {
t.Fatal(err)
}
defer container.Delete(ctx, WithSnapshotCleanup)
task, err := container.NewTask(ctx, direct.IOCreate)
if err != nil {
t.Fatal(err)
}
defer task.Delete(ctx)
statusC, err := task.Wait(ctx)
if err != nil {
t.Fatal(err)
}
if err := task.Start(ctx); err != nil {
t.Fatal(err)
}
<-statusC
wg.Wait()
output := strings.TrimSuffix(buf.String(), "\n")
if output != expectedOutput {
t.Errorf("expected uid:gid to be %q, but received %q", expectedOutput, output)
}
}
func TestContainerAttachProcess(t *testing.T) {
t.Parallel()
if runtime.GOOS == "windows" {
// On windows, closing the write side of the pipe closes the read
// side, sending an EOF to it and preventing reopening it.
// Hence this test will always fails on windows
t.Skip("invalid logic on windows")
}
client, err := newClient(t, address)
if err != nil {
t.Fatal(err)
}
defer client.Close()
var (
image Image
ctx, cancel = testContext(t)
id = t.Name()
)
defer cancel()
image, err = client.GetImage(ctx, testImage)
if err != nil {
t.Fatal(err)
}
container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withProcessArgs("sleep", "100")))
if err != nil {
t.Fatal(err)
}
defer container.Delete(ctx, WithSnapshotCleanup)
expected := "hello" + newLine
// creating IO early for easy resource cleanup
direct, err := newDirectIO(ctx, false)
if err != nil {
t.Fatal(err)
}
defer direct.Delete()
var (
wg sync.WaitGroup
buf = bytes.NewBuffer(nil)
)
wg.Add(1)
go func() {
defer wg.Done()
io.Copy(buf, direct.Stdout)
}()
task, err := container.NewTask(ctx, empty())
if err != nil {
t.Fatal(err)
}
defer task.Delete(ctx)
status, err := task.Wait(ctx)
if err != nil {
t.Error(err)
}
if err := task.Start(ctx); err != nil {
t.Fatal(err)
}
spec, err := container.Spec(ctx)
if err != nil {
t.Fatal(err)
}
processSpec := spec.Process
processSpec.Args = []string{"cat"}
execID := t.Name() + "_exec"
process, err := task.Exec(ctx, execID, processSpec, direct.IOCreate)
if err != nil {
t.Fatal(err)
}
processStatusC, err := process.Wait(ctx)
if err != nil {
t.Fatal(err)
}
if err := process.Start(ctx); err != nil {
t.Fatal(err)
}
if _, err := fmt.Fprint(direct.Stdin, expected); err != nil {
t.Error(err)
}
if process, err = task.LoadProcess(ctx, execID, direct.IOAttach); err != nil {
t.Fatal(err)
}
if _, err := fmt.Fprint(direct.Stdin, expected); err != nil {
t.Error(err)
}
direct.Stdin.Close()
if err := process.CloseIO(ctx, WithStdinCloser); err != nil {
t.Error(err)
}
<-processStatusC
wg.Wait()
if err := task.Kill(ctx, syscall.SIGKILL); err != nil {
t.Error(err)
}
output := buf.String()
// we wrote the same thing after attach
expected = expected + expected
if output != expected {
t.Errorf("expected output %q but received %q", expected, output)
}
<-status
}
func TestContainerUserID(t *testing.T) {
t.Parallel()
client, err := newClient(t, address)
if err != nil {
t.Fatal(err)
}
defer client.Close()
var (
image Image
ctx, cancel = testContext(t)
id = t.Name()
)
defer cancel()
image, err = client.GetImage(ctx, testImage)
if err != nil {
t.Fatal(err)
}
direct, err := newDirectIO(ctx, false)
if err != nil {
t.Fatal(err)
}
defer direct.Delete()
var (
wg sync.WaitGroup
buf = bytes.NewBuffer(nil)
)
wg.Add(1)
go func() {
defer wg.Done()
io.Copy(buf, direct.Stdout)
}()
// adm user in the alpine image has a uid of 3 and gid of 4.
container, err := client.NewContainer(ctx, id,
WithNewSnapshot(id, image),
WithNewSpec(oci.WithImageConfig(image), oci.WithUserID(3), oci.WithProcessArgs("sh", "-c", "echo $(id -u):$(id -g)")),
)
if err != nil {
t.Fatal(err)
}
defer container.Delete(ctx, WithSnapshotCleanup)
task, err := container.NewTask(ctx, direct.IOCreate)
if err != nil {
t.Fatal(err)
}
defer task.Delete(ctx)
statusC, err := task.Wait(ctx)
if err != nil {
t.Fatal(err)
}
if err := task.Start(ctx); err != nil {
t.Fatal(err)
}
<-statusC
wg.Wait()
output := strings.TrimSuffix(buf.String(), "\n")
if output != "3:4" {
t.Errorf("expected uid:gid to be 3:4, but received %q", output)
}
}
func TestContainerKillAll(t *testing.T) {
t.Parallel()
client, err := newClient(t, address)
if err != nil {
t.Fatal(err)
}
defer client.Close()
var (
image Image
ctx, cancel = testContext(t)
id = t.Name()
)
defer cancel()
image, err = client.GetImage(ctx, testImage)
if err != nil {
t.Fatal(err)
}
container, err := client.NewContainer(ctx, id,
WithNewSnapshot(id, image),
WithNewSpec(oci.WithImageConfig(image),
withProcessArgs("sh", "-c", "top"),
oci.WithHostNamespace(specs.PIDNamespace),
),
)
if err != nil {
t.Fatal(err)
}
defer container.Delete(ctx, WithSnapshotCleanup)
stdout := bytes.NewBuffer(nil)
task, err := container.NewTask(ctx, cio.NewCreator(withByteBuffers(stdout)))
if err != nil {
t.Fatal(err)
}
defer task.Delete(ctx)
statusC, err := task.Wait(ctx)
if err != nil {
t.Fatal(err)
}
if err := task.Start(ctx); err != nil {
t.Fatal(err)
}
if err := task.Kill(ctx, syscall.SIGKILL, WithKillAll); err != nil {
t.Error(err)
}
<-statusC
if _, err := task.Delete(ctx); err != nil {
t.Fatal(err)
}
}
func TestDaemonRestartWithRunningShim(t *testing.T) {
client, err := newClient(t, address)
if err != nil {
t.Fatal(err)
}
defer client.Close()
var (
image Image
ctx, cancel = testContext(t)
id = t.Name()
)
defer cancel()
image, err = client.GetImage(ctx, testImage)
if err != nil {
t.Fatal(err)
}
container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), oci.WithProcessArgs("sleep", "100")))
if err != nil {
t.Fatal(err)
}
defer container.Delete(ctx, WithSnapshotCleanup)
task, err := container.NewTask(ctx, empty())
if err != nil {
t.Fatal(err)
}
defer task.Delete(ctx)
statusC, err := task.Wait(ctx)
if err != nil {
t.Error(err)
}
pid := task.Pid()
if pid < 1 {
t.Fatalf("invalid task pid %d", pid)
}
if err := task.Start(ctx); err != nil {
t.Fatal(err)
}
var exitStatus ExitStatus
if err := ctrd.Restart(func() {
exitStatus = <-statusC
}); err != nil {
t.Fatal(err)
}
if exitStatus.Error() == nil {
t.Errorf(`first task.Wait() should have failed with "transport is closing"`)
}
waitCtx, cancel := context.WithTimeout(ctx, 1*time.Second)
c, err := ctrd.waitForStart(waitCtx)
cancel()
if err != nil {
t.Fatal(err)
}
c.Close()
statusC, err = task.Wait(ctx)
if err != nil {
t.Error(err)
}
if err := task.Kill(ctx, syscall.SIGKILL); err != nil {
t.Fatal(err)
}
<-statusC
if err := unix.Kill(int(pid), 0); err != unix.ESRCH {
t.Errorf("pid %d still exists", pid)
}
}
func TestContainerRuntimeOptionsv1(t *testing.T) {
t.Parallel()
client, err := newClient(t, address)
if err != nil {
t.Fatal(err)
}
defer client.Close()
var (
image Image
ctx, cancel = testContext(t)
id = t.Name()
)
defer cancel()
image, err = client.GetImage(ctx, testImage)
if err != nil {
t.Fatal(err)
}
container, err := client.NewContainer(
ctx, id,
WithNewSnapshot(id, image),
WithNewSpec(oci.WithImageConfig(image), withExitStatus(7)),
WithRuntime(plugin.RuntimeLinuxV1, &runctypes.RuncOptions{Runtime: "no-runc"}),
)
if err != nil {
t.Fatal(err)
}
defer container.Delete(ctx, WithSnapshotCleanup)
task, err := container.NewTask(ctx, empty())
if err == nil {
t.Errorf("task creation should have failed")
task.Delete(ctx)
return
}
if !strings.Contains(err.Error(), `"no-runc"`) {
t.Errorf("task creation should have failed because of lack of executable. Instead failed with: %v", err.Error())
}
}
func TestContainerRuntimeOptionsv2(t *testing.T) {
t.Parallel()
client, err := newClient(t, address)
if err != nil {
t.Fatal(err)
}
defer client.Close()
var (
image Image
ctx, cancel = testContext(t)
id = t.Name()
)
defer cancel()
image, err = client.GetImage(ctx, testImage)
if err != nil {
t.Fatal(err)
}
container, err := client.NewContainer(
ctx, id,
WithNewSnapshot(id, image),
WithNewSpec(oci.WithImageConfig(image), withExitStatus(7)),
WithRuntime(plugin.RuntimeRuncV1, &options.Options{BinaryName: "no-runc"}),
)
if err != nil {
t.Fatal(err)
}
defer container.Delete(ctx, WithSnapshotCleanup)
task, err := container.NewTask(ctx, empty())
if err == nil {
t.Errorf("task creation should have failed")
task.Delete(ctx)
return
}
if !strings.Contains(err.Error(), `"no-runc"`) {
t.Errorf("task creation should have failed because of lack of executable. Instead failed with: %v", err.Error())
}
}
func initContainerAndCheckChildrenDieOnKill(t *testing.T, opts ...oci.SpecOpts) {
client, err := newClient(t, address)
if err != nil {
t.Fatal(err)
}
defer client.Close()
var (
image Image
ctx, cancel = testContext(t)
id = t.Name()
)
defer cancel()
image, err = client.GetImage(ctx, testImage)
if err != nil {
t.Fatal(err)
}
opts = append(opts, oci.WithImageConfig(image))
opts = append(opts, withProcessArgs("sh", "-c", "sleep 42; echo hi"))
container, err := client.NewContainer(ctx, id,
WithNewSnapshot(id, image),
WithNewSpec(opts...),
)
if err != nil {
t.Fatal(err)
}
defer container.Delete(ctx, WithSnapshotCleanup)
stdout := bytes.NewBuffer(nil)
task, err := container.NewTask(ctx, cio.NewCreator(withByteBuffers(stdout)))
if err != nil {
t.Fatal(err)
}
defer task.Delete(ctx)
statusC, err := task.Wait(ctx)
if err != nil {
t.Fatal(err)
}
if err := task.Start(ctx); err != nil {
t.Fatal(err)
}
if err := task.Kill(ctx, syscall.SIGKILL); err != nil {
t.Error(err)
}
// Give the shim time to reap the init process and kill the orphans
select {
case <-statusC:
case <-time.After(100 * time.Millisecond):
}
b, err := exec.Command("ps", "ax").CombinedOutput()
if err != nil {
t.Fatal(err)
}
if strings.Contains(string(b), "sleep 42") {
t.Fatalf("killing init didn't kill all its children:\n%v", string(b))
}
if _, err := task.Delete(ctx, WithProcessKill); err != nil {
t.Error(err)
}
}
func TestContainerKillInitPidHost(t *testing.T) {
initContainerAndCheckChildrenDieOnKill(t, oci.WithHostNamespace(specs.PIDNamespace))
}
func TestContainerKillInitKillsChildWhenNotHostPid(t *testing.T) {
initContainerAndCheckChildrenDieOnKill(t)
}
func TestUserNamespaces(t *testing.T) {
t.Parallel()
t.Run("WritableRootFS", func(t *testing.T) { testUserNamespaces(t, false) })
// see #1373 and runc#1572
t.Run("ReadonlyRootFS", func(t *testing.T) { testUserNamespaces(t, true) })
}
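// checkUserNS skips the test when user namespaces cannot be created on the host.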
func checkUserNS(t *testing.T) {
cmd := exec.Command("true")
cmd.SysProcAttr = &syscall.SysProcAttr{
Cloneflags: syscall.CLONE_NEWUSER,
}
if err := cmd.Run(); err != nil {
t.Skip("User namespaces are unavailable")
}
}
func testUserNamespaces(t *testing.T, readonlyRootFS bool) {
checkUserNS(t)
client, err := newClient(t, address)
if err != nil {
t.Fatal(err)
}
defer client.Close()
var (
image Image
ctx, cancel = testContext(t)
id = strings.Replace(t.Name(), "/", "-", -1)
)
defer cancel()
image, err = client.GetImage(ctx, testImage)
if err != nil {
t.Fatal(err)
}
opts := []NewContainerOpts{WithNewSpec(oci.WithImageConfig(image),
withExitStatus(7),
oci.WithUserNamespace([]specs.LinuxIDMapping{
{
ContainerID: 0,
HostID: 1000,
Size: 10000,
},
}, []specs.LinuxIDMapping{
{
ContainerID: 0,
HostID: 2000,
Size: 10000,
},
}),
)}
if readonlyRootFS {
opts = append([]NewContainerOpts{WithRemappedSnapshotView(id, image, 1000, 2000)}, opts...)
} else {
opts = append([]NewContainerOpts{WithRemappedSnapshot(id, image, 1000, 2000)}, opts...)
}
container, err := client.NewContainer(ctx, id, opts...)
if err != nil {
t.Fatal(err)
}
defer container.Delete(ctx, WithSnapshotCleanup)
var copts interface{}
if CheckRuntime(client.runtime, "io.containerd.runc") {
copts = &options.Options{
IoUid: 1000,
IoGid: 2000,
}
} else {
copts = &runctypes.CreateOptions{
IoUid: 1000,
IoGid: 2000,
}
}
task, err := container.NewTask(ctx, cio.NewCreator(cio.WithStdio), func(_ context.Context, client *Client, r *TaskInfo) error {
r.Options = copts
return nil
})
if err != nil {
t.Fatal(err)
}
defer task.Delete(ctx)
statusC, err := task.Wait(ctx)
if err != nil {
t.Fatal(err)
}
if pid := task.Pid(); pid < 1 {
t.Errorf("invalid task pid %d", pid)
}
if err := task.Start(ctx); err != nil {
t.Error(err)
task.Delete(ctx)
return
}
status := <-statusC
code, _, err := status.Result()
if err != nil {
t.Fatal(err)
}
if code != 7 {
t.Errorf("expected status 7 from wait but received %d", code)
}
deleteStatus, err := task.Delete(ctx)
if err != nil {
t.Fatal(err)
}
if ec := deleteStatus.ExitCode(); ec != 7 {
t.Errorf("expected status 7 from delete but received %d", ec)
}
}
func TestTaskResize(t *testing.T) {
t.Parallel()
client, err := newClient(t, address)
if err != nil {
t.Fatal(err)
}
defer client.Close()
var (
image Image
ctx, cancel = testContext(t)
id = t.Name()
)
defer cancel()
image, err = client.GetImage(ctx, testImage)
if err != nil {
t.Fatal(err)
}
container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withExitStatus(7)))
if err != nil {
t.Fatal(err)
}
defer container.Delete(ctx, WithSnapshotCleanup)
task, err := container.NewTask(ctx, empty())
if err != nil {
t.Fatal(err)
}
defer task.Delete(ctx)
statusC, err := task.Wait(ctx)
if err != nil {
t.Fatal(err)
}
if err := task.Resize(ctx, 32, 32); err != nil {
t.Fatal(err)
}
task.Kill(ctx, syscall.SIGKILL)
<-statusC
}
func TestContainerImage(t *testing.T) {
t.Parallel()
ctx, cancel := testContext(t)
defer cancel()
id := t.Name()
client, err := newClient(t, address)
if err != nil {
t.Fatal(err)
}
defer client.Close()
image, err := client.GetImage(ctx, testImage)
if err != nil {
t.Fatal(err)
}
container, err := client.NewContainer(ctx, id, WithNewSpec(), WithImage(image))
if err != nil {
t.Fatal(err)
}
defer container.Delete(ctx)
i, err := container.Image(ctx)
if err != nil {
t.Fatal(err)
}
if i.Name() != image.Name() {
t.Fatalf("expected container image name %s but received %s", image.Name(), i.Name())
}
}
func TestContainerNoImage(t *testing.T) {
t.Parallel()
ctx, cancel := testContext(t)
defer cancel()
id := t.Name()
client, err := newClient(t, address)
if err != nil {
t.Fatal(err)
}
defer client.Close()
container, err := client.NewContainer(ctx, id, WithNewSpec())
if err != nil {
t.Fatal(err)
}
defer container.Delete(ctx)
_, err = container.Image(ctx)
if err == nil {
t.Fatal("error should not be nil when container is created without an image")
}
if !errdefs.IsNotFound(err) {
t.Fatalf("expected error to be %s but received %s", errdefs.ErrNotFound, err)
}
}
func TestUIDNoGID(t *testing.T) {
t.Parallel()
ctx, cancel := testContext(t)
defer cancel()
id := t.Name()
client, err := newClient(t, address)
if err != nil {
t.Fatal(err)
}
defer client.Close()
image, err := client.GetImage(ctx, testImage)
if err != nil {
t.Fatal(err)
}
container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithUserID(1000)))
if err != nil {
t.Fatal(err)
}
defer container.Delete(ctx)
spec, err := container.Spec(ctx)
if err != nil {
t.Fatal(err)
}
if uid := spec.Process.User.UID; uid != 1000 {
t.Fatalf("expected uid 1000 but received %d", uid)
}
if gid := spec.Process.User.GID; gid != 0 {
t.Fatalf("expected gid 0 but received %d", gid)
}
}
func TestBindLowPortNonRoot(t *testing.T) {
t.Parallel()
client, err := newClient(t, address)
if err != nil {
t.Fatal(err)
}
defer client.Close()
var (
image Image
ctx, cancel = testContext(t)
id = t.Name()
)
defer cancel()
image, err = client.GetImage(ctx, testImage)
if err != nil {
t.Fatal(err)
}
container, err := client.NewContainer(ctx, id,
WithNewSnapshot(id, image),
WithNewSpec(oci.WithImageConfig(image), withProcessArgs("nc", "-l", "-p", "80"), oci.WithUIDGID(1000, 1000)),
)
if err != nil {
t.Fatal(err)
}
defer container.Delete(ctx, WithSnapshotCleanup)
task, err := container.NewTask(ctx, empty())
if err != nil {
t.Fatal(err)
}
defer task.Delete(ctx)
statusC, err := task.Wait(ctx)
if err != nil {
t.Fatal(err)
}
if err := task.Start(ctx); err != nil {
t.Fatal(err)
}
status := <-statusC
code, _, err := status.Result()
if err != nil {
t.Fatal(err)
}
if code != 1 {
t.Errorf("expected status 1 from wait but received %d", code)
}
if _, err := task.Delete(ctx); err != nil {
t.Fatal(err)
}
}
func TestBindLowPortNonOpt(t *testing.T) {
t.Parallel()
client, err := newClient(t, address)
if err != nil {
t.Fatal(err)
}
defer client.Close()
var (
image Image
ctx, cancel = testContext(t)
id = t.Name()
)
defer cancel()
image, err = client.GetImage(ctx, testImage)
if err != nil {
t.Fatal(err)
}
container, err := client.NewContainer(ctx, id,
WithNewSnapshot(id, image),
WithNewSpec(oci.WithImageConfig(image), withProcessArgs("nc", "-l", "-p", "80"), oci.WithUIDGID(1000, 1000), oci.WithAmbientCapabilities([]string{"CAP_NET_BIND_SERVICE"})),
)
if err != nil {
t.Fatal(err)
}
defer container.Delete(ctx, WithSnapshotCleanup)
task, err := container.NewTask(ctx, empty())
if err != nil {
t.Fatal(err)
}
defer task.Delete(ctx)
statusC, err := task.Wait(ctx)
if err != nil {
t.Fatal(err)
}
if err := task.Start(ctx); err != nil {
t.Fatal(err)
}
go func() {
time.Sleep(2 * time.Second)
task.Kill(ctx, unix.SIGTERM)
}()
status := <-statusC
code, _, err := status.Result()
if err != nil {
t.Fatal(err)
}
// 128 + sigterm
if code != 143 {
t.Errorf("expected status 143 from wait but received %d", code)
}
if _, err := task.Delete(ctx); err != nil {
t.Fatal(err)
}
}
func TestContainerNoSTDIN(t *testing.T) {
t.Parallel()
client, err := newClient(t, address)
if err != nil {
t.Fatal(err)
}
defer client.Close()
var (
image Image
ctx, cancel = testContext(t)
id = t.Name()
)
defer cancel()
image, err = client.GetImage(ctx, testImage)
if err != nil {
t.Fatal(err)
}
container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withExitStatus(0)))
if err != nil {
t.Fatal(err)
}
defer container.Delete(ctx, WithSnapshotCleanup)
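	// create the task with no stdin attached; stdout and stderr are discarded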
task, err := container.NewTask(ctx, cio.NewCreator(cio.WithStreams(nil, ioutil.Discard, ioutil.Discard)))
if err != nil {
t.Fatal(err)
}
defer task.Delete(ctx)
statusC, err := task.Wait(ctx)
if err != nil {
t.Fatal(err)
}
if err := task.Start(ctx); err != nil {
t.Fatal(err)
}
status := <-statusC
code, _, err := status.Result()
if err != nil {
t.Fatal(err)
}
if code != 0 {
t.Errorf("expected status 0 from wait but received %d", code)
}
}
func TestShimOOMScore(t *testing.T) {
containerdPid := ctrd.cmd.Process.Pid
containerdScore, err := sys.GetOOMScoreAdj(containerdPid)
if err != nil {
t.Fatal(err)
}
client, err := newClient(t, address)
if err != nil {
t.Fatal(err)
}
defer client.Close()
var (
image Image
ctx, cancel = testContext(t)
id = t.Name()
)
defer cancel()
path := "/containerd/oomshim"
cg, err := cgroups.New(cgroups.V1, cgroups.StaticPath(path), &specs.LinuxResources{})
if err != nil {
t.Fatal(err)
}
defer cg.Delete()
image, err = client.GetImage(ctx, testImage)
if err != nil {
t.Fatal(err)
}
container, err := client.NewContainer(ctx, id, WithNewSnapshot(id, image), WithNewSpec(oci.WithImageConfig(image), withProcessArgs("sleep", "30")))
if err != nil {
t.Fatal(err)
}
defer container.Delete(ctx, WithSnapshotCleanup)
task, err := container.NewTask(ctx, empty(), WithShimCgroup(path))
if err != nil {
t.Fatal(err)
}
defer task.Delete(ctx)
statusC, err := task.Wait(ctx)
if err != nil {
t.Fatal(err)
}
processes, err := cg.Processes(cgroups.Devices, false)
if err != nil {
t.Fatal(err)
}
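	// the shim is expected to run with an oom_score_adj one above containerd's,
	// so that shims are killed before the daemon under memory pressure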
expectedScore := containerdScore + 1
// find the shim's pid
for _, p := range processes {
score, err := sys.GetOOMScoreAdj(p.Pid)
if err != nil {
t.Fatal(err)
}
if score != expectedScore {
t.Errorf("expected score %d but got %d for shim process", expectedScore, score)
}
}
if err := task.Kill(ctx, unix.SIGKILL); err != nil {
t.Fatal(err)
}
<-statusC
}
| [
"\"TEST_RUNTIME\""
]
| []
| [
"TEST_RUNTIME"
]
| [] | ["TEST_RUNTIME"] | go | 1 | 0 | |
models/model_guami.go | /*
* Nudr_DataRepository API OpenAPI file
*
* Unified Data Repository Service. © 2021, 3GPP Organizational Partners (ARIB, ATIS, CCSA, ETSI, TSDSI, TTA, TTC). All rights reserved.
*
* API version: 2.1.5
* Generated by: OpenAPI Generator (https://openapi-generator.tech)
*/
package models
import (
)
type Guami struct {
// simple type
PlmnId PlmnIdNid `json:"plmnId" yaml:"plmnId" bson:"plmnId" mapstructure:"PlmnId"`
AmfId string `json:"amfId" yaml:"amfId" bson:"amfId" mapstructure:"AmfId"`
}
| []
| []
| []
| [] | [] | go | null | null | null |
rds/config_test.go | package rds
import (
"os"
"github.com/bjlhlin/aliyungo/common"
)
//Modify with your Access Key Id and Access Key Secret
var (
TestAccessKeyId = os.Getenv("AccessKeyId")
TestAccessKeySecret = os.Getenv("AccessKeySecret")
TestSecurityToken = os.Getenv("SecurityToken")
TestRegionID = common.Region(os.Getenv("RegionId"))
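	// The values below are placeholders; replace them with real resource
	// identifiers before running the tests.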
ZoneId = "MY_TEST_ZONE_ID"
VPCId = "MY_TEST_VPC_ID"
VSwitchId = "MY_TEST_VSWITCH_ID"
DBInstanceId = "MY_TEST_INSTANCE_ID"
DBName = "MY_TEST_DB_NAME"
AccountPassword = "MY_TEST_ACCOUNT_PWD"
AccountName = "MY_TEST_ACCOUNT_NAME"
EngineVersion = "MY_TEST_ENGINE_VERSION"
DBInstanceClass = "MY_TEST_DB_CLASS"
DBInstanceUpgradeId = "MY_TEST_INSTANCE_TO_UPGRADE"
DBInstanceUpgradeClass = "MY_TEST_DB_CLASS_TO_UPGRADE"
TestIAmRich = false
)
var testClient *Client
func NewTestClient() *Client {
if testClient == nil {
testClient = NewClient(TestAccessKeyId, TestAccessKeySecret)
}
return testClient
}
var testDebugClient *Client
func NewTestClientForDebug() *Client {
if testDebugClient == nil {
testDebugClient = NewClient(TestAccessKeyId, TestAccessKeySecret)
testDebugClient.SetDebug(true)
}
return testDebugClient
}
| [
"\"AccessKeyId\"",
"\"AccessKeySecret\"",
"\"SecurityToken\"",
"\"RegionId\""
]
| []
| [
"AccessKeySecret",
"RegionId",
"SecurityToken",
"AccessKeyId"
]
| [] | ["AccessKeySecret", "RegionId", "SecurityToken", "AccessKeyId"] | go | 4 | 0 | |
contrib/spendfrom/spendfrom.py | #!/usr/bin/env python
#
# Use the raw transactions API to spend GUAPs received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a guapcoind or guapcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the guapcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Guapcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Guapcoin")
return os.path.expanduser("~/.guapcoin")
def read_bitcoin_config(dbdir):
"""Read the guapcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
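    # ConfigParser needs a [section] header, which guapcoin.conf lacks; this
    # wrapper injects a fake [all] section and strips inline '#' comments.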
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "guapcoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a guapcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 9636 if testnet else 9634
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the guapcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(guapcoind):
info = guapcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
guapcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = guapcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(guapcoind):
address_summary = dict()
address_to_account = dict()
for info in guapcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = guapcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = guapcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-guapcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
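    # Greedy selection: take unspent outputs in the order given until the
    # needed amount is covered; returns (chosen inputs, change amount).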
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(guapcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(guapcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to guapcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = guapcoind.createrawtransaction(inputs, outputs)
signed_rawtx = guapcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(guapcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = guapcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(guapcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = guapcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(guapcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
        tx_size = len(txdata_hex)/2
        kb = tx_size/1000 # integer division rounds down
        fee = total_in - total_out # actual fee paid by this transaction
        if kb > 1 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get GUAPs from")
parser.add_option("--to", dest="to", default=None,
help="address to get send GUAPs to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of guapcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
guapcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(guapcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(guapcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(guapcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(guapcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = guapcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
| []
| []
| [
"APPDATA"
]
| [] | ["APPDATA"] | python | 1 | 0 | |
mqlight/src/main/java/com/ibm/mqlight/api/impl/logging/Javacore.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.ibm.mqlight.api.impl.logging;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.FilenameFilter;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Map;
import com.ibm.mqlight.api.logging.Logger;
import com.ibm.mqlight.api.logging.LoggerFactory;
/**
 * Enables a javacore to be taken. For IBM JREs this uses the built-in {@link com.ibm.jvm.Dump#JavaDump()} service; for non-IBM JREs we simulate a javacore.
*/
class Javacore {
private static final Logger logger = LoggerFactory.getLogger(Javacore.class);
/** The directory where javacore files are expected to be generated. */
private static String javacoreDirectoryPath;
static {
        // Determine if an alternative javacore directory is configured. Only applicable to IBM JREs.
// Note z/OS currently not supported by MQ Light, but check included here for completeness
final String alternativeJavacoreDirectoryPath;
final boolean isZOS = System.getProperty("os.name", "").equalsIgnoreCase("OS/390") || System.getProperty("os.name", "").equalsIgnoreCase("z/OS");
if (isZOS) {
alternativeJavacoreDirectoryPath = System.getenv("_CEE_DMPTARG");
} else {
alternativeJavacoreDirectoryPath = System.getenv("IBM_JAVACOREDIR");
}
if (alternativeJavacoreDirectoryPath != null) {
javacoreDirectoryPath = alternativeJavacoreDirectoryPath;
} else {
javacoreDirectoryPath = System.getProperty("user.dir", "");
}
}
private static Method javaDumpMethod;
private static final String endOfLineCharacter = System.getProperty("line.separator");
static {
logger.data("<clinit>");
// Set up the methods to use for taking javacores
try {
final Class<?> dumpClass = Class.forName("com.ibm.jvm.Dump");
javaDumpMethod = dumpClass.getMethod("JavaDump");
} catch (final Exception exception) {
// No FFDC required - we might not be running on a JVM which
// supports the IBM JavaDump classes.
logger.data("<clinit>", "Can't load com.ibm.jvm.Dump");
}
}
/**
* Public method that allows the caller to ask that a javacore be generated.
*
* @return The file path for the generated java core file.
* @throws Throwable
*/
public static String generateJavaCore() throws Throwable {
final String filePath;
if (javaDumpMethod != null) {
// If javaDumpMethod has been assigned a value then the JVM
// appears to have the appropriate IBM Java classes to take
// a java core.
filePath = takeJavaCore();
} else {
// For JVMs which don't have the IBM value-add function for taking
// java cores, use a portal method for dumping thread stack traces
filePath = simulateJavaCore();
}
return filePath;
}
/**
* Causes an JVM with the IBM com.ibm.jvm.Dump class present to take a java core.
*
* @return The file path for the generated java core file.
* @throws Throwable
*/
private static String takeJavaCore() throws Throwable {
logger.entry("takeJavaCore");
String filePath;
try {
javaDumpMethod.invoke(null);
File javacoreFile = getLatestJavacoreFile();
filePath = javacoreFile.getAbsolutePath();
} catch (final InvocationTargetException invocationException) {
// Unpack invocation target exception and throw to outer catch block.
throw (invocationException.getTargetException() == null) ? invocationException : invocationException.getTargetException();
}
logger.exit("takeJavaCore", (Object) filePath);
return filePath;
}
/**
* @return A file object for the latest (newest) javacore file. Javacore files are assumed to be generated in the current default directory for the application (this should be
* the log root directory for an agent or logger).
* @throws FileNotFoundException If a javacore file cannot be found.
*/
private static File getLatestJavacoreFile() throws FileNotFoundException {
logger.entry("getLatestJavacoreFilePath");
final File javacoreDirectory = new File(javacoreDirectoryPath);
final String[] javacoreFilePaths = javacoreDirectory.list(new FilenameFilter() {
public boolean accept(File dir, String name) {
return name.startsWith("javacore.");
}
});
String requiredJavacoreFilePath = "";
if (javacoreFilePaths != null && javacoreFilePaths.length > 0) {
requiredJavacoreFilePath = javacoreFilePaths[0];
for (String filePath : javacoreFilePaths) {
if (filePath.compareTo(requiredJavacoreFilePath) > 0) requiredJavacoreFilePath = filePath;
}
}
if (requiredJavacoreFilePath.length() == 0) {
final FileNotFoundException exception =
new FileNotFoundException("The javacore file has been generated but was not found in the expected directory: " + javacoreDirectory.getAbsolutePath());
logger.throwing("getLatestJavacoreFilePath", exception);
throw exception;
}
final File result = new File(javacoreDirectory, requiredJavacoreFilePath);
logger.exit("getLatestJavacoreFilePath", result);
return result;
}
/**
* Simulates a java core, for use with JVMs which do not have the com.ibm.jvm.Dump class available. The file is given a name which looks like a java core to avoid introducing new
* 'must gathers' for service.
*
* @return The file path for the generated java core file.
*/
private static String simulateJavaCore() {
logger.entry("simulateJavaCore");
// Make up a unique filename using the same format as that used by the JVM's dump functions
final SimpleDateFormat dateFormat = new SimpleDateFormat("yyyyMMdd.kkmmss.SSS0.");
final String dateString = dateFormat.format(new Date());
boolean unique = false;
int counter = 1;
String filename = "";
while (!unique) {
filename = "javacore." + dateString + String.format("%04d", counter) + ".txt";
unique = !new File(filename).exists();
counter++;
}
// Write a header onto the simulated java core file explaining what it is and who you should
// talk to if you find it.
final StringBuilder sb = new StringBuilder("This JVM does not support com.ibm.jvm.Dump.JavaDump() method");
sb.append(endOfLineCharacter);
sb.append("This file was generated by WebSphere MQ Managed File Transfer");
sb.append(endOfLineCharacter);
sb.append(endOfLineCharacter);
// Iterate over each thread known to the JVM
final Map<Thread, StackTraceElement[]> threadToStackArrayMap = Thread.getAllStackTraces();
for (final Map.Entry<Thread, StackTraceElement[]> entry : threadToStackArrayMap.entrySet()) {
// Generate a thread description in a java core like style.
final Thread thread = entry.getKey();
sb.append("\"");
sb.append(thread.getName());
sb.append("\" ");
sb.append("(id: ");
sb.append(thread.getId());
sb.append(", state: ");
sb.append(thread.getState());
sb.append(") ");
sb.append("priority=");
sb.append(thread.getPriority());
sb.append(", interrupted=");
sb.append(thread.isInterrupted());
sb.append(", daemon=");
sb.append(thread.isDaemon());
sb.append(endOfLineCharacter);
// Dump the stack in a java core like style
for (final StackTraceElement element : entry.getValue()) {
sb.append(" at ");
sb.append(element.getClassName());
sb.append(".");
sb.append(element.getMethodName());
if (element.isNativeMethod()) sb.append("(Native Method)");
else {
sb.append("(");
sb.append(element.getFileName());
sb.append(":");
sb.append(element.getLineNumber());
sb.append(")");
}
sb.append(endOfLineCharacter);
}
sb.append(endOfLineCharacter);
}
sb.append("[EOF]");
// Write the string buffer into a file
final File outputFile = new File(filename);
FileOutputStream out = null;
try {
out = new FileOutputStream(outputFile);
out.write(sb.toString().getBytes("UTF-8"));
} catch (final IOException ioException) {
logger.error("Unable to generate diagnostic information: " + ioException.getLocalizedMessage());
} finally {
try {
if (out != null) out.close();
} catch (final IOException ioException) {
// No FFDC code needed.
// No reasonable action to take - so ignore.
}
}
logger.exit("simulateJavaCore", (Object) outputFile.getAbsolutePath());
return outputFile.getAbsolutePath();
}
}
| [
"\"_CEE_DMPTARG\"",
"\"IBM_JAVACOREDIR\""
]
| []
| [
"IBM_JAVACOREDIR",
"_CEE_DMPTARG"
]
| [] | ["IBM_JAVACOREDIR", "_CEE_DMPTARG"] | java | 2 | 0 | |
pkg/cmd/helm/step_helm_template.go | package helm
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"github.com/jenkins-x/jx-gitops/pkg/cmd/extsecret"
"github.com/jenkins-x/jx-gitops/pkg/cmd/split"
"github.com/jenkins-x/jx-gitops/pkg/common"
"github.com/jenkins-x/jx-gitops/pkg/plugins"
"github.com/jenkins-x/jx/v2/pkg/cmd/helper"
"github.com/jenkins-x/jx/v2/pkg/cmd/templates"
"github.com/jenkins-x/jx/v2/pkg/gits"
"github.com/jenkins-x/jx/v2/pkg/log"
"github.com/jenkins-x/jx/v2/pkg/util"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
var (
helmTemplateLong = templates.LongDesc(`
Generate the kubernetes resources from a helm chart
`)
helmTemplateExample = templates.Examples(`
# generates the resources from a helm chart
%s step helm template
`)
)
// HelmTemplateOptions the options for the command
type TemplateOptions struct {
OutDir string
ReleaseName string
Namespace string
Chart string
ValuesFiles []string
DefaultDomain string
GitCommitMessage string
Version string
Repository string
BatchMode bool
NoGitCommit bool
NoSplit bool
NoExtSecrets bool
IncludeCRDs bool
Gitter gits.Gitter
}
// NewCmdHelmTemplate creates a command object for the command
func NewCmdHelmTemplate() (*cobra.Command, *TemplateOptions) {
o := &TemplateOptions{}
cmd := &cobra.Command{
Use: "template",
Short: "Generate the kubernetes resources from a helm chart",
Long: helmTemplateLong,
Example: fmt.Sprintf(helmTemplateExample, common.BinaryName),
Run: func(cmd *cobra.Command, args []string) {
err := o.Run()
helper.CheckErr(err)
},
}
cmd.Flags().StringVarP(&o.OutDir, "output-dir", "o", "", "the output directory to generate the templates to. Defaults to charts/$name/resources")
cmd.Flags().StringVarP(&o.ReleaseName, "name", "n", "", "the name of the helm release to template. Defaults to $APP_NAME if not specified")
cmd.Flags().StringVarP(&o.Namespace, "namespace", "", "", "specifies the namespace to use to generate the templates in")
cmd.Flags().StringVarP(&o.Chart, "chart", "c", "", "the chart name to template. Defaults to 'charts/$name'")
cmd.Flags().StringArrayVarP(&o.ValuesFiles, "values", "f", []string{""}, "the helm values.yaml file used to template values in the generated template")
cmd.Flags().StringVarP(&o.Version, "version", "v", "", "the version of the helm chart to use. If not specified then the latest one is used")
cmd.Flags().StringVarP(&o.Repository, "repository", "r", "", "the helm chart repository to locate the chart")
cmd.Flags().StringVarP(&o.GitCommitMessage, "commit-message", "", "chore: generated kubernetes resources from helm chart", "the git commit message used")
o.AddFlags(cmd)
return cmd, o
}
func (o *TemplateOptions) AddFlags(cmd *cobra.Command) {
cmd.Flags().StringVarP(&o.DefaultDomain, "domain", "", "cluster.local", "the default domain name in the generated ingress")
cmd.Flags().BoolVarP(&o.NoGitCommit, "no-git-commit", "", false, "if set then the command will not git add/commit the generated resources")
cmd.Flags().BoolVarP(&o.NoSplit, "no-split", "", false, "if set then disable splitting of multiple resources into separate files")
cmd.Flags().BoolVarP(&o.NoExtSecrets, "no-external-secrets", "", false, "if set then disable converting Secret resources to ExternalSecrets")
cmd.Flags().BoolVarP(&o.IncludeCRDs, "include-crds", "", true, "if CRDs should be included in the output")
}
// Run implements the command
func (o *TemplateOptions) Run() error {
bin, err := plugins.GetHelmBinary(plugins.HelmVersion)
if err != nil {
return err
}
name := o.ReleaseName
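	// fall back to the APP_NAME and REPO_NAME environment variables (typically set by the pipeline) when no release name is supplied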
if name == "" {
name = os.Getenv("APP_NAME")
if name == "" {
name = os.Getenv("REPO_NAME")
if name == "" {
return util.MissingOption("name")
}
}
}
chart := o.Chart
if chart == "" {
chart = filepath.Join("charts", name)
}
outDir := o.OutDir
if outDir == "" {
outDir = filepath.Join(chart, "resources")
}
err = os.MkdirAll(outDir, util.DefaultWritePermissions)
if err != nil {
return errors.Wrapf(err, "failed to ensure output directory exists %s", outDir)
}
if o.Repository == "" {
exists, err := util.DirExists(chart)
if err != nil {
return errors.Wrapf(err, "failed to check if dir exists %s", chart)
}
if !exists {
return errors.Errorf("there is no chart at %s - you could try supply --chart", chart)
}
}
tmpDir, err := ioutil.TempDir("", "")
if err != nil {
return errors.Wrap(err, "failed to create temporary directory")
}
tmpChartDir := ""
if o.Repository != "" {
tmpChartDir, err = ioutil.TempDir("", "")
if err != nil {
return errors.Wrap(err, "failed to create temporary chart directory")
}
		// let's fetch the chart
args := []string{"fetch", "--untar", "--repo", o.Repository}
if o.Version != "" {
args = append(args, "--version", o.Version)
}
args = append(args, name)
c := util.Command{
Name: bin,
Args: args,
Dir: tmpChartDir,
Out: os.Stdout,
Err: os.Stderr,
}
log.Logger().Infof("about to run %s", util.ColorInfo(c.String()))
_, err = c.RunWithoutRetry()
if err != nil {
return errors.Wrapf(err, "failed to run %s", c.String())
}
}
cmdDir := ""
args := []string{"template", "--output-dir", tmpDir}
for _, valuesFile := range o.ValuesFiles {
args = append(args, "--values", valuesFile)
}
if o.Repository != "" {
args = append(args, "--repo", o.Repository)
cmdDir = tmpChartDir
}
if o.Namespace != "" {
args = append(args, "--namespace", o.Namespace)
}
if o.Version != "" {
args = append(args, "--version", o.Version)
}
if o.IncludeCRDs {
args = append(args, "--include-crds")
}
args = append(args, name, chart)
c := util.Command{
Name: bin,
Args: args,
Dir: cmdDir,
Out: os.Stdout,
Err: os.Stderr,
}
log.Logger().Infof("about to run %s", util.ColorInfo(c.String()))
_, err = c.RunWithoutRetry()
if err != nil {
return errors.Wrapf(err, "failed to run %s", c.String())
}
	// now let's copy the templates from the temp dir to the outDir
crdsDir := filepath.Join(tmpDir, name, "crds")
exists, err := util.DirExists(crdsDir)
if err != nil {
return errors.Wrapf(err, "failed to check if crds dir was generated")
}
if exists {
err = util.CopyDirOverwrite(crdsDir, outDir)
if err != nil {
return errors.Wrapf(err, "failed to copy generated crds at %s to %s", crdsDir, outDir)
}
}
templatesDir := filepath.Join(tmpDir, name, "templates")
exists, err = util.DirExists(templatesDir)
if err != nil {
return errors.Wrapf(err, "failed to check if templates dir was generated")
}
if !exists {
return errors.Errorf("no templates directory was created at %s", templatesDir)
}
err = util.CopyDirOverwrite(templatesDir, outDir)
if err != nil {
return errors.Wrapf(err, "failed to copy generated templates at %s to %s", templatesDir, outDir)
}
err = os.RemoveAll(tmpDir)
if err != nil {
return errors.Wrapf(err, "failed to remove tmp dir %s", tmpDir)
}
if !o.NoSplit {
so := &split.Options{
Dir: outDir,
}
err = so.Run()
if err != nil {
return errors.Wrapf(err, "failed to split YAML files at %s", outDir)
}
}
if !o.NoExtSecrets {
_, eo := extsecret.NewCmdExtSecrets()
eo.Dir = outDir
err = eo.Run()
if err != nil {
return errors.Wrapf(err, "failed to convert to external Secrets at %s", outDir)
}
}
if o.NoGitCommit {
return nil
}
return o.GitCommit(outDir, o.GitCommitMessage)
}
func (o *TemplateOptions) GitCommit(outDir string, commitMessage string) error {
gitter := o.Git()
err := gitter.Add(outDir, "*")
if err != nil {
return errors.Wrapf(err, "failed to add generated resources to git in dir %s", outDir)
}
err = gitter.CommitIfChanges(outDir, commitMessage)
if err != nil {
return errors.Wrapf(err, "failed to commit generated resources to git in dir %s", outDir)
}
return nil
}
// Git returns the gitter - lazily creating one if required
func (o *TemplateOptions) Git() gits.Gitter {
if o.Gitter == nil {
o.Gitter = gits.NewGitCLI()
}
return o.Gitter
}
| [
"\"APP_NAME\"",
"\"REPO_NAME\""
]
| []
| [
"REPO_NAME",
"APP_NAME"
]
| [] | ["REPO_NAME", "APP_NAME"] | go | 2 | 0 | |
modules/openapi-generator/src/main/java/org/openapitools/codegen/languages/Swift5ClientCodegen.java | /*
* Copyright 2018 OpenAPI-Generator Contributors (https://openapi-generator.tech)
* Copyright 2018 SmartBear Software
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.openapitools.codegen.languages;
import io.swagger.v3.oas.models.media.ArraySchema;
import io.swagger.v3.oas.models.media.Schema;
import org.apache.commons.io.FilenameUtils;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.text.WordUtils;
import org.openapitools.codegen.*;
import org.openapitools.codegen.meta.GeneratorMetadata;
import org.openapitools.codegen.meta.Stability;
import org.openapitools.codegen.utils.ModelUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import java.time.OffsetDateTime;
import java.time.Instant;
import java.time.temporal.ChronoField;
import java.util.concurrent.TimeUnit;
import static org.openapitools.codegen.utils.StringUtils.camelize;
public class Swift5ClientCodegen extends DefaultCodegen implements CodegenConfig {
private final Logger LOGGER = LoggerFactory.getLogger(Swift5ClientCodegen.class);
public static final String PROJECT_NAME = "projectName";
public static final String RESPONSE_AS = "responseAs";
public static final String OBJC_COMPATIBLE = "objcCompatible";
public static final String POD_SOURCE = "podSource";
public static final String POD_AUTHORS = "podAuthors";
public static final String POD_SOCIAL_MEDIA_URL = "podSocialMediaURL";
public static final String POD_LICENSE = "podLicense";
public static final String POD_HOMEPAGE = "podHomepage";
public static final String POD_SUMMARY = "podSummary";
public static final String POD_DESCRIPTION = "podDescription";
public static final String POD_SCREENSHOTS = "podScreenshots";
public static final String POD_DOCUMENTATION_URL = "podDocumentationURL";
public static final String READONLY_PROPERTIES = "readonlyProperties";
public static final String SWIFT_USE_API_NAMESPACE = "swiftUseApiNamespace";
public static final String DEFAULT_POD_AUTHORS = "OpenAPI Generator";
public static final String LENIENT_TYPE_CAST = "lenientTypeCast";
public static final String USE_SPM_FILE_STRUCTURE = "useSPMFileStructure";
public static final String SWIFT_PACKAGE_PATH = "swiftPackagePath";
public static final String USE_CLASSES = "useClasses";
public static final String USE_BACKTICK_ESCAPES = "useBacktickEscapes";
public static final String GENERATE_MODEL_ADDITIONAL_PROPERTIES = "generateModelAdditionalProperties";
public static final String HASHABLE_MODELS = "hashableModels";
public static final String MAP_FILE_BINARY_TO_DATA = "mapFileBinaryToData";
protected static final String LIBRARY_ALAMOFIRE = "alamofire";
protected static final String LIBRARY_URLSESSION = "urlsession";
protected static final String LIBRARY_VAPOR = "vapor";
protected static final String RESPONSE_LIBRARY_PROMISE_KIT = "PromiseKit";
protected static final String RESPONSE_LIBRARY_RX_SWIFT = "RxSwift";
protected static final String RESPONSE_LIBRARY_RESULT = "Result";
protected static final String RESPONSE_LIBRARY_COMBINE = "Combine";
protected static final String[] RESPONSE_LIBRARIES = {RESPONSE_LIBRARY_PROMISE_KIT, RESPONSE_LIBRARY_RX_SWIFT, RESPONSE_LIBRARY_RESULT, RESPONSE_LIBRARY_COMBINE};
protected String projectName = "OpenAPIClient";
protected boolean nonPublicApi = false;
protected boolean objcCompatible = false;
protected boolean lenientTypeCast = false;
protected boolean readonlyProperties = false;
protected boolean swiftUseApiNamespace = false;
protected boolean useSPMFileStructure = false;
protected String swiftPackagePath = "Classes" + File.separator + "OpenAPIs";
protected boolean useClasses = false;
protected boolean useBacktickEscapes = false;
protected boolean generateModelAdditionalProperties = true;
protected boolean hashableModels = true;
protected boolean mapFileBinaryToData = false;
protected String[] responseAs = new String[0];
protected String sourceFolder = swiftPackagePath;
protected HashSet objcReservedWords;
protected String apiDocPath = "docs/";
protected String modelDocPath = "docs/";
/**
* Constructor for the swift5 language codegen module.
*/
public Swift5ClientCodegen() {
super();
this.useOneOfInterfaces = true;
generatorMetadata = GeneratorMetadata.newBuilder(generatorMetadata)
.stability(Stability.STABLE)
.build();
outputFolder = "generated-code" + File.separator + "swift";
modelTemplateFiles.put("model.mustache", ".swift");
apiTemplateFiles.put("api.mustache", ".swift");
embeddedTemplateDir = templateDir = "swift5";
apiPackage = File.separator + "APIs";
modelPackage = File.separator + "Models";
modelDocTemplateFiles.put("model_doc.mustache", ".md");
apiDocTemplateFiles.put("api_doc.mustache", ".md");
languageSpecificPrimitives = new HashSet<>(
Arrays.asList(
"Int",
"Int32",
"Int64",
"Float",
"Double",
"Bool",
"Void",
"String",
"Data",
"Date",
"Character",
"UUID",
"URL",
"AnyObject",
"Any",
"Decimal")
);
defaultIncludes = new HashSet<>(
Arrays.asList(
"Data",
"Date",
"URL", // for file
"UUID",
"Array",
"Dictionary",
"Set",
"Any",
"Empty",
"AnyObject",
"Any",
"Decimal")
);
objcReservedWords = new HashSet<>(
Arrays.asList(
// Added for Objective-C compatibility
"id", "description", "NSArray", "NSURL", "CGFloat", "NSSet", "NSString", "NSInteger", "NSUInteger",
"NSError", "NSDictionary",
// 'Property 'hash' with type 'String' cannot override a property with type 'Int' (when objcCompatible=true)
"hash",
// Cannot override with a stored property 'className'
"className"
)
);
reservedWords = new HashSet<>(
Arrays.asList(
// name used by swift client
"ErrorResponse", "Response",
// Swift keywords. This list is taken from here:
// https://developer.apple.com/library/content/documentation/Swift/Conceptual/Swift_Programming_Language/LexicalStructure.html#//apple_ref/doc/uid/TP40014097-CH30-ID410
//
// Keywords used in declarations
"associatedtype", "class", "deinit", "enum", "extension", "fileprivate", "func", "import", "init",
"inout", "internal", "let", "open", "operator", "private", "protocol", "public", "static", "struct",
"subscript", "typealias", "var",
                        // Keywords used in statements
"break", "case", "continue", "default", "defer", "do", "else", "fallthrough", "for", "guard", "if",
"in", "repeat", "return", "switch", "where", "while",
// Keywords used in expressions and types
"as", "Any", "catch", "false", "is", "nil", "rethrows", "super", "self", "Self", "throw", "throws", "true", "try",
// Keywords used in patterns
"_",
// Keywords that begin with a number sign
"#available", "#colorLiteral", "#column", "#else", "#elseif", "#endif", "#file", "#fileLiteral", "#function", "#if",
"#imageLiteral", "#line", "#selector", "#sourceLocation",
// Keywords reserved in particular contexts
"associativity", "convenience", "dynamic", "didSet", "final", "get", "infix", "indirect", "lazy", "left",
"mutating", "none", "nonmutating", "optional", "override", "postfix", "precedence", "prefix", "Protocol",
"required", "right", "set", "Type", "unowned", "weak", "willSet",
//
// Swift Standard Library types
// https://developer.apple.com/documentation/swift
//
// Numbers and Basic Values
"Bool", "Int", "Double", "Float", "Range", "ClosedRange", "Error", "Optional",
// Special-Use Numeric Types
"UInt", "UInt8", "UInt16", "UInt32", "UInt64", "Int8", "Int16", "Int32", "Int64", "Float80", "Float32", "Float64",
// Strings and Text
"String", "Character", "Unicode", "StaticString",
// Collections
"Array", "Dictionary", "Set", "OptionSet", "CountableRange", "CountableClosedRange",
// The following are commonly-used Foundation types
"URL", "Data", "Codable", "Encodable", "Decodable",
// The following are other words we want to reserve
"Void", "AnyObject", "Class", "dynamicType", "COLUMN", "FILE", "FUNCTION", "LINE"
)
);
typeMapping = new HashMap<>();
typeMapping.put("array", "Array");
typeMapping.put("map", "Dictionary");
typeMapping.put("set", "Set");
typeMapping.put("date", "Date");
typeMapping.put("Date", "Date");
typeMapping.put("DateTime", "Date");
typeMapping.put("boolean", "Bool");
typeMapping.put("string", "String");
typeMapping.put("char", "Character");
typeMapping.put("short", "Int");
typeMapping.put("int", "Int");
typeMapping.put("long", "Int64");
typeMapping.put("integer", "Int");
typeMapping.put("Integer", "Int");
typeMapping.put("float", "Float");
typeMapping.put("number", "Double");
typeMapping.put("double", "Double");
typeMapping.put("file", "URL");
typeMapping.put("binary", "URL");
typeMapping.put("ByteArray", "Data");
typeMapping.put("UUID", "UUID");
typeMapping.put("URI", "String");
typeMapping.put("decimal", "Decimal");
typeMapping.put("object", "AnyCodable");
typeMapping.put("AnyType", "AnyCodable");
importMapping = new HashMap<>();
cliOptions.add(new CliOption(PROJECT_NAME, "Project name in Xcode"));
cliOptions.add(new CliOption(RESPONSE_AS,
"Optionally use libraries to manage response. Currently "
+ StringUtils.join(RESPONSE_LIBRARIES, ", ")
+ " are available."));
cliOptions.add(new CliOption(CodegenConstants.NON_PUBLIC_API,
CodegenConstants.NON_PUBLIC_API_DESC
+ "(default: false)"));
cliOptions.add(new CliOption(OBJC_COMPATIBLE,
"Add additional properties and methods for Objective-C "
+ "compatibility (default: false)"));
cliOptions.add(new CliOption(POD_SOURCE, "Source information used for Podspec"));
cliOptions.add(new CliOption(CodegenConstants.POD_VERSION, "Version used for Podspec"));
cliOptions.add(new CliOption(POD_AUTHORS, "Authors used for Podspec"));
cliOptions.add(new CliOption(POD_SOCIAL_MEDIA_URL, "Social Media URL used for Podspec"));
cliOptions.add(new CliOption(POD_LICENSE, "License used for Podspec"));
cliOptions.add(new CliOption(POD_HOMEPAGE, "Homepage used for Podspec"));
cliOptions.add(new CliOption(POD_SUMMARY, "Summary used for Podspec"));
cliOptions.add(new CliOption(POD_DESCRIPTION, "Description used for Podspec"));
cliOptions.add(new CliOption(POD_SCREENSHOTS, "Screenshots used for Podspec"));
cliOptions.add(new CliOption(POD_DOCUMENTATION_URL,
"Documentation URL used for Podspec"));
cliOptions.add(new CliOption(READONLY_PROPERTIES, "Make properties "
+ "readonly (default: false)"));
cliOptions.add(new CliOption(SWIFT_USE_API_NAMESPACE,
"Flag to make all the API classes inner-class "
+ "of {{projectName}}API"));
cliOptions.add(new CliOption(CodegenConstants.HIDE_GENERATION_TIMESTAMP,
CodegenConstants.HIDE_GENERATION_TIMESTAMP_DESC)
.defaultValue(Boolean.TRUE.toString()));
cliOptions.add(new CliOption(LENIENT_TYPE_CAST,
"Accept and cast values for simple types (string->bool, "
+ "string->int, int->string)")
.defaultValue(Boolean.FALSE.toString()));
cliOptions.add(new CliOption(USE_BACKTICK_ESCAPES,
"Escape reserved words using backticks (default: false)")
.defaultValue(Boolean.FALSE.toString()));
cliOptions.add(new CliOption(GENERATE_MODEL_ADDITIONAL_PROPERTIES,
"Generate model additional properties (default: true)")
.defaultValue(Boolean.TRUE.toString()));
cliOptions.add(new CliOption(CodegenConstants.API_NAME_PREFIX, CodegenConstants.API_NAME_PREFIX_DESC));
cliOptions.add(new CliOption(USE_SPM_FILE_STRUCTURE, "Use SPM file structure"
+ " and set the source path to Sources" + File.separator + "{{projectName}} (default: false)."));
cliOptions.add(new CliOption(SWIFT_PACKAGE_PATH, "Set a custom source path instead of "
+ projectName + File.separator + "Classes" + File.separator + "OpenAPIs" + "."));
cliOptions.add(new CliOption(USE_CLASSES, "Use final classes for models instead of structs (default: false)")
.defaultValue(Boolean.FALSE.toString()));
cliOptions.add(new CliOption(HASHABLE_MODELS,
"Make hashable models (default: true)")
.defaultValue(Boolean.TRUE.toString()));
cliOptions.add(new CliOption(MAP_FILE_BINARY_TO_DATA,
"[WARNING] This option will be removed and enabled by default in the future once we've enhanced the code to work with `Data` in all the different situations. Map File and Binary to Data (default: false)")
.defaultValue(Boolean.FALSE.toString()));
supportedLibraries.put(LIBRARY_URLSESSION, "[DEFAULT] HTTP client: URLSession");
supportedLibraries.put(LIBRARY_ALAMOFIRE, "HTTP client: Alamofire");
supportedLibraries.put(LIBRARY_VAPOR, "HTTP client: Vapor");
CliOption libraryOption = new CliOption(CodegenConstants.LIBRARY, "Library template (sub-template) to use");
libraryOption.setEnum(supportedLibraries);
libraryOption.setDefault(LIBRARY_URLSESSION);
cliOptions.add(libraryOption);
setLibrary(LIBRARY_URLSESSION);
}
private static CodegenModel reconcileProperties(CodegenModel codegenModel,
CodegenModel parentCodegenModel) {
// To support inheritance in this generator, we will analyze
// the parent and child models, look for properties that match, and remove
// them from the child models and leave them in the parent.
// Because the child models extend the parents, the properties
// will be available via the parent.
// Get the properties for the parent and child models
final List<CodegenProperty> parentModelCodegenProperties = parentCodegenModel.vars;
List<CodegenProperty> codegenProperties = codegenModel.vars;
codegenModel.allVars = new ArrayList<CodegenProperty>(codegenProperties);
codegenModel.parentVars = parentCodegenModel.allVars;
// Iterate over all of the parent model properties
boolean removedChildProperty = false;
for (CodegenProperty parentModelCodegenProperty : parentModelCodegenProperties) {
            // Now that we have found a property in the parent class,
            // search the child class for the same property.
Iterator<CodegenProperty> iterator = codegenProperties.iterator();
while (iterator.hasNext()) {
CodegenProperty codegenProperty = iterator.next();
if (codegenProperty.baseName.equals(parentModelCodegenProperty.baseName)) {
// We found a property in the child class that is
// a duplicate of the one in the parent, so remove it.
iterator.remove();
removedChildProperty = true;
}
}
}
if (removedChildProperty) {
codegenModel.vars = codegenProperties;
}
return codegenModel;
}
@Override
public CodegenType getTag() {
return CodegenType.CLIENT;
}
@Override
public String getName() {
return "swift5";
}
@Override
public String getHelp() {
return "Generates a Swift 5.x client library.";
}
@Override
protected void addAdditionPropertiesToCodeGenModel(CodegenModel codegenModel,
Schema schema) {
final Schema additionalProperties = getAdditionalProperties(schema);
if (additionalProperties != null) {
Schema inner = null;
if (ModelUtils.isArraySchema(schema)) {
ArraySchema ap = (ArraySchema) schema;
inner = ap.getItems();
} else if (ModelUtils.isMapSchema(schema)) {
inner = getAdditionalProperties(schema);
}
codegenModel.additionalPropertiesType = inner != null ? getTypeDeclaration(inner) : getSchemaType(additionalProperties);
}
}
@Override
public void processOpts() {
super.processOpts();
if (StringUtils.isEmpty(System.getenv("SWIFT_POST_PROCESS_FILE"))) {
LOGGER.info("Environment variable SWIFT_POST_PROCESS_FILE not defined so the Swift code may not be properly formatted. To define it, try 'export SWIFT_POST_PROCESS_FILE=/usr/local/bin/swiftformat' (Linux/Mac)");
LOGGER.info("NOTE: To enable file post-processing, 'enablePostProcessFile' must be set to `true` (--enable-post-process-file for CLI).");
}
// Setup project name
if (additionalProperties.containsKey(PROJECT_NAME)) {
setProjectName((String) additionalProperties.get(PROJECT_NAME));
} else {
additionalProperties.put(PROJECT_NAME, projectName);
}
sourceFolder = projectName + File.separator + sourceFolder;
// Setup nonPublicApi option, which generates code with reduced access
// modifiers; allows embedding elsewhere without exposing non-public API calls
// to consumers
if (additionalProperties.containsKey(CodegenConstants.NON_PUBLIC_API)) {
setNonPublicApi(convertPropertyToBooleanAndWriteBack(CodegenConstants.NON_PUBLIC_API));
}
additionalProperties.put(CodegenConstants.NON_PUBLIC_API, nonPublicApi);
// Setup objcCompatible option, which adds additional properties
// and methods for Objective-C compatibility
if (additionalProperties.containsKey(OBJC_COMPATIBLE)) {
setObjcCompatible(convertPropertyToBooleanAndWriteBack(OBJC_COMPATIBLE));
}
additionalProperties.put(OBJC_COMPATIBLE, objcCompatible);
// add objc reserved words
if (Boolean.TRUE.equals(objcCompatible)) {
reservedWords.addAll(objcReservedWords);
}
if (additionalProperties.containsKey(RESPONSE_AS)) {
Object responseAsObject = additionalProperties.get(RESPONSE_AS);
if (responseAsObject instanceof String) {
setResponseAs(((String) responseAsObject).split(","));
} else {
setResponseAs((String[]) responseAsObject);
}
}
additionalProperties.put(RESPONSE_AS, responseAs);
if (ArrayUtils.contains(responseAs, RESPONSE_LIBRARY_PROMISE_KIT)) {
additionalProperties.put("usePromiseKit", true);
}
if (ArrayUtils.contains(responseAs, RESPONSE_LIBRARY_RX_SWIFT)) {
additionalProperties.put("useRxSwift", true);
}
if (ArrayUtils.contains(responseAs, RESPONSE_LIBRARY_RESULT)) {
additionalProperties.put("useResult", true);
}
if (ArrayUtils.contains(responseAs, RESPONSE_LIBRARY_COMBINE)) {
additionalProperties.put("useCombine", true);
}
// Setup readonlyProperties option, which declares properties so they can only
// be set at initialization
if (additionalProperties.containsKey(READONLY_PROPERTIES)) {
setReadonlyProperties(convertPropertyToBooleanAndWriteBack(READONLY_PROPERTIES));
}
additionalProperties.put(READONLY_PROPERTIES, readonlyProperties);
// Setup swiftUseApiNamespace option, which makes all the API
// classes inner-class of {{projectName}}
if (additionalProperties.containsKey(SWIFT_USE_API_NAMESPACE)) {
setSwiftUseApiNamespace(convertPropertyToBooleanAndWriteBack(SWIFT_USE_API_NAMESPACE));
}
if (!additionalProperties.containsKey(POD_AUTHORS)) {
additionalProperties.put(POD_AUTHORS, DEFAULT_POD_AUTHORS);
}
if (additionalProperties.containsKey(USE_SPM_FILE_STRUCTURE)) {
setUseSPMFileStructure(convertPropertyToBooleanAndWriteBack(USE_SPM_FILE_STRUCTURE));
sourceFolder = "Sources" + File.separator + projectName;
}
if (additionalProperties.containsKey(SWIFT_PACKAGE_PATH) && ((String)additionalProperties.get(SWIFT_PACKAGE_PATH)).length() > 0) {
setSwiftPackagePath((String)additionalProperties.get(SWIFT_PACKAGE_PATH));
sourceFolder = swiftPackagePath;
}
if (additionalProperties.containsKey(USE_BACKTICK_ESCAPES)) {
setUseBacktickEscapes(convertPropertyToBooleanAndWriteBack(USE_BACKTICK_ESCAPES));
}
if (additionalProperties.containsKey(GENERATE_MODEL_ADDITIONAL_PROPERTIES)) {
setGenerateModelAdditionalProperties(convertPropertyToBooleanAndWriteBack(GENERATE_MODEL_ADDITIONAL_PROPERTIES));
}
additionalProperties.put(GENERATE_MODEL_ADDITIONAL_PROPERTIES, generateModelAdditionalProperties);
if (additionalProperties.containsKey(HASHABLE_MODELS)) {
setHashableModels(convertPropertyToBooleanAndWriteBack(HASHABLE_MODELS));
}
additionalProperties.put(HASHABLE_MODELS, hashableModels);
if (additionalProperties.containsKey(MAP_FILE_BINARY_TO_DATA)) {
setMapFileBinaryToData(convertPropertyToBooleanAndWriteBack(MAP_FILE_BINARY_TO_DATA));
}
additionalProperties.put(MAP_FILE_BINARY_TO_DATA, mapFileBinaryToData);
if (mapFileBinaryToData) {
typeMapping.put("file", "Data");
typeMapping.put("binary", "Data");
}
if (additionalProperties.containsKey(USE_CLASSES)) {
setUseClasses(convertPropertyToBooleanAndWriteBack(USE_CLASSES));
}
additionalProperties.put(USE_CLASSES, useClasses);
setLenientTypeCast(convertPropertyToBooleanAndWriteBack(LENIENT_TYPE_CAST));
// make api and model doc path available in mustache template
additionalProperties.put("apiDocPath", apiDocPath);
additionalProperties.put("modelDocPath", modelDocPath);
if (!getLibrary().equals(LIBRARY_VAPOR)) {
supportingFiles.add(new SupportingFile("Podspec.mustache",
"",
projectName + ".podspec"));
supportingFiles.add(new SupportingFile("Cartfile.mustache",
"",
"Cartfile"));
supportingFiles.add(new SupportingFile("CodableHelper.mustache",
sourceFolder,
"CodableHelper.swift"));
supportingFiles.add(new SupportingFile("OpenISO8601DateFormatter.mustache",
sourceFolder,
"OpenISO8601DateFormatter.swift"));
supportingFiles.add(new SupportingFile("JSONDataEncoding.mustache",
sourceFolder,
"JSONDataEncoding.swift"));
supportingFiles.add(new SupportingFile("JSONEncodingHelper.mustache",
sourceFolder,
"JSONEncodingHelper.swift"));
supportingFiles.add(new SupportingFile("git_push.sh.mustache",
"",
"git_push.sh"));
supportingFiles.add(new SupportingFile("SynchronizedDictionary.mustache",
sourceFolder,
"SynchronizedDictionary.swift"));
supportingFiles.add(new SupportingFile("XcodeGen.mustache",
"",
"project.yml"));
supportingFiles.add(new SupportingFile("APIHelper.mustache",
sourceFolder,
"APIHelper.swift"));
supportingFiles.add(new SupportingFile("Models.mustache",
sourceFolder,
"Models.swift"));
}
supportingFiles.add(new SupportingFile("Package.swift.mustache",
"",
"Package.swift"));
supportingFiles.add(new SupportingFile("Configuration.mustache",
sourceFolder,
"Configuration.swift"));
supportingFiles.add(new SupportingFile("Extensions.mustache",
sourceFolder,
"Extensions.swift"));
supportingFiles.add(new SupportingFile("APIs.mustache",
sourceFolder,
"APIs.swift"));
supportingFiles.add(new SupportingFile("gitignore.mustache",
"",
".gitignore"));
supportingFiles.add(new SupportingFile("README.mustache",
"",
"README.md"));
switch (getLibrary()) {
case LIBRARY_ALAMOFIRE:
additionalProperties.put("useAlamofire", true);
supportingFiles.add(new SupportingFile("AlamofireImplementations.mustache",
sourceFolder,
"AlamofireImplementations.swift"));
break;
case LIBRARY_URLSESSION:
additionalProperties.put("useURLSession", true);
supportingFiles.add(new SupportingFile("URLSessionImplementations.mustache",
sourceFolder,
"URLSessionImplementations.swift"));
break;
case LIBRARY_VAPOR:
additionalProperties.put("useVapor", true);
break;
default:
break;
}
}
public boolean isMapFileBinaryToData() {
return mapFileBinaryToData;
}
public void setMapFileBinaryToData(boolean mapFileBinaryToData) {
this.mapFileBinaryToData = mapFileBinaryToData;
}
@Override
protected boolean isReservedWord(String word) {
return word != null && reservedWords.contains(word); //don't lowercase as super does
}
@Override
public String escapeReservedWord(String name) {
if (this.reservedWordsMappings().containsKey(name)) {
return this.reservedWordsMappings().get(name);
}
return useBacktickEscapes && !objcCompatible ? "`" + name + "`" : "_" + name;
}
@Override
public String modelFileFolder() {
return outputFolder + File.separator + sourceFolder
+ modelPackage().replace('.', File.separatorChar);
}
@Override
public String apiFileFolder() {
return outputFolder + File.separator + sourceFolder
+ apiPackage().replace('.', File.separatorChar);
}
@Override
public String getTypeDeclaration(Schema p) {
if (ModelUtils.isArraySchema(p)) {
ArraySchema ap = (ArraySchema) p;
Schema inner = ap.getItems();
return ModelUtils.isSet(p) ? "Set<" + getTypeDeclaration(inner) + ">" : "[" + getTypeDeclaration(inner) + "]";
} else if (ModelUtils.isMapSchema(p)) {
Schema inner = getAdditionalProperties(p);
return "[String: " + getTypeDeclaration(inner) + "]";
}
return super.getTypeDeclaration(p);
}
@Override
public String getSchemaType(Schema p) {
String openAPIType = super.getSchemaType(p);
String type;
if (typeMapping.containsKey(openAPIType)) {
type = typeMapping.get(openAPIType);
if (languageSpecificPrimitives.contains(type) || defaultIncludes.contains(type)) {
return type;
}
} else {
type = openAPIType;
}
return toModelName(type);
}
@Override
public boolean isDataTypeFile(String dataType) {
return "URL".equals(dataType);
}
@Override
public boolean isDataTypeBinary(final String dataType) {
return "Data".equals(dataType);
}
/**
* Output the proper model name (capitalized).
*
* @param name the name of the model
* @return capitalized model name
*/
@Override
public String toModelName(String name) {
// FIXME parameter should not be assigned. Also declare it as "final"
name = sanitizeName(name);
if (!StringUtils.isEmpty(modelNameSuffix)) { // set model suffix
name = name + "_" + modelNameSuffix;
}
if (!StringUtils.isEmpty(modelNamePrefix)) { // set model prefix
name = modelNamePrefix + "_" + name;
}
// camelize the model name
// phone_number => PhoneNumber
name = camelize(name);
// model name cannot use reserved keyword, e.g. return
if (isReservedWord(name)) {
String modelName = "Model" + name;
LOGGER.warn(name + " (reserved word) cannot be used as model name. Renamed to "
+ modelName);
return modelName;
}
// model name starts with number
if (name.matches("^\\d.*")) {
// e.g. 200Response => Model200Response (after camelize)
String modelName = "Model" + name;
LOGGER.warn(name
+ " (model name starts with number) cannot be used as model name."
+ " Renamed to " + modelName);
return modelName;
}
return name;
}
/**
* Return the capitalized file name of the model.
*
* @param name the model name
* @return the file name of the model
*/
@Override
public String toModelFilename(String name) {
// should be the same as the model name
return toModelName(name);
}
@Override
public String toDefaultValue(Schema p) {
if (p.getEnum() != null && !p.getEnum().isEmpty()) {
if (p.getDefault() != null) {
if (ModelUtils.isStringSchema(p)) {
return "." + toEnumVarName(escapeText((String) p.getDefault()), p.getType());
} else {
return "." + toEnumVarName(escapeText(p.getDefault().toString()), p.getType());
}
}
}
if (p.getDefault() != null) {
if (ModelUtils.isIntegerSchema(p) || ModelUtils.isNumberSchema(p) || ModelUtils.isBooleanSchema(p)) {
return p.getDefault().toString();
} else if (ModelUtils.isDateTimeSchema(p)) {
// Datetime time stamps in Swift are expressed as Seconds with Microsecond precision.
// In Java, we need to be creative to get the Timestamp in Microseconds as a long.
Instant instant = ((OffsetDateTime) p.getDefault()).toInstant();
long epochMicro = TimeUnit.SECONDS.toMicros(instant.getEpochSecond()) + (instant.get(ChronoField.MICRO_OF_SECOND));
return "Date(timeIntervalSince1970: " + String.valueOf(epochMicro) + ".0 / 1_000_000)";
} else if (ModelUtils.isStringSchema(p)) {
return "\"" + escapeText((String) p.getDefault()) + "\"";
}
// TODO: Handle more cases from `ModelUtils`, such as Date
}
return null;
}
@Override
public String toInstantiationType(Schema p) {
if (ModelUtils.isMapSchema(p)) {
return getSchemaType(getAdditionalProperties(p));
} else if (ModelUtils.isArraySchema(p)) {
ArraySchema ap = (ArraySchema) p;
String inner = getSchemaType(ap.getItems());
return ModelUtils.isSet(p) ? "Set<" + inner + ">" : "[" + inner + "]";
}
return null;
}
@Override
public String toApiName(String name) {
if (name.length() == 0) {
return "DefaultAPI";
}
return camelize(apiNamePrefix + "_" + name) + "API";
}
@Override
public String apiDocFileFolder() {
return (outputFolder + "/" + apiDocPath).replace("/", File.separator);
}
@Override
public String modelDocFileFolder() {
return (outputFolder + "/" + modelDocPath).replace("/", File.separator);
}
@Override
public String toModelDocFilename(String name) {
return toModelName(name);
}
@Override
public String toApiDocFilename(String name) {
return toApiName(name);
}
@Override
public String toOperationId(String operationId) {
operationId = camelize(sanitizeName(operationId), true);
// Throw exception if method name is empty.
// This should not happen but keep the check just in case
if (StringUtils.isEmpty(operationId)) {
throw new RuntimeException("Empty method name (operationId) not allowed");
}
// method name cannot use reserved keyword, e.g. return
if (isReservedWord(operationId)) {
String newOperationId = camelize(("call_" + operationId), true);
LOGGER.warn(operationId + " (reserved word) cannot be used as method name."
+ " Renamed to " + newOperationId);
return newOperationId;
}
// operationId starts with a number
if (operationId.matches("^\\d.*")) {
LOGGER.warn(operationId + " (starting with a number) cannot be used as method name. Renamed to " + camelize(sanitizeName("call_" + operationId), true));
operationId = camelize(sanitizeName("call_" + operationId), true);
}
return operationId;
}
@Override
public String toVarName(String name) {
// sanitize name
name = sanitizeName(name);
        // if it's all upper case, do nothing
if (name.matches("^[A-Z_]*$")) {
return name;
}
// camelize the variable name
// pet_id => petId
name = camelize(name, true);
// for reserved words surround with `` or append _
if (isReservedWord(name)) {
name = escapeReservedWord(name);
}
// for words starting with number, append _
if (name.matches("^\\d.*")) {
name = "_" + name;
}
return name;
}
@Override
public String toParamName(String name) {
// sanitize name
name = sanitizeName(name);
// replace - with _ e.g. created-at => created_at
name = name.replaceAll("-", "_");
        // if it's all upper case, do nothing
if (name.matches("^[A-Z_]*$")) {
return name;
}
// camelize(lower) the variable name
// pet_id => petId
name = camelize(name, true);
// for reserved words surround with ``
if (isReservedWord(name)) {
name = escapeReservedWord(name);
}
// for words starting with number, append _
if (name.matches("^\\d.*")) {
name = "_" + name;
}
return name;
}
@Override
public CodegenModel fromModel(String name, Schema model) {
Map<String, Schema> allDefinitions = ModelUtils.getSchemas(this.openAPI);
CodegenModel codegenModel = super.fromModel(name, model);
if (codegenModel.description != null) {
codegenModel.imports.add("ApiModel");
}
if (allDefinitions != null) {
String parentSchema = codegenModel.parentSchema;
// multilevel inheritance: reconcile properties of all the parents
while (parentSchema != null) {
final Schema parentModel = allDefinitions.get(parentSchema);
final CodegenModel parentCodegenModel = super.fromModel(codegenModel.parent,
parentModel);
codegenModel = Swift5ClientCodegen.reconcileProperties(codegenModel, parentCodegenModel);
// get the next parent
parentSchema = parentCodegenModel.parentSchema;
}
}
if (hashableModels) {
codegenModel.vendorExtensions.put("x-swift-hashable", true);
}
return codegenModel;
}
public void setProjectName(String projectName) {
this.projectName = projectName;
}
public void setNonPublicApi(boolean nonPublicApi) {
this.nonPublicApi = nonPublicApi;
}
public void setObjcCompatible(boolean objcCompatible) {
this.objcCompatible = objcCompatible;
}
public void setLenientTypeCast(boolean lenientTypeCast) {
this.lenientTypeCast = lenientTypeCast;
}
public void setReadonlyProperties(boolean readonlyProperties) {
this.readonlyProperties = readonlyProperties;
}
public void setResponseAs(String[] responseAs) {
this.responseAs = responseAs;
}
public void setSwiftUseApiNamespace(boolean swiftUseApiNamespace) {
this.swiftUseApiNamespace = swiftUseApiNamespace;
}
public void setUseSPMFileStructure(boolean useSPMFileStructure) {
this.useSPMFileStructure = useSPMFileStructure;
}
public void setSwiftPackagePath(String swiftPackagePath) {
this.swiftPackagePath = swiftPackagePath;
}
public void setUseClasses(boolean useClasses) {
this.useClasses = useClasses;
}
public void setUseBacktickEscapes(boolean useBacktickEscapes) {
this.useBacktickEscapes = useBacktickEscapes;
}
public void setGenerateModelAdditionalProperties(boolean generateModelAdditionalProperties) {
this.generateModelAdditionalProperties = generateModelAdditionalProperties;
}
public void setHashableModels(boolean hashableModels) {
this.hashableModels = hashableModels;
}
@Override
public String toEnumValue(String value, String datatype) {
// for string, array of string
if ("String".equals(datatype) || "[String]".equals(datatype) || "[String: String]".equals(datatype)) {
return "\"" + String.valueOf(value) + "\"";
} else {
return String.valueOf(value);
}
}
@Override
public String toEnumDefaultValue(String value, String datatype) {
return datatype + "_" + value;
}
@Override
public String toEnumVarName(String name, String datatype) {
if (name.length() == 0) {
return "empty";
}
Pattern startWithNumberPattern = Pattern.compile("^\\d+");
Matcher startWithNumberMatcher = startWithNumberPattern.matcher(name);
if (startWithNumberMatcher.find()) {
String startingNumbers = startWithNumberMatcher.group(0);
String nameWithoutStartingNumbers = name.substring(startingNumbers.length());
return "_" + startingNumbers + camelize(nameWithoutStartingNumbers, true);
}
// for symbol, e.g. $, #
if (getSymbolName(name) != null) {
return camelize(WordUtils.capitalizeFully(getSymbolName(name).toUpperCase(Locale.ROOT)), true);
}
// Camelize only when we have a structure defined below
Boolean camelized = false;
if (name.matches("[A-Z][a-z0-9]+[a-zA-Z0-9]*")) {
name = camelize(name, true);
camelized = true;
}
// Reserved Name
String nameLowercase = StringUtils.lowerCase(name);
if (isReservedWord(nameLowercase)) {
return escapeReservedWord(nameLowercase);
}
// Check for numerical conversions
if ("Int".equals(datatype) || "Int32".equals(datatype) || "Int64".equals(datatype)
|| "Float".equals(datatype) || "Double".equals(datatype)) {
String varName = "number" + camelize(name);
varName = varName.replaceAll("-", "minus");
varName = varName.replaceAll("\\+", "plus");
varName = varName.replaceAll("\\.", "dot");
return varName;
}
// If we have already camelized the word, don't progress
// any further
if (camelized) {
return name;
}
char[] separators = {'-', '_', ' ', ':', '(', ')'};
return camelize(WordUtils.capitalizeFully(StringUtils.lowerCase(name), separators)
.replaceAll("[-_ :\\(\\)]", ""),
true);
}
@Override
public String toEnumName(CodegenProperty property) {
String enumName = toModelName(property.name);
        // Ensure that the enum type doesn't match a reserved word and that the
        // variable name doesn't match the generated enum type, or the
        // Swift compiler will generate an error.
if (isReservedWord(property.datatypeWithEnum)
|| toVarName(property.name).equals(property.datatypeWithEnum)) {
enumName = property.datatypeWithEnum + "Enum";
}
// TODO: toModelName already does something for names starting with number,
// so this code is probably never called
if (enumName.matches("\\d.*")) { // starts with number
return "_" + enumName;
} else {
return enumName;
}
}
@Override
public Map<String, Object> postProcessModels(Map<String, Object> objs) {
Map<String, Object> postProcessedModelsEnum = postProcessModelsEnum(objs);
// We iterate through the list of models, and also iterate through each of the
// properties for each model. For each property, if:
//
// CodegenProperty.name != CodegenProperty.baseName
//
// then we set
//
// CodegenProperty.vendorExtensions["x-codegen-escaped-property-name"] = true
//
// Also, if any property in the model has x-codegen-escaped-property-name=true, then we mark:
//
// CodegenModel.vendorExtensions["x-codegen-has-escaped-property-names"] = true
//
List<Object> models = (List<Object>) postProcessedModelsEnum.get("models");
for (Object _mo : models) {
Map<String, Object> mo = (Map<String, Object>) _mo;
CodegenModel cm = (CodegenModel) mo.get("model");
boolean modelHasPropertyWithEscapedName = false;
for (CodegenProperty prop : cm.allVars) {
if (!prop.name.equals(prop.baseName)) {
prop.vendorExtensions.put("x-codegen-escaped-property-name", true);
modelHasPropertyWithEscapedName = true;
}
}
if (modelHasPropertyWithEscapedName) {
cm.vendorExtensions.put("x-codegen-has-escaped-property-names", true);
}
}
return postProcessedModelsEnum;
}
@Override
public void postProcessModelProperty(CodegenModel model, CodegenProperty property) {
super.postProcessModelProperty(model, property);
boolean isSwiftScalarType = property.isInteger || property.isLong || property.isFloat
|| property.isDouble || property.isBoolean;
if ((!property.required || property.isNullable) && isSwiftScalarType) {
// Optional scalar types like Int?, Int64?, Float?, Double?, and Bool?
// do not translate to Objective-C. So we want to flag those
// properties in case we want to put special code in the templates
// which provide Objective-C compatibility.
property.vendorExtensions.put("x-swift-optional-scalar", true);
}
}
@Override
public String escapeQuotationMark(String input) {
// remove " to avoid code injection
return input.replace("\"", "");
}
@Override
public String escapeUnsafeCharacters(String input) {
return input.replace("*/", "*_/").replace("/*", "/_*");
}
@Override
public void postProcessFile(File file, String fileType) {
if (file == null) {
return;
}
String swiftPostProcessFile = System.getenv("SWIFT_POST_PROCESS_FILE");
if (StringUtils.isEmpty(swiftPostProcessFile)) {
return; // skip if SWIFT_POST_PROCESS_FILE env variable is not defined
}
// only process files with swift extension
if ("swift".equals(FilenameUtils.getExtension(file.toString()))) {
String command = swiftPostProcessFile + " " + file.toString();
try {
Process p = Runtime.getRuntime().exec(command);
int exitValue = p.waitFor();
if (exitValue != 0) {
LOGGER.error("Error running the command ({}). Exit value: {}", command, exitValue);
} else {
LOGGER.info("Successfully executed: " + command);
}
} catch (InterruptedException | IOException e) {
LOGGER.error("Error running the command ({}). Exception: {}", command, e.getMessage());
// Restore interrupted state
Thread.currentThread().interrupt();
}
}
}
@Override
public Map<String, Object> postProcessOperationsWithModels(Map<String, Object> objs, List<Object> allModels) {
Map<String, Object> objectMap = (Map<String, Object>) objs.get("operations");
HashMap<String, CodegenModel> modelMaps = new HashMap<String, CodegenModel>();
for (Object o : allModels) {
HashMap<String, Object> h = (HashMap<String, Object>) o;
CodegenModel m = (CodegenModel) h.get("model");
modelMaps.put(m.classname, m);
}
List<CodegenOperation> operations = (List<CodegenOperation>) objectMap.get("operation");
for (CodegenOperation operation : operations) {
for (CodegenParameter cp : operation.allParams) {
cp.vendorExtensions.put("x-swift-example", constructExampleCode(cp, modelMaps, new HashSet<String>()));
}
}
return objs;
}
public String constructExampleCode(CodegenParameter codegenParameter, HashMap<String, CodegenModel> modelMaps, Set<String> visitedModels) {
if (codegenParameter.isArray) { // array
return "[" + constructExampleCode(codegenParameter.items, modelMaps, visitedModels) + "]";
} else if (codegenParameter.isMap) { // TODO: map, file type
return "\"TODO\"";
} else if (languageSpecificPrimitives.contains(codegenParameter.dataType)) { // primitive type
if ("String".equals(codegenParameter.dataType) || "Character".equals(codegenParameter.dataType)) {
                if (!StringUtils.isEmpty(codegenParameter.example)) {
                    return "\"" + codegenParameter.example + "\"";
                } else {
                    return "\"" + codegenParameter.paramName + "_example\"";
                }
} else if ("Bool".equals(codegenParameter.dataType)) { // boolean
if (Boolean.parseBoolean(codegenParameter.example)) {
return "true";
} else {
return "false";
}
} else if ("URL".equals(codegenParameter.dataType)) { // URL
return "URL(string: \"https://example.com\")!";
} else if ("Data".equals(codegenParameter.dataType)) { // URL
return "Data([9, 8, 7])";
} else if ("Date".equals(codegenParameter.dataType)) { // date
return "Date()";
} else { // numeric
                if (!StringUtils.isEmpty(codegenParameter.example)) {
                    return codegenParameter.example;
                } else {
                    return "987";
                }
}
} else { // model
// look up the model
if (modelMaps.containsKey(codegenParameter.dataType)) {
if (visitedModels.contains(codegenParameter.dataType)) {
                    // recursive/self-referencing model, simply return nil to avoid stack overflow
return "nil";
} else {
visitedModels.add(codegenParameter.dataType);
return constructExampleCode(modelMaps.get(codegenParameter.dataType), modelMaps, visitedModels);
}
} else {
//LOGGER.error("Error in constructing examples. Failed to look up the model " + codegenParameter.dataType);
return "TODO";
}
}
}
public String constructExampleCode(CodegenProperty codegenProperty, HashMap<String, CodegenModel> modelMaps, Set<String> visitedModels) {
if (codegenProperty.isArray) { // array
return "[" + constructExampleCode(codegenProperty.items, modelMaps, visitedModels) + "]";
} else if (codegenProperty.isMap) { // TODO: map, file type
return "\"TODO\"";
} else if (languageSpecificPrimitives.contains(codegenProperty.dataType)) { // primitive type
if ("String".equals(codegenProperty.dataType) || "Character".equals(codegenProperty.dataType)) {
                if (!StringUtils.isEmpty(codegenProperty.example)) {
                    return "\"" + codegenProperty.example + "\"";
                } else {
                    return "\"" + codegenProperty.name + "_example\"";
                }
} else if ("Bool".equals(codegenProperty.dataType)) { // boolean
if (Boolean.parseBoolean(codegenProperty.example)) {
return "true";
} else {
return "false";
}
} else if ("URL".equals(codegenProperty.dataType)) { // URL
return "URL(string: \"https://example.com\")!";
} else if ("Date".equals(codegenProperty.dataType)) { // date
return "Date()";
} else { // numeric
                if (!StringUtils.isEmpty(codegenProperty.example)) {
                    return codegenProperty.example;
                } else {
                    return "123";
                }
}
} else {
// look up the model
if (modelMaps.containsKey(codegenProperty.dataType)) {
if (visitedModels.contains(codegenProperty.dataType)) {
                    // recursive/self-referencing model, simply return nil to avoid stack overflow
return "nil";
} else {
visitedModels.add(codegenProperty.dataType);
return constructExampleCode(modelMaps.get(codegenProperty.dataType), modelMaps, visitedModels);
}
} else {
//LOGGER.error("Error in constructing examples. Failed to look up the model " + codegenProperty.dataType);
return "\"TODO\"";
}
}
}
public String constructExampleCode(CodegenModel codegenModel, HashMap<String, CodegenModel> modelMaps, Set<String> visitedModels) {
String example;
example = codegenModel.name + "(";
List<String> propertyExamples = new ArrayList<>();
for (CodegenProperty codegenProperty : codegenModel.vars) {
propertyExamples.add(codegenProperty.name + ": " + constructExampleCode(codegenProperty, modelMaps, visitedModels));
}
example += StringUtils.join(propertyExamples, ", ");
example += ")";
return example;
}
@Override
public void postProcess() {
System.out.println("################################################################################");
System.out.println("# Thanks for using OpenAPI Generator. #");
System.out.println("# Please consider donation to help us maintain this project \uD83D\uDE4F #");
System.out.println("# https://opencollective.com/openapi_generator/donate #");
System.out.println("# #");
System.out.println("# swift5 generator is contributed by Bruno Coelho (https://github.com/4brunu). #");
System.out.println("# Please support his work directly via https://paypal.com/paypalme/4brunu \uD83D\uDE4F #");
System.out.println("################################################################################");
}
}
| [
"\"SWIFT_POST_PROCESS_FILE\"",
"\"SWIFT_POST_PROCESS_FILE\""
]
| []
| [
"SWIFT_POST_PROCESS_FILE"
]
| [] | ["SWIFT_POST_PROCESS_FILE"] | java | 1 | 0 | |
cmd/podman/runlabel.go | package main
import (
"fmt"
"io"
"os"
"strings"
"github.com/containers/image/v4/types"
"github.com/containers/libpod/cmd/podman/cliconfig"
"github.com/containers/libpod/cmd/podman/libpodruntime"
"github.com/containers/libpod/cmd/podman/shared"
"github.com/containers/libpod/libpod/define"
"github.com/containers/libpod/libpod/image"
"github.com/containers/libpod/pkg/util"
"github.com/containers/libpod/utils"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
var (
runlabelCommand cliconfig.RunlabelValues
runlabelDescription = `
Executes a command as described by a container image label.
`
_runlabelCommand = &cobra.Command{
Use: "runlabel [flags] LABEL IMAGE [ARG...]",
Short: "Execute the command described by an image label",
Long: runlabelDescription,
RunE: func(cmd *cobra.Command, args []string) error {
runlabelCommand.InputArgs = args
runlabelCommand.GlobalFlags = MainGlobalOpts
runlabelCommand.Remote = remoteclient
return runlabelCmd(&runlabelCommand)
},
Example: `podman container runlabel run imageID
podman container runlabel --pull install imageID arg1 arg2
podman container runlabel --display run myImage`,
}
)
func init() {
runlabelCommand.Command = _runlabelCommand
runlabelCommand.SetHelpTemplate(HelpTemplate())
runlabelCommand.SetUsageTemplate(UsageTemplate())
flags := runlabelCommand.Flags()
flags.StringVar(&runlabelCommand.Creds, "creds", "", "`Credentials` (USERNAME:PASSWORD) to use for authenticating to a registry")
flags.BoolVar(&runlabelCommand.Display, "display", false, "Preview the command that the label would run")
flags.BoolVar(&runlabelCommand.Replace, "replace", false, "Replace existing container with a new one from the image")
flags.StringVar(&runlabelCommand.Name, "name", "", "Assign a name to the container")
flags.StringVar(&runlabelCommand.Opt1, "opt1", "", "Optional parameter to pass for install")
flags.StringVar(&runlabelCommand.Opt2, "opt2", "", "Optional parameter to pass for install")
flags.StringVar(&runlabelCommand.Opt3, "opt3", "", "Optional parameter to pass for install")
markFlagHidden(flags, "opt1")
markFlagHidden(flags, "opt2")
markFlagHidden(flags, "opt3")
flags.BoolP("pull", "p", false, "Pull the image if it does not exist locally prior to executing the label contents")
flags.BoolVarP(&runlabelCommand.Quiet, "quiet", "q", false, "Suppress output information when installing images")
// Disabled flags for the remote client
if !remote {
flags.StringVar(&runlabelCommand.Authfile, "authfile", shared.GetAuthFile(""), "Path of the authentication file. Use REGISTRY_AUTH_FILE environment variable to override")
flags.StringVar(&runlabelCommand.CertDir, "cert-dir", "", "`Pathname` of a directory containing TLS certificates and keys")
flags.StringVar(&runlabelCommand.SignaturePolicy, "signature-policy", "", "`Pathname` of signature policy file (not usually used)")
flags.BoolVar(&runlabelCommand.TlsVerify, "tls-verify", true, "Require HTTPS and verify certificates when contacting registries")
if err := flags.MarkDeprecated("pull", "podman will pull if not found in local storage"); err != nil {
logrus.Error("unable to mark pull flag deprecated")
}
markFlagHidden(flags, "signature-policy")
}
}
// runlabelCmd gets the data from the command line and executes the command
// described by the image's runlabel
func runlabelCmd(c *cliconfig.RunlabelValues) error {
var (
imageName string
stdErr, stdOut io.Writer
stdIn io.Reader
extraArgs []string
)
	// Evil images could trick us into recursively executing the runlabel
	// command. Avoid this by setting the "PODMAN_RUNLABEL_NESTED" env
	// variable when executing a label, so nested calls can be detected.
nested := os.Getenv("PODMAN_RUNLABEL_NESTED")
if nested == "1" {
return fmt.Errorf("nested runlabel calls: runlabels cannot execute the runlabel command")
}
opts := make(map[string]string)
runtime, err := libpodruntime.GetRuntime(getContext(), &c.PodmanCommand)
if err != nil {
return errors.Wrapf(err, "could not get runtime")
}
defer runtime.DeferredShutdown(false)
args := c.InputArgs
if len(args) < 2 {
return errors.Errorf("the runlabel command requires at least 2 arguments: LABEL IMAGE")
}
if c.Display && c.Quiet {
return errors.Errorf("the display and quiet flags cannot be used together.")
}
if len(args) > 2 {
extraArgs = args[2:]
}
label := args[0]
runlabelImage := args[1]
if c.Flag("opt1").Changed {
opts["opt1"] = c.Opt1
}
if c.Flag("opt2").Changed {
opts["opt2"] = c.Opt2
}
if c.Flag("opt3").Changed {
opts["opt3"] = c.Opt3
}
ctx := getContext()
stdErr = os.Stderr
stdOut = os.Stdout
stdIn = os.Stdin
if c.Quiet {
stdErr = nil
stdOut = nil
stdIn = nil
}
dockerRegistryOptions := image.DockerRegistryOptions{
DockerCertPath: c.CertDir,
}
if c.Flag("tls-verify").Changed {
dockerRegistryOptions.DockerInsecureSkipTLSVerify = types.NewOptionalBool(!c.TlsVerify)
}
runLabel, imageName, err := shared.GetRunlabel(label, runlabelImage, ctx, runtime, true, c.Creds, dockerRegistryOptions, c.Authfile, c.SignaturePolicy, stdOut)
if err != nil {
return err
}
if runLabel == "" {
return errors.Errorf("%s does not have a label of %s", runlabelImage, label)
}
globalOpts := util.GetGlobalOpts(c)
cmd, env, err := shared.GenerateRunlabelCommand(runLabel, imageName, c.Name, opts, extraArgs, globalOpts)
if err != nil {
return err
}
if !c.Quiet {
fmt.Printf("command: %s\n", strings.Join(append([]string{os.Args[0]}, cmd[1:]...), " "))
if c.Display {
return nil
}
}
// If container already exists && --replace given -- Nuke it
if c.Replace {
for i, entry := range cmd {
if entry == "--name" {
name := cmd[i+1]
ctr, err := runtime.LookupContainer(name)
if err != nil {
if errors.Cause(err) != define.ErrNoSuchCtr {
logrus.Debugf("Error occurred searching for container %s: %s", name, err.Error())
return err
}
} else {
logrus.Debugf("Runlabel --replace option given. Container %s will be deleted. The new container will be named %s", ctr.ID(), name)
if err := runtime.RemoveContainer(ctx, ctr, true, false); err != nil {
return err
}
}
break
}
}
}
return utils.ExecCmdWithStdStreams(stdIn, stdOut, stdErr, env, cmd[0], cmd[1:]...)
}
| [
"\"PODMAN_RUNLABEL_NESTED\""
]
| []
| [
"PODMAN_RUNLABEL_NESTED"
]
| [] | ["PODMAN_RUNLABEL_NESTED"] | go | 1 | 0 | |
dogs/app.py | from flask import Flask, render_template, request, make_response
import os
import random
import boto3
import configparser
config = configparser.ConfigParser()
config.read('config.ini')
IAMAccessKey = config['DEFAULT']["IAMAccessKey"]
IAMSecretKey = config['DEFAULT']["IAMSecretKey"]
s3_region = config['DEFAULT']["s3_region"]
s3_bucket = config['DEFAULT']["s3_bucket"]
s3 = boto3.client(
's3',
aws_access_key_id=IAMAccessKey,
aws_secret_access_key=IAMSecretKey,
region_name=s3_region
)
image_direc = "dogs/static/Images"
app = Flask(__name__)
@app.route("/", methods=['GET', 'POST'])
def index():
resp = make_response(render_template("index.html"))
resp.set_cookie("score", "0")
return resp
@app.route("/dog", methods=["GET"])
@app.route("/dog/random", methods=['GET'])
def get_random_dog_handler():
DOG_PATH = os.environ.get("DOG_PATH", "dogs/static/images")
breed_path = random.choice(os.listdir(DOG_PATH))
dog_image = random.choice(os.listdir(DOG_PATH + "/" + breed_path))
breed = breed_path.split("-")[1].replace("_", " ")
full_path = "images" + "/" + breed_path + "/" + dog_image
return render_template(
"dog.html", breed=breed, full_path=full_path)
@app.route("/dog/<breed>/<dog_id>", methods=["GET"])
def get_dog_handler(breed, dog_id):
DOG_PATH = os.environ.get("DOG_PATH", "dogs/static/images")
breeds = os.listdir(DOG_PATH)
breed_path = [
breed_path for breed_path in breeds if breed in breed_path][0]
breed = breed_path.split("-")[1].replace("_", " ")
dog_images = os.listdir(DOG_PATH + "/" + breed_path)
dog_path = [dog_path for dog_path in dog_images if dog_id in dog_path][0]
full_path = "images" + "/" + breed_path + "/" + dog_path
return render_template("dog.html", breed=breed, full_path=full_path)
@app.route("/guess/", methods=["POST"])
def guess_handler():
breed = request.form['breed']
full_path = request.form['full_path']
guess = request.form['dropdown']
score = int(request.cookies.get("score"))
if breed == guess:
score += 1
resp = make_response(render_template("guess.html", guess=guess,
breed=breed, full_path=full_path,
score=score))
resp.set_cookie("score", str(score))
return resp
# TODO
@app.route("/upload", methods=["GET", "POST"])
def upload_handler():
pass
if __name__ == "__main__":
app.run(
debug=True,
host='0.0.0.0'
)
| []
| []
| [
"DOG_PATH"
]
| [] | ["DOG_PATH"] | python | 1 | 0 | |
src/main/java/hudson/plugins/openshift/OpenShiftSlave.java | package hudson.plugins.openshift;
import com.openshift.client.*;
import com.openshift.client.cartridge.ICartridge;
import com.openshift.client.cartridge.IStandaloneCartridge;
import com.openshift.client.cartridge.StandaloneCartridge;
import hudson.Extension;
import hudson.FilePath;
import hudson.model.Descriptor.FormException;
import hudson.model.Hudson;
import hudson.model.Queue;
import hudson.model.TaskListener;
import hudson.slaves.AbstractCloudComputer;
import hudson.slaves.AbstractCloudSlave;
import hudson.slaves.CloudRetentionStrategy;
import hudson.slaves.NodeProperty;
import org.kohsuke.stapler.DataBoundConstructor;
import java.io.IOException;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Collections;
import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
public class OpenShiftSlave extends AbstractCloudSlave {
private static final long serialVersionUID = 8486485671018263774L;
private static final Logger LOGGER = Logger.getLogger(OpenShiftSlave.class
.getName());
private String applicationUUID;
private String builderType;
private final String builderSize;
private final String region;
private final String builderPlatform;
private final long builderTimeout;
private String uuid;
/**
 * The name of the slave should be the sanitized version of the framework
 * that is used, removing all '.' and '-' characters (e.g. jbossas70, php53).
 * <p/>
 * The framework should be the exact OpenShift framework used (e.g.
 * jbossas-7).
*/
@DataBoundConstructor
public OpenShiftSlave(String name, String applicationUUID, String builderType, String builderSize, String region, String builderPlatform,
String label, long builderTimeout, int executors, int slaveIdleTimeToLive) throws FormException, IOException {
super(name, "Builder for " + label, null, executors, Mode.NORMAL,
label, new OpenShiftComputerLauncher(),
new CloudRetentionStrategy(slaveIdleTimeToLive), Collections
.<NodeProperty<?>>emptyList()
);
LOGGER.info("Creating slave with " + slaveIdleTimeToLive + "mins time-to-live");
this.applicationUUID = applicationUUID;
this.builderType = builderType;
this.builderSize = builderSize;
this.region = region;
this.builderPlatform = builderPlatform;
this.builderTimeout = builderTimeout;
}
private String getNamespace() {
return System.getenv("OPENSHIFT_NAMESPACE");
}
@Override
public String getRemoteFS() {
return "/var/lib/openshift/" + uuid + "/app-root/data/jenkins";
}
@Override
public FilePath getRootPath() {
return createPath(getRemoteFS());
}
@SuppressWarnings("unchecked")
@Override
public AbstractCloudComputer<OpenShiftSlave> createComputer() {
return new OpenShiftComputer(this);
}
@Override
protected void _terminate(TaskListener listener) throws IOException,
InterruptedException {
LOGGER.info("Terminating slave " + name + " (uuid: " + uuid + ")");
if (getComputer() != null && getComputer().getChannel() != null) {
LOGGER.info("Closing the SSH channel...");
getComputer().getChannel().close();
}
LOGGER.info("Terminating OpenShift application...");
terminateApp();
}
protected IStandaloneCartridge getCartridge(IOpenShiftConnection connection) throws OpenShiftException {
if(applicationUUID!=null && !applicationUUID.equals("")) {
// new build configs provide the application uuid for cloning
IApplication baseApp = Util.getApplicationFromUuid(applicationUUID);
if(baseApp==null) {
throw new OpenShiftException("Could not locate application with UUID "+applicationUUID);
}
if(baseApp.getCartridge().getUrl()!=null) {
// downloadable cartridge
return new StandaloneCartridge(baseApp.getCartridge().getName(), baseApp.getCartridge().getUrl());
} else {
// cartridge from repository
String cartridgeType=baseApp.getCartridge().getName();
List<IStandaloneCartridge> cartridges = connection.getStandaloneCartridges();
for (IStandaloneCartridge cartridge : cartridges) {
if (cartridge.getName().equals(cartridgeType)) {
return cartridge;
}
}
throw new OpenShiftException("Cartridge for " + cartridgeType + " not found");
}
} else {
// old configs provided the builder type.
String targetCartridgeName = builderType.replace("redhat-", "");
List<IStandaloneCartridge> cartridges = connection.getStandaloneCartridges();
for (IStandaloneCartridge cartridge : cartridges) {
if (cartridge.getName().equals(targetCartridgeName)) {
return cartridge;
}
}
throw new OpenShiftException("Cartridge for " + targetCartridgeName + " not found");
}
}
private void terminateApp() {
try {
getBuilderApplication().destroy();
} catch (Exception e) {
LOGGER.log(Level.WARNING, "Unable to terminate builder application", e);
}
}
@Extension
public static final class DescriptorImpl extends SlaveDescriptor {
public String getDisplayName() {
return "OpenShift Slave";
}
@Override
public boolean isInstantiable() {
return false;
}
}
public String getHostName() throws IOException {
try {
IApplication app = getBuilderApplication();
String url = null;
String type = getCartridge(OpenShiftCloud.get().getOpenShiftConnection()).getName();
for (IGearGroup gearGroup : app.getGearGroups()) {
for(ICartridge cart : gearGroup.getCartridges()) {
if(cart.getName().equals(type)) {
url = ((IGear) gearGroup.getGears().toArray()[0]).getSshUrl();
break;
}
}
if(url != null) break;
}
if(url == null) {
throw new IOException("Unable to find ssh url for " + name);
}
if (url.indexOf("@") != -1)
url = url.substring(url.indexOf("@") + 1);
url = url.replace("/", "");
return url;
} catch (Exception e) {
throw new IOException("Unable to find application url for " + name, e);
}
}
public void connect(boolean delayDNS) throws IOException {
LOGGER.info("Connecting to slave " + name + "...");
try {
// Force a refresh of the user info to get the application UUID
IApplication app = getBuilderApplication();
if (app == null)
throw new IOException("Failed to connect/find application " + name);
uuid = app.getGearGroups().iterator().next().getGears().iterator().next().getId();
LOGGER.info("Established UUID = " + uuid);
} catch (Exception e) {
throw new IOException("Unable to connect to application " + name, e);
}
// Sleep for 5 seconds for DNS to propagate to minimize cache penalties
if (delayDNS) {
try {
Thread.sleep(5000);
} catch (InterruptedException e) {
// Ignore
}
}
long startTime = System.currentTimeMillis();
long currentTime = startTime;
// Wait until DNS is resolvable
while (isBuildRunning() && (builderTimeout == -1 || currentTime - startTime < builderTimeout)) {
try {
String hostname = getHostName();
LOGGER.info("Checking to see if slave DNS for " + hostname + " is resolvable ... (timeout: " + builderTimeout + "ms)");
InetAddress address = InetAddress.getByName(hostname);
LOGGER.info("Slave DNS resolved - " + address);
break;
} catch (UnknownHostException e) {
LOGGER.info("Slave DNS not propagated yet, retrying... (remaining: " + (builderTimeout - (currentTime - startTime)) + "ms)");
try {
Thread.sleep(5000);
} catch (InterruptedException ie) {
// Ignore interruptions
}
currentTime = System.currentTimeMillis();
}
}
if (builderTimeout >= 0 && currentTime - startTime >= builderTimeout) {
LOGGER.warning("Slave DNS not propagated. Timing out.");
throw new IOException("Slave DNS not propagated. Timing out.");
}
}
protected boolean isBuildRunning() {
boolean running = true;
Queue queue = Hudson.getInstance().getQueue();
if (queue != null) {
Queue.Item[] items = queue.getItems();
if (items.length == 0)
running = false;
}
return running;
}
public void provision() throws Exception {
// Create a new application of the right type
createApp();
// Force a connection to establish the UUID
connect(true);
}
private void createApp() throws IOException, OpenShiftException {
IOpenShiftConnection connection = OpenShiftCloud.get().getOpenShiftConnection();
IUser user = connection.getUser();
IStandaloneCartridge cartridge = getCartridge(OpenShiftCloud.get().getOpenShiftConnection());
IDomain domain = user.getDomain(getNamespace());
List<IGearProfile> gearProfiles = domain.getAvailableGearProfiles();
IGearProfile gearProfile = gearProfiles.get(0);
for (IGearProfile profile : gearProfiles) {
if (profile.getName().equals(builderSize)) {
gearProfile = profile;
}
}
LOGGER.info("Creating builder application " + cartridge.getName() + " "
+ name + " " + user.getDomain(getNamespace()).getId() + " of size "
+ gearProfile.getName() + " in region "+(region==null?"default":region)+" ...");
ApplicationScale scale = ApplicationScale.NO_SCALE;
if(builderPlatform.equalsIgnoreCase(Platform.WINDOWS.toString())) {
scale = ApplicationScale.SCALE;
}
IApplication app = domain.createApplication(name, cartridge, scale, region, gearProfile);
//IApplication app = domain.createApplication(name, cartridge, scale, gearProfile);
// No reason to have app running on builder gear - just need it installed
LOGGER.info("Stopping application on builder gear ...");
app.stop();
}
private IApplication getBuilderApplication() {
IUser user;
try {
user = OpenShiftCloud.get().getOpenShiftConnection().getUser();
} catch (IOException e) {
throw new RuntimeException(e);
}
return user.getDomain(getNamespace()).getApplicationByName(name);
}
public String getUuid() {
return uuid;
}
public enum Platform {
WINDOWS("Windows"),
LINUX("Linux");
private final String platform;
private Platform(String s) {
platform = s;
}
public String toString(){
return platform;
}
}
}
| [
"\"OPENSHIFT_NAMESPACE\""
]
| []
| [
"OPENSHIFT_NAMESPACE"
]
| [] | ["OPENSHIFT_NAMESPACE"] | java | 1 | 0 | |
lxd/instance/drivers/driver_qemu.go | package drivers
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"os"
"os/exec"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"text/template"
"time"
"github.com/flosch/pongo2"
"github.com/gorilla/websocket"
"github.com/kballard/go-shellquote"
"github.com/pborman/uuid"
"github.com/pkg/errors"
"golang.org/x/sys/unix"
"gopkg.in/yaml.v2"
lxdClient "github.com/lxc/lxd/client"
"github.com/lxc/lxd/lxd/apparmor"
"github.com/lxc/lxd/lxd/backup"
"github.com/lxc/lxd/lxd/cluster"
"github.com/lxc/lxd/lxd/db"
"github.com/lxc/lxd/lxd/db/query"
"github.com/lxc/lxd/lxd/device"
deviceConfig "github.com/lxc/lxd/lxd/device/config"
"github.com/lxc/lxd/lxd/device/nictype"
"github.com/lxc/lxd/lxd/instance"
"github.com/lxc/lxd/lxd/instance/drivers/qmp"
"github.com/lxc/lxd/lxd/instance/instancetype"
"github.com/lxc/lxd/lxd/instance/operationlock"
"github.com/lxc/lxd/lxd/maas"
"github.com/lxc/lxd/lxd/network"
"github.com/lxc/lxd/lxd/operations"
"github.com/lxc/lxd/lxd/project"
"github.com/lxc/lxd/lxd/resources"
"github.com/lxc/lxd/lxd/revert"
"github.com/lxc/lxd/lxd/state"
storagePools "github.com/lxc/lxd/lxd/storage"
storageDrivers "github.com/lxc/lxd/lxd/storage/drivers"
pongoTemplate "github.com/lxc/lxd/lxd/template"
"github.com/lxc/lxd/lxd/util"
"github.com/lxc/lxd/lxd/vsock"
"github.com/lxc/lxd/shared"
"github.com/lxc/lxd/shared/api"
"github.com/lxc/lxd/shared/instancewriter"
log "github.com/lxc/lxd/shared/log15"
"github.com/lxc/lxd/shared/logger"
"github.com/lxc/lxd/shared/logging"
"github.com/lxc/lxd/shared/osarch"
"github.com/lxc/lxd/shared/subprocess"
"github.com/lxc/lxd/shared/termios"
"github.com/lxc/lxd/shared/units"
)
// qemuUnsafeIO is used to indicate that the disk should use unsafe cache I/O.
const qemuUnsafeIO = "unsafeio"
// qemuSerialChardevName is used to communicate state via qmp between Qemu and LXD.
const qemuSerialChardevName = "qemu_serial-chardev"
// qemuDefaultMemSize is the default memory size for VMs if not limit specified.
const qemuDefaultMemSize = "1GiB"
var errQemuAgentOffline = fmt.Errorf("LXD VM agent isn't currently running")
var vmConsole = map[int]bool{}
var vmConsoleLock sync.Mutex
// qemuLoad creates a Qemu instance from the supplied InstanceArgs.
func qemuLoad(s *state.State, args db.InstanceArgs, profiles []api.Profile) (instance.Instance, error) {
// Create the instance struct.
vm := qemuInstantiate(s, args, nil)
// Expand config and devices.
err := vm.expandConfig(profiles)
if err != nil {
return nil, err
}
err = vm.expandDevices(profiles)
if err != nil {
return nil, err
}
return vm, nil
}
// qemuInstantiate creates a Qemu struct without expanding config. The expandedDevices argument is
// used during device config validation when the devices have already been expanded and we do not
// have access to the profiles used to do it. This can be safely passed as nil if not required.
func qemuInstantiate(s *state.State, args db.InstanceArgs, expandedDevices deviceConfig.Devices) *qemu {
vm := &qemu{
common: common{
dbType: args.Type,
localConfig: args.Config,
localDevices: args.Devices,
project: args.Project,
state: s,
profiles: args.Profiles,
},
id: args.ID,
name: args.Name,
description: args.Description,
ephemeral: args.Ephemeral,
architecture: args.Architecture,
snapshot: args.Snapshot,
creationDate: args.CreationDate,
lastUsedDate: args.LastUsedDate,
stateful: args.Stateful,
node: args.Node,
expiryDate: args.ExpiryDate,
}
// Get the architecture name.
archName, err := osarch.ArchitectureName(vm.architecture)
if err == nil {
vm.architectureName = archName
}
// Cleanup the zero values.
if vm.expiryDate.IsZero() {
vm.expiryDate = time.Time{}
}
if vm.creationDate.IsZero() {
vm.creationDate = time.Time{}
}
if vm.lastUsedDate.IsZero() {
vm.lastUsedDate = time.Time{}
}
// This is passed during expanded config validation.
if expandedDevices != nil {
vm.expandedDevices = expandedDevices
}
return vm
}
// qemuCreate creates a new storage volume record and returns an initialised Instance.
func qemuCreate(s *state.State, args db.InstanceArgs) (instance.Instance, error) {
// Create the instance struct.
vm := &qemu{
common: common{
dbType: args.Type,
localConfig: args.Config,
localDevices: args.Devices,
state: s,
profiles: args.Profiles,
project: args.Project,
},
id: args.ID,
name: args.Name,
node: args.Node,
description: args.Description,
ephemeral: args.Ephemeral,
architecture: args.Architecture,
snapshot: args.Snapshot,
stateful: args.Stateful,
creationDate: args.CreationDate,
lastUsedDate: args.LastUsedDate,
expiryDate: args.ExpiryDate,
}
// Get the architecture name.
archName, err := osarch.ArchitectureName(vm.architecture)
if err == nil {
vm.architectureName = archName
}
// Cleanup the zero values.
if vm.expiryDate.IsZero() {
vm.expiryDate = time.Time{}
}
if vm.creationDate.IsZero() {
vm.creationDate = time.Time{}
}
if vm.lastUsedDate.IsZero() {
vm.lastUsedDate = time.Time{}
}
ctxMap := log.Ctx{
"project": args.Project,
"name": vm.name,
"ephemeral": vm.ephemeral,
}
logger.Info("Creating instance", ctxMap)
revert := true
defer func() {
if !revert {
return
}
vm.Delete()
}()
// Load the config.
err = vm.init()
if err != nil {
logger.Error("Failed creating instance", ctxMap)
return nil, err
}
// Validate expanded config.
err = instance.ValidConfig(s.OS, vm.expandedConfig, false, true)
if err != nil {
logger.Error("Failed creating instance", ctxMap)
return nil, err
}
err = instance.ValidDevices(s, s.Cluster, vm.Project(), vm.Type(), vm.expandedDevices, true)
if err != nil {
logger.Error("Failed creating instance", ctxMap)
return nil, errors.Wrap(err, "Invalid devices")
}
	// Retrieve the instance's storage pool
var storageInstance instance.Instance
if vm.IsSnapshot() {
parentName, _, _ := shared.InstanceGetParentAndSnapshotName(vm.name)
// Load the parent
storageInstance, err = instance.LoadByProjectAndName(vm.state, vm.project, parentName)
if err != nil {
vm.Delete()
logger.Error("Failed creating instance", ctxMap)
return nil, errors.Wrap(err, "Invalid parent")
}
} else {
storageInstance = vm
}
// Retrieve the instance's storage pool.
_, rootDiskDevice, err := shared.GetRootDiskDevice(storageInstance.ExpandedDevices().CloneNative())
if err != nil {
return nil, err
}
if rootDiskDevice["pool"] == "" {
return nil, fmt.Errorf("The instances's root device is missing the pool property")
}
storagePool := rootDiskDevice["pool"]
// Get the storage pool ID for the instance.
poolID, pool, err := s.Cluster.GetStoragePool(storagePool)
if err != nil {
return nil, err
}
// Fill in any default volume config.
volumeConfig := map[string]string{}
err = storagePools.VolumeFillDefault(volumeConfig, pool)
if err != nil {
return nil, err
}
// Create a new database entry for the instance's storage volume.
if vm.IsSnapshot() {
_, err = s.Cluster.CreateStorageVolumeSnapshot(args.Project, args.Name, "", db.StoragePoolVolumeTypeVM, poolID, volumeConfig, time.Time{})
} else {
_, err = s.Cluster.CreateStoragePoolVolume(args.Project, args.Name, "", db.StoragePoolVolumeTypeVM, poolID, volumeConfig, db.StoragePoolVolumeContentTypeBlock)
}
if err != nil {
return nil, err
}
if !vm.IsSnapshot() {
// Update MAAS.
err = vm.maasUpdate(nil)
if err != nil {
logger.Error("Failed creating instance", ctxMap)
return nil, err
}
// Add devices to instance.
for k, m := range vm.expandedDevices {
err = vm.deviceAdd(k, m)
if err != nil && err != device.ErrUnsupportedDevType {
return nil, errors.Wrapf(err, "Failed to add device '%s'", k)
}
}
}
logger.Info("Created instance", ctxMap)
vm.state.Events.SendLifecycle(vm.project, "virtual-machine-created",
fmt.Sprintf("/1.0/virtual-machines/%s", vm.name), nil)
revert = false
return vm, nil
}
// qemu is the QEMU virtual machine driver.
type qemu struct {
common
// Properties.
architecture int
snapshot bool
creationDate time.Time
lastUsedDate time.Time
ephemeral bool
id int
name string
description string
stateful bool
// Clustering.
node string
// Progress tracking.
op *operations.Operation
expiryDate time.Time
// Cached handles.
// Do not use these variables directly, instead use their associated get functions so they
// will be initialised on demand.
agentClient *http.Client
storagePool storagePools.Pool
architectureName string
}
// getAgentClient returns the current agent client handle. To avoid TLS setup each time this
// function is called, the handle is cached internally in the Qemu struct.
func (vm *qemu) getAgentClient() (*http.Client, error) {
if vm.agentClient != nil {
return vm.agentClient, nil
}
// The connection uses mutual authentication, so use the LXD server's key & cert for client.
agentCert, _, clientCert, clientKey, err := vm.generateAgentCert()
if err != nil {
return nil, err
}
agent, err := vsock.HTTPClient(vm.vsockID(), clientCert, clientKey, agentCert)
if err != nil {
return nil, err
}
return agent, nil
}
// getStoragePool returns the current storage pool handle. To avoid a DB lookup each time this
// function is called, the handle is cached internally in the Qemu struct.
func (vm *qemu) getStoragePool() (storagePools.Pool, error) {
if vm.storagePool != nil {
return vm.storagePool, nil
}
pool, err := storagePools.GetPoolByInstance(vm.state, vm)
if err != nil {
return nil, err
}
vm.storagePool = pool
return vm.storagePool, nil
}
func (vm *qemu) getMonitorEventHandler() func(event string, data map[string]interface{}) {
id := vm.id
state := vm.state
return func(event string, data map[string]interface{}) {
if !shared.StringInSlice(event, []string{"SHUTDOWN"}) {
return
}
inst, err := instance.LoadByID(state, id)
if err != nil {
logger.Errorf("Failed to load instance with id=%d", id)
return
}
if event == "SHUTDOWN" {
target := "stop"
entry, ok := data["reason"]
if ok && entry == "guest-reset" {
target = "reboot"
}
err = inst.(*qemu).onStop(target)
if err != nil {
logger.Errorf("Failed to cleanly stop instance '%s': %v", project.Instance(inst.Project(), inst.Name()), err)
return
}
}
}
}
// mount the instance's config volume if needed.
func (vm *qemu) mount() (bool, error) {
var pool storagePools.Pool
pool, err := vm.getStoragePool()
if err != nil {
return false, err
}
ourMount, err := pool.MountInstance(vm, nil)
if err != nil {
return false, err
}
return ourMount, nil
}
// unmount the instance's config volume if needed.
func (vm *qemu) unmount() (bool, error) {
pool, err := vm.getStoragePool()
if err != nil {
return false, err
}
unmounted, err := pool.UnmountInstance(vm, nil)
if err != nil {
return false, err
}
return unmounted, nil
}
// generateAgentCert creates the necessary server key and certificate if needed.
func (vm *qemu) generateAgentCert() (string, string, string, string, error) {
// Mount the instance's config volume if needed.
ourMount, err := vm.mount()
if err != nil {
return "", "", "", "", err
}
if ourMount {
defer vm.unmount()
}
agentCertFile := filepath.Join(vm.Path(), "agent.crt")
agentKeyFile := filepath.Join(vm.Path(), "agent.key")
clientCertFile := filepath.Join(vm.Path(), "agent-client.crt")
clientKeyFile := filepath.Join(vm.Path(), "agent-client.key")
// Create server certificate.
err = shared.FindOrGenCert(agentCertFile, agentKeyFile, false, false)
if err != nil {
return "", "", "", "", err
}
// Create client certificate.
err = shared.FindOrGenCert(clientCertFile, clientKeyFile, true, false)
if err != nil {
return "", "", "", "", err
}
// Read all the files
agentCert, err := ioutil.ReadFile(agentCertFile)
if err != nil {
return "", "", "", "", err
}
agentKey, err := ioutil.ReadFile(agentKeyFile)
if err != nil {
return "", "", "", "", err
}
clientCert, err := ioutil.ReadFile(clientCertFile)
if err != nil {
return "", "", "", "", err
}
clientKey, err := ioutil.ReadFile(clientKeyFile)
if err != nil {
return "", "", "", "", err
}
return string(agentCert), string(agentKey), string(clientCert), string(clientKey), nil
}
// Freeze freezes the instance.
func (vm *qemu) Freeze() error {
// Connect to the monitor.
monitor, err := qmp.Connect(vm.monitorPath(), qemuSerialChardevName, vm.getMonitorEventHandler())
if err != nil {
return err
}
// Send the stop command.
err = monitor.Pause()
if err != nil {
return err
}
return nil
}
// onStop is run when the instance stops.
func (vm *qemu) onStop(target string) error {
ctxMap := log.Ctx{
"project": vm.project,
"name": vm.name,
"ephemeral": vm.ephemeral,
}
// Pick up the existing stop operation lock created in Stop() function.
op := operationlock.Get(vm.id)
if op != nil && op.Action() != "stop" {
return fmt.Errorf("Instance is already running a %s operation", op.Action())
}
// Cleanup.
vm.cleanupDevices()
os.Remove(vm.pidFilePath())
os.Remove(vm.monitorPath())
vm.unmount()
// Record power state.
err := vm.state.Cluster.UpdateInstancePowerState(vm.id, "STOPPED")
if err != nil {
if op != nil {
op.Done(err)
}
return err
}
// Unload the apparmor profile
err = apparmor.InstanceUnload(vm.state, vm)
if err != nil {
ctxMap["err"] = err
logger.Error("Failed to unload AppArmor profile", ctxMap)
}
if target == "reboot" {
err = vm.Start(false)
} else if vm.ephemeral {
// Destroy ephemeral virtual machines
err = vm.Delete()
}
if err != nil {
return err
}
if op != nil {
op.Done(nil)
}
return nil
}
// Shutdown shuts the instance down.
func (vm *qemu) Shutdown(timeout time.Duration) error {
if !vm.IsRunning() {
return fmt.Errorf("The instance is already stopped")
}
// Setup a new operation
op, err := operationlock.Create(vm.id, "stop", true, true)
if err != nil {
return err
}
// Connect to the monitor.
monitor, err := qmp.Connect(vm.monitorPath(), qemuSerialChardevName, vm.getMonitorEventHandler())
if err != nil {
op.Done(err)
return err
}
// Get the wait channel.
chDisconnect, err := monitor.Wait()
if err != nil {
if err == qmp.ErrMonitorDisconnect {
op.Done(nil)
return nil
}
op.Done(err)
return err
}
// Send the system_powerdown command.
err = monitor.Powerdown()
if err != nil {
if err == qmp.ErrMonitorDisconnect {
op.Done(nil)
return nil
}
op.Done(err)
return err
}
// If timeout provided, block until the VM is not running or the timeout has elapsed.
if timeout > 0 {
select {
case <-chDisconnect:
break
case <-time.After(timeout):
op.Done(fmt.Errorf("Instance was not shutdown after timeout"))
return fmt.Errorf("Instance was not shutdown after timeout")
}
} else {
<-chDisconnect // Block until VM is not running if no timeout provided.
}
// Wait for onStop.
err = op.Wait()
if err != nil && vm.IsRunning() {
return err
}
op.Done(nil)
vm.state.Events.SendLifecycle(vm.project, "instance-shutdown", fmt.Sprintf("/1.0/virtual-machines/%s", vm.name), nil)
return nil
}
func (vm *qemu) ovmfPath() string {
if os.Getenv("LXD_OVMF_PATH") != "" {
return os.Getenv("LXD_OVMF_PATH")
}
return "/usr/share/OVMF"
}
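// Illustrative usage (not part of the driver): starting the daemon with
// LXD_OVMF_PATH=/opt/ovmf makes ovmfPath() return that directory, so
// setupNvram() copies OVMF_VARS.fd / OVMF_VARS.ms.fd from there instead of
// /usr/share/OVMF.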
// Start starts the instance.
func (vm *qemu) Start(stateful bool) error {
// Ensure the correct vhost_vsock kernel module is loaded before establishing the vsock.
err := util.LoadModule("vhost_vsock")
if err != nil {
return err
}
if vm.IsRunning() {
return fmt.Errorf("The instance is already running")
}
// Setup a new operation
op, err := operationlock.Create(vm.id, "start", false, false)
if err != nil {
return errors.Wrap(err, "Create instance start operation")
}
defer op.Done(nil)
revert := revert.New()
defer revert.Fail()
// Start accumulating device paths.
vm.devPaths = []string{}
// Mount the instance's config volume.
_, err = vm.mount()
if err != nil {
op.Done(err)
return err
}
revert.Add(func() { vm.unmount() })
err = vm.generateConfigShare()
if err != nil {
op.Done(err)
return err
}
err = os.MkdirAll(vm.LogPath(), 0700)
if err != nil {
op.Done(err)
return err
}
err = os.MkdirAll(vm.DevicesPath(), 0711)
if err != nil {
op.Done(err)
return err
}
err = os.MkdirAll(vm.ShmountsPath(), 0711)
if err != nil {
op.Done(err)
return err
}
// Get a UUID for Qemu.
vmUUID := vm.localConfig["volatile.vm.uuid"]
if vmUUID == "" {
vmUUID = uuid.New()
vm.VolatileSet(map[string]string{"volatile.vm.uuid": vmUUID})
}
// Copy OVMF settings firmware to nvram file.
// This firmware file can be modified by the VM so it must be copied from the defaults.
if !shared.PathExists(vm.nvramPath()) {
err = vm.setupNvram()
if err != nil {
op.Done(err)
return err
}
}
devConfs := make([]*deviceConfig.RunConfig, 0, len(vm.expandedDevices))
// Setup devices in sorted order, this ensures that device mounts are added in path order.
for _, d := range vm.expandedDevices.Sorted() {
dev := d // Ensure device variable has local scope for revert.
// Start the device.
runConf, err := vm.deviceStart(dev.Name, dev.Config, false)
if err != nil {
op.Done(err)
return errors.Wrapf(err, "Failed to start device %q", dev.Name)
}
if runConf == nil {
continue
}
revert.Add(func() {
err := vm.deviceStop(dev.Name, dev.Config)
if err != nil {
logger.Errorf("Failed to cleanup device %q: %v", dev.Name, err)
}
})
devConfs = append(devConfs, runConf)
}
// Get qemu configuration.
qemuBinary, qemuBus, err := vm.qemuArchConfig()
if err != nil {
op.Done(err)
return err
}
// Define a set of files to open and pass their file descriptors to qemu command.
fdFiles := make([]string, 0)
confFile, err := vm.generateQemuConfigFile(qemuBus, devConfs, &fdFiles)
if err != nil {
op.Done(err)
return err
}
// Check qemu is installed.
qemuPath, err := exec.LookPath(qemuBinary)
if err != nil {
op.Done(err)
return err
}
qemuCmd := []string{
"--",
qemuPath,
"-S",
"-name", vm.Name(),
"-uuid", vmUUID,
"-daemonize",
"-cpu", "host",
"-nographic",
"-serial", "chardev:console",
"-nodefaults",
"-no-reboot",
"-no-user-config",
"-sandbox", "on,obsolete=deny,elevateprivileges=allow,spawn=deny,resourcecontrol=deny",
"-readconfig", confFile,
"-pidfile", vm.pidFilePath(),
"-D", vm.LogFilePath(),
"-chroot", vm.Path(),
}
// SMBIOS only on x86_64 and aarch64.
if shared.IntInSlice(vm.architecture, []int{osarch.ARCH_64BIT_INTEL_X86, osarch.ARCH_64BIT_ARMV8_LITTLE_ENDIAN}) {
qemuCmd = append(qemuCmd, "-smbios", "type=2,manufacturer=Canonical Ltd.,product=LXD")
}
// Attempt to drop privileges.
if vm.state.OS.UnprivUser != "" {
qemuCmd = append(qemuCmd, "-runas", vm.state.OS.UnprivUser)
// Change ownership of config directory files so they are accessible to the
// unprivileged qemu process so that the 9p share can work.
//
		// Security note: The 9P share will present the UID owner of these files on the host
		// to the VM. To ensure that non-root users in the VM cannot access these files,
		// be sure to mount the 9P share in the VM with the "access=0" option so that only
		// the root user in the VM can access the mounted share.
err := filepath.Walk(filepath.Join(vm.Path(), "config"),
func(path string, info os.FileInfo, err error) error {
if err != nil {
op.Done(err)
return err
}
err = os.Chown(path, int(vm.state.OS.UnprivUID), -1)
if err != nil {
op.Done(err)
return err
}
return nil
})
if err != nil {
op.Done(err)
return err
}
}
// Handle hugepages on architectures where we don't set NUMA nodes.
if vm.architecture != osarch.ARCH_64BIT_INTEL_X86 && shared.IsTrue(vm.expandedConfig["limits.memory.hugepages"]) {
hugetlb, err := util.HugepagesPath()
if err != nil {
op.Done(err)
return err
}
qemuCmd = append(qemuCmd, "-mem-path", hugetlb, "-mem-prealloc")
}
if vm.expandedConfig["raw.qemu"] != "" {
fields, err := shellquote.Split(vm.expandedConfig["raw.qemu"])
if err != nil {
op.Done(err)
return err
}
qemuCmd = append(qemuCmd, fields...)
}
// Run the qemu command via forklimits so we can selectively increase ulimits.
forkLimitsCmd := []string{
"forklimits",
"limit=memlock:unlimited:unlimited", // Required for PCI passthrough.
}
for i := range fdFiles {
// Pass through any file descriptors as 3+i (as first 3 file descriptors are taken as standard).
forkLimitsCmd = append(forkLimitsCmd, fmt.Sprintf("fd=%d", 3+i))
}
// Setup background process.
p, err := subprocess.NewProcess(vm.state.OS.ExecPath, append(forkLimitsCmd, qemuCmd...), vm.EarlyLogFilePath(), vm.EarlyLogFilePath())
if err != nil {
return err
}
// Load the AppArmor profile
err = apparmor.InstanceLoad(vm.state, vm)
if err != nil {
op.Done(err)
return err
}
p.SetApparmor(apparmor.InstanceProfileName(vm))
// Open any extra files and pass their file handles to qemu command.
files := []*os.File{}
for _, file := range fdFiles {
info, err := os.Stat(file)
if err != nil {
err = errors.Wrapf(err, "Error detecting file type %q", file)
op.Done(err)
return err
}
var f *os.File
mode := info.Mode()
if mode&os.ModeSocket != 0 {
c, err := vm.openUnixSocket(file)
if err != nil {
err = errors.Wrapf(err, "Error opening socket file %q", file)
op.Done(err)
return err
}
f, err = c.File()
if err != nil {
err = errors.Wrapf(err, "Error getting socket file descriptor %q", file)
op.Done(err)
return err
}
defer c.Close()
defer f.Close() // Close file after qemu has started.
} else {
f, err = os.OpenFile(file, os.O_RDWR, 0)
if err != nil {
err = errors.Wrapf(err, "Error opening exta file %q", file)
op.Done(err)
return err
}
defer f.Close() // Close file after qemu has started.
}
files = append(files, f)
}
err = p.StartWithFiles(files)
if err != nil {
return err
}
_, err = p.Wait()
if err != nil {
stderr, _ := ioutil.ReadFile(vm.EarlyLogFilePath())
err = errors.Wrapf(err, "Failed to run: %s: %s", strings.Join(p.Args, " "), string(stderr))
op.Done(err)
return err
}
pid, err := vm.pid()
if err != nil {
logger.Errorf(`Failed to get VM process ID "%d"`, pid)
return err
}
revert.Add(func() {
proc, err := os.FindProcess(pid)
if err != nil {
logger.Errorf(`Failed to find VM process "%d"`, pid)
return
}
proc.Kill()
if err != nil {
logger.Errorf(`Failed to kill VM process "%d"`, pid)
}
})
// Start QMP monitoring.
monitor, err := qmp.Connect(vm.monitorPath(), qemuSerialChardevName, vm.getMonitorEventHandler())
if err != nil {
op.Done(err)
return err
}
// Apply CPU pinning.
cpuLimit, ok := vm.expandedConfig["limits.cpu"]
if ok && cpuLimit != "" {
_, err := strconv.Atoi(cpuLimit)
if err != nil {
// Expand to a set of CPU identifiers and get the pinning map.
_, _, _, pins, _, err := vm.cpuTopology(cpuLimit)
if err != nil {
op.Done(err)
return err
}
// Get the list of PIDs from the VM.
pids, err := monitor.GetCPUs()
if err != nil {
op.Done(err)
return err
}
// Confirm nothing weird is going on.
if len(pins) != len(pids) {
return fmt.Errorf("QEMU has less vCPUs than configured")
}
for i, pid := range pids {
set := unix.CPUSet{}
set.Set(int(pins[uint64(i)]))
// Apply the pin.
err := unix.SchedSetaffinity(pid, &set)
if err != nil {
op.Done(err)
return err
}
}
}
}
// Start the VM.
err = monitor.Start()
if err != nil {
op.Done(err)
return err
}
// Database updates
err = vm.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
// Record current state
err = tx.UpdateInstancePowerState(vm.id, "RUNNING")
if err != nil {
err = errors.Wrap(err, "Error updating instance state")
op.Done(err)
return err
}
// Update time instance last started time
err = tx.UpdateInstanceLastUsedDate(vm.id, time.Now().UTC())
if err != nil {
err = errors.Wrap(err, "Error updating instance last used")
op.Done(err)
return err
}
return nil
})
if err != nil {
op.Done(err)
return err
}
revert.Success()
vm.state.Events.SendLifecycle(vm.project, "virtual-machine-started", fmt.Sprintf("/1.0/virtual-machines/%s", vm.name), nil)
return nil
}
// openUnixSocket connects to a UNIX socket and returns the connection.
func (vm *qemu) openUnixSocket(sockPath string) (*net.UnixConn, error) {
addr, err := net.ResolveUnixAddr("unix", sockPath)
if err != nil {
return nil, err
}
c, err := net.DialUnix("unix", nil, addr)
if err != nil {
return nil, err
}
return c, nil
}
func (vm *qemu) setupNvram() error {
// UEFI only on x86_64 and aarch64.
if !shared.IntInSlice(vm.architecture, []int{osarch.ARCH_64BIT_INTEL_X86, osarch.ARCH_64BIT_ARMV8_LITTLE_ENDIAN}) {
return nil
}
// Mount the instance's config volume.
ourMount, err := vm.mount()
if err != nil {
return err
}
if ourMount {
defer vm.unmount()
}
srcOvmfFile := filepath.Join(vm.ovmfPath(), "OVMF_VARS.fd")
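// Use the Microsoft-keyed variables when secure boot is enabled or unset (enabled being the default).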
if vm.expandedConfig["security.secureboot"] == "" || shared.IsTrue(vm.expandedConfig["security.secureboot"]) {
srcOvmfFile = filepath.Join(vm.ovmfPath(), "OVMF_VARS.ms.fd")
}
if !shared.PathExists(srcOvmfFile) {
return fmt.Errorf("Required EFI firmware settings file missing: %s", srcOvmfFile)
}
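// Replace any existing NVRAM file with a fresh copy of the firmware variables.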
os.Remove(vm.nvramPath())
err = shared.FileCopy(srcOvmfFile, vm.nvramPath())
if err != nil {
return err
}
return nil
}
func (vm *qemu) qemuArchConfig() (string, string, error) {
if vm.architecture == osarch.ARCH_64BIT_INTEL_X86 {
return "qemu-system-x86_64", "pcie", nil
} else if vm.architecture == osarch.ARCH_64BIT_ARMV8_LITTLE_ENDIAN {
return "qemu-system-aarch64", "pcie", nil
} else if vm.architecture == osarch.ARCH_64BIT_POWERPC_LITTLE_ENDIAN {
return "qemu-system-ppc64", "pci", nil
} else if vm.architecture == osarch.ARCH_64BIT_S390_BIG_ENDIAN {
return "qemu-system-s390x", "ccw", nil
}
return "", "", fmt.Errorf("Architecture isn't supported for virtual machines")
}
// deviceVolatileGetFunc returns a function that retrieves a named device's volatile config and
// removes its device prefix from the keys.
func (vm *qemu) deviceVolatileGetFunc(devName string) func() map[string]string {
return func() map[string]string {
volatile := make(map[string]string)
prefix := fmt.Sprintf("volatile.%s.", devName)
for k, v := range vm.localConfig {
if strings.HasPrefix(k, prefix) {
volatile[strings.TrimPrefix(k, prefix)] = v
}
}
return volatile
}
}
// deviceVolatileSetFunc returns a function that can be called to save a named device's volatile
// config using keys that do not have the device's name prefixed.
func (vm *qemu) deviceVolatileSetFunc(devName string) func(save map[string]string) error {
return func(save map[string]string) error {
volatileSave := make(map[string]string)
for k, v := range save {
volatileSave[fmt.Sprintf("volatile.%s.%s", devName, k)] = v
}
return vm.VolatileSet(volatileSave)
}
}
// RegisterDevices is not used by VMs.
func (vm *qemu) RegisterDevices() {
return
}
// SaveConfigFile is not used by VMs.
func (vm *qemu) SaveConfigFile() error {
return instance.ErrNotImplemented
}
// OnHook is the top-level hook handler.
func (vm *qemu) OnHook(hookName string, args map[string]string) error {
return instance.ErrNotImplemented
}
// deviceLoad instantiates and validates a new device and returns it along with enriched config.
func (vm *qemu) deviceLoad(deviceName string, rawConfig deviceConfig.Device) (device.Device, deviceConfig.Device, error) {
var configCopy deviceConfig.Device
var err error
// Create copy of config and load some fields from volatile if device is nic or infiniband.
if shared.StringInSlice(rawConfig["type"], []string{"nic", "infiniband"}) {
configCopy, err = vm.FillNetworkDevice(deviceName, rawConfig)
if err != nil {
return nil, nil, err
}
} else {
// Otherwise copy the config so it cannot be modified by the device.
configCopy = rawConfig.Clone()
}
d, err := device.New(vm, vm.state, deviceName, configCopy, vm.deviceVolatileGetFunc(deviceName), vm.deviceVolatileSetFunc(deviceName))
// Return device and config copy even if error occurs as caller may still use device.
return d, configCopy, err
}
// deviceStart loads a new device and calls its Start() function. After processing the runtime
// config returned from Start(), it also runs the device's Register() function irrespective of
// whether the instance is running or not.
func (vm *qemu) deviceStart(deviceName string, rawConfig deviceConfig.Device, isRunning bool) (*deviceConfig.RunConfig, error) {
d, _, err := vm.deviceLoad(deviceName, rawConfig)
if err != nil {
return nil, err
}
if canHotPlug, _ := d.CanHotPlug(); isRunning && !canHotPlug {
return nil, fmt.Errorf("Device cannot be started when instance is running")
}
runConf, err := d.Start()
if err != nil {
return nil, err
}
return runConf, nil
}
// deviceStop loads a new device and calls its Stop() function.
func (vm *qemu) deviceStop(deviceName string, rawConfig deviceConfig.Device) error {
logger := logging.AddContext(logger.Log, log.Ctx{"device": deviceName, "project": vm.Project(), "instance": vm.Name()})
logger.Debug("Stopping device")
d, _, err := vm.deviceLoad(deviceName, rawConfig)
// If deviceLoad fails with unsupported device type then return.
if err == device.ErrUnsupportedDevType {
return err
}
// If deviceLoad fails for any other reason then just log the error and proceed, as a newer
// version of LXD may have stricter validation than older versions and we still need to allow
// previously valid devices to be stopped.
if err != nil {
// If there is no device returned, then we cannot proceed, so return as error.
if d == nil {
return fmt.Errorf("Device stop validation failed for %q: %v", deviceName, err)
}
logger.Error("Device stop validation failed", log.Ctx{"err": err})
}
canHotPlug, _ := d.CanHotPlug()
if vm.IsRunning() && !canHotPlug {
return fmt.Errorf("Device cannot be stopped when instance is running")
}
runConf, err := d.Stop()
if err != nil {
return err
}
if runConf != nil {
// Run post stop hooks irrespective of run state of instance.
err = vm.runHooks(runConf.PostHooks)
if err != nil {
return err
}
}
return nil
}
// runHooks executes the callback functions returned from a function.
func (vm *qemu) runHooks(hooks []func() error) error {
// Run any post start hooks.
if len(hooks) > 0 {
for _, hook := range hooks {
err := hook()
if err != nil {
return err
}
}
}
return nil
}
func (vm *qemu) monitorPath() string {
return filepath.Join(vm.LogPath(), "qemu.monitor")
}
func (vm *qemu) nvramPath() string {
return filepath.Join(vm.Path(), "qemu.nvram")
}
func (vm *qemu) spicePath() string {
return filepath.Join(vm.LogPath(), "qemu.spice")
}
// generateConfigShare generates the config share directory that will be exported to the VM via
// a 9P share. Because the size of templates inside images is unknown, this directory is created
// inside the VM's config volume so that it can be restricted by quota.
func (vm *qemu) generateConfigShare() error {
// Mount the instance's config volume if needed.
ourMount, err := vm.mount()
if err != nil {
return err
}
if ourMount {
defer vm.unmount()
}
configDrivePath := filepath.Join(vm.Path(), "config")
// Create config drive dir.
os.RemoveAll(configDrivePath)
err = os.MkdirAll(configDrivePath, 0500)
if err != nil {
return err
}
// Generate the cloud-init config.
err = os.MkdirAll(filepath.Join(configDrivePath, "cloud-init"), 0500)
if err != nil {
return err
}
if vm.ExpandedConfig()["user.user-data"] != "" {
err = ioutil.WriteFile(filepath.Join(configDrivePath, "cloud-init", "user-data"), []byte(vm.ExpandedConfig()["user.user-data"]), 0400)
if err != nil {
return err
}
} else {
err = ioutil.WriteFile(filepath.Join(configDrivePath, "cloud-init", "user-data"), []byte("#cloud-config\n"), 0400)
if err != nil {
return err
}
}
if vm.ExpandedConfig()["user.vendor-data"] != "" {
err = ioutil.WriteFile(filepath.Join(configDrivePath, "cloud-init", "vendor-data"), []byte(vm.ExpandedConfig()["user.vendor-data"]), 0400)
if err != nil {
return err
}
} else {
err = ioutil.WriteFile(filepath.Join(configDrivePath, "cloud-init", "vendor-data"), []byte("#cloud-config\n"), 0400)
if err != nil {
return err
}
}
if vm.ExpandedConfig()["user.network-config"] != "" {
err = ioutil.WriteFile(filepath.Join(configDrivePath, "cloud-init", "network-config"), []byte(vm.ExpandedConfig()["user.network-config"]), 0400)
if err != nil {
return err
}
} else {
os.Remove(filepath.Join(configDrivePath, "cloud-init", "network-config"))
}
// Append any user.meta-data to our predefined meta-data config.
err = ioutil.WriteFile(filepath.Join(configDrivePath, "cloud-init", "meta-data"), []byte(fmt.Sprintf("instance-id: %s\nlocal-hostname: %s\n%s\n", vm.Name(), vm.Name(), vm.ExpandedConfig()["user.meta-data"])), 0400)
if err != nil {
return err
}
// Add the VM agent.
path, err := exec.LookPath("lxd-agent")
if err != nil {
logger.Warnf("lxd-agent not found, skipping its inclusion in the VM config drive: %v", err)
} else {
// Install agent into config drive dir if found.
err = shared.FileCopy(path, filepath.Join(configDrivePath, "lxd-agent"))
if err != nil {
return err
}
err = os.Chmod(filepath.Join(configDrivePath, "lxd-agent"), 0500)
if err != nil {
return err
}
err = os.Chown(filepath.Join(configDrivePath, "lxd-agent"), 0, 0)
if err != nil {
return err
}
}
agentCert, agentKey, clientCert, _, err := vm.generateAgentCert()
if err != nil {
return err
}
err = ioutil.WriteFile(filepath.Join(configDrivePath, "server.crt"), []byte(clientCert), 0400)
if err != nil {
return err
}
err = ioutil.WriteFile(filepath.Join(configDrivePath, "agent.crt"), []byte(agentCert), 0400)
if err != nil {
return err
}
err = ioutil.WriteFile(filepath.Join(configDrivePath, "agent.key"), []byte(agentKey), 0400)
if err != nil {
return err
}
// Systemd units.
err = os.MkdirAll(filepath.Join(configDrivePath, "systemd"), 0500)
if err != nil {
return err
}
lxdAgentServiceUnit := `[Unit]
Description=LXD - agent
Documentation=https://linuxcontainers.org/lxd
ConditionPathExists=/dev/virtio-ports/org.linuxcontainers.lxd
Requires=lxd-agent-9p.service
After=lxd-agent-9p.service
Before=cloud-init.target cloud-init.service cloud-init-local.service
DefaultDependencies=no
[Service]
Type=notify
WorkingDirectory=/run/lxd_config/9p
ExecStart=/run/lxd_config/9p/lxd-agent
Restart=on-failure
RestartSec=5s
StartLimitInterval=60
StartLimitBurst=10
[Install]
WantedBy=multi-user.target
`
err = ioutil.WriteFile(filepath.Join(configDrivePath, "systemd", "lxd-agent.service"), []byte(lxdAgentServiceUnit), 0400)
if err != nil {
return err
}
lxdConfigShareMountUnit := `[Unit]
Description=LXD - agent - 9p mount
Documentation=https://linuxcontainers.org/lxd
ConditionPathExists=/dev/virtio-ports/org.linuxcontainers.lxd
After=local-fs.target
DefaultDependencies=no
[Service]
Type=oneshot
RemainAfterExit=yes
ExecStartPre=-/sbin/modprobe 9pnet_virtio
ExecStartPre=/bin/mkdir -p /run/lxd_config/9p
ExecStartPre=/bin/chmod 0700 /run/lxd_config/
ExecStart=/bin/mount -t 9p config /run/lxd_config/9p -o access=0,trans=virtio
[Install]
WantedBy=multi-user.target
`
err = ioutil.WriteFile(filepath.Join(configDrivePath, "systemd", "lxd-agent-9p.service"), []byte(lxdConfigShareMountUnit), 0400)
if err != nil {
return err
}
// Udev rules
err = os.MkdirAll(filepath.Join(configDrivePath, "udev"), 0500)
if err != nil {
return err
}
lxdAgentRules := `ACTION=="add", SYMLINK=="virtio-ports/org.linuxcontainers.lxd", TAG+="systemd", ACTION=="add", RUN+="/bin/systemctl start lxd-agent.service"`
err = ioutil.WriteFile(filepath.Join(configDrivePath, "udev", "99-lxd-agent.rules"), []byte(lxdAgentRules), 0400)
if err != nil {
return err
}
// Install script for manual installs.
lxdConfigShareInstall := `#!/bin/sh
if [ ! -e "systemd" ] || [ ! -e "lxd-agent" ]; then
echo "This script must be run from within the 9p mount"
exit 1
fi
if [ ! -e "/lib/systemd/system" ]; then
echo "This script only works on systemd systems"
exit 1
fi
cp udev/99-lxd-agent.rules /lib/udev/rules.d/
cp systemd/lxd-agent.service /lib/systemd/system/
cp systemd/lxd-agent-9p.service /lib/systemd/system/
systemctl daemon-reload
systemctl enable lxd-agent.service lxd-agent-9p.service
echo ""
echo "LXD agent has been installed, reboot to confirm setup."
echo "To start it now, unmount this filesystem and run: systemctl start lxd-agent-9p lxd-agent"
`
err = ioutil.WriteFile(filepath.Join(configDrivePath, "install.sh"), []byte(lxdConfigShareInstall), 0700)
if err != nil {
return err
}
// Instance data for devlxd.
err = vm.writeInstanceData()
if err != nil {
return err
}
// Templated files.
err = os.MkdirAll(filepath.Join(configDrivePath, "files"), 0500)
if err != nil {
return err
}
// Template anything that needs templating.
key := "volatile.apply_template"
if vm.localConfig[key] != "" {
// Run any template that needs running.
err = vm.templateApplyNow(vm.localConfig[key], filepath.Join(configDrivePath, "files"))
if err != nil {
return err
}
// Remove the volatile key from the DB.
err := vm.state.Cluster.DeleteInstanceConfigKey(vm.id, key)
if err != nil {
return err
}
}
err = vm.templateApplyNow("start", filepath.Join(configDrivePath, "files"))
if err != nil {
return err
}
// Copy the template metadata itself too.
metaPath := filepath.Join(vm.Path(), "metadata.yaml")
if shared.PathExists(metaPath) {
err = shared.FileCopy(metaPath, filepath.Join(configDrivePath, "files/metadata.yaml"))
if err != nil {
return err
}
}
return nil
}
func (vm *qemu) templateApplyNow(trigger string, path string) error {
// If there's no metadata, just return.
fname := filepath.Join(vm.Path(), "metadata.yaml")
if !shared.PathExists(fname) {
return nil
}
// Parse the metadata.
content, err := ioutil.ReadFile(fname)
if err != nil {
return errors.Wrap(err, "Failed to read metadata")
}
metadata := new(api.ImageMetadata)
err = yaml.Unmarshal(content, &metadata)
if err != nil {
return errors.Wrapf(err, "Could not parse %s", fname)
}
// Figure out the instance architecture.
arch, err := osarch.ArchitectureName(vm.architecture)
if err != nil {
arch, err = osarch.ArchitectureName(vm.state.OS.Architectures[0])
if err != nil {
return errors.Wrap(err, "Failed to detect system architecture")
}
}
// Generate the instance metadata.
instanceMeta := make(map[string]string)
instanceMeta["name"] = vm.name
instanceMeta["type"] = "virtual-machine"
instanceMeta["architecture"] = arch
if vm.ephemeral {
instanceMeta["ephemeral"] = "true"
} else {
instanceMeta["ephemeral"] = "false"
}
// Go through the templates.
for tplPath, tpl := range metadata.Templates {
var w *os.File
// Check if the template should be applied now.
found := false
for _, tplTrigger := range tpl.When {
if tplTrigger == trigger {
found = true
break
}
}
if !found {
continue
}
// Create the file itself.
w, err = os.Create(filepath.Join(path, fmt.Sprintf("%s.out", tpl.Template)))
if err != nil {
return err
}
// Fix ownership and mode.
w.Chmod(0644)
defer w.Close()
// Read the template.
tplString, err := ioutil.ReadFile(filepath.Join(vm.TemplatesPath(), tpl.Template))
if err != nil {
return errors.Wrap(err, "Failed to read template file")
}
// Restrict template filesystem access to within the instance's templates path.
tplSet := pongo2.NewSet(fmt.Sprintf("%s-%s", vm.name, tpl.Template), pongoTemplate.ChrootLoader{Path: vm.TemplatesPath()})
tplRender, err := tplSet.FromString("{% autoescape off %}" + string(tplString) + "{% endautoescape %}")
if err != nil {
return errors.Wrap(err, "Failed to render template")
}
configGet := func(confKey, confDefault *pongo2.Value) *pongo2.Value {
val, ok := vm.expandedConfig[confKey.String()]
if !ok {
return confDefault
}
return pongo2.AsValue(strings.TrimRight(val, "\r\n"))
}
// Render the template.
err = tplRender.ExecuteWriter(pongo2.Context{"trigger": trigger,
"path": tplPath,
"instance": instanceMeta,
"container": instanceMeta, // FIXME: remove once most images have moved away.
"config": vm.expandedConfig,
"devices": vm.expandedDevices,
"properties": tpl.Properties,
"config_get": configGet}, w)
if err != nil {
return errors.Wrap(err, "Failed to execute template")
}
}
return nil
}
// deviceBootPriorities returns a map keyed on device name containing the boot index to use.
// Qemu tries to boot devices in order of boot index (lowest first).
func (vm *qemu) deviceBootPriorities() (map[string]int, error) {
type devicePrios struct {
Name string
BootPrio uint32
}
devices := []devicePrios{}
for devName, devConf := range vm.expandedDevices {
if devConf["type"] != "disk" && devConf["type"] != "nic" {
continue
}
bootPrio := uint32(0) // Default to lowest priority.
if devConf["boot.priority"] != "" {
prio, err := strconv.ParseInt(devConf["boot.priority"], 10, 32)
if err != nil {
return nil, errors.Wrapf(err, "Invalid boot.priority for device %q", devName)
}
bootPrio = uint32(prio)
} else if devConf["path"] == "/" {
bootPrio = 1 // Set boot priority of root disk higher than any device without a boot prio.
}
devices = append(devices, devicePrios{Name: devName, BootPrio: bootPrio})
}
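// Sort by boot priority (highest first) and assign sequential boot indexes; e.g. a NIC with
// boot.priority=10 and the root disk (implicit priority 1) get boot indexes 0 and 1 respectively.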
sort.SliceStable(devices, func(i, j int) bool { return devices[i].BootPrio > devices[j].BootPrio })
sortedDevs := make(map[string]int, len(devices))
for bootIndex, dev := range devices {
sortedDevs[dev.Name] = bootIndex
}
return sortedDevs, nil
}
// generateQemuConfigFile writes the qemu config file and returns its location.
// It writes the config file inside the VM's log path.
func (vm *qemu) generateQemuConfigFile(busName string, devConfs []*deviceConfig.RunConfig, fdFiles *[]string) (string, error) {
var sb *strings.Builder = &strings.Builder{}
err := qemuBase.Execute(sb, map[string]interface{}{
"architecture": vm.architectureName,
"spicePath": vm.spicePath(),
})
if err != nil {
return "", err
}
err = vm.addCPUMemoryConfig(sb)
if err != nil {
return "", err
}
err = qemuDriveFirmware.Execute(sb, map[string]interface{}{
"architecture": vm.architectureName,
"roPath": filepath.Join(vm.ovmfPath(), "OVMF_CODE.fd"),
"nvramPath": vm.nvramPath(),
})
if err != nil {
return "", err
}
err = qemuControlSocket.Execute(sb, map[string]interface{}{
"path": vm.monitorPath(),
})
if err != nil {
return "", err
}
// Setup the bus allocator.
bus := qemuNewBus(busName, sb)
// Now add the fixed set of devices. The multi-function groups used for these fixed internal devices are
// specifically chosen to ensure that we consume exactly 4 PCI bus ports (on PCIe bus). This ensures that
// the first user device NIC added will use the 5th PCI bus port and will be consistently named enp5s0
// on PCIe (which is needed to maintain compatibility with network configuration in our existing VM images).
// It's also meant to group all low-bandwidth internal devices onto a single address. The PCIe bus allows a
// total of 256 devices, but this assumes 32 chassis * 8 functions. By using VFs for the internal fixed
// devices we avoid consuming a chassis for each one.
devBus, devAddr, multi := bus.allocate(busFunctionGroupGeneric)
err = qemuBalloon.Execute(sb, map[string]interface{}{
"bus": bus.name,
"devBus": devBus,
"devAddr": devAddr,
"multifunction": multi,
})
if err != nil {
return "", err
}
devBus, devAddr, multi = bus.allocate(busFunctionGroupGeneric)
err = qemuRNG.Execute(sb, map[string]interface{}{
"bus": bus.name,
"devBus": devBus,
"devAddr": devAddr,
"multifunction": multi,
})
if err != nil {
return "", err
}
devBus, devAddr, multi = bus.allocate(busFunctionGroupGeneric)
err = qemuKeyboard.Execute(sb, map[string]interface{}{
"bus": bus.name,
"devBus": devBus,
"devAddr": devAddr,
"multifunction": multi,
})
if err != nil {
return "", err
}
devBus, devAddr, multi = bus.allocate(busFunctionGroupGeneric)
err = qemuTablet.Execute(sb, map[string]interface{}{
"bus": bus.name,
"devBus": devBus,
"devAddr": devAddr,
"multifunction": multi,
})
if err != nil {
return "", err
}
devBus, devAddr, multi = bus.allocate(busFunctionGroupGeneric)
err = qemuVsock.Execute(sb, map[string]interface{}{
"bus": bus.name,
"devBus": devBus,
"devAddr": devAddr,
"multifunction": multi,
"vsockID": vm.vsockID(),
})
if err != nil {
return "", err
}
devBus, devAddr, multi = bus.allocate(busFunctionGroupGeneric)
err = qemuSerial.Execute(sb, map[string]interface{}{
"bus": bus.name,
"devBus": devBus,
"devAddr": devAddr,
"multifunction": multi,
"chardevName": qemuSerialChardevName,
"ringbufSizeBytes": qmp.RingbufSize,
})
if err != nil {
return "", err
}
devBus, devAddr, multi = bus.allocate(busFunctionGroupNone)
err = qemuSCSI.Execute(sb, map[string]interface{}{
"bus": bus.name,
"devBus": devBus,
"devAddr": devAddr,
"multifunction": multi,
})
if err != nil {
return "", err
}
devBus, devAddr, multi = bus.allocate(busFunctionGroup9p)
err = qemuDriveConfig.Execute(sb, map[string]interface{}{
"bus": bus.name,
"devBus": devBus,
"devAddr": devAddr,
"multifunction": multi,
"path": filepath.Join(vm.Path(), "config"),
})
if err != nil {
return "", err
}
devBus, devAddr, multi = bus.allocate(busFunctionGroupNone)
err = qemuGPU.Execute(sb, map[string]interface{}{
"bus": bus.name,
"devBus": devBus,
"devAddr": devAddr,
"multifunction": multi,
"architecture": vm.architectureName,
})
if err != nil {
return "", err
}
// Dynamic devices.
bootIndexes, err := vm.deviceBootPriorities()
if err != nil {
return "", errors.Wrap(err, "Error calculating boot indexes")
}
// Record the mounts we are going to do inside the VM using the agent.
agentMounts := []instancetype.VMAgentMount{}
// These devices are sorted so that NICs are added first to ensure that the first NIC can use the 5th
// PCIe bus port and will be consistently named enp5s0 for compatibility with network configuration in our
// existing VM images. Even on non-PCIe buses, having NICs first means that their names won't change when
// other devices are added.
for _, runConf := range devConfs {
// Add drive devices.
if len(runConf.Mounts) > 0 {
for _, drive := range runConf.Mounts {
if drive.TargetPath == "/" {
err = vm.addRootDriveConfig(sb, bootIndexes, drive)
} else if drive.FSType == "9p" {
err = vm.addDriveDirConfig(sb, bus, fdFiles, &agentMounts, drive)
} else {
err = vm.addDriveConfig(sb, bootIndexes, drive)
}
if err != nil {
return "", err
}
}
}
// Add network device.
if len(runConf.NetworkInterface) > 0 {
err = vm.addNetDevConfig(sb, bus, bootIndexes, runConf.NetworkInterface, fdFiles)
if err != nil {
return "", err
}
}
// Add GPU device.
if len(runConf.GPUDevice) > 0 {
err = vm.addGPUDevConfig(sb, bus, runConf.GPUDevice)
if err != nil {
return "", err
}
}
}
// Write the agent mount config.
agentMountJSON, err := json.Marshal(agentMounts)
if err != nil {
return "", errors.Wrapf(err, "Failed marshalling agent mounts to JSON")
}
agentMountFile := filepath.Join(vm.Path(), "config", "agent-mounts.json")
err = ioutil.WriteFile(agentMountFile, agentMountJSON, 0400)
if err != nil {
return "", errors.Wrapf(err, "Failed writing agent mounts file")
}
// Write the config file to disk.
configPath := filepath.Join(vm.LogPath(), "qemu.conf")
return configPath, ioutil.WriteFile(configPath, []byte(sb.String()), 0640)
}
// addCPUMemoryConfig adds the qemu config required for setting the number of virtualised CPUs and memory.
func (vm *qemu) addCPUMemoryConfig(sb *strings.Builder) error {
// Default to a single core.
cpus := vm.expandedConfig["limits.cpu"]
if cpus == "" {
cpus = "1"
}
ctx := map[string]interface{}{
"architecture": vm.architectureName,
}
cpuCount, err := strconv.Atoi(cpus)
hostNodes := []uint64{}
if err == nil {
// If not pinning, default to exposing cores.
ctx["cpuCount"] = cpuCount
ctx["cpuSockets"] = 1
ctx["cpuCores"] = cpuCount
ctx["cpuThreads"] = 1
hostNodes = []uint64{0}
} else {
// Expand to a set of CPU identifiers and get the pinning map.
nrSockets, nrCores, nrThreads, vcpus, numaNodes, err := vm.cpuTopology(cpus)
if err != nil {
return err
}
// Figure out socket-id/core-id/thread-id for all vcpus.
vcpuSocket := map[uint64]uint64{}
vcpuCore := map[uint64]uint64{}
vcpuThread := map[uint64]uint64{}
vcpu := uint64(0)
for i := 0; i < nrSockets; i++ {
for j := 0; j < nrCores; j++ {
for k := 0; k < nrThreads; k++ {
vcpuSocket[vcpu] = uint64(i)
vcpuCore[vcpu] = uint64(j)
vcpuThread[vcpu] = uint64(k)
vcpu++
}
}
}
// Prepare the NUMA map.
numa := []map[string]uint64{}
numaIDs := []uint64{}
numaNode := uint64(0)
for hostNode, entry := range numaNodes {
hostNodes = append(hostNodes, hostNode)
numaIDs = append(numaIDs, numaNode)
for _, vcpu := range entry {
numa = append(numa, map[string]uint64{
"node": numaNode,
"socket": vcpuSocket[vcpu],
"core": vcpuCore[vcpu],
"thread": vcpuThread[vcpu],
})
}
numaNode++
}
// Prepare context.
ctx["cpuCount"] = len(vcpus)
ctx["cpuSockets"] = nrSockets
ctx["cpuCores"] = nrCores
ctx["cpuThreads"] = nrThreads
ctx["cpuNumaNodes"] = numaIDs
ctx["cpuNumaMapping"] = numa
ctx["cpuNumaHostNodes"] = hostNodes
}
// Configure memory limit.
memSize := vm.expandedConfig["limits.memory"]
if memSize == "" {
memSize = qemuDefaultMemSize // Default if no memory limit specified.
}
memSizeBytes, err := units.ParseByteSizeString(memSize)
if err != nil {
return fmt.Errorf("limits.memory invalid: %v", err)
}
ctx["hugepages"] = ""
if shared.IsTrue(vm.expandedConfig["limits.memory.hugepages"]) {
hugetlb, err := util.HugepagesPath()
if err != nil {
return err
}
ctx["hugepages"] = hugetlb
}
// Determine per-node memory limit.
memSizeBytes = memSizeBytes / 1024 / 1024
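// memSizeBytes now holds the size in MiB; split it evenly across the selected host NUMA nodes and
// round down to a multiple of the node count.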
nodeMemory := int64(memSizeBytes / int64(len(hostNodes)))
memSizeBytes = nodeMemory * int64(len(hostNodes))
ctx["memory"] = nodeMemory
err = qemuMemory.Execute(sb, map[string]interface{}{
"architecture": vm.architectureName,
"memSizeBytes": memSizeBytes,
})
if err != nil {
return err
}
// Configure the CPU limit.
return qemuCPU.Execute(sb, ctx)
}
// addFileDescriptor adds a file path to the list of files to open and pass file descriptor to qemu.
// Returns the file descriptor number that qemu will receive.
func (vm *qemu) addFileDescriptor(fdFiles *[]string, filePath string) int {
// Append the tap device file path to the list of files to be opened and passed to qemu.
*fdFiles = append(*fdFiles, filePath)
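// For example, the first file appended here will be received by qemu as file descriptor 3 (0-2 are stdio).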
return 2 + len(*fdFiles) // Use 2+fdFiles count, as first user file descriptor is 3.
}
// addRootDriveConfig adds the qemu config required for adding the root drive.
func (vm *qemu) addRootDriveConfig(sb *strings.Builder, bootIndexes map[string]int, rootDriveConf deviceConfig.MountEntryItem) error {
if rootDriveConf.TargetPath != "/" {
return fmt.Errorf("Non-root drive config supplied")
}
pool, err := vm.getStoragePool()
if err != nil {
return err
}
rootDrivePath, err := pool.GetInstanceDisk(vm)
if err != nil {
return err
}
// Generate a new device config with the root device path expanded.
driveConf := deviceConfig.MountEntryItem{
DevName: rootDriveConf.DevName,
DevPath: rootDrivePath,
}
// If the storage pool is on ZFS and backed by a loop file and we can't use DirectIO, then resort to
// unsafe async I/O to avoid kernel hangs when running ZFS storage pools in an image file on another FS.
driverInfo := pool.Driver().Info()
driverConf := pool.Driver().Config()
if driverInfo.Name == "zfs" && !driverInfo.DirectIO && shared.PathExists(driverConf["source"]) && !shared.IsBlockdevPath(driverConf["source"]) {
driveConf.Opts = append(driveConf.Opts, qemuUnsafeIO)
}
return vm.addDriveConfig(sb, bootIndexes, driveConf)
}
// addDriveDirConfig adds the qemu config required for adding a supplementary drive directory share.
func (vm *qemu) addDriveDirConfig(sb *strings.Builder, bus *qemuBus, fdFiles *[]string, agentMounts *[]instancetype.VMAgentMount, driveConf deviceConfig.MountEntryItem) error {
mountTag := fmt.Sprintf("lxd_%s", driveConf.DevName)
agentMount := instancetype.VMAgentMount{
Source: mountTag,
Target: driveConf.TargetPath,
FSType: driveConf.FSType,
}
// If mount type is 9p, we need to specify to use the virtio transport to support more VM guest OSes.
if agentMount.FSType == "9p" {
agentMount.Options = append(agentMount.Options, "trans=virtio")
}
// Indicate to agent to mount this readonly. Note: This is purely to indicate to VM guest that this is
// readonly, it should *not* be used as a security measure, as the VM guest could remount it R/W.
if shared.StringInSlice("ro", driveConf.Opts) {
agentMount.Options = append(agentMount.Options, "ro")
}
// Record the 9p mount for the agent.
*agentMounts = append(*agentMounts, agentMount)
devBus, devAddr, multi := bus.allocate(busFunctionGroup9p)
// For read-only shares, do not use the proxy.
if shared.StringInSlice("ro", driveConf.Opts) {
return qemuDriveDir.Execute(sb, map[string]interface{}{
"bus": bus.name,
"devBus": devBus,
"devAddr": devAddr,
"multifunction": multi,
"devName": driveConf.DevName,
"mountTag": mountTag,
"path": driveConf.DevPath,
"readonly": true,
})
}
// Only use proxy for writable shares.
proxyFD := vm.addFileDescriptor(fdFiles, driveConf.DevPath)
return qemuDriveDir.Execute(sb, map[string]interface{}{
"bus": bus.name,
"devBus": devBus,
"devAddr": devAddr,
"multifunction": multi,
"devName": driveConf.DevName,
"mountTag": mountTag,
"proxyFD": proxyFD,
"readonly": false,
})
}
// addDriveConfig adds the qemu config required for adding a supplementary drive.
func (vm *qemu) addDriveConfig(sb *strings.Builder, bootIndexes map[string]int, driveConf deviceConfig.MountEntryItem) error {
// Use native kernel async IO and O_DIRECT by default.
aioMode := "native"
cacheMode := "none" // Bypass host cache, use O_DIRECT semantics.
// If drive config indicates we need to use unsafe I/O then use it.
if shared.StringInSlice(qemuUnsafeIO, driveConf.Opts) {
logger.Warnf("Using unsafe cache I/O with %s", driveConf.DevPath)
aioMode = "threads"
cacheMode = "unsafe" // Use host cache, but ignore all sync requests from guest.
} else if shared.PathExists(driveConf.DevPath) && !shared.IsBlockdevPath(driveConf.DevPath) {
// Disk dev path is a file, check whether it is located on a ZFS filesystem.
fsType, err := util.FilesystemDetect(driveConf.DevPath)
if err != nil {
return errors.Wrapf(err, "Failed detecting filesystem type of %q", driveConf.DevPath)
}
// If backing FS is ZFS or BTRFS, avoid using direct I/O and use host page cache only.
// We've seen ZFS hangs and BTRFS checksum issues when using direct I/O on image files.
if fsType == "zfs" || fsType == "btrfs" {
if driveConf.FSType != "iso9660" {
// Only warn about using writeback cache if the drive image is writable.
logger.Warnf("Using writeback cache I/O with %q as backing filesystem is %q", driveConf.DevPath, fsType)
}
aioMode = "threads"
cacheMode = "writeback" // Use host cache, with neither O_DSYNC nor O_DIRECT semantics.
}
}
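// Track local device paths only; rbd: URIs are not host paths.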
if !strings.HasPrefix(driveConf.DevPath, "rbd:") {
vm.devPaths = append(vm.devPaths, driveConf.DevPath)
}
return qemuDrive.Execute(sb, map[string]interface{}{
"devName": driveConf.DevName,
"devPath": driveConf.DevPath,
"bootIndex": bootIndexes[driveConf.DevName],
"cacheMode": cacheMode,
"aioMode": aioMode,
"shared": driveConf.TargetPath != "/" && !strings.HasPrefix(driveConf.DevPath, "rbd:"),
})
}
// addNetDevConfig adds the qemu config required for adding a network device.
func (vm *qemu) addNetDevConfig(sb *strings.Builder, bus *qemuBus, bootIndexes map[string]int, nicConfig []deviceConfig.RunConfigItem, fdFiles *[]string) error {
var devName, nicName, devHwaddr, pciSlotName string
for _, nicItem := range nicConfig {
if nicItem.Key == "devName" {
devName = nicItem.Value
} else if nicItem.Key == "link" {
nicName = nicItem.Value
} else if nicItem.Key == "hwaddr" {
devHwaddr = nicItem.Value
} else if nicItem.Key == "pciSlotName" {
pciSlotName = nicItem.Value
}
}
var tpl *template.Template
tplFields := map[string]interface{}{
"bus": bus.name,
"devName": devName,
"devHwaddr": devHwaddr,
"bootIndex": bootIndexes[devName],
}
// Detect MACVTAP interface types and figure out which tap device is being used.
// This is so we can open a file handle to the tap device and pass it to the qemu process.
if shared.PathExists(fmt.Sprintf("/sys/class/net/%s/macvtap", nicName)) {
content, err := ioutil.ReadFile(fmt.Sprintf("/sys/class/net/%s/ifindex", nicName))
if err != nil {
return errors.Wrapf(err, "Error getting tap device ifindex")
}
ifindex, err := strconv.Atoi(strings.TrimSpace(string(content)))
if err != nil {
return errors.Wrapf(err, "Error parsing tap device ifindex")
}
// Append the tap device file path to the list of files to be opened and passed to qemu.
tplFields["tapFD"] = vm.addFileDescriptor(fdFiles, fmt.Sprintf("/dev/tap%d", ifindex))
tpl = qemuNetDevTapFD
} else if shared.PathExists(fmt.Sprintf("/sys/class/net/%s/tun_flags", nicName)) {
// Detect TAP (via TUN driver) device.
tplFields["ifName"] = nicName
tpl = qemuNetDevTapTun
} else if pciSlotName != "" {
// Detect physical passthrough device.
tplFields["pciSlotName"] = pciSlotName
tpl = qemuNetDevPhysical
}
devBus, devAddr, multi := bus.allocate(busFunctionGroupNone)
tplFields["devBus"] = devBus
tplFields["devAddr"] = devAddr
tplFields["multifunction"] = multi
if tpl != nil {
return tpl.Execute(sb, tplFields)
}
return fmt.Errorf("Unrecognised device type")
}
// addGPUDevConfig adds the qemu config required for adding a GPU device.
func (vm *qemu) addGPUDevConfig(sb *strings.Builder, bus *qemuBus, gpuConfig []deviceConfig.RunConfigItem) error {
var devName, pciSlotName string
for _, gpuItem := range gpuConfig {
if gpuItem.Key == "devName" {
devName = gpuItem.Value
} else if gpuItem.Key == "pciSlotName" {
pciSlotName = gpuItem.Value
}
}
// Pass-through VGA mode if enabled on the host device and architecture is x86_64.
vgaMode := shared.PathExists(filepath.Join("/sys/bus/pci/devices", pciSlotName, "boot_vga")) && vm.architecture == osarch.ARCH_64BIT_INTEL_X86
devBus, devAddr, multi := bus.allocate(fmt.Sprintf("lxd_%s", devName))
tplFields := map[string]interface{}{
"bus": bus.name,
"devBus": devBus,
"devAddr": devAddr,
"multifunction": multi,
"devName": devName,
"pciSlotName": pciSlotName,
"vga": vgaMode,
}
// Add main GPU device in VGA mode to qemu config.
err := qemuGPUDevPhysical.Execute(sb, tplFields)
if err != nil {
return err
}
// Add any other related IOMMU VFs as generic PCI devices.
iommuGroupPath := filepath.Join("/sys/bus/pci/devices", pciSlotName, "iommu_group", "devices")
if shared.PathExists(iommuGroupPath) {
// Extract parent slot name by removing any virtual function ID.
parts := strings.SplitN(pciSlotName, ".", 2)
prefix := parts[0]
// Iterate the members of the IOMMU group and override any that match the parent slot name prefix.
err := filepath.Walk(iommuGroupPath, func(path string, _ os.FileInfo, err error) error {
if err != nil {
return err
}
iommuSlotName := filepath.Base(path) // Virtual function's address is dir name.
// Match any VFs that are related to the GPU device (but not the GPU device itself).
if strings.HasPrefix(iommuSlotName, prefix) && iommuSlotName != pciSlotName {
// Add VF device without VGA mode to qemu config.
devBus, devAddr, multi := bus.allocate(fmt.Sprintf("lxd_%s", devName))
tplFields := map[string]interface{}{
"bus": bus.name,
"devBus": devBus,
"devAddr": devAddr,
"multifunction": multi,
// Generate associated device name by combining main device name and VF ID.
"devName": fmt.Sprintf("%s_%s", devName, devAddr),
"pciSlotName": iommuSlotName,
"vga": false,
}
err := qemuGPUDevPhysical.Execute(sb, tplFields)
if err != nil {
return err
}
}
return nil
})
if err != nil {
return err
}
}
return nil
}
// pidFilePath returns the path where the qemu process should write its PID.
func (vm *qemu) pidFilePath() string {
return filepath.Join(vm.LogPath(), "qemu.pid")
}
// pid gets the PID of the running qemu process.
func (vm *qemu) pid() (int, error) {
pidStr, err := ioutil.ReadFile(vm.pidFilePath())
if os.IsNotExist(err) {
return 0, nil
}
if err != nil {
return -1, err
}
pid, err := strconv.Atoi(strings.TrimSpace(string(pidStr)))
if err != nil {
return -1, err
}
return pid, nil
}
// Stop the VM.
func (vm *qemu) Stop(stateful bool) error {
// Check that we're not already stopped.
if !vm.IsRunning() {
return fmt.Errorf("The instance is already stopped")
}
// Check that no stateful stop was requested.
if stateful {
return fmt.Errorf("Stateful stop isn't supported for VMs at this time")
}
// Setup a new operation.
op, err := operationlock.Create(vm.id, "stop", false, true)
if err != nil {
return err
}
// Connect to the monitor.
monitor, err := qmp.Connect(vm.monitorPath(), qemuSerialChardevName, vm.getMonitorEventHandler())
if err != nil {
// If we fail to connect, it's most likely because the VM is already off.
op.Done(nil)
return nil
}
// Get the wait channel.
chDisconnect, err := monitor.Wait()
if err != nil {
if err == qmp.ErrMonitorDisconnect {
op.Done(nil)
return nil
}
op.Done(err)
return err
}
// Send the quit command.
err = monitor.Quit()
if err != nil {
if err == qmp.ErrMonitorDisconnect {
op.Done(nil)
return nil
}
op.Done(err)
return err
}
// Wait for QEMU to exit (can take a while if pending I/O).
<-chDisconnect
// Wait for onStop.
err = op.Wait()
if err != nil && vm.IsRunning() {
return err
}
vm.state.Events.SendLifecycle(vm.project, "virtual-machine-stopped", fmt.Sprintf("/1.0/virtual-machines/%s", vm.name), nil)
return nil
}
// Unfreeze restores the instance to running.
func (vm *qemu) Unfreeze() error {
// Connect to the monitor.
monitor, err := qmp.Connect(vm.monitorPath(), qemuSerialChardevName, vm.getMonitorEventHandler())
if err != nil {
return err
}
// Send the cont command.
err = monitor.Start()
if err != nil {
return err
}
return nil
}
// IsPrivileged does not apply to virtual machines. Always returns false.
func (vm *qemu) IsPrivileged() bool {
return false
}
// Restore restores an instance snapshot.
func (vm *qemu) Restore(source instance.Instance, stateful bool) error {
if stateful {
return fmt.Errorf("Stateful snapshots of VMs aren't supported yet")
}
var ctxMap log.Ctx
// Load the storage driver.
pool, err := storagePools.GetPoolByInstance(vm.state, vm)
if err != nil {
return err
}
// Ensure that storage is mounted for backup.yaml updates.
ourStart, err := pool.MountInstance(vm, nil)
if err != nil {
return err
}
if ourStart {
defer pool.UnmountInstance(vm, nil)
}
// Stop the instance.
wasRunning := false
if vm.IsRunning() {
wasRunning = true
ephemeral := vm.IsEphemeral()
if ephemeral {
// Unset ephemeral flag.
args := db.InstanceArgs{
Architecture: vm.Architecture(),
Config: vm.LocalConfig(),
Description: vm.Description(),
Devices: vm.LocalDevices(),
Ephemeral: false,
Profiles: vm.Profiles(),
Project: vm.Project(),
Type: vm.Type(),
Snapshot: vm.IsSnapshot(),
}
err := vm.Update(args, false)
if err != nil {
return err
}
// On function return, set the flag back on.
defer func() {
args.Ephemeral = ephemeral
vm.Update(args, false)
}()
}
// This will unmount the instance storage.
err := vm.Stop(false)
if err != nil {
return err
}
}
ctxMap = log.Ctx{
"project": vm.project,
"name": vm.name,
"created": vm.creationDate,
"ephemeral": vm.ephemeral,
"used": vm.lastUsedDate,
"source": source.Name()}
logger.Info("Restoring instance", ctxMap)
// Restore the rootfs.
err = pool.RestoreInstanceSnapshot(vm, source, nil)
if err != nil {
return err
}
// Restore the configuration.
args := db.InstanceArgs{
Architecture: source.Architecture(),
Config: source.LocalConfig(),
Description: source.Description(),
Devices: source.LocalDevices(),
Ephemeral: source.IsEphemeral(),
Profiles: source.Profiles(),
Project: source.Project(),
Type: source.Type(),
Snapshot: source.IsSnapshot(),
}
// Don't pass as user-requested as there's no way to fix a bad config.
err = vm.Update(args, false)
if err != nil {
logger.Error("Failed restoring instance configuration", ctxMap)
return err
}
// The old backup file may be out of date (e.g. it doesn't have all the current snapshots of
// the instance listed); let's write a new one to be safe.
err = vm.UpdateBackupFile()
if err != nil {
return err
}
vm.state.Events.SendLifecycle(vm.project, "virtual-machine-snapshot-restored", fmt.Sprintf("/1.0/virtual-machines/%s", vm.name), map[string]interface{}{"snapshot_name": vm.name})
// Restart the instance.
if wasRunning {
logger.Info("Restored instance", ctxMap)
return vm.Start(false)
}
logger.Info("Restored instance", ctxMap)
return nil
}
// Snapshots returns a list of snapshots.
func (vm *qemu) Snapshots() ([]instance.Instance, error) {
var snaps []db.Instance
if vm.IsSnapshot() {
return []instance.Instance{}, nil
}
// Get all the snapshots
err := vm.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
var err error
snaps, err = tx.GetInstanceSnapshotsWithName(vm.Project(), vm.name)
if err != nil {
return err
}
return nil
})
if err != nil {
return nil, err
}
// Build the snapshot list
snapshots, err := instance.LoadAllInternal(vm.state, snaps)
if err != nil {
return nil, err
}
instances := make([]instance.Instance, len(snapshots))
for k, v := range snapshots {
instances[k] = instance.Instance(v)
}
return instances, nil
}
// Backups returns a list of backups.
func (vm *qemu) Backups() ([]backup.InstanceBackup, error) {
return []backup.InstanceBackup{}, nil
}
// Rename the instance.
func (vm *qemu) Rename(newName string) error {
oldName := vm.Name()
ctxMap := log.Ctx{
"project": vm.project,
"name": vm.name,
"created": vm.creationDate,
"ephemeral": vm.ephemeral,
"used": vm.lastUsedDate,
"newname": newName}
logger.Info("Renaming instance", ctxMap)
// Sanity checks.
err := instance.ValidName(newName, vm.IsSnapshot())
if err != nil {
return err
}
if vm.IsRunning() {
return fmt.Errorf("Renaming of running instance not allowed")
}
// Clean things up.
vm.cleanup()
pool, err := storagePools.GetPoolByInstance(vm.state, vm)
if err != nil {
return errors.Wrap(err, "Load instance storage pool")
}
if vm.IsSnapshot() {
_, newSnapName, _ := shared.InstanceGetParentAndSnapshotName(newName)
err = pool.RenameInstanceSnapshot(vm, newSnapName, nil)
if err != nil {
return errors.Wrap(err, "Rename instance snapshot")
}
} else {
err = pool.RenameInstance(vm, newName, nil)
if err != nil {
return errors.Wrap(err, "Rename instance")
}
}
if !vm.IsSnapshot() {
// Rename all the instance snapshot database entries.
results, err := vm.state.Cluster.GetInstanceSnapshotsNames(vm.project, oldName)
if err != nil {
logger.Error("Failed to get instance snapshots", ctxMap)
return err
}
for _, sname := range results {
// Rename the snapshot.
oldSnapName := strings.SplitN(sname, shared.SnapshotDelimiter, 2)[1]
baseSnapName := filepath.Base(sname)
err := vm.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
return tx.RenameInstanceSnapshot(vm.project, oldName, oldSnapName, baseSnapName)
})
if err != nil {
logger.Error("Failed renaming snapshot", ctxMap)
return err
}
}
}
// Rename the instance database entry.
err = vm.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
if vm.IsSnapshot() {
oldParts := strings.SplitN(oldName, shared.SnapshotDelimiter, 2)
newParts := strings.SplitN(newName, shared.SnapshotDelimiter, 2)
return tx.RenameInstanceSnapshot(vm.project, oldParts[0], oldParts[1], newParts[1])
}
return tx.RenameInstance(vm.project, oldName, newName)
})
if err != nil {
logger.Error("Failed renaming instance", ctxMap)
return err
}
// Rename the logging path.
os.RemoveAll(shared.LogPath(newName))
if shared.PathExists(vm.LogPath()) {
err := os.Rename(vm.LogPath(), shared.LogPath(newName))
if err != nil {
logger.Error("Failed renaming instance", ctxMap)
return err
}
}
// Rename the MAAS entry.
if !vm.IsSnapshot() {
err = vm.maasRename(newName)
if err != nil {
return err
}
}
revert := revert.New()
defer revert.Fail()
// Set the new name in the struct.
vm.name = newName
revert.Add(func() { vm.name = oldName })
// Rename the backups.
backups, err := vm.Backups()
if err != nil {
return err
}
for _, backup := range backups {
b := backup
oldName := b.Name()
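// Backup names take the form "<instance>/<backup>"; keep the backup part and prefix it with the new instance name.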
backupName := strings.Split(oldName, "/")[1]
newName := fmt.Sprintf("%s/%s", newName, backupName)
err = b.Rename(newName)
if err != nil {
return err
}
revert.Add(func() { b.Rename(oldName) })
}
// Update lease files.
network.UpdateDNSMasqStatic(vm.state, "")
logger.Info("Renamed instance", ctxMap)
if vm.IsSnapshot() {
vm.state.Events.SendLifecycle(vm.project, "virtual-machine-snapshot-renamed",
fmt.Sprintf("/1.0/virtual-machines/%s", oldName), map[string]interface{}{
"new_name": newName,
"snapshot_name": oldName,
})
} else {
vm.state.Events.SendLifecycle(vm.project, "virtual-machine-renamed",
fmt.Sprintf("/1.0/virtual-machines/%s", oldName), map[string]interface{}{
"new_name": newName,
})
}
revert.Success()
return nil
}
// Update the instance config.
func (vm *qemu) Update(args db.InstanceArgs, userRequested bool) error {
revert := revert.New()
defer revert.Fail()
// Set sane defaults for unset keys.
if args.Project == "" {
args.Project = project.Default
}
if args.Architecture == 0 {
args.Architecture = vm.architecture
}
if args.Config == nil {
args.Config = map[string]string{}
}
if args.Devices == nil {
args.Devices = deviceConfig.Devices{}
}
if args.Profiles == nil {
args.Profiles = []string{}
}
if userRequested {
// Validate the new config.
err := instance.ValidConfig(vm.state.OS, args.Config, false, false)
if err != nil {
return errors.Wrap(err, "Invalid config")
}
// Validate the new devices without using expanded devices validation (expensive checks disabled).
err = instance.ValidDevices(vm.state, vm.state.Cluster, vm.Project(), vm.Type(), args.Devices, false)
if err != nil {
return errors.Wrap(err, "Invalid devices")
}
}
// Validate the new profiles.
profiles, err := vm.state.Cluster.GetProfileNames(args.Project)
if err != nil {
return errors.Wrap(err, "Failed to get profiles")
}
checkedProfiles := []string{}
for _, profile := range args.Profiles {
if !shared.StringInSlice(profile, profiles) {
return fmt.Errorf("Requested profile '%s' doesn't exist", profile)
}
if shared.StringInSlice(profile, checkedProfiles) {
return fmt.Errorf("Duplicate profile found in request")
}
checkedProfiles = append(checkedProfiles, profile)
}
// Validate the new architecture.
if args.Architecture != 0 {
_, err = osarch.ArchitectureName(args.Architecture)
if err != nil {
return fmt.Errorf("Invalid architecture ID: %s", err)
}
}
// Get a copy of the old configuration.
oldDescription := vm.Description()
oldArchitecture := 0
err = shared.DeepCopy(&vm.architecture, &oldArchitecture)
if err != nil {
return err
}
oldEphemeral := false
err = shared.DeepCopy(&vm.ephemeral, &oldEphemeral)
if err != nil {
return err
}
oldExpandedDevices := deviceConfig.Devices{}
err = shared.DeepCopy(&vm.expandedDevices, &oldExpandedDevices)
if err != nil {
return err
}
oldExpandedConfig := map[string]string{}
err = shared.DeepCopy(&vm.expandedConfig, &oldExpandedConfig)
if err != nil {
return err
}
oldLocalDevices := deviceConfig.Devices{}
err = shared.DeepCopy(&vm.localDevices, &oldLocalDevices)
if err != nil {
return err
}
oldLocalConfig := map[string]string{}
err = shared.DeepCopy(&vm.localConfig, &oldLocalConfig)
if err != nil {
return err
}
oldProfiles := []string{}
err = shared.DeepCopy(&vm.profiles, &oldProfiles)
if err != nil {
return err
}
oldExpiryDate := vm.expiryDate
// Revert local changes if update fails.
revert.Add(func() {
vm.description = oldDescription
vm.architecture = oldArchitecture
vm.ephemeral = oldEphemeral
vm.expandedConfig = oldExpandedConfig
vm.expandedDevices = oldExpandedDevices
vm.localConfig = oldLocalConfig
vm.localDevices = oldLocalDevices
vm.profiles = oldProfiles
vm.expiryDate = oldExpiryDate
})
// Apply the various changes to local vars.
vm.description = args.Description
vm.architecture = args.Architecture
vm.ephemeral = args.Ephemeral
vm.localConfig = args.Config
vm.localDevices = args.Devices
vm.profiles = args.Profiles
vm.expiryDate = args.ExpiryDate
// Expand the config.
err = vm.expandConfig(nil)
if err != nil {
return errors.Wrap(err, "Expand config")
}
err = vm.expandDevices(nil)
if err != nil {
return errors.Wrap(err, "Expand devices")
}
// Diff the configurations.
changedConfig := []string{}
for key := range oldExpandedConfig {
if oldExpandedConfig[key] != vm.expandedConfig[key] {
if !shared.StringInSlice(key, changedConfig) {
changedConfig = append(changedConfig, key)
}
}
}
for key := range vm.expandedConfig {
if oldExpandedConfig[key] != vm.expandedConfig[key] {
if !shared.StringInSlice(key, changedConfig) {
changedConfig = append(changedConfig, key)
}
}
}
// Diff the devices.
removeDevices, addDevices, updateDevices, allUpdatedKeys := oldExpandedDevices.Update(vm.expandedDevices, nil)
if userRequested {
// Do some validation of the config diff.
err = instance.ValidConfig(vm.state.OS, vm.expandedConfig, false, true)
if err != nil {
return errors.Wrap(err, "Invalid expanded config")
}
// Do full expanded validation of the devices diff.
err = instance.ValidDevices(vm.state, vm.state.Cluster, vm.Project(), vm.Type(), vm.expandedDevices, true)
if err != nil {
return errors.Wrap(err, "Invalid expanded devices")
}
}
isRunning := vm.IsRunning()
if isRunning && len(allUpdatedKeys) > 0 {
return fmt.Errorf("Devices cannot be changed when VM is running")
}
// Use the device interface to apply update changes.
err = vm.updateDevices(removeDevices, addDevices, updateDevices, oldExpandedDevices)
if err != nil {
return err
}
if isRunning {
// Only certain keys can be changed on a running VM.
liveUpdateKeys := []string{"limits.memory"}
// Check only keys that support live update have changed.
for _, key := range changedConfig {
if !strings.HasPrefix(key, "user.") && !shared.StringInSlice(key, liveUpdateKeys) {
return fmt.Errorf("Key %q cannot be updated when VM is running", key)
}
}
// Apply live update for each key.
for _, key := range changedConfig {
value := vm.expandedConfig[key]
if key == "limits.memory" {
err = vm.updateMemoryLimit(value)
if err != nil {
return errors.Wrap(err, "Failed updating memory limit")
}
}
}
}
// Update MAAS (must run after the MAC addresses have been generated).
updateMAAS := false
for _, key := range []string{"maas.subnet.ipv4", "maas.subnet.ipv6", "ipv4.address", "ipv6.address"} {
if shared.StringInSlice(key, allUpdatedKeys) {
updateMAAS = true
break
}
}
if !vm.IsSnapshot() && updateMAAS {
err = vm.maasUpdate(oldExpandedDevices.CloneNative())
if err != nil {
return err
}
}
if shared.StringInSlice("security.secureboot", changedConfig) {
// Re-generate the NVRAM.
err = vm.setupNvram()
if err != nil {
return err
}
}
// Finally, apply the changes to the database.
err = vm.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
// Snapshots should update only their descriptions and expiry date.
if vm.IsSnapshot() {
return tx.UpdateInstanceSnapshot(vm.id, vm.description, vm.expiryDate)
}
object, err := tx.GetInstance(vm.project, vm.name)
if err != nil {
return err
}
object.Description = vm.description
object.Architecture = vm.architecture
object.Ephemeral = vm.ephemeral
object.ExpiryDate = vm.expiryDate
object.Config = vm.localConfig
object.Profiles = vm.profiles
object.Devices = vm.localDevices.CloneNative()
return tx.UpdateInstance(vm.project, vm.name, *object)
})
if err != nil {
return errors.Wrap(err, "Failed to update database")
}
err = vm.UpdateBackupFile()
if err != nil && !os.IsNotExist(err) {
return errors.Wrap(err, "Failed to write backup file")
}
// Changes have been applied and recorded, do not revert if an error occurs from here.
revert.Success()
if isRunning {
err = vm.writeInstanceData()
if err != nil {
return errors.Wrap(err, "Failed to write instance-data file")
}
// Send devlxd notifications only for user.* key changes
for _, key := range changedConfig {
if !strings.HasPrefix(key, "user.") {
continue
}
msg := map[string]string{
"key": key,
"old_value": oldExpandedConfig[key],
"value": vm.expandedConfig[key],
}
err = vm.devlxdEventSend("config", msg)
if err != nil {
return err
}
}
}
var endpoint string
if vm.IsSnapshot() {
parentName, snapName, _ := shared.InstanceGetParentAndSnapshotName(vm.name)
endpoint = fmt.Sprintf("/1.0/virtual-machines/%s/snapshots/%s", parentName, snapName)
} else {
endpoint = fmt.Sprintf("/1.0/virtual-machines/%s", vm.name)
}
vm.state.Events.SendLifecycle(vm.project, "virtual-machine-updated", endpoint, nil)
return nil
}
// updateMemoryLimit live updates the VM's memory limit by resizing the balloon device.
func (vm *qemu) updateMemoryLimit(newLimit string) error {
if newLimit == "" {
return nil
}
if shared.IsTrue(vm.expandedConfig["limits.memory.hugepages"]) {
return fmt.Errorf("Cannot live update memory limit when using huge pages")
}
// Check new size string is valid and convert to bytes.
newSizeBytes, err := units.ParseByteSizeString(newLimit)
if err != nil {
return errors.Wrapf(err, "Invalid memory size")
}
// Connect to the monitor.
monitor, err := qmp.Connect(vm.monitorPath(), qemuSerialChardevName, vm.getMonitorEventHandler())
if err != nil {
return err // The VM isn't running as no monitor socket is available.
}
baseSizeBytes, err := monitor.GetMemorySizeBytes()
if err != nil {
return err
}
curSizeBytes, err := monitor.GetMemoryBalloonSizeBytes()
if err != nil {
return err
}
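// Ballooning can only reduce memory below the boot-time size; growing beyond it is rejected below.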
if curSizeBytes == newSizeBytes {
return nil
} else if baseSizeBytes < newSizeBytes {
return fmt.Errorf("Cannot increase memory size beyond boot time size when VM is running")
}
// Set effective memory size.
err = monitor.SetMemoryBalloonSizeBytes(newSizeBytes)
if err != nil {
return err
}
// Changing the memory balloon can take time, so poll the effective size until it is within 1%
// of the target size, which we treat as success (it may continue converging on the target).
for i := 0; i < 5; i++ {
curSizeBytes, err = monitor.GetMemoryBalloonSizeBytes()
if err != nil {
return err
}
var diff int64
if curSizeBytes < newSizeBytes {
diff = newSizeBytes - curSizeBytes
} else {
diff = curSizeBytes - newSizeBytes
}
if diff <= (newSizeBytes / 100) {
return nil // We are within 1% of the target size.
}
time.Sleep(500 * time.Millisecond)
}
return fmt.Errorf("Failed setting memory to %d bytes (currently %d bytes) as it was taking too long", newSizeBytes, curSizeBytes)
}
func (vm *qemu) updateDevices(removeDevices deviceConfig.Devices, addDevices deviceConfig.Devices, updateDevices deviceConfig.Devices, oldExpandedDevices deviceConfig.Devices) error {
isRunning := vm.IsRunning()
// Remove devices in reverse order to how they were added.
for _, dev := range removeDevices.Reversed() {
if isRunning {
err := vm.deviceStop(dev.Name, dev.Config)
if err == device.ErrUnsupportedDevType {
continue // No point in trying to remove device below.
} else if err != nil {
return errors.Wrapf(err, "Failed to stop device %q", dev.Name)
}
}
err := vm.deviceRemove(dev.Name, dev.Config)
if err != nil && err != device.ErrUnsupportedDevType {
return errors.Wrapf(err, "Failed to remove device %q", dev.Name)
}
// Check whether we are about to add the same device back with updated config and
// if not, or if the device type has changed, then remove all volatile keys for
// this device (as it's an actual removal or a device type change).
err = vm.deviceResetVolatile(dev.Name, dev.Config, addDevices[dev.Name])
if err != nil {
return errors.Wrapf(err, "Failed to reset volatile data for device %q", dev.Name)
}
}
// Add devices in sorted order, this ensures that device mounts are added in path order.
for _, dev := range addDevices.Sorted() {
err := vm.deviceAdd(dev.Name, dev.Config)
if err == device.ErrUnsupportedDevType {
continue // No point in trying to start device below.
} else if err != nil {
return errors.Wrapf(err, "Failed to add device %q", dev.Name)
}
if isRunning {
_, err := vm.deviceStart(dev.Name, dev.Config, isRunning)
if err != nil && err != device.ErrUnsupportedDevType {
return errors.Wrapf(err, "Failed to start device %q", dev.Name)
}
}
}
for _, dev := range updateDevices.Sorted() {
err := vm.deviceUpdate(dev.Name, dev.Config, oldExpandedDevices, isRunning)
if err != nil && err != device.ErrUnsupportedDevType {
return errors.Wrapf(err, "Failed to update device %q", dev.Name)
}
}
return nil
}
// deviceUpdate loads a new device and calls its Update() function.
func (vm *qemu) deviceUpdate(deviceName string, rawConfig deviceConfig.Device, oldDevices deviceConfig.Devices, isRunning bool) error {
d, _, err := vm.deviceLoad(deviceName, rawConfig)
if err != nil {
return err
}
err = d.Update(oldDevices, isRunning)
if err != nil {
return err
}
return nil
}
// deviceResetVolatile resets a device's volatile data when its removed or updated in such a way
// that it is removed then added immediately afterwards.
func (vm *qemu) deviceResetVolatile(devName string, oldConfig, newConfig deviceConfig.Device) error {
volatileClear := make(map[string]string)
devicePrefix := fmt.Sprintf("volatile.%s.", devName)
newNICType, err := nictype.NICType(vm.state, vm.Project(), newConfig)
if err != nil {
return err
}
oldNICType, err := nictype.NICType(vm.state, vm.Project(), oldConfig)
if err != nil {
return err
}
// If the device type has changed, remove all old volatile keys.
// This will occur if the newConfig is empty (i.e. the device is actually being removed) or
// if the device type is being changed but keeping the same name.
if newConfig["type"] != oldConfig["type"] || newNICType != oldNICType {
for k := range vm.localConfig {
if !strings.HasPrefix(k, devicePrefix) {
continue
}
volatileClear[k] = ""
}
return vm.VolatileSet(volatileClear)
}
// If the device type remains the same, then just remove any volatile keys that have
// the same key name present in the new config (i.e. the new config is replacing the
// old volatile key).
for k := range vm.localConfig {
if !strings.HasPrefix(k, devicePrefix) {
continue
}
devKey := strings.TrimPrefix(k, devicePrefix)
if _, found := newConfig[devKey]; found {
volatileClear[k] = ""
}
}
return vm.VolatileSet(volatileClear)
}
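// removeUnixDevices deletes any leftover unix device entries from the instance's devices path.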
func (vm *qemu) removeUnixDevices() error {
// Check that we indeed have devices to remove.
if !shared.PathExists(vm.DevicesPath()) {
return nil
}
// Load the directory listing.
dents, err := ioutil.ReadDir(vm.DevicesPath())
if err != nil {
return err
}
// Go through all the unix devices.
for _, f := range dents {
// Skip non-Unix devices.
if !strings.HasPrefix(f.Name(), "forkmknod.unix.") && !strings.HasPrefix(f.Name(), "unix.") && !strings.HasPrefix(f.Name(), "infiniband.unix.") {
continue
}
// Remove the entry
devicePath := filepath.Join(vm.DevicesPath(), f.Name())
err := os.Remove(devicePath)
if err != nil {
logger.Error("Failed removing unix device", log.Ctx{"err": err, "path": devicePath})
}
}
return nil
}
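// removeDiskDevices unmounts and deletes any leftover disk device entries from the instance's devices path.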
func (vm *qemu) removeDiskDevices() error {
// Check that we indeed have devices to remove.
if !shared.PathExists(vm.DevicesPath()) {
return nil
}
// Load the directory listing.
dents, err := ioutil.ReadDir(vm.DevicesPath())
if err != nil {
return err
}
// Go through all the disk devices
for _, f := range dents {
// Skip non-disk devices
if !strings.HasPrefix(f.Name(), "disk.") {
continue
}
// Always try to unmount the host side
_ = unix.Unmount(filepath.Join(vm.DevicesPath(), f.Name()), unix.MNT_DETACH)
// Remove the entry
diskPath := filepath.Join(vm.DevicesPath(), f.Name())
err := os.Remove(diskPath)
if err != nil {
logger.Error("Failed to remove disk device path", log.Ctx{"err": err, "path": diskPath})
}
}
return nil
}
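// cleanup unmounts leftover devices and removes the instance's devices path, shmounts path and security profiles.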
func (vm *qemu) cleanup() {
// Unmount any leftovers
vm.removeUnixDevices()
vm.removeDiskDevices()
// Remove the security profiles
apparmor.InstanceDelete(vm.state, vm)
// Remove the devices path
os.Remove(vm.DevicesPath())
// Remove the shmounts path
os.RemoveAll(vm.ShmountsPath())
}
// cleanupDevices performs any needed device cleanup steps when the instance is stopped.
func (vm *qemu) cleanupDevices() {
for _, dev := range vm.expandedDevices.Sorted() {
// Use the device interface if device supports it.
err := vm.deviceStop(dev.Name, dev.Config)
if err == device.ErrUnsupportedDevType {
continue
} else if err != nil {
logger.Errorf("Failed to stop device '%s': %v", dev.Name, err)
}
}
}
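// init computes the expanded config and expanded device list for the instance.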
func (vm *qemu) init() error {
// Compute the expanded config and device list.
err := vm.expandConfig(nil)
if err != nil {
return err
}
err = vm.expandDevices(nil)
if err != nil {
return err
}
return nil
}
// Delete the instance.
func (vm *qemu) Delete() error {
ctxMap := log.Ctx{
"project": vm.project,
"name": vm.name,
"created": vm.creationDate,
"ephemeral": vm.ephemeral,
"used": vm.lastUsedDate}
logger.Info("Deleting instance", ctxMap)
// Check if instance is delete protected.
if shared.IsTrue(vm.expandedConfig["security.protection.delete"]) && !vm.IsSnapshot() {
return fmt.Errorf("Instance is protected")
}
// Check if we're dealing with "lxd import".
// TODO consider lxd import detection for VMs.
isImport := false
// Attempt to initialize storage interface for the instance.
pool, err := vm.getStoragePool()
if err != nil && err != db.ErrNoSuchObject {
return err
} else if pool != nil {
if vm.IsSnapshot() {
if !isImport {
// Remove snapshot volume and database record.
err = pool.DeleteInstanceSnapshot(vm, nil)
if err != nil {
return err
}
}
} else {
// Remove all snapshots by initialising each snapshot as an Instance and
// calling its Delete function.
err := instance.DeleteSnapshots(vm.state, vm.Project(), vm.Name())
if err != nil {
return err
}
if !isImport {
// Remove the storage volume, snapshot volumes and database records.
err = pool.DeleteInstance(vm, nil)
if err != nil {
return err
}
}
}
}
// Perform other cleanup steps if not snapshot.
if !vm.IsSnapshot() {
// Remove all backups.
backups, err := vm.Backups()
if err != nil {
return err
}
for _, backup := range backups {
err = backup.Delete()
if err != nil {
return err
}
}
// Delete the MAAS entry.
err = vm.maasDelete()
if err != nil {
logger.Error("Failed deleting instance MAAS record", log.Ctx{"project": vm.Project(), "instance": vm.Name(), "err": err})
return err
}
// Run device removal function for each device.
for k, m := range vm.expandedDevices {
err = vm.deviceRemove(k, m)
if err != nil && err != device.ErrUnsupportedDevType {
return errors.Wrapf(err, "Failed to remove device '%s'", k)
}
}
// Clean things up.
vm.cleanup()
}
// Remove the database record of the instance or snapshot instance.
if err := vm.state.Cluster.DeleteInstance(vm.Project(), vm.Name()); err != nil {
logger.Error("Failed deleting instance entry", log.Ctx{"project": vm.Project(), "instance": vm.Name(), "err": err})
return err
}
logger.Info("Deleted instance", ctxMap)
if vm.IsSnapshot() {
vm.state.Events.SendLifecycle(vm.project, "virtual-machine-snapshot-deleted",
fmt.Sprintf("/1.0/virtual-machines/%s", vm.name), map[string]interface{}{
"snapshot_name": vm.name,
})
} else {
vm.state.Events.SendLifecycle(vm.project, "virtual-machine-deleted",
fmt.Sprintf("/1.0/virtual-machines/%s", vm.name), nil)
}
return nil
}
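// deviceAdd loads a new device and calls its Add() function.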
func (vm *qemu) deviceAdd(deviceName string, rawConfig deviceConfig.Device) error {
d, _, err := vm.deviceLoad(deviceName, rawConfig)
if err != nil {
return err
}
return d.Add()
}
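// deviceRemove loads a device and calls its Remove() function.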
func (vm *qemu) deviceRemove(deviceName string, rawConfig deviceConfig.Device) error {
logger := logging.AddContext(logger.Log, log.Ctx{"device": deviceName, "project": vm.Project(), "instance": vm.Name()})
d, _, err := vm.deviceLoad(deviceName, rawConfig)
// If deviceLoad fails with unsupported device type then return.
if err == device.ErrUnsupportedDevType {
return err
}
// If deviceLoad fails for any other reason, just log the error and proceed: in the
// scenario where a newer version of LXD has stricter validation than older versions, we
// still need to allow previously valid devices to be removed.
if err != nil {
// If there is no device returned, then we cannot proceed, so return an error.
if d == nil {
return fmt.Errorf("Device remove validation failed for %q: %v", deviceName, err)
}
logger.Error("Device remove validation failed", log.Ctx{"err": err})
}
return d.Remove()
}
// Export publishes the instance.
func (vm *qemu) Export(w io.Writer, properties map[string]string) (api.ImageMetadata, error) {
ctxMap := log.Ctx{
"project": vm.project,
"name": vm.name,
"created": vm.creationDate,
"ephemeral": vm.ephemeral,
"used": vm.lastUsedDate}
meta := api.ImageMetadata{}
if vm.IsRunning() {
return meta, fmt.Errorf("Cannot export a running instance as an image")
}
logger.Info("Exporting instance", ctxMap)
// Start the storage.
ourStart, err := vm.mount()
if err != nil {
logger.Error("Failed exporting instance", ctxMap)
return meta, err
}
if ourStart {
defer vm.unmount()
}
// Create the tarball.
tarWriter := instancewriter.NewInstanceTarWriter(w, nil)
// Path inside the tar image is the pathname starting after cDir.
cDir := vm.Path()
offset := len(cDir) + 1
writeToTar := func(path string, fi os.FileInfo, err error) error {
if err != nil {
return err
}
err = tarWriter.WriteFile(path[offset:], path, fi, false)
if err != nil {
logger.Debugf("Error tarring up %s: %s", path, err)
return err
}
return nil
}
// Look for metadata.yaml.
fnam := filepath.Join(cDir, "metadata.yaml")
if !shared.PathExists(fnam) {
// Generate a new metadata.yaml.
tempDir, err := ioutil.TempDir("", "lxd_lxd_metadata_")
if err != nil {
tarWriter.Close()
logger.Error("Failed exporting instance", ctxMap)
return meta, err
}
defer os.RemoveAll(tempDir)
// Get the instance's architecture.
var arch string
if vm.IsSnapshot() {
parentName, _, _ := shared.InstanceGetParentAndSnapshotName(vm.name)
parent, err := instance.LoadByProjectAndName(vm.state, vm.project, parentName)
if err != nil {
tarWriter.Close()
logger.Error("Failed exporting instance", ctxMap)
return meta, err
}
arch, _ = osarch.ArchitectureName(parent.Architecture())
} else {
arch, _ = osarch.ArchitectureName(vm.architecture)
}
if arch == "" {
arch, err = osarch.ArchitectureName(vm.state.OS.Architectures[0])
if err != nil {
logger.Error("Failed exporting instance", ctxMap)
return meta, err
}
}
// Fill in the metadata.
meta.Architecture = arch
meta.CreationDate = time.Now().UTC().Unix()
meta.Properties = properties
data, err := yaml.Marshal(&meta)
if err != nil {
tarWriter.Close()
logger.Error("Failed exporting instance", ctxMap)
return meta, err
}
// Write the actual file.
fnam = filepath.Join(tempDir, "metadata.yaml")
err = ioutil.WriteFile(fnam, data, 0644)
if err != nil {
tarWriter.Close()
logger.Error("Failed exporting instance", ctxMap)
return meta, err
}
fi, err := os.Lstat(fnam)
if err != nil {
tarWriter.Close()
logger.Error("Failed exporting instance", ctxMap)
return meta, err
}
tmpOffset := len(filepath.Dir(fnam)) + 1
if err := tarWriter.WriteFile(fnam[tmpOffset:], fnam, fi, false); err != nil {
tarWriter.Close()
logger.Error("Failed exporting instance", ctxMap)
return meta, err
}
} else {
// Parse the metadata.
content, err := ioutil.ReadFile(fnam)
if err != nil {
tarWriter.Close()
logger.Error("Failed exporting instance", ctxMap)
return meta, err
}
err = yaml.Unmarshal(content, &meta)
if err != nil {
tarWriter.Close()
logger.Error("Failed exporting instance", ctxMap)
return meta, err
}
if properties != nil {
meta.Properties = properties
// Generate a new metadata.yaml.
tempDir, err := ioutil.TempDir("", "lxd_lxd_metadata_")
if err != nil {
tarWriter.Close()
logger.Error("Failed exporting instance", ctxMap)
return meta, err
}
defer os.RemoveAll(tempDir)
data, err := yaml.Marshal(&meta)
if err != nil {
tarWriter.Close()
logger.Error("Failed exporting instance", ctxMap)
return meta, err
}
// Write the actual file.
fnam = filepath.Join(tempDir, "metadata.yaml")
err = ioutil.WriteFile(fnam, data, 0644)
if err != nil {
tarWriter.Close()
logger.Error("Failed exporting instance", ctxMap)
return meta, err
}
}
// Include metadata.yaml in the tarball.
fi, err := os.Lstat(fnam)
if err != nil {
tarWriter.Close()
logger.Debugf("Error statting %s during export", fnam)
logger.Error("Failed exporting instance", ctxMap)
return meta, err
}
if properties != nil {
tmpOffset := len(filepath.Dir(fnam)) + 1
err = tarWriter.WriteFile(fnam[tmpOffset:], fnam, fi, false)
} else {
err = tarWriter.WriteFile(fnam[offset:], fnam, fi, false)
}
if err != nil {
tarWriter.Close()
logger.Debugf("Error writing to tarfile: %s", err)
logger.Error("Failed exporting instance", ctxMap)
return meta, err
}
}
// Convert and include the root image.
pool, err := vm.getStoragePool()
if err != nil {
return meta, err
}
rootDrivePath, err := pool.GetInstanceDisk(vm)
if err != nil {
return meta, err
}
// Convert from raw to qcow2 and add to tarball.
tmpPath, err := ioutil.TempDir(shared.VarPath("images"), "lxd_export_")
if err != nil {
return meta, err
}
defer os.RemoveAll(tmpPath)
fPath := fmt.Sprintf("%s/rootfs.img", tmpPath)
_, err = shared.RunCommand("qemu-img", "convert", "-c", "-O", "qcow2", rootDrivePath, fPath)
if err != nil {
return meta, fmt.Errorf("Failed converting image to qcow2: %v", err)
}
fi, err := os.Lstat(fPath)
if err != nil {
return meta, err
}
imgOffset := len(tmpPath) + 1
err = tarWriter.WriteFile(fPath[imgOffset:], fPath, fi, false)
if err != nil {
return meta, err
}
// Include all the templates.
fnam = vm.TemplatesPath()
if shared.PathExists(fnam) {
err = filepath.Walk(fnam, writeToTar)
if err != nil {
logger.Error("Failed exporting instance", ctxMap)
return meta, err
}
}
err = tarWriter.Close()
if err != nil {
logger.Error("Failed exporting instance", ctxMap)
return meta, err
}
logger.Info("Exported instance", ctxMap)
return meta, nil
}
// Migrate migrates the instance to another node.
func (vm *qemu) Migrate(args *instance.CriuMigrationArgs) error {
return instance.ErrNotImplemented
}
// CGroupSet is not implemented for VMs.
func (vm *qemu) CGroupSet(key string, value string) error {
return instance.ErrNotImplemented
}
// VolatileSet sets one or more volatile config keys.
func (vm *qemu) VolatileSet(changes map[string]string) error {
// Sanity check.
for key := range changes {
if !strings.HasPrefix(key, "volatile.") {
return fmt.Errorf("Only volatile keys can be modified with VolatileSet")
}
}
// Update the database.
var err error
if vm.IsSnapshot() {
err = vm.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
return tx.UpdateInstanceSnapshotConfig(vm.id, changes)
})
} else {
err = vm.state.Cluster.Transaction(func(tx *db.ClusterTx) error {
return tx.UpdateInstanceConfig(vm.id, changes)
})
}
if err != nil {
return errors.Wrap(err, "Failed to volatile config")
}
// Apply the change locally.
for key, value := range changes {
if value == "" {
delete(vm.expandedConfig, key)
delete(vm.localConfig, key)
continue
}
vm.expandedConfig[key] = value
vm.localConfig[key] = value
}
return nil
}
// FileExists is not implemented for VMs.
func (vm *qemu) FileExists(path string) error {
return instance.ErrNotImplemented
}
// FilePull retrieves a file from the instance.
func (vm *qemu) FilePull(srcPath string, dstPath string) (int64, int64, os.FileMode, string, []string, error) {
client, err := vm.getAgentClient()
if err != nil {
return 0, 0, 0, "", nil, err
}
agent, err := lxdClient.ConnectLXDHTTP(nil, client)
if err != nil {
logger.Errorf("Failed to connect to lxd-agent on %s: %v", vm.Name(), err)
return 0, 0, 0, "", nil, fmt.Errorf("Failed to connect to lxd-agent")
}
defer agent.Disconnect()
content, resp, err := agent.GetInstanceFile("", srcPath)
if err != nil {
return 0, 0, 0, "", nil, err
}
switch resp.Type {
case "file", "symlink":
data, err := ioutil.ReadAll(content)
if err != nil {
return 0, 0, 0, "", nil, err
}
err = ioutil.WriteFile(dstPath, data, os.FileMode(resp.Mode))
if err != nil {
return 0, 0, 0, "", nil, err
}
err = os.Lchown(dstPath, int(resp.UID), int(resp.GID))
if err != nil {
return 0, 0, 0, "", nil, err
}
return resp.UID, resp.GID, os.FileMode(resp.Mode), resp.Type, nil, nil
case "directory":
return resp.UID, resp.GID, os.FileMode(resp.Mode), resp.Type, resp.Entries, nil
}
return 0, 0, 0, "", nil, fmt.Errorf("bad file type %s", resp.Type)
}
// FilePush pushes a file into the instance.
func (vm *qemu) FilePush(fileType string, srcPath string, dstPath string, uid int64, gid int64, mode int, write string) error {
client, err := vm.getAgentClient()
if err != nil {
return err
}
agent, err := lxdClient.ConnectLXDHTTP(nil, client)
if err != nil {
logger.Errorf("Failed to connect to lxd-agent on %s: %v", vm.Name(), err)
return fmt.Errorf("Failed to connect to lxd-agent")
}
defer agent.Disconnect()
args := lxdClient.InstanceFileArgs{
GID: gid,
Mode: mode,
Type: fileType,
UID: uid,
WriteMode: write,
}
if fileType == "file" {
f, err := os.Open(srcPath)
if err != nil {
return err
}
defer f.Close()
args.Content = f
} else if fileType == "symlink" {
symlinkTarget, err := os.Readlink(dstPath)
if err != nil {
return err
}
args.Content = bytes.NewReader([]byte(symlinkTarget))
}
err = agent.CreateInstanceFile("", dstPath, args)
if err != nil {
return err
}
return nil
}
// FileRemove removes a file from the instance.
func (vm *qemu) FileRemove(path string) error {
// Connect to the agent.
client, err := vm.getAgentClient()
if err != nil {
return err
}
agent, err := lxdClient.ConnectLXDHTTP(nil, client)
if err != nil {
return fmt.Errorf("Failed to connect to lxd-agent")
}
defer agent.Disconnect()
// Delete instance file.
err = agent.DeleteInstanceFile("", path)
if err != nil {
return err
}
return nil
}
// Console gets access to the instance's console.
func (vm *qemu) Console(protocol string) (*os.File, chan error, error) {
switch protocol {
case instance.ConsoleTypeConsole:
return vm.console()
case instance.ConsoleTypeVGA:
return vm.vga()
default:
return nil, nil, fmt.Errorf("Unknown protocol %q", protocol)
}
}
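// console returns a file handle to the instance's text console along with a disconnect channel.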
func (vm *qemu) console() (*os.File, chan error, error) {
chDisconnect := make(chan error, 1)
// Avoid duplicate connects.
vmConsoleLock.Lock()
if vmConsole[vm.id] {
vmConsoleLock.Unlock()
return nil, nil, fmt.Errorf("There is already an active console for this instance")
}
vmConsoleLock.Unlock()
// Connect to the monitor.
monitor, err := qmp.Connect(vm.monitorPath(), qemuSerialChardevName, vm.getMonitorEventHandler())
if err != nil {
return nil, nil, err // The VM isn't running as no monitor socket available.
}
// Get the console.
console, err := monitor.Console("console")
if err != nil {
return nil, nil, err
}
// Record the console is in use.
vmConsoleLock.Lock()
vmConsole[vm.id] = true
vmConsoleLock.Unlock()
// Handle console disconnection.
go func() {
<-chDisconnect
vmConsoleLock.Lock()
delete(vmConsole, vm.id)
vmConsoleLock.Unlock()
}()
return console, chDisconnect, nil
}
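// vga returns a file handle to the instance's SPICE socket for graphical console access.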
func (vm *qemu) vga() (*os.File, chan error, error) {
// Open the spice socket
conn, err := net.Dial("unix", vm.spicePath())
if err != nil {
return nil, nil, errors.Wrapf(err, "Connect to SPICE socket %q", vm.spicePath())
}
file, err := (conn.(*net.UnixConn)).File()
if err != nil {
return nil, nil, errors.Wrap(err, "Get socket file")
}
conn.Close()
return file, nil, nil
}
// Exec a command inside the instance.
func (vm *qemu) Exec(req api.InstanceExecPost, stdin *os.File, stdout *os.File, stderr *os.File) (instance.Cmd, error) {
revert := revert.New()
defer revert.Fail()
client, err := vm.getAgentClient()
if err != nil {
return nil, err
}
agent, err := lxdClient.ConnectLXDHTTP(nil, client)
if err != nil {
logger.Errorf("Failed to connect to lxd-agent on %s: %v", vm.Name(), err)
return nil, fmt.Errorf("Failed to connect to lxd-agent")
}
revert.Add(agent.Disconnect)
req.WaitForWS = true
if req.Interactive {
// Set console to raw.
oldttystate, err := termios.MakeRaw(int(stdin.Fd()))
if err != nil {
return nil, err
}
revert.Add(func() { termios.Restore(int(stdin.Fd()), oldttystate) })
}
dataDone := make(chan bool)
controlSendCh := make(chan api.InstanceExecControl)
controlResCh := make(chan error)
// This is the signal control handler; it receives signals from the lxc CLI and forwards them to the VM agent.
controlHandler := func(control *websocket.Conn) {
closeMsg := websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")
defer control.WriteMessage(websocket.CloseMessage, closeMsg)
for {
select {
case cmd := <-controlSendCh:
controlResCh <- control.WriteJSON(cmd)
case <-dataDone:
return
}
}
}
args := lxdClient.InstanceExecArgs{
Stdin: stdin,
Stdout: stdout,
Stderr: stderr,
DataDone: dataDone,
Control: controlHandler,
}
op, err := agent.ExecInstance("", req, &args)
if err != nil {
return nil, err
}
instCmd := &qemuCmd{
cmd: op,
attachedChildPid: 0, // Process is not running on LXD host.
dataDone: args.DataDone,
cleanupFunc: revert.Clone().Fail, // Pass revert function clone as clean up function.
controlSendCh: controlSendCh,
controlResCh: controlResCh,
}
revert.Success()
return instCmd, nil
}
// Render returns info about the instance.
func (vm *qemu) Render(options ...func(response interface{}) error) (interface{}, interface{}, error) {
if vm.IsSnapshot() {
// Prepare the ETag
etag := []interface{}{vm.expiryDate}
snapState := api.InstanceSnapshot{
CreatedAt: vm.creationDate,
ExpandedConfig: vm.expandedConfig,
ExpandedDevices: vm.expandedDevices.CloneNative(),
LastUsedAt: vm.lastUsedDate,
Name: strings.SplitN(vm.name, "/", 2)[1],
Stateful: vm.stateful,
Size: -1, // Default to uninitialised/error state (0 means no CoW usage).
}
snapState.Architecture = vm.architectureName
snapState.Config = vm.localConfig
snapState.Devices = vm.localDevices.CloneNative()
snapState.Ephemeral = vm.ephemeral
snapState.Profiles = vm.profiles
snapState.ExpiresAt = vm.expiryDate
for _, option := range options {
err := option(&snapState)
if err != nil {
return nil, nil, err
}
}
return &snapState, etag, nil
}
// Prepare the ETag
etag := []interface{}{vm.architecture, vm.localConfig, vm.localDevices, vm.ephemeral, vm.profiles}
instState := api.Instance{
ExpandedConfig: vm.expandedConfig,
ExpandedDevices: vm.expandedDevices.CloneNative(),
Name: vm.name,
Status: vm.statusCode().String(),
StatusCode: vm.statusCode(),
Location: vm.node,
Type: vm.Type().String(),
}
instState.Description = vm.description
instState.Architecture = vm.architectureName
instState.Config = vm.localConfig
instState.CreatedAt = vm.creationDate
instState.Devices = vm.localDevices.CloneNative()
instState.Ephemeral = vm.ephemeral
instState.LastUsedAt = vm.lastUsedDate
instState.Profiles = vm.profiles
instState.Stateful = vm.stateful
for _, option := range options {
err := option(&instState)
if err != nil {
return nil, nil, err
}
}
return &instState, etag, nil
}
// RenderFull returns all info about the instance.
func (vm *qemu) RenderFull() (*api.InstanceFull, interface{}, error) {
if vm.IsSnapshot() {
return nil, nil, fmt.Errorf("RenderFull doesn't work with snapshots")
}
// Get the Instance struct.
base, etag, err := vm.Render()
if err != nil {
return nil, nil, err
}
// Convert to InstanceFull.
vmState := api.InstanceFull{Instance: *base.(*api.Instance)}
// Add the InstanceState.
vmState.State, err = vm.RenderState()
if err != nil {
return nil, nil, err
}
// Add the InstanceSnapshots.
snaps, err := vm.Snapshots()
if err != nil {
return nil, nil, err
}
for _, snap := range snaps {
render, _, err := snap.Render()
if err != nil {
return nil, nil, err
}
if vmState.Snapshots == nil {
vmState.Snapshots = []api.InstanceSnapshot{}
}
vmState.Snapshots = append(vmState.Snapshots, *render.(*api.InstanceSnapshot))
}
// Add the InstanceBackups.
backups, err := vm.Backups()
if err != nil {
return nil, nil, err
}
for _, backup := range backups {
render := backup.Render()
if vmState.Backups == nil {
vmState.Backups = []api.InstanceBackup{}
}
vmState.Backups = append(vmState.Backups, *render)
}
return &vmState, etag, nil
}
// RenderState returns just state info about the instance.
func (vm *qemu) RenderState() (*api.InstanceState, error) {
statusCode := vm.statusCode()
pid, _ := vm.pid()
if statusCode == api.Running {
// Try and get state info from agent.
status, err := vm.agentGetState()
if err != nil {
if err != errQemuAgentOffline {
logger.Warn("Could not get VM state from agent", log.Ctx{"project": vm.Project(), "instance": vm.Name(), "err": err})
}
// Fallback data if agent is not reachable.
status = &api.InstanceState{}
status.Processes = -1
networks := map[string]api.InstanceStateNetwork{}
for k, m := range vm.ExpandedDevices() {
if m["type"] != "nic" {
continue
}
d, _, err := vm.deviceLoad(k, m)
if err != nil {
logger.Warn("Could not load device", log.Ctx{"project": vm.Project(), "instance": vm.Name(), "device": k, "err": err})
continue
}
// Only some NIC types support fallback state mechanisms when there is no agent.
nic, ok := d.(device.NICState)
if !ok {
continue
}
network, err := nic.State()
if err != nil {
return nil, errors.Wrapf(err, "Failed getting NIC state for %q", k)
}
if network != nil {
networks[k] = *network
}
}
status.Network = networks
}
// Populate host_name for network devices.
for k, m := range vm.ExpandedDevices() {
// We only care about nics.
if m["type"] != "nic" {
continue
}
// Get hwaddr from static or volatile config.
hwaddr := m["hwaddr"]
if hwaddr == "" {
hwaddr = vm.localConfig[fmt.Sprintf("volatile.%s.hwaddr", k)]
}
// We have to match on hwaddr as the device name can be different from the configured device
// name when reported from the lxd-agent inside the VM (due to the guest OS choosing the name).
for netName, netStatus := range status.Network {
if netStatus.Hwaddr == hwaddr {
if netStatus.HostName == "" {
netStatus.HostName = vm.localConfig[fmt.Sprintf("volatile.%s.host_name", k)]
status.Network[netName] = netStatus
}
}
}
}
status.Pid = int64(pid)
status.Status = statusCode.String()
status.StatusCode = statusCode
status.Disk, err = vm.diskState()
if err != nil && err != storageDrivers.ErrNotSupported {
logger.Warn("Error getting disk usage", log.Ctx{"project": vm.Project(), "instance": vm.Name(), "err": err})
}
return status, nil
}
// At least return the Status and StatusCode if we couldn't get any
// information from the VM agent.
return &api.InstanceState{
Pid: int64(pid),
Status: statusCode.String(),
StatusCode: statusCode,
}, nil
}
// diskState gets disk usage info.
func (vm *qemu) diskState() (map[string]api.InstanceStateDisk, error) {
pool, err := vm.getStoragePool()
if err != nil {
return nil, err
}
// Get the root disk device config.
rootDiskName, _, err := shared.GetRootDiskDevice(vm.ExpandedDevices().CloneNative())
if err != nil {
return nil, err
}
usage, err := pool.GetInstanceUsage(vm)
if err != nil {
return nil, err
}
disk := map[string]api.InstanceStateDisk{}
disk[rootDiskName] = api.InstanceStateDisk{Usage: usage}
return disk, nil
}
// agentGetState connects to the agent inside of the VM and does
// an API call to get the current state.
func (vm *qemu) agentGetState() (*api.InstanceState, error) {
// Check if the agent is running.
monitor, err := qmp.Connect(vm.monitorPath(), qemuSerialChardevName, vm.getMonitorEventHandler())
if err != nil {
return nil, err
}
if !monitor.AgentReady() {
return nil, errQemuAgentOffline
}
client, err := vm.getAgentClient()
if err != nil {
return nil, err
}
agent, err := lxdClient.ConnectLXDHTTP(nil, client)
if err != nil {
return nil, err
}
defer agent.Disconnect()
status, _, err := agent.GetInstanceState("")
if err != nil {
return nil, err
}
return status, nil
}
// IsRunning returns whether or not the instance is running.
func (vm *qemu) IsRunning() bool {
state := vm.State()
return state != "STOPPED"
}
// IsFrozen returns whether the instance is frozen or not.
func (vm *qemu) IsFrozen() bool {
return vm.State() == "FROZEN"
}
// IsEphemeral returns whether the instance is ephemeral or not.
func (vm *qemu) IsEphemeral() bool {
return vm.ephemeral
}
// IsSnapshot returns whether the instance is a snapshot or not.
func (vm *qemu) IsSnapshot() bool {
return vm.snapshot
}
// IsStateful returns whether the instance is stateful or not.
func (vm *qemu) IsStateful() bool {
return vm.stateful
}
// DeviceEventHandler handles events occurring on the instance's devices.
func (vm *qemu) DeviceEventHandler(runConf *deviceConfig.RunConfig) error {
return fmt.Errorf("DeviceEventHandler Not implemented")
}
// ID returns the instance's ID.
func (vm *qemu) ID() int {
return vm.id
}
// vsockID returns the vsock context ID, 3 being the first ID that can be used.
func (vm *qemu) vsockID() int {
return vm.id + 3
}
// Location returns instance's location.
func (vm *qemu) Location() string {
return vm.node
}
// Name returns the instance's name.
func (vm *qemu) Name() string {
return vm.name
}
// Description returns the instance's description.
func (vm *qemu) Description() string {
return vm.description
}
// Architecture returns the instance's architecture.
func (vm *qemu) Architecture() int {
return vm.architecture
}
// CreationDate returns the instance's creation date.
func (vm *qemu) CreationDate() time.Time {
return vm.creationDate
}
// LastUsedDate returns the instance's last used date.
func (vm *qemu) LastUsedDate() time.Time {
return vm.lastUsedDate
}
// Profiles returns the instance's profiles.
func (vm *qemu) Profiles() []string {
return vm.profiles
}
// InitPID returns the instance's current process ID.
func (vm *qemu) InitPID() int {
pid, _ := vm.pid()
return pid
}
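// statusCode determines the instance's current status code by querying the QEMU monitor.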
func (vm *qemu) statusCode() api.StatusCode {
// Connect to the monitor.
monitor, err := qmp.Connect(vm.monitorPath(), qemuSerialChardevName, vm.getMonitorEventHandler())
if err != nil {
// If we cannot connect to the monitor but the qemu process in the pid file still exists,
// then qemu has likely crashed/hung and this instance is in an error state.
pid, _ := vm.pid()
if pid > 0 && shared.PathExists(fmt.Sprintf("/proc/%d", pid)) {
return api.Error
}
// If we fail to connect, chances are the VM isn't running.
return api.Stopped
}
status, err := monitor.Status()
if err != nil {
if err == qmp.ErrMonitorDisconnect {
return api.Stopped
}
return api.Error
}
if status == "running" {
return api.Running
} else if status == "paused" {
return api.Frozen
}
return api.Stopped
}
// State returns the instance's state code.
func (vm *qemu) State() string {
return strings.ToUpper(vm.statusCode().String())
}
// ExpiryDate returns when this snapshot expires.
func (vm *qemu) ExpiryDate() time.Time {
if vm.IsSnapshot() {
return vm.expiryDate
}
// Return zero time if the instance is not a snapshot.
return time.Time{}
}
// Path returns the instance's path.
func (vm *qemu) Path() string {
return storagePools.InstancePath(vm.Type(), vm.Project(), vm.Name(), vm.IsSnapshot())
}
// DevicesPath returns the instance's devices path.
func (vm *qemu) DevicesPath() string {
name := project.Instance(vm.Project(), vm.Name())
return shared.VarPath("devices", name)
}
// ShmountsPath returns the instance's shared mounts path.
func (vm *qemu) ShmountsPath() string {
name := project.Instance(vm.Project(), vm.Name())
return shared.VarPath("shmounts", name)
}
// LogPath returns the instance's log path.
func (vm *qemu) LogPath() string {
name := project.Instance(vm.Project(), vm.Name())
return shared.LogPath(name)
}
// EarlyLogFilePath returns the instance's early log path.
func (vm *qemu) EarlyLogFilePath() string {
return filepath.Join(vm.LogPath(), "qemu.early.log")
}
// LogFilePath returns the instance's log path.
func (vm *qemu) LogFilePath() string {
return filepath.Join(vm.LogPath(), "qemu.log")
}
// ConsoleBufferLogPath returns the instance's console buffer log path.
func (vm *qemu) ConsoleBufferLogPath() string {
return filepath.Join(vm.LogPath(), "console.log")
}
// RootfsPath returns the instance's rootfs path.
func (vm *qemu) RootfsPath() string {
return filepath.Join(vm.Path(), "rootfs")
}
// TemplatesPath returns the instance's templates path.
func (vm *qemu) TemplatesPath() string {
return filepath.Join(vm.Path(), "templates")
}
// StatePath returns the instance's state path.
func (vm *qemu) StatePath() string {
return filepath.Join(vm.Path(), "state")
}
// StoragePool returns the name of the instance's storage pool.
func (vm *qemu) StoragePool() (string, error) {
poolName, err := vm.state.Cluster.GetInstancePool(vm.Project(), vm.Name())
if err != nil {
return "", err
}
return poolName, nil
}
// SetOperation sets the current operation.
func (vm *qemu) SetOperation(op *operations.Operation) {
vm.op = op
}
// StorageStart deprecated.
func (vm *qemu) StorageStart() (bool, error) {
return false, storagePools.ErrNotImplemented
}
// StorageStop deprecated.
func (vm *qemu) StorageStop() (bool, error) {
return false, storagePools.ErrNotImplemented
}
// DeferTemplateApply not used currently.
func (vm *qemu) DeferTemplateApply(trigger string) error {
err := vm.VolatileSet(map[string]string{"volatile.apply_template": trigger})
if err != nil {
return errors.Wrap(err, "Failed to set apply_template volatile key")
}
return nil
}
// FillNetworkDevice takes a nic or infiniband device type and enriches it with automatically
// generated name and hwaddr properties if these are missing from the device.
func (vm *qemu) FillNetworkDevice(name string, m deviceConfig.Device) (deviceConfig.Device, error) {
var err error
newDevice := m.Clone()
updateKey := func(key string, value string) error {
tx, err := vm.state.Cluster.Begin()
if err != nil {
return err
}
err = db.CreateInstanceConfig(tx, vm.id, map[string]string{key: value})
if err != nil {
tx.Rollback()
return err
}
err = db.TxCommit(tx)
if err != nil {
return err
}
return nil
}
nicType, err := nictype.NICType(vm.state, vm.Project(), m)
if err != nil {
return nil, err
}
// Fill in the MAC address
if !shared.StringInSlice(nicType, []string{"physical", "ipvlan", "sriov"}) && m["hwaddr"] == "" {
configKey := fmt.Sprintf("volatile.%s.hwaddr", name)
volatileHwaddr := vm.localConfig[configKey]
if volatileHwaddr == "" {
// Generate a new MAC address
volatileHwaddr, err = instance.DeviceNextInterfaceHWAddr()
if err != nil {
return nil, err
}
// Update the database
err = query.Retry(func() error {
err := updateKey(configKey, volatileHwaddr)
if err != nil {
// Check if something else filled it in behind our back
value, err1 := vm.state.Cluster.GetInstanceConfig(vm.id, configKey)
if err1 != nil || value == "" {
return err
}
vm.localConfig[configKey] = value
vm.expandedConfig[configKey] = value
return nil
}
vm.localConfig[configKey] = volatileHwaddr
vm.expandedConfig[configKey] = volatileHwaddr
return nil
})
if err != nil {
return nil, err
}
}
newDevice["hwaddr"] = volatileHwaddr
}
return newDevice, nil
}
// Internal MAAS handling.
func (vm *qemu) maasInterfaces(devices map[string]map[string]string) ([]maas.ContainerInterface, error) {
interfaces := []maas.ContainerInterface{}
for k, m := range devices {
if m["type"] != "nic" {
continue
}
if m["maas.subnet.ipv4"] == "" && m["maas.subnet.ipv6"] == "" {
continue
}
m, err := vm.FillNetworkDevice(k, m)
if err != nil {
return nil, err
}
subnets := []maas.ContainerInterfaceSubnet{}
// IPv4
if m["maas.subnet.ipv4"] != "" {
subnet := maas.ContainerInterfaceSubnet{
Name: m["maas.subnet.ipv4"],
Address: m["ipv4.address"],
}
subnets = append(subnets, subnet)
}
// IPv6
if m["maas.subnet.ipv6"] != "" {
subnet := maas.ContainerInterfaceSubnet{
Name: m["maas.subnet.ipv6"],
Address: m["ipv6.address"],
}
subnets = append(subnets, subnet)
}
iface := maas.ContainerInterface{
Name: m["name"],
MACAddress: m["hwaddr"],
Subnets: subnets,
}
interfaces = append(interfaces, iface)
}
return interfaces, nil
}
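// maasRename renames the instance's MAAS container record if MAAS is configured.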
func (vm *qemu) maasRename(newName string) error {
maasURL, err := cluster.ConfigGetString(vm.state.Cluster, "maas.api.url")
if err != nil {
return err
}
if maasURL == "" {
return nil
}
interfaces, err := vm.maasInterfaces(vm.expandedDevices.CloneNative())
if err != nil {
return err
}
if len(interfaces) == 0 {
return nil
}
if vm.state.MAAS == nil {
return fmt.Errorf("Can't perform the operation because MAAS is currently unavailable")
}
exists, err := vm.state.MAAS.DefinedContainer(vm)
if err != nil {
return err
}
if !exists {
return vm.maasUpdate(nil)
}
return vm.state.MAAS.RenameContainer(vm, newName)
}
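// maasDelete removes the instance's MAAS container record if MAAS is configured.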
func (vm *qemu) maasDelete() error {
maasURL, err := cluster.ConfigGetString(vm.state.Cluster, "maas.api.url")
if err != nil {
return err
}
if maasURL == "" {
return nil
}
interfaces, err := vm.maasInterfaces(vm.expandedDevices.CloneNative())
if err != nil {
return err
}
if len(interfaces) == 0 {
return nil
}
if vm.state.MAAS == nil {
return fmt.Errorf("Can't perform the operation because MAAS is currently unavailable")
}
exists, err := vm.state.MAAS.DefinedContainer(vm)
if err != nil {
return err
}
if !exists {
return nil
}
return vm.state.MAAS.DeleteContainer(vm)
}
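// maasUpdate creates or updates the instance's MAAS container record from its current interfaces.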
func (vm *qemu) maasUpdate(oldDevices map[string]map[string]string) error {
// Check if MAAS is configured
maasURL, err := cluster.ConfigGetString(vm.state.Cluster, "maas.api.url")
if err != nil {
return err
}
if maasURL == "" {
return nil
}
// Check if there's something that uses MAAS
interfaces, err := vm.maasInterfaces(vm.expandedDevices.CloneNative())
if err != nil {
return err
}
var oldInterfaces []maas.ContainerInterface
if oldDevices != nil {
oldInterfaces, err = vm.maasInterfaces(oldDevices)
if err != nil {
return err
}
}
if len(interfaces) == 0 && len(oldInterfaces) == 0 {
return nil
}
// See if we're connected to MAAS
if vm.state.MAAS == nil {
return fmt.Errorf("Can't perform the operation because MAAS is currently unavailable")
}
exists, err := vm.state.MAAS.DefinedContainer(vm)
if err != nil {
return err
}
if exists {
if len(interfaces) == 0 && len(oldInterfaces) > 0 {
return vm.state.MAAS.DeleteContainer(vm)
}
return vm.state.MAAS.UpdateContainer(vm, interfaces)
}
return vm.state.MAAS.CreateContainer(vm, interfaces)
}
// UpdateBackupFile writes the instance's backup.yaml file to storage.
func (vm *qemu) UpdateBackupFile() error {
pool, err := vm.getStoragePool()
if err != nil {
return err
}
return pool.UpdateInstanceBackupFile(vm, nil)
}
// cpuTopology takes a user CPU range and returns the number of sockets, cores and threads to configure,
// as well as a map of vCPU to thread ID for pinning and a map of NUMA nodes to vCPUs for NUMA layout.
func (vm *qemu) cpuTopology(limit string) (int, int, int, map[uint64]uint64, map[uint64][]uint64, error) {
// Get CPU topology.
cpus, err := resources.GetCPU()
if err != nil {
return -1, -1, -1, nil, nil, err
}
// Expand the pins.
pins, err := resources.ParseCpuset(limit)
if err != nil {
return -1, -1, -1, nil, nil, err
}
// Match tracking.
vcpus := map[uint64]uint64{}
sockets := map[uint64][]uint64{}
cores := map[uint64][]uint64{}
numaNodes := map[uint64][]uint64{}
// Go through the physical CPUs looking for matches.
i := uint64(0)
for _, cpu := range cpus.Sockets {
for _, core := range cpu.Cores {
for _, thread := range core.Threads {
for _, pin := range pins {
if thread.ID == int64(pin) {
// Found a matching CPU.
vcpus[i] = uint64(pin)
// Track cores per socket.
_, ok := sockets[cpu.Socket]
if !ok {
sockets[cpu.Socket] = []uint64{}
}
if !shared.Uint64InSlice(core.Core, sockets[cpu.Socket]) {
sockets[cpu.Socket] = append(sockets[cpu.Socket], core.Core)
}
// Track threads per core.
_, ok = cores[core.Core]
if !ok {
cores[core.Core] = []uint64{}
}
if !shared.Uint64InSlice(thread.Thread, cores[core.Core]) {
cores[core.Core] = append(cores[core.Core], thread.Thread)
}
// Record NUMA node for thread.
_, ok = numaNodes[thread.NUMANode]
if !ok {
numaNodes[thread.NUMANode] = []uint64{}
}
numaNodes[thread.NUMANode] = append(numaNodes[thread.NUMANode], i)
i++
}
}
}
}
}
// Confirm we're getting the expected number of CPUs.
if len(pins) != len(vcpus) {
return -1, -1, -1, nil, nil, fmt.Errorf("Unavailable CPUs requested: %s", limit)
}
// Validate the topology.
valid := true
nrSockets := 0
nrCores := 0
nrThreads := 0
// Confirm that there are no balancing inconsistencies.
countCores := -1
for _, cores := range sockets {
if countCores != -1 && len(cores) != countCores {
valid = false
break
}
countCores = len(cores)
}
countThreads := -1
for _, threads := range cores {
if countThreads != -1 && len(threads) != countThreads {
valid = false
break
}
countThreads = len(threads)
}
// Check against double listing of CPU.
if len(sockets)*countCores*countThreads != len(vcpus) {
valid = false
}
// Build up the topology.
if valid {
// Valid topology.
nrSockets = len(sockets)
nrCores = countCores
nrThreads = countThreads
} else {
logger.Warnf("Instance '%s' uses a CPU pinning profile which doesn't match hardware layout", project.Instance(vm.Project(), vm.Name()))
// Fall back to pretending everything is a core.
nrSockets = 1
nrCores = len(vcpus)
nrThreads = 1
}
return nrSockets, nrCores, nrThreads, vcpus, numaNodes, nil
}
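// expandConfig expands the instance's local config with the given profiles, loading them from the database when nil.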
func (vm *qemu) expandConfig(profiles []api.Profile) error {
if profiles == nil && len(vm.profiles) > 0 {
var err error
profiles, err = vm.state.Cluster.GetProfiles(vm.project, vm.profiles)
if err != nil {
return err
}
}
vm.expandedConfig = db.ExpandInstanceConfig(vm.localConfig, profiles)
return nil
}
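// devlxdEventSend forwards a devlxd event to the lxd-agent running inside the VM.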
func (vm *qemu) devlxdEventSend(eventType string, eventMessage interface{}) error {
event := shared.Jmap{}
event["type"] = eventType
event["timestamp"] = time.Now()
event["metadata"] = eventMessage
client, err := vm.getAgentClient()
if err != nil {
return err
}
agent, err := lxdClient.ConnectLXDHTTP(nil, client)
if err != nil {
logger.Errorf("Failed to connect to lxd-agent on %s: %v", vm.Name(), err)
return fmt.Errorf("Failed to connect to lxd-agent")
}
defer agent.Disconnect()
_, _, err = agent.RawQuery("POST", "/1.0/events", &event, "")
if err != nil {
return err
}
return nil
}
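// writeInstanceData writes the instance-data file into the config drive when security.devlxd is enabled.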
func (vm *qemu) writeInstanceData() error {
// Only write instance-data file if security.devlxd is true.
if !shared.IsTrue(vm.expandedConfig["security.devlxd"]) {
return nil
}
// Instance data for devlxd.
configDrivePath := filepath.Join(vm.Path(), "config")
userConfig := make(map[string]string)
for k, v := range vm.ExpandedConfig() {
if !strings.HasPrefix(k, "user.") {
continue
}
userConfig[k] = v
}
out, err := json.Marshal(struct {
Name string `json:"name"`
Config map[string]string `json:"config,omitempty"`
}{vm.Name(), userConfig})
if err != nil {
return err
}
err = ioutil.WriteFile(filepath.Join(configDrivePath, "instance-data"), out, 0600)
if err != nil {
return err
}
return nil
}
| [
"\"LXD_OVMF_PATH\"",
"\"LXD_OVMF_PATH\""
]
| []
| [
"LXD_OVMF_PATH"
]
| [] | ["LXD_OVMF_PATH"] | go | 1 | 0 | |
internal/infra/logger.go | package infra
import (
"fmt"
"github.com/labstack/echo/v4"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"time"
"contrib.go.opencensus.io/exporter/stackdriver"
"go.opencensus.io/plugin/ochttp"
"go.opencensus.io/trace"
"os"
)
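// NewLogger builds a zap logger with the production configuration, returning nil if construction fails.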
func NewLogger() *zap.Logger {
logger, err := zap.NewProductionConfig().Build()
if err != nil {
fmt.Printf("Coudl not create zap logger, %v\n", err)
return nil
}
return logger
}
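// InitTrace registers a Stackdriver trace exporter and wraps the Echo handler with OpenCensus when tracing is enabled.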
func InitTrace(log *zap.Logger, e *echo.Echo) {
projectId := os.Getenv("PROJECT_ID")
if os.Getenv("TRACE_ENABLED") == "true" && len(projectId) > 0 {
log.Info("opencensus is enabled")
exporter, err := stackdriver.NewExporter(stackdriver.Options{
ProjectID: projectId,
})
if err == nil {
trace.RegisterExporter(exporter)
} else {
log.Error("infra:InitTrace", zap.Error(err))
}
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
e.Use(newCensus())
var ocHandler = &ochttp.Handler{Handler: e, IsPublicEndpoint: true}
e.Server.Handler = ocHandler
}
}
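// zapLogger returns an Echo middleware that logs each request's status, latency and metadata with zap.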
func zapLogger(log *zap.Logger) echo.MiddlewareFunc {
return func(next echo.HandlerFunc) echo.HandlerFunc {
return func(c echo.Context) error {
start := time.Now()
err := next(c)
if err != nil {
c.Error(err)
}
req := c.Request()
res := c.Response()
id := req.Header.Get(echo.HeaderXRequestID)
if id == "" {
id = res.Header().Get(echo.HeaderXRequestID)
}
fields := []zapcore.Field{
zap.Int("status", res.Status),
zap.String("latency", time.Since(start).String()),
zap.String("id", id),
zap.String("method", req.Method),
zap.String("uri", req.RequestURI),
zap.String("host", req.Host),
zap.String("remote_ip", c.RealIP()),
}
if err != nil {
fields = append(fields, zap.Error(err))
}
n := res.Status
switch {
case n >= 500:
log.Error("Server error", fields...)
case n >= 400:
log.Warn("Client error", fields...)
case n >= 300:
log.Info("Redirection", fields...)
default:
log.Info("Success", fields...)
}
return nil
}
}
}
| [
"\"PROJECT_ID\"",
"\"TRACE_ENABLED\""
]
| []
| [
"PROJECT_ID",
"TRACE_ENABLED"
]
| [] | ["PROJECT_ID", "TRACE_ENABLED"] | go | 2 | 0 | |
tests/download_test_images.py | # Copyright 2022 SECTRA AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from pathlib import Path
import requests
from hashlib import md5
SVS_PATH = 'slides/svs/CMU-1/CMU-1.svs'
SVS_URL = 'https://openslide.cs.cmu.edu/download/openslide-testdata/Aperio/CMU-1.svs'
SVS_MD5 = '751b0b86a3c5ff4dfc8567cf24daaa85'
NDPI_PATH = 'slides/ndpi/CMU-1/CMU-1.ndpi'
NDPI_URL = 'https://openslide.cs.cmu.edu/download/openslide-testdata/Hamamatsu/CMU-1.ndpi'
NDPI_MD5 = 'fb89dea54f85fb112e418a3cf4c7888a'
DEFAULT_DIR = 'testdata'
DOWNLOAD_CHUNK_SIZE = 8192
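# download_file streams the content at url into filename using DOWNLOAD_CHUNK_SIZE-byte chunks.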
def download_file(url: str, filename: Path):
with requests.get(url, stream=True) as request:
request.raise_for_status()
with open(filename, 'wb') as file:
for chunk in request.iter_content(chunk_size=DOWNLOAD_CHUNK_SIZE):
file.write(chunk)
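# main downloads the openslide test slides if missing and verifies their MD5 checksums.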
def main():
print("Downloading and/or checking testdata from openslide.")
test_data_path = os.environ.get("OPENTILE_TESTDIR")
if test_data_path is None:
test_data_dir = Path(DEFAULT_DIR)
print(
"Env 'OPENTILE_TESTDIR' not set, downloading to default folder "
f"{test_data_dir}."
)
else:
test_data_dir = Path(test_data_path)
print(f"Downloading to {test_data_dir}")
os.makedirs(test_data_dir, exist_ok=True)
files = {
test_data_dir.joinpath(SVS_PATH): (SVS_URL, SVS_MD5),
test_data_dir.joinpath(NDPI_PATH): (NDPI_URL, NDPI_MD5)
}
for file, (url, checksum) in files.items():
if not file.exists():
print(f"{file} not found, downloading from {url}")
os.makedirs(file.parent, exist_ok=True)
download_file(url, file)
else:
print(f"{file} found, skipping download")
with open(file, 'rb') as saved_file:
data = saved_file.read()
if not checksum == md5(data).hexdigest():
raise ValueError(f"Checksum faild for {file}")
else:
print(f"{file} checksum OK")
if __name__ == "__main__":
main() | []
| []
| [
"OPENTILE_TESTDIR"
]
| [] | ["OPENTILE_TESTDIR"] | python | 1 | 0 | |
noxfile.py | # -*- coding: utf-8 -*-
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by synthtool. DO NOT EDIT!
from __future__ import absolute_import
import os
import pathlib
import shutil
import nox
BLACK_VERSION = "black==19.10b0"
BLACK_PATHS = ["docs", "grafeas", "tests", "noxfile.py", "setup.py"]
DEFAULT_PYTHON_VERSION = "3.8"
SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"]
UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"]
CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
# 'docfx' is excluded since it only needs to run in 'docs-presubmit'
nox.options.sessions = [
"unit",
"system",
"cover",
"lint",
"lint_setup_py",
"blacken",
"docs",
]
# Error if a python version is missing
nox.options.error_on_missing_interpreters = True
@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint(session):
"""Run linters.
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
"""
session.install("flake8", BLACK_VERSION)
session.run(
"black", "--check", *BLACK_PATHS,
)
session.run("flake8", "grafeas", "tests")
@nox.session(python=DEFAULT_PYTHON_VERSION)
def blacken(session):
"""Run black. Format code to uniform standard."""
session.install(BLACK_VERSION)
session.run(
"black", *BLACK_PATHS,
)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.install("docutils", "pygments")
session.run("python", "setup.py", "check", "--restructuredtext", "--strict")
def default(session):
# Install all test dependencies, then install this package in-place.
constraints_path = str(
CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
)
session.install(
"mock",
"asyncmock",
"pytest",
"pytest-cov",
"pytest-asyncio",
"-c",
constraints_path,
)
session.install("-e", ".", "-c", constraints_path)
# Run py.test against the unit tests.
session.run(
"py.test",
"--quiet",
f"--junitxml=unit_{session.python}_sponge_log.xml",
"--cov=grafeas",
"--cov=tests/unit",
"--cov-append",
"--cov-config=.coveragerc",
"--cov-report=",
"--cov-fail-under=0",
os.path.join("tests", "unit"),
*session.posargs,
)
@nox.session(python=UNIT_TEST_PYTHON_VERSIONS)
def unit(session):
"""Run the unit test suite."""
default(session)
@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
def system(session):
"""Run the system test suite."""
constraints_path = str(
CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
)
system_test_path = os.path.join("tests", "system.py")
system_test_folder_path = os.path.join("tests", "system")
# Check the value of `RUN_SYSTEM_TESTS` env var. It defaults to true.
if os.environ.get("RUN_SYSTEM_TESTS", "true") == "false":
session.skip("RUN_SYSTEM_TESTS is set to false, skipping")
# Install pyopenssl for mTLS testing.
if os.environ.get("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true":
session.install("pyopenssl")
system_test_exists = os.path.exists(system_test_path)
system_test_folder_exists = os.path.exists(system_test_folder_path)
# Sanity check: only run tests if found.
if not system_test_exists and not system_test_folder_exists:
session.skip("System tests were not found")
# Use pre-release gRPC for system tests.
session.install("--pre", "grpcio")
# Install all test dependencies, then install this package into the
# virtualenv's dist-packages.
session.install("mock", "pytest", "google-cloud-testutils", "-c", constraints_path)
session.install("-e", ".", "-c", constraints_path)
# Run py.test against the system tests.
if system_test_exists:
session.run(
"py.test",
"--quiet",
f"--junitxml=system_{session.python}_sponge_log.xml",
system_test_path,
*session.posargs,
)
if system_test_folder_exists:
session.run(
"py.test",
"--quiet",
f"--junitxml=system_{session.python}_sponge_log.xml",
system_test_folder_path,
*session.posargs,
)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.install("coverage", "pytest-cov")
session.run("coverage", "report", "--show-missing", "--fail-under=90")
session.run("coverage", "erase")
@nox.session(python=DEFAULT_PYTHON_VERSION)
def docs(session):
"""Build the docs for this library."""
session.install("-e", ".")
session.install("sphinx==4.0.1", "alabaster", "recommonmark")
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
"sphinx-build",
"-W", # warnings as errors
"-T", # show full traceback on exception
"-N", # no colors
"-b",
"html",
"-d",
os.path.join("docs", "_build", "doctrees", ""),
os.path.join("docs", ""),
os.path.join("docs", "_build", "html", ""),
)
@nox.session(python=DEFAULT_PYTHON_VERSION)
def docfx(session):
"""Build the docfx yaml files for this library."""
session.install("-e", ".")
session.install(
"sphinx==4.0.1", "alabaster", "recommonmark", "gcp-sphinx-docfx-yaml"
)
shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
session.run(
"sphinx-build",
"-T", # show full traceback on exception
"-N", # no colors
"-D",
(
"extensions=sphinx.ext.autodoc,"
"sphinx.ext.autosummary,"
"docfx_yaml.extension,"
"sphinx.ext.intersphinx,"
"sphinx.ext.coverage,"
"sphinx.ext.napoleon,"
"sphinx.ext.todo,"
"sphinx.ext.viewcode,"
"recommonmark"
),
"-b",
"html",
"-d",
os.path.join("docs", "_build", "doctrees", ""),
os.path.join("docs", ""),
os.path.join("docs", "_build", "html", ""),
)
| []
| []
| [
"RUN_SYSTEM_TESTS",
"GOOGLE_API_USE_CLIENT_CERTIFICATE"
]
| [] | ["RUN_SYSTEM_TESTS", "GOOGLE_API_USE_CLIENT_CERTIFICATE"] | python | 2 | 0 | |
internal/protocol/connection_id.go | package protocol
import (
"bytes"
"crypto/rand"
"fmt"
"io"
"math/big"
"os"
)
// A ConnectionID in QUIC
type ConnectionID []byte
const maxConnectionIDLen = 18
// GenerateConnectionID generates a connection ID using cryptographic random, unless the QUIC_GO_ID environment variable provides a hex-encoded override.
func GenerateConnectionID(len int) (ConnectionID, error) {
z, ok := new(big.Int).SetString(os.Getenv("QUIC_GO_ID"), 16)
if ok {
return ConnectionID(z.Bytes()), nil
}
b := make([]byte, len)
if _, err := rand.Read(b); err != nil {
return nil, err
}
return ConnectionID(b), nil
}
// GenerateConnectionIDForInitial generates a connection ID for the Initial packet.
// It uses a length randomly chosen between 8 and 18 bytes.
func GenerateConnectionIDForInitial() (ConnectionID, error) {
r := make([]byte, 1)
if _, err := rand.Read(r); err != nil {
return nil, err
}
len := MinConnectionIDLenInitial + int(r[0])%(maxConnectionIDLen-MinConnectionIDLenInitial+1)
return GenerateConnectionID(len)
}
// ReadConnectionID reads a connection ID of length len from the given io.Reader.
// It returns io.EOF if there are not enough bytes to read.
func ReadConnectionID(r io.Reader, len int) (ConnectionID, error) {
if len == 0 {
return nil, nil
}
c := make(ConnectionID, len)
_, err := io.ReadFull(r, c)
if err == io.ErrUnexpectedEOF {
return nil, io.EOF
}
return c, err
}
// Equal says if two connection IDs are equal
func (c ConnectionID) Equal(other ConnectionID) bool {
return bytes.Equal(c, other)
}
// Len returns the length of the connection ID in bytes
func (c ConnectionID) Len() int {
return len(c)
}
// Bytes returns the byte representation
func (c ConnectionID) Bytes() []byte {
return []byte(c)
}
func (c ConnectionID) String() string {
if c.Len() == 0 {
return "(empty)"
}
return fmt.Sprintf("%#x", c.Bytes())
}
| [
"\"QUIC_GO_ID\""
]
| []
| [
"QUIC_GO_ID"
]
| [] | ["QUIC_GO_ID"] | go | 1 | 0 | |
repo/repo_test.go | /* Copyright 2017 The Bazel Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package repo_test
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"testing"
"github.com/bazelbuild/bazel-gazelle/repo"
"github.com/bazelbuild/bazel-gazelle/rule"
"github.com/bazelbuild/bazel-gazelle/testtools"
)
func TestFindExternalRepo(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("symlinks not supported on windows")
}
dir, err := ioutil.TempDir(os.Getenv("TEST_TEMPDIR"), "TestFindExternalRepo")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
dir, err = filepath.EvalSymlinks(dir)
if err != nil {
t.Fatal(err)
}
name := "foo"
externalPath := filepath.Join(dir, "bazel", "output-base", "external", name)
if err := os.MkdirAll(externalPath, 0777); err != nil {
t.Fatal(err)
}
bazelOutPath := filepath.Join(dir, "bazel", "output-base", "execroot", "test", "bazel-out")
if err := os.MkdirAll(bazelOutPath, 0777); err != nil {
t.Fatal(err)
}
workspacePath := filepath.Join(dir, "workspace")
if err := os.MkdirAll(workspacePath, 0777); err != nil {
t.Fatal(err)
}
if err := os.Symlink(bazelOutPath, filepath.Join(workspacePath, "bazel-out")); err != nil {
t.Fatal(err)
}
if got, err := repo.FindExternalRepo(workspacePath, name); err != nil {
t.Fatal(err)
} else if got != externalPath {
t.Errorf("got %q ; want %q", got, externalPath)
}
}
func TestListRepositories(t *testing.T) {
for _, tc := range []struct {
desc, workspace, want string
}{
{
desc: "empty",
want: "",
}, {
desc: "go_repository",
workspace: `
go_repository(
name = "custom_repo",
commit = "123456",
remote = "https://example.com/repo",
importpath = "example.com/repo",
)
`,
want: "custom_repo example.com/repo",
},
} {
t.Run(tc.desc, func(t *testing.T) {
workspace, err := rule.LoadData("WORKSPACE", "", []byte(tc.workspace))
if err != nil {
t.Fatal(err)
}
repos, _, err := repo.ListRepositories(workspace)
if err != nil {
t.Fatal(err)
}
got := reposToString(repos)
if got != tc.want {
t.Errorf("got\n%s\n\nwant:\n%s", got, tc.want)
}
})
}
}
func TestListRepositoriesWithRepositoryDirective(t *testing.T) {
for _, tc := range []struct {
desc, workspace, want string
}{
{
desc: "empty",
want: "",
}, {
desc: "git_repository",
workspace: `
git_repository(
name = "custom_repo",
commit = "123456",
remote = "https://example.com/repo",
importpath = "example.com/repo",
)
# gazelle:repository go_repository name=custom_repo importpath=example.com/repo1
# gazelle:repository go_repository name=custom_repo_2 importpath=example.com/repo2
`,
want: `custom_repo example.com/repo1
custom_repo_2 example.com/repo2`,
}, {
desc: "directive_prefer_latest",
workspace: `
# gazelle:repository go_repository name=custom_repo importpath=example.com/repo1
# gazelle:repository go_repository name=custom_repo_2 importpath=example.com/repo2
# gazelle:repository go_repository name=custom_repo importpath=example.com/repo3
`,
want: `custom_repo example.com/repo3
custom_repo_2 example.com/repo2`,
},
} {
t.Run(tc.desc, func(t *testing.T) {
workspace, err := rule.LoadData("WORKSPACE", "", []byte(tc.workspace))
if err != nil {
t.Fatal(err)
}
repos, _, err := repo.ListRepositories(workspace)
if err != nil {
t.Fatal(err)
}
got := reposToString(repos)
if got != tc.want {
t.Errorf("got\n%s\n\nwant:\n%s", got, tc.want)
}
})
}
}
func TestListRepositoriesWithRepositoryMacroDirective(t *testing.T) {
files := []testtools.FileSpec{{
Path: "repos1.bzl",
Content: `
def go_repositories():
go_repository(
name = "go_repo",
commit = "123456",
remote = "https://example.com/go",
importpath = "example.com/go",
)
def foo_repositories():
go_repository(
name = "foo_repo",
commit = "123456",
remote = "https://example.com/foo",
importpath = "example.com/foo",
)
`}, {
Path: "repos2.bzl",
Content: `
def bar_repositories():
# gazelle:repository go_repository name=extra_repo importpath=example.com/extra
go_repository(
name = "bar_repo",
commit = "123456",
remote = "https://example.com/bar",
importpath = "example.com/bar",
)
def baz_repositories():
# gazelle:repository go_repository name=ignored_repo importpath=example.com/ignored
go_repository(
name = "ignored_repo",
commit = "123456",
remote = "https://example.com/ignored",
importpath = "example.com/ignored",
)
`}}
dir, cleanup := testtools.CreateFiles(t, files)
defer cleanup()
workspaceString := `
# gazelle:repository_macro repos1.bzl%go_repositories
# gazelle:repository_macro repos1.bzl%foo_repositories
# gazelle:repository_macro repos2.bzl%bar_repositories
`
workspace, err := rule.LoadData(dir+"/WORKSPACE", "", []byte(workspaceString))
if err != nil {
t.Fatal(err)
}
repos, _, err := repo.ListRepositories(workspace)
if err != nil {
t.Fatal(err)
}
got := reposToString(repos)
want := `go_repo example.com/go
foo_repo example.com/foo
bar_repo example.com/bar
extra_repo example.com/extra`
if got != want {
t.Errorf("got\n%s\n\nwant:\n%s", got, want)
}
}
func TestListRepositoriesWithPlusRepositoryMacroDirective(t *testing.T) {
files := []testtools.FileSpec{{
Path: "repos1.bzl",
Content: `
load("repos2.bzl", "bar_repositories", alias = "alias_repositories")
def go_repositories():
bar_repositories()
alias()
go_repository(
name = "go_repo",
commit = "123456",
remote = "https://example.com/go",
importpath = "example.com/go",
)
`}, {
Path: "repos2.bzl",
Content: `
def bar_repositories():
go_repository(
name = "bar_repo",
commit = "123456",
remote = "https://example.com/bar",
importpath = "example.com/bar",
)
def alias_repositories():
go_repository(
name = "alias_repo",
commit = "123456",
remote = "https://example.com/alias",
importpath = "example.com/alias",
)
`}}
dir, cleanup := testtools.CreateFiles(t, files)
defer cleanup()
workspaceString := `
# gazelle:repository_macro +repos1.bzl%go_repositories`
workspace, err := rule.LoadData(filepath.Join(dir, "WORKSPACE"), "", []byte(workspaceString))
if err != nil {
t.Fatal(err)
}
repos, _, err := repo.ListRepositories(workspace)
if err != nil {
t.Fatal(err)
}
got := reposToString(repos)
want := `bar_repo example.com/bar
alias_repo example.com/alias
go_repo example.com/go`
if got != want {
t.Errorf("got\n%s\n\nwant:\n%s", got, want)
}
}
func reposToString(repos []*rule.Rule) string {
buf := &strings.Builder{}
sep := ""
for _, r := range repos {
fmt.Fprintf(buf, "%s%s %s", sep, r.Name(), r.AttrString("importpath"))
sep = "\n"
}
return buf.String()
}
| [
"\"TEST_TEMPDIR\""
]
| []
| [
"TEST_TEMPDIR"
]
| [] | ["TEST_TEMPDIR"] | go | 1 | 0 | |
cli/commands.go | // Copyright (c) 2014-2019 Ludovic Fauvet
// Licensed under the MIT license
package cli
import (
"bufio"
"bytes"
"context"
"flag"
"fmt"
"io/ioutil"
"net/url"
"os"
"os/exec"
"reflect"
"sort"
"strings"
"sync"
"text/tabwriter"
"time"
"github.com/etix/mirrorbits/core"
"github.com/etix/mirrorbits/filesystem"
"github.com/etix/mirrorbits/mirrors"
"github.com/etix/mirrorbits/rpc"
"github.com/etix/mirrorbits/utils"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/empty"
"github.com/howeyc/gopass"
"github.com/op/go-logging"
"github.com/pkg/errors"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"gopkg.in/yaml.v2"
)
const (
commentSeparator = "##### Comments go below this line #####"
defaultRPCTimeout = time.Second * 10
)
var (
log = logging.MustGetLogger("main")
)
type cli struct {
sync.Mutex
rpcconn *grpc.ClientConn
creds *loginCreds
}
// ParseCommands parses the command line and calls the appropriate function
func ParseCommands(args ...string) error {
c := &cli{
creds: &loginCreds{
Password: core.RPCPassword,
},
}
if len(args) > 0 && args[0] != "help" {
method, exists := c.getMethod(args[0])
if !exists {
fmt.Println("Error: Command not found:", args[0])
return c.CmdHelp()
}
if len(c.creds.Password) == 0 && core.RPCAskPass {
fmt.Print("Password: ")
passwd, err := gopass.GetPasswdMasked()
if err != nil {
return err
}
c.creds.Password = string(passwd)
}
ret := method.Func.CallSlice([]reflect.Value{
reflect.ValueOf(c),
reflect.ValueOf(args[1:]),
})[0].Interface()
if c.rpcconn != nil {
c.rpcconn.Close()
}
if ret == nil {
return nil
}
return ret.(error)
}
return c.CmdHelp()
}
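// getMethod maps a subcommand name to the corresponding CmdXxx method using
// reflection (e.g. "list" resolves to CmdList).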
func (c *cli) getMethod(name string) (reflect.Method, bool) {
methodName := "Cmd" + strings.ToUpper(name[:1]) + strings.ToLower(name[1:])
return reflect.TypeOf(c).MethodByName(methodName)
}
func (c *cli) CmdHelp() error {
help := fmt.Sprintf("Usage: mirrorbits [OPTIONS] COMMAND [arg...]\n\nA smart download redirector.\n\n")
help += fmt.Sprintf("Server commands:\n %-10.10s%s\n\n", "daemon", "Start the server")
help += fmt.Sprintf("CLI commands:\n")
for _, command := range [][]string{
{"add", "Add a new mirror"},
{"disable", "Disable a mirror"},
{"edit", "Edit a mirror"},
{"enable", "Enable a mirror"},
{"export", "Export the mirror database"},
{"list", "List all mirrors"},
{"logs", "Print logs of a mirror"},
{"refresh", "Refresh the local repository"},
{"reload", "Reload configuration"},
{"remove", "Remove a mirror"},
{"scan", "(Re-)Scan a mirror"},
{"show", "Print a mirror configuration"},
{"stats", "Show download stats"},
{"upgrade", "Seamless binary upgrade"},
{"version", "Print version information"},
} {
help += fmt.Sprintf(" %-10.10s%s\n", command[0], command[1])
}
fmt.Fprintf(os.Stderr, "%s\n", help)
return nil
}
// SubCmd returns a FlagSet for a subcommand whose Usage function prints the
// given signature and description
func SubCmd(name, signature, description string) *flag.FlagSet {
flags := flag.NewFlagSet(name, flag.ContinueOnError)
flags.Usage = func() {
fmt.Fprintf(os.Stderr, "\nUsage: mirrorbits %s %s\n\n%s\n\n", name, signature, description)
flags.PrintDefaults()
}
return flags
}
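// ByDate sorts mirrors by the time they entered their current state, most recent first.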
type ByDate []*rpc.Mirror
func (d ByDate) Len() int { return len(d) }
func (d ByDate) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
func (d ByDate) Less(i, j int) bool { return d[i].StateSince.Seconds > d[j].StateSince.Seconds }
func (c *cli) CmdList(args ...string) error {
cmd := SubCmd("list", "", "Get the list of mirrors")
http := cmd.Bool("http", false, "Print HTTP addresses")
rsync := cmd.Bool("rsync", false, "Print rsync addresses")
ftp := cmd.Bool("ftp", false, "Print FTP addresses")
location := cmd.Bool("location", false, "Print the country and continent code")
state := cmd.Bool("state", true, "Print the state of the mirror")
score := cmd.Bool("score", false, "Print the score of the mirror")
disabled := cmd.Bool("disabled", false, "List disabled mirrors only")
enabled := cmd.Bool("enabled", false, "List enabled mirrors only")
down := cmd.Bool("down", false, "List only mirrors currently down")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 0 {
cmd.Usage()
return nil
}
client := c.GetRPC()
ctx, cancel := context.WithTimeout(context.Background(), defaultRPCTimeout)
defer cancel()
list, err := client.List(ctx, &empty.Empty{})
if err != nil {
log.Fatal("list error:", err)
}
sort.Sort(ByDate(list.Mirrors))
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 8, 0, '\t', 0)
fmt.Fprint(w, "Identifier ")
if *score == true {
fmt.Fprint(w, "\tSCORE")
}
if *http == true {
fmt.Fprint(w, "\tHTTP ")
}
if *rsync == true {
fmt.Fprint(w, "\tRSYNC ")
}
if *ftp == true {
fmt.Fprint(w, "\tFTP ")
}
if *location == true {
fmt.Fprint(w, "\tLOCATION ")
}
if *state == true {
fmt.Fprint(w, "\tSTATE\tSINCE")
}
fmt.Fprint(w, "\n")
for _, mirror := range list.Mirrors {
if *disabled == true {
if mirror.Enabled == true {
continue
}
}
if *enabled == true {
if mirror.Enabled == false {
continue
}
}
if *down == true {
if mirror.Up == true {
continue
}
}
stateSince, err := ptypes.Timestamp(mirror.StateSince)
if err != nil {
log.Fatal("list error:", err)
}
fmt.Fprintf(w, "%s ", mirror.Name)
if *score == true {
fmt.Fprintf(w, "\t%d ", mirror.Score)
}
if *http == true {
fmt.Fprintf(w, "\t%s ", mirror.HttpURL)
}
if *rsync == true {
fmt.Fprintf(w, "\t%s ", mirror.RsyncURL)
}
if *ftp == true {
fmt.Fprintf(w, "\t%s ", mirror.FtpURL)
}
if *location == true {
countries := strings.Split(mirror.CountryCodes, " ")
countryCode := "/"
if len(countries) >= 1 {
countryCode = countries[0]
}
fmt.Fprintf(w, "\t%s (%s) ", countryCode, mirror.ContinentCode)
}
if *state == true {
if mirror.Enabled == false {
fmt.Fprintf(w, "\tdisabled")
} else if mirror.Up == true {
fmt.Fprintf(w, "\tup")
} else {
fmt.Fprintf(w, "\tdown")
}
fmt.Fprintf(w, " \t(%s)", stateSince.Format(time.RFC1123))
}
fmt.Fprint(w, "\n")
}
w.Flush()
return nil
}
func (c *cli) CmdAdd(args ...string) error {
cmd := SubCmd("add", "[OPTIONS] IDENTIFIER", "Add a new mirror")
http := cmd.String("http", "", "HTTP base URL")
rsync := cmd.String("rsync", "", "RSYNC base URL (for scanning only)")
ftp := cmd.String("ftp", "", "FTP base URL (for scanning only)")
sponsorName := cmd.String("sponsor-name", "", "Name of the sponsor")
sponsorURL := cmd.String("sponsor-url", "", "URL of the sponsor")
sponsorLogo := cmd.String("sponsor-logo", "", "URL of a logo to display for this mirror")
adminName := cmd.String("admin-name", "", "Admin's name")
adminEmail := cmd.String("admin-email", "", "Admin's email")
customData := cmd.String("custom-data", "", "Associated data to return when the mirror is selected (i.e. json document)")
continentOnly := cmd.Bool("continent-only", false, "The mirror should only handle its continent")
countryOnly := cmd.Bool("country-only", false, "The mirror should only handle its country")
asOnly := cmd.Bool("as-only", false, "The mirror should only handle clients in the same AS number")
score := cmd.Int("score", 0, "Weight to give to the mirror during selection")
comment := cmd.String("comment", "", "Comment")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
if strings.Contains(cmd.Arg(0), " ") {
fmt.Fprintf(os.Stderr, "The identifier cannot contain a space\n")
os.Exit(-1)
}
if *http == "" {
fmt.Fprintf(os.Stderr, "You *must* pass at least an HTTP URL\n")
os.Exit(-1)
}
if !strings.HasPrefix(*http, "http://") && !strings.HasPrefix(*http, "https://") {
*http = "http://" + *http
}
_, err := url.Parse(*http)
if err != nil {
fmt.Fprintf(os.Stderr, "Can't parse url\n")
os.Exit(-1)
}
mirror := &mirrors.Mirror{
Name: cmd.Arg(0),
HttpURL: *http,
RsyncURL: *rsync,
FtpURL: *ftp,
SponsorName: *sponsorName,
SponsorURL: *sponsorURL,
SponsorLogoURL: *sponsorLogo,
AdminName: *adminName,
AdminEmail: *adminEmail,
CustomData: *customData,
ContinentOnly: *continentOnly,
CountryOnly: *countryOnly,
ASOnly: *asOnly,
Score: *score,
Comment: *comment,
}
client := c.GetRPC()
ctx, cancel := context.WithTimeout(context.Background(), defaultRPCTimeout)
defer cancel()
m, err := rpc.MirrorToRPC(mirror)
if err != nil {
log.Fatal("add error:", err)
}
reply, err := client.AddMirror(ctx, m)
if err != nil {
if err.Error() == rpc.ErrNameAlreadyTaken.Error() {
log.Fatalf("Mirror %s already exists!\n", mirror.Name)
}
log.Fatal("add error:", err)
}
for i := 0; i < len(reply.Warnings); i++ {
fmt.Println(reply.Warnings[i])
if i == len(reply.Warnings)-1 {
fmt.Println("")
}
}
if reply.Country != "" {
fmt.Println("Mirror location:")
fmt.Printf("Latitude: %.4f\n", reply.Latitude)
fmt.Printf("Longitude: %.4f\n", reply.Longitude)
fmt.Printf("Continent: %s\n", reply.Continent)
fmt.Printf("Country: %s\n", reply.Country)
fmt.Printf("ASN: %s\n", reply.ASN)
fmt.Println("")
}
fmt.Printf("Mirror '%s' added successfully\n", mirror.Name)
fmt.Printf("Enable this mirror using\n $ mirrorbits enable %s\n", mirror.Name)
return nil
}
func (c *cli) CmdRemove(args ...string) error {
cmd := SubCmd("remove", "IDENTIFIER", "Remove an existing mirror")
force := cmd.Bool("f", false, "Never prompt for confirmation")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
id, name := c.matchMirror(cmd.Arg(0))
if *force == false {
fmt.Printf("Removing %s, are you sure? [y/N]", name)
reader := bufio.NewReader(os.Stdin)
s, _ := reader.ReadString('\n')
switch s[0] {
case 'y', 'Y':
break
default:
return nil
}
}
client := c.GetRPC()
ctx, cancel := context.WithTimeout(context.Background(), defaultRPCTimeout)
defer cancel()
_, err := client.RemoveMirror(ctx, &rpc.MirrorIDRequest{
ID: int32(id),
})
if err != nil {
log.Fatal("remove error:", err)
}
fmt.Printf("Mirror '%s' removed successfully\n", name)
return nil
}
func (c *cli) CmdScan(args ...string) error {
cmd := SubCmd("scan", "[IDENTIFIER]", "(Re-)Scan a mirror")
enable := cmd.Bool("enable", false, "Enable the mirror automatically if the scan is successful")
all := cmd.Bool("all", false, "Scan all mirrors at once")
ftp := cmd.Bool("ftp", false, "Force a scan using FTP")
rsync := cmd.Bool("rsync", false, "Force a scan using rsync")
timeout := cmd.Uint("timeout", 0, "Timeout in seconds")
if err := cmd.Parse(args); err != nil {
return nil
}
if !*all && cmd.NArg() != 1 || *all && cmd.NArg() != 0 {
cmd.Usage()
return nil
}
client := c.GetRPC()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
list := make(map[int]string)
// Get the list of mirrors to scan
if *all == true {
reply, err := client.MatchMirror(ctx, &rpc.MatchRequest{
Pattern: "", // Match all of them
})
if err != nil {
return errors.New("Cannot fetch the list of mirrors")
}
for _, m := range reply.Mirrors {
list[int(m.ID)] = m.Name
}
} else {
// Single mirror
id, name := c.matchMirror(cmd.Arg(0))
list[id] = name
}
// Set the method of the scan (if not default)
var method rpc.ScanMirrorRequest_Method
if *ftp == false && *rsync == false {
method = rpc.ScanMirrorRequest_ALL
} else if *rsync == true {
method = rpc.ScanMirrorRequest_RSYNC
} else if *ftp == true {
method = rpc.ScanMirrorRequest_FTP
}
for id, name := range list {
if *timeout > 0 {
ctx, cancel = context.WithTimeout(context.Background(), time.Duration(*timeout)*time.Second)
defer cancel()
}
fmt.Printf("Scanning %s... ", name)
reply, err := client.ScanMirror(ctx, &rpc.ScanMirrorRequest{
ID: int32(id),
AutoEnable: *enable,
Protocol: method,
})
if err != nil {
s := status.Convert(err)
if s.Code() == codes.FailedPrecondition || len(list) == 1 {
return errors.New("\nscan error: " + grpc.ErrorDesc(err))
}
fmt.Println("scan error:", grpc.ErrorDesc(err))
continue
} else {
fmt.Printf("%d files indexed, %d known and %d removed\n", reply.FilesIndexed, reply.KnownIndexed, reply.Removed)
if reply.GetTZOffsetMs() != 0 {
fmt.Printf(" ∟ Timezone offset detected and corrected: %d milliseconds\n", reply.TZOffsetMs)
}
if reply.Enabled {
fmt.Println(" ∟ Enabled")
}
}
}
return nil
}
func (c *cli) CmdRefresh(args ...string) error {
cmd := SubCmd("refresh", "", "Scan the local repository")
rehash := cmd.Bool("rehash", false, "Force a rehash of the files")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 0 {
cmd.Usage()
return nil
}
fmt.Print("Refreshing the local repository... ")
client := c.GetRPC()
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
_, err := client.RefreshRepository(ctx, &rpc.RefreshRepositoryRequest{
Rehash: *rehash,
})
if err != nil {
fmt.Println("")
log.Fatal(err)
}
fmt.Println("done")
return nil
}
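// matchMirror resolves a pattern to a single mirror ID and name using the RPC
// MatchMirror call. It exits the process when the pattern matches no mirror or
// more than one.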
func (c *cli) matchMirror(pattern string) (id int, name string) {
if len(pattern) == 0 {
return -1, ""
}
client := c.GetRPC()
ctx, cancel := context.WithTimeout(context.Background(), defaultRPCTimeout)
defer cancel()
reply, err := client.MatchMirror(ctx, &rpc.MatchRequest{
Pattern: pattern,
})
if err != nil {
fmt.Fprintf(os.Stderr, "mirror matching: %s\n", err)
os.Exit(1)
}
switch len(reply.Mirrors) {
case 0:
fmt.Fprintf(os.Stderr, "No match for '%s'\n", pattern)
os.Exit(1)
case 1:
id, name, err := GetSingle(reply.Mirrors)
if err != nil {
log.Fatal("unexpected error:", err)
}
return id, name
default:
fmt.Fprintln(os.Stderr, "Multiple match:")
for _, mirror := range reply.Mirrors {
fmt.Fprintf(os.Stderr, " %s\n", mirror.Name)
}
os.Exit(1)
}
return
}
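// GetSingle returns the ID and name of the only mirror in the list, or an error
// if the list is empty or contains more than one entry.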
func GetSingle(list []*rpc.MirrorID) (int, string, error) {
if len(list) == 0 {
return -1, "", errors.New("list is empty")
} else if len(list) > 1 {
return -1, "", errors.New("too many results")
}
return int(list[0].ID), list[0].Name, nil
}
func (c *cli) CmdEdit(args ...string) error {
cmd := SubCmd("edit", "[IDENTIFIER]", "Edit a mirror")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
// Find the editor to use
editor := os.Getenv("EDITOR")
if editor == "" {
log.Fatal("Environment variable $EDITOR not set")
}
id, _ := c.matchMirror(cmd.Arg(0))
client := c.GetRPC()
ctx, cancel := context.WithTimeout(context.Background(), defaultRPCTimeout)
defer cancel()
rpcm, err := client.MirrorInfo(ctx, &rpc.MirrorIDRequest{
ID: int32(id),
})
if err != nil {
log.Fatal("edit error:", err)
}
mirror, err := rpc.MirrorFromRPC(rpcm)
if err != nil {
log.Fatal("edit error:", err)
}
// Generate a yaml configuration string from the struct
out, err := yaml.Marshal(mirror)
if err != nil {
log.Fatal("edit error:", err)
}
// Open a temporary file
f, err := ioutil.TempFile(os.TempDir(), "edit")
if err != nil {
log.Fatal("Cannot create temporary file:", err)
}
defer os.Remove(f.Name())
f.WriteString("# You can now edit this mirror configuration.\n" +
"# Just save and quit when you're done.\n\n")
f.WriteString(string(out))
f.WriteString(fmt.Sprintf("\n%s\n\n%s\n", commentSeparator, mirror.Comment))
f.Close()
// Checksum the original file
chk, _ := filesystem.Sha256sum(f.Name())
reopen:
// Launch the editor with the filename as first parameter
exe := exec.Command(editor, f.Name())
exe.Stdin = os.Stdin
exe.Stdout = os.Stdout
exe.Stderr = os.Stderr
err = exe.Run()
if err != nil {
log.Fatal(err)
}
// Read the file back
out, err = ioutil.ReadFile(f.Name())
if err != nil {
log.Fatal("Cannot read file", f.Name())
}
// Checksum the file back and compare
chk2, _ := filesystem.Sha256sum(f.Name())
if bytes.Compare(chk, chk2) == 0 {
fmt.Println("Aborted - settings are unmodified, so there is nothing to change.")
return nil
}
var comment string
yamlstr := string(out)
commentIndex := strings.Index(yamlstr, commentSeparator)
if commentIndex > 0 {
comment = strings.TrimSpace(yamlstr[commentIndex+len(commentSeparator):])
yamlstr = yamlstr[:commentIndex]
}
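// reopen asks the user whether to relaunch the editor after a failure
// (e.g. invalid YAML or a name conflict); it returns true to retry.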
reopen := func(err error) bool {
eagain:
fmt.Printf("%s\nRetry? [Y/n]", err.Error())
reader := bufio.NewReader(os.Stdin)
s, _ := reader.ReadString('\n')
switch s[0] {
case 'y', 'Y', 10:
return true
case 'n', 'N':
fmt.Println("Aborted")
return false
default:
goto eagain
}
}
// Fill the struct from the yaml
err = yaml.Unmarshal([]byte(yamlstr), &mirror)
if err != nil {
switch reopen(err) {
case true:
goto reopen
case false:
return nil
}
}
mirror.Comment = comment
ctx, cancel = context.WithTimeout(context.Background(), defaultRPCTimeout)
defer cancel()
m, err := rpc.MirrorToRPC(mirror)
if err != nil {
log.Fatal("edit error:", err)
}
reply, err := client.UpdateMirror(ctx, m)
if err != nil {
if err.Error() == rpc.ErrNameAlreadyTaken.Error() {
switch reopen(errors.New("Name already taken")) {
case true:
goto reopen
case false:
return nil
}
}
log.Fatal("edit error:", err)
}
if len(reply.Diff) > 0 {
fmt.Println(reply.Diff)
}
fmt.Printf("Mirror '%s' edited successfully\n", mirror.Name)
return nil
}
func (c *cli) CmdShow(args ...string) error {
cmd := SubCmd("show", "[IDENTIFIER]", "Print a mirror configuration")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
id, _ := c.matchMirror(cmd.Arg(0))
client := c.GetRPC()
ctx, cancel := context.WithTimeout(context.Background(), defaultRPCTimeout)
defer cancel()
rpcm, err := client.MirrorInfo(ctx, &rpc.MirrorIDRequest{
ID: int32(id),
})
if err != nil {
log.Fatal("show error:", err)
}
mirror, err := rpc.MirrorFromRPC(rpcm)
if err != nil {
log.Fatal("show error:", err)
}
// Generate a yaml configuration string from the struct
out, err := yaml.Marshal(mirror)
if err != nil {
log.Fatal("show error:", err)
}
fmt.Printf("%s\nComment:\n%s\n", out, mirror.Comment)
return nil
}
func (c *cli) CmdExport(args ...string) error {
cmd := SubCmd("export", "[format]", "Export the mirror database.\n\nAvailable formats: mirmon")
rsync := cmd.Bool("rsync", true, "Export rsync URLs")
http := cmd.Bool("http", true, "Export http URLs")
ftp := cmd.Bool("ftp", true, "Export ftp URLs")
disabled := cmd.Bool("disabled", true, "Export disabled mirrors")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
if cmd.Arg(0) != "mirmon" {
fmt.Fprintf(os.Stderr, "Unsupported format\n")
cmd.Usage()
return nil
}
client := c.GetRPC()
ctx, cancel := context.WithTimeout(context.Background(), defaultRPCTimeout)
defer cancel()
list, err := client.List(ctx, &empty.Empty{})
if err != nil {
log.Fatal("export error:", err)
}
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 8, 0, '\t', 0)
for _, m := range list.Mirrors {
if *disabled == false {
if m.Enabled == false {
continue
}
}
ccodes := strings.Fields(m.CountryCodes)
urls := make([]string, 0, 3)
if *rsync == true && m.RsyncURL != "" {
urls = append(urls, m.RsyncURL)
}
if *http == true && m.HttpURL != "" {
urls = append(urls, m.HttpURL)
}
if *ftp == true && m.FtpURL != "" {
urls = append(urls, m.FtpURL)
}
for _, u := range urls {
fmt.Fprintf(w, "%s\t%s\t%s\n", ccodes[0], u, m.AdminEmail)
}
}
w.Flush()
return nil
}
func (c *cli) CmdEnable(args ...string) error {
cmd := SubCmd("enable", "[IDENTIFIER]", "Enable a mirror")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
c.changeStatus(cmd.Arg(0), true)
return nil
}
func (c *cli) CmdDisable(args ...string) error {
cmd := SubCmd("disable", "[IDENTIFIER]", "Disable a mirror")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
c.changeStatus(cmd.Arg(0), false)
return nil
}
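// changeStatus enables or disables the mirror matching the given pattern and
// prints the outcome.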
func (c *cli) changeStatus(pattern string, enabled bool) {
id, name := c.matchMirror(pattern)
client := c.GetRPC()
ctx, cancel := context.WithTimeout(context.Background(), defaultRPCTimeout)
defer cancel()
_, err := client.ChangeStatus(ctx, &rpc.ChangeStatusRequest{
ID: int32(id),
Enabled: enabled,
})
if err != nil {
if enabled {
log.Fatalf("Couldn't enable mirror '%s': %s\n", name, err)
} else {
log.Fatalf("Couldn't disable mirror '%s': %s\n", name, err)
}
}
if enabled {
fmt.Printf("Mirror '%s' enabled successfully\n", name)
} else {
fmt.Printf("Mirror '%s' disabled successfully\n", name)
}
return
}
func (c *cli) CmdStats(args ...string) error {
cmd := SubCmd("stats", "[OPTIONS] [mirror|file] [IDENTIFIER|PATTERN]", "Show download stats for a particular mirror or a file pattern")
dateStart := cmd.String("start-date", "", "Starting date (format YYYY-MM-DD)")
dateEnd := cmd.String("end-date", "", "Ending date (format YYYY-MM-DD)")
human := cmd.Bool("h", true, "Human readable version")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 2 || (cmd.Arg(0) != "mirror" && cmd.Arg(0) != "file") {
cmd.Usage()
return nil
}
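// Missing or malformed dates default to the current time.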
start, err := time.Parse("2006-1-2", *dateStart)
if err != nil {
start = time.Now()
}
startproto, _ := ptypes.TimestampProto(start)
end, err := time.Parse("2006-1-2", *dateEnd)
if err != nil {
end = time.Now()
}
endproto, _ := ptypes.TimestampProto(end)
client := c.GetRPC()
ctx, cancel := context.WithTimeout(context.Background(), defaultRPCTimeout)
defer cancel()
if cmd.Arg(0) == "file" {
// File stats
reply, err := client.StatsFile(ctx, &rpc.StatsFileRequest{
Pattern: cmd.Arg(1),
DateStart: startproto,
DateEnd: endproto,
})
if err != nil {
log.Fatal("file stats error:", err)
}
// Format the results
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 8, 0, '\t', 0)
// Sort keys and count requests
var keys []string
var requests int64
for k, req := range reply.Files {
requests += req
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
fmt.Fprintf(w, "%s:\t%d\n", k, reply.Files[k])
}
if len(keys) > 0 {
// Add a line separator
fmt.Fprintf(w, "\t\n")
}
fmt.Fprintf(w, "Total download requests: \t%d\n", requests)
w.Flush()
} else if cmd.Arg(0) == "mirror" {
// Mirror stats
id, name := c.matchMirror(cmd.Arg(1))
reply, err := client.StatsMirror(ctx, &rpc.StatsMirrorRequest{
ID: int32(id),
DateStart: startproto,
DateEnd: endproto,
})
if err != nil {
log.Fatal("mirror stats error:", err)
}
// Format the results
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 8, 0, '\t', 0)
fmt.Fprintf(w, "Identifier:\t%s\n", name)
if !reply.Mirror.Enabled {
fmt.Fprintf(w, "Status:\tdisabled\n")
} else if reply.Mirror.Up {
fmt.Fprintf(w, "Status:\tup\n")
} else {
fmt.Fprintf(w, "Status:\tdown\n")
}
fmt.Fprintf(w, "Download requests:\t%d\n", reply.Requests)
fmt.Fprint(w, "Bytes transferred:\t")
if *human {
fmt.Fprintln(w, utils.ReadableSize(reply.Bytes))
} else {
fmt.Fprintln(w, reply.Bytes)
}
w.Flush()
}
return nil
}
func (c *cli) CmdLogs(args ...string) error {
cmd := SubCmd("logs", "[IDENTIFIER]", "Print logs of a mirror")
maxResults := cmd.Uint("l", 500, "Maximum number of logs to return")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
id, name := c.matchMirror(cmd.Arg(0))
client := c.GetRPC()
ctx, cancel := context.WithTimeout(context.Background(), defaultRPCTimeout)
defer cancel()
resp, err := client.GetMirrorLogs(ctx, &rpc.GetMirrorLogsRequest{
ID: int32(id),
MaxResults: int32(*maxResults),
})
if err != nil {
log.Fatal("logs error:", err)
}
if len(resp.Line) == 0 {
fmt.Printf("No logs for %s\n", name)
return nil
}
fmt.Printf("Printing logs for %s:\n", name)
for _, l := range resp.Line {
fmt.Println(l)
}
return nil
}
func (c *cli) CmdReload(args ...string) error {
client := c.GetRPC()
ctx, cancel := context.WithTimeout(context.Background(), defaultRPCTimeout)
defer cancel()
_, err := client.Reload(ctx, &empty.Empty{})
if err != nil {
log.Fatal("reload error:", err)
}
return nil
}
func (c *cli) CmdUpgrade(args ...string) error {
client := c.GetRPC()
ctx, cancel := context.WithTimeout(context.Background(), defaultRPCTimeout)
defer cancel()
_, err := client.Upgrade(ctx, &empty.Empty{})
if err != nil {
log.Fatal("upgrade error:", err)
}
return nil
}
func (c *cli) CmdVersion(args ...string) error {
fmt.Printf("Client:\n")
core.PrintVersion(core.GetVersionInfo())
fmt.Println()
client := c.GetRPC()
ctx, cancel := context.WithTimeout(context.Background(), defaultRPCTimeout)
defer cancel()
reply, err := client.GetVersion(ctx, &empty.Empty{})
if err != nil {
s := status.Convert(err)
return errors.Wrap(s.Err(), "version error")
}
if reply.Version != "" {
fmt.Printf("Server:\n")
core.PrintVersion(core.VersionInfo{
Version: reply.Version,
Build: reply.Build,
GoVersion: reply.GoVersion,
OS: reply.OS,
Arch: reply.Arch,
GoMaxProcs: int(reply.GoMaxProcs),
})
}
return nil
}
| [
"\"EDITOR\""
]
| []
| [
"EDITOR"
]
| [] | ["EDITOR"] | go | 1 | 0 | |
shaper.go | // SPDX-License-Identifier: Unlicense OR BSD-3-Clause
package shaping
import (
"fmt"
"github.com/benoitkugler/textlayout/fonts"
"github.com/benoitkugler/textlayout/harfbuzz"
"github.com/go-text/di"
"golang.org/x/image/math/fixed"
)
type Shaper interface {
// Shape takes an Input and shapes it into the Output.
Shape(Input) Output
}
// MissingGlyphError indicates that the font used in shaping did not
// have a glyph needed to complete the shaping.
type MissingGlyphError struct {
fonts.GID
}
func (m MissingGlyphError) Error() string {
return fmt.Sprintf("missing glyph with id %d", m.GID)
}
// InvalidRunError represents an invalid run of text, either because
// the end is before the start or because start or end is greater
// than the length.
type InvalidRunError struct {
RunStart, RunEnd, TextLength int
}
func (i InvalidRunError) Error() string {
return fmt.Sprintf("run from %d to %d is not valid for text len %d", i.RunStart, i.RunEnd, i.TextLength)
}
const (
// scaleShift is the power of 2 with which to automatically scale
// up the input coordinate space of the shaper. This factor will
// be removed prior to returning dimensions. This ensures that the
// returned glyph dimensions take advantage of all of the precision
// that a fixed.Int26_6 can provide.
scaleShift = 6
)
// Shape shapes the runes of the input with harfbuzz and converts the result into an Output.
func Shape(input Input) (Output, error) {
// Prepare to shape the text.
// TODO: maybe reuse these buffers for performance?
buf := harfbuzz.NewBuffer()
runes, start, end := input.Text, input.RunStart, input.RunEnd
if end < start {
return Output{}, InvalidRunError{RunStart: start, RunEnd: end, TextLength: len(input.Text)}
}
buf.AddRunes(runes, start, end-start)
// TODO: handle vertical text?
switch input.Direction {
case di.DirectionLTR:
buf.Props.Direction = harfbuzz.LeftToRight
case di.DirectionRTL:
buf.Props.Direction = harfbuzz.RightToLeft
default:
return Output{}, UnimplementedDirectionError{
Direction: input.Direction,
}
}
buf.Props.Language = input.Language
buf.Props.Script = input.Script
// TODO: figure out what (if anything) to do if this type assertion fails.
font := harfbuzz.NewFont(input.Face.(harfbuzz.Face))
font.XScale = int32(input.Size.Ceil()) << scaleShift
font.YScale = font.XScale
// Actually use harfbuzz to shape the text.
buf.Shape(font, nil)
// Convert the shaped text into an Output.
glyphs := make([]Glyph, len(buf.Info))
for i := range glyphs {
g := buf.Info[i].Glyph
extents, ok := font.GlyphExtents(g)
if !ok {
// TODO: can this error happen? Will harfbuzz return a
// GID for a glyph that isn't in the font?
return Output{}, MissingGlyphError{GID: g}
}
glyphs[i] = Glyph{
Width: fixed.I(int(extents.Width)) >> scaleShift,
Height: fixed.I(int(extents.Height)) >> scaleShift,
XBearing: fixed.I(int(extents.XBearing)) >> scaleShift,
YBearing: fixed.I(int(extents.YBearing)) >> scaleShift,
XAdvance: fixed.I(int(buf.Pos[i].XAdvance)) >> scaleShift,
YAdvance: fixed.I(int(buf.Pos[i].YAdvance)) >> scaleShift,
XOffset: fixed.I(int(buf.Pos[i].XOffset)) >> scaleShift,
YOffset: fixed.I(int(buf.Pos[i].YOffset)) >> scaleShift,
Cluster: buf.Info[i].Cluster,
Glyph: g,
Mask: buf.Info[i].Mask,
}
}
out := Output{
Glyphs: glyphs,
}
fontExtents := font.ExtentsForDirection(buf.Props.Direction)
out.LineBounds = Bounds{
Ascent: fixed.I(int(fontExtents.Ascender)) >> scaleShift,
Descent: fixed.I(int(fontExtents.Descender)) >> scaleShift,
Gap: fixed.I(int(fontExtents.LineGap)) >> scaleShift,
}
return out, out.RecalculateAll(input.Direction)
}
| []
| []
| []
| [] | [] | go | null | null | null |
config/config.go | package config
import (
"encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
)
const (
// File is the default name of the JSON file where the config is written.
// The user can pass an alternate filename when using the CLI.
File = ".exercism.json"
// LegacyFile is the name of the original config file.
// It is a misnomer, since the config was in json, not go.
LegacyFile = ".exercism.go"
// hostAPI is the endpoint to submit solutions to, and to get personalized data
hostAPI = "http://exercism.io"
// hostXAPI is the endpoint to fetch problems from
hostXAPI = "http://x.exercism.io"
// DirExercises is the default name of the directory for active users.
// Make this non-exported when handlers.Login is deleted.
DirExercises = "exercism"
)
var (
errHomeNotFound = errors.New("unable to locate home directory")
)
// Config represents the settings for a particular user.
// This defines both the auth for talking to the API, as well as
// where to put problems that get downloaded.
type Config struct {
APIKey string `json:"apiKey"`
Dir string `json:"dir"`
API string `json:"api"`
XAPI string `json:"xapi"`
home string // cache user's home directory
file string // full path to config file
// deprecated, get rid of them when nobody uses 1.7.0 anymore
ExercismDirectory string `json:"exercismDirectory,omitempty"`
Hostname string `json:"hostname,omitempty"`
ProblemsHost string `json:"problemsHost,omitempty"`
}
// Home returns the user's canonical home directory.
// See: http://stackoverflow.com/questions/7922270/obtain-users-home-directory
// we can't cross compile using cgo and use user.Current()
func Home() (string, error) {
var dir string
if runtime.GOOS == "windows" {
dir = os.Getenv("USERPROFILE")
if dir == "" {
dir = os.Getenv("HOMEDRIVE") + os.Getenv("HOMEPATH")
}
} else {
dir = os.Getenv("HOME")
}
if dir == "" {
return dir, errHomeNotFound
}
return dir, nil
}
// Read loads the config from the stored JSON file.
func Read(file string) (*Config, error) {
c := &Config{}
err := c.Read(file)
return c, err
}
// New returns a new config.
// It will attempt to set defaults where no value is passed in.
func New(key, host, dir string) (*Config, error) {
c := &Config{
APIKey: key,
API: host,
Dir: dir,
}
return c.configure()
}
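// Update overwrites the API key, host and exercises directory with any
// non-empty values, then re-runs configuration to apply defaults.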
func (c *Config) Update(key, host, dir string) {
if key != "" {
c.APIKey = key
}
if host != "" {
c.API = host
}
if dir != "" {
c.Dir = dir
}
c.configure()
}
// Read loads the config from the stored JSON file.
func (c *Config) Read(file string) error {
renameLegacy()
if file == "" {
home, err := c.homeDir()
if err != nil {
return err
}
file = filepath.Join(home, File)
}
if _, err := os.Stat(file); err != nil {
if os.IsNotExist(err) {
c.configure()
return nil
}
return err
}
f, err := os.Open(file)
if err != nil {
return err
}
defer f.Close()
d := json.NewDecoder(f)
err = d.Decode(&c)
if err != nil {
return err
}
c.SavePath(file)
c.configure()
return nil
}
// SavePath allows the user to customize the location of the JSON file.
func (c *Config) SavePath(file string) {
if file != "" {
c.file = file
}
}
// File returns the path to the config file.
func (c *Config) File() string {
return c.file
}
// Write saves the config as JSON.
func (c *Config) Write() error {
renameLegacy()
c.ExercismDirectory = ""
c.Hostname = ""
c.ProblemsHost = ""
// truncates existing file if it exists
f, err := os.Create(c.file)
if err != nil {
return err
}
defer f.Close()
e := json.NewEncoder(f)
return e.Encode(c)
}
func (c *Config) configure() (*Config, error) {
c.sanitize()
if c.Hostname != "" {
c.API = c.Hostname
}
if c.API == "" {
c.API = hostAPI
}
if c.ProblemsHost != "" {
c.XAPI = c.ProblemsHost
}
if c.XAPI == "" {
c.XAPI = hostXAPI
}
dir, err := c.homeDir()
if err != nil {
return c, err
}
c.file = filepath.Join(dir, File)
// use legacy value, if it exists
if c.ExercismDirectory != "" {
c.Dir = c.ExercismDirectory
}
// fall back to default value
if c.Dir == "" {
c.Dir = filepath.Join(dir, DirExercises)
}
err = c.setDir(c.Dir)
if err != nil {
return c, err
}
return c, nil
}
func (c *Config) setDir(dir string) error {
homeDir, err := c.homeDir()
if err != nil {
return err
}
c.Dir = strings.Replace(dir, "~/", fmt.Sprintf("%s/", homeDir), 1)
return nil
}
// FilePath returns the path to the config file.
func FilePath(file string) (string, error) {
if file != "" {
return file, nil
}
dir, err := Home()
if err != nil {
return "", err
}
return filepath.Join(dir, File), nil
}
// IsAuthenticated returns true if the config contains an API key.
// This does not check whether or not that key is valid.
func (c *Config) IsAuthenticated() bool {
return c.APIKey != ""
}
// See: http://stackoverflow.com/questions/7922270/obtain-users-home-directory
// we can't cross compile using cgo and use user.Current()
func (c *Config) homeDir() (string, error) {
if c.home != "" {
return c.home, nil
}
return Home()
}
func (c *Config) sanitize() {
c.APIKey = strings.TrimSpace(c.APIKey)
c.Dir = strings.TrimSpace(c.Dir)
c.API = strings.TrimSpace(c.API)
c.XAPI = strings.TrimSpace(c.XAPI)
c.Hostname = strings.TrimSpace(c.Hostname)
c.ProblemsHost = strings.TrimSpace(c.ProblemsHost)
}
// renameLegacy normalizes the default config file name.
// This function will bail silently if any error occurs.
func renameLegacy() {
dir, err := Home()
if err != nil {
return
}
legacyPath := filepath.Join(dir, LegacyFile)
if _, err = os.Stat(legacyPath); err != nil {
return
}
correctPath := filepath.Join(dir, File)
os.Rename(legacyPath, correctPath)
return
}
| [
"\"USERPROFILE\"",
"\"HOMEDRIVE\"",
"\"HOMEPATH\"",
"\"HOME\""
]
| []
| [
"HOME",
"HOMEPATH",
"HOMEDRIVE",
"USERPROFILE"
]
| [] | ["HOME", "HOMEPATH", "HOMEDRIVE", "USERPROFILE"] | go | 4 | 0 | |
scripts/M044.py | """
Created by: Rob Mulla
Sep 26
IEEE Fraud Detection Model
- FE013
- Yang's Features
- Raddar's Features
- Removes adversarial-validation (AV) bad features automatically
"""
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import sys
import matplotlib.pylab as plt
from sklearn.model_selection import KFold
from datetime import datetime
import time
import logging
from sklearn.metrics import roc_auc_score
from catboost import CatBoostClassifier, Pool
from timeit import default_timer as timer
import lightgbm as lgb
import gc
start = timer()
##################
# PARAMETERS
###################
run_id = "{:%m%d_%H%M}".format(datetime.now())
KERNEL_RUN = False
MODEL_NUMBER = os.path.basename(__file__).split('.')[0]
if KERNEL_RUN:
INPUT_DIR = '../input/champs-scalar-coupling/'
FE_DIR = '../input/molecule-fe024/'
FOLDS_DIR = '../input/champs-3fold-ids/'
TARGET = "isFraud"
N_ESTIMATORS = 100000
N_META_ESTIMATORS = 500000
LEARNING_RATE = 0.005
VERBOSE = 100
EARLY_STOPPING_ROUNDS = 100
RANDOM_STATE = 529
N_THREADS = 58
DEPTH = -1 #14
N_FOLDS = 5
SHUFFLE = False
FE_SET = 'FE013' # Feature Engineering Version
AV_THRESHOLD = 0.8
MODEL_TYPE = "lightgbm"
#####################
## SETUP LOGGER
#####################
def get_logger():
"""
credits to: https://www.kaggle.com/ogrellier/user-level-lightgbm-lb-1-4480
"""
os.environ["TZ"] = "US/Eastern"
time.tzset()
FORMAT = "[%(levelname)s]%(asctime)s:%(name)s:%(message)s"
logging.basicConfig(format=FORMAT)
logger = logging.getLogger("main")
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
fhandler = logging.FileHandler(f'../logs/{MODEL_NUMBER}_{run_id}.log')
formatter = logging.Formatter(FORMAT)
handler.setFormatter(formatter)
# logger.addHandler(handler)
logger.addHandler(fhandler)
return logger
logger = get_logger()
logger.info(f'Running for Model Number {MODEL_NUMBER}')
##################
# PARAMETERS
###################
if MODEL_TYPE == 'xgboost':
EVAL_METRIC = "AUC"
elif MODEL_TYPE == 'lightgbm':
EVAL_METRIC = 'auc'
elif MODEL_TYPE == 'catboost':
EVAL_METRIC = "AUC"
##################
# TRACKING FUNCTION
###################
def update_tracking(run_id,
field,
value, csv_file="../tracking/tracking.csv", integer=False, digits=None, drop_incomplete_rows=False):
"""
Function to update the tracking CSV with information about the model
"""
try:
df = pd.read_csv(csv_file, index_col=[0])
except FileNotFoundError:
df = pd.DataFrame()
if integer:
value = round(value)
elif digits is not None:
value = round(value, digits)
if drop_incomplete_rows:
df = df.loc[~df['AUC'].isna()]
df.loc[run_id, field] = value # Model number is index
df.to_csv(csv_file)
update_tracking(run_id, "model_number", MODEL_NUMBER, drop_incomplete_rows=True)
update_tracking(run_id, "n_estimators", N_ESTIMATORS)
update_tracking(run_id, "early_stopping_rounds", EARLY_STOPPING_ROUNDS)
update_tracking(run_id, "random_state", RANDOM_STATE)
update_tracking(run_id, "n_threads", N_THREADS)
update_tracking(run_id, "learning_rate", LEARNING_RATE)
update_tracking(run_id, "n_fold", N_FOLDS)
update_tracking(run_id, "model_type", MODEL_TYPE)
update_tracking(run_id, "eval_metric", EVAL_METRIC)
update_tracking(run_id, "depth", DEPTH)
update_tracking(run_id, "shuffle", SHUFFLE)
update_tracking(run_id, "fe", FE_SET)
update_tracking(run_id, "av_threshold", AV_THRESHOLD)
#####################
# PREPARE MODEL DATA
#####################
folds = KFold(n_splits=N_FOLDS, random_state=RANDOM_STATE, shuffle=SHUFFLE)
logger.info('Loading Data...')
train_df = pd.read_parquet(f'../data/train_{FE_SET}.parquet')
test_df = pd.read_parquet(f'../data/test_{FE_SET}.parquet')
logger.info('Done loading Data...')
###########
# FEATURES
###########
FEATURES = [ 'V85', 'bank_type_TransactionAmt_mean', 'D5_fq_enc', 'V12',
'V81', 'V282', 'bank_type_D7_std', 'id_15', 'V13', 'C12_fq_enc',
'anomaly', 'D7_DT_D_std_score', 'D3_DT_D_min_max', 'card4_count_full',
'D14_DT_D_min_max', 'card1_count_full', 'V169', 'D3_DT_M_min_max', 'V279',
'V91', 'bank_type_D10_std', 'D14', 'D6_DT_M_std_score', 'D4_DT_W_min_max',
'V152', 'V56', 'D3_intercept_bin0', 'D14_intercept_bin0', 'V220', 'V277',
'D12_intercept', 'ProductCD_W_00cents', 'D13_intercept_bin0', 'V291',
'V189', 'D15_DT_M_min_max', 'C5_fq_enc', 'D3_fq_enc', 'card5_fq_enc',
'addr1_count_full', 'V266', 'D11_intercept_bin2', 'V23',
'D4_intercept_bin3', 'bank_type_D10_mean', 'D2_intercept_bin3', 'V306',
'DeviceType', 'V285', 'D5_DT_W_std_score', 'V131', 'V37', 'V296',
'bank_type_D1_mean', 'V75', 'D3_DT_W_std_score', 'D10_DT_M_min_max',
'id_33_0', 'V67', 'D4_intercept_bin4', 'V256', 'V143', 'uid5_D6_std',
'ProductCD_target_mean', 'mxC3', 'V129', 'D13_DT_M_std_score', 'V24',
'D3_DT_M_std_score', 'mxC4', 'D9', 'id_30_version_fq_enc',
'D5_DT_D_std_score', 'D11_DT_M_std_score', 'uid5_D6_mean',
'D14_DT_M_std_score', 'card5_TransactionAmt_std', 'V20', 'C8_fq_enc',
'V70', 'V127', 'D6_intercept', 'D15_DT_W_min_max',
'sum_Cxx_binary_higher_than_q95', 'V156', 'uid4_D12_mean', 'C5',
'uid4_D12_std', 'id_30_fq_enc', 'V61', 'id_33', 'D15_to_std_addr1',
'bank_type_D9_mean', 'D5_intercept', 'D10_DT_W_min_max', 'V130',
'bank_type_D9_std', 'uid5_D7_std', 'bank_type_D14_mean',
'bank_type_D3_std', 'bank_type_D5_mean', 'ProductCD', 'M8', 'V44',
'D6_fq_enc', 'D15_DT_D_min_max', 'D11_intercept_bin0', 'V257',
'bank_type_D7_mean', 'V76', 'D15', 'V38', 'V55', 'V261', 'V149', 'D4',
'D8_intercept_bin0', 'M2', 'bank_type_D6_std', 'id_30_version',
'D4_intercept_bin1', 'D15_to_mean_card4', 'V82', 'D3_DT_D_std_score',
'D10_intercept_bin3', 'bank_type_D2_std', 'V77', 'M7', 'D11',
'D4_intercept_bin2', 'email_check', 'V294', 'V317', 'V308',
'id_33_fq_enc', 'bank_type_D5_std', 'D8_intercept', 'V62', 'V187',
'card5_TransactionAmt_mean', 'bank_type_D12_mean', 'id_33_count_dist',
'D2_intercept_bin2', 'C10', 'V86', 'D8_DT_M_min_max',
'D15_intercept_bin4', 'D6_DT_W_std_score', 'uid5_D7_mean', 'C9_fq_enc',
'mxC10', 'D14_DT_W_std_score', 'card2_count_full', 'V258',
'bank_type_D14_std', 'D10_intercept_bin4', 'V83', 'bank_type_D13_std',
'D8_DT_W_min_max', 'TransactionAmt', 'V312', 'D14_intercept', 'id_33_1',
'D15_intercept_bin2', 'D12_DT_W_std_score', 'V78', 'D8_D9_decimal_dist',
'M9', 'V281', 'bank_type_D12_std', 'V54', 'C9', 'M4_target_mean',
'sum_Cxx_binary_higher_than_q90', 'D10_DT_D_min_max', 'bank_type_D3_mean',
'bank_type_D8_mean', 'R_emaildomain_prefix', 'bank_type_D6_mean', 'V314',
'D11_DT_W_std_score', 'D10', 'D4_DT_D_min_max', 'V283',
'D10_intercept_bin2', 'D13_intercept', 'D8_DT_D_min_max', 'C2_fq_enc',
'V165', 'D1_intercept_bin4', 'bank_type_D13_mean', 'D3_intercept',
'TransactionAmt_2Dec', 'card3_div_Mean_D9_DOY', 'C12',
'D4_DT_M_std_score', 'D2_intercept_bin1', 'mxC8', 'D2_fq_enc',
'addr1_third_digit', 'D4_fq_enc', 'D1_fq_enc', 'mxC12', 'D8',
'D10_intercept_bin1', 'id_01', 'id_09', 'id_03', 'addr1_second_digit',
'D15_to_mean_addr1', 'sum_Cxx_binary_higher_than_q80', 'V53',
'TransactionAmt_decimal', 'card3_div_Mean_D6_DOY', 'D15_intercept_bin3',
'V45', 'id_02_to_std_card4', 'addr2_div_Mean_D10_DOY_productCD',
'DeviceInfo_version', 'DeviceInfo_device', 'D1_intercept_bin3',
'D11_intercept', 'DeviceInfo_version_fq_enc', 'C6', 'uid5_D13_std',
'TransactionAmt_DT_M_min_max', 'dist2', 'C8', 'D15_intercept_bin1', 'M3',
'R_emaildomain_fq_enc', 'DeviceInfo_device_fq_enc', 'D6_DT_D_std_score',
'sum_Cxx_binary_higher_than_q60', 'D11__DeviceInfo',
'TranAmt_div_Mean_D12_DOY_productCD', 'D10_DT_M_std_score',
'uid5_D13_mean', 'mxC5', 'id_30', 'addr2_div_Mean_D4_DOY', 'uid2_D12_std',
'C11_fq_enc', 'id_06', 'uid2_D12_mean', 'sum_Cxx_binary_higher_than_q70',
'V310', 'V307', 'C6_fq_enc', 'D8_fq_enc', 'dist2_fq_enc',
'D2_intercept_bin0', 'addr1_div_Mean_D10_DOY_productCD',
'addr1_div_Mean_D10_DOY', 'addr1_div_Mean_D11_DOY', 'uid2_D8_std',
'id_02__id_20', 'V313', 'D4_intercept_bin0', 'D11_DT_D_std_score',
'Transaction_day_of_week', 'card6_div_Mean_D3_DOY', 'uid2_D1_std',
'uid5_D11_mean', 'uid_fq_enc', 'D14_DT_D_std_score', 'D12_DT_D_std_score',
'id_02_to_mean_card4', 'uid4_D13_std', 'D1_intercept_bin1',
'id_02_to_std_card1', 'uid5_D11_std', 'P_emaildomain_prefix', 'DT_day',
'D8_DT_M_std_score', 'uid2_D1_mean', 'TransactionAmt_to_mean_card4',
'card5_div_Mean_D11_DOY', 'D15_DT_M_std_score', 'V87', 'uid_D12_std',
'id_31_device_fq_enc', 'uid2_D11_mean', 'card3_DT_W_week_day_dist_best',
'uid5_D14_std', 'uid2_D15_mean', 'sum_Cxx_binary_higher_than_q50',
'id_13', 'card3_div_Mean_D11_DOY', 'C11',
'bank_type_DT_W_week_day_dist_best', 'card4_div_Mean_D11_DOY',
'addr1_div_Mean_D1_DOY', 'uid2_D4_mean', 'card2_div_Mean_D11_DOY',
'C13_fq_enc', 'uid4_D13_mean', 'card5_DT_W_week_day_dist_best', 'id_02',
'uid5_D14_mean', 'uid2_D10_mean', 'id_01_count_dist',
'D13_DT_W_std_score', 'C2', 'C14', 'addr2_div_Mean_D10_DOY',
'uid2_D11_std', 'addr1_div_Mean_D1_DOY_productCD', 'id_02_to_mean_card1',
'dist1_fq_enc', 'card1_div_Mean_D11_DOY', 'D15_to_std_card1',
'TransactionAmt_DT_M_std_score', 'uid2_D6_std',
'TransactionAmt_to_std_card4', 'uid2_D15_std', 'uid3_D8_std',
'card6_div_Mean_D11_DOY', 'TranAmt_div_Mean_D14_DOY',
'card3_div_Mean_D14_DOY', 'D2', 'D1', 'uid_D15_mean', 'uid4_D6_std',
'uid_D15_std', 'D10_intercept_bin0', 'DeviceInfo_fq_enc', 'uid2_D13_std',
'uid_D12_mean', 'uid4_D6_mean', 'uid_D1_std', 'D1_intercept_bin2',
'uid_D10_mean', 'card2__id_20', 'uid4_D7_std', 'uid3_D13_std',
'C14_fq_enc', 'uid_D8_std', 'uid3_D13_mean', 'uid2_D4_std',
'addr1_div_Mean_D4_DOY', 'uid_D4_mean', 'D4_DT_W_std_score',
'addr2_div_Mean_D1_DOY_productCD', 'uid_D11_mean', 'D15_intercept_bin0',
'uid2_D10_std', 'uid_D13_std', 'uid2_fq_enc', 'uid2_D13_mean',
'uid2_D2_mean', 'D2_intercept', 'uid_D11_std', 'card2', 'uid4_D14_std',
'C_sum_after_clip75', 'R_emaildomain', 'dist1', 'id_05',
'uid_TransactionAmt_mean', 'uid_D1_mean', 'uid3_D1_std', 'uid5_D8_std',
'uid3_D6_std', 'Transaction_hour_of_day', 'uid4_D14_mean', 'uid5_D10_std',
'uid3_D10_std', 'uid5_D1_std', 'uid5_D15_std', 'uid2_D7_mean',
'uid3_D11_std', 'uid4_D8_std', 'D13_DT_D_std_score', 'uid3_D11_mean',
'uid2_D14_std', 'uid2_D7_std', 'uid2_D14_mean', 'uid_D13_mean',
'uid_D10_std', 'uid2_D3_std', 'uid_D6_std', 'uid3_D15_std',
'addr1_fq_enc', 'id_31', 'uid_TransactionAmt_std',
'card1_div_Mean_D4_DOY_productCD', 'uid2_TransactionAmt_mean',
'C_sum_after_clip90', 'uid2_TransactionAmt_std', 'uid4_D7_mean',
'uid2_D6_mean', 'uid3_D15_mean', 'D15_to_mean_card1', 'uid5_D15_mean',
'M4', 'uid3_D7_std', 'card2_div_Mean_D4_DOY',
'card5_div_Mean_D4_DOY_productCD', 'card5_div_Mean_D4_DOY',
'D4_intercept', 'uid_D4_std', 'card6_div_Mean_D4_DOY_productCD',
'card5__P_emaildomain', 'card1_fq_enc', 'uid5_D10_mean',
'card1_div_Mean_D4_DOY', 'C1', 'M6', 'uid2_D2_std',
'P_emaildomain_fq_enc', 'card1_TransactionAmt_mean', 'uid3_D10_mean',
'TransactionAmt_DT_W_min_max', 'uid5_D4_std',
'card1_div_Mean_D10_DOY_productCD', 'uid3_D1_mean',
'card1_div_Mean_D10_DOY', 'uid_D14_mean', 'mxC9',
'TranAmt_div_Mean_D4_DOY_productCD', 'D15_DT_W_std_score',
'DeviceInfo__P_emaildomain', 'uid3_D14_mean', 'bank_type_DT_M', 'mxC11',
'uid5_D1_mean', 'uid_D2_mean', 'D10_DT_W_std_score',
'card3_DT_M_month_day_dist_best', 'uid3_D2_std',
'TranAmt_div_Mean_D4_DOY', 'card1_TransactionAmt_std',
'card3_div_Mean_D4_DOY_productCD', 'D1_intercept_bin0', 'uid3_D4_std',
'card2_div_Mean_D10_DOY', 'uid_D2_std', 'uid3_D14_std', 'uid3_D4_mean',
'uid_D7_mean', 'uid5_D2_std', 'card4_div_Mean_D4_DOY_productCD',
'card6_div_Mean_D4_DOY', 'TranAmt_div_Mean_D10_DOY', 'uid2_D9_std',
'TransactionAmt_DT_W_std_score', 'C1_fq_enc', 'card1_div_Mean_D1_DOY',
'uid5_D4_mean', 'uid3_D6_mean', 'mxC14', 'uid5_D2_mean',
'card4_div_Mean_D4_DOY', 'card3_div_Mean_D4_DOY', 'uid_D14_std', 'M5',
'C13', 'mxC6', 'card5_div_Mean_D10_DOY_productCD',
'card3_DT_M_month_day_dist', 'card2_div_Mean_D10_DOY_productCD',
'uid_D7_std', 'card2_div_Mean_D4_DOY_productCD',
'bank_type_DT_M_month_day_dist', 'uid3_D7_mean', 'uid_D3_std',
'uid5_fq_enc', 'uid3_fq_enc', 'uid_D3_mean', 'D4_DT_D_std_score',
'uid3_D2_mean', 'uid4_D1_std', 'uid2_D5_std', 'uid4_D10_std',
'bank_type_DT_D_hour_dist_best', 'uid2_D8_mean',
'card6_div_Mean_D10_DOY_productCD', 'card1_div_Mean_D1_DOY_productCD',
'uid5_D9_std', 'card4_div_Mean_D10_DOY_productCD', 'uid2_D3_mean',
'uid_D6_mean', 'card2_div_Mean_D1_DOY', 'card5_div_Mean_D10_DOY', 'mxC2',
'card2_TransactionAmt_std', 'bank_type_DT_W_week_day_dist',
'card2_TransactionAmt_mean', 'uid4_D10_mean', 'id_31_count_dist',
'TranAmt_div_Mean_D1_DOY', 'uid3_D3_std', 'uid4_D15_std',
'card5_div_Mean_D1_DOY_productCD', 'card4_div_Mean_D10_DOY',
'card5_DT_D_hour_dist_best', 'uid4_D4_std', 'card5_DT_M_month_day_dist',
'bank_type_DT_W', 'addr1__card1', 'bank_type_DT_M_month_day_dist_best',
'card2_div_Mean_D1_DOY_productCD', 'card6_div_Mean_D10_DOY',
'uid2_D5_mean', 'uid_DT_M', 'card2__dist1', 'uid2_D9_mean',
'card5_DT_M_month_day_dist_best', 'TranAmt_div_Mean_D10_DOY_productCD',
'uid4_D11_std', 'uid_D5_mean', 'uid5_D3_std',
'TransactionAmt_DT_D_std_score', 'D8_DT_W_std_score',
'card5_DT_W_week_day_dist', 'uid5_D5_std', 'card3_DT_W_week_day_dist',
'uid4_D9_std', 'D10_intercept', 'uid3_D3_mean', 'uid4_D5_std',
'uid_D5_std', 'card5_div_Mean_D1_DOY', 'uid5_D3_mean', 'bank_type_DT_D',
'uid4_D1_mean', 'uid_D8_mean', 'uid3_D5_mean', 'D15_intercept',
'uid5_TransactionAmt_std', 'uid3_D5_std', 'uid4_D4_mean', 'uid4_D15_mean',
'uid5_D8_mean', 'uid5_D9_mean', 'uid_D9_std', 'uid_D9_mean',
'uid5_D5_mean', 'mtransamt', 'bank_type_DT_D_hour_dist', 'uid4_D11_mean',
'D15_DT_D_std_score', 'TransactionAmt_DT_D_min_max', 'uid4_D2_mean',
'ntrans', 'addr2_div_Mean_D1_DOY', 'uid5_TransactionAmt_mean',
'uid3_D9_std', 'TransactionAmt_Dec', 'uid3_TransactionAmt_std',
'card5_DT_D_hour_dist', 'card1', 'card4_div_Mean_D1_DOY_productCD',
'P_emaildomain__C2', 'card3_div_Mean_D10_DOY', 'uid4_D3_std',
'card3_DT_D_hour_dist_best', 'uid4_D8_mean', 'uid4_D2_std',
'card6_div_Mean_D1_DOY_productCD', 'uid_DT_W', 'Sum_TransAmt_Day',
'uid4_D5_mean', 'card4_div_Mean_D1_DOY',
'card3_div_Mean_D10_DOY_productCD', 'uid3_D8_mean',
'TransactionAmt_userid_median', 'uid4_fq_enc', 'uid3_TransactionAmt_mean',
'uid3_D9_mean', 'card6_div_Mean_D1_DOY', 'Trans_Count_Day', 'mxC1',
'D10_DT_D_std_score', 'card3_div_Mean_D1_DOY',
'TransactionAmt_to_mean_card1', 'card2_fq_enc', 'product_type',
'card3_div_Mean_D1_DOY_productCD', 'TransactionAmt_to_std_card1',
'uid_DT_D', 'uid4_D9_mean', 'D1_intercept', 'card3_DT_D_hour_dist',
'TranAmt_div_Mean_D1_DOY_productCD', 'product_type_DT_M', 'uid4_D3_mean',
'uid4_TransactionAmt_mean', 'uid4_TransactionAmt_std',
'D8_DT_D_std_score', 'Mean_TransAmt_Day', 'minDT', 'product_type_DT_W',
'mintransamt', 'maxtransamt', 'TransactionAmt_userid_std',
'P_emaildomain', 'card1__card5', 'product_type_DT_D', 'mxC13', 'maxDT',
'id_19', 'DeviceInfo', 'id_20', 'addr1', 'userid_min_C1', 'userid_max_C1',
'userid_max_minus_min_C1', 'userid_unique_C1', 'userid_mean_C1',
'userid_min_C2', 'userid_max_C2', 'userid_max_minus_min_C2',
'userid_unique_C2', 'userid_mean_C2', 'userid_min_C3', 'userid_max_C3',
'userid_max_minus_min_C3', 'userid_unique_C3', 'userid_mean_C3',
'userid_min_C4', 'userid_max_C4', 'userid_max_minus_min_C4',
'userid_unique_C4', 'userid_mean_C4', 'userid_min_C5', 'userid_max_C5',
'userid_max_minus_min_C5', 'userid_unique_C5', 'userid_mean_C5',
'userid_min_C6', 'userid_max_C6', 'userid_max_minus_min_C6',
'userid_unique_C6', 'userid_mean_C6', 'userid_min_C7', 'userid_max_C7',
'userid_max_minus_min_C7', 'userid_unique_C7', 'userid_mean_C7',
'userid_min_C8', 'userid_max_C8', 'userid_max_minus_min_C8',
'userid_unique_C8', 'userid_mean_C8', 'userid_min_C9', 'userid_max_C9',
'userid_max_minus_min_C9', 'userid_unique_C9', 'userid_mean_C9',
'userid_min_C10', 'userid_max_C10', 'userid_max_minus_min_C10',
'userid_unique_C10', 'userid_mean_C10', 'userid_min_C11',
'userid_max_C11', 'userid_max_minus_min_C11', 'userid_unique_C11',
'userid_mean_C11', 'userid_min_C12', 'userid_max_C12',
'userid_max_minus_min_C12', 'userid_unique_C12', 'userid_mean_C12',
'userid_min_C13', 'userid_max_C13', 'userid_max_minus_min_C13',
'userid_unique_C13', 'userid_mean_C13', 'userid_min_C14',
'userid_max_C14', 'userid_max_minus_min_C14', 'userid_unique_C14',
'userid_mean_C14', 'hour', 'hour_sin', 'week', 'week_sin', 'week_cos',
'month', 'life_of_customer', 'addr1_broad_area',
'uid6_TransactionAmt_mean', 'uid6_TransactionAmt_std',
'hour_TransactionAmt_mean', 'hour_TransactionAmt_std',
'week_TransactionAmt_mean', 'week_TransactionAmt_std', 'D1_diff',
'D10_diff', 'D15_diff', 'new_identity_M5_mean', 'new_identity_M6_mean',
'new_identity_V315_mean', 'new_identity_D1_diff_mean',
'new_identity_D3_mean', 'new_identity_D10_diff_mean',
'new_identity_D15_diff_mean', 'addr1_addr2_new_identity_M5_mean_mean',
'addr1_addr2_new_identity_M5_mean_std',
'addr1_addr2_new_identity_M6_mean_mean',
'addr1_addr2_new_identity_M6_mean_std',
'addr1_addr2_new_identity_V315_mean_mean',
'addr1_addr2_new_identity_V315_mean_std',
'addr1_addr2_new_identity_D1_diff_mean_mean',
'addr1_addr2_new_identity_D1_diff_mean_std',
'addr1_addr2_new_identity_D10_diff_mean_mean',
'addr1_addr2_new_identity_D10_diff_mean_std',
'addr1_addr2_new_identity_D15_diff_mean_mean',
'addr1_addr2_new_identity_D15_diff_mean_std',
'new_identity_ProductCD_TransactionAmt_mean', 'uid6_C1_mean',
'uid6_C1_std', 'uid6_V54_mean', 'uid6_V54_std', 'uid6_V281_mean',
'uid6_V281_std', 'uid6_C11_mean', 'uid6_C11_std', 'uid6_D4_mean',
'uid6_D4_std', 'uid6_V67_mean', 'uid6_V67_std', 'uid6_V320_mean',
'uid6_V320_std', 'uid6_M5_mean', 'uid6_M5_std', 'uid6_M6_mean',
'uid6_M6_std', 'uid3_V67_mean', 'uid3_V67_std', 'uid3_V83_mean',
'uid3_V83_std', 'uid6_fq_enc', 'card4_fq_enc', 'card6_fq_enc',
'ProductCD_fq_enc', 'M4_fq_enc', 'addr_fq_enc', 'R_emaildomain_V118_mean',
'R_emaildomain_V118_std', 'R_emaildomain_V119_mean',
'R_emaildomain_V119_std', 'card1_V20_mean', 'card1_V20_std',
'card1_V151_mean', 'card1_V151_std', 'card1_V67_mean', 'card1_V67_std',
'hour_V116_mean', 'hour_V116_std', 'V1max', 'V2max', 'V3max', 'V4max',
'V5max', 'V6max', 'V7max', 'V8max', 'V9max', 'V10max', 'V11max', 'V12max',
'V13max', 'V14max', 'V15max', 'V16max', 'V17max', 'V18max', 'V19max',
'V20max', 'V21max', 'V22max', 'V23max', 'V24max', 'V25max', 'V26max',
'V27max', 'V28max', 'V29max', 'V30max', 'V31max', 'V32max', 'V33max',
'V34max', 'V35max', 'V36max', 'V37max', 'V38max', 'V39max', 'V40max',
'V41max', 'V42max', 'V43max', 'V44max', 'V45max', 'V46max', 'V47max',
'V48max', 'V49max', 'V50max', 'V51max', 'V52max', 'V53max', 'V54max',
'V55max', 'V56max', 'V57max', 'V58max', 'V59max', 'V60max', 'V61max',
'V62max', 'V63max', 'V64max', 'V65max', 'V66max', 'V67max', 'V68max',
'V69max', 'V70max', 'V71max', 'V72max', 'V73max', 'V74max', 'V75max',
'V76max', 'V77max', 'V78max', 'V79max', 'V80max', 'V81max', 'V82max',
'V83max', 'V84max', 'V85max', 'V86max', 'V87max', 'V88max', 'V89max',
'V90max', 'V91max', 'V92max', 'V93max', 'V94max', 'V95max', 'V96max',
'V97max', 'V98max', 'V99max', 'V100max', 'V101max', 'V102max', 'V103max',
'V104max', 'V105max', 'V106max', 'V107max', 'V108max', 'V109max',
'V110max', 'V111max', 'V112max', 'V113max', 'V114max', 'V115max',
'V116max', 'V117max', 'V118max', 'V119max', 'V120max', 'V121max',
'V122max', 'V123max', 'V124max', 'V125max', 'V126max', 'V127max',
'V128max', 'V129max', 'V130max', 'V131max', 'V132max', 'V133max',
'V134max', 'V135max', 'V136max', 'V137max', 'V138max', 'V139max',
'V140max', 'V141max', 'V142max', 'V143max', 'V144max', 'V145max',
'V146max', 'V147max', 'V148max', 'V149max', 'V150max', 'V151max',
'V152max', 'V153max', 'V154max', 'V155max', 'V156max', 'V157max',
'V158max', 'V159max', 'V160max', 'V161max', 'V162max', 'V163max',
'V164max', 'V165max', 'V166max', 'V167max', 'V168max', 'V169max',
'V170max', 'V171max', 'V172max', 'V173max', 'V174max', 'V175max',
'V176max', 'V177max', 'V178max', 'V179max', 'V180max', 'V181max',
'V182max', 'V183max', 'V184max', 'V185max', 'V186max', 'V187max',
'V188max', 'V189max', 'V190max', 'V191max', 'V192max', 'V193max',
'V194max', 'V195max', 'V196max', 'V197max', 'V198max', 'V199max',
'V200max', 'V201max', 'V202max', 'V203max', 'V204max', 'V205max',
'V206max', 'V207max', 'V208max', 'V209max', 'V210max', 'V211max',
'V212max', 'V213max', 'V214max', 'V215max', 'V216max', 'V217max',
'V218max', 'V219max', 'V220max', 'V221max', 'V222max', 'V223max',
'V224max', 'V225max', 'V226max', 'V227max', 'V228max', 'V229max',
'V230max', 'V231max', 'V232max', 'V233max', 'V234max', 'V235max',
'V236max', 'V237max', 'V238max', 'V239max', 'V240max', 'V241max',
'V242max', 'V243max', 'V244max', 'V245max', 'V246max', 'V247max',
'V248max', 'V249max', 'V250max', 'V251max', 'V252max', 'V253max',
'V254max', 'V255max', 'V256max', 'V257max', 'V258max', 'V259max',
'V260max', 'V261max', 'V262max', 'V263max', 'V264max', 'V265max',
'V266max', 'V267max', 'V268max', 'V269max', 'V270max', 'V271max',
'V272max', 'V273max', 'V274max', 'V275max', 'V276max', 'V277max',
'V278max', 'V279max', 'V280max', 'V281max', 'V282max', 'V283max',
'V284max', 'V285max', 'V286max', 'V287max', 'V288max', 'V289max',
'V290max', 'V291max', 'V292max', 'V293max', 'V294max', 'V295max',
'V296max', 'V297max', 'V298max', 'V299max', 'V300max', 'V301max',
'V302max', 'V303max', 'V304max', 'V305max', 'V306max', 'V307max',
'V308max', 'V309max', 'V310max', 'V311max', 'V312max', 'V313max',
'V314max', 'V315max', 'V316max', 'V317max', 'V318max', 'V319max',
'V320max', 'V321max', 'V322max', 'V323max', 'V324max', 'V325max',
'V326max', 'V327max', 'V328max', 'V329max', 'V330max', 'V331max',
'V332max', 'V333max', 'V334max', 'V335max', 'V336max', 'V337max',
'V338max', 'V339max', 'ntrans', 'min_amt', 'mean_amt', 'max_amt',
'num_trans_ints', 'minC1', 'minC2', 'minC3', 'minC4', 'minC5', 'minC6',
'minC7', 'minC8', 'minC9', 'minC10', 'minC11', 'minC12', 'minC13',
'minC14', 'maxC1', 'maxC2', 'maxC3', 'maxC4', 'maxC5', 'maxC6', 'maxC7',
'maxC8', 'maxC9', 'maxC10', 'maxC11', 'maxC12', 'maxC13', 'maxC14',
'countC1_inc', 'countC2_inc', 'countC3_inc', 'countC4_inc', 'countC5_inc',
'countC6_inc', 'countC7_inc', 'countC8_inc', 'countC9_inc',
'countC10_inc', 'countC11_inc', 'countC12_inc', 'countC13_inc',
'countC14_inc', 'ndistM1', 'ndistM2', 'ndistM3', 'ndistM4', 'ndistM5',
'ndistM6', 'ndistM7', 'ndistM8', 'ndistM9']
df_av = pd.read_csv('../notebooks/AV/av002-output.csv')
BAD_AV_FEATURES = df_av.loc[df_av['cv'].replace('Running',1) >= AV_THRESHOLD]['feature'].tolist()
FEATURES = [f for f in FEATURES if f not in BAD_AV_FEATURES]
CAT_FEATURES = ['ProductCD', 'card4', 'card6', 'id_12', 'id_13', 'id_14', 'id_15', 'id_16',
'id_17', 'id_18', 'id_19', 'id_20', 'id_21', 'id_22', 'id_23', 'id_24',
'id_25', 'id_26', 'id_27', 'id_28', 'id_29', 'id_32', 'id_34', 'id_35',
'id_36', 'id_37', 'id_38', 'DeviceType', 'DeviceInfo', 'M4','P_emaildomain',
'R_emaildomain', 'addr1', 'addr2', 'M1', 'M2', 'M3', 'M5', 'M6', 'M7', 'M8',
'M9', 'ProductCD_W_95cents','ProductCD_W_00cents','ProductCD_W_50cents',
'ProductCD_W_50_95_0_cents','ProductCD_W_NOT_50_95_0_cents']
CAT_FEATURES = [c for c in CAT_FEATURES if c in FEATURES]
X = train_df[FEATURES].copy()
y = train_df[TARGET].copy()
X_test = test_df[FEATURES].copy()
X = X.fillna(-9999)
X_test = X_test.fillna(-9999)
logger.info('Running with features...')
logger.info(FEATURES)
logger.info(f'Target is {TARGET}')
update_tracking(run_id, "n_features", len(FEATURES), integer=True)
############################
#### TRAIN MODELS FUNCTIONS
############################
def train_catboost(X_train, y_train, X_valid, y_valid, X_test, CAT_FEATURES, fold_n, feature_importance):
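    """Train a CatBoost model on a single fold and return test predictions,
    validation predictions, the accumulated feature importance frame and the
    best iteration found by early stopping."""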
train_dataset = Pool(data=X_train, label=y_train, cat_features=CAT_FEATURES)
valid_dataset = Pool(data=X_valid, label=y_valid, cat_features=CAT_FEATURES)
test_dataset = Pool(data=X_test, cat_features=CAT_FEATURES)
model = CatBoostClassifier(
iterations=N_ESTIMATORS,
learning_rate=LEARNING_RATE,
depth=DEPTH,
eval_metric=EVAL_METRIC,
verbose=VERBOSE,
random_state=RANDOM_STATE,
thread_count=N_THREADS,
task_type="GPU")
model.fit(
train_dataset,
eval_set=valid_dataset,
early_stopping_rounds=EARLY_STOPPING_ROUNDS,
)
y_pred_valid = model.predict_proba(valid_dataset)[:,1]
y_pred = model.predict_proba(test_dataset)[:,1]
fold_importance = pd.DataFrame()
fold_importance["feature"] = model.feature_names_
fold_importance["importance"] = model.get_feature_importance()
fold_importance["fold"] = fold_n + 1
feature_importance = pd.concat([feature_importance, fold_importance],
axis=0)
best_iteration = model.best_iteration_
return y_pred, y_pred_valid, feature_importance, best_iteration
lgb_params = {
'objective':'binary',
'boosting_type':'gbdt',
'metric': EVAL_METRIC,
'n_jobs':N_THREADS,
'learning_rate':LEARNING_RATE,
'num_leaves': 2**8,
'max_depth':DEPTH,
'tree_learner':'serial',
'colsample_bytree': 0.85,
'subsample_freq':1,
'subsample':0.85,
'n_estimators':N_ESTIMATORS,
'max_bin':255,
'verbose':-1,
'seed': RANDOM_STATE,
#'early_stopping_rounds':EARLY_STOPPING_ROUNDS,
'reg_alpha':0.3,
    'reg_lambda':0.243,
#'categorical_feature': CAT_FEATURES
}
# lgb_params = {
# 'min_data_in_leaf': 106,
# 'num_leaves': 500,
# 'learning_rate': LEARNING_RATE, #0.008,
# 'min_child_weight': 0.03454472573214212,
# 'bagging_fraction': 0.4181193142567742,
# 'feature_fraction': 0.3797454081646243,
# 'reg_lambda': 0.6485237330340494,
# 'reg_alpha': 0.3899927210061127,
# 'max_depth': DEPTH, #-1,
# 'objective': 'binary',
# 'seed': RANDOM_STATE, #13,
# 'feature_fraction_seed': RANDOM_STATE, #13,
# 'bagging_seed': RANDOM_STATE, #13,
# 'drop_seed': RANDOM_STATE, #13,
# 'data_random_seed': RANDOM_STATE, #13,
# 'boosting_type': 'gbdt',
# 'verbose': 1,
# 'metric':'auc',
# 'n_estimators':N_ESTIMATORS,
# }
def train_lightgbm(X_train, y_train, X_valid, y_valid, X_test, CAT_FEATURES, fold_n, feature_importance):
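    """Train a LightGBM model on a single fold (casting categorical columns to
    the category dtype) and return test predictions, validation predictions,
    the accumulated feature importance frame and the best iteration."""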
X_train = X_train.copy()
X_valid = X_valid.copy()
X_test = X_test.copy()
if len(CAT_FEATURES) > 0:
X_train[CAT_FEATURES] = X_train[CAT_FEATURES].astype('category')
X_valid[CAT_FEATURES] = X_valid[CAT_FEATURES].astype('category')
X_test[CAT_FEATURES] = X_test[CAT_FEATURES].astype('category')
model = lgb.LGBMClassifier(**lgb_params)
model.fit(X_train, y_train,
eval_set = [(X_train, y_train),
(X_valid, y_valid)],
verbose = VERBOSE,
early_stopping_rounds=EARLY_STOPPING_ROUNDS)
y_pred_valid = model.predict_proba(X_valid)[:,1]
y_pred = model.predict_proba(X_test)[:,1]
fold_importance = pd.DataFrame()
fold_importance["feature"] = X_train.columns
fold_importance["importance"] = model.feature_importances_
fold_importance["fold"] = fold_n + 1
feature_importance = pd.concat([feature_importance, fold_importance],
axis=0)
best_iteration = model.best_iteration_
return y_pred, y_pred_valid, feature_importance, best_iteration
################################
# Dataframes for storing results
#################################
feature_importance = pd.DataFrame()
oof = np.zeros(len(X))
pred = np.zeros(len(X_test))
oof_df = train_df[['isFraud']].copy()
oof_df['oof'] = np.nan
oof_df['fold'] = np.nan
scores = []
best_iterations = []
del train_df, test_df
gc.collect()
for fold_n, (train_idx, valid_idx) in enumerate(folds.split(X, y)):
X_train = X.iloc[train_idx]
y_train = y.iloc[train_idx]
X_valid = X.iloc[valid_idx]
y_valid = y.iloc[valid_idx]
if MODEL_TYPE == "catboost":
y_pred, y_pred_valid, feature_importance, best_iteration = train_catboost(X_train, y_train, X_valid, y_valid, X_test, CAT_FEATURES, fold_n, feature_importance)
if MODEL_TYPE == 'lightgbm':
y_pred, y_pred_valid, feature_importance, best_iteration = train_lightgbm(X_train, y_train, X_valid, y_valid, X_test, CAT_FEATURES, fold_n, feature_importance)
best_iterations.append(best_iteration)
fold_score = roc_auc_score(y_valid, y_pred_valid)
scores.append(fold_score)
update_tracking(run_id, "AUC_f{}".format(fold_n + 1),
fold_score,
integer=False,)
logger.info('Fold {} of {} CV mean AUC score: {:.4f}. Best iteration {}'.format(fold_n + 1,
N_FOLDS,
fold_score,
best_iteration))
oof_df.iloc[valid_idx, oof_df.columns.get_loc('oof')] = y_pred_valid.reshape(-1)
oof_df.iloc[valid_idx, oof_df.columns.get_loc('fold')] = fold_n + 1
pred += y_pred
update_tracking(run_id, 'avg_best_iteration',
np.mean(best_iterations),
integer=True)
###############
# Store Results
###############
pred /= N_FOLDS
score = np.mean(scores)
sub = pd.read_csv('../input/sample_submission.csv')
sub['isFraud'] = pred
sub.to_csv(f'../sub/sub_{MODEL_NUMBER}_{run_id}_{score:.4f}.csv', index=False)
oof_df.to_csv(f'../oof/oof_{MODEL_NUMBER}_{run_id}_{score:.4f}.csv')
logger.info('CV mean AUC score: {:.4f}, std: {:.4f}.'.format(np.mean(scores),
np.std(scores)))
total_score = roc_auc_score(oof_df['isFraud'], oof_df['oof'])
feature_importance.to_csv(f'../fi/fi_{MODEL_NUMBER}_{run_id}_{score:.4f}.csv')
update_tracking(run_id, "AUC",
total_score,
integer=False,)
logger.info('OOF AUC Score: {:.4f}'.format(total_score))
end = timer()
update_tracking(run_id, "training_time", (end - start), integer=True)
logger.info('Done!')
| []
| []
| [
"TZ"
]
| [] | ["TZ"] | python | 1 | 0 | |
mdb/server_info_test.go | // Copyright 2018 Kuei-chun Chen. All rights reserved.
package mdb
import (
"context"
"encoding/json"
"os"
"testing"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
var UnitTestURL = "mongodb://localhost/?replicaSet=replset"
func getMongoClient() *mongo.Client {
var err error
var client *mongo.Client
if os.Getenv("DATABASE_URL") != "" {
UnitTestURL = os.Getenv("DATABASE_URL")
}
if client, err = mongo.Connect(context.Background(), options.Client().ApplyURI(UnitTestURL)); err != nil {
panic(err)
}
return client
}
func TestGetServerInfo(t *testing.T) {
var err error
var client *mongo.Client
var info ServerInfo
client = getMongoClient()
defer client.Disconnect(context.Background())
if info, err = GetServerInfo(client); err != nil {
t.Fatal(err)
}
if err != nil {
t.Fatal(err)
}
bytes, _ := json.MarshalIndent(info, "", " ")
t.Log(string(bytes))
}
func TestListDatabaseNames(t *testing.T) {
client := getMongoClient()
defer client.Disconnect(context.Background())
_, err := ListDatabaseNames(client)
if err != nil {
t.Fatal(err)
}
}
| [
"\"DATABASE_URL\"",
"\"DATABASE_URL\""
]
| []
| [
"DATABASE_URL"
]
| [] | ["DATABASE_URL"] | go | 1 | 0 | |
flagging_site/data/_store/refresh.py | """The data store contains offline versions of the data so that you can run a
demo version of the website without the vault keys, or simply develop parts of
the website that don't require actively updated data without having to worry.
This data is used for the actual website when the `USE_MOCK_DATA` config
variable is True. It is useful for dev, but it should never be used in
production.
This file is a CLI to refresh the data store. You can run it with:
`python flagging_site/data/_store/refresh.py`
"""
import os
import sys
from typing import Optional
import click
DATA_STORE_PATH = os.path.dirname(__file__)
@click.command()
@click.option('--vault_password',
prompt=True,
default=lambda: os.environ.get('VAULT_PASSWORD', None))
def refresh_data_store(vault_password: Optional[str] = None) -> None:
"""When run, this function runs all the functions that compose the data
store. The app itself should not be running this function; in fact, this
function will raise an error if the app is turned on. This should only be
run from the command line or a Python console.
"""
os.environ['USE_MOCK_DATA'] = 'false'
if vault_password:
os.environ['VAULT_PASSWORD'] = vault_password
from flask import current_app
if current_app:
raise Exception('The app should not be running when the data store is '
'being refreshed.')
from flagging_site.data.hobolink import get_live_hobolink_data
from flagging_site.data.hobolink import HOBOLINK_STATIC_FILE_NAME
get_live_hobolink_data('code_for_boston_export_21d')\
.to_pickle(os.path.join(DATA_STORE_PATH, HOBOLINK_STATIC_FILE_NAME))
from flagging_site.data.usgs import get_live_usgs_data
from flagging_site.data.usgs import USGS_STATIC_FILE_NAME
get_live_usgs_data()\
.to_pickle(os.path.join(DATA_STORE_PATH, USGS_STATIC_FILE_NAME))
if __name__ == '__main__':
sys.path.append('.')
refresh_data_store()
| []
| []
| [
"USE_MOCK_DATA",
"VAULT_PASSWORD"
]
| [] | ["USE_MOCK_DATA", "VAULT_PASSWORD"] | python | 2 | 0 | |
tcex/app_config/tcex_json_update.py | """TcEx JSON Update"""
# standard library
import os
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING: # pragma: no cover
from .tcex_json import TcexJson
class TcexJsonUpdate:
"""Update install.json file with current standards and schema."""
def __init__(self, tj: 'TcexJson') -> None: # pylint: disable=E0601
"""Initialize class properties."""
self.tj = tj
def multiple(self, template: Optional[str] = None) -> None:
"""Update the contents of the tcex.json file."""
# update app_name
self.update_package_app_name()
# update deprecated fields
# self.update_deprecated_fields()
# update package excludes
self.update_package_excludes()
# update package excludes
self.update_lib_versions()
# update template
if template is not None:
self.tj.template = template
# write updated profile
self.tj.write()
def update_package_app_name(self) -> None:
"""Update the package app_name in the tcex.json file."""
if (
self.tj.model.package.app_name is None
or self.tj.model.package.app_name in self.tj.ij.app_prefixes.values()
):
# lower case name and replace prefix if already exists
_app_name = (
os.path.basename(os.getcwd()).lower().replace(self.tj.ij.app_prefix.lower(), '')
)
# replace spaces and dashes with underscores
_app_name = _app_name.replace(' ', '_').replace('-', '_').lower()
# title case app name
_app_name = '_'.join([a.title() for a in _app_name.split('_')])
# prepend appropriate App prefix (e.g., TCPB_-_)
_app_name = f'{self.tj.ij.app_prefix}{_app_name}'
# update App name
self.tj.model.package.app_name = _app_name
# def update_deprecated_fields(self) -> None:
# """Update deprecated fields in the tcex.json file."""
# deprecated_fields = ['profile_include_dirs']
# for d in deprecated_fields:
# setattr(self.tj.model, d, None)
def update_package_excludes(self) -> None:
"""Update the excludes values in the tcex.json file."""
for i in [
'.gitignore',
'.pre-commit-config.yaml',
'local-*',
'pyproject.toml',
'setup.cfg',
'tcex.json',
]:
if i not in self.tj.model.package.excludes:
# TODO: [low] pydantic doesn't seem to allow removing items from list???
self.tj.model.package.excludes.append(i)
def update_lib_versions(self) -> None:
"""Update the lib_versions array in the tcex.json file."""
if os.getenv('TCEX_LIB_VERSIONS') and not self.tj.model.lib_versions:
_lib_versions = []
for version in os.getenv('TCEX_LIB_VERSIONS').split(','):
_lib_versions.append(
{
'lib_dir': f'lib_${{env:{version}}}',
'python_executable': f'~/.pyenv/versions/${{env:{version}}}/bin/python',
}
)
self.tj.model.lib_versions = _lib_versions
| []
| []
| [
"TCEX_LIB_VERSIONS"
]
| [] | ["TCEX_LIB_VERSIONS"] | python | 1 | 0 | |
mvt/feature.go | package mvt
import (
"context"
"encoding/json"
"fmt"
"os"
"github.com/go-spatial/tegola"
"github.com/go-spatial/tegola/basic"
"github.com/go-spatial/tegola/geom/encoding/wkt"
"github.com/go-spatial/tegola/internal/convert"
"github.com/go-spatial/tegola/internal/log"
"github.com/go-spatial/tegola/maths"
"github.com/go-spatial/tegola/maths/points"
"github.com/go-spatial/tegola/maths/validate"
"github.com/go-spatial/tegola/mvt/vector_tile"
)
// errors
var (
ErrNilFeature = fmt.Errorf("Feature is nil")
	// ErrUnknownGeometryType is the error returned when the geometry is unknown.
ErrUnknownGeometryType = fmt.Errorf("Unknown geometry type")
ErrNilGeometryType = fmt.Errorf("Nil geometry passed")
)
// TODO: Need to put in validation for the Geometry. Currently the system
// does not check that the geometry follows the rules laid out by the spec;
// it just assumes the user is good.
// Feature describes a feature of a Layer. A layer will contain multiple features
// each of which has a geometry describing the interesting thing, and the metadata
// associated with it.
type Feature struct {
ID *uint64
Tags map[string]interface{}
	// Does not support the collection geometry; for that you have to create a feature for each
	// geometry in the collection.
Geometry tegola.Geometry
	// Unsimplifed indicates whether the Geometry is already simple and thus does not need to be simplified.
Unsimplifed *bool
}
func wktEncode(g tegola.Geometry) string {
gg, err := convert.ToGeom(g)
if err != nil {
return fmt.Sprintf("error converting tegola geom to geom geom, %v", err)
}
s, err := wkt.Encode(gg)
if err != nil {
return fmt.Sprintf("encoding error for geom geom, %v", err)
}
return s
}
func (f Feature) String() string {
g := wktEncode(f.Geometry)
if f.ID != nil {
return fmt.Sprintf("{Feature: %v, GEO: %v, Tags: %+v}", *f.ID, g, f.Tags)
}
return fmt.Sprintf("{Feature: GEO: %v, Tags: %+v}", g, f.Tags)
}
// NewFeatures returns one or more features for the given Geometry.
// TODO: Should we consider supporting validation of polygons and multiple polygons here?
func NewFeatures(geo tegola.Geometry, tags map[string]interface{}) (f []Feature) {
if geo == nil {
return f // return empty feature set for a nil geometry
}
if g, ok := geo.(tegola.Collection); ok {
geos := g.Geometries()
for i := range geos {
f = append(f, NewFeatures(geos[i], tags)...)
}
return f
}
f = append(f, Feature{
Tags: tags,
Geometry: geo,
})
return f
}
// VTileFeature will return a vectorTile.Feature that would represent the Feature
func (f *Feature) VTileFeature(ctx context.Context, keys []string, vals []interface{}, tile *tegola.Tile, simplify bool) (tf *vectorTile.Tile_Feature, err error) {
tf = new(vectorTile.Tile_Feature)
tf.Id = f.ID
if tf.Tags, err = keyvalTagsMap(keys, vals, f); err != nil {
return tf, err
}
geo, gtype, err := encodeGeometry(ctx, f.Geometry, tile, simplify)
if err != nil {
return tf, err
}
if len(geo) == 0 {
return nil, nil
}
tf.Geometry = geo
	tf.Type = &gtype
return tf, nil
}
// These values came from: https://github.com/mapbox/vector-tile-spec/tree/master/2.1
const (
cmdMoveTo uint32 = 1
cmdLineTo uint32 = 2
cmdClosePath uint32 = 7
maxCmdCount uint32 = 0x1FFFFFFF
)
type Command uint32
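// NewCommand packs a command ID and a repeat count into a single command
// integer as described by the vector-tile spec: the low three bits hold the
// ID and the remaining bits hold the count. For example, a MoveTo (id 1) with
// count 1 encodes to 9, and a LineTo (id 2) with count 3 encodes to 26.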
func NewCommand(cmd uint32, count int) Command {
return Command((cmd & 0x7) | (uint32(count) << 3))
}
func (c Command) ID() uint32 {
return uint32(c) & 0x7
}
func (c Command) Count() int {
return int(uint32(c) >> 3)
}
func (c Command) String() string {
switch c.ID() {
case cmdMoveTo:
return fmt.Sprintf("move Command with count %v", c.Count())
case cmdLineTo:
return fmt.Sprintf("line To command with count %v", c.Count())
case cmdClosePath:
return fmt.Sprintf("close path command with count %v", c.Count())
default:
return fmt.Sprintf("unknown command (%v) with count %v", c.ID(), c.Count())
}
}
// encodeZigZag does the ZigZag encoding for small ints.
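// For example: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4.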
func encodeZigZag(i int64) uint32 {
return uint32((i << 1) ^ (i >> 31))
}
// cursor represents the current position; this is needed to encode the geometry.
// 0,0 is the origin, which is the top-left-most part of the tile.
type cursor struct {
	// The coordinates. These need to be int64; when they were float64 they
	// introduced a slight drift in the coordinates.
x int64
y int64
// The tile of the screen.
tile *tegola.Tile
	// DisableScaling disables scaling. Use this when clipping and scaling are handled separately.
DisableScaling bool
}
func NewCursor(tile *tegola.Tile) *cursor {
return &cursor{
tile: tile,
}
}
// GetDeltaPointAndUpdate assumes the Point is in WebMercator.
func (c *cursor) GetDeltaPointAndUpdate(p tegola.Point) (dx, dy int64) {
var ix, iy int64
var tx, ty = p.X(), p.Y()
	// TODO (gdey): we should get rid of this, as we generally disable scaling now.
if !c.DisableScaling {
tpt, err := c.tile.ToPixel(tegola.WebMercator, [2]float64{tx, ty})
if err != nil {
			// Conversion error most likely; need to panic.
panic(err)
}
tx, ty = tpt[0], tpt[1]
}
ix, iy = int64(tx), int64(ty)
// compute our point delta
dx = ix - int64(c.x)
dy = iy - int64(c.y)
// update our cursor
c.x = ix
c.y = iy
return dx, dy
}
func (c *cursor) scalept(g tegola.Point) basic.Point {
pt, err := c.tile.ToPixel(tegola.WebMercator, [2]float64{g.X(), g.Y()})
if err != nil {
panic(err)
}
return basic.Point{pt[0], pt[1]}
}
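// chk3Pts reports how many of the three consecutive points are worth keeping:
// 1 when the first and third points are equal, 2 when two adjacent points are
// equal, and 3 when all three points are distinct.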
func chk3Pts(pt1, pt2, pt3 basic.Point) int {
// If the first and third points are equal we only care about
// the first point.
if tegola.IsPointEqual(pt1, pt3) {
return 1
}
if tegola.IsPointEqual(pt1, pt2) || tegola.IsPointEqual(pt2, pt3) {
return 2
}
return 3
}
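// cleanLine removes duplicate and collapsed points from a ring, re-running
// the cleaning pass until a pass removes no further points.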
func cleanLine(ols basic.Line) (newline basic.Line) {
ls := ols
loop := 0
Restart:
count := 0
//log.Println("Line:", ls.GoString())
if len(ls) < 3 {
for i := range ls {
newline = append(newline, ls[i])
}
return newline
}
for i := 0; i < len(ls); i = i + 1 {
//log.Println(len(ls), "I:", i)
j, k := i+1, i+2
switch {
case i == len(ls)-2:
k = 0
case i == len(ls)-1:
j, k = 0, 1
}
// Always add the first point.
addFirstPt := true
skip := 3 - chk3Pts(ls[i], ls[j], ls[k])
//log.Println("Skip returned: ", skip, "I:", i)
switch {
case (k == 0 || k == 1) && skip == 2:
addFirstPt = false
case k == 1 && skip == 1:
// remove the first point from newline
newline = newline[1:]
case skip == 0:
count++
}
if addFirstPt {
newline = append(newline, ls[i])
}
i += skip
//log.Println(len(ls), "EI:", i)
}
//log.Println("Out of loop")
if len(ls) != count {
ls = newline
newline = basic.Line{}
loop++
if loop > 100 {
panic(fmt.Sprintf("infi (%v:%v)?\n%v\n%v", len(ls), count, ols.GoString(), ls.GoString()))
}
goto Restart
}
return newline
}
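// simplifyLineString applies Douglas-Peucker simplification to the line,
// unless the line is already short (four points or fewer) or its total length
// is below the tolerance, in which case a copy is returned unchanged.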
func simplifyLineString(g tegola.LineString, tolerance float64) basic.Line {
line := basic.CloneLine(g)
if len(line) <= 4 || maths.DistOfLine(g) < tolerance {
return line
}
pts := line.AsPts()
pts = maths.DouglasPeucker(pts, tolerance, true)
if len(pts) == 0 {
return nil
}
return basic.NewLineTruncatedFromPt(pts...)
}
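// normalizePoints drops a leading point that duplicates the closing point
// and, when more than four points remain, removes points that do not change
// the slope of the ring.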
func normalizePoints(pts []maths.Pt) (pnts []maths.Pt) {
if pts[0] == pts[len(pts)-1] {
pts = pts[1:]
}
if len(pts) <= 4 {
return pts
}
lpt := 0
pnts = append(pnts, pts[0])
for i := 1; i < len(pts); i++ {
ni := i + 1
if ni >= len(pts) {
ni = 0
}
m1, _, sdef1 := points.SlopeIntercept(pts[lpt], pts[i])
m2, _, sdef2 := points.SlopeIntercept(pts[lpt], pts[ni])
if m1 != m2 || sdef1 != sdef2 {
pnts = append(pnts, pts[i])
}
}
return pnts
}
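// simplifyPolygon simplifies each ring of the polygon with Douglas-Peucker.
// A small or degenerate outer ring causes the whole polygon to be returned
// unsimplified or dropped, while small inner rings are kept as-is and
// degenerate inner rings are dropped.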
func simplifyPolygon(g tegola.Polygon, tolerance float64, simplify bool) basic.Polygon {
lines := g.Sublines()
if len(lines) <= 0 {
return nil
}
var poly basic.Polygon
sqTolerance := tolerance * tolerance
// First lets look the first line, then we will simplify the other lines.
for i := range lines {
area := maths.AreaOfPolygonLineString(lines[i])
l := basic.CloneLine(lines[i])
if area < sqTolerance {
if i == 0 {
return basic.ClonePolygon(g)
}
// don't simplify the internal line
poly = append(poly, l)
continue
}
pts := l.AsPts()
if len(pts) <= 2 {
if i == 0 {
return nil
}
continue
}
pts = normalizePoints(pts)
// If the last point is the same as the first, remove the first point.
if len(pts) <= 4 {
if i == 0 {
return basic.ClonePolygon(g)
}
poly = append(poly, l)
continue
}
pts = maths.DouglasPeucker(pts, sqTolerance, simplify)
if len(pts) <= 2 {
if i == 0 {
return nil
}
//log.Println("\t Skipping polygon subline.")
continue
}
poly = append(poly, basic.NewLineTruncatedFromPt(pts...))
}
if len(poly) == 0 {
return nil
}
return poly
}
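// SimplifyGeometry simplifies polygons, multi-polygons, line strings and
// multi-lines using the given tolerance; other geometry types, nil geometries
// and calls with simplify set to false return the geometry unchanged.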
func SimplifyGeometry(g tegola.Geometry, tolerance float64, simplify bool) tegola.Geometry {
if !simplify || g == nil {
return g
}
switch gg := g.(type) {
case tegola.Polygon:
return simplifyPolygon(gg, tolerance, simplify)
case tegola.MultiPolygon:
var newMP basic.MultiPolygon
for _, p := range gg.Polygons() {
sp := simplifyPolygon(p, tolerance, simplify)
if sp == nil {
continue
}
newMP = append(newMP, sp)
}
if len(newMP) == 0 {
return nil
}
return newMP
case tegola.LineString:
return simplifyLineString(gg, tolerance)
case tegola.MultiLine:
var newML basic.MultiLine
for _, l := range gg.Lines() {
sl := simplifyLineString(l, tolerance)
if sl == nil {
continue
}
newML = append(newML, sl)
}
if len(newML) == 0 {
return nil
}
return newML
}
return g
}
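// scalelinestr projects a line string into the tile's pixel space, dropping
// consecutive duplicate points; it returns nil when fewer than two points
// remain.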
func (c *cursor) scalelinestr(g tegola.LineString) (ls basic.Line) {
pts := g.Subpoints()
	// If the linestring has fewer than two points it cannot form a line.
if len(pts) < 2 {
		// Not enough points to make a line.
return nil
}
ls = make(basic.Line, 0, len(pts))
ls = append(ls, c.scalept(pts[0]))
lidx := len(ls) - 1
for i := 1; i < len(pts); i++ {
npt := c.scalept(pts[i])
if tegola.IsPointEqual(ls[lidx], npt) {
// drop any duplicate points.
continue
}
ls = append(ls, npt)
lidx = len(ls) - 1
}
if len(ls) < 2 {
		// Not enough points. The zoom must be too far out for this ring.
return nil
}
return ls
}
func (c *cursor) scalePolygon(g tegola.Polygon) (p basic.Polygon) {
lines := g.Sublines()
p = make(basic.Polygon, 0, len(lines))
if len(lines) == 0 {
return p
}
for i := range lines {
ln := c.scalelinestr(lines[i])
if len(ln) < 2 {
if debug {
				// skip lines that have been reduced to fewer than 2 points.
log.Debug("skipping line 2", lines[i], len(ln))
}
continue
}
p = append(p, ln)
}
return p
}
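// ScaleGeo projects a geometry from WebMercator into the tile's pixel
// coordinate space, removing duplicate points and degenerate sub-geometries.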
func (c *cursor) ScaleGeo(geo tegola.Geometry) basic.Geometry {
switch g := geo.(type) {
case tegola.Point:
return c.scalept(g)
case tegola.Point3:
return c.scalept(g)
case tegola.MultiPoint:
pts := g.Points()
if len(pts) == 0 {
return nil
}
var ptmap = make(map[basic.Point]struct{})
var mp = make(basic.MultiPoint, 0, len(pts))
mp = append(mp, c.scalept(pts[0]))
ptmap[mp[0]] = struct{}{}
for i := 1; i < len(pts); i++ {
npt := c.scalept(pts[i])
if _, ok := ptmap[npt]; ok {
// Skip duplicate points.
continue
}
ptmap[npt] = struct{}{}
mp = append(mp, npt)
}
return mp
case tegola.LineString:
return c.scalelinestr(g)
case tegola.MultiLine:
var ml basic.MultiLine
for _, l := range g.Lines() {
nl := c.scalelinestr(l)
if len(nl) > 0 {
ml = append(ml, nl)
}
}
return ml
case tegola.Polygon:
return c.scalePolygon(g)
case tegola.MultiPolygon:
var mp basic.MultiPolygon
for _, p := range g.Polygons() {
np := c.scalePolygon(p)
if len(np) > 0 {
mp = append(mp, np)
}
}
return mp
}
return basic.G{}
}
type geoDebugStruct struct {
Min maths.Pt `json:"min"`
Max maths.Pt `json:"max"`
Geo basic.Geometry `json:"geo"`
}
func createDebugFile(min, max maths.Pt, geo tegola.Geometry, err error) {
fln := os.Getenv("GenTestCase")
if fln == "" {
return
}
filename := fmt.Sprintf("/tmp/testcase_%v_%p.json", fln, geo)
bgeo, err := basic.CloneGeometry(geo)
if err != nil {
log.Errorf("failed to clone geo for test case. %v", err)
return
}
f, err := os.Create(filename)
if err != nil {
log.Errorf("failed to create test file %v : %v.", filename, err)
return
}
defer f.Close()
geodebug := geoDebugStruct{
Max: max,
Min: min,
Geo: bgeo,
}
enc := json.NewEncoder(f)
enc.Encode(geodebug)
log.Infof("created file: %v", filename)
}
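// encodeCmd emits a single command integer followed by the zig-zag encoded
// x/y deltas of each point relative to the current cursor position.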
func (c *cursor) encodeCmd(cmd uint32, points []tegola.Point) []uint32 {
if len(points) == 0 {
return []uint32{}
}
	// new slice to hold the encoded values: 2 for each point plus a command integer.
g := make([]uint32, 0, (2*len(points))+1)
// add the command integer
g = append(g, cmd)
// range through our points
for _, p := range points {
dx, dy := c.GetDeltaPointAndUpdate(p)
// encode our delta point
g = append(g, encodeZigZag(dx), encodeZigZag(dy))
}
return g
}
func (c *cursor) MoveTo(points ...tegola.Point) []uint32 {
return c.encodeCmd(uint32(NewCommand(cmdMoveTo, len(points))), points)
}
func (c *cursor) LineTo(points ...tegola.Point) []uint32 {
return c.encodeCmd(uint32(NewCommand(cmdLineTo, len(points))), points)
}
func (c *cursor) ClosePath() uint32 {
return uint32(NewCommand(cmdClosePath, 1))
}
// encodeGeometry will take a tegola.Geometry type and encode it according to the
// mapbox vector_tile spec.
func encodeGeometry(ctx context.Context, geom tegola.Geometry, tile *tegola.Tile, simplify bool) (g []uint32, vtyp vectorTile.Tile_GeomType, err error) {
if geom == nil {
return nil, vectorTile.Tile_UNKNOWN, ErrNilGeometryType
}
// new cursor
c := NewCursor(tile)
// We are scaling separately, no need to scale in cursor.
c.DisableScaling = true
// Project Geom
// TODO: gdey: We need to separate out the transform, simplification, and clipping from the encoding process. #224
geo := c.ScaleGeo(geom)
sg := SimplifyGeometry(geo, tile.ZEpislon(), simplify)
pbb, err := tile.PixelBufferedBounds()
if err != nil {
return nil, vectorTile.Tile_UNKNOWN, err
}
ext := points.Extent(pbb)
geom, err = validate.CleanGeometry(ctx, sg, &ext)
if err != nil {
return nil, vectorTile.Tile_UNKNOWN, err
}
if geom == nil {
return []uint32{}, -1, nil
}
switch t := geom.(type) {
case tegola.Point:
g = append(g, c.MoveTo(t)...)
return g, vectorTile.Tile_POINT, nil
case tegola.Point3:
g = append(g, c.MoveTo(t)...)
return g, vectorTile.Tile_POINT, nil
case tegola.MultiPoint:
g = append(g, c.MoveTo(t.Points()...)...)
return g, vectorTile.Tile_POINT, nil
case tegola.LineString:
points := t.Subpoints()
g = append(g, c.MoveTo(points[0])...)
g = append(g, c.LineTo(points[1:]...)...)
return g, vectorTile.Tile_LINESTRING, nil
case tegola.MultiLine:
lines := t.Lines()
for _, l := range lines {
points := l.Subpoints()
g = append(g, c.MoveTo(points[0])...)
g = append(g, c.LineTo(points[1:]...)...)
}
return g, vectorTile.Tile_LINESTRING, nil
case tegola.Polygon:
// TODO: Right now c.ScaleGeo() never returns a Polygon, so this is dead code.
lines := t.Sublines()
for _, l := range lines {
points := l.Subpoints()
g = append(g, c.MoveTo(points[0])...)
g = append(g, c.LineTo(points[1:]...)...)
g = append(g, c.ClosePath())
}
return g, vectorTile.Tile_POLYGON, nil
case tegola.MultiPolygon:
polygons := t.Polygons()
for _, p := range polygons {
lines := p.Sublines()
for _, l := range lines {
points := l.Subpoints()
g = append(g, c.MoveTo(points[0])...)
g = append(g, c.LineTo(points[1:]...)...)
g = append(g, c.ClosePath())
}
}
return g, vectorTile.Tile_POLYGON, nil
default:
return nil, vectorTile.Tile_UNKNOWN, ErrUnknownGeometryType
}
}
// keyvalMapsFromFeatures returns a key map and value map, to help with the translation
// to mapbox tile format. In the Tile format, the Tile contains a mapping of all the unique
// keys and values, and then each feature contains a vector map to these two. This is an
// intermediate data structure to help with the construction of the three mappings.
func keyvalMapsFromFeatures(features []Feature) (keyMap []string, valMap []interface{}, err error) {
var didFind bool
for _, f := range features {
for k, v := range f.Tags {
didFind = false
for _, mk := range keyMap {
if k == mk {
didFind = true
break
}
}
if !didFind {
keyMap = append(keyMap, k)
}
didFind = false
switch vt := v.(type) {
default:
if vt == nil {
// ignore nil types
continue
}
return keyMap, valMap, fmt.Errorf("unsupported type for value(%v) with key(%v) in tags for feature %v.", vt, k, f)
case string:
for _, mv := range valMap {
tmv, ok := mv.(string)
if !ok {
continue
}
if tmv == vt {
didFind = true
break
}
}
case fmt.Stringer:
for _, mv := range valMap {
tmv, ok := mv.(fmt.Stringer)
if !ok {
continue
}
if tmv.String() == vt.String() {
didFind = true
break
}
}
case int:
for _, mv := range valMap {
tmv, ok := mv.(int)
if !ok {
continue
}
if tmv == vt {
didFind = true
break
}
}
case int8:
for _, mv := range valMap {
tmv, ok := mv.(int8)
if !ok {
continue
}
if tmv == vt {
didFind = true
break
}
}
case int16:
for _, mv := range valMap {
tmv, ok := mv.(int16)
if !ok {
continue
}
if tmv == vt {
didFind = true
break
}
}
case int32:
for _, mv := range valMap {
tmv, ok := mv.(int32)
if !ok {
continue
}
if tmv == vt {
didFind = true
break
}
}
case int64:
for _, mv := range valMap {
tmv, ok := mv.(int64)
if !ok {
continue
}
if tmv == vt {
didFind = true
break
}
}
case uint:
for _, mv := range valMap {
tmv, ok := mv.(uint)
if !ok {
continue
}
if tmv == vt {
didFind = true
break
}
}
case uint8:
for _, mv := range valMap {
tmv, ok := mv.(uint8)
if !ok {
continue
}
if tmv == vt {
didFind = true
break
}
}
case uint16:
for _, mv := range valMap {
tmv, ok := mv.(uint16)
if !ok {
continue
}
if tmv == vt {
didFind = true
break
}
}
case uint32:
for _, mv := range valMap {
tmv, ok := mv.(uint32)
if !ok {
continue
}
if tmv == vt {
didFind = true
break
}
}
case uint64:
for _, mv := range valMap {
tmv, ok := mv.(uint64)
if !ok {
continue
}
if tmv == vt {
didFind = true
break
}
}
case float32:
for _, mv := range valMap {
tmv, ok := mv.(float32)
if !ok {
continue
}
if tmv == vt {
didFind = true
break
}
}
case float64:
for _, mv := range valMap {
tmv, ok := mv.(float64)
if !ok {
continue
}
if tmv == vt {
didFind = true
break
}
}
case bool:
for _, mv := range valMap {
tmv, ok := mv.(bool)
if !ok {
continue
}
if tmv == vt {
didFind = true
break
}
}
} // value type switch
if !didFind {
valMap = append(valMap, v)
}
} // For f.Tags
} // for features
return keyMap, valMap, nil
}
// keyvalTagsMap will return the tags map as expected by the mapbox tile spec. It takes
// a keyMap and a valueMap that list the order of the expected keys and values. It will
// return a vector map that refers to these two maps.
func keyvalTagsMap(keyMap []string, valueMap []interface{}, f *Feature) (tags []uint32, err error) {
if f == nil {
return nil, ErrNilFeature
}
var kidx, vidx int64
for key, val := range f.Tags {
kidx, vidx = -1, -1 // Set to known not found value.
for i, k := range keyMap {
if k != key {
continue // move to the next key
}
kidx = int64(i)
break // we found a match
}
if kidx == -1 {
log.Errorf("did not find key (%v) in keymap.", key)
return tags, fmt.Errorf("did not find key (%v) in keymap.", key)
}
// if val is nil we skip it for now
// https://github.com/mapbox/vector-tile-spec/issues/62
if val == nil {
continue
}
for i, v := range valueMap {
switch tv := val.(type) {
default:
return tags, fmt.Errorf("value (%[1]v) of type (%[1]T) for key (%[2]v) is not supported.", tv, key)
case string:
vmt, ok := v.(string) // Make sure the type of the Value map matches the type of the Tag's value
if !ok || vmt != tv { // and that the values match
continue // if they don't match move to the next value.
}
case fmt.Stringer:
vmt, ok := v.(fmt.Stringer)
if !ok || vmt.String() != tv.String() {
continue
}
case int:
vmt, ok := v.(int)
if !ok || vmt != tv {
continue
}
case int8:
vmt, ok := v.(int8)
if !ok || vmt != tv {
continue
}
case int16:
vmt, ok := v.(int16)
if !ok || vmt != tv {
continue
}
case int32:
vmt, ok := v.(int32)
if !ok || vmt != tv {
continue
}
case int64:
vmt, ok := v.(int64)
if !ok || vmt != tv {
continue
}
case uint:
vmt, ok := v.(uint)
if !ok || vmt != tv {
continue
}
case uint8:
vmt, ok := v.(uint8)
if !ok || vmt != tv {
continue
}
case uint16:
vmt, ok := v.(uint16)
if !ok || vmt != tv {
continue
}
case uint32:
vmt, ok := v.(uint32)
if !ok || vmt != tv {
continue
}
case uint64:
vmt, ok := v.(uint64)
if !ok || vmt != tv {
continue
}
case float32:
vmt, ok := v.(float32)
if !ok || vmt != tv {
continue
}
case float64:
vmt, ok := v.(float64)
if !ok || vmt != tv {
continue
}
case bool:
vmt, ok := v.(bool)
if !ok || vmt != tv {
continue
}
} // Values Switch Statement
// if the values match let's record the index.
vidx = int64(i)
break // we found our value no need to continue on.
} // range on value
if vidx == -1 { // None of the values matched.
return tags, fmt.Errorf("did not find a value: %v in valuemap.", val)
}
tags = append(tags, uint32(kidx), uint32(vidx))
} // Move to the next tag key and value.
return tags, nil
}
| [
"\"GenTestCase\""
]
| []
| [
"GenTestCase"
]
| [] | ["GenTestCase"] | go | 1 | 0 | |
conf/settings.go | package conf
import (
"fmt"
"os"
)
// ProjectRoot base directory
var ProjectRoot = fmt.Sprintf("%s/src/github.com/deeper-x/goship", os.Getenv("GOPATH"))
// EnvFile .env location
var EnvFile = fmt.Sprintf("%s/.env", ProjectRoot)
// PRegisterSQL register dot file
var PRegisterSQL = fmt.Sprintf("%s/qsql/register.sql", ProjectRoot)
// PLiveSQL live data dot file
var PLiveSQL = fmt.Sprintf("%s/qsql/realtime.sql", ProjectRoot)
// PMeteoSQL meteo data dot file
var PMeteoSQL = fmt.Sprintf("%s/qsql/meteo.sql", ProjectRoot)
| [
"\"GOPATH\""
]
| []
| [
"GOPATH"
]
| [] | ["GOPATH"] | go | 1 | 0 | |
anontunnel-test/main.py | import kivy
kivy.require('1.0.9')
from kivy.lang import Builder
from kivy.uix.gridlayout import GridLayout
from kivy.properties import NumericProperty
from kivy.app import App
from time import sleep
import threading
import signal
# import tribler
import sys
import os
try:
import android
os.environ["PYTHON_EGG_CACHE"] = "/data/data/org.tribler.at3.anontunneltest/cache"
os.environ['TRIBLER_STATE_DIR'] = "/sdcard/org.tribler.at3.anontunneltest/.tribler"
except ImportError:
sys.path.append('/home/martijn/Documenten/rolf-tribler')
os.environ['TRIBLER_STATE_DIR'] = "/home/martijn/Documenten/.Tribler"
from Tribler.community.anontunnel.community import ProxySettings
from Tribler.community.anontunnel.atunnel import AnonTunnel
socks5_port = None
proxy_settings = ProxySettings()
crawl = False
anon_tunnel = AnonTunnel(socks5_port, proxy_settings, crawl)
anon_tunnel.run()
def check_status():
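    """Report success and stop the process once the anon tunnel test download
    has finished; otherwise this check is re-scheduled every 5 seconds."""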
if anon_tunnel.community and anon_tunnel.community.libtorrenttest.download_finished_at:
print 'TEST SUCCESSFUL'
os.kill(os.getpid(), signal.SIGINT)
    threading.Timer(5, check_status).start()
Builder.load_string('''
<AnontunnelStressScreen>:
cols: 1
Label:
text: 'Running ANON download test...'
''')
class AnontunnelStressScreen(GridLayout):
check_status()
class AnontunnelStressApp(App):
def build(self):
return AnontunnelStressScreen()
if __name__ == '__main__':
AnontunnelStressApp().run()
| []
| []
| [
"PYTHON_EGG_CACHE",
"TRIBLER_STATE_DIR"
]
| [] | ["PYTHON_EGG_CACHE", "TRIBLER_STATE_DIR"] | python | 2 | 0 | |
cmd/build.go | package cmd
import (
"bytes"
"context"
"github.com/pkg/errors"
"github.com/viant/afs/file"
"github.com/viant/afs/mem"
foption "github.com/viant/afs/option"
"github.com/viant/afs/url"
"github.com/viant/bqtail/base"
"github.com/viant/bqtail/cmd/rule/build"
"github.com/viant/bqtail/service/bq"
"github.com/viant/bqtail/shared"
"github.com/viant/bqtail/tail/config"
"github.com/viant/bqtail/task"
"gopkg.in/yaml.v2"
"os"
"path"
)
func (s *service) Build(ctx context.Context, request *build.Request) error {
request.Init(s.config)
if request.RuleURL == "" {
request.RuleURL = url.Join(ruleBaseURL, "rule.yaml")
}
rule := &config.Rule{
Async: true,
Dest: &config.Destination{
Transient: &config.Transient{},
},
OnSuccess: make([]*task.Action, 0),
OnFailure: make([]*task.Action, 0),
Info: base.Info{
URL: request.RuleURL,
LeadEngineer: os.Getenv("USER"),
},
}
rule.OnSuccess = append(rule.OnSuccess, &task.Action{Action: shared.ActionDelete})
if err := s.initSourceMatch(ctx, rule, request); err != nil {
return err
}
s.initDestination(rule, request)
s.initdBatch(request, rule)
if !(request.SourceURL != "" || request.Validate) {
s.reportRule(rule)
return nil
}
ruleMap := ruleToMap(rule)
ruleYAML, err := yaml.Marshal(ruleMap)
if err != nil {
return err
}
if mem.Scheme == url.Scheme(rule.Info.URL, "") {
err = s.fs.Upload(ctx, rule.Info.URL, file.DefaultFileOsMode, bytes.NewReader(ruleYAML))
}
return err
}
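// initdBatch enables a batching window on the rule when the request asks for one.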
func (s *service) initdBatch(request *build.Request, rule *config.Rule) {
if request.Window > 0 {
if rule.Batch == nil {
rule.Batch = &config.Batch{Window: &config.Window{}}
}
rule.Batch.Window.DurationInSec = request.Window
rule.Batch.Window.Init()
}
}
func (s *service) buildQueryAction(query, destTable string) (*task.Action, error) {
if destTable == "" {
destTable = "mydataset.mytable"
}
destReference, err := base.NewTableReference(destTable)
if err != nil {
return nil, errors.Wrapf(err, "invalid query dest table: %v", destTable)
}
return bq.NewQueryAction(query, destReference, "", true, nil), nil
}
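// initDestination copies the destination table, schema template, transient
// dataset settings and related options from the request onto the rule.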
func (s *service) initDestination(rule *config.Rule, request *build.Request) {
rule.Dest.Table = request.Destination
rule.Dest.Schema.Template = request.Template
rule.Dest.Transient.Dataset = "temp"
rule.Dest.Transient.ProjectID = request.ProjectID
rule.Dest.Transient.Template = request.TransientTemplate
rule.Dest.UniqueColumns = request.DedupeColumns
if request.DestinationOverride {
rule.Dest.Override = &request.DestinationOverride
}
if request.Autodetect {
rule.Dest.Schema.Autodetect = request.Autodetect
}
if request.DestinationPartition {
rule.Dest.Partition = config.DateExpr
}
if request.SourceFormat != "" {
rule.Dest.SourceFormat = request.SourceFormat
}
}
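// initSourceMatch derives the rule's source matching settings. When no
// explicit prefix or pattern is supplied it inspects the source URL to pick
// the dominant file extension as the match suffix and to decide whether
// multi-path batching is needed.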
func (s *service) initSourceMatch(ctx context.Context, rule *config.Rule, request *build.Request) error {
if request.MatchPattern == "" && (request.MatchPrefix == "" || request.MatchPrefix == shared.DefaultPrefix) && request.SourceURL != "" {
objects, err := s.fs.List(ctx, request.SourceURL, foption.NewRecursive(true))
if err != nil {
return errors.Wrapf(err, "invalid source: %v", request.SourceURL)
}
folderCount := 0
var extension = make(map[string]int)
extensionMax := 0
suffix := ""
for _, object := range objects {
if object.IsDir() {
folderCount++
continue
}
extension[path.Ext(object.Name())]++
if extensionMax < extension[path.Ext(object.Name())] {
suffix = path.Ext(object.Name())
extensionMax = extension[path.Ext(object.Name())]
}
}
objects, _ = s.fs.List(ctx, request.SourceURL)
if len(objects) == 2 && objects[1].IsDir() {
request.MatchPrefix = path.Join(request.MatchPrefix, objects[1].Name())
}
if suffix != "" {
rule.When.Suffix = suffix
}
if folderCount > 1 && request.Window > 0 && rule.Batch == nil {
rule.Batch = &config.Batch{Window: &config.Window{}}
rule.Batch.MultiPath = true
}
}
rule.When.Prefix = request.MatchPrefix
rule.When.Suffix = request.MatchSuffix
rule.When.Filter = request.MatchPattern
if request.MatchPattern != "" {
rule.Dest.Pattern = request.MatchPattern
}
return nil
}
| [
"\"USER\""
]
| []
| [
"USER"
]
| [] | ["USER"] | go | 1 | 0 | |
examples/thumbnail/screenshot/takeScreenshotOfAUrl/main.go | package main
import (
"fmt"
"os"
"go.m3o.com"
"go.m3o.com/thumbnail"
)
func main() {
client := m3o.New(os.Getenv("M3O_API_TOKEN"))
rsp, err := client.Thumbnail.Screenshot(&thumbnail.ScreenshotRequest{
Height: 600,
Url: "https://google.com",
Width: 600,
})
fmt.Println(rsp, err)
}
| [
"\"M3O_API_TOKEN\""
]
| []
| [
"M3O_API_TOKEN"
]
| [] | ["M3O_API_TOKEN"] | go | 1 | 0 | |
devscripts/readme_for_cdn.py | from __future__ import unicode_literals
import re
import sys
import subprocess
import os
infile, outfile = sys.argv[1:]
# usage: python3 devscripts/readme_for_cdn.py ../README.md to_be_converted.md
# git rev-parse --short master
git_commit = ''
for cwd in [
os.path.join(os.getcwd(), os.path.abspath(__file__), '../public'),
os.path.join(os.getcwd(), os.path.abspath(__file__), 'public'),
os.path.dirname(os.path.abspath(__file__)),
]:
try:
cwd = os.path.normpath(cwd)
sp = subprocess.Popen(
['git', 'rev-parse', '--short', 'master'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=cwd)
out, _ = sp.communicate()
out = out.decode().strip()
if re.match('[0-9a-f]+', out):
git_commit = out
break
except BaseException:
pass
if not git_commit:
git_commit = os.environ.get('VERCEL_GIT_COMMIT_SHA')
if isinstance(git_commit, str):
if len(git_commit) > 8:
git_commit = git_commit[0:8]
else:
git_commit = 'master'
# https://vercel.com/docs/cli#commands/overview/unique-options
information_section = [
'- current commit: [`%(git_commit)s`](https://github.com/nao20010128nao/ytdl-patched/commit/%(git_commit)s)' % {'git_commit': git_commit},
'- [see list of supported sites](/supportedsites.html)',
]
release_tag = os.environ.get('GITHUB_RELEASE_TAG')
if not release_tag:
sys.path[:0] = ['.']
from youtube_dl.extractor.common import InfoExtractor
from test.helper import FakeYDL
class TestIE(InfoExtractor):
pass
ie = TestIE(FakeYDL({'verbose': False}))
script_id = 'readme_for_cdn'
data = ie._download_json(
'https://api.github.com/repos/nao20010128nao/ytdl-patched/releases/latest',
script_id, note=False)
release_tag = data['tag_name']
if release_tag:
information_section.append('- [download ytdl-patched](https://github.com/nao20010128nao/ytdl-patched/releases/tag/%s)' % release_tag)
information_section.append(' - [for Linux/macOS](https://github.com/nao20010128nao/ytdl-patched/releases/download/%s/youtube-dl)' % release_tag)
information_section.append(' - [for Windows](https://github.com/nao20010128nao/ytdl-patched/releases/download/%s/youtube-dl-red.exe)' % release_tag)
information_section.append(' - [for pip](https://github.com/nao20010128nao/ytdl-patched/releases/download/%s/youtube-dl.tar.gz)' % release_tag)
information_section.append(' - or by Homebrew: `brew install nao20010128nao/my/ytdl-patched`')
MARKER_RE = r'(?m)^<!-- MARKER BEGIN -->[^\0]+<!-- MARKER END -->$'
NAVIGATE_TXT = """
# Information
%s
""" % ('\n'.join(information_section))
markdown = ''
with open(infile, 'r') as r:
markdown = r.read()
if isinstance(markdown, bytes):
markdown = markdown.decode('utf-8')
markdown = re.sub(MARKER_RE, NAVIGATE_TXT, markdown)
if sys.version_info < (3, ):
markdown = markdown.encode('utf-8')
with open(outfile, 'w') as w:
w.write(markdown)
| []
| []
| [
"GITHUB_RELEASE_TAG",
"VERCEL_GIT_COMMIT_SHA"
]
| [] | ["GITHUB_RELEASE_TAG", "VERCEL_GIT_COMMIT_SHA"] | python | 2 | 0 | |
Tests/test_content.py | from __future__ import print_function
import datetime
import json
import logging
import os
import re
import sys
from contextlib import contextmanager
from queue import Queue
from typing import Union, Any, Generator
import demisto_client.demisto_api
import pytz
import requests
import urllib3
from google.api_core.exceptions import PreconditionFailed
from google.cloud import storage
from Tests.test_dependencies import get_used_integrations
from demisto_sdk.commands.common.constants import FILTER_CONF
from demisto_sdk.commands.test_content.ParallelLoggingManager import ParallelLoggingManager
logging_manager: ParallelLoggingManager = None
# Disable insecure warnings
urllib3.disable_warnings()
SERVER_URL = "https://{}"
INTEGRATIONS_CONF = "./Tests/integrations_file.txt"
FAILED_MATCH_INSTANCE_MSG = "{} Failed to run.\n There are {} instances of {}, please select one of them by using " \
"the instance_name argument in conf.json. The options are:\n{}"
LOCKS_PATH = 'content-locks'
BUCKET_NAME = os.environ.get('GCS_ARTIFACTS_BUCKET')
BUILD_NUM = os.environ.get('CI_BUILD_ID')
WORKFLOW_ID = os.environ.get('CI_PIPELINE_ID')
CIRCLE_STATUS_TOKEN = os.environ.get('CIRCLECI_STATUS_TOKEN')
class SettingsTester:
def __init__(self, options):
self.api_key = options.apiKey
self.server = options.server
self.conf_path = options.conf
self.secret_conf_path = options.secret
self.nightly = options.nightly
self.slack = options.slack
self.circleci = options.circleci
self.buildNumber = options.buildNumber
self.buildName = options.buildName
self.isAMI = options.isAMI
self.memCheck = options.memCheck
self.serverVersion = options.serverVersion
self.serverNumericVersion = None
self.specific_tests_to_run = self.parse_tests_list_arg(options.testsList)
self.is_local_run = (self.server is not None)
@staticmethod
def parse_tests_list_arg(tests_list: str):
"""
Parses the test list arguments if present.
:param tests_list: CSV string of tests to run.
:return: List of tests if there are any, otherwise empty list.
"""
tests_to_run = tests_list.split(",") if tests_list else []
return tests_to_run
class DataKeeperTester:
def __init__(self):
self.succeeded_playbooks = []
self.failed_playbooks = []
self.skipped_tests = []
self.skipped_integrations = []
self.rerecorded_tests = []
self.empty_files = []
self.unmockable_integrations = {}
def add_tests_data(self, succeed_playbooks, failed_playbooks, skipped_tests, skipped_integration,
unmockable_integrations):
# Using multiple appends and not extend since append is guaranteed to be thread safe
for playbook in succeed_playbooks:
self.succeeded_playbooks.append(playbook)
for playbook in failed_playbooks:
self.failed_playbooks.append(playbook)
for playbook in skipped_tests:
self.skipped_tests.append(playbook)
for playbook in skipped_integration:
self.skipped_integrations.append(playbook)
for playbook_id, reason in unmockable_integrations.items():
self.unmockable_integrations[playbook_id] = reason
def add_proxy_related_test_data(self, proxy):
# Using multiple appends and not extend since append is guaranteed to be thread safe
for playbook_id in proxy.rerecorded_tests:
self.rerecorded_tests.append(playbook_id)
for playbook_id in proxy.empty_files:
self.empty_files.append(playbook_id)
def print_test_summary(tests_data_keeper: DataKeeperTester,
is_ami: bool = True,
logging_module: Union[Any, ParallelLoggingManager] = logging) -> None:
"""
Takes the information stored in the tests_data_keeper and prints it in a human readable way.
Args:
tests_data_keeper: object containing test statuses.
is_ami: indicating if the server running the tests is an AMI or not.
logging_module: Logging module to use for test_summary
"""
succeed_playbooks = tests_data_keeper.succeeded_playbooks
failed_playbooks = tests_data_keeper.failed_playbooks
skipped_tests = tests_data_keeper.skipped_tests
unmocklable_integrations = tests_data_keeper.unmockable_integrations
skipped_integration = tests_data_keeper.skipped_integrations
rerecorded_tests = tests_data_keeper.rerecorded_tests
empty_files = tests_data_keeper.empty_files
succeed_count = len(succeed_playbooks)
failed_count = len(failed_playbooks)
skipped_count = len(skipped_tests)
rerecorded_count = len(rerecorded_tests) if is_ami else 0
empty_mocks_count = len(empty_files) if is_ami else 0
unmocklable_integrations_count = len(unmocklable_integrations)
logging_module.info('TEST RESULTS:')
logging_module.info(f'Number of playbooks tested - {succeed_count + failed_count}')
if failed_count:
logging_module.error(f'Number of failed tests - {failed_count}:')
logging_module.error('Failed Tests: {}'.format(
''.join([f'\n\t\t\t\t\t\t\t - {playbook_id}' for playbook_id in failed_playbooks])))
if succeed_count:
logging_module.success(f'Number of succeeded tests - {succeed_count}')
logging_module.success('Successful Tests: {}'.format(
''.join([f'\n\t\t\t\t\t\t\t - {playbook_id}' for playbook_id in succeed_playbooks])))
if rerecorded_count > 0:
logging_module.warning(f'Number of tests with failed playback and successful re-recording - {rerecorded_count}')
logging_module.warning('Tests with failed playback and successful re-recording: {}'.format(
''.join([f'\n\t\t\t\t\t\t\t - {playbook_id}' for playbook_id in rerecorded_tests])))
if empty_mocks_count > 0:
        logging_module.info(f'Successful tests with empty mock files count - {empty_mocks_count}:\n')
proxy_explanation = \
'\t\t\t\t\t\t\t (either there were no http requests or no traffic is passed through the proxy.\n' \
'\t\t\t\t\t\t\t Investigate the playbook and the integrations.\n' \
'\t\t\t\t\t\t\t If the integration has no http traffic, add to unmockable_integrations in conf.json)'
logging_module.info(proxy_explanation)
logging_module.info('Successful tests with empty mock files: {}'.format(
''.join([f'\n\t\t\t\t\t\t\t - {playbook_id}' for playbook_id in empty_files])))
if len(skipped_integration) > 0:
        logging_module.warning(f'Number of skipped integrations - {len(skipped_integration)}')
        logging_module.warning('Skipped integrations: {}'.format(
''.join([f'\n\t\t\t\t\t\t\t - {playbook_id}' for playbook_id in skipped_integration])))
if skipped_count > 0:
logging_module.warning(f'Number of skipped tests - {skipped_count}:')
logging_module.warning('Skipped tests: {}'.format(
''.join([f'\n\t\t\t\t\t\t\t - {playbook_id}' for playbook_id in skipped_tests])))
if unmocklable_integrations_count > 0:
logging_module.warning(f'Number of unmockable integrations - {unmocklable_integrations_count}:')
logging_module.warning('Unmockable integrations: {}'.format(
''.join([f'\n\t\t\t\t\t\t\t - {playbook_id} - {reason}' for playbook_id, reason in
unmocklable_integrations.items()])))
def update_test_msg(integrations, test_message):
if integrations:
integrations_names = [integration['name'] for integration in
integrations]
test_message = test_message + ' with integration(s): ' + ','.join(
integrations_names)
return test_message
def turn_off_telemetry(xsoar_client):
"""
Turn off telemetry on the AMI instance
:param xsoar_client: Preconfigured client for the XSOAR instance
:return: None
"""
body, status_code, _ = demisto_client.generic_request_func(self=xsoar_client, method='POST',
path='/telemetry?status=notelemetry')
if status_code != 200:
logging_manager.critical(f'Request to turn off telemetry failed with status code "{status_code}"\n{body}',
real_time=True)
sys.exit(1)
def create_result_files(tests_data_keeper):
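    """Write the failed tests, skipped tests and skipped integrations to text files under ./Tests."""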
failed_playbooks = tests_data_keeper.failed_playbooks
skipped_integration = tests_data_keeper.skipped_integrations
skipped_tests = tests_data_keeper.skipped_tests
with open("./Tests/failed_tests.txt", "w") as failed_tests_file:
failed_tests_file.write('\n'.join(failed_playbooks))
with open('./Tests/skipped_tests.txt', "w") as skipped_tests_file:
skipped_tests_file.write('\n'.join(skipped_tests))
with open('./Tests/skipped_integrations.txt', "w") as skipped_integrations_file:
skipped_integrations_file.write('\n'.join(skipped_integration))
def change_placeholders_to_values(placeholders_map, config_item):
"""Replaces placeholders in the object to their real values
Args:
placeholders_map: (dict)
Dict that holds the real values to be replaced for each placeholder.
config_item: (json object)
Integration configuration object.
Returns:
dict. json object with the real configuration.
"""
item_as_string = json.dumps(config_item)
for key, value in placeholders_map.items():
item_as_string = item_as_string.replace(key, value)
return json.loads(item_as_string)
def set_integration_params(demisto_api_key, integrations, secret_params, instance_names, playbook_id, placeholders_map):
for integration in integrations:
integration_params = [change_placeholders_to_values(placeholders_map, item) for item
in secret_params if item['name'] == integration['name']]
if integration_params:
matched_integration_params = integration_params[0]
if len(integration_params) != 1:
found_matching_instance = False
for item in integration_params:
if item.get('instance_name', 'Not Found') in instance_names:
matched_integration_params = item
found_matching_instance = True
if not found_matching_instance:
optional_instance_names = [optional_integration.get('instance_name', 'None')
for optional_integration in integration_params]
error_msg = FAILED_MATCH_INSTANCE_MSG.format(playbook_id, len(integration_params),
integration['name'],
'\n'.join(optional_instance_names))
logging_manager.error(error_msg)
return False
integration['params'] = matched_integration_params.get('params', {})
integration['byoi'] = matched_integration_params.get('byoi', True)
integration['instance_name'] = matched_integration_params.get('instance_name', integration['name'])
integration['validate_test'] = matched_integration_params.get('validate_test', True)
elif integration['name'] == 'Demisto REST API':
integration['params'] = {
'url': 'https://localhost',
'apikey': demisto_api_key,
'insecure': True,
}
return True
def collect_integrations(integrations_conf, skipped_integration, skipped_integrations_conf, nightly_integrations):
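    """Build the integration records for a test, collecting which of them are
    skipped and whether any of them is a nightly-only integration."""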
integrations = []
is_nightly_integration = False
test_skipped_integration = []
for integration in integrations_conf:
if integration in skipped_integrations_conf.keys():
skipped_integration.add("{0} - reason: {1}".format(integration, skipped_integrations_conf[integration]))
test_skipped_integration.append(integration)
if integration in nightly_integrations:
is_nightly_integration = True
# string description
integrations.append({
'name': integration,
'params': {}
})
return test_skipped_integration, integrations, is_nightly_integration
def extract_filtered_tests():
with open(FILTER_CONF, 'r') as filter_file:
filtered_tests = [line.strip('\n') for line in filter_file.readlines()]
return filtered_tests
def load_conf_files(conf_path, secret_conf_path):
with open(conf_path) as data_file:
conf = json.load(data_file)
secret_conf = None
if secret_conf_path:
with open(secret_conf_path) as data_file:
secret_conf = json.load(data_file)
return conf, secret_conf
def load_env_results_json():
env_results_path = os.getenv('ENV_RESULTS_PATH', os.path.join(os.getenv('ARTIFACTS_FOLDER', './artifacts'),
'env_results.json'))
if not os.path.isfile(env_results_path):
logging.warning(f"Did not find {env_results_path} file ")
return {}
with open(env_results_path, 'r') as json_file:
return json.load(json_file)
def get_server_numeric_version(ami_env, is_local_run=False):
"""
Gets the current server version
Arguments:
ami_env: (str)
AMI version name.
is_local_run: (bool)
when running locally, assume latest version.
Returns:
(str) Server numeric version
"""
default_version = '99.99.98'
if is_local_run:
logging.info(f'Local run, assuming server version is {default_version}')
return default_version
env_json = load_env_results_json()
if not env_json:
logging.warning(f"assuming server version is {default_version}.")
return default_version
instances_ami_names = {env.get('AmiName') for env in env_json if ami_env in env.get('Role', '')}
if len(instances_ami_names) != 1:
logging.warning(f'Did not get one AMI Name, got {instances_ami_names}.'
f' Assuming server version is {default_version}')
return default_version
instances_ami_name = list(instances_ami_names)[0]
return extract_server_numeric_version(instances_ami_name, default_version)
def extract_server_numeric_version(instances_ami_name, default_version):
# regex doesn't catch Server Master execution
extracted_version = re.findall(r'Demisto-(?:Circle-CI|Marketplace)-Content-AMI-[A-Za-z]*[-_](\d[._]\d)-[\d]{5}',
instances_ami_name)
extracted_version = [match.replace('_', '.') for match in extracted_version]
if extracted_version:
server_numeric_version = extracted_version[0]
else:
if 'Master' in instances_ami_name:
logging.info('Server version: Master')
return default_version
else:
server_numeric_version = default_version
# make sure version is three-part version
if server_numeric_version.count('.') == 1:
server_numeric_version += ".0"
logging.info(f'Server version: {server_numeric_version}')
return server_numeric_version
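# Illustrative example (hypothetical AMI name, not taken from real build data):
#   extract_server_numeric_version('Demisto-Circle-CI-Content-AMI-GA_6_1-12345', '99.99.98')
# captures '6_1', replaces the underscore with a dot and pads the two-part '6.1' to '6.1.0'.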
def get_instances_ips_and_names(tests_settings):
if tests_settings.server:
return [tests_settings.server]
env_json = load_env_results_json()
instances_ips = [(env.get('Role'), f"localhost:{env.get('TunnelPort')}") for env in env_json]
return instances_ips
def get_test_records_of_given_test_names(tests_settings, tests_names_to_search):
conf, secret_conf = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
tests_records = conf['tests']
test_records_with_supplied_names = []
for test_record in tests_records:
test_name = test_record.get("playbookID")
if test_name and test_name in tests_names_to_search:
test_records_with_supplied_names.append(test_record)
return test_records_with_supplied_names
def get_json_file(path):
with open(path, 'r') as json_file:
return json.loads(json_file.read())
def initialize_queue_and_executed_tests_set(tests):
tests_queue: Queue = Queue()
already_executed_test_playbooks: set = set()
for t in tests:
tests_queue.put(t)
return already_executed_test_playbooks, tests_queue
def get_unmockable_tests(tests_settings):
conf, _ = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
unmockable_integrations = conf['unmockable_integrations']
tests = conf['tests']
unmockable_tests = []
for test_record in tests:
test_name = test_record.get("playbookID")
integrations_used_in_test = get_used_integrations(test_record)
unmockable_integrations_used = [integration_name for integration_name in integrations_used_in_test if
integration_name in unmockable_integrations]
if test_name and (not integrations_used_in_test or unmockable_integrations_used):
unmockable_tests.append(test_name)
return unmockable_tests
def get_all_tests(tests_settings):
conf, _ = load_conf_files(tests_settings.conf_path, tests_settings.secret_conf_path)
tests_records = conf['tests']
all_tests = []
for test_record in tests_records:
test_name = test_record.get("playbookID")
if test_name:
all_tests.append(test_name)
return all_tests
def add_pr_comment(comment):
token = os.environ['CONTENT_GITHUB_TOKEN']
branch_name = os.environ['CI_COMMIT_BRANCH']
sha1 = os.environ['CI_COMMIT_SHA']
query = '?q={}+repo:demisto/content+org:demisto+is:pr+is:open+head:{}+is:open'.format(sha1, branch_name)
url = 'https://api.github.com/search/issues'
headers = {'Authorization': 'Bearer ' + token}
try:
res = requests.get(url + query, headers=headers, verify=False)
res_dict = handle_github_response(res)
if res_dict and res_dict.get('total_count', 0) == 1:
issue_url = res_dict['items'][0].get('comments_url') if res_dict.get('items', []) else None
if issue_url:
res = requests.post(issue_url, json={'body': comment}, headers=headers, verify=False)
handle_github_response(res)
else:
                logging_manager.warning('Add pull request comment failed: There is more than one open pull '
f'request for branch {branch_name}.', real_time=True)
except Exception:
logging_manager.exception('Add pull request comment failed')
def handle_github_response(response):
res_dict = response.json()
    if not response.ok:
logging_manager.error(f'Add pull request comment failed: {res_dict.get("message")}', real_time=True)
return res_dict
@contextmanager
def acquire_test_lock(integrations_details: list,
test_timeout: int,
conf_json_path: str) -> Generator:
"""
This is a context manager that handles all the locking and unlocking of integrations.
    Execution is as follows:
* Attempts to lock the test's integrations and yields the result of this attempt
* If lock attempt has failed - yields False, if it succeeds - yields True
    * Once the test is done - unlocks all integrations
Args:
integrations_details: test integrations details
test_timeout: test timeout in seconds
conf_json_path: Path to conf.json file
Yields:
A boolean indicating the lock attempt result
"""
locked = safe_lock_integrations(test_timeout,
integrations_details,
conf_json_path)
try:
yield locked
except Exception:
logging_manager.exception('Failed with test lock')
finally:
if not locked:
return
safe_unlock_integrations(integrations_details)
def safe_unlock_integrations(integrations_details: list):
"""
    This method safely unlocks the test's integrations.
    If an unexpected error occurs - this method will log its details and other tests' execution will continue
Args:
integrations_details: Details of the currently executed test
"""
try:
# executing the test could take a while, re-instancing the storage client
storage_client = storage.Client()
unlock_integrations(integrations_details, storage_client)
except Exception:
logging_manager.exception('attempt to unlock integration failed for unknown reason.')
def safe_lock_integrations(test_timeout: int,
integrations_details: list,
conf_json_path: str) -> bool:
"""
    This method safely locks the test's integrations and returns the result.
    If an unexpected error occurs - this method will log its details and return False
Args:
test_timeout: Test timeout in seconds
integrations_details: test integrations details
conf_json_path: Path to conf.json file
Returns:
A boolean indicating the lock attempt result
"""
conf, _ = load_conf_files(conf_json_path, None)
parallel_integrations_names = conf['parallel_integrations']
filtered_integrations_details = [integration for integration in integrations_details if
integration['name'] not in parallel_integrations_names]
integration_names = get_integrations_list(filtered_integrations_details)
if integration_names:
print_msg = f'Attempting to lock integrations {integration_names}, with timeout {test_timeout}'
else:
print_msg = 'No integrations to lock'
logging_manager.debug(print_msg)
try:
storage_client = storage.Client()
locked = lock_integrations(filtered_integrations_details, test_timeout, storage_client)
except Exception:
logging_manager.exception('attempt to lock integration failed for unknown reason.')
locked = False
return locked
def workflow_still_running(workflow_id: str) -> bool:
"""
This method takes a workflow id and checks if the workflow is still running
If given workflow ID is the same as the current workflow, will simply return True
else it will query circleci api for the workflow and return the status
Args:
workflow_id: The ID of the workflow
Returns:
True if the workflow is running, else False
"""
# If this is the current workflow_id
if workflow_id == WORKFLOW_ID:
return True
else:
try:
workflow_details_response = requests.get(f'https://circleci.com/api/v2/workflow/{workflow_id}',
headers={'Accept': 'application/json'},
auth=(CIRCLE_STATUS_TOKEN, ''))
workflow_details_response.raise_for_status()
except Exception:
logging_manager.exception(f'Failed to get circleci response about workflow with id {workflow_id}.')
return True
return workflow_details_response.json().get('status') not in ('canceled', 'success', 'failed')
def lock_integrations(integrations_details: list,
test_timeout: int,
storage_client: storage.Client) -> bool:
"""
Locks all the test's integrations
Args:
integrations_details: List of current test's integrations
test_timeout: Test timeout in seconds
storage_client: The GCP storage client
Returns:
True if all the test's integrations were successfully locked, else False
"""
integrations = get_integrations_list(integrations_details)
if not integrations:
return True
existing_integrations_lock_files = get_locked_integrations(integrations, storage_client)
for integration, lock_file in existing_integrations_lock_files.items():
        # Each file has content in the form of <workflow-id>:<build-number>:<timeout in seconds>
# If it has not expired - it means the integration is currently locked by another test.
workflow_id, build_number, lock_timeout = lock_file.download_as_string().decode().split(':')
if not lock_expired(lock_file, lock_timeout) and workflow_still_running(workflow_id):
# there is a locked integration for which the lock is not expired - test cannot be executed at the moment
logging_manager.warning(
                f'Could not lock integration {integration}, another lock file exists with '
f'build number: {build_number}, timeout: {lock_timeout}, last update at {lock_file.updated}.\n'
f'Delaying test execution')
return False
integrations_generation_number = {}
# Gathering generation number with which the new file will be created,
# See https://cloud.google.com/storage/docs/generations-preconditions for details.
for integration in integrations:
if integration in existing_integrations_lock_files:
integrations_generation_number[integration] = existing_integrations_lock_files[integration].generation
else:
integrations_generation_number[integration] = 0
return create_lock_files(integrations_generation_number, storage_client, integrations_details, test_timeout)
def get_integrations_list(test_integrations: list) -> list:
"""
    Since test details can have one integration as a string and sometimes a list of integrations - this method
    parses the test's integrations into a list of integration names.
Args:
test_integrations: List of current test's integrations
Returns:
        A list of the integration names for all the integrations that take part in the test
        specified in the test details.
"""
return [integration['name'] for integration in test_integrations]
def create_lock_files(integrations_generation_number: dict,
storage_client: storage.Client,
integrations_details: list,
test_timeout: int) -> bool:
"""
    This method tries to create a lock file for each integration specified in 'integrations_generation_number'.
    Each file should contain <workflow-id>:<build-number>:<test-timeout>
    where the <workflow-id> and <build-number> parts are for debugging and troubleshooting
    and the <test-timeout> part is to be able to unlock expired lock files.
    If lock file creation fails for any of the integrations - the already created files will be cleaned up.
Args:
integrations_generation_number: A dict in the form of {<integration-name>:<integration-generation>}
storage_client: The GCP storage client
integrations_details: List of current test's integrations
        test_timeout: The test timeout in seconds
    Returns:
        True if all the lock files were created successfully, else False
"""
locked_integrations = []
bucket = storage_client.bucket(BUCKET_NAME)
for integration, generation_number in integrations_generation_number.items():
blob = bucket.blob(f'{LOCKS_PATH}/{integration}')
try:
blob.upload_from_string(f'{WORKFLOW_ID}:{BUILD_NUM}:{test_timeout + 30}',
if_generation_match=generation_number)
logging_manager.debug(f'integration {integration} locked')
locked_integrations.append(integration)
except PreconditionFailed:
# if this exception occurs it means that another build has locked this integration
# before this build managed to do it.
# we need to unlock all the integrations we have already locked and try again later
logging_manager.warning(
                f'Could not lock integration {integration}, create file with precondition failed. '
                f'Delaying test execution.')
unlock_integrations(integrations_details, storage_client)
return False
return True
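# Illustrative sketch of the lock file contents (values are assumptions, not real build data):
# with WORKFLOW_ID='wf-1', BUILD_NUM='42' and test_timeout=300, each blob created above holds
# the string 'wf-1:42:330', which lock_integrations() later splits on ':' into
# (workflow_id, build_number, lock_timeout).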
def unlock_integrations(integrations_details: list,
storage_client: storage.Client) -> None:
"""
Delete all integration lock files for integrations specified in 'locked_integrations'
Args:
integrations_details: List of current test's integrations
storage_client: The GCP storage client
"""
locked_integrations = get_integrations_list(integrations_details)
locked_integration_blobs = get_locked_integrations(locked_integrations, storage_client)
for integration, lock_file in locked_integration_blobs.items():
try:
            # Verifying build number is the same as current build number to avoid deleting other tests' lock files
_, build_number, _ = lock_file.download_as_string().decode().split(':')
if build_number == BUILD_NUM:
lock_file.delete(if_generation_match=lock_file.generation)
logging_manager.debug(
f'Integration {integration} unlocked')
except PreconditionFailed:
logging_manager.error(f'Could not unlock integration {integration} precondition failure')
def get_locked_integrations(integrations: list, storage_client: storage.Client) -> dict:
"""
Getting all locked integrations files
Args:
integrations: Integrations that we want to get lock files for
storage_client: The GCP storage client
Returns:
        A dict of the form {<integration-name>:<integration-blob-object>} for all integrations that have a blob object.
"""
# Listing all files in lock folder
# Wrapping in 'list' operator because list_blobs return a generator which can only be iterated once
lock_files_ls = list(storage_client.list_blobs(BUCKET_NAME, prefix=f'{LOCKS_PATH}'))
current_integrations_lock_files = {}
# Getting all existing files details for integrations that we want to lock
for integration in integrations:
current_integrations_lock_files.update({integration: [lock_file_blob for lock_file_blob in lock_files_ls if
lock_file_blob.name == f'{LOCKS_PATH}/{integration}']})
# Filtering 'current_integrations_lock_files' from integrations with no files
current_integrations_lock_files = {integration: blob_files[0] for integration, blob_files in
current_integrations_lock_files.items() if blob_files}
return current_integrations_lock_files
def lock_expired(lock_file: storage.Blob, lock_timeout: str) -> bool:
"""
    Checks if the time that has passed since the last update of the 'lock_file' is more than 'lock_timeout'.
    If not - it means that the integration represented by the lock file is currently locked and is being tested in another build
Args:
lock_file: The lock file blob object
lock_timeout: The expiration timeout of the lock in seconds
Returns:
        True if the lock has exceeded its timeout, else False
"""
return datetime.datetime.now(tz=pytz.utc) - lock_file.updated >= datetime.timedelta(seconds=int(lock_timeout))
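# Rough example (hypothetical values): a lock blob whose 'updated' timestamp is ten minutes old
# with lock_timeout='300' is reported as expired, since 600 seconds >= 300 seconds.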
| []
| []
| [
"CI_PIPELINE_ID",
"CIRCLECI_STATUS_TOKEN",
"CONTENT_GITHUB_TOKEN",
"ENV_RESULTS_PATH",
"ARTIFACTS_FOLDER",
"CI_COMMIT_BRANCH",
"CI_COMMIT_SHA",
"GCS_ARTIFACTS_BUCKET",
"CI_BUILD_ID"
]
| [] | ["CI_PIPELINE_ID", "CIRCLECI_STATUS_TOKEN", "CONTENT_GITHUB_TOKEN", "ENV_RESULTS_PATH", "ARTIFACTS_FOLDER", "CI_COMMIT_BRANCH", "CI_COMMIT_SHA", "GCS_ARTIFACTS_BUCKET", "CI_BUILD_ID"] | python | 9 | 0 | |
app/config.py | import os
class ServerConfig:
def __init__(self, env):
self.update_plugins_on_start_up = env[
'UPDATE_PLUGINS_ON_STARTUP'] if 'UPDATE_PLUGINS_ON_STARTUP' in env else True
self.make_slower_responses = float(
env['DEBUG_MAKE_SLOWER_RESPONSES']) if 'DEBUG_MAKE_SLOWER_RESPONSES' in env else 0
self.heartbeat_every = env['RUN_HEARTBEAT_EVERY'] if 'RUN_HEARTBEAT_EVERY' in env else 5 * 60
self.tasks_every = env['RUN_TASKS_EVERY'] if 'RUN_TASKS_EVERY' in env else 1
self.page_size = int(env['AUTOLOAD_PAGE_SIZE']) if 'AUTOLOAD_PAGE_SIZE' in env else 25
self.expose_gui_api = (env['EXPOSE_GUI_API'].lower() == "yes") if 'EXPOSE_GUI_API' in env else True
self.reset_plugins = (env['RESET_PLUGINS'].lower() == "yes") if 'RESET_PLUGINS' in env else False
self.x_forwarded_ip_header = env['USE_X_FORWARDED_IP'] if 'USE_X_FORWARDED_IP' in env else None
server = ServerConfig(os.environ)
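# Minimal usage sketch (hypothetical values; environment variables are strings):
#   cfg = ServerConfig({'AUTOLOAD_PAGE_SIZE': '50', 'EXPOSE_GUI_API': 'no'})
#   cfg.page_size        # -> 50
#   cfg.expose_gui_api   # -> False
#   cfg.heartbeat_every  # -> 300, the 5 * 60 default when RUN_HEARTBEAT_EVERY is unset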
| []
| []
| []
| [] | [] | python | 0 | 0 | |
mesonbuild/mesonlib.py | # Copyright 2012-2015 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A library of random helper functionality."""
from pathlib import Path
import sys
import stat
import time
import platform, subprocess, operator, os, shlex, shutil, re
import collections
from enum import Enum
from functools import lru_cache
import typing
import uuid
from mesonbuild import mlog
_T = typing.TypeVar('_T')
_U = typing.TypeVar('_U')
have_fcntl = False
have_msvcrt = False
# {subproject: project_meson_version}
project_meson_versions = {}
try:
import fcntl
have_fcntl = True
except Exception:
pass
try:
import msvcrt
have_msvcrt = True
except Exception:
pass
from glob import glob
if os.path.basename(sys.executable) == 'meson.exe':
# In Windows and using the MSI installed executable.
python_command = [sys.executable, 'runpython']
else:
python_command = [sys.executable]
meson_command = None
def set_meson_command(mainfile):
global python_command
global meson_command
# On UNIX-like systems `meson` is a Python script
# On Windows `meson` and `meson.exe` are wrapper exes
if not mainfile.endswith('.py'):
meson_command = [mainfile]
elif os.path.isabs(mainfile) and mainfile.endswith('mesonmain.py'):
# Can't actually run meson with an absolute path to mesonmain.py, it must be run as -m mesonbuild.mesonmain
meson_command = python_command + ['-m', 'mesonbuild.mesonmain']
else:
# Either run uninstalled, or full path to meson-script.py
meson_command = python_command + [mainfile]
# We print this value for unit tests.
if 'MESON_COMMAND_TESTS' in os.environ:
mlog.log('meson_command is {!r}'.format(meson_command))
def is_ascii_string(astring) -> bool:
try:
if isinstance(astring, str):
astring.encode('ascii')
elif isinstance(astring, bytes):
astring.decode('ascii')
except UnicodeDecodeError:
return False
return True
def check_direntry_issues(direntry_array):
import locale
# Warn if the locale is not UTF-8. This can cause various unfixable issues
# such as os.stat not being able to decode filenames with unicode in them.
# There is no way to reset both the preferred encoding and the filesystem
# encoding, so we can just warn about it.
e = locale.getpreferredencoding()
if e.upper() != 'UTF-8' and not is_windows():
if not isinstance(direntry_array, list):
direntry_array = [direntry_array]
for de in direntry_array:
if is_ascii_string(de):
continue
            mlog.warning('''You are using {!r} which is not a Unicode-compatible
locale but you are trying to access a file system entry called {!r} which is
not pure ASCII. This may cause problems.
'''.format(e, de), file=sys.stderr)
# Put this in objects that should not get dumped to pickle files
# by accident.
import threading
an_unpicklable_object = threading.Lock()
class MesonException(Exception):
'''Exceptions thrown by Meson'''
def get_msg_with_context(self):
s = ''
if hasattr(self, 'lineno') and hasattr(self, 'file'):
s = get_error_location_string(self.file, self.lineno) + ' '
s += str(self)
return s
class EnvironmentException(MesonException):
'''Exceptions thrown while processing and creating the build environment'''
class FileMode:
# The first triad is for owner permissions, the second for group permissions,
# and the third for others (everyone else).
# For the 1st character:
# 'r' means can read
# '-' means not allowed
# For the 2nd character:
# 'w' means can write
# '-' means not allowed
# For the 3rd character:
# 'x' means can execute
# 's' means can execute and setuid/setgid is set (owner/group triads only)
# 'S' means cannot execute and setuid/setgid is set (owner/group triads only)
# 't' means can execute and sticky bit is set ("others" triads only)
# 'T' means cannot execute and sticky bit is set ("others" triads only)
# '-' means none of these are allowed
#
# The meanings of 'rwx' perms is not obvious for directories; see:
# https://www.hackinglinuxexposed.com/articles/20030424.html
#
# For information on this notation such as setuid/setgid/sticky bits, see:
# https://en.wikipedia.org/wiki/File_system_permissions#Symbolic_notation
symbolic_perms_regex = re.compile('[r-][w-][xsS-]' # Owner perms
'[r-][w-][xsS-]' # Group perms
'[r-][w-][xtT-]') # Others perms
def __init__(self, perms=None, owner=None, group=None):
self.perms_s = perms
self.perms = self.perms_s_to_bits(perms)
self.owner = owner
self.group = group
def __repr__(self):
        ret = '<FileMode: {!r} owner={} group={}>'
return ret.format(self.perms_s, self.owner, self.group)
@classmethod
def perms_s_to_bits(cls, perms_s):
'''
Does the opposite of stat.filemode(), converts strings of the form
'rwxr-xr-x' to st_mode enums which can be passed to os.chmod()
'''
if perms_s is None:
# No perms specified, we will not touch the permissions
return -1
eg = 'rwxr-xr-x'
if not isinstance(perms_s, str):
msg = 'Install perms must be a string. For example, {!r}'
raise MesonException(msg.format(eg))
if len(perms_s) != 9 or not cls.symbolic_perms_regex.match(perms_s):
msg = 'File perms {!r} must be exactly 9 chars. For example, {!r}'
raise MesonException(msg.format(perms_s, eg))
perms = 0
# Owner perms
if perms_s[0] == 'r':
perms |= stat.S_IRUSR
if perms_s[1] == 'w':
perms |= stat.S_IWUSR
if perms_s[2] == 'x':
perms |= stat.S_IXUSR
elif perms_s[2] == 'S':
perms |= stat.S_ISUID
elif perms_s[2] == 's':
perms |= stat.S_IXUSR
perms |= stat.S_ISUID
# Group perms
if perms_s[3] == 'r':
perms |= stat.S_IRGRP
if perms_s[4] == 'w':
perms |= stat.S_IWGRP
if perms_s[5] == 'x':
perms |= stat.S_IXGRP
elif perms_s[5] == 'S':
perms |= stat.S_ISGID
elif perms_s[5] == 's':
perms |= stat.S_IXGRP
perms |= stat.S_ISGID
# Others perms
if perms_s[6] == 'r':
perms |= stat.S_IROTH
if perms_s[7] == 'w':
perms |= stat.S_IWOTH
if perms_s[8] == 'x':
perms |= stat.S_IXOTH
elif perms_s[8] == 'T':
perms |= stat.S_ISVTX
elif perms_s[8] == 't':
perms |= stat.S_IXOTH
perms |= stat.S_ISVTX
return perms
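# Rough example of the mapping above (illustrative, not part of the original module):
#   FileMode.perms_s_to_bits('rw-r--r--') == stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH
# which is the same value as the octal mode 0o644 commonly passed to os.chmod().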
class File:
def __init__(self, is_built: bool, subdir: str, fname: str):
self.is_built = is_built
self.subdir = subdir
self.fname = fname
assert(isinstance(self.subdir, str))
assert(isinstance(self.fname, str))
def __str__(self) -> str:
return self.relative_name()
def __repr__(self) -> str:
ret = '<File: {0}'
if not self.is_built:
ret += ' (not built)'
ret += '>'
return ret.format(self.relative_name())
@staticmethod
@lru_cache(maxsize=None)
def from_source_file(source_root: str, subdir: str, fname: str):
if not os.path.isfile(os.path.join(source_root, subdir, fname)):
raise MesonException('File %s does not exist.' % fname)
return File(False, subdir, fname)
@staticmethod
def from_built_file(subdir: str, fname: str):
return File(True, subdir, fname)
@staticmethod
def from_absolute_file(fname: str):
return File(False, '', fname)
@lru_cache(maxsize=None)
def rel_to_builddir(self, build_to_src: str) -> str:
if self.is_built:
return self.relative_name()
else:
return os.path.join(build_to_src, self.subdir, self.fname)
@lru_cache(maxsize=None)
def absolute_path(self, srcdir: str, builddir: str) -> str:
absdir = srcdir
if self.is_built:
absdir = builddir
return os.path.join(absdir, self.relative_name())
def endswith(self, ending: str) -> bool:
return self.fname.endswith(ending)
def split(self, s: str) -> typing.List[str]:
return self.fname.split(s)
def __eq__(self, other) -> bool:
return (self.fname, self.subdir, self.is_built) == (other.fname, other.subdir, other.is_built)
def __hash__(self) -> int:
return hash((self.fname, self.subdir, self.is_built))
@lru_cache(maxsize=None)
def relative_name(self) -> str:
return os.path.join(self.subdir, self.fname)
def get_compiler_for_source(compilers, src):
for comp in compilers:
if comp.can_compile(src):
return comp
raise MesonException('No specified compiler can handle file {!s}'.format(src))
def classify_unity_sources(compilers, sources):
compsrclist = {}
for src in sources:
comp = get_compiler_for_source(compilers, src)
if comp not in compsrclist:
compsrclist[comp] = [src]
else:
compsrclist[comp].append(src)
return compsrclist
class OrderedEnum(Enum):
"""
An Enum which additionally offers homogeneous ordered comparison.
"""
def __ge__(self, other):
if self.__class__ is other.__class__:
return self.value >= other.value
return NotImplemented
def __gt__(self, other):
if self.__class__ is other.__class__:
return self.value > other.value
return NotImplemented
def __le__(self, other):
if self.__class__ is other.__class__:
return self.value <= other.value
return NotImplemented
def __lt__(self, other):
if self.__class__ is other.__class__:
return self.value < other.value
return NotImplemented
class MachineChoice(OrderedEnum):
"""Enum class representing one of the two abstract machine names used in
most places: the build, and host, machines.
"""
BUILD = 0
HOST = 1
def get_lower_case_name(self):
return PerMachine('build', 'host')[self]
def get_prefix(self):
return PerMachine('build.', '')[self]
class PerMachine(typing.Generic[_T]):
def __init__(self, build: _T, host: _T):
self.build = build
self.host = host
def __getitem__(self, machine: MachineChoice) -> _T:
return {
MachineChoice.BUILD: self.build,
MachineChoice.HOST: self.host,
}[machine]
def __setitem__(self, machine: MachineChoice, val: _T) -> None:
setattr(self, machine.get_lower_case_name(), val)
    def miss_defaulting(self) -> "PerMachineDefaultable[typing.Optional[_T]]":
        """Unset definitions that merely duplicate their fallback machine, setting them to None
This is the inverse of ''default_missing''. By removing defaulted
machines, we can elaborate the original and then redefault them and thus
avoid repeating the elaboration explicitly.
"""
unfreeze = PerMachineDefaultable() # type: PerMachineDefaultable[typing.Optional[_T]]
unfreeze.build = self.build
unfreeze.host = self.host
if unfreeze.host == unfreeze.build:
unfreeze.host = None
return unfreeze
class PerThreeMachine(PerMachine[_T]):
"""Like `PerMachine` but includes `target` too.
    It turns out we only need to track the target machine for one thing. There's no
    need to compute the `target` field, so we don't bother overriding the
`__getitem__`/`__setitem__` methods.
"""
def __init__(self, build: _T, host: _T, target: _T):
super().__init__(build, host)
self.target = target
    def miss_defaulting(self) -> "PerThreeMachineDefaultable[typing.Optional[_T]]":
        """Unset definitions that merely duplicate their fallback machine, setting them to None
This is the inverse of ''default_missing''. By removing defaulted
machines, we can elaborate the original and then redefault them and thus
avoid repeating the elaboration explicitly.
"""
unfreeze = PerThreeMachineDefaultable() # type: PerThreeMachineDefaultable[typing.Optional[_T]]
unfreeze.build = self.build
unfreeze.host = self.host
unfreeze.target = self.target
if unfreeze.target == unfreeze.host:
unfreeze.target = None
if unfreeze.host == unfreeze.build:
unfreeze.host = None
return unfreeze
def matches_build_machine(self, machine: MachineChoice) -> bool:
return self.build == self[machine]
class PerMachineDefaultable(PerMachine[typing.Optional[_T]]):
"""Extends `PerMachine` with the ability to default from `None`s.
"""
def __init__(self) -> None:
super().__init__(None, None)
    def default_missing(self) -> "PerMachine[typing.Optional[_T]]":
        """Default host to build
This allows just specifying nothing in the native case, and just host in the
cross non-compiler case.
"""
freeze = PerMachine(self.build, self.host)
if freeze.host is None:
freeze.host = freeze.build
return freeze
class PerThreeMachineDefaultable(PerMachineDefaultable, PerThreeMachine[typing.Optional[_T]]):
"""Extends `PerThreeMachine` with the ability to default from `None`s.
"""
def __init__(self) -> None:
PerThreeMachine.__init__(self, None, None, None)
    def default_missing(self) -> "PerThreeMachine[typing.Optional[_T]]":
        """Default host to build and target to host.
This allows just specifying nothing in the native case, just host in the
cross non-compiler case, and just target in the native-built
cross-compiler case.
"""
freeze = PerThreeMachine(self.build, self.host, self.target)
if freeze.host is None:
freeze.host = freeze.build
if freeze.target is None:
freeze.target = freeze.host
return freeze
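# Defaulting sketch (illustrative machine name): with only the build machine known,
# host and target collapse onto it, matching the native-build case described above.
#   machines = PerThreeMachineDefaultable()
#   machines.build = 'x86_64-linux'
#   machines.default_missing().target   # -> 'x86_64-linux'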
def is_sunos() -> bool:
return platform.system().lower() == 'sunos'
def is_osx() -> bool:
return platform.system().lower() == 'darwin'
def is_linux() -> bool:
return platform.system().lower() == 'linux'
def is_android() -> bool:
return platform.system().lower() == 'android'
def is_haiku() -> bool:
return platform.system().lower() == 'haiku'
def is_openbsd() -> bool:
return platform.system().lower() == 'openbsd'
def is_windows() -> bool:
platname = platform.system().lower()
return platname == 'windows' or 'mingw' in platname
def is_cygwin() -> bool:
return platform.system().lower().startswith('cygwin')
def is_debianlike() -> bool:
return os.path.isfile('/etc/debian_version')
def is_dragonflybsd() -> bool:
return platform.system().lower() == 'dragonfly'
def is_netbsd() -> bool:
return platform.system().lower() == 'netbsd'
def is_freebsd() -> bool:
return platform.system().lower() == 'freebsd'
def exe_exists(arglist: typing.List[str]) -> bool:
try:
if subprocess.run(arglist, timeout=10).returncode == 0:
return True
except (FileNotFoundError, subprocess.TimeoutExpired):
pass
return False
@lru_cache(maxsize=None)
def darwin_get_object_archs(objpath):
'''
For a specific object (executable, static library, dylib, etc), run `lipo`
to fetch the list of archs supported by it. Supports both thin objects and
'fat' objects.
'''
_, stdo, stderr = Popen_safe(['lipo', '-info', objpath])
if not stdo:
mlog.debug('lipo {}: {}'.format(objpath, stderr))
return None
stdo = stdo.rsplit(': ', 1)[1]
# Convert from lipo-style archs to meson-style CPUs
stdo = stdo.replace('i386', 'x86')
stdo = stdo.replace('arm64', 'aarch64')
# Add generic name for armv7 and armv7s
if 'armv7' in stdo:
stdo += ' arm'
return stdo.split()
def detect_vcs(source_dir):
vcs_systems = [
dict(name = 'git', cmd = 'git', repo_dir = '.git', get_rev = 'git describe --dirty=+', rev_regex = '(.*)', dep = '.git/logs/HEAD'),
dict(name = 'mercurial', cmd = 'hg', repo_dir = '.hg', get_rev = 'hg id -i', rev_regex = '(.*)', dep = '.hg/dirstate'),
dict(name = 'subversion', cmd = 'svn', repo_dir = '.svn', get_rev = 'svn info', rev_regex = 'Revision: (.*)', dep = '.svn/wc.db'),
dict(name = 'bazaar', cmd = 'bzr', repo_dir = '.bzr', get_rev = 'bzr revno', rev_regex = '(.*)', dep = '.bzr'),
]
# FIXME: this is much cleaner with pathlib.Path
segs = source_dir.replace('\\', '/').split('/')
for i in range(len(segs), -1, -1):
curdir = '/'.join(segs[:i])
for vcs in vcs_systems:
if os.path.isdir(os.path.join(curdir, vcs['repo_dir'])) and shutil.which(vcs['cmd']):
vcs['wc_dir'] = curdir
return vcs
return None
# a helper class which implements the same version ordering as RPM
class Version:
def __init__(self, s):
self._s = s
# split into numeric, alphabetic and non-alphanumeric sequences
sequences = re.finditer(r'(\d+|[a-zA-Z]+|[^a-zA-Z\d]+)', s)
# non-alphanumeric separators are discarded
sequences = [m for m in sequences if not re.match(r'[^a-zA-Z\d]+', m.group(1))]
# numeric sequences are converted from strings to ints
sequences = [int(m.group(1)) if m.group(1).isdigit() else m.group(1) for m in sequences]
self._v = sequences
def __str__(self):
return '%s (V=%s)' % (self._s, str(self._v))
def __repr__(self):
return '<Version: {}>'.format(self._s)
def __lt__(self, other):
if isinstance(other, Version):
return self.__cmp(other, operator.lt)
return NotImplemented
def __gt__(self, other):
if isinstance(other, Version):
return self.__cmp(other, operator.gt)
return NotImplemented
def __le__(self, other):
if isinstance(other, Version):
return self.__cmp(other, operator.le)
return NotImplemented
def __ge__(self, other):
if isinstance(other, Version):
return self.__cmp(other, operator.ge)
return NotImplemented
def __eq__(self, other):
if isinstance(other, Version):
return self._v == other._v
return NotImplemented
def __ne__(self, other):
if isinstance(other, Version):
return self._v != other._v
return NotImplemented
def __cmp(self, other, comparator):
# compare each sequence in order
for ours, theirs in zip(self._v, other._v):
# sort a non-digit sequence before a digit sequence
ours_is_int = isinstance(ours, int)
theirs_is_int = isinstance(theirs, int)
if ours_is_int != theirs_is_int:
return comparator(ours_is_int, theirs_is_int)
if ours != theirs:
return comparator(ours, theirs)
# if equal length, all components have matched, so equal
# otherwise, the version with a suffix remaining is greater
return comparator(len(self._v), len(other._v))
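# A couple of orderings implied by the rules above (illustrative only):
#   Version('1.10') > Version('1.2')    # numeric sequences compare as ints, not as strings
#   Version('1.0') < Version('1.0.1')   # with a common prefix, the version with a suffix left over is greater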
def _version_extract_cmpop(vstr2: str) -> typing.Tuple[typing.Callable[[typing.Any, typing.Any], bool], str]:
if vstr2.startswith('>='):
cmpop = operator.ge
vstr2 = vstr2[2:]
elif vstr2.startswith('<='):
cmpop = operator.le
vstr2 = vstr2[2:]
elif vstr2.startswith('!='):
cmpop = operator.ne
vstr2 = vstr2[2:]
elif vstr2.startswith('=='):
cmpop = operator.eq
vstr2 = vstr2[2:]
elif vstr2.startswith('='):
cmpop = operator.eq
vstr2 = vstr2[1:]
elif vstr2.startswith('>'):
cmpop = operator.gt
vstr2 = vstr2[1:]
elif vstr2.startswith('<'):
cmpop = operator.lt
vstr2 = vstr2[1:]
else:
cmpop = operator.eq
return (cmpop, vstr2)
def version_compare(vstr1: str, vstr2: str) -> bool:
(cmpop, vstr2) = _version_extract_cmpop(vstr2)
return cmpop(Version(vstr1), Version(vstr2))
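# Quick sanity checks of the comparator extraction (illustrative):
#   version_compare('1.2.3', '>=1.2')   # True
#   version_compare('1.2.3', '<1.2.3')  # False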
def version_compare_many(vstr1, conditions):
if not isinstance(conditions, (list, tuple, frozenset)):
conditions = [conditions]
found = []
not_found = []
for req in conditions:
if not version_compare(vstr1, req):
not_found.append(req)
else:
found.append(req)
return not_found == [], not_found, found
# determine if the minimum version satisfying the condition |condition| exceeds
# the minimum version for a feature |minimum|
def version_compare_condition_with_min(condition: str, minimum: str) -> bool:
if condition.startswith('>='):
cmpop = operator.le
condition = condition[2:]
elif condition.startswith('<='):
return False
elif condition.startswith('!='):
return False
elif condition.startswith('=='):
cmpop = operator.le
condition = condition[2:]
elif condition.startswith('='):
cmpop = operator.le
condition = condition[1:]
elif condition.startswith('>'):
cmpop = operator.lt
condition = condition[1:]
elif condition.startswith('<'):
return False
else:
cmpop = operator.le
# Declaring a project(meson_version: '>=0.46') and then using features in
# 0.46.0 is valid, because (knowing the meson versioning scheme) '0.46.0' is
# the lowest version which satisfies the constraint '>=0.46'.
#
# But this will fail here, because the minimum version required by the
# version constraint ('0.46') is strictly less (in our version comparison)
# than the minimum version needed for the feature ('0.46.0').
#
# Map versions in the constraint of the form '0.46' to '0.46.0', to embed
# this knowledge of the meson versioning scheme.
condition = condition.strip()
    if re.match(r'^\d+\.\d+$', condition):
condition += '.0'
return cmpop(Version(minimum), Version(condition))
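# Following the comment above (illustrative):
#   version_compare_condition_with_min('>=0.46', '0.46.0')  # True: '>=0.46' guarantees at least 0.46.0
#   version_compare_condition_with_min('>=0.45', '0.46.0')  # False: '>=0.45' does not guarantee 0.46.0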
def default_libdir():
if is_debianlike():
try:
pc = subprocess.Popen(['dpkg-architecture', '-qDEB_HOST_MULTIARCH'],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL)
(stdo, _) = pc.communicate()
if pc.returncode == 0:
archpath = stdo.decode().strip()
return 'lib/' + archpath
except Exception:
pass
if is_freebsd():
return 'lib'
if os.path.isdir('/usr/lib64') and not os.path.islink('/usr/lib64'):
return 'lib64'
return 'lib'
def default_libexecdir():
# There is no way to auto-detect this, so it must be set at build time
return 'libexec'
def default_prefix():
return 'c:/' if is_windows() else '/usr/local'
def get_library_dirs() -> typing.List[str]:
if is_windows():
        return ['C:/mingw/lib'] # TODO: get programmatically
if is_osx():
        return ['/usr/lib'] # TODO: get programmatically
# The following is probably Debian/Ubuntu specific.
# /usr/local/lib is first because it contains stuff
# installed by the sysadmin and is probably more up-to-date
# than /usr/lib. If you feel that this search order is
# problematic, please raise the issue on the mailing list.
unixdirs = ['/usr/local/lib', '/usr/lib', '/lib']
if is_freebsd():
return unixdirs
# FIXME: this needs to be further genericized for aarch64 etc.
machine = platform.machine()
if machine in ('i386', 'i486', 'i586', 'i686'):
plat = 'i386'
elif machine.startswith('arm'):
plat = 'arm'
else:
plat = ''
# Solaris puts 32-bit libraries in the main /lib & /usr/lib directories
# and 64-bit libraries in platform specific subdirectories.
if is_sunos():
if machine == 'i86pc':
plat = 'amd64'
elif machine.startswith('sun4'):
plat = 'sparcv9'
usr_platdir = Path('/usr/lib/') / plat
if usr_platdir.is_dir():
unixdirs += [str(x) for x in (usr_platdir).iterdir() if x.is_dir()]
if os.path.exists('/usr/lib64'):
unixdirs.append('/usr/lib64')
lib_platdir = Path('/lib/') / plat
if lib_platdir.is_dir():
unixdirs += [str(x) for x in (lib_platdir).iterdir() if x.is_dir()]
if os.path.exists('/lib64'):
unixdirs.append('/lib64')
return unixdirs
def has_path_sep(name, sep='/\\'):
'Checks if any of the specified @sep path separators are in @name'
for each in sep:
if each in name:
return True
return False
if is_windows():
    # shlex.split is not suitable for splitting command line on Windows (https://bugs.python.org/issue1724822);
# shlex.quote is similarly problematic. Below are "proper" implementations of these functions according to
# https://docs.microsoft.com/en-us/cpp/c-language/parsing-c-command-line-arguments and
# https://blogs.msdn.microsoft.com/twistylittlepassagesallalike/2011/04/23/everyone-quotes-command-line-arguments-the-wrong-way/
_whitespace = ' \t\n\r'
_find_unsafe_char = re.compile(r'[{}"]'.format(_whitespace)).search
def quote_arg(arg):
if arg and not _find_unsafe_char(arg):
return arg
result = '"'
num_backslashes = 0
for c in arg:
if c == '\\':
num_backslashes += 1
else:
if c == '"':
# Escape all backslashes and the following double quotation mark
num_backslashes = num_backslashes * 2 + 1
result += num_backslashes * '\\' + c
num_backslashes = 0
# Escape all backslashes, but let the terminating double quotation
# mark we add below be interpreted as a metacharacter
result += (num_backslashes * 2) * '\\' + '"'
return result
def split_args(cmd):
result = []
arg = ''
num_backslashes = 0
num_quotes = 0
in_quotes = False
for c in cmd:
if c == '\\':
num_backslashes += 1
else:
if c == '"' and not (num_backslashes % 2):
# unescaped quote, eat it
arg += (num_backslashes // 2) * '\\'
num_quotes += 1
in_quotes = not in_quotes
elif c in _whitespace and not in_quotes:
if arg or num_quotes:
# reached the end of the argument
result.append(arg)
arg = ''
num_quotes = 0
else:
if c == '"':
# escaped quote
num_backslashes = (num_backslashes - 1) // 2
arg += num_backslashes * '\\' + c
num_backslashes = 0
if arg or num_quotes:
result.append(arg)
return result
else:
def quote_arg(arg):
return shlex.quote(arg)
def split_args(cmd):
return shlex.split(cmd)
def join_args(args):
return ' '.join([quote_arg(x) for x in args])
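# Behaviour sketch for the helpers above (illustrative; shown for the POSIX branch, which
# delegates to shlex - the Windows branch follows the MSVC parsing rules instead):
#   split_args('gcc -o "my file.o" main.c')  # -> ['gcc', '-o', 'my file.o', 'main.c']
#   join_args(['gcc', '-o', 'my file.o'])    # -> "gcc -o 'my file.o'"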
def do_replacement(regex, line, variable_format, confdata):
missing_variables = set()
start_tag = '@'
backslash_tag = '\\@'
if variable_format == 'cmake':
start_tag = '${'
backslash_tag = '\\${'
def variable_replace(match):
# Pairs of escape characters before '@' or '\@'
if match.group(0).endswith('\\'):
num_escapes = match.end(0) - match.start(0)
return '\\' * (num_escapes // 2)
# Single escape character and '@'
elif match.group(0) == backslash_tag:
return start_tag
# Template variable to be replaced
else:
varname = match.group(1)
if varname in confdata:
(var, desc) = confdata.get(varname)
if isinstance(var, str):
pass
elif isinstance(var, int):
var = str(var)
else:
msg = 'Tried to replace variable {!r} value with ' \
'something other than a string or int: {!r}'
raise MesonException(msg.format(varname, var))
else:
missing_variables.add(varname)
var = ''
return var
return re.sub(regex, variable_replace, line), missing_variables
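# Substitution sketch (illustrative; confdata entries are (value, description) pairs and the
# regex is the 'meson' format one built in do_conf_file below):
#   regex = re.compile(r'(?:\\\\)+(?=\\?@)|\\@|@([-a-zA-Z0-9_]+)@')
#   do_replacement(regex, '#define VERSION "@VERSION@"\n', 'meson', {'VERSION': ('1.0', '')})
#   -> ('#define VERSION "1.0"\n', set())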
def do_mesondefine(line, confdata):
arr = line.split()
if len(arr) != 2:
raise MesonException('#mesondefine does not contain exactly two tokens: %s' % line.strip())
varname = arr[1]
try:
(v, desc) = confdata.get(varname)
except KeyError:
return '/* #undef %s */\n' % varname
if isinstance(v, bool):
if v:
return '#define %s\n' % varname
else:
return '#undef %s\n' % varname
elif isinstance(v, int):
return '#define %s %d\n' % (varname, v)
elif isinstance(v, str):
return '#define %s %s\n' % (varname, v)
else:
raise MesonException('#mesondefine argument "%s" is of unknown type.' % varname)
def do_conf_file(src, dst, confdata, variable_format, encoding='utf-8'):
try:
with open(src, encoding=encoding, newline='') as f:
data = f.readlines()
except Exception as e:
raise MesonException('Could not read input file %s: %s' % (src, str(e)))
# Only allow (a-z, A-Z, 0-9, _, -) as valid characters for a define
# Also allow escaping '@' with '\@'
if variable_format in ['meson', 'cmake@']:
regex = re.compile(r'(?:\\\\)+(?=\\?@)|\\@|@([-a-zA-Z0-9_]+)@')
elif variable_format == 'cmake':
regex = re.compile(r'(?:\\\\)+(?=\\?\$)|\\\${|\${([-a-zA-Z0-9_]+)}')
else:
raise MesonException('Format "{}" not handled'.format(variable_format))
search_token = '#mesondefine'
if variable_format != 'meson':
search_token = '#cmakedefine'
result = []
missing_variables = set()
# Detect when the configuration data is empty and no tokens were found
# during substitution so we can warn the user to use the `copy:` kwarg.
confdata_useless = not confdata.keys()
for line in data:
if line.startswith(search_token):
confdata_useless = False
line = do_mesondefine(line, confdata)
else:
line, missing = do_replacement(regex, line, variable_format, confdata)
missing_variables.update(missing)
if missing:
confdata_useless = False
result.append(line)
dst_tmp = dst + '~'
try:
with open(dst_tmp, 'w', encoding=encoding, newline='') as f:
f.writelines(result)
except Exception as e:
raise MesonException('Could not write output file %s: %s' % (dst, str(e)))
shutil.copymode(src, dst_tmp)
replace_if_different(dst, dst_tmp)
return missing_variables, confdata_useless
CONF_C_PRELUDE = '''/*
* Autogenerated by the Meson build system.
* Do not edit, your changes will be lost.
*/
#pragma once
'''
CONF_NASM_PRELUDE = '''; Autogenerated by the Meson build system.
; Do not edit, your changes will be lost.
'''
def dump_conf_header(ofilename, cdata, output_format):
if output_format == 'c':
prelude = CONF_C_PRELUDE
prefix = '#'
elif output_format == 'nasm':
prelude = CONF_NASM_PRELUDE
prefix = '%'
ofilename_tmp = ofilename + '~'
with open(ofilename_tmp, 'w', encoding='utf-8') as ofile:
ofile.write(prelude)
for k in sorted(cdata.keys()):
(v, desc) = cdata.get(k)
if desc:
if output_format == 'c':
ofile.write('/* %s */\n' % desc)
elif output_format == 'nasm':
for line in desc.split('\n'):
ofile.write('; %s\n' % line)
if isinstance(v, bool):
if v:
ofile.write('%sdefine %s\n\n' % (prefix, k))
else:
ofile.write('%sundef %s\n\n' % (prefix, k))
elif isinstance(v, (int, str)):
ofile.write('%sdefine %s %s\n\n' % (prefix, k, v))
else:
raise MesonException('Unknown data type in configuration file entry: ' + k)
replace_if_different(ofilename, ofilename_tmp)
def replace_if_different(dst, dst_tmp):
# If contents are identical, don't touch the file to prevent
# unnecessary rebuilds.
different = True
try:
with open(dst, 'rb') as f1, open(dst_tmp, 'rb') as f2:
if f1.read() == f2.read():
different = False
except FileNotFoundError:
pass
if different:
os.replace(dst_tmp, dst)
else:
os.unlink(dst_tmp)
def listify(item, flatten=True, unholder=False):
'''
Returns a list with all args embedded in a list if they are not a list.
This function preserves order.
@flatten: Convert lists of lists to a flat list
@unholder: Replace each item with the object it holds, if required
Note: unholding only works recursively when flattening
'''
if not isinstance(item, list):
if unholder and hasattr(item, 'held_object'):
item = item.held_object
return [item]
result = []
for i in item:
if unholder and hasattr(i, 'held_object'):
i = i.held_object
if flatten and isinstance(i, list):
result += listify(i, flatten=True, unholder=unholder)
else:
result.append(i)
return result
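# Flattening sketch (illustrative):
#   listify('foo')                    # -> ['foo']
#   listify([1, [2, [3]]])            # -> [1, 2, 3]
#   listify([1, [2]], flatten=False)  # -> [1, [2]]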
def extract_as_list(dict_object, *keys, pop=False, **kwargs):
'''
Extracts all values from given dict_object and listifies them.
'''
result = []
fetch = dict_object.get
if pop:
fetch = dict_object.pop
# If there's only one key, we don't return a list with one element
if len(keys) == 1:
return listify(fetch(keys[0], []), **kwargs)
# Return a list of values corresponding to *keys
for key in keys:
result.append(listify(fetch(key, []), **kwargs))
return result
def typeslistify(item: 'typing.Union[_T, typing.List[_T]]',
types: 'typing.Union[typing.Type[_T], typing.Tuple[typing.Type[_T]]]') -> typing.List[_T]:
'''
Ensure that type(@item) is one of @types or a
list of items all of which are of type @types
'''
if isinstance(item, types):
item = typing.cast(typing.List[_T], [item])
if not isinstance(item, list):
raise MesonException('Item must be a list or one of {!r}'.format(types))
for i in item:
if i is not None and not isinstance(i, types):
raise MesonException('List item must be one of {!r}'.format(types))
return item
def stringlistify(item: typing.Union[str, typing.List[str]]) -> typing.List[str]:
return typeslistify(item, str)
def expand_arguments(args):
expended_args = []
for arg in args:
if not arg.startswith('@'):
expended_args.append(arg)
continue
args_file = arg[1:]
try:
with open(args_file) as f:
extended_args = f.read().split()
expended_args += extended_args
except Exception as e:
print('Error expanding command line arguments, %s not found' % args_file)
print(e)
return None
return expended_args
def Popen_safe(args: typing.List[str], write: typing.Optional[str] = None,
stdout: typing.Union[typing.BinaryIO, int] = subprocess.PIPE,
stderr: typing.Union[typing.BinaryIO, int] = subprocess.PIPE,
**kwargs: typing.Any) -> typing.Tuple[subprocess.Popen, str, str]:
import locale
encoding = locale.getpreferredencoding()
if sys.version_info < (3, 6) or not sys.stdout.encoding or encoding.upper() != 'UTF-8':
return Popen_safe_legacy(args, write=write, stdout=stdout, stderr=stderr, **kwargs)
p = subprocess.Popen(args, universal_newlines=True, close_fds=False,
stdout=stdout, stderr=stderr, **kwargs)
o, e = p.communicate(write)
return p, o, e
def Popen_safe_legacy(args: typing.List[str], write: typing.Optional[str] = None,
stdout: typing.Union[typing.BinaryIO, int] = subprocess.PIPE,
stderr: typing.Union[typing.BinaryIO, int] = subprocess.PIPE,
**kwargs: typing.Any) -> typing.Tuple[subprocess.Popen, str, str]:
p = subprocess.Popen(args, universal_newlines=False, close_fds=False,
stdout=stdout, stderr=stderr, **kwargs)
input_ = None # type: typing.Optional[bytes]
if write is not None:
input_ = write.encode('utf-8')
o, e = p.communicate(input_)
if o is not None:
if sys.stdout.encoding:
o = o.decode(encoding=sys.stdout.encoding, errors='replace').replace('\r\n', '\n')
else:
o = o.decode(errors='replace').replace('\r\n', '\n')
if e is not None:
if sys.stderr.encoding:
e = e.decode(encoding=sys.stderr.encoding, errors='replace').replace('\r\n', '\n')
else:
e = e.decode(errors='replace').replace('\r\n', '\n')
return p, o, e
def iter_regexin_iter(regexiter, initer):
'''
Takes each regular expression in @regexiter and tries to search for it in
every item in @initer. If there is a match, returns that match.
Else returns False.
'''
for regex in regexiter:
for ii in initer:
if not isinstance(ii, str):
continue
match = re.search(regex, ii)
if match:
return match.group()
return False
def _substitute_values_check_errors(command, values):
# Error checking
inregex = ('@INPUT([0-9]+)?@', '@PLAINNAME@', '@BASENAME@')
outregex = ('@OUTPUT([0-9]+)?@', '@OUTDIR@')
if '@INPUT@' not in values:
# Error out if any input-derived templates are present in the command
match = iter_regexin_iter(inregex, command)
if match:
m = 'Command cannot have {!r}, since no input files were specified'
raise MesonException(m.format(match))
else:
if len(values['@INPUT@']) > 1:
# Error out if @PLAINNAME@ or @BASENAME@ is present in the command
match = iter_regexin_iter(inregex[1:], command)
if match:
raise MesonException('Command cannot have {!r} when there is '
'more than one input file'.format(match))
# Error out if an invalid @INPUTnn@ template was specified
for each in command:
if not isinstance(each, str):
continue
match = re.search(inregex[0], each)
if match and match.group() not in values:
m = 'Command cannot have {!r} since there are only {!r} inputs'
raise MesonException(m.format(match.group(), len(values['@INPUT@'])))
if '@OUTPUT@' not in values:
# Error out if any output-derived templates are present in the command
match = iter_regexin_iter(outregex, command)
if match:
m = 'Command cannot have {!r} since there are no outputs'
raise MesonException(m.format(match))
else:
# Error out if an invalid @OUTPUTnn@ template was specified
for each in command:
if not isinstance(each, str):
continue
match = re.search(outregex[0], each)
if match and match.group() not in values:
m = 'Command cannot have {!r} since there are only {!r} outputs'
raise MesonException(m.format(match.group(), len(values['@OUTPUT@'])))
def substitute_values(command, values):
'''
Substitute the template strings in the @values dict into the list of
strings @command and return a new list. For a full list of the templates,
see get_filenames_templates_dict()
If multiple inputs/outputs are given in the @values dictionary, we
substitute @INPUT@ and @OUTPUT@ only if they are the entire string, not
just a part of it, and in that case we substitute *all* of them.
'''
# Error checking
_substitute_values_check_errors(command, values)
# Substitution
outcmd = []
rx_keys = [re.escape(key) for key in values if key not in ('@INPUT@', '@OUTPUT@')]
value_rx = re.compile('|'.join(rx_keys)) if rx_keys else None
for vv in command:
if not isinstance(vv, str):
outcmd.append(vv)
elif '@INPUT@' in vv:
inputs = values['@INPUT@']
if vv == '@INPUT@':
outcmd += inputs
elif len(inputs) == 1:
outcmd.append(vv.replace('@INPUT@', inputs[0]))
else:
raise MesonException("Command has '@INPUT@' as part of a "
"string and more than one input file")
elif '@OUTPUT@' in vv:
outputs = values['@OUTPUT@']
if vv == '@OUTPUT@':
outcmd += outputs
elif len(outputs) == 1:
outcmd.append(vv.replace('@OUTPUT@', outputs[0]))
else:
raise MesonException("Command has '@OUTPUT@' as part of a "
"string and more than one output file")
# Append values that are exactly a template string.
# This is faster than a string replace.
elif vv in values:
outcmd.append(values[vv])
# Substitute everything else with replacement
elif value_rx:
outcmd.append(value_rx.sub(lambda m: values[m.group(0)], vv))
else:
outcmd.append(vv)
return outcmd
def get_filenames_templates_dict(inputs, outputs):
'''
Create a dictionary with template strings as keys and values as values for
the following templates:
@INPUT@ - the full path to one or more input files, from @inputs
@OUTPUT@ - the full path to one or more output files, from @outputs
@OUTDIR@ - the full path to the directory containing the output files
If there is only one input file, the following keys are also created:
@PLAINNAME@ - the filename of the input file
@BASENAME@ - the filename of the input file with the extension removed
If there is more than one input file, the following keys are also created:
@INPUT0@, @INPUT1@, ... one for each input file
If there is more than one output file, the following keys are also created:
@OUTPUT0@, @OUTPUT1@, ... one for each output file
'''
values = {}
# Gather values derived from the input
if inputs:
# We want to substitute all the inputs.
values['@INPUT@'] = inputs
for (ii, vv) in enumerate(inputs):
# Write out @INPUT0@, @INPUT1@, ...
values['@INPUT{}@'.format(ii)] = vv
if len(inputs) == 1:
# Just one value, substitute @PLAINNAME@ and @BASENAME@
values['@PLAINNAME@'] = plain = os.path.basename(inputs[0])
values['@BASENAME@'] = os.path.splitext(plain)[0]
if outputs:
# Gather values derived from the outputs, similar to above.
values['@OUTPUT@'] = outputs
for (ii, vv) in enumerate(outputs):
values['@OUTPUT{}@'.format(ii)] = vv
# Outdir should be the same for all outputs
values['@OUTDIR@'] = os.path.dirname(outputs[0])
# Many external programs fail on empty arguments.
if values['@OUTDIR@'] == '':
values['@OUTDIR@'] = '.'
return values
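# End-to-end sketch of the template machinery (illustrative file names):
#   values = get_filenames_templates_dict(['foo.c.in'], ['foo.c'])
#   substitute_values(['@INPUT@', '-o', '@OUTPUT@', '--base', '@BASENAME@'], values)
#   -> ['foo.c.in', '-o', 'foo.c', '--base', 'foo.c']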
def _make_tree_writable(topdir):
# Ensure all files and directories under topdir are writable
# (and readable) by owner.
for d, _, files in os.walk(topdir):
os.chmod(d, os.stat(d).st_mode | stat.S_IWRITE | stat.S_IREAD)
for fname in files:
fpath = os.path.join(d, fname)
if os.path.isfile(fpath):
os.chmod(fpath, os.stat(fpath).st_mode | stat.S_IWRITE | stat.S_IREAD)
def windows_proof_rmtree(f):
# On Windows if anyone is holding a file open you can't
# delete it. As an example an anti virus scanner might
# be scanning files you are trying to delete. The only
# way to fix this is to try again and again.
delays = [0.1, 0.1, 0.2, 0.2, 0.2, 0.5, 0.5, 1, 1, 1, 1, 2]
    # Start by making the tree writable.
_make_tree_writable(f)
for d in delays:
try:
shutil.rmtree(f)
return
except FileNotFoundError:
return
except (OSError, PermissionError):
time.sleep(d)
# Try one last time and throw if it fails.
shutil.rmtree(f)
def windows_proof_rm(fpath):
"""Like windows_proof_rmtree, but for a single file."""
if os.path.isfile(fpath):
os.chmod(fpath, os.stat(fpath).st_mode | stat.S_IWRITE | stat.S_IREAD)
delays = [0.1, 0.1, 0.2, 0.2, 0.2, 0.5, 0.5, 1, 1, 1, 1, 2]
for d in delays:
try:
os.unlink(fpath)
return
except FileNotFoundError:
return
except (OSError, PermissionError):
time.sleep(d)
os.unlink(fpath)
def detect_subprojects(spdir_name, current_dir='', result=None):
if result is None:
result = {}
spdir = os.path.join(current_dir, spdir_name)
if not os.path.exists(spdir):
return result
for trial in glob(os.path.join(spdir, '*')):
basename = os.path.basename(trial)
        if basename == 'packagecache':
continue
append_this = True
if os.path.isdir(trial):
detect_subprojects(spdir_name, trial, result)
elif trial.endswith('.wrap') and os.path.isfile(trial):
basename = os.path.splitext(basename)[0]
else:
append_this = False
if append_this:
if basename in result:
result[basename].append(trial)
else:
result[basename] = [trial]
return result
# This isn't strictly correct. What we really want here is something like:
# class StringProtocol(typing_extensions.Protocol):
#
# def __str__(self) -> str: ...
#
# This would more accurately embody what this function can handle, but we
# don't have that yet, so instead we'll do some casting to work around it
def get_error_location_string(fname: str, lineno: str) -> str:
return '{}:{}:'.format(fname, lineno)
def substring_is_in_list(substr: str, strlist: typing.List[str]) -> bool:
for s in strlist:
if substr in s:
return True
return False
class OrderedSet(collections.abc.MutableSet):
"""A set that preserves the order in which items are added, by first
insertion.
"""
def __init__(self, iterable=None):
self.__container = collections.OrderedDict()
if iterable:
self.update(iterable)
def __contains__(self, value):
return value in self.__container
def __iter__(self):
return iter(self.__container.keys())
def __len__(self):
return len(self.__container)
def __repr__(self):
# Don't print 'OrderedSet("")' for an empty set.
if self.__container:
return 'OrderedSet("{}")'.format(
'", "'.join(repr(e) for e in self.__container.keys()))
return 'OrderedSet()'
def __reversed__(self):
return reversed(self.__container)
def add(self, value):
self.__container[value] = None
def discard(self, value):
if value in self.__container:
del self.__container[value]
def update(self, iterable):
for item in iterable:
self.__container[item] = None
def difference(self, set_):
return type(self)(e for e in self if e not in set_)
class BuildDirLock:
def __init__(self, builddir):
self.lockfilename = os.path.join(builddir, 'meson-private/meson.lock')
def __enter__(self):
self.lockfile = open(self.lockfilename, 'w')
try:
if have_fcntl:
fcntl.flock(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
elif have_msvcrt:
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
except (BlockingIOError, PermissionError):
self.lockfile.close()
raise MesonException('Some other Meson process is already using this build directory. Exiting.')
def __exit__(self, *args):
if have_fcntl:
fcntl.flock(self.lockfile, fcntl.LOCK_UN)
elif have_msvcrt:
msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
self.lockfile.close()
def relpath(path, start):
# On Windows a relative path can't be evaluated for paths on two different
# drives (i.e. c:\foo and f:\bar). The only thing left to do is to use the
# original absolute path.
try:
return os.path.relpath(path, start)
except ValueError:
return path
class LibType(Enum):
"""Enumeration for library types."""
SHARED = 0
STATIC = 1
PREFER_SHARED = 2
PREFER_STATIC = 3
class ProgressBarFallback:
'''Fallback progress bar implementation when tqdm is not found'''
def __init__(self, iterable=None, total=None, bar_type=None, desc=None):
if iterable is not None:
self.iterable = iter(iterable)
return
self.total = total
self.done = 0
self.printed_dots = 0
if self.total and bar_type == 'download':
print('Download size:', self.total)
if desc:
print('{}: '.format(desc), end='')
# Pretend to be an iterator when called as one and don't print any
# progress
def __iter__(self):
return self.iterable
def __next__(self):
return next(self.iterable)
def print_dot(self):
print('.', end='')
sys.stdout.flush()
self.printed_dots += 1
def update(self, progress):
self.done += progress
if not self.total:
# Just print one dot per call if we don't have a total length
self.print_dot()
return
ratio = int(self.done / self.total * 10)
while self.printed_dots < ratio:
self.print_dot()
def close(self):
print('')
try:
from tqdm import tqdm
class ProgressBar(tqdm):
def __init__(self, *args, bar_type=None, **kwargs):
if bar_type == 'download':
kwargs.update({'unit': 'bytes', 'leave': True})
else:
kwargs.update({'leave': False})
kwargs['ncols'] = 100
super().__init__(*args, **kwargs)
except ImportError:
ProgressBar = ProgressBarFallback
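# Illustrative usage (a sketch; works with either the tqdm-backed ProgressBar or the
# fallback above, since both accept the same constructor arguments and expose update()/close()):
#   bar = ProgressBar(total=1000, bar_type='download', desc='payload.zip')
#   bar.update(250)   # call repeatedly as chunks arrive
#   bar.close()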
def get_wine_shortpath(winecmd, wine_paths):
""" Get A short version of @wine_paths to avoid
reaching WINEPATH number of char limit.
"""
seen = set()
wine_paths = [p for p in wine_paths if not (p in seen or seen.add(p))]
getShortPathScript = '%s.bat' % str(uuid.uuid4()).lower()[:5]
with open(getShortPathScript, mode='w') as f:
f.write("@ECHO OFF\nfor %%x in (%*) do (\n echo|set /p=;%~sx\n)\n")
f.flush()
try:
with open(os.devnull, 'w') as stderr:
wine_path = subprocess.check_output(
winecmd +
['cmd', '/C', getShortPathScript] + wine_paths,
stderr=stderr).decode('utf-8')
except subprocess.CalledProcessError as e:
print("Could not get short paths: %s" % e)
wine_path = ';'.join(wine_paths)
finally:
os.remove(getShortPathScript)
if len(wine_path) > 2048:
raise MesonException(
'WINEPATH size {} > 2048'
' this will cause random failure.'.format(
len(wine_path)))
return wine_path.strip(';')
| []
| []
| []
| [] | [] | python | 0 | 0 | |
handlers_drive.go | package main
import (
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"time"
"github.com/prasmussen/gdrive/auth"
"github.com/prasmussen/gdrive/cli"
"github.com/prasmussen/gdrive/drive"
)
const TokenFilename = "token_v2.json"
const DefaultCacheFileName = "file_cache.json"
func listHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).List(drive.ListFilesArgs{
Out: os.Stdout,
MaxFiles: args.Int64("maxFiles"),
NameWidth: args.Int64("nameWidth"),
Query: args.String("query"),
SortOrder: args.String("sortOrder"),
SkipHeader: args.Bool("skipHeader"),
SizeInBytes: args.Bool("sizeInBytes"),
AbsPath: args.Bool("absPath"),
})
checkErr(err)
}
func listChangesHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).ListChanges(drive.ListChangesArgs{
Out: os.Stdout,
PageToken: args.String("pageToken"),
MaxChanges: args.Int64("maxChanges"),
Now: args.Bool("now"),
NameWidth: args.Int64("nameWidth"),
SkipHeader: args.Bool("skipHeader"),
})
checkErr(err)
}
func downloadHandler(ctx cli.Context) {
args := ctx.Args()
checkDownloadArgs(args)
err := newDrive(args).Download(drive.DownloadArgs{
Out: os.Stdout,
Id: args.String("fileId"),
Force: args.Bool("force"),
Skip: args.Bool("skip"),
Path: args.String("path"),
Delete: args.Bool("delete"),
Recursive: args.Bool("recursive"),
Stdout: args.Bool("stdout"),
Progress: progressWriter(args.Bool("noProgress")),
Timeout: durationInSeconds(args.Int64("timeout")),
})
checkErr(err)
}
func downloadQueryHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).DownloadQuery(drive.DownloadQueryArgs{
Out: os.Stdout,
Query: args.String("query"),
Force: args.Bool("force"),
Skip: args.Bool("skip"),
Recursive: args.Bool("recursive"),
Path: args.String("path"),
Progress: progressWriter(args.Bool("noProgress")),
})
checkErr(err)
}
func downloadSyncHandler(ctx cli.Context) {
args := ctx.Args()
cachePath := filepath.Join(args.String("configDir"), DefaultCacheFileName)
err := newDrive(args).DownloadSync(drive.DownloadSyncArgs{
Out: os.Stdout,
Progress: progressWriter(args.Bool("noProgress")),
Path: args.String("path"),
RootId: args.String("fileId"),
DryRun: args.Bool("dryRun"),
DeleteExtraneous: args.Bool("deleteExtraneous"),
Timeout: durationInSeconds(args.Int64("timeout")),
Resolution: conflictResolution(args),
Comparer: NewCachedMd5Comparer(cachePath),
})
checkErr(err)
}
func downloadRevisionHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).DownloadRevision(drive.DownloadRevisionArgs{
Out: os.Stdout,
FileId: args.String("fileId"),
RevisionId: args.String("revId"),
Force: args.Bool("force"),
Stdout: args.Bool("stdout"),
Path: args.String("path"),
Progress: progressWriter(args.Bool("noProgress")),
Timeout: durationInSeconds(args.Int64("timeout")),
})
checkErr(err)
}
func uploadHandler(ctx cli.Context) {
args := ctx.Args()
checkUploadArgs(args)
err := newDrive(args).Upload(drive.UploadArgs{
Out: os.Stdout,
Progress: progressWriter(args.Bool("noProgress")),
Path: args.String("path"),
Name: args.String("name"),
Description: args.String("description"),
Parents: args.StringSlice("parent"),
Mime: args.String("mime"),
Recursive: args.Bool("recursive"),
Share: args.Bool("share"),
Delete: args.Bool("delete"),
ChunkSize: args.Int64("chunksize"),
Timeout: durationInSeconds(args.Int64("timeout")),
})
checkErr(err)
}
func uploadStdinHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).UploadStream(drive.UploadStreamArgs{
Out: os.Stdout,
In: os.Stdin,
Name: args.String("name"),
Description: args.String("description"),
Parents: args.StringSlice("parent"),
Mime: args.String("mime"),
Share: args.Bool("share"),
ChunkSize: args.Int64("chunksize"),
Timeout: durationInSeconds(args.Int64("timeout")),
Progress: progressWriter(args.Bool("noProgress")),
})
checkErr(err)
}
func uploadSyncHandler(ctx cli.Context) {
args := ctx.Args()
cachePath := filepath.Join(args.String("configDir"), DefaultCacheFileName)
err := newDrive(args).UploadSync(drive.UploadSyncArgs{
Out: os.Stdout,
Progress: progressWriter(args.Bool("noProgress")),
Path: args.String("path"),
RootId: args.String("fileId"),
DryRun: args.Bool("dryRun"),
DeleteExtraneous: args.Bool("deleteExtraneous"),
ChunkSize: args.Int64("chunksize"),
Timeout: durationInSeconds(args.Int64("timeout")),
Resolution: conflictResolution(args),
Comparer: NewCachedMd5Comparer(cachePath),
})
checkErr(err)
}
func updateHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).Update(drive.UpdateArgs{
Out: os.Stdout,
Id: args.String("fileId"),
Path: args.String("path"),
Name: args.String("name"),
Description: args.String("description"),
Parents: args.StringSlice("parent"),
Mime: args.String("mime"),
Progress: progressWriter(args.Bool("noProgress")),
ChunkSize: args.Int64("chunksize"),
Timeout: durationInSeconds(args.Int64("timeout")),
})
checkErr(err)
}
func infoHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).Info(drive.FileInfoArgs{
Out: os.Stdout,
Id: args.String("fileId"),
SizeInBytes: args.Bool("sizeInBytes"),
})
checkErr(err)
}
func importHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).Import(drive.ImportArgs{
Mime: args.String("mime"),
Out: os.Stdout,
Path: args.String("path"),
Parents: args.StringSlice("parent"),
Progress: progressWriter(args.Bool("noProgress")),
})
checkErr(err)
}
func exportHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).Export(drive.ExportArgs{
Out: os.Stdout,
Id: args.String("fileId"),
Mime: args.String("mime"),
PrintMimes: args.Bool("printMimes"),
Force: args.Bool("force"),
})
checkErr(err)
}
func listRevisionsHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).ListRevisions(drive.ListRevisionsArgs{
Out: os.Stdout,
Id: args.String("fileId"),
NameWidth: args.Int64("nameWidth"),
SizeInBytes: args.Bool("sizeInBytes"),
SkipHeader: args.Bool("skipHeader"),
})
checkErr(err)
}
func mkdirHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).Mkdir(drive.MkdirArgs{
Out: os.Stdout,
Name: args.String("name"),
Description: args.String("description"),
Parents: args.StringSlice("parent"),
})
checkErr(err)
}
func shareHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).Share(drive.ShareArgs{
Out: os.Stdout,
FileId: args.String("fileId"),
Role: args.String("role"),
Type: args.String("type"),
Email: args.String("email"),
Domain: args.String("domain"),
Discoverable: args.Bool("discoverable"),
})
checkErr(err)
}
func shareListHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).ListPermissions(drive.ListPermissionsArgs{
Out: os.Stdout,
FileId: args.String("fileId"),
})
checkErr(err)
}
func shareRevokeHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).RevokePermission(drive.RevokePermissionArgs{
Out: os.Stdout,
FileId: args.String("fileId"),
PermissionId: args.String("permissionId"),
})
checkErr(err)
}
func deleteHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).Delete(drive.DeleteArgs{
Out: os.Stdout,
Id: args.String("fileId"),
Recursive: args.Bool("recursive"),
})
checkErr(err)
}
func listSyncHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).ListSync(drive.ListSyncArgs{
Out: os.Stdout,
SkipHeader: args.Bool("skipHeader"),
})
checkErr(err)
}
func listRecursiveSyncHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).ListRecursiveSync(drive.ListRecursiveSyncArgs{
Out: os.Stdout,
RootId: args.String("fileId"),
SkipHeader: args.Bool("skipHeader"),
PathWidth: args.Int64("pathWidth"),
SizeInBytes: args.Bool("sizeInBytes"),
SortOrder: args.String("sortOrder"),
})
checkErr(err)
}
func deleteRevisionHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).DeleteRevision(drive.DeleteRevisionArgs{
Out: os.Stdout,
FileId: args.String("fileId"),
RevisionId: args.String("revId"),
})
checkErr(err)
}
func aboutHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).About(drive.AboutArgs{
Out: os.Stdout,
SizeInBytes: args.Bool("sizeInBytes"),
})
checkErr(err)
}
func aboutImportHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).AboutImport(drive.AboutImportArgs{
Out: os.Stdout,
})
checkErr(err)
}
func aboutExportHandler(ctx cli.Context) {
args := ctx.Args()
err := newDrive(args).AboutExport(drive.AboutExportArgs{
Out: os.Stdout,
})
checkErr(err)
}
func getOauthClient(args cli.Arguments) (*http.Client, error) {
ClientId := os.Getenv("GOOGLE_API_CLIENT_ID")
ClientSecret := os.Getenv("GOOGLE_API_CLIENT_SECRET")
if args.String("refreshToken") != "" && args.String("accessToken") != "" {
ExitF("Access token not needed when refresh token is provided")
}
if args.String("refreshToken") != "" {
return auth.NewRefreshTokenClient(ClientId, ClientSecret, args.String("refreshToken")), nil
}
if args.String("accessToken") != "" {
return auth.NewAccessTokenClient(ClientId, ClientSecret, args.String("accessToken")), nil
}
configDir := getConfigDir(args)
if args.String("serviceAccount") != "" {
serviceAccountPath := ConfigFilePath(configDir, args.String("serviceAccount"))
serviceAccountClient, err := auth.NewServiceAccountClient(serviceAccountPath)
if err != nil {
return nil, err
}
return serviceAccountClient, nil
}
tokenPath := ConfigFilePath(configDir, TokenFilename)
return auth.NewFileSourceClient(ClientId, ClientSecret, tokenPath, authCodePrompt)
}
func getConfigDir(args cli.Arguments) string {
// Use dir from environment var if present
if os.Getenv("GDRIVE_CONFIG_DIR") != "" {
return os.Getenv("GDRIVE_CONFIG_DIR")
}
return args.String("configDir")
}
func newDrive(args cli.Arguments) *drive.Drive {
oauth, err := getOauthClient(args)
if err != nil {
ExitF("Failed getting oauth client: %s", err.Error())
}
client, err := drive.New(oauth)
if err != nil {
ExitF("Failed getting drive: %s", err.Error())
}
return client
}
func authCodePrompt(url string) func() string {
return func() string {
fmt.Println("Authentication needed")
fmt.Println("Go to the following url in your browser:")
fmt.Printf("%s\n\n", url)
fmt.Print("Enter verification code: ")
var code string
if _, err := fmt.Scan(&code); err != nil {
fmt.Printf("Failed reading code: %s", err.Error())
}
return code
}
}
func progressWriter(discard bool) io.Writer {
if discard {
return ioutil.Discard
}
return os.Stderr
}
func durationInSeconds(seconds int64) time.Duration {
return time.Second * time.Duration(seconds)
}
func conflictResolution(args cli.Arguments) drive.ConflictResolution {
keepLocal := args.Bool("keepLocal")
keepRemote := args.Bool("keepRemote")
keepLargest := args.Bool("keepLargest")
if (keepLocal && keepRemote) || (keepLocal && keepLargest) || (keepRemote && keepLargest) {
ExitF("Only one conflict resolution flag can be given")
}
if keepLocal {
return drive.KeepLocal
}
if keepRemote {
return drive.KeepRemote
}
if keepLargest {
return drive.KeepLargest
}
return drive.NoResolution
}
func checkUploadArgs(args cli.Arguments) {
if args.Bool("recursive") && args.Bool("delete") {
ExitF("--delete is not allowed for recursive uploads")
}
if args.Bool("recursive") && args.Bool("share") {
ExitF("--share is not allowed for recursive uploads")
}
}
func checkDownloadArgs(args cli.Arguments) {
if args.Bool("recursive") && args.Bool("delete") {
ExitF("--delete is not allowed for recursive downloads")
}
}
| [
"\"GOOGLE_API_CLIENT_ID\"",
"\"GOOGLE_API_CLIENT_SECRET\"",
"\"GDRIVE_CONFIG_DIR\"",
"\"GDRIVE_CONFIG_DIR\""
]
| []
| [
"GOOGLE_API_CLIENT_SECRET",
"GDRIVE_CONFIG_DIR",
"GOOGLE_API_CLIENT_ID"
]
| [] | ["GOOGLE_API_CLIENT_SECRET", "GDRIVE_CONFIG_DIR", "GOOGLE_API_CLIENT_ID"] | go | 3 | 0 | |
Scripts/ConvertFile/ConvertFile.py | import demistomock as demisto
from CommonServerPython import *
import subprocess
import glob
import os
import tempfile
import shutil
import traceback
from typing import List
def convert_file(file_path: str, out_format: str, all_files: bool, outdir: str) -> List[str]:
run_cmd = ['soffice', '--headless', '-env:UserInstallation=file:///tmp/convertfile/.config',
'--convert-to', out_format, file_path, '--outdir', outdir]
env = os.environ.copy()
    env['HOME'] = '/tmp/convertfile'
res = subprocess.check_output(run_cmd, stderr=subprocess.STDOUT, text=True, env=env)
demisto.debug("completed running: {}. With result: {}".format(run_cmd, res))
if all_files:
return glob.glob(outdir + '/*')
ext = out_format.split(':')[0]
return glob.glob(outdir + '/*.' + ext)
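# Illustrative call of convert_file (a sketch; the paths are placeholders):
#   files = convert_file('/tmp/report.docx', 'pdf', all_files=False, outdir='/tmp/out')
#   # -> ['/tmp/out/report.pdf'] if LibreOffice (soffice) produced a PDF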
def main():
entry_id = demisto.args()["entry_id"]
out_format = demisto.args().get('format', 'pdf')
all_files = demisto.args().get('all_files', 'no') == 'yes'
# URLS
try:
result = demisto.getFilePath(entry_id)
if not result:
return_error("Couldn't find entry id: {}".format(entry_id))
demisto.debug('going to convert: {}'.format(result))
file_path = result['path']
file_path_name_only = os.path.splitext(os.path.basename(file_path))[0]
file_name = result.get('name')
if file_name: # remove the extension
file_name = os.path.splitext(file_name)[0]
with tempfile.TemporaryDirectory() as outdir:
files = convert_file(file_path, out_format, all_files, outdir)
if not files:
                return_error('No file result returned for convert: {}'.format(file_path))
return
for f in files:
temp = demisto.uniqueFile()
shutil.copy(f, demisto.investigation()['id'] + '_' + temp)
name = os.path.basename(f)
if file_name:
name = name.replace(file_path_name_only, file_name)
demisto.results({
'Contents': '',
'ContentsFormat': formats['text'],
'Type': entryTypes['file'],
'File': name,
'FileID': temp
})
except subprocess.CalledProcessError as e:
return_error("Failed converting file. Output: {}. Error: {}".format(e.output, e))
except Exception as e:
return_error("Failed converting file. General exception: {}.\n\nTrace:\n{}".format(e, traceback.format_exc()))
# python2 uses __builtin__ python3 uses builtins
if __name__ == "__builtin__" or __name__ == "builtins":
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
plugins/agent-installer/worker_installer/__init__.py | #########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import os
from functools import wraps
import json
from cloudify import context
from cloudify.exceptions import NonRecoverableError
from worker_installer.utils import (FabricRunner,
is_on_management_worker)
DEFAULT_MIN_WORKERS = 2
DEFAULT_MAX_WORKERS = 5
DEFAULT_REMOTE_EXECUTION_PORT = 22
DEFAULT_WAIT_STARTED_TIMEOUT = 15
DEFAULT_WAIT_STARTED_INTERVAL = 1
def _find_type_in_kwargs(cls, all_args):
result = [v for v in all_args if isinstance(v, cls)]
if not result:
return None
if len(result) > 1:
raise NonRecoverableError(
"Expected to find exactly one instance of {0} in "
"kwargs but found {1}".format(cls, len(result)))
return result[0]
def init_worker_installer(func):
@wraps(func)
def wrapper(*args, **kwargs):
ctx = _find_type_in_kwargs(context.CloudifyContext,
kwargs.values() + list(args))
if not ctx:
raise NonRecoverableError(
'CloudifyContext not found in invocation args')
if 'cloudify_agent' in kwargs:
if ctx.type == context.NODE_INSTANCE and \
ctx.node.properties.get('cloudify_agent'):
raise NonRecoverableError("'cloudify_agent' is configured "
"both as a node property and as an "
"invocation input parameter for "
"operation '{0}'"
.format(ctx.operation.name))
agent_config = kwargs['cloudify_agent']
else:
if ctx.type == context.NODE_INSTANCE and \
ctx.node.properties.get('cloudify_agent'):
agent_config = ctx.node.properties['cloudify_agent']
else:
agent_config = {}
prepare_connection_configuration(ctx, agent_config)
runner = FabricRunner(ctx, agent_config)
try:
prepare_additional_configuration(ctx, agent_config, runner)
kwargs['runner'] = runner
kwargs['agent_config'] = agent_config
if not (agent_config.get('distro') and
agent_config.get('distro_codename')):
distro_info = get_machine_distro(runner)
if not agent_config.get('distro'):
agent_config['distro'] = distro_info[0]
if not agent_config.get('distro_codename'):
agent_config['distro_codename'] = distro_info[2]
return func(*args, **kwargs)
finally:
# Fixes CFY-1741 (clear fabric connection cache)
runner.close()
return wrapper
def get_machine_distro(runner):
"""retrieves the distribution information of the machine"""
stdout = _run_py_cmd_with_output(runner,
'import platform, json',
'json.dumps(platform.dist())')
return json.loads(stdout)
def _run_py_cmd_with_output(runner, imports_line, command):
"""
    Some environments print additional text to stdout when a command is
    executed. To work around this, the command's real output is wrapped in
    known delimiter strings, which are then located in stdout to recover the
    original output.
"""
delim_start = '###CLOUDIFYDISTROOPEN'
delim_end = 'CLOUDIFYDISTROCLOSE###'
stdout = runner.run('python -c "import sys; {0}; '
'sys.stdout.write(\'{1}{2}{3}\\n\''
'.format({4}))"'
.format(imports_line,
delim_start,
'{0}',
delim_end,
command))
result = stdout[stdout.find(delim_start) + len(delim_start):
stdout.find(delim_end)]
return result
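# Illustrative expansion (a sketch): get_machine_distro() above effectively runs
#   python -c "import sys; import platform, json; sys.stdout.write(
#       '###CLOUDIFYDISTROOPEN{}CLOUDIFYDISTROCLOSE###\n'.format(json.dumps(platform.dist())))"
# on the remote machine and returns only the text between the two delimiters, so any
# extra text printed around it is discarded.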
def get_machine_ip(ctx):
if ctx.node.properties.get('ip'):
return ctx.node.properties['ip']
if 'ip' in ctx.instance.runtime_properties:
return ctx.instance.runtime_properties['ip']
raise NonRecoverableError(
'ip property is not set for node: {0}. This is mandatory'
' for installing agent via ssh.'.format(ctx.instance.id))
def _prepare_and_validate_autoscale_params(ctx, config):
if 'min_workers' not in config and\
ctx.bootstrap_context.cloudify_agent.min_workers is not None:
config['min_workers'] = \
ctx.bootstrap_context.cloudify_agent.min_workers
if 'max_workers' not in config and\
ctx.bootstrap_context.cloudify_agent.max_workers is not None:
config['max_workers'] = \
ctx.bootstrap_context.cloudify_agent.max_workers
min_workers = config.get('min_workers', DEFAULT_MIN_WORKERS)
max_workers = config.get('max_workers', DEFAULT_MAX_WORKERS)
if not str(min_workers).isdigit():
raise NonRecoverableError('min_workers is supposed to be a number '
'but is: {0}'.format(min_workers))
if not str(max_workers).isdigit():
raise NonRecoverableError('max_workers is supposed to be a number '
'but is: {0}'.format(max_workers))
min_workers = int(min_workers)
max_workers = int(max_workers)
if int(min_workers) > int(max_workers):
raise NonRecoverableError(
'min_workers cannot be greater than max_workers '
'[min_workers={0}, max_workers={1}]'
.format(min_workers, max_workers))
config['min_workers'] = min_workers
config['max_workers'] = max_workers
def _set_auth(ctx, config):
is_password = config.get('password')
is_key = config.get('key')
if not is_password and not is_key:
if ctx.bootstrap_context.cloudify_agent.agent_key_path:
config['key'] = ctx.bootstrap_context.cloudify_agent.agent_key_path
is_key = True
else:
raise NonRecoverableError(
'Missing password or ssh key path in worker configuration '
'[cloudify_agent={0}'.format(config))
if not is_password and is_key:
if not os.path.isfile(os.path.expanduser(config['key'])):
raise NonRecoverableError(
'Cannot find keypair file, expected file path was {'
'0}'.format(config['key']))
def _set_user(ctx, config):
if 'user' not in config:
if ctx.bootstrap_context.cloudify_agent.user:
config['user'] = ctx.bootstrap_context.cloudify_agent.user
else:
raise NonRecoverableError(
'Missing user in worker configuration '
'[cloudify_agent={0}'.format(config))
def _set_remote_execution_port(ctx, config):
if 'port' not in config:
if ctx.bootstrap_context.cloudify_agent.remote_execution_port:
config['port'] =\
ctx.bootstrap_context.cloudify_agent.remote_execution_port
else:
config['port'] = DEFAULT_REMOTE_EXECUTION_PORT
def _set_wait_started_config(config):
if 'wait_started_timeout' not in config:
config['wait_started_timeout'] = DEFAULT_WAIT_STARTED_TIMEOUT
if 'wait_started_interval' not in config:
config['wait_started_interval'] = DEFAULT_WAIT_STARTED_INTERVAL
def _set_home_dir(runner, config):
if 'home_dir' not in config:
home_dir = _run_py_cmd_with_output(
runner,
'import pwd',
'pwd.getpwnam(\'{0}\').pw_dir'.format(config['user']))
config['home_dir'] = home_dir
def _get_bool(config, key, default):
if key not in config:
return default
str_value = str(config[key])
if str_value.lower() == 'true':
return True
if str_value.lower() == 'false':
return False
raise NonRecoverableError(
'Value for {0} property should be true/false '
'but is: {1}'.format(key, str_value))
def prepare_connection_configuration(ctx, agent_config):
if is_on_management_worker(ctx):
# we are starting a worker dedicated for a deployment
# (not specific node)
# use the same user we used when bootstrapping
if 'MANAGEMENT_USER' in os.environ:
agent_config['user'] = os.environ['MANAGEMENT_USER']
else:
raise NonRecoverableError(
'Cannot determine user for deployment user:'
'MANAGEMENT_USER is not set')
workflows_worker = agent_config['workflows_worker'] \
if 'workflows_worker' in agent_config else False
suffix = '_workflows' if workflows_worker else ''
name = '{0}{1}'.format(ctx.deployment.id, suffix)
agent_config['name'] = name
else:
agent_config['host'] = get_machine_ip(ctx)
_set_auth(ctx, agent_config)
_set_user(ctx, agent_config)
_set_remote_execution_port(ctx, agent_config)
agent_config['name'] = ctx.instance.id
def prepare_additional_configuration(ctx, agent_config, runner):
_set_wait_started_config(agent_config)
_set_home_dir(runner, agent_config)
home_dir = agent_config['home_dir']
agent_config['celery_base_dir'] = home_dir
manager_uid = ctx.bootstrap_context.manager_uid
if manager_uid is None or is_on_management_worker(ctx):
manager_sufix = ''
else:
manager_sufix = '.{0}'.format(manager_uid)
agent_config['base_dir'] = '{0}/cloudify.{1}{2}'.format(
home_dir, agent_config['name'], manager_sufix)
agent_config['init_file'] = '/etc/init.d/celeryd-{0}{1}'.format(
agent_config['name'], manager_sufix)
agent_config['config_file'] = '/etc/default/celeryd-{0}{1}'.format(
agent_config['name'], manager_sufix)
agent_config['includes_file'] = '{0}/work/celeryd-includes'.format(
agent_config['base_dir'])
agent_config['service_name'] = 'celeryd-{0}{1}'.format(
agent_config['name'], manager_sufix)
agent_config['worker_modifier'] = '{0}{1}'.format(
agent_config['name'], manager_sufix)
agent_config['disable_requiretty'] = _get_bool(agent_config,
'disable_requiretty',
True)
agent_config['delete_amqp_queues'] = _get_bool(agent_config,
'delete_amqp_queues',
True)
_prepare_and_validate_autoscale_params(ctx, agent_config)
| []
| []
| [
"MANAGEMENT_USER"
]
| [] | ["MANAGEMENT_USER"] | python | 1 | 0 | |
setup.py | from importlib.machinery import SourceFileLoader
import io
import os.path
from setuptools import find_packages, setup
sourcedml = SourceFileLoader("sourced-ml-core", "./sourced/ml/core/__init__.py").load_module()
with io.open(os.path.join(os.path.dirname(__file__), "README.md"), encoding="utf-8") as f:
long_description = f.read()
include_tests = os.getenv("ML_CORE_SETUP_INCLUDE_TESTS", False)
exclude_packages = (("sourced.ml.core.tests", "sourced.ml.core.tests.source")
if not include_tests else ())
tf_requires = ["tensorflow>=1.0,<1.14"]
tf_gpu_requires = ["tensorflow-gpu>=1.0,<1.14"]
package_data = {"": ["LICENSE.md", "README.md"]}
if include_tests:
test_data_dirs = ["./asdf/*.asdf", "./swivel/*", "identifiers.csv.tar.gz"]
package_data["sourced.ml.core.tests"] = test_data_dirs
setup(
name="sourced-ml-core",
description="Library containing the core algorithms for machine learning on source code. "
"Provides API and tools to train and use models based "
"on source code features extracted from Babelfish's UASTs.",
long_description=long_description,
long_description_content_type="text/markdown",
version=sourcedml.__version__,
license="Apache 2.0",
author="source{d}",
author_email="[email protected]",
url="https://github.com/src-d/ml-core",
download_url="https://github.com/src-d/ml-core",
packages=find_packages(exclude=exclude_packages),
namespace_packages=["sourced", "sourced.ml"],
keywords=[
"machine learning on source code",
"word2vec",
"id2vec",
"github",
"swivel",
"bow",
"bblfsh",
"babelfish",
],
install_requires=[
"PyStemmer>=1.3,<2.0",
"bblfsh>=3.1.0,<4.0",
"modelforge>=0.14.1",
"pygments>=2.2.0,<3.0",
"keras>=2.0,<3.0",
"scikit-learn>=0.21.1,<1.0",
"tqdm>=4.20,<5.0",
],
extras_require={"tf": tf_requires, "tf_gpu": tf_gpu_requires},
tests_require=["docker>=3.6.0,<4.0"],
package_data=package_data,
python_requires=">=3.5",
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: POSIX",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Topic :: Software Development :: Libraries",
],
)
| []
| []
| [
"ML_CORE_SETUP_INCLUDE_TESTS"
]
| [] | ["ML_CORE_SETUP_INCLUDE_TESTS"] | python | 1 | 0 | |
testscript.go | // Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Script-driven tests.
// See testdata/script/README for an overview.
package testscript
import (
"bytes"
"context"
"flag"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
"strings"
"sync/atomic"
"testing"
"time"
"github.com/rogpeppe/testscript/internal/imports"
"github.com/rogpeppe/testscript/internal/os/execpath"
"github.com/rogpeppe/testscript/internal/par"
"github.com/rogpeppe/testscript/internal/testenv"
"github.com/rogpeppe/testscript/txtar"
)
var execCache par.Cache
// If -testwork is specified, the test prints the name of the temp directory
// and does not remove it when done, so that a programmer can
// poke at the test file tree afterward.
var testWork = flag.Bool("testwork", false, "")
// Env holds the environment to use at the start of a test script invocation.
type Env struct {
// WorkDir holds the path to the root directory of the
// extracted files.
WorkDir string
	// Vars holds the initial set of environment variables that will be passed to the
// testscript commands.
Vars []string
// Cd holds the initial current working directory.
Cd string
// Values holds a map of arbitrary values for use by custom
// testscript commands. This enables Setup to pass arbitrary
// values (not just strings) through to custom commands.
Values map[interface{}]interface{}
ts *TestScript
}
// Value returns a value from Env.Values, or nil if no
// value was set by Setup.
func (ts *TestScript) Value(key interface{}) interface{} {
return ts.values[key]
}
// Defer arranges for f to be called at the end
// of the test. If Defer is called multiple times, the
// defers are executed in reverse order (similar
// to Go's defer statement)
func (e *Env) Defer(f func()) {
e.ts.Defer(f)
}
// Params holds parameters for a call to Run.
type Params struct {
// Dir holds the name of the directory holding the scripts.
// All files in the directory with a .txt suffix will be considered
// as test scripts. By default the current directory is used.
// Dir is interpreted relative to the current test directory.
Dir string
// Setup is called, if not nil, to complete any setup required
// for a test. The WorkDir and Vars fields will have already
// been initialized and all the files extracted into WorkDir,
// and Cd will be the same as WorkDir.
// The Setup function may modify Vars and Cd as it wishes.
Setup func(*Env) error
// Condition is called, if not nil, to determine whether a particular
// condition is true. It's called only for conditions not in the
// standard set, and may be nil.
Condition func(cond string) (bool, error)
// Cmds holds a map of commands available to the script.
// It will only be consulted for commands not part of the standard set.
Cmds map[string]func(ts *TestScript, neg bool, args []string)
// TestWork specifies that working directories should be
// left intact for later inspection.
TestWork bool
// IgnoreMissedCoverage specifies that if coverage information
// is being generated (with the -test.coverprofile flag) and a subcommand
// function passed to RunMain fails to generate coverage information
// (for example because the function invoked os.Exit), then the
// error will be ignored.
IgnoreMissedCoverage bool
// UpdateScripts specifies that if a `cmp` command fails and
// its first argument is `stdout` or `stderr` and its second argument
// refers to a file inside the testscript file, the command will succeed
// and the testscript file will be updated to reflect the actual output.
//
// The content will be quoted with txtar.Quote if needed;
// a manual change will be needed if it is not unquoted in the
// script.
UpdateScripts bool
}
// Run runs the tests in the given directory. All files in dir with a ".txt"
// suffix are considered to be test files.
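// An illustrative caller (a sketch; the "testdata/script" directory name is an
// assumption, any directory of *.txt testscript files works):
//
//	func TestScripts(t *testing.T) {
//		testscript.Run(t, testscript.Params{
//			Dir: "testdata/script",
//		})
//	}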
func Run(t *testing.T, p Params) {
RunT(tshim{t}, p)
}
// T holds all the methods of the *testing.T type that
// are used by testscript.
type T interface {
Skip(...interface{})
Fatal(...interface{})
Parallel()
Log(...interface{})
FailNow()
Run(string, func(T))
// Verbose is usually implemented by the testing package
// directly rather than on the *testing.T type.
Verbose() bool
}
type tshim struct {
*testing.T
}
func (t tshim) Run(name string, f func(T)) {
t.T.Run(name, func(t *testing.T) {
f(tshim{t})
})
}
func (t tshim) Verbose() bool {
return testing.Verbose()
}
// RunT is like Run but uses an interface type instead of the concrete *testing.T
// type to make it possible to use testscript functionality outside of go test.
func RunT(t T, p Params) {
files, err := filepath.Glob(filepath.Join(p.Dir, "*.txt"))
if err != nil {
t.Fatal(err)
}
testTempDir, err := ioutil.TempDir(os.Getenv("GOTMPDIR"), "go-test-script")
if err != nil {
t.Fatal(err)
}
// The temp dir returned by ioutil.TempDir might be a sym linked dir (default
// behaviour in macOS). That could mess up matching that includes $WORK if,
// for example, an external program outputs resolved paths. Evaluating the
// dir here will ensure consistency.
testTempDir, err = filepath.EvalSymlinks(testTempDir)
if err != nil {
t.Fatal(err)
}
refCount := int32(len(files))
for _, file := range files {
file := file
name := strings.TrimSuffix(filepath.Base(file), ".txt")
t.Run(name, func(t T) {
t.Parallel()
ts := &TestScript{
t: t,
testTempDir: testTempDir,
name: name,
file: file,
params: p,
ctxt: context.Background(),
deferred: func() {},
scriptFiles: make(map[string]string),
scriptUpdates: make(map[string]string),
}
defer func() {
if p.TestWork || *testWork {
return
}
removeAll(ts.workdir)
if atomic.AddInt32(&refCount, -1) == 0 {
// This is the last subtest to finish. Remove the
// parent directory too.
os.Remove(testTempDir)
}
}()
ts.run()
})
}
}
// A TestScript holds execution state for a single test script.
type TestScript struct {
params Params
t T
testTempDir string
workdir string // temporary work dir ($WORK)
log bytes.Buffer // test execution log (printed at end of test)
mark int // offset of next log truncation
cd string // current directory during test execution; initially $WORK/gopath/src
name string // short name of test ("foo")
file string // full file name ("testdata/script/foo.txt")
lineno int // line number currently executing
line string // line currently executing
env []string // environment list (for os/exec)
envMap map[string]string // environment mapping (matches env; on Windows keys are lowercase)
values map[interface{}]interface{} // values for custom commands
stdin string // standard input to next 'go' command; set by 'stdin' command.
stdout string // standard output from last 'go' command; for 'stdout' command
stderr string // standard error from last 'go' command; for 'stderr' command
stopped bool // test wants to stop early
start time.Time // time phase started
background []backgroundCmd // backgrounded 'exec' and 'go' commands
deferred func() // deferred cleanup actions.
archive *txtar.Archive // the testscript being run.
scriptFiles map[string]string // files stored in the txtar archive (absolute paths -> path in script)
scriptUpdates map[string]string // updates to testscript files via UpdateScripts.
ctxt context.Context // per TestScript context
}
type backgroundCmd struct {
cmd *exec.Cmd
wait <-chan struct{}
neg bool // if true, cmd should fail
}
// setup sets up the test execution temporary directory and environment.
// It returns the comment section of the txtar archive.
func (ts *TestScript) setup() string {
ts.workdir = filepath.Join(ts.testTempDir, "script-"+ts.name)
ts.Check(os.MkdirAll(filepath.Join(ts.workdir, "tmp"), 0777))
env := &Env{
Vars: []string{
"WORK=" + ts.workdir, // must be first for ts.abbrev
"PATH=" + os.Getenv("PATH"),
homeEnvName() + "=/no-home",
tempEnvName() + "=" + filepath.Join(ts.workdir, "tmp"),
"devnull=" + os.DevNull,
":=" + string(os.PathListSeparator),
},
WorkDir: ts.workdir,
Values: make(map[interface{}]interface{}),
Cd: ts.workdir,
ts: ts,
}
// Must preserve SYSTEMROOT on Windows: https://github.com/golang/go/issues/25513 et al
if runtime.GOOS == "windows" {
env.Vars = append(env.Vars,
"SYSTEMROOT="+os.Getenv("SYSTEMROOT"),
"exe=.exe",
)
} else {
env.Vars = append(env.Vars,
"exe=",
)
}
ts.cd = env.Cd
// Unpack archive.
a, err := txtar.ParseFile(ts.file)
ts.Check(err)
ts.archive = a
for _, f := range a.Files {
name := ts.MkAbs(ts.expand(f.Name))
ts.scriptFiles[name] = f.Name
ts.Check(os.MkdirAll(filepath.Dir(name), 0777))
ts.Check(ioutil.WriteFile(name, f.Data, 0666))
}
// Run any user-defined setup.
if ts.params.Setup != nil {
ts.Check(ts.params.Setup(env))
}
ts.cd = env.Cd
ts.env = env.Vars
ts.values = env.Values
ts.envMap = make(map[string]string)
for _, kv := range ts.env {
if i := strings.Index(kv, "="); i >= 0 {
ts.envMap[envvarname(kv[:i])] = kv[i+1:]
}
}
return string(a.Comment)
}
// run runs the test script.
func (ts *TestScript) run() {
// Truncate log at end of last phase marker,
// discarding details of successful phase.
rewind := func() {
if !ts.t.Verbose() {
ts.log.Truncate(ts.mark)
}
}
// Insert elapsed time for phase at end of phase marker
markTime := func() {
if ts.mark > 0 && !ts.start.IsZero() {
afterMark := append([]byte{}, ts.log.Bytes()[ts.mark:]...)
ts.log.Truncate(ts.mark - 1) // cut \n and afterMark
fmt.Fprintf(&ts.log, " (%.3fs)\n", time.Since(ts.start).Seconds())
ts.log.Write(afterMark)
}
ts.start = time.Time{}
}
defer func() {
// On a normal exit from the test loop, background processes are cleaned up
// before we print PASS. If we return early (e.g., due to a test failure),
// don't print anything about the processes that were still running.
for _, bg := range ts.background {
interruptProcess(bg.cmd.Process)
}
for _, bg := range ts.background {
<-bg.wait
}
ts.background = nil
markTime()
// Flush testScript log to testing.T log.
ts.t.Log("\n" + ts.abbrev(ts.log.String()))
}()
defer func() {
ts.deferred()
}()
script := ts.setup()
// With -v or -testwork, start log with full environment.
if *testWork || ts.t.Verbose() {
// Display environment.
ts.cmdEnv(false, nil)
fmt.Fprintf(&ts.log, "\n")
ts.mark = ts.log.Len()
}
defer ts.applyScriptUpdates()
// Run script.
// See testdata/script/README for documentation of script form.
Script:
for script != "" {
// Extract next line.
ts.lineno++
var line string
if i := strings.Index(script, "\n"); i >= 0 {
line, script = script[:i], script[i+1:]
} else {
line, script = script, ""
}
// # is a comment indicating the start of new phase.
if strings.HasPrefix(line, "#") {
// If there was a previous phase, it succeeded,
// so rewind the log to delete its details (unless -v is in use).
// If nothing has happened at all since the mark,
// rewinding is a no-op and adding elapsed time
// for doing nothing is meaningless, so don't.
if ts.log.Len() > ts.mark {
rewind()
markTime()
}
// Print phase heading and mark start of phase output.
fmt.Fprintf(&ts.log, "%s\n", line)
ts.mark = ts.log.Len()
ts.start = time.Now()
continue
}
// Parse input line. Ignore blanks entirely.
args := ts.parse(line)
if len(args) == 0 {
continue
}
// Echo command to log.
fmt.Fprintf(&ts.log, "> %s\n", line)
// Command prefix [cond] means only run this command if cond is satisfied.
for strings.HasPrefix(args[0], "[") && strings.HasSuffix(args[0], "]") {
cond := args[0]
cond = cond[1 : len(cond)-1]
cond = strings.TrimSpace(cond)
args = args[1:]
if len(args) == 0 {
ts.Fatalf("missing command after condition")
}
want := true
if strings.HasPrefix(cond, "!") {
want = false
cond = strings.TrimSpace(cond[1:])
}
ok, err := ts.condition(cond)
if err != nil {
ts.Fatalf("bad condition %q: %v", cond, err)
}
if ok != want {
// Don't run rest of line.
continue Script
}
}
// Command prefix ! means negate the expectations about this command:
// go command should fail, match should not be found, etc.
neg := false
if args[0] == "!" {
neg = true
args = args[1:]
if len(args) == 0 {
ts.Fatalf("! on line by itself")
}
}
// Run command.
cmd := scriptCmds[args[0]]
if cmd == nil {
cmd = ts.params.Cmds[args[0]]
}
if cmd == nil {
ts.Fatalf("unknown command %q", args[0])
}
cmd(ts, neg, args[1:])
// Command can ask script to stop early.
if ts.stopped {
// Break instead of returning, so that we check the status of any
// background processes and print PASS.
break
}
}
for _, bg := range ts.background {
interruptProcess(bg.cmd.Process)
}
ts.cmdWait(false, nil)
// Final phase ended.
rewind()
markTime()
if !ts.stopped {
fmt.Fprintf(&ts.log, "PASS\n")
}
}
func (ts *TestScript) applyScriptUpdates() {
if len(ts.scriptUpdates) == 0 {
return
}
for name, content := range ts.scriptUpdates {
found := false
for i := range ts.archive.Files {
f := &ts.archive.Files[i]
if f.Name != name {
continue
}
data := []byte(content)
if txtar.NeedsQuote(data) {
data1, err := txtar.Quote(data)
if err != nil {
ts.t.Fatal(fmt.Sprintf("cannot update script file %q: %v", f.Name, err))
continue
}
data = data1
}
f.Data = data
found = true
}
// Sanity check.
if !found {
panic("script update file not found")
}
}
if err := ioutil.WriteFile(ts.file, txtar.Format(ts.archive), 0666); err != nil {
ts.t.Fatal("cannot update script: ", err)
}
ts.Logf("%s updated", ts.file)
}
// condition reports whether the given condition is satisfied.
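// Illustrative script lines that use conditions (a sketch; see testdata/script/README
// for the full syntax):
//
//	[short]     skip 'skipping slow test in -short mode'
//	[!net]      skip 'no external network'
//	[exec:git]  exec git version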
func (ts *TestScript) condition(cond string) (bool, error) {
switch cond {
case "short":
return testing.Short(), nil
case "net":
return testenv.HasExternalNetwork(), nil
case "link":
return testenv.HasLink(), nil
case "symlink":
return testenv.HasSymlink(), nil
case runtime.GOOS, runtime.GOARCH:
return true, nil
default:
if imports.KnownArch[cond] || imports.KnownOS[cond] {
return false, nil
}
if strings.HasPrefix(cond, "exec:") {
prog := cond[len("exec:"):]
ok := execCache.Do(prog, func() interface{} {
_, err := execpath.Look(prog, ts.Getenv)
return err == nil
}).(bool)
return ok, nil
}
if ts.params.Condition != nil {
return ts.params.Condition(cond)
}
ts.Fatalf("unknown condition %q", cond)
panic("unreachable")
}
}
// Helpers for command implementations.
// abbrev abbreviates the actual work directory in the string s to the literal string "$WORK".
func (ts *TestScript) abbrev(s string) string {
s = strings.Replace(s, ts.workdir, "$WORK", -1)
if *testWork {
// Expose actual $WORK value in environment dump on first line of work script,
// so that the user can find out what directory -testwork left behind.
s = "WORK=" + ts.workdir + "\n" + strings.TrimPrefix(s, "WORK=$WORK\n")
}
return s
}
// Defer arranges for f to be called at the end
// of the test. If Defer is called multiple times, the
// defers are executed in reverse order (similar
// to Go's defer statement)
func (ts *TestScript) Defer(f func()) {
old := ts.deferred
ts.deferred = func() {
defer old()
f()
}
}
// Check calls ts.Fatalf if err != nil.
func (ts *TestScript) Check(err error) {
if err != nil {
ts.Fatalf("%v", err)
}
}
// Logf appends the given formatted message to the test log transcript.
func (ts *TestScript) Logf(format string, args ...interface{}) {
format = strings.TrimSuffix(format, "\n")
fmt.Fprintf(&ts.log, format, args...)
ts.log.WriteByte('\n')
}
// exec runs the given command line (an actual subprocess, not simulated)
// in ts.cd with environment ts.env and then returns collected standard output and standard error.
func (ts *TestScript) exec(command string, args ...string) (stdout, stderr string, err error) {
cmd, err := ts.buildExecCmd(command, args...)
if err != nil {
return "", "", err
}
cmd.Dir = ts.cd
cmd.Env = append(ts.env, "PWD="+ts.cd)
cmd.Stdin = strings.NewReader(ts.stdin)
var stdoutBuf, stderrBuf strings.Builder
cmd.Stdout = &stdoutBuf
cmd.Stderr = &stderrBuf
if err = cmd.Start(); err == nil {
err = ctxWait(ts.ctxt, cmd)
}
ts.stdin = ""
return stdoutBuf.String(), stderrBuf.String(), err
}
// execBackground starts the given command line (an actual subprocess, not simulated)
// in ts.cd with environment ts.env.
func (ts *TestScript) execBackground(command string, args ...string) (*exec.Cmd, error) {
cmd, err := ts.buildExecCmd(command, args...)
if err != nil {
return nil, err
}
cmd.Dir = ts.cd
cmd.Env = append(ts.env, "PWD="+ts.cd)
var stdoutBuf, stderrBuf strings.Builder
cmd.Stdin = strings.NewReader(ts.stdin)
cmd.Stdout = &stdoutBuf
cmd.Stderr = &stderrBuf
ts.stdin = ""
return cmd, cmd.Start()
}
func (ts *TestScript) buildExecCmd(command string, args ...string) (*exec.Cmd, error) {
if filepath.Base(command) == command {
if lp, err := execpath.Look(command, ts.Getenv); err != nil {
return nil, err
} else {
command = lp
}
}
return exec.Command(command, args...), nil
}
// BackgroundCmds returns a slice containing all the commands that have
// been started in the background since the most recent wait command, or
// the start of the script if wait has not been called.
func (ts *TestScript) BackgroundCmds() []*exec.Cmd {
cmds := make([]*exec.Cmd, len(ts.background))
for i, b := range ts.background {
cmds[i] = b.cmd
}
return cmds
}
// ctxWait is like cmd.Wait, but terminates cmd with os.Interrupt if ctx becomes done.
//
// This differs from exec.CommandContext in that it prefers os.Interrupt over os.Kill.
// (See https://golang.org/issue/21135.)
func ctxWait(ctx context.Context, cmd *exec.Cmd) error {
errc := make(chan error, 1)
go func() { errc <- cmd.Wait() }()
select {
case err := <-errc:
return err
case <-ctx.Done():
interruptProcess(cmd.Process)
return <-errc
}
}
// interruptProcess sends os.Interrupt to p if supported, or os.Kill otherwise.
func interruptProcess(p *os.Process) {
if err := p.Signal(os.Interrupt); err != nil {
// Per https://golang.org/pkg/os/#Signal, “Interrupt is not implemented on
// Windows; using it with os.Process.Signal will return an error.”
// Fall back to Kill instead.
p.Kill()
}
}
// Exec runs the given command and saves its stdout and stderr so
// they can be inspected by subsequent script commands.
func (ts *TestScript) Exec(command string, args ...string) error {
var err error
ts.stdout, ts.stderr, err = ts.exec(command, args...)
if ts.stdout != "" {
ts.Logf("[stdout]\n%s", ts.stdout)
}
if ts.stderr != "" {
ts.Logf("[stderr]\n%s", ts.stderr)
}
return err
}
// expand applies environment variable expansion to the string s.
func (ts *TestScript) expand(s string) string {
return os.Expand(s, func(key string) string {
if key1 := strings.TrimSuffix(key, "@R"); len(key1) != len(key) {
return regexp.QuoteMeta(ts.Getenv(key1))
}
return ts.Getenv(key)
})
}
// Fatalf aborts the test with the given failure message.
func (ts *TestScript) Fatalf(format string, args ...interface{}) {
fmt.Fprintf(&ts.log, "FAIL: %s:%d: %s\n", ts.file, ts.lineno, fmt.Sprintf(format, args...))
ts.t.FailNow()
}
// MkAbs interprets file relative to the test script's current directory
// and returns the corresponding absolute path.
func (ts *TestScript) MkAbs(file string) string {
if filepath.IsAbs(file) {
return file
}
return filepath.Join(ts.cd, file)
}
// ReadFile returns the contents of the file with the
// given name, interpreted relative to the test script's
// current directory. It interprets "stdout" and "stderr" to
// mean the standard output or standard error from
// the most recent exec or wait command respectively.
//
// If the file cannot be read, the script fails.
func (ts *TestScript) ReadFile(file string) string {
switch file {
case "stdout":
return ts.stdout
case "stderr":
return ts.stderr
default:
file = ts.MkAbs(file)
data, err := ioutil.ReadFile(file)
ts.Check(err)
return string(data)
}
}
// Setenv sets the value of the environment variable named by the key.
func (ts *TestScript) Setenv(key, value string) {
ts.env = append(ts.env, key+"="+value)
ts.envMap[envvarname(key)] = value
}
// Getenv gets the value of the environment variable named by the key.
func (ts *TestScript) Getenv(key string) string {
return ts.envMap[envvarname(key)]
}
// parse parses a single line as a list of space-separated arguments
// subject to environment variable expansion (but not resplitting).
// Single quotes around text disable splitting and expansion.
// To embed a single quote, double it: 'Don''t communicate by sharing memory.'
func (ts *TestScript) parse(line string) []string {
ts.line = line
var (
args []string
arg string // text of current arg so far (need to add line[start:i])
start = -1 // if >= 0, position where current arg text chunk starts
quoted = false // currently processing quoted text
)
for i := 0; ; i++ {
if !quoted && (i >= len(line) || line[i] == ' ' || line[i] == '\t' || line[i] == '\r' || line[i] == '#') {
// Found arg-separating space.
if start >= 0 {
arg += ts.expand(line[start:i])
args = append(args, arg)
start = -1
arg = ""
}
if i >= len(line) || line[i] == '#' {
break
}
continue
}
if i >= len(line) {
ts.Fatalf("unterminated quoted argument")
}
if line[i] == '\'' {
if !quoted {
// starting a quoted chunk
if start >= 0 {
arg += ts.expand(line[start:i])
}
start = i + 1
quoted = true
continue
}
// 'foo''bar' means foo'bar, like in rc shell and Pascal.
if i+1 < len(line) && line[i+1] == '\'' {
arg += line[start:i]
start = i + 1
i++ // skip over second ' before next iteration
continue
}
// ending a quoted chunk
arg += line[start:i]
start = i + 1
quoted = false
continue
}
// found character worth saving; make sure we're saving
if start < 0 {
start = i
}
}
return args
}
func removeAll(dir string) error {
// module cache has 0444 directories;
// make them writable in order to remove content.
filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return nil // ignore errors walking in file system
}
if info.IsDir() {
os.Chmod(path, 0777)
}
return nil
})
return os.RemoveAll(dir)
}
func homeEnvName() string {
switch runtime.GOOS {
case "windows":
return "USERPROFILE"
case "plan9":
return "home"
default:
return "HOME"
}
}
func tempEnvName() string {
switch runtime.GOOS {
case "windows":
return "TMP"
case "plan9":
return "TMPDIR" // actually plan 9 doesn't have one at all but this is fine
default:
return "TMPDIR"
}
}
| [
"\"GOTMPDIR\"",
"\"PATH\"",
"\"SYSTEMROOT\""
]
| []
| [
"SYSTEMROOT",
"GOTMPDIR",
"PATH"
]
| [] | ["SYSTEMROOT", "GOTMPDIR", "PATH"] | go | 3 | 0 | |
pkg/objstore/s3/s3.go | // Package s3 implements common object storage abstractions against s3-compatible APIs.
package s3
import (
"context"
"fmt"
"io"
"math/rand"
"net"
"net/http"
"os"
"runtime"
"strconv"
"strings"
"testing"
"time"
"github.com/go-kit/kit/log/level"
"github.com/go-kit/kit/log"
"github.com/improbable-eng/thanos/pkg/objstore"
"github.com/improbable-eng/thanos/pkg/runutil"
"github.com/minio/minio-go"
"github.com/minio/minio-go/pkg/credentials"
"github.com/minio/minio-go/pkg/encrypt"
"github.com/pkg/errors"
"github.com/prometheus/common/model"
"github.com/prometheus/common/version"
yaml "gopkg.in/yaml.v2"
)
// DirDelim is the delimiter used to model a directory structure in an object store bucket.
const DirDelim = "/"
// Config stores the configuration for s3 bucket.
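// An illustrative YAML document that maps onto Config (a sketch; all values are
// placeholders):
//
//	bucket: "my-bucket"
//	endpoint: "s3.us-east-1.amazonaws.com"
//	access_key: ""
//	secret_key: ""
//	insecure: false
//	signature_version2: false
//	encrypt_sse: false
//	put_user_metadata: {}
//	http_config:
//	  idle_conn_timeout: 90s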
type Config struct {
Bucket string `yaml:"bucket"`
Endpoint string `yaml:"endpoint"`
AccessKey string `yaml:"access_key"`
Insecure bool `yaml:"insecure"`
SignatureV2 bool `yaml:"signature_version2"`
SSEEncryption bool `yaml:"encrypt_sse"`
SecretKey string `yaml:"secret_key"`
PutUserMetadata map[string]string `yaml:"put_user_metadata"`
HTTPConfig HTTPConfig `yaml:"http_config"`
}
// HTTPConfig stores the http.Transport configuration for the s3 minio client.
type HTTPConfig struct {
IdleConnTimeout model.Duration `yaml:"idle_conn_timeout"`
}
// Bucket implements the store.Bucket interface against s3-compatible APIs.
type Bucket struct {
logger log.Logger
name string
client *minio.Client
sse encrypt.ServerSide
putUserMetadata map[string]string
}
// parseConfig unmarshals a buffer into a Config with default HTTPConfig values.
func parseConfig(conf []byte) (Config, error) {
defaultHTTPConfig := HTTPConfig{IdleConnTimeout: model.Duration(90 * time.Second)}
config := Config{HTTPConfig: defaultHTTPConfig}
if err := yaml.Unmarshal(conf, &config); err != nil {
return Config{}, err
}
if config.PutUserMetadata == nil {
config.PutUserMetadata = make(map[string]string)
}
return config, nil
}
// NewBucket returns a new Bucket using the provided s3 config values.
func NewBucket(logger log.Logger, conf []byte, component string) (*Bucket, error) {
config, err := parseConfig(conf)
if err != nil {
return nil, err
}
return NewBucketWithConfig(logger, config, component)
}
// NewBucketWithConfig returns a new Bucket using the provided s3 config struct.
func NewBucketWithConfig(logger log.Logger, config Config, component string) (*Bucket, error) {
var chain []credentials.Provider
if err := validate(config); err != nil {
return nil, err
}
if config.AccessKey != "" {
signature := credentials.SignatureV4
if config.SignatureV2 {
signature = credentials.SignatureV2
}
chain = []credentials.Provider{&credentials.Static{
Value: credentials.Value{
AccessKeyID: config.AccessKey,
SecretAccessKey: config.SecretKey,
SignerType: signature,
},
}}
} else {
chain = []credentials.Provider{
&credentials.EnvAWS{},
&credentials.FileAWSCredentials{},
&credentials.IAM{
Client: &http.Client{
Transport: http.DefaultTransport,
},
},
}
}
client, err := minio.NewWithCredentials(config.Endpoint, credentials.NewChainCredentials(chain), !config.Insecure, "")
if err != nil {
return nil, errors.Wrap(err, "initialize s3 client")
}
client.SetAppInfo(fmt.Sprintf("thanos-%s", component), fmt.Sprintf("%s (%s)", version.Version, runtime.Version()))
client.SetCustomTransport(&http.Transport{
Proxy: http.ProxyFromEnvironment,
DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
MaxIdleConns: 100,
IdleConnTimeout: time.Duration(config.HTTPConfig.IdleConnTimeout),
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
// The ResponseHeaderTimeout here is the only change from the
// default minio transport, it was introduced to cover cases
// where the tcp connection works but the server never answers
ResponseHeaderTimeout: 15 * time.Second,
// Set this value so that the underlying transport round-tripper
// doesn't try to auto decode the body of objects with
// content-encoding set to `gzip`.
//
// Refer:
// https://golang.org/src/net/http/transport.go?h=roundTrip#L1843
DisableCompression: true,
})
var sse encrypt.ServerSide
if config.SSEEncryption {
sse = encrypt.NewSSE()
}
bkt := &Bucket{
logger: logger,
name: config.Bucket,
client: client,
sse: sse,
putUserMetadata: config.PutUserMetadata,
}
return bkt, nil
}
// Name returns the bucket name for s3.
func (b *Bucket) Name() string {
return b.name
}
// validate checks to see the config options are set.
func validate(conf Config) error {
if conf.Endpoint == "" {
return errors.New("no s3 endpoint in config file")
}
if conf.AccessKey == "" && conf.SecretKey != "" {
return errors.New("no s3 acccess_key specified while secret_key is present in config file; either both should be present in config or envvars/IAM should be used.")
}
if conf.AccessKey != "" && conf.SecretKey == "" {
return errors.New("no s3 secret_key specified while access_key is present in config file; either both should be present in config or envvars/IAM should be used.")
}
return nil
}
// ValidateForTests checks to see the config options for tests are set.
func ValidateForTests(conf Config) error {
if conf.Endpoint == "" ||
conf.AccessKey == "" ||
conf.SecretKey == "" {
return errors.New("insufficient s3 test configuration information")
}
return nil
}
// Iter calls f for each entry in the given directory. The argument to f is the full
// object name including the prefix of the inspected directory.
func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error) error {
// Ensure the object name actually ends with a dir suffix. Otherwise we'll just iterate the
// object itself as one prefix item.
if dir != "" {
dir = strings.TrimSuffix(dir, DirDelim) + DirDelim
}
for object := range b.client.ListObjects(b.name, dir, false, ctx.Done()) {
// Catch the error when failed to list objects.
if object.Err != nil {
return object.Err
}
// This sometimes happens with empty buckets.
if object.Key == "" {
continue
}
if err := f(object.Key); err != nil {
return err
}
}
return nil
}
func (b *Bucket) getRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) {
opts := &minio.GetObjectOptions{ServerSideEncryption: b.sse}
if length != -1 {
if err := opts.SetRange(off, off+length-1); err != nil {
return nil, err
}
}
r, err := b.client.GetObjectWithContext(ctx, b.name, name, *opts)
if err != nil {
return nil, err
}
// NotFoundObject error is revealed only after first Read. This does the initial GetRequest. Prefetch this here
// for convenience.
if _, err := r.Read(nil); err != nil {
runutil.CloseWithLogOnErr(b.logger, r, "s3 get range obj close")
// First GET Object request error.
return nil, err
}
return r, nil
}
// Get returns a reader for the given object name.
func (b *Bucket) Get(ctx context.Context, name string) (io.ReadCloser, error) {
return b.getRange(ctx, name, 0, -1)
}
// GetRange returns a new range reader for the given object name and range.
func (b *Bucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) {
return b.getRange(ctx, name, off, length)
}
// Exists checks if the given object exists.
func (b *Bucket) Exists(ctx context.Context, name string) (bool, error) {
_, err := b.client.StatObject(b.name, name, minio.StatObjectOptions{})
if err != nil {
if b.IsObjNotFoundErr(err) {
return false, nil
}
return false, errors.Wrap(err, "stat s3 object")
}
return true, nil
}
func (b *Bucket) guessFileSize(name string, r io.Reader) int64 {
if f, ok := r.(*os.File); ok {
fileInfo, err := f.Stat()
if err == nil {
return fileInfo.Size()
}
level.Warn(b.logger).Log("msg", "could not stat file for multipart upload", "name", name, "err", err)
return -1
}
level.Warn(b.logger).Log("msg", "could not guess file size for multipart upload", "name", name)
return -1
}
// Upload the contents of the reader as an object into the bucket.
func (b *Bucket) Upload(ctx context.Context, name string, r io.Reader) error {
// TODO(https://github.com/improbable-eng/thanos/issues/678): Remove the length guessing once the minio provider supports multipart upload without it.
fileSize := b.guessFileSize(name, r)
if _, err := b.client.PutObjectWithContext(
ctx,
b.name,
name,
r,
fileSize,
minio.PutObjectOptions{
ServerSideEncryption: b.sse,
UserMetadata: b.putUserMetadata,
},
); err != nil {
return errors.Wrap(err, "upload s3 object")
}
return nil
}
// Delete removes the object with the given name.
func (b *Bucket) Delete(ctx context.Context, name string) error {
return b.client.RemoveObject(b.name, name)
}
// IsObjNotFoundErr returns true if the error means the object is not found. Relevant to Get operations.
func (b *Bucket) IsObjNotFoundErr(err error) bool {
return minio.ToErrorResponse(err).Code == "NoSuchKey"
}
func (b *Bucket) Close() error { return nil }
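// configFromEnv builds a Config from the S3_* environment variables; it is used by the test helpers below.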
func configFromEnv() Config {
c := Config{
Bucket: os.Getenv("S3_BUCKET"),
Endpoint: os.Getenv("S3_ENDPOINT"),
AccessKey: os.Getenv("S3_ACCESS_KEY"),
SecretKey: os.Getenv("S3_SECRET_KEY"),
}
c.Insecure, _ = strconv.ParseBool(os.Getenv("S3_INSECURE"))
c.SignatureV2, _ = strconv.ParseBool(os.Getenv("S3_SIGNATURE_VERSION2"))
return c
}
// NewTestBucket creates a test bucket client, creating a temporary bucket before returning.
// The returned close function empties and deletes the bucket.
func NewTestBucket(t testing.TB, location string) (objstore.Bucket, func(), error) {
c := configFromEnv()
if err := ValidateForTests(c); err != nil {
return nil, nil, err
}
if c.Bucket != "" && os.Getenv("THANOS_ALLOW_EXISTING_BUCKET_USE") == "" {
return nil, nil, errors.New("S3_BUCKET is defined. Normally this tests will create temporary bucket " +
"and delete it after test. Unset S3_BUCKET env variable to use default logic. If you really want to run " +
"tests against provided (NOT USED!) bucket, set THANOS_ALLOW_EXISTING_BUCKET_USE=true. WARNING: That bucket " +
"needs to be manually cleared. This means that it is only useful to run one test in a time. This is due " +
"to safety (accidentally pointing prod bucket for test) as well as aws s3 not being fully strong consistent.")
}
return NewTestBucketFromConfig(t, location, c, true)
}
func NewTestBucketFromConfig(t testing.TB, location string, c Config, reuseBucket bool) (objstore.Bucket, func(), error) {
bc, err := yaml.Marshal(c)
if err != nil {
return nil, nil, err
}
b, err := NewBucket(log.NewNopLogger(), bc, "thanos-e2e-test")
if err != nil {
return nil, nil, err
}
bktToCreate := c.Bucket
if c.Bucket != "" && reuseBucket {
if err := b.Iter(context.Background(), "", func(f string) error {
return errors.Errorf("bucket %s is not empty", c.Bucket)
}); err != nil {
return nil, nil, errors.Wrapf(err, "s3 check bucket %s", c.Bucket)
}
t.Log("WARNING. Reusing", c.Bucket, "AWS bucket for AWS tests. Manual cleanup afterwards is required")
return b, func() {}, nil
}
if c.Bucket == "" {
src := rand.NewSource(time.Now().UnixNano())
// Bucket name needs to conform to: https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-s3-bucket-naming-requirements.html
bktToCreate = strings.Replace(fmt.Sprintf("test_%s_%x", strings.ToLower(t.Name()), src.Int63()), "_", "-", -1)
if len(bktToCreate) >= 63 {
bktToCreate = bktToCreate[:63]
}
}
if err := b.client.MakeBucket(bktToCreate, location); err != nil {
return nil, nil, err
}
b.name = bktToCreate
t.Log("created temporary AWS bucket for AWS tests with name", bktToCreate, "in", location)
return b, func() {
objstore.EmptyBucket(t, context.Background(), b)
if err := b.client.RemoveBucket(bktToCreate); err != nil {
t.Logf("deleting bucket %s failed: %s", bktToCreate, err)
}
}, nil
}
| [
"\"S3_BUCKET\"",
"\"S3_ENDPOINT\"",
"\"S3_ACCESS_KEY\"",
"\"S3_SECRET_KEY\"",
"\"S3_INSECURE\"",
"\"S3_SIGNATURE_VERSION2\"",
"\"THANOS_ALLOW_EXISTING_BUCKET_USE\""
]
| []
| [
"S3_BUCKET",
"THANOS_ALLOW_EXISTING_BUCKET_USE",
"S3_SECRET_KEY",
"S3_ACCESS_KEY",
"S3_INSECURE",
"S3_ENDPOINT",
"S3_SIGNATURE_VERSION2"
]
| [] | ["S3_BUCKET", "THANOS_ALLOW_EXISTING_BUCKET_USE", "S3_SECRET_KEY", "S3_ACCESS_KEY", "S3_INSECURE", "S3_ENDPOINT", "S3_SIGNATURE_VERSION2"] | go | 7 | 0 | |
client/proxy/proxy.go | // Package proxy is a cli proxy
package proxy
import (
"os"
"strings"
"github.com/go-acme/lego/v3/providers/dns/cloudflare"
"github.com/micro/cli/v2"
"github.com/micro/go-micro/v2"
"github.com/micro/go-micro/v2/api/server/acme"
"github.com/micro/go-micro/v2/api/server/acme/autocert"
"github.com/micro/go-micro/v2/api/server/acme/certmagic"
"github.com/micro/go-micro/v2/auth"
bmem "github.com/micro/go-micro/v2/broker/memory"
mucli "github.com/micro/go-micro/v2/client"
log "github.com/micro/go-micro/v2/logger"
"github.com/micro/go-micro/v2/proxy"
//"github.com/micro/go-micro/v2/proxy/grpc"
"github.com/micro/go-micro/v2/proxy/http"
"github.com/micro/go-micro/v2/proxy/mucp"
rmem "github.com/micro/go-micro/v2/registry/memory"
"github.com/micro/go-micro/v2/server"
sgrpc "github.com/micro/go-micro/v2/server/grpc"
"github.com/micro/go-micro/v2/sync/memory"
"github.com/micro/go-micro/v2/util/mux"
"github.com/micro/micro/v2/internal/helper"
"github.com/micro/micro/v2/service"
)
var (
// Name of the proxy
Name = "go.micro.proxy"
// The address of the proxy
Address = ":8081"
// the proxy protocol
Protocol = "grpc"
// The endpoint host to route to
Endpoint string
// ACME (Cert management)
ACMEProvider = "autocert"
ACMEChallengeProvider = "cloudflare"
ACMECA = acme.LetsEncryptProductionCA
)
func Run(ctx *cli.Context) error {
log.Init(log.WithFields(map[string]interface{}{"service": "proxy"}))
// because MICRO_PROXY_ADDRESS is used internally by the go-micro/client
// we need to unset it so we don't end up calling ourselves infinitely
os.Unsetenv("MICRO_PROXY_ADDRESS")
if len(ctx.String("server_name")) > 0 {
Name = ctx.String("server_name")
}
if len(ctx.String("address")) > 0 {
Address = ctx.String("address")
}
if len(ctx.String("endpoint")) > 0 {
Endpoint = ctx.String("endpoint")
}
if len(ctx.String("protocol")) > 0 {
Protocol = ctx.String("protocol")
}
if len(ctx.String("acme_provider")) > 0 {
ACMEProvider = ctx.String("acme_provider")
}
// new service
service := service.New(service.Name(Name))
// set the context
popts := []proxy.Option{
proxy.WithRouter(service.Options().Router),
}
// new proxy
var p proxy.Proxy
// setup the default server
var srv server.Server
// set endpoint
if len(Endpoint) > 0 {
switch {
case strings.HasPrefix(Endpoint, "grpc://"):
ep := strings.TrimPrefix(Endpoint, "grpc://")
popts = append(popts, proxy.WithEndpoint(ep))
Protocol = "grpc"
case strings.HasPrefix(Endpoint, "http://"):
// TODO: strip prefix?
popts = append(popts, proxy.WithEndpoint(Endpoint))
Protocol = "http"
default:
// TODO: strip prefix?
popts = append(popts, proxy.WithEndpoint(Endpoint))
Protocol = "mucp"
}
}
serverOpts := []server.Option{
server.Address(Address),
server.Registry(rmem.NewRegistry()),
server.Broker(bmem.NewBroker()),
}
// enable_acme generates a TLS config via the selected ACME provider and applies it to the server
if ctx.Bool("enable_acme") {
var ap acme.Provider
switch ACMEProvider {
case "autocert":
ap = autocert.NewProvider()
case "certmagic":
if ACMEChallengeProvider != "cloudflare" {
log.Fatal("The only implemented DNS challenge provider is cloudflare")
}
apiToken := os.Getenv("CF_API_TOKEN")
if len(apiToken) == 0 {
log.Fatal("env variables CF_API_TOKEN and CF_ACCOUNT_ID must be set")
}
storage := certmagic.NewStorage(
memory.NewSync(),
service.Options().Store,
)
config := cloudflare.NewDefaultConfig()
config.AuthToken = apiToken
config.ZoneToken = apiToken
challengeProvider, err := cloudflare.NewDNSProviderConfig(config)
if err != nil {
log.Fatal(err.Error())
}
// define the provider
ap = certmagic.NewProvider(
acme.AcceptToS(true),
acme.CA(ACMECA),
acme.Cache(storage),
acme.ChallengeProvider(challengeProvider),
acme.OnDemand(false),
)
default:
log.Fatalf("Unsupported acme provider: %s\n", ACMEProvider)
}
// generate the tls config
config, err := ap.TLSConfig(helper.ACMEHosts(ctx)...)
if err != nil {
log.Fatalf("Failed to generate acme tls config: %v", err)
}
// set the tls config
serverOpts = append(serverOpts, server.TLSConfig(config))
// enable tls will leverage tls certs and generate a tls.Config
} else if ctx.Bool("enable_tls") {
// get certificates from the context
config, err := helper.TLSConfig(ctx)
if err != nil {
log.Fatal(err)
return err
}
serverOpts = append(serverOpts, server.TLSConfig(config))
}
// wrap the proxy using the proxy's authHandler
authFn := func() auth.Auth { return service.Options().Auth }
authOpt := server.WrapHandler(authHandler(authFn))
serverOpts = append(serverOpts, authOpt)
// set proxy
switch Protocol {
case "http":
p = http.NewProxy(popts...)
serverOpts = append(serverOpts, server.WithRouter(p))
// TODO: http server
srv = server.NewServer(serverOpts...)
case "mucp":
popts = append(popts, proxy.WithClient(mucli.NewClient()))
p = mucp.NewProxy(popts...)
serverOpts = append(serverOpts, server.WithRouter(p))
srv = server.NewServer(serverOpts...)
default:
p = mucp.NewProxy(popts...)
serverOpts = append(serverOpts, server.WithRouter(p))
srv = sgrpc.NewServer(serverOpts...)
}
if len(Endpoint) > 0 {
log.Infof("Proxy [%s] serving endpoint: %s", p.String(), Endpoint)
} else {
log.Infof("Proxy [%s] serving protocol: %s", p.String(), Protocol)
}
// create a new proxy muxer which includes the debug handler
muxer := mux.New(Name, p)
// set the router
service.Server().Init(
server.WithRouter(muxer),
)
// Start the proxy server
if err := srv.Start(); err != nil {
log.Fatal(err)
}
// Run internal service
if err := service.Run(); err != nil {
log.Fatal(err)
}
// Stop the server
if err := srv.Stop(); err != nil {
log.Fatal(err)
}
return nil
}
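// Commands returns the CLI command definition for the proxy.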
func Commands(options ...micro.Option) []*cli.Command {
command := &cli.Command{
Name: "proxy",
Usage: "Run the service proxy",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "address",
Usage: "Set the proxy http address e.g 0.0.0.0:8081",
EnvVars: []string{"MICRO_PROXY_ADDRESS"},
},
&cli.StringFlag{
Name: "protocol",
Usage: "Set the protocol used for proxying e.g mucp, grpc, http",
EnvVars: []string{"MICRO_PROXY_PROTOCOL"},
},
&cli.StringFlag{
Name: "endpoint",
Usage: "Set the endpoint to route to e.g greeter or localhost:9090",
EnvVars: []string{"MICRO_PROXY_ENDPOINT"},
},
},
Action: Run,
}
return []*cli.Command{command}
}
| [
"\"CF_API_TOKEN\""
]
| []
| [
"CF_API_TOKEN"
]
| [] | ["CF_API_TOKEN"] | go | 1 | 0 | |
services/service-f/main.go | // author: Gary A. Stafford
// site: https://programmaticponderings.com
// license: MIT License
// purpose: Service F
package main
import (
"bytes"
"context"
"encoding/json"
"github.com/banzaicloud/logrus-runtime-formatter"
"github.com/google/uuid"
"github.com/gorilla/mux"
log "github.com/sirupsen/logrus"
"github.com/streadway/amqp"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
"net/http"
"os"
"time"
)
type Greeting struct {
ID string `json:"id,omitempty"`
ServiceName string `json:"service,omitempty"`
Message string `json:"message,omitempty"`
CreatedAt time.Time `json:"created,omitempty"`
}
var greetings []Greeting
func PingHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
greetings = nil
tmpGreeting := Greeting{
ID: uuid.New().String(),
ServiceName: "Service-F",
Message: "Hola, from Service-F!",
CreatedAt: time.Now().Local(),
}
greetings = append(greetings, tmpGreeting)
CallMongoDB(tmpGreeting)
err := json.NewEncoder(w).Encode(greetings)
if err != nil {
log.Error(err)
}
}
func HealthCheckHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
_, err := w.Write([]byte("{\"alive\": true}"))
if err != nil {
log.Error(err)
}
}
func CallMongoDB(greeting Greeting) {
log.Info(greeting)
ctx, _ := context.WithTimeout(context.Background(), 10*time.Second)
client, err := mongo.Connect(ctx, options.Client().ApplyURI(os.Getenv("MONGO_CONN")))
if err != nil {
log.Error(err)
}
defer client.Disconnect(nil)
collection := client.Database("service-f").Collection("messages")
ctx, _ = context.WithTimeout(context.Background(), 5*time.Second)
_, err = collection.InsertOne(ctx, greeting)
if err != nil {
log.Error(err)
}
}
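// GetMessages consumes greetings from the service-d queue on RabbitMQ and writes each one to MongoDB.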
func GetMessages() {
conn, err := amqp.Dial(os.Getenv("RABBITMQ_CONN"))
if err != nil {
log.Error(err)
}
defer conn.Close()
ch, err := conn.Channel()
if err != nil {
log.Error(err)
}
defer ch.Close()
q, err := ch.QueueDeclare(
"service-d",
false,
false,
false,
false,
nil,
)
if err != nil {
log.Error(err)
}
msgs, err := ch.Consume(
q.Name,
"service-f",
true,
false,
false,
false,
nil,
)
if err != nil {
log.Error(err)
}
forever := make(chan bool)
go func() {
for delivery := range msgs {
log.Debug(delivery)
CallMongoDB(deserialize(delivery.Body))
}
}()
<-forever
}
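// deserialize decodes a JSON-encoded Greeting from a raw message body.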
func deserialize(b []byte) (t Greeting) {
log.Debug(b)
var tmpGreeting Greeting
buf := bytes.NewBuffer(b)
decoder := json.NewDecoder(buf)
err := decoder.Decode(&tmpGreeting)
if err != nil {
log.Error(err)
}
return tmpGreeting
}
func getEnv(key, fallback string) string {
if value, ok := os.LookupEnv(key); ok {
return value
}
return fallback
}
func init() {
formatter := runtime.Formatter{ChildFormatter: &log.JSONFormatter{}}
formatter.Line = true
log.SetFormatter(&formatter)
log.SetOutput(os.Stdout)
level, err := log.ParseLevel(getEnv("LOG_LEVEL", "info"))
if err != nil {
log.Error(err)
}
log.SetLevel(level)
}
func main() {
go GetMessages()
router := mux.NewRouter()
api := router.PathPrefix("/api").Subrouter()
api.HandleFunc("/ping", PingHandler).Methods("GET")
api.HandleFunc("/health", HealthCheckHandler).Methods("GET")
log.Fatal(http.ListenAndServe(":80", router))
}
| [
"\"MONGO_CONN\"",
"\"RABBITMQ_CONN\""
]
| []
| [
"RABBITMQ_CONN",
"MONGO_CONN"
]
| [] | ["RABBITMQ_CONN", "MONGO_CONN"] | go | 2 | 0 | |
tools/generate.go | package main
import (
"crypto/sha1"
"fmt"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"text/template"
"github.com/russross/blackfriday"
)
var cacheDir = "/tmp/gobyexample-cache"
var siteDir = "./public"
var pygmentizeBin = "./vendor/pygments/pygmentize"
func check(err error) {
if err != nil {
panic(err)
}
}
func ensureDir(dir string) {
err := os.MkdirAll(dir, 0755)
check(err)
}
func copyFile(src, dst string) {
dat, err := ioutil.ReadFile(src)
check(err)
err = ioutil.WriteFile(dst, dat, 0644)
check(err)
}
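// pipe runs bin with the given arguments, feeds src to its stdin, and returns the captured stdout.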
func pipe(bin string, arg []string, src string) []byte {
cmd := exec.Command(bin, arg...)
in, err := cmd.StdinPipe()
check(err)
out, err := cmd.StdoutPipe()
check(err)
err = cmd.Start()
check(err)
_, err = in.Write([]byte(src))
check(err)
err = in.Close()
check(err)
bytes, err := ioutil.ReadAll(out)
check(err)
err = cmd.Wait()
check(err)
return bytes
}
func sha1Sum(s string) string {
h := sha1.New()
h.Write([]byte(s))
b := h.Sum(nil)
return fmt.Sprintf("%x", b)
}
func mustReadFile(path string) string {
bytes, err := ioutil.ReadFile(path)
check(err)
return string(bytes)
}
func cachedPygmentize(lex string, src string) string {
ensureDir(cacheDir)
arg := []string{"-l", lex, "-f", "html"}
cachePath := cacheDir + "/pygmentize-" + strings.Join(arg, "-") + "-" + sha1Sum(src)
cacheBytes, cacheErr := ioutil.ReadFile(cachePath)
if cacheErr == nil {
return string(cacheBytes)
}
renderBytes := pipe(pygmentizeBin, arg, src)
// Newer versions of Pygments add silly empty spans.
renderCleanString := strings.Replace(string(renderBytes), "<span></span>", "", -1)
writeErr := ioutil.WriteFile(cachePath, []byte(renderCleanString), 0600)
check(writeErr)
return renderCleanString
}
func markdown(src string) string {
return string(blackfriday.MarkdownCommon([]byte(src)))
}
func readLines(path string) []string {
src := mustReadFile(path)
return strings.Split(src, "\n")
}
func mustGlob(glob string) []string {
paths, err := filepath.Glob(glob)
check(err)
return paths
}
func whichLexer(path string) string {
if strings.HasSuffix(path, ".go") {
return "go"
} else if strings.HasSuffix(path, ".sh") {
return "console"
}
panic("No lexer for " + path)
return ""
}
func debug(msg string) {
if os.Getenv("DEBUG") == "1" {
fmt.Fprintln(os.Stderr, msg)
}
}
var docsPat = regexp.MustCompile("^\\s*(\\/\\/|#)\\s")
var dashPat = regexp.MustCompile("\\-+")
type Seg struct {
Docs, DocsRendered string
Code, CodeRendered string
CodeEmpty, CodeLeading, CodeRun bool
}
type Example struct {
Id, Name string
GoCode, GoCodeHash, UrlHash string
Segs [][]*Seg
NextExample *Example
}
func parseHashFile(sourcePath string) (string, string) {
lines := readLines(sourcePath)
return lines[0], lines[1]
}
func resetUrlHashFile(codehash, code, sourcePath string) string {
payload := strings.NewReader(code)
resp, err := http.Post("https://play.golang.org/share", "text/plain", payload)
if err != nil {
panic(err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
urlkey := string(body)
data := fmt.Sprintf("%s\n%s\n", codehash, urlkey)
ioutil.WriteFile(sourcePath, []byte(data), 0644)
return urlkey
}
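// parseSegs splits a source file into alternating documentation and code segments and also returns the
// full file contents.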
func parseSegs(sourcePath string) ([]*Seg, string) {
lines := readLines(sourcePath)
filecontent := strings.Join(lines, "\n")
segs := []*Seg{}
lastSeen := ""
for _, line := range lines {
if line == "" {
lastSeen = ""
continue
}
matchDocs := docsPat.MatchString(line)
if strings.HasPrefix(line, "// +build") {
matchDocs = false
}
matchCode := !matchDocs
newDocs := (lastSeen == "") || ((lastSeen != "docs") && (segs[len(segs)-1].Docs != ""))
newCode := (lastSeen == "") || ((lastSeen != "code") && (segs[len(segs)-1].Code != ""))
if newDocs || newCode {
debug("NEWSEG")
}
if matchDocs {
trimmed := docsPat.ReplaceAllString(line, "")
if newDocs {
newSeg := Seg{Docs: trimmed, Code: ""}
segs = append(segs, &newSeg)
} else {
segs[len(segs)-1].Docs = segs[len(segs)-1].Docs + "\n" + trimmed
}
debug("DOCS: " + line)
lastSeen = "docs"
} else if matchCode {
if newCode {
newSeg := Seg{Docs: "", Code: line}
segs = append(segs, &newSeg)
} else {
segs[len(segs)-1].Code = segs[len(segs)-1].Code + "\n" + line
}
debug("CODE: " + line)
lastSeen = "code"
}
}
for i, seg := range segs {
seg.CodeEmpty = (seg.Code == "")
seg.CodeLeading = (i < (len(segs) - 1))
seg.CodeRun = strings.Contains(seg.Code, "package main")
}
return segs, filecontent
}
func parseAndRenderSegs(sourcePath string) ([]*Seg, string) {
segs, filecontent := parseSegs(sourcePath)
lexer := whichLexer(sourcePath)
for _, seg := range segs {
if seg.Docs != "" {
seg.DocsRendered = markdown(seg.Docs)
}
if seg.Code != "" {
seg.CodeRendered = cachedPygmentize(lexer, seg.Code)
}
}
// we are only interested in the 'go' code to pass to play.golang.org
if lexer != "go" {
filecontent = ""
}
return segs, filecontent
}
func parseExamples() []*Example {
exampleNames := readLines("examples.txt")
examples := make([]*Example, 0)
for _, exampleName := range exampleNames {
if (exampleName != "") && !strings.HasPrefix(exampleName, "#") {
example := Example{Name: exampleName}
exampleId := strings.ToLower(exampleName)
exampleId = strings.Replace(exampleId, " ", "-", -1)
exampleId = strings.Replace(exampleId, "/", "-", -1)
exampleId = strings.Replace(exampleId, "'", "", -1)
exampleId = dashPat.ReplaceAllString(exampleId, "-")
example.Id = exampleId
example.Segs = make([][]*Seg, 0)
sourcePaths := mustGlob("examples/" + exampleId + "/*")
for _, sourcePath := range sourcePaths {
if strings.HasSuffix(sourcePath, ".hash") {
example.GoCodeHash, example.UrlHash = parseHashFile(sourcePath)
} else {
sourceSegs, filecontents := parseAndRenderSegs(sourcePath)
if filecontents != "" {
example.GoCode = filecontents
}
example.Segs = append(example.Segs, sourceSegs)
}
}
newCodeHash := sha1Sum(example.GoCode)
if example.GoCodeHash != newCodeHash {
example.UrlHash = resetUrlHashFile(newCodeHash, example.GoCode, "examples/"+example.Id+"/"+example.Id+".hash")
}
examples = append(examples, &example)
}
}
for i, example := range examples {
if i < (len(examples) - 1) {
example.NextExample = examples[i+1]
}
}
return examples
}
func renderIndex(examples []*Example) {
indexTmpl := template.New("index")
_, err := indexTmpl.Parse(mustReadFile("templates/index.tmpl"))
check(err)
indexF, err := os.Create(siteDir + "/index.html")
check(err)
indexTmpl.Execute(indexF, examples)
}
func renderExamples(examples []*Example) {
exampleTmpl := template.New("example")
_, err := exampleTmpl.Parse(mustReadFile("templates/example.tmpl"))
check(err)
for _, example := range examples {
exampleF, err := os.Create(siteDir + "/" + example.Id + ".html")
check(err)
exampleTmpl.Execute(exampleF, example)
}
}
func main() {
copyFile("templates/site.css", siteDir+"/site.css")
copyFile("templates/favicon.ico", siteDir+"/favicon.ico")
copyFile("templates/404.html", siteDir+"/404.html")
copyFile("templates/play.png", siteDir+"/play.png")
examples := parseExamples()
renderIndex(examples)
renderExamples(examples)
}
| [
"\"DEBUG\""
]
| []
| [
"DEBUG"
]
| [] | ["DEBUG"] | go | 1 | 0 | |
internal/config.go | package config
import (
"fmt"
"log"
"os"
"github.com/joho/godotenv"
)
var Config config
type config struct {
NatsUrl string
}
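// init loads ../.env and assembles the NATS connection URL from the NATS_HOST and NATS_PORT variables.
// A minimal .env might look like (illustrative values only):
//   NATS_HOST=localhost
//   NATS_PORT=4222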
func init() {
err := godotenv.Load("../.env")
if err != nil {
log.Fatal("Error loading .env file")
}
natsHost := os.Getenv("NATS_HOST")
natsPort := os.Getenv("NATS_PORT")
Config.NatsUrl = fmt.Sprintf("%s:%s", natsHost, natsPort)
}
| [
"\"NATS_HOST\"",
"\"NATS_PORT\""
]
| []
| [
"NATS_PORT",
"NATS_HOST"
]
| [] | ["NATS_PORT", "NATS_HOST"] | go | 2 | 0 | |
eventstore_test.go | package ehpg_test
import (
"context"
"os"
"testing"
"github.com/giautm/eh-pg"
"github.com/go-pg/pg"
eh "github.com/looplab/eventhorizon"
testutil "github.com/looplab/eventhorizon/eventstore"
)
func TestEventStore(t *testing.T) {
db := pg.Connect(&pg.Options{
Addr: os.Getenv("POSTGRES_ADDR"),
Database: os.Getenv("POSTGRES_DB"),
User: os.Getenv("POSTGRES_USER"),
Password: os.Getenv("POSTGRES_PASSWORD"),
})
defer db.Close()
store, err := ehpg.NewEventStore(db)
if err != nil {
t.Fatal("there should be no error:", err)
}
if store == nil {
t.Fatal("there should be a store")
}
ctx := eh.NewContextWithNamespace(context.Background(), "ns")
defer func() {
t.Log("clearing db")
if err = store.Clear(context.Background()); err != nil {
t.Fatal("there should be no error:", err)
}
if err = store.Clear(ctx); err != nil {
t.Fatal("there should be no error:", err)
}
}()
// Run the actual test suite.
t.Log("event store with default namespace")
testutil.AcceptanceTest(t, context.Background(), store)
t.Log("event store with other namespace")
testutil.AcceptanceTest(t, ctx, store)
t.Log("event store maintainer")
testutil.MaintainerAcceptanceTest(t, context.Background(), store)
}
| [
"\"POSTGRES_ADDR\"",
"\"POSTGRES_DB\"",
"\"POSTGRES_USER\"",
"\"POSTGRES_PASSWORD\""
]
| []
| [
"POSTGRES_PASSWORD",
"POSTGRES_USER",
"POSTGRES_DB",
"POSTGRES_ADDR"
]
| [] | ["POSTGRES_PASSWORD", "POSTGRES_USER", "POSTGRES_DB", "POSTGRES_ADDR"] | go | 4 | 0 | |
pkg/testcommon/testcommon_test.go | package testcommon
import (
"fmt"
"github.com/drud/ddev/pkg/ddevapp"
"github.com/drud/ddev/pkg/dockerutil"
"github.com/drud/ddev/pkg/exec"
"github.com/drud/ddev/pkg/fileutil"
asrt "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"os"
"path/filepath"
"reflect"
"testing"
)
var DdevBin = "ddev"
var TestSites = []TestSite{
{
Name: "",
SourceURL: "https://github.com/drud/wordpress/archive/v0.4.0.tar.gz",
ArchiveInternalExtractionPath: "wordpress-0.4.0/",
FilesTarballURL: "https://github.com/drud/wordpress/releases/download/v0.4.0/files.tar.gz",
DBTarURL: "https://github.com/drud/wordpress/releases/download/v0.4.0/db.tar.gz",
Docroot: "htdocs",
Type: ddevapp.AppTypeWordPress,
Safe200URIWithExpectation: URIWithExpect{URI: "/readme.html", Expect: "Welcome. WordPress is a very special project to me."},
},
}
// TestTmpDir tests the ability to create a temporary directory.
func TestTmpDir(t *testing.T) {
assert := asrt.New(t)
// Create a temporary directory and ensure it exists.
testDir := CreateTmpDir("TestTmpDir")
dirStat, err := os.Stat(testDir)
assert.NoError(err, "There is no error when getting directory details")
assert.True(dirStat.IsDir(), "Temp Directory created and exists")
// Clean up temporary directory and ensure it no longer exists.
CleanupDir(testDir)
_, err = os.Stat(testDir)
assert.Error(err, "Could not stat temporary directory")
if err != nil {
assert.True(os.IsNotExist(err), "Error is of type IsNotExists")
}
}
// TestChdir tests the Chdir function and ensures it will change to a temporary directory and then properly return
// to the original directory when cleaned up.
func TestChdir(t *testing.T) {
assert := asrt.New(t)
// Get the current working directory.
startingDir, err := os.Getwd()
assert.NoError(err)
// Create a temporary directory.
testDir := CreateTmpDir("TestChdir")
assert.NotEqual(startingDir, testDir, "Ensure our starting directory and temporary directory are not the same")
// Change to the temporary directory.
cleanupFunc := Chdir(testDir)
currentDir, err := os.Getwd()
assert.NoError(err)
// On macOS these are created under /var, but /var is a symlink to /private/var, so we cannot ensure complete equality of these strings.
assert.Contains(currentDir, testDir, "Ensure the current directory is the temporary directory we created")
assert.True(reflect.TypeOf(cleanupFunc).Kind() == reflect.Func, "Chdir return is of type function")
cleanupFunc()
currentDir, err = os.Getwd()
assert.NoError(err)
assert.Equal(currentDir, startingDir, "Ensure we have changed back to the starting directory")
CleanupDir(testDir)
}
// TestValidTestSite tests the TestSite struct behavior in the case of a valid configuration.
func TestValidTestSite(t *testing.T) {
assert := asrt.New(t)
// Get the current working directory.
startingDir, err := os.Getwd()
assert.NoError(err, "Could not get current directory.")
// It's not ideal to copy/paste this archive around, but we don't actually care about the contents
// of the archive for this test, only that it exists and can be extracted. This should (knock on wood)
// not need to be updated over time.
site := TestSites[0]
// If running this with GOTEST_SHORT we have to create the directory, tarball etc.
site.Name = "TestValidTestSite"
if site.Dir == "" || !fileutil.FileExists(site.Dir) {
err = site.Prepare()
if err != nil {
t.Fatalf("Prepare() failed on TestSite.Prepare() site=%s, err=%v", site.Name, err)
}
}
assert.NotNil(site.Dir, "Directory is set.")
docroot := filepath.Join(site.Dir, site.Docroot)
dirStat, err := os.Stat(docroot)
assert.NoError(err, "Docroot exists after prepare()")
if err != nil {
t.Fatalf("Directory did not exist after prepare(): %s", docroot)
}
assert.True(dirStat.IsDir(), "Docroot is a directory")
cleanup := site.Chdir()
defer cleanup()
currentDir, err := os.Getwd()
assert.NoError(err)
// On macOS these are created under /var, but /var is a symlink to /private/var, so we cannot ensure complete equality of these strings.
assert.Contains(currentDir, site.Dir)
cleanup()
currentDir, err = os.Getwd()
assert.NoError(err)
assert.Equal(startingDir, currentDir)
site.Cleanup()
_, err = os.Stat(site.Dir)
assert.Error(err, "Could not stat temporary directory after cleanup")
}
// TestGetLocalHTTPResponse() brings up a project and hits a URL to get the response
func TestGetLocalHTTPResponse(t *testing.T) {
assert := asrt.New(t)
dockerutil.EnsureDdevNetwork()
if os.Getenv("DDEV_BINARY_FULLPATH") != "" {
DdevBin = os.Getenv("DDEV_BINARY_FULLPATH")
}
out, err := exec.RunCommand(DdevBin, []string{"stop", "--all"})
assert.NoError(err, "ddev stop --all should succeed but failed, err: %v, output: %s", err, out)
router, _ := ddevapp.FindDdevRouter()
require.Empty(t, router)
// It's not ideal to copy/paste this archive around, but we don't actually care about the contents
// of the archive for this test, only that it exists and can be extracted. This should (knock on wood)
// not need to be updated over time.
site := TestSites[0]
site.Name = "TestGetLocalHTTPResponse"
// If running this with GOTEST_SHORT we have to create the directory, tarball etc.
if site.Dir == "" || !fileutil.FileExists(site.Dir) {
err := site.Prepare()
if err != nil {
t.Fatalf("Prepare() failed on TestSite.Prepare() site=%s, err=%v", site.Name, err)
}
}
cleanup := site.Chdir()
defer cleanup()
app := &ddevapp.DdevApp{}
err = app.Init(site.Dir)
app.Name = "TestGetLocalHTTPResponse"
assert.NoError(err)
// nolint: errcheck
defer app.Stop(true, false)
for _, pair := range []PortPair{{"80", "443"}, {"8080", "8443"}} {
ClearDockerEnv()
app.RouterHTTPPort = pair.HTTPPort
app.RouterHTTPSPort = pair.HTTPSPort
err = app.WriteConfig()
assert.NoError(err)
startErr := app.StartAndWaitForSync(5)
if startErr != nil {
logs, err := ddevapp.GetErrLogsFromApp(app, startErr)
assert.NoError(err)
t.Fatalf("logs from broken container:\n=======\n%s\n========\n", logs)
}
safeURL := app.GetHTTPURL() + site.Safe200URIWithExpectation.URI
out, _, err := GetLocalHTTPResponse(t, safeURL)
assert.NoError(err)
assert.Contains(out, site.Safe200URIWithExpectation.Expect)
safeURL = app.GetHTTPSURL() + site.Safe200URIWithExpectation.URI
out, _, err = GetLocalHTTPResponse(t, safeURL)
assert.NoError(err)
assert.Contains(out, site.Safe200URIWithExpectation.Expect)
// This does the same thing as previous, but worth exercising it here.
_, _ = EnsureLocalHTTPContent(t, safeURL, site.Safe200URIWithExpectation.Expect)
}
// Set the ports back to the default was so we don't break any following tests.
app.RouterHTTPSPort = "443"
app.RouterHTTPPort = "80"
err = app.WriteConfig()
assert.NoError(err)
err = app.Stop(true, false)
assert.NoError(err)
cleanup()
site.Cleanup()
}
// TestGetCachedArchive tests download and extraction of archives for test sites
// to testcache directory.
func TestGetCachedArchive(t *testing.T) {
assert := asrt.New(t)
sourceURL := "https://raw.githubusercontent.com/drud/ddev/master/.gitignore"
exPath, archPath, err := GetCachedArchive("TestInvalidArchive", "test", "", sourceURL)
assert.Error(err)
if err != nil {
assert.Contains(err.Error(), fmt.Sprintf("archive extraction of %s failed", archPath))
}
err = os.RemoveAll(filepath.Dir(exPath))
assert.NoError(err)
sourceURL = "http://invalid_domain/somefilethatdoesnotexists"
exPath, archPath, err = GetCachedArchive("TestInvalidDownloadURL", "test", "", sourceURL)
assert.Error(err)
if err != nil {
assert.Contains(err.Error(), fmt.Sprintf("Failed to download url=%s into %s", sourceURL, archPath))
}
err = os.RemoveAll(filepath.Dir(exPath))
assert.NoError(err)
}
| [
"\"DDEV_BINARY_FULLPATH\"",
"\"DDEV_BINARY_FULLPATH\""
]
| []
| [
"DDEV_BINARY_FULLPATH"
]
| [] | ["DDEV_BINARY_FULLPATH"] | go | 1 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "weibosystem.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
groupbunk.py | """
GroupBunk v.1.2
Leave your Facebook groups quietly
Author: Shine Jayakumar
Github: https://github.com/shine-jayakumar
LICENSE: MIT
"""
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import StaleElementReferenceException
from webdriver_manager.chrome import ChromeDriverManager
import argparse
import logging
import sys
from datetime import datetime
import time
from groupfuncs import *
import os
# suppress webdriver manager logs
os.environ['WDM_LOG_LEVEL'] = '0'
IGNORE_DIV = ['your feed', 'discover', 'your notifications']
FB_GROUP_URL = 'https://www.facebook.com/groups/feed/'
def display_intro():
'''
Displays intro of the script
'''
intro = """
GroupBunk v.1.2
Leave your Facebook groups quietly
Author: Shine Jayakumar
Github: https://github.com/shine-jayakumar
"""
print(intro)
def time_taken(start_time, logger):
'''
Calculates the time difference from now and start time
'''
end_time = time.time()
logger.info(f"Total time taken: {round(end_time - start_time, 4)} seconds")
def cleanup_and_quit(driver):
'''
Quits driver and exits the script
'''
if driver:
driver.quit()
sys.exit()
start_time = time.time()
# ====================================================
# Argument parsing
# ====================================================
description = "Leave your Facebook groups quietly"
usage = "groupbunk.py username password [-h] [-eg FILE] [-et TIMEOUT] [-sw WAIT] [-gr RETRYCOUNT] [-dg FILE]"
examples="""
Examples:
groupbunk.py [email protected] bobspassword101
groupbunk.py [email protected] bobspassword101 -eg keepgroups.txt
groupbunk.py [email protected] bobspassword101 -et 60 --scrollwait 10 -gr 7
groupbunk.py [email protected] bobspassword101 --dumpgroups mygroup.txt --groupretry 5
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=description,
usage=usage,
epilog=examples,
prog='groupbunk')
# required arguments
parser.add_argument('username', type=str, help='Facebook username')
parser.add_argument('password', type=str, help='Facebook password')
# optional arguments
parser.add_argument('-eg', '--exgroups', type=str, metavar='', help='file with group names to exclude (one group per line)')
parser.add_argument('-et', '--eltimeout', type=int, metavar='', help='max timeout for elements to be loaded', default=30)
parser.add_argument('-sw', '--scrollwait', type=int, metavar='', help='time to wait after each scroll', default=4)
parser.add_argument('-gr', '--groupretry', type=int, metavar='', help='retry count while recapturing group names', default=5)
parser.add_argument('-dg', '--dumpgroups', type=str, metavar='', help='do not leave groups; only dump group names to a file')
parser.add_argument('-v', '--version', action='version', version='%(prog)s v.1.2')
args = parser.parse_args()
# ====================================================
# Setting up logger
# =====================================================
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s:%(name)s:%(lineno)d:%(levelname)s:%(message)s")
file_handler = logging.FileHandler(f'groupbunk_{datetime.now().strftime("%d_%m_%Y__%H_%M_%S")}.log', 'w', 'utf-8')
file_handler.setFormatter(formatter)
stdout_formatter = logging.Formatter("[*] => %(message)s")
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setFormatter(stdout_formatter)
logger.addHandler(file_handler)
logger.addHandler(stdout_handler)
#=======================================================
try:
display_intro()
logger.info("script started")
# loading group names to be excluded
if args.exgroups:
logger.info("Loading group names to be excluded")
excluded_group_names = get_excluded_group_names(args.exgroups)
IGNORE_DIV.extend(excluded_group_names)
options = Options()
# suppresses notifications
options.add_argument("--disable-notifications")
options.add_experimental_option('excludeSwitches', ['enable-logging'])
options.add_argument("--log-level=3")
logger.info("Downloading latest chrome webdriver")
# UNCOMMENT TO SPECIFY DRIVER LOCATION
# driver = webdriver.Chrome("D:/chromedriver/98/chromedriver.exe", options=options)
driver = webdriver.Chrome(ChromeDriverManager().install(), options=options)
if not driver:
raise Exception('Unable to download chrome webdriver for your version of Chrome browser')
logger.info("Successfully downloaded chrome webdriver")
wait = WebDriverWait(driver, args.eltimeout)
logger.info(f"Opening FB GROUPS URL: {FB_GROUP_URL}")
driver.get(FB_GROUP_URL)
logger.info("Sending username")
wait.until(EC.visibility_of_element_located((By.ID, 'email'))).send_keys(args.username)
logger.info("Sending password")
driver.find_element(By.ID, 'pass').send_keys(args.password)
logger.info("Clicking on Log In")
wait.until(EC.presence_of_element_located((By.ID, 'loginbutton'))).click()
# get all the links inside divs representing group names
group_links = get_group_link_elements(driver, wait)
if not group_links:
raise Exception("Unable to find links")
no_of_currently_loaded_links = 0
logger.info(f"Initial link count: {len(group_links)-3}")
logger.info("Scrolling down to capture all the links")
# scroll until no new group links are loaded
while len(group_links) > no_of_currently_loaded_links:
no_of_currently_loaded_links = len(group_links)
logger.info(f"Updated link count: {no_of_currently_loaded_links-3}")
scroll_into_view(driver, group_links[no_of_currently_loaded_links-1])
time.sleep(args.scrollwait)
# re-capturing
group_links = get_group_link_elements(driver, wait)
logger.info(f"Total number of links found: {len(group_links)-3}")
# only show the group names and exit
if args.dumpgroups:
logger.info('Only dumping group names to file. Not leaving groups')
logger.info(f"Dumping group names to: {args.dumpgroups}")
dump_groups(group_links, args.dumpgroups)
time_taken(start_time, logger)
cleanup_and_quit(driver)
# The first 3 links are Your feed, Discover, and Your notifications
i = 0
save_state = 0
no_of_retries = 0
failed_groups = []
total_groups = len(group_links)
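# Iterate over every captured link; on StaleElementReferenceException the links are recaptured
# and iteration resumes from the current index (up to --groupretry attempts).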
while i < total_groups:
try:
# need only the group name and not Last Active
group_name = group_links[i].text.split('\n')[0]
# if group name not in ignore list
if group_name.lower() not in IGNORE_DIV:
logger.info(f"Leaving group: {group_name}")
link = group_links[i].get_attribute('href')
logger.info(f"Opening group link: {link}")
switch_tab(driver, open_new_tab(driver))
driver.get(link)
if not leave_group(wait):
logger.info('Unable to leave the group. You might not be a member of this group.')
driver.close()
switch_tab(driver, driver.window_handles[0])
else:
if group_name.lower() not in ['your feed', 'discover', 'your notifications']:
logger.info(f"Skipping group : {group_name}")
i += 1
except StaleElementReferenceException:
logger.error('Captured group elements gone stale. Recapturing...')
if no_of_retries > args.groupretry:
logger.error('Reached max number of retry attempts')
break
save_state = i
group_links = get_group_link_elements(driver, wait)
no_of_retries += 1
except Exception as ex:
logger.error(f"Unable to leave group {group_name}. Error: {ex}")
failed_groups.append(group_name)
i += 1
total_no_of_groups = len(group_links)-3
total_no_failed_groups = len(failed_groups)
logger.info(f"Total groups: {total_no_of_groups}")
logger.info(f"No. of groups failed to leave: {total_no_failed_groups}")
logger.info(f"Success percentage: {((total_no_of_groups - total_no_failed_groups)/total_no_of_groups) * 100} %")
if failed_groups:
failed_group_names = ", ".join(failed_groups)
logger.info(f"Failed groups: \n{failed_group_names}")
except Exception as ex:
logger.error(f"Script ended with exception: {ex}")
finally:
time_taken(start_time, logger)
cleanup_and_quit(driver) | []
| []
| [
"WDM_LOG_LEVEL"
]
| [] | ["WDM_LOG_LEVEL"] | python | 1 | 0 | |
cmd/config.go | package cmd
import (
"bufio"
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"os/exec"
"os/user"
"path/filepath"
"regexp"
"runtime"
"strings"
"text/template"
"unicode"
"github.com/Masterminds/sprig"
"github.com/pelletier/go-toml"
"github.com/spf13/cobra"
"github.com/spf13/viper"
"github.com/twpayne/chezmoi/internal/chezmoi"
vfs "github.com/twpayne/go-vfs"
xdg "github.com/twpayne/go-xdg/v3"
bolt "go.etcd.io/bbolt"
yaml "gopkg.in/yaml.v2"
)
const commitMessageTemplateAsset = "assets/templates/COMMIT_MESSAGE.tmpl"
var whitespaceRegexp = regexp.MustCompile(`\s+`)
type sourceVCSConfig struct {
Command string
AutoCommit bool
AutoPush bool
Init interface{}
Pull interface{}
}
type templateConfig struct {
Options []string
}
// A Config represents a configuration.
type Config struct {
configFile string
err error
fs vfs.FS
mutator chezmoi.Mutator
SourceDir string
DestDir string
Umask permValue
DryRun bool
Follow bool
Remove bool
Verbose bool
Color string
Debug bool
GPG chezmoi.GPG
GPGRecipient string
SourceVCS sourceVCSConfig
Template templateConfig
Merge mergeConfig
Bitwarden bitwardenCmdConfig
CD cdCmdConfig
GenericSecret genericSecretCmdConfig
Gopass gopassCmdConfig
KeePassXC keePassXCCmdConfig
Lastpass lastpassCmdConfig
Onepassword onepasswordCmdConfig
Vault vaultCmdConfig
Pass passCmdConfig
Data map[string]interface{}
colored bool
maxDiffDataSize int
templateFuncs template.FuncMap
add addCmdConfig
data dataCmdConfig
dump dumpCmdConfig
edit editCmdConfig
_import importCmdConfig
init initCmdConfig
keyring keyringCmdConfig
purge purgeCmdConfig
remove removeCmdConfig
update updateCmdConfig
upgrade upgradeCmdConfig
Stdin io.Reader
Stdout io.Writer
Stderr io.Writer
bds *xdg.BaseDirectorySpecification
scriptStateBucket []byte
}
// A configOption sets an option on a Config.
type configOption func(*Config)
var (
formatMap = map[string]func(io.Writer, interface{}) error{
"json": func(w io.Writer, value interface{}) error {
e := json.NewEncoder(w)
e.SetIndent("", " ")
return e.Encode(value)
},
"toml": func(w io.Writer, value interface{}) error {
return toml.NewEncoder(w).Encode(value)
},
"yaml": func(w io.Writer, value interface{}) error {
return yaml.NewEncoder(w).Encode(value)
},
}
wellKnownAbbreviations = map[string]struct{}{
"ANSI": {},
"CPE": {},
"ID": {},
"URL": {},
}
identifierRegexp = regexp.MustCompile(`\A[\pL_][\pL\p{Nd}_]*\z`)
assets = make(map[string][]byte)
)
// newConfig creates a new Config with the given options.
func newConfig(options ...configOption) *Config {
c := &Config{
Umask: permValue(getUmask()),
Color: "auto",
SourceVCS: sourceVCSConfig{
Command: "git",
},
Template: templateConfig{
Options: chezmoi.DefaultTemplateOptions,
},
Merge: mergeConfig{
Command: "vimdiff",
},
maxDiffDataSize: 1 * 1024 * 1024, // 1MB
templateFuncs: sprig.TxtFuncMap(),
scriptStateBucket: []byte("script"),
Stdin: os.Stdin,
Stdout: os.Stdout,
Stderr: os.Stderr,
}
for _, option := range options {
option(c)
}
return c
}
func (c *Config) addTemplateFunc(key string, value interface{}) {
if c.templateFuncs == nil {
c.templateFuncs = make(template.FuncMap)
}
if _, ok := c.templateFuncs[key]; ok {
panic(fmt.Sprintf("Config.addTemplateFunc: %s already defined", key))
}
c.templateFuncs[key] = value
}
func (c *Config) applyArgs(args []string, persistentState chezmoi.PersistentState) error {
fs := vfs.NewReadOnlyFS(c.fs)
ts, err := c.getTargetState(nil)
if err != nil {
return err
}
applyOptions := &chezmoi.ApplyOptions{
DestDir: ts.DestDir,
DryRun: c.DryRun,
Ignore: ts.TargetIgnore.Match,
PersistentState: persistentState,
Remove: c.Remove,
ScriptStateBucket: c.scriptStateBucket,
Stdout: c.Stdout,
Umask: ts.Umask,
Verbose: c.Verbose,
}
if len(args) == 0 {
return ts.Apply(fs, c.mutator, c.Follow, applyOptions)
}
entries, err := c.getEntries(ts, args)
if err != nil {
return err
}
for _, entry := range entries {
if err := entry.Apply(fs, c.mutator, c.Follow, applyOptions); err != nil {
return err
}
}
return nil
}
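// autoCommit stages all changes in the source directory and commits them using a commit message
// rendered from the embedded COMMIT_MESSAGE template.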
func (c *Config) autoCommit(vcs VCS) error {
addArgs := vcs.AddArgs(".")
if addArgs == nil {
return fmt.Errorf("%s: autocommit not supported", c.SourceVCS.Command)
}
if err := c.run(c.SourceDir, c.SourceVCS.Command, addArgs...); err != nil {
return err
}
output, err := c.output(c.SourceDir, c.SourceVCS.Command, vcs.StatusArgs()...)
if err != nil {
return err
}
status, err := vcs.ParseStatusOutput(output)
if err != nil {
return err
}
commitMessageText, err := getAsset(commitMessageTemplateAsset)
if err != nil {
return err
}
commitMessageTmpl, err := template.New("commit_message").Funcs(c.templateFuncs).Parse(string(commitMessageText))
if err != nil {
return err
}
b := &bytes.Buffer{}
if err := commitMessageTmpl.Execute(b, status); err != nil {
return err
}
commitArgs := vcs.CommitArgs(b.String())
return c.run(c.SourceDir, c.SourceVCS.Command, commitArgs...)
}
func (c *Config) autoCommitAndAutoPush(cmd *cobra.Command, args []string) error {
vcs, err := c.getVCS()
if err != nil {
return err
}
if c.DryRun {
return nil
}
if c.SourceVCS.AutoCommit || c.SourceVCS.AutoPush {
if err := c.autoCommit(vcs); err != nil {
return err
}
}
if c.SourceVCS.AutoPush {
if err := c.autoPush(vcs); err != nil {
return err
}
}
return nil
}
func (c *Config) autoPush(vcs VCS) error {
pushArgs := vcs.PushArgs()
if pushArgs == nil {
return fmt.Errorf("%s: autopush not supported", c.SourceVCS.Command)
}
return c.run(c.SourceDir, c.SourceVCS.Command, pushArgs...)
}
// ensureNoError ensures that no error was encountered when loading c.
func (c *Config) ensureNoError(cmd *cobra.Command, args []string) error {
if c.err != nil {
return errors.New("config contains errors, aborting")
}
return nil
}
func (c *Config) ensureSourceDirectory() error {
info, err := c.fs.Stat(c.SourceDir)
switch {
case err == nil && info.IsDir():
private, err := chezmoi.IsPrivate(c.fs, c.SourceDir, true)
if err != nil {
return err
}
if !private {
if err := c.mutator.Chmod(c.SourceDir, 0700&^os.FileMode(c.Umask)); err != nil {
return err
}
}
return nil
case os.IsNotExist(err):
if err := vfs.MkdirAll(c.mutator, filepath.Dir(c.SourceDir), 0777&^os.FileMode(c.Umask)); err != nil {
return err
}
return c.mutator.Mkdir(c.SourceDir, 0700&^os.FileMode(c.Umask))
case err == nil:
return fmt.Errorf("%s: not a directory", c.SourceDir)
default:
return err
}
}
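// getData merges the computed default template data (exposed under the "chezmoi" key) with any
// user-supplied data from the config file.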
func (c *Config) getData() (map[string]interface{}, error) {
defaultData, err := c.getDefaultData()
if err != nil {
return nil, err
}
data := map[string]interface{}{
"chezmoi": defaultData,
}
for key, value := range c.Data {
data[key] = value
}
return data, nil
}
func (c *Config) getDefaultData() (map[string]interface{}, error) {
data := map[string]interface{}{
"arch": runtime.GOARCH,
"os": runtime.GOOS,
"sourceDir": c.SourceDir,
}
currentUser, err := user.Current()
if err != nil {
return nil, err
}
data["username"] = currentUser.Username
// user.LookupGroupId is generally unreliable:
//
// If CGO is enabled, then this uses an underlying C library call (e.g.
// getgrgid_r on Linux) and is trustworthy, except on recent versions of Go
// on Android, where LookupGroupId is not implemented.
//
// If CGO is disabled then the fallback implementation only searches
// /etc/group, which is typically empty if an external directory service is
// being used, and so the lookup fails.
//
// So, only set group if user.LookupGroupId does not return an error.
group, err := user.LookupGroupId(currentUser.Gid)
if err == nil {
data["group"] = group.Name
}
homedir, err := os.UserHomeDir()
if err != nil {
return nil, err
}
data["homedir"] = homedir
hostname, err := os.Hostname()
if err != nil {
return nil, err
}
data["fullHostname"] = hostname
data["hostname"] = strings.SplitN(hostname, ".", 2)[0]
osRelease, err := getOSRelease(c.fs)
if err == nil {
if osRelease != nil {
data["osRelease"] = upperSnakeCaseToCamelCaseMap(osRelease)
}
} else if !os.IsNotExist(err) {
return nil, err
}
kernelInfo, err := getKernelInfo(c.fs)
if err == nil && kernelInfo != nil {
data["kernel"] = kernelInfo
} else if err != nil {
return nil, err
}
return data, nil
}
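// getEditor returns the editor command and its arguments, preferring $VISUAL, then $EDITOR, then vi.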
func (c *Config) getEditor() (string, []string) {
editor := os.Getenv("VISUAL")
if editor == "" {
editor = os.Getenv("EDITOR")
}
if editor == "" {
editor = "vi"
}
components := whitespaceRegexp.Split(editor, -1)
return components[0], components[1:]
}
func (c *Config) getEntries(ts *chezmoi.TargetState, args []string) ([]chezmoi.Entry, error) {
entries := []chezmoi.Entry{}
for _, arg := range args {
targetPath, err := filepath.Abs(arg)
if err != nil {
return nil, err
}
entry, err := ts.Get(c.fs, targetPath)
if err != nil {
return nil, err
}
if entry == nil {
return nil, fmt.Errorf("%s: not in source state", arg)
}
entries = append(entries, entry)
}
return entries, nil
}
func (c *Config) getPersistentState(options *bolt.Options) (chezmoi.PersistentState, error) {
persistentStateFile := c.getPersistentStateFile()
if c.DryRun {
if options == nil {
options = &bolt.Options{}
}
options.ReadOnly = true
}
return chezmoi.NewBoltPersistentState(c.fs, persistentStateFile, os.FileMode(c.Umask), options)
}
func (c *Config) getPersistentStateFile() string {
if c.configFile != "" {
return filepath.Join(filepath.Dir(c.configFile), "chezmoistate.boltdb")
}
for _, configDir := range c.bds.ConfigDirs {
persistentStateFile := filepath.Join(configDir, "chezmoi", "chezmoistate.boltdb")
if _, err := os.Stat(persistentStateFile); err == nil {
return persistentStateFile
}
}
return filepath.Join(filepath.Dir(getDefaultConfigFile(c.bds)), "chezmoistate.boltdb")
}
func (c *Config) getTargetState(populateOptions *chezmoi.PopulateOptions) (*chezmoi.TargetState, error) {
fs := vfs.NewReadOnlyFS(c.fs)
data, err := c.getData()
if err != nil {
return nil, err
}
destDir := c.DestDir
if destDir != "" {
destDir, err = filepath.Abs(c.DestDir)
if err != nil {
return nil, err
}
}
// For backwards compatibility, prioritize gpgRecipient over gpg.recipient.
if c.GPGRecipient != "" {
c.GPG.Recipient = c.GPGRecipient
}
ts := chezmoi.NewTargetState(
chezmoi.WithDestDir(destDir),
chezmoi.WithGPG(&c.GPG),
chezmoi.WithSourceDir(c.SourceDir),
chezmoi.WithTemplateData(data),
chezmoi.WithTemplateFuncs(c.templateFuncs),
chezmoi.WithTemplateOptions(c.Template.Options),
chezmoi.WithUmask(os.FileMode(c.Umask)),
)
if err := ts.Populate(fs, populateOptions); err != nil {
return nil, err
}
if Version != nil && ts.MinVersion != nil && Version.LessThan(*ts.MinVersion) {
return nil, fmt.Errorf("chezmoi version %s too old, source state requires at least %s", Version, ts.MinVersion)
}
return ts, nil
}
func (c *Config) getVCS() (VCS, error) {
vcs, ok := vcses[filepath.Base(c.SourceVCS.Command)]
if !ok {
return nil, fmt.Errorf("%s: unsupported source VCS command", c.SourceVCS.Command)
}
return vcs, nil
}
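// output runs name argv... in dir and returns its captured standard output.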
func (c *Config) output(dir, name string, argv ...string) ([]byte, error) {
cmd := exec.Command(name, argv...)
if dir != "" {
var err error
cmd.Dir, err = c.fs.RawPath(dir)
if err != nil {
return nil, err
}
}
return c.mutator.IdempotentCmdOutput(cmd)
}
//nolint:unparam
func (c *Config) prompt(s, choices string) (byte, error) {
r := bufio.NewReader(c.Stdin)
for {
_, err := fmt.Printf("%s [%s]? ", s, strings.Join(strings.Split(choices, ""), ","))
if err != nil {
return 0, err
}
line, err := r.ReadString('\n')
if err != nil {
return 0, err
}
line = strings.TrimSpace(line)
if len(line) == 1 && strings.IndexByte(choices, line[0]) != -1 {
return line[0], nil
}
}
}
// run runs name argv... in dir.
func (c *Config) run(dir, name string, argv ...string) error {
cmd := exec.Command(name, argv...)
if dir != "" {
var err error
cmd.Dir, err = c.fs.RawPath(dir)
if err != nil {
return err
}
}
cmd.Stdin = c.Stdin
cmd.Stdout = c.Stdout
cmd.Stderr = c.Stdout
return c.mutator.RunCmd(cmd)
}
func (c *Config) runEditor(argv ...string) error {
editorName, editorArgs := c.getEditor()
return c.run("", editorName, append(editorArgs, argv...)...)
}
func (c *Config) validateData() error {
return validateKeys(config.Data, identifierRegexp)
}
func getAsset(name string) ([]byte, error) {
asset, ok := assets[name]
if !ok {
return nil, fmt.Errorf("%s: not found", name)
}
return asset, nil
}
func getDefaultConfigFile(bds *xdg.BaseDirectorySpecification) string {
// Search XDG Base Directory Specification config directories first.
for _, configDir := range bds.ConfigDirs {
for _, extension := range viper.SupportedExts {
configFilePath := filepath.Join(configDir, "chezmoi", "chezmoi."+extension)
if _, err := os.Stat(configFilePath); err == nil {
return configFilePath
}
}
}
// Fallback to XDG Base Directory Specification default.
return filepath.Join(bds.ConfigHome, "chezmoi", "chezmoi.toml")
}
func getDefaultSourceDir(bds *xdg.BaseDirectorySpecification) string {
// Check for XDG Base Directory Specification data directories first.
for _, dataDir := range bds.DataDirs {
sourceDir := filepath.Join(dataDir, "chezmoi")
if _, err := os.Stat(sourceDir); err == nil {
return sourceDir
}
}
// Fallback to XDG Base Directory Specification default.
return filepath.Join(bds.DataHome, "chezmoi")
}
// isWellKnownAbbreviation returns true if word is a well known abbreviation.
func isWellKnownAbbreviation(word string) bool {
_, ok := wellKnownAbbreviations[word]
return ok
}
func panicOnError(err error) {
if err != nil {
panic(err)
}
}
func printErrorAndExit(err error) {
fmt.Printf("chezmoi: %v\n", err)
os.Exit(1)
}
// titilize returns s, titilized.
func titilize(s string) string {
if s == "" {
return s
}
runes := []rune(s)
return string(append([]rune{unicode.ToTitle(runes[0])}, runes[1:]...))
}
// upperSnakeCaseToCamelCase converts a string in UPPER_SNAKE_CASE to
// camelCase.
func upperSnakeCaseToCamelCase(s string) string {
words := strings.Split(s, "_")
for i, word := range words {
if i == 0 {
words[i] = strings.ToLower(word)
} else if !isWellKnownAbbreviation(word) {
words[i] = titilize(strings.ToLower(word))
}
}
return strings.Join(words, "")
}
// upperSnakeCaseToCamelCaseKeys returns m with all keys converted from
// UPPER_SNAKE_CASE to camelCase.
func upperSnakeCaseToCamelCaseMap(m map[string]string) map[string]string {
result := make(map[string]string)
for k, v := range m {
result[upperSnakeCaseToCamelCase(k)] = v
}
return result
}
// validateKeys ensures that all keys in data match re.
func validateKeys(data interface{}, re *regexp.Regexp) error {
switch data := data.(type) {
case map[string]interface{}:
for key, value := range data {
if !re.MatchString(key) {
return fmt.Errorf("invalid key: %q", key)
}
if err := validateKeys(value, re); err != nil {
return err
}
}
case []interface{}:
for _, value := range data {
if err := validateKeys(value, re); err != nil {
return err
}
}
}
return nil
}
| [
"\"VISUAL\"",
"\"EDITOR\""
]
| []
| [
"VISUAL",
"EDITOR"
]
| [] | ["VISUAL", "EDITOR"] | go | 2 | 0 | |
prod-stack.py | import os
import sys
from troposphere import GetAtt, Output, Ref, Template, Sub, Base64
from troposphere.iam import Group, Policy, PolicyType, Role, InstanceProfile
from troposphere.sqs import Queue
from troposphere.dynamodb import Table, KeySchema, AttributeDefinition, \
ProvisionedThroughput
from troposphere.ecs import Cluster, TaskDefinition, ContainerDefinition, \
Service, Secret, Environment, DeploymentConfiguration, Volume, \
Host, MountPoint, PortMapping, ContainerDependency
from troposphere.ec2 import Instance, CreditSpecification, Tag, \
BlockDeviceMapping, EBSBlockDevice
from troposphere.cloudformation import Init, InitFile, InitFiles, \
InitConfig, InitService, Metadata
from troposphere.events import Rule, Target, EcsParameters
from troposphere.route53 import RecordSetType
ZONE_ID = os.environ.get('CKAN_ZONEID', False)
BOT_FQDN = 'netkan.ksp-ckan.space'
EMAIL = '[email protected]'
PARAM_NAMESPACE = '/NetKAN/Indexer/'
NETKAN_REMOTE = '[email protected]:KSP-CKAN/NetKAN.git'
NETKAN_USER = 'KSP-CKAN'
NETKAN_REPO = 'NetKAN'
CKANMETA_REMOTE = '[email protected]:KSP-CKAN/CKAN-meta.git'
CKANMETA_USER = 'KSP-CKAN'
CKANMETA_REPO = 'CKAN-meta'
STATUS_BUCKET = 'status.ksp-ckan.space'
status_key = 'status/netkan.json'
if not ZONE_ID:
print('Zone ID Required from EnvVar `CKAN_ZONEID`')
sys.exit()
t = Template()
t.set_description("Generate NetKAN Infrastructure CF Template")
# Inbound + Outbound SQS Queues
# Inbound: Scheduler Write, Inflation Read
# Outbound: Inflator Write, Indexer Read
inbound = t.add_resource(Queue("NetKANInbound",
QueueName="Inbound.fifo",
ReceiveMessageWaitTimeSeconds=20,
FifoQueue=True))
outbound = t.add_resource(Queue("NetKANOutbound",
QueueName="Outbound.fifo",
ReceiveMessageWaitTimeSeconds=20,
FifoQueue=True))
for queue in [inbound, outbound]:
t.add_output([
Output(
"{}QueueURL".format(queue.title),
Description="{} SQS Queue URL".format(queue.title),
Value=Ref(queue)
),
Output(
"{}QueueARN".format(queue.title),
Description="ARN of {} SQS Queue".format(queue.title),
Value=GetAtt(queue, "Arn")
),
])
# DynamoDB: NetKAN Status
netkan_db = t.add_resource(Table(
"NetKANStatus",
AttributeDefinitions=[
AttributeDefinition(
AttributeName="ModIdentifier",
AttributeType="S"
),
],
KeySchema=[
KeySchema(
AttributeName="ModIdentifier",
KeyType="HASH"
)
],
TableName="NetKANStatus",
ProvisionedThroughput=ProvisionedThroughput(
# The free tier allows for 25 R/W Capacity Units
# 5 allocated already for dev testing
ReadCapacityUnits=20,
WriteCapacityUnits=20
)
))
t.add_output(Output(
"TableName",
Value=Ref(netkan_db),
Description="Table name of the newly create DynamoDB table",
))
# Instance Role for Prod Indexing Instance to be able to
# access the relevant AWS resources. We can lock it all
# down to the container level, but this is unnecessary for
# now.
netkan_role = t.add_resource(Role(
"NetKANProdRole",
AssumeRolePolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": [
"ec2.amazonaws.com"
]
},
"Action": [
"sts:AssumeRole"
]
}
]
},
ManagedPolicyArns=[
"arn:aws:iam::aws:policy/service-role/AmazonEC2ContainerServiceforEC2Role",
],
Policies=[
Policy(
PolicyName="SQSProdPolicy",
PolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"sqs:SendMessage",
"sqs:DeleteMessage",
"sqs:PurgeQueue",
"sqs:ReceiveMessage",
"sqs:GetQueueUrl",
"sqs:GetQueueAttributes",
],
"Resource": [
GetAtt(inbound, "Arn"),
GetAtt(outbound, "Arn")
]
},
{
"Effect": "Allow",
"Action": "sqs:ListQueues",
"Resource": "*",
},
],
}
),
Policy(
PolicyName="DynamoDBProdPolicy",
PolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"dynamodb:DescribeTable",
"dynamodb:GetItem",
"dynamodb:Query",
"dynamodb:PutItem",
"dynamodb:UpdateItem",
"dynamodb:Scan",
"dynamodb:BatchWriteItem",
],
"Resource": [
GetAtt(netkan_db, "Arn")
]
},
{
"Effect": "Allow",
"Action": "dynamodb:ListTables",
"Resource": "*",
},
],
}
),
Policy(
PolicyName="S3StatusAccessProd",
PolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
"s3:GetObject",
"s3:ListBucket",
],
"Resource": [
"arn:aws:s3:::status.ksp-ckan.space/*"
]
},
],
}
),
Policy(
PolicyName="CertbotProd",
PolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"route53:ListHostedZones",
"route53:GetChange"
],
"Resource": [
"*"
]
},
{
"Effect": "Allow",
"Action": [
"route53:ChangeResourceRecordSets"
],
"Resource": [
"arn:aws:route53:::hostedzone/{}".format(
ZONE_ID
),
]
}
],
}
),
Policy(
PolicyName="AllowCloudWatchMetrics",
PolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"cloudwatch:GetMetricStatistics",
],
"Effect": "Allow",
"Resource": "*"
}
]
}
),
Policy(
PolicyName="AllowWebhooksRestart",
PolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"ecs:ListServices",
],
"Effect": "Allow",
"Resource": "*",
},
{
"Action": [
"ecs:DescribeServices",
],
"Effect": "Allow",
"Resource": Sub(
'arn:aws:ecs:${AWS::Region}:${AWS::AccountId}:service/NetKANCluster/${service}',
service=GetAtt('WebhooksService', 'Name'),
)
},
{
"Action": [
"ecs:UpdateService",
],
"Effect": "Allow",
"Resource": Sub(
'arn:aws:ecs:${AWS::Region}:${AWS::AccountId}:service/NetKANCluster/${service}',
service=GetAtt('WebhooksService', 'Name'),
)
},
]
}
)
]
))
netkan_profile = t.add_resource(InstanceProfile(
"NetKANProdProfile",
Roles=[Ref(netkan_role)]
))
# To access the Secrets Manager, the ECS agent needs the AssumeRole permission
# regardless of what the instance can access.
netkan_ecs_role = t.add_resource(Role(
"NetKANProdEcsRole",
AssumeRolePolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "ecs-tasks.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
},
Policies=[
Policy(
PolicyName="AllowParameterAccess",
PolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ssm:DescribeParameters"
],
"Resource": "*"
},
{
"Effect": "Allow",
"Action": [
"ssm:GetParameters"
],
"Resource": Sub(
"arn:aws:ssm:${AWS::Region}:${AWS::AccountId}:parameter${ns}*",
ns=PARAM_NAMESPACE
)
}
]
}
)
]
))
# To be able to schedule tasks, the scheduler needs to be allowed to perform
# the tasks.
scheduler_resources = []
for task in [
'Scheduler', 'SchedulerWebhooksPass', 'CertBot', 'StatusDumper',
'DownloadCounter', 'TicketCloser', 'AutoFreezer']:
scheduler_resources.append(Sub(
'arn:aws:ecs:*:${AWS::AccountId}:task-definition/NetKANBot${Task}:*',
Task=task
))
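# Illustrative example (account ID left as a placeholder, not from the source):
# the 'Scheduler' entry above renders to an ARN of the form
#   arn:aws:ecs:*:<account-id>:task-definition/NetKANBotScheduler:*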
netkan_scheduler_role = t.add_resource(Role(
"NetKANProdSchedulerRole",
AssumeRolePolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "events.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
},
Policies=[
Policy(
PolicyName="AllowEcsTaskScheduling",
PolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"ecs:RunTask"
],
"Resource": scheduler_resources,
"Condition": {
"ArnLike": {
"ecs:cluster": GetAtt('NetKANCluster', 'Arn')
}
}
},
{
"Effect": "Allow",
"Action": "iam:PassRole",
"Resource": [
"*"
],
"Condition": {
"StringLike": {
"iam:PassedToService": "ecs-tasks.amazonaws.com"
}
}
}
]
}
)
]
))
# Build Account Permissions
# It's useful for the CI to be able to update services upon build, so there
# is a service account whose keys will be exposed to CI to allow
# redeployment of services.
ksp_builder_group = t.add_resource(Group("KspCkanBuilderGroup"))
builder_services = []
for service in ['Indexer', 'Inflator', 'Webhooks']:
builder_services.append(
Sub(
'arn:aws:ecs:${AWS::Region}:${AWS::AccountId}:service/NetKANCluster/${service}',
service=GetAtt('{}Service'.format(service), 'Name'),
)
)
t.add_resource(PolicyType(
"KspCkanBuilderRole",
PolicyName="KspCkanBuilder",
Groups=[Ref(ksp_builder_group)],
PolicyDocument={
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"ecs:ListServices",
],
"Effect": "Allow",
"Resource": "*",
},
{
"Action": [
"ecs:DescribeServices",
],
"Effect": "Allow",
"Resource": builder_services
},
{
"Action": [
"ecs:UpdateService",
],
"Effect": "Allow",
"Resource": builder_services
},
{
"Effect": "Allow",
"Action": [
"s3:PutObject",
],
"Resource": [
"arn:aws:s3:::status.ksp-ckan.space/*"
],
},
]
}
))
# Indexer Compute
# We could utilise an autoscaling group, but that is way
# more complicated for our use case. If at some point we'd like
# to scale the service beyond a single instance (due to some
# infrastructure sponsorship) it wouldn't take more than
# adding an AutoScalingGroup + LoadBalancer to scale this.
netkan_ecs = t.add_resource(
Cluster('NetKANCluster', ClusterName='NetKANCluster')
)
netkan_userdata = Sub("""
#!/bin/bash -xe
echo ECS_CLUSTER=NetKANCluster > /etc/ecs/ecs.config
yum install -y aws-cfn-bootstrap
# Install the files and packages from the metadata
/opt/aws/bin/cfn-init -v --stack ${AWS::StackName} \
--resource NetKANCompute --region ${AWS::Region}
# ECS Volumes are a pain and I don't want to shave any more yaks
mkdir /mnt/letsencrypt
mkfs.ext4 -L CKANCACHE /dev/xvdh
mkdir -p /mnt/ckan_cache
echo "LABEL=CKANCACHE /mnt/ckan_cache ext4 defaults 0 2" >> /etc/fstab
mount -a
chown -R 1000:1000 /mnt/ckan_cache
# Docker doesn't see the new block device until restarted
service docker stop && service docker start
systemctl start ecs
# Start up the cfn-hup daemon to listen for changes
# to the metadata
/opt/aws/bin/cfn-hup || error_exit 'Failed to start cfn-hup'
# Signal the status from cfn-init
/opt/aws/bin/cfn-signal -e $? --stack ${AWS::StackName} \
--resource NetKANCompute --region ${AWS::Region}
""")
cfn_hup = InitFile(
content=Sub(
"[main]\nstack=${AWS::StackId}\nregion=${AWS::Region}\n"
),
mode='000400',
owner='root',
group='root'
)
reloader = InitFile(
content=Sub("""
[cfn-auto-reloader-hook]
triggers=post.add, post.update
path=Resources.NetKANCompute.Metadata.AWS::CloudFormation::Init
action=/opt/aws/bin/cfn-init -s ${AWS::StackId} -r NetKANCompute --region ${AWS::Region}
runas=root
""")
)
docker = InitFile(
content="""
{
"log-driver": "json-file",
"log-opts": {
"max-size": "20m",
"max-file": "3"
}
}
""")
cfn_service = InitService(
enabled=True,
ensureRunning=True,
files=[
'/etc/cfn/cfn-hup.conf',
'/etc/cfn/hooks.d/cfn-auto-reloader.conf',
]
)
docker_service = InitService(
enabled=True,
ensureRunning=True,
files=['/etc/docker/daemon.json']
)
netkan_instance = Instance(
'NetKANCompute',
# ECS Optimised us-west-2
ImageId='ami-0e434a58221275ed4',
InstanceType='t3.micro',
IamInstanceProfile=Ref(netkan_profile),
KeyName='techman83_alucard',
SecurityGroups=['ckan-bot'],
UserData=Base64(netkan_userdata),
# t3 instances are unlimited by default
CreditSpecification=CreditSpecification(CPUCredits='standard'),
Tags=[
Tag(Key='Name', Value='NetKAN Indexer'),
Tag(Key='Service', Value='Indexer'),
],
Metadata=Metadata(Init({
'config': InitConfig(
files=InitFiles({
'/etc/cfn/cfn-hup.conf': cfn_hup,
'/etc/cfn/hooks.d/cfn-auto-reloader.conf': reloader,
'/etc/docker/daemon.json': docker,
})
),
'services': {
'sysvinit': {
'cfn': cfn_service,
'docker': docker_service,
}
},
})),
BlockDeviceMappings=[
BlockDeviceMapping(
DeviceName='/dev/xvdh',
Ebs=EBSBlockDevice(
VolumeSize='50',
VolumeType='standard',
)
)
]
)
t.add_resource(netkan_instance)
t.add_resource(RecordSetType(
"NetKANDns",
HostedZoneId=ZONE_ID,
Comment="NetKAN Bot DNS",
Name=BOT_FQDN,
Type="A",
TTL="900",
ResourceRecords=[GetAtt('NetKANCompute', "PublicIp")],
))
services = [
{
'name': 'Indexer',
'command': 'indexer',
'memory': '156',
'secrets': [
'SSH_KEY', 'GH_Token',
],
'env': [
('CKANMETA_REMOTE', CKANMETA_REMOTE),
('CKANMETA_USER', CKANMETA_USER),
('CKANMETA_REPO', CKANMETA_REPO),
('SQS_QUEUE', GetAtt(outbound, 'QueueName')),
('AWS_DEFAULT_REGION', Sub('${AWS::Region}')),
],
'volumes': [
('ckan_cache', '/home/netkan/ckan_cache')
],
},
{
'name': 'Scheduler',
'command': 'scheduler',
'memory': '156',
'secrets': ['SSH_KEY'],
'env': [
('SQS_QUEUE', GetAtt(inbound, 'QueueName')),
('NETKAN_REMOTE', NETKAN_REMOTE),
('CKANMETA_REMOTE', CKANMETA_REMOTE),
('AWS_DEFAULT_REGION', Sub('${AWS::Region}')),
],
'schedule': 'rate(2 hours)',
},
{
'name': 'SchedulerWebhooksPass',
'command': [
'scheduler', '--group', 'webhooks',
'--max-queued', '2000',
'--min-credits', '100'
],
'memory': '156',
'secrets': ['SSH_KEY'],
'env': [
('SQS_QUEUE', GetAtt(inbound, 'QueueName')),
('NETKAN_REMOTE', NETKAN_REMOTE),
('CKANMETA_REMOTE', CKANMETA_REMOTE),
('AWS_DEFAULT_REGION', Sub('${AWS::Region}')),
],
'schedule': 'rate(1 day)',
},
{
'name': 'CleanCache',
'command': [
'clean-cache',
'--days', '30',
],
'env': [],
'volumes': [
('ckan_cache', '/home/netkan/ckan_cache')
],
'schedule': 'rate(1 day)',
},
{
'name': 'Inflator',
'image': 'kspckan/inflator',
'memory': '156',
'secrets': ['GH_Token'],
'env': [
(
'QUEUES', Sub(
'${Inbound},${Outbound}',
Inbound=GetAtt(inbound, 'QueueName'),
Outbound=GetAtt(outbound, 'QueueName')
)
),
('AWS_REGION', Sub('${AWS::Region}')),
],
'volumes': [
('ckan_cache', '/home/netkan/ckan_cache')
]
},
{
'name': 'StatusDumper',
'command': 'export-status-s3',
'env': [
('STATUS_BUCKET', STATUS_BUCKET),
('STATUS_KEY', status_key),
('STATUS_INTERVAL', '0'),
],
'schedule': 'rate(5 minutes)',
},
{
'name': 'DownloadCounter',
'command': 'download-counter',
'memory': '156',
'secrets': [
'SSH_KEY', 'GH_Token',
],
'env': [
('NETKAN_REMOTE', NETKAN_REMOTE),
('CKANMETA_REMOTE', CKANMETA_REMOTE),
],
'schedule': 'rate(1 day)',
},
{
'name': 'CertBot',
'image': 'certbot/dns-route53',
'command': [
'certonly', '-n', '--agree-tos', '--email',
EMAIL, '--dns-route53', '-d', BOT_FQDN
],
'volumes': [
('letsencrypt', '/etc/letsencrypt')
],
'schedule': 'cron(0 0 ? * MON *)',
},
# TODO: It'd be nice to detect a new cert, this'll do for now.
{
'name': 'RestartWebhooks',
'command': [
'redeploy-service',
'--cluster', 'NetKANCluster',
'--service-name', 'WebhooksService',
],
'env': [
('AWS_DEFAULT_REGION', Sub('${AWS::Region}')),
],
'schedule': 'cron(30 0 ? * MON *)',
},
{
'name': 'TicketCloser',
'command': 'ticket-closer',
'env': [],
'secrets': ['GH_Token'],
'schedule': 'rate(1 day)',
},
{
'name': 'AutoFreezer',
'command': 'auto-freezer',
'env': [
('NETKAN_REMOTE', NETKAN_REMOTE),
('NETKAN_USER', NETKAN_USER),
('NETKAN_REPO', NETKAN_REPO),
],
'secrets': [
'SSH_KEY', 'GH_Token',
],
'schedule': 'rate(7 days)',
},
{
'name': 'Webhooks',
'containers': [
{
'name': 'legacyhooks',
'image': 'kspckan/webhooks',
'memory': '156',
'secrets': [
'SSH_KEY', 'GH_Token', 'XKAN_GHSECRET',
'IA_access', 'IA_secret',
],
'env': [
('CKAN_meta', CKANMETA_REMOTE),
('NetKAN', NETKAN_REMOTE),
('IA_collection', 'kspckanmods'),
],
'volumes': [
('ckan_cache', '/home/netkan/ckan_cache')
],
},
{
'name': 'webhooks',
'entrypoint': '.local/bin/gunicorn',
'command': [
'-b', '0.0.0.0:5000', '--access-logfile', '-',
'netkan.webhooks:create_app()'
],
'secrets': [
'XKAN_GHSECRET', 'SSH_KEY',
],
'env': [
('NETKAN_REMOTE', NETKAN_REMOTE),
('CKANMETA_REMOTE', CKANMETA_REMOTE),
('AWS_DEFAULT_REGION', Sub('${AWS::Region}')),
('INFLATION_SQS_QUEUE', GetAtt(inbound, 'QueueName')),
],
},
{
'name': 'WebhooksProxy',
'image': 'kspckan/webhooks-proxy',
'ports': ['80', '443'],
'volumes': [
('letsencrypt', '/etc/letsencrypt')
],
'depends': ['webhooks', 'legacyhooks']
},
]
},
]
for service in services:
name = service['name']
schedule = service.get('schedule')
containers = service.get('containers', [service])
task = TaskDefinition(
'{}Task'.format(name),
ContainerDefinitions=[],
Family=Sub('${AWS::StackName}${name}', name=name),
ExecutionRoleArn=Ref(netkan_ecs_role),
Volumes=[],
DependsOn=[],
)
for container in containers:
secrets = [
'DISCORD_WEBHOOK_ID', 'DISCORD_WEBHOOK_TOKEN',
*container.get('secrets', [])
]
envs = container.get('env', [])
entrypoint = container.get('entrypoint')
command = container.get('command')
volumes = container.get('volumes', [])
ports = container.get('ports', [])
depends = container.get('depends', [])
definition = ContainerDefinition(
Image=container.get('image', 'kspckan/netkan'),
Memory=container.get('memory', '96'),
Name=container['name'],
Secrets=[
Secret(
Name=x,
ValueFrom='{}{}'.format(
PARAM_NAMESPACE, x
)
) for x in secrets
],
Environment=[
Environment(
Name=x[0], Value=x[1]
) for x in envs
],
MountPoints=[],
PortMappings=[],
DependsOn=[],
Links=[],
)
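# Illustrative only: with PARAM_NAMESPACE = '/NetKAN/Indexer/', a secret named
# 'GH_Token' above resolves to the SSM parameter '/NetKAN/Indexer/GH_Token'.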
if entrypoint:
entrypoint = entrypoint if isinstance(entrypoint, list) else [entrypoint]
definition.EntryPoint = entrypoint
if command:
command = command if isinstance(command, list) else [command]
definition.Command = command
for volume in volumes:
volume_name = '{}{}'.format(
name,
''.join([i for i in volume[0].capitalize() if i.isalpha()])
)
task.Volumes.append(
Volume(
Name=volume_name,
Host=Host(
SourcePath=('/mnt/{}'.format(volume[0]))
)
)
)
definition.MountPoints.append(
MountPoint(
ContainerPath=volume[1],
SourceVolume=volume_name
)
)
for port in ports:
definition.PortMappings.append(
PortMapping(
ContainerPort=port,
HostPort=port,
Protocol='tcp',
)
)
for depend in depends:
definition.DependsOn.append(
ContainerDependency(
Condition='START',
ContainerName=depend,
)
)
definition.Links.append(depend)
task.ContainerDefinitions.append(definition)
t.add_resource(task)
if schedule:
target = Target(
Id="{}-Schedule".format(name),
Arn=GetAtt(netkan_ecs, 'Arn'),
RoleArn=GetAtt(netkan_scheduler_role, 'Arn'),
EcsParameters=EcsParameters(
TaskDefinitionArn=Ref(task)
)
)
t.add_resource(Rule(
'{}Rule'.format(name),
Description='{} scheduled task'.format(name),
ScheduleExpression=schedule,
Targets=[target],
))
continue
t.add_resource(Service(
'{}Service'.format(name),
Cluster='NetKANCluster',
DesiredCount=1,
TaskDefinition=Ref(task),
# Allow for in place service redeployments
DeploymentConfiguration=DeploymentConfiguration(
MaximumPercent=100,
MinimumHealthyPercent=0
),
DependsOn=['NetKANCluster']
))
print(t.to_yaml())
| []
| []
| [
"CKAN_ZONEID"
]
| [] | ["CKAN_ZONEID"] | python | 1 | 0 | |
manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'oursite.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
vendor/src/github.com/cycps/xptools/dnsc/dns_client/main.go | package main
import (
"encoding/json"
"fmt"
"github.com/cycps/xptools/dnsc"
"io/ioutil"
"os"
"os/exec"
"regexp"
)
var cspec dnsc.XPClientSpec
var templateDir string
func resolveConfHead() {
dnsc.ApplyTemplate("resolve_conf_d_head", "head", templateDir, cspec)
}
func upfile() {
out, _ := exec.Command("ip", "route", "get", cspec.NSaddr).Output()
rx, _ := regexp.Compile("src\\s+(\\S+)")
m := rx.FindStringSubmatch(string(out))
cspec.Addr = m[len(m)-1]
dnsc.ApplyTemplate("upfile", "upfile", templateDir, cspec)
}
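// Hedged, illustrative example (addresses made up): `ip route get <NSaddr>` prints
// something like "10.0.0.5 dev eth0 src 10.0.0.42 uid 1000", and the regex above
// captures "10.0.0.42" as this client's source address.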
func dnsPrivate() {
dnsc.ApplyTemplate("dns.private", "dns.private", templateDir, cspec)
}
func dnsKey() {
dnsc.ApplyTemplate("dns.key", "dns.key", templateDir, cspec)
}
func setupScript() {
dnsc.CopyFile("setup_client.sh", templateDir)
os.Chmod("setup_client.sh", 0755)
}
func doUpdateScript() {
dnsc.CopyFile("do-nsupdate.sh", templateDir)
os.Chmod("do-nsupdate.sh", 0755)
}
func main() {
if len(os.Args) != 2 {
fmt.Fprintf(os.Stderr, "usage: dns_client <client spec>\n")
os.Exit(1)
}
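// Illustrative invocation (spec file name assumed): dns_client my-client-spec.json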
fmt.Printf("dns_client v0.1\n\n")
templateDir = os.Getenv("GOPATH") +
"/src/github.com/cycps/xptools/dnsc/client_templates"
fmt.Printf("template dir: %s\n", templateDir)
cspec_src, _ := ioutil.ReadFile(os.Args[1])
err := json.Unmarshal(cspec_src, &cspec)
if err != nil {
fmt.Fprintf(os.Stderr, "%s\n", "bad cspec file")
fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)
}
resolveConfHead()
upfile()
dnsPrivate()
dnsKey()
setupScript()
doUpdateScript()
}
| [
"\"GOPATH\""
]
| []
| [
"GOPATH"
]
| [] | ["GOPATH"] | go | 1 | 0 | |
kombu/connection.py | """
kombu.connection
================
Broker connection and pools.
"""
from __future__ import absolute_import
import os
import socket
from collections import OrderedDict
from contextlib import contextmanager
from itertools import count, cycle
from operator import itemgetter
# jython breaks on relative import for .exceptions for some reason
# (Issue #112)
from kombu import exceptions
from .five import Empty, range, string_t, text_t, LifoQueue as _LifoQueue
from .log import get_logger
from .transport import get_transport_cls, supports_librabbitmq
from .utils import cached_property, retry_over_time, shufflecycle, HashedSeq
from .utils.functional import lazy
from .utils.url import as_url, parse_url, quote, urlparse
__all__ = ['Connection', 'ConnectionPool', 'ChannelPool']
RESOLVE_ALIASES = {'pyamqp': 'amqp',
'librabbitmq': 'amqp'}
_LOG_CONNECTION = os.environ.get('KOMBU_LOG_CONNECTION', False)
_LOG_CHANNEL = os.environ.get('KOMBU_LOG_CHANNEL', False)
logger = get_logger(__name__)
roundrobin_failover = cycle
failover_strategies = {
'round-robin': roundrobin_failover,
'shuffle': shufflecycle,
}
class Connection(object):
"""A connection to the broker.
:param URL: Broker URL, or a list of URLs, e.g.
.. code-block:: python
Connection('amqp://guest:guest@localhost:5672//')
Connection('amqp://foo;amqp://bar', failover_strategy='round-robin')
Connection('redis://', transport_options={
'visibility_timeout': 3000,
})
import ssl
Connection('amqp://', login_method='EXTERNAL', ssl={
'ca_certs': '/etc/pki/tls/certs/something.crt',
'keyfile': '/etc/something/system.key',
'certfile': '/etc/something/system.cert',
'cert_reqs': ssl.CERT_REQUIRED,
})
.. admonition:: SSL compatibility
SSL currently only works with the py-amqp, amqplib, and qpid
transports. For other transports you can use stunnel.
:keyword hostname: Default host name/address if not provided in the URL.
:keyword userid: Default user name if not provided in the URL.
:keyword password: Default password if not provided in the URL.
:keyword virtual_host: Default virtual host if not provided in the URL.
:keyword port: Default port if not provided in the URL.
:keyword ssl: Use SSL to connect to the server. Default is ``False``.
May not be supported by the specified transport.
:keyword transport: Default transport if not specified in the URL.
:keyword connect_timeout: Timeout in seconds for connecting to the
server. May not be supported by the specified transport.
:keyword transport_options: A dict of additional connection arguments to
pass to alternate kombu channel implementations. Consult the transport
documentation for available options.
:keyword heartbeat: Heartbeat interval in int/float seconds.
Note that if heartbeats are enabled then the :meth:`heartbeat_check`
method must be called regularly, around once per second.
.. note::
The connection is established lazily when needed. If you need the
connection to be established, then force it by calling
:meth:`connect`::
>>> conn = Connection('amqp://')
>>> conn.connect()
and always remember to close the connection::
>>> conn.release()
"""
port = None
virtual_host = '/'
connect_timeout = 5
_closed = None
_connection = None
_default_channel = None
_transport = None
_logger = False
uri_prefix = None
#: The cache of declared entities is per connection,
#: in case the server loses data.
declared_entities = None
#: Iterator returning the next broker URL to try in the event
#: of connection failure (initialized by :attr:`failover_strategy`).
cycle = None
#: Additional transport specific options,
#: passed on to the transport instance.
transport_options = None
#: Strategy used to select new hosts when reconnecting after connection
#: failure. One of "round-robin", "shuffle" or any custom iterator
#: constantly yielding new URLs to try.
failover_strategy = 'round-robin'
#: Heartbeat value, currently only supported by the py-amqp transport.
heartbeat = None
hostname = userid = password = ssl = login_method = None
def __init__(self, hostname='localhost', userid=None,
password=None, virtual_host=None, port=None, insist=False,
ssl=False, transport=None, connect_timeout=5,
transport_options=None, login_method=None, uri_prefix=None,
heartbeat=0, failover_strategy='round-robin',
alternates=None, **kwargs):
alt = [] if alternates is None else alternates
# have to spell the args out, just to get nice docstrings :(
params = self._initial_params = {
'hostname': hostname, 'userid': userid,
'password': password, 'virtual_host': virtual_host,
'port': port, 'insist': insist, 'ssl': ssl,
'transport': transport, 'connect_timeout': connect_timeout,
'login_method': login_method, 'heartbeat': heartbeat
}
if hostname and not isinstance(hostname, string_t):
alt.extend(hostname)
hostname = alt[0]
if hostname and '://' in hostname:
if ';' in hostname:
alt.extend(hostname.split(';'))
hostname = alt[0]
if '+' in hostname[:hostname.index('://')]:
# e.g. sqla+mysql://root:masterkey@localhost/
params['transport'], params['hostname'] = \
hostname.split('+', 1)
transport = self.uri_prefix = params['transport']
else:
transport = transport or urlparse(hostname).scheme
if get_transport_cls(transport).can_parse_url:
# set the transport so that the default is not used.
params['transport'] = transport
else:
# we must parse the URL
params.update(parse_url(hostname))
self._init_params(**params)
# fallback hosts
self.alt = alt
self.failover_strategy = failover_strategies.get(
failover_strategy or 'round-robin') or failover_strategy
if self.alt:
self.cycle = self.failover_strategy(self.alt)
next(self.cycle) # skip first entry
if transport_options is None:
transport_options = {}
self.transport_options = transport_options
if _LOG_CONNECTION: # pragma: no cover
self._logger = True
if uri_prefix:
self.uri_prefix = uri_prefix
self.declared_entities = set()
def switch(self, url):
"""Switch connection parameters to use a new URL (does not
reconnect)"""
self.close()
self.declared_entities.clear()
self._closed = False
self._init_params(**dict(self._initial_params, **parse_url(url)))
def maybe_switch_next(self):
"""Switch to next URL given by the current failover strategy (if
any)."""
if self.cycle:
self.switch(next(self.cycle))
def _init_params(self, hostname, userid, password, virtual_host, port,
insist, ssl, transport, connect_timeout,
login_method, heartbeat):
transport = transport or 'amqp'
if transport == 'amqp' and supports_librabbitmq():
transport = 'librabbitmq'
self.hostname = hostname
self.userid = userid
self.password = password
self.login_method = login_method
self.virtual_host = virtual_host or self.virtual_host
self.port = port or self.port
self.insist = insist
self.connect_timeout = connect_timeout
self.ssl = ssl
self.transport_cls = transport
self.heartbeat = heartbeat and float(heartbeat)
def register_with_event_loop(self, loop):
self.transport.register_with_event_loop(self.connection, loop)
def _debug(self, msg, *args, **kwargs):
if self._logger: # pragma: no cover
fmt = '[Kombu connection:{id:#x}] {msg}'
logger.debug(fmt.format(id=id(self), msg=text_t(msg)),
*args, **kwargs)
def connect(self):
"""Establish connection to server immediately."""
self._closed = False
return self.connection
def channel(self):
"""Create and return a new channel."""
self._debug('create channel')
chan = self.transport.create_channel(self.connection)
if _LOG_CHANNEL: # pragma: no cover
from .utils.debug import Logwrapped
return Logwrapped(chan, 'kombu.channel',
'[Kombu channel:{0.channel_id}] ')
return chan
def heartbeat_check(self, rate=2):
"""Allow the transport to perform any periodic tasks
required to make heartbeats work. This should be called
approximately every second.
If the current transport does not support heartbeats then
this is a noop operation.
:keyword rate: Rate is how often the tick is called
compared to the actual heartbeat value. E.g. if
the heartbeat is set to 3 seconds, and the tick
is called every 3 / 2 seconds, then the rate is 2.
This value is currently unused by any transports.
"""
return self.transport.heartbeat_check(self.connection, rate=rate)
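# Hedged usage sketch (not part of the original API docs): a consuming loop
# typically interleaves draining and heartbeats, roughly:
#
#   conn = Connection('amqp://', heartbeat=10)
#   while True:
#       try:
#           conn.drain_events(timeout=1)
#       except socket.timeout:
#           conn.heartbeat_check()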
def drain_events(self, **kwargs):
"""Wait for a single event from the server.
:keyword timeout: Timeout in seconds before we give up.
:raises :exc:`socket.timeout`: if the timeout is exceeded.
"""
return self.transport.drain_events(self.connection, **kwargs)
def maybe_close_channel(self, channel):
"""Close given channel, but ignore connection and channel errors."""
try:
channel.close()
except (self.connection_errors + self.channel_errors):
pass
def _do_close_self(self):
# Close only connection and channel(s), but not transport.
self.declared_entities.clear()
if self._default_channel:
self.maybe_close_channel(self._default_channel)
if self._connection:
try:
self.transport.close_connection(self._connection)
except self.connection_errors + (AttributeError, socket.error):
pass
self._connection = None
def _close(self):
"""Really close connection, even if part of a connection pool."""
self._do_close_self()
if self._transport:
self._transport.client = None
self._transport = None
self._debug('closed')
self._closed = True
def collect(self, socket_timeout=None):
# amqp requires communication to close; we don't need that just
# to clear out references. Transport._collect can also be implemented
# by other transports that want fast cleanup after fork
try:
gc_transport = self._transport._collect
except AttributeError:
_timeo = socket.getdefaulttimeout()
socket.setdefaulttimeout(socket_timeout)
try:
self._close()
except socket.timeout:
pass
finally:
socket.setdefaulttimeout(_timeo)
else:
gc_transport(self._connection)
if self._transport:
self._transport.client = None
self._transport = None
self.declared_entities.clear()
self._connection = None
def release(self):
"""Close the connection (if open)."""
self._close()
close = release
def ensure_connection(self, errback=None, max_retries=None,
interval_start=2, interval_step=2, interval_max=30,
callback=None):
"""Ensure we have a connection to the server.
If not retry establishing the connection with the settings
specified.
:keyword errback: Optional callback called each time the connection
can't be established. Arguments provided are the exception
raised and the interval that will be slept ``(exc, interval)``.
:keyword max_retries: Maximum number of times to retry.
If this limit is exceeded the connection error will be re-raised.
:keyword interval_start: The number of seconds we start sleeping for.
:keyword interval_step: How many seconds added to the interval
for each retry.
:keyword interval_max: Maximum number of seconds to sleep between
each retry.
:keyword callback: Optional callback that is called for every
internal iteration (1 s)
"""
def on_error(exc, intervals, retries, interval=0):
round = self.completes_cycle(retries)
if round:
interval = next(intervals)
if errback:
errback(exc, interval)
self.maybe_switch_next() # select next host
return interval if round else 0
retry_over_time(self.connect, self.recoverable_connection_errors,
(), {}, on_error, max_retries,
interval_start, interval_step, interval_max, callback)
return self
def completes_cycle(self, retries):
"""Return true if the cycle is complete after number of `retries`."""
return not (retries + 1) % len(self.alt) if self.alt else True
def revive(self, new_channel):
"""Revive connection after connection re-established."""
if self._default_channel:
self.maybe_close_channel(self._default_channel)
self._default_channel = None
def _default_ensure_callback(self, exc, interval):
logger.error("Ensure: Operation error: %r. Retry in %ss",
exc, interval, exc_info=True)
def ensure(self, obj, fun, errback=None, max_retries=None,
interval_start=1, interval_step=1, interval_max=1,
on_revive=None):
"""Ensure operation completes, regardless of any channel/connection
errors occurring.
Will retry by establishing the connection, and reapplying
the function.
:param fun: Method to apply.
:keyword errback: Optional callback called each time the connection
can't be established. Arguments provided are the exception
raised and the interval that will be slept ``(exc, interval)``.
:keyword max_retries: Maximum number of times to retry.
If this limit is exceeded the connection error will be re-raised.
:keyword interval_start: The number of seconds we start sleeping for.
:keyword interval_step: How many seconds added to the interval
for each retry.
:keyword interval_max: Maximum number of seconds to sleep between
each retry.
**Example**
This is an example ensuring a publish operation::
>>> from kombu import Connection, Producer
>>> conn = Connection('amqp://')
>>> producer = Producer(conn)
>>> def errback(exc, interval):
... logger.error('Error: %r', exc, exc_info=1)
... logger.info('Retry in %s seconds.', interval)
>>> publish = conn.ensure(producer, producer.publish,
... errback=errback, max_retries=3)
>>> publish({'hello': 'world'}, routing_key='dest')
"""
def _ensured(*args, **kwargs):
got_connection = 0
conn_errors = self.recoverable_connection_errors
chan_errors = self.recoverable_channel_errors
has_modern_errors = hasattr(
self.transport, 'recoverable_connection_errors',
)
for retries in count(0): # for infinity
try:
return fun(*args, **kwargs)
except conn_errors as exc:
if got_connection and not has_modern_errors:
# transport can not distinguish between
# recoverable/irrecoverable errors, so we propagate
# the error if it persists after a new connection was
# successfully established.
raise
if max_retries is not None and retries > max_retries:
raise
self._debug('ensure connection error: %r', exc, exc_info=1)
self._connection = None
self._do_close_self()
errback and errback(exc, 0)
remaining_retries = None
if max_retries is not None:
remaining_retries = max(max_retries - retries, 1)
self.ensure_connection(errback,
remaining_retries,
interval_start,
interval_step,
interval_max)
new_channel = self.channel()
self.revive(new_channel)
obj.revive(new_channel)
if on_revive:
on_revive(new_channel)
got_connection += 1
except chan_errors as exc:
if max_retries is not None and retries > max_retries:
raise
self._debug('ensure channel error: %r', exc, exc_info=1)
errback and errback(exc, 0)
_ensured.__name__ = "%s(ensured)" % fun.__name__
_ensured.__doc__ = fun.__doc__
_ensured.__module__ = fun.__module__
return _ensured
def autoretry(self, fun, channel=None, **ensure_options):
"""Decorator for functions supporting a ``channel`` keyword argument.
The resulting callable will retry calling the function if
it raises connection or channel related errors.
The return value will be a tuple of ``(retval, last_created_channel)``.
If a ``channel`` is not provided, then one will be automatically
acquired (remember to close it afterwards).
See :meth:`ensure` for the full list of supported keyword arguments.
Example usage::
channel = connection.channel()
try:
ret, channel = connection.autoretry(publish_messages, channel)
finally:
channel.close()
"""
channels = [channel]
create_channel = self.channel
class Revival(object):
__name__ = getattr(fun, '__name__', None)
__module__ = getattr(fun, '__module__', None)
__doc__ = getattr(fun, '__doc__', None)
def revive(self, channel):
channels[0] = channel
def __call__(self, *args, **kwargs):
if channels[0] is None:
self.revive(create_channel())
return fun(*args, channel=channels[0], **kwargs), channels[0]
revive = Revival()
return self.ensure(revive, revive, **ensure_options)
def create_transport(self):
return self.get_transport_cls()(client=self)
def get_transport_cls(self):
"""Get the currently used transport class."""
transport_cls = self.transport_cls
if not transport_cls or isinstance(transport_cls, string_t):
transport_cls = get_transport_cls(transport_cls)
return transport_cls
def clone(self, **kwargs):
"""Create a copy of the connection with the same connection
settings."""
return self.__class__(**dict(self._info(resolve=False), **kwargs))
def get_heartbeat_interval(self):
return self.transport.get_heartbeat_interval(self.connection)
def _info(self, resolve=True):
transport_cls = self.transport_cls
if resolve:
transport_cls = RESOLVE_ALIASES.get(transport_cls, transport_cls)
D = self.transport.default_connection_params
hostname = self.hostname or D.get('hostname')
if self.uri_prefix:
hostname = '%s+%s' % (self.uri_prefix, hostname)
info = (
('hostname', hostname),
('userid', self.userid or D.get('userid')),
('password', self.password or D.get('password')),
('virtual_host', self.virtual_host or D.get('virtual_host')),
('port', self.port or D.get('port')),
('insist', self.insist),
('ssl', self.ssl),
('transport', transport_cls),
('connect_timeout', self.connect_timeout),
('transport_options', self.transport_options),
('login_method', self.login_method or D.get('login_method')),
('uri_prefix', self.uri_prefix),
('heartbeat', self.heartbeat),
('alternates', self.alt),
)
return info
def info(self):
"""Get connection info."""
return OrderedDict(self._info())
def __eqhash__(self):
return HashedSeq(self.transport_cls, self.hostname, self.userid,
self.password, self.virtual_host, self.port,
repr(self.transport_options))
def as_uri(self, include_password=False, mask='**',
getfields=itemgetter('port', 'userid', 'password',
'virtual_host', 'transport')):
"""Convert connection parameters to URL form."""
hostname = self.hostname or 'localhost'
if self.transport.can_parse_url:
if self.uri_prefix:
return '%s+%s' % (self.uri_prefix, hostname)
return self.hostname
fields = self.info()
port, userid, password, vhost, transport = getfields(fields)
scheme = ('{0}+{1}'.format(self.uri_prefix, transport)
if self.uri_prefix else transport)
return as_url(
scheme, hostname, port, userid, password, quote(vhost),
sanitize=not include_password, mask=mask,
)
def Pool(self, limit=None, preload=None):
"""Pool of connections.
See :class:`ConnectionPool`.
:keyword limit: Maximum number of active connections.
Default is no limit.
:keyword preload: Number of connections to preload
when the pool is created. Default is 0.
*Example usage*::
>>> connection = Connection('amqp://')
>>> pool = connection.Pool(2)
>>> c1 = pool.acquire()
>>> c2 = pool.acquire()
>>> c3 = pool.acquire()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "kombu/connection.py", line 354, in acquire
raise ConnectionLimitExceeded(self.limit)
kombu.exceptions.ConnectionLimitExceeded: 2
>>> c1.release()
>>> c3 = pool.acquire()
"""
return ConnectionPool(self, limit, preload)
def ChannelPool(self, limit=None, preload=None):
"""Pool of channels.
See :class:`ChannelPool`.
:keyword limit: Maximum number of active channels.
Default is no limit.
:keyword preload: Number of channels to preload
when the pool is created. Default is 0.
*Example usage*::
>>> connection = Connection('amqp://')
>>> pool = connection.ChannelPool(2)
>>> c1 = pool.acquire()
>>> c2 = pool.acquire()
>>> c3 = pool.acquire()
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "kombu/connection.py", line 354, in acquire
raise ChannelLimitExceeded(self.limit)
kombu.connection.ChannelLimitExceeded: 2
>>> c1.release()
>>> c3 = pool.acquire()
"""
return ChannelPool(self, limit, preload)
def Producer(self, channel=None, *args, **kwargs):
"""Create new :class:`kombu.Producer` instance using this
connection."""
from .messaging import Producer
return Producer(channel or self, *args, **kwargs)
def Consumer(self, queues=None, channel=None, *args, **kwargs):
"""Create new :class:`kombu.Consumer` instance using this
connection."""
from .messaging import Consumer
return Consumer(channel or self, queues, *args, **kwargs)
def SimpleQueue(self, name, no_ack=None, queue_opts=None,
exchange_opts=None, channel=None, **kwargs):
"""Create new :class:`~kombu.simple.SimpleQueue`, using a channel
from this connection.
If ``name`` is a string, a queue and exchange will be automatically
created using that name as the name of the queue and exchange,
also it will be used as the default routing key.
:param name: Name of the queue/or a :class:`~kombu.Queue`.
:keyword no_ack: Disable acknowledgements. Default is false.
:keyword queue_opts: Additional keyword arguments passed to the
constructor of the automatically created
:class:`~kombu.Queue`.
:keyword exchange_opts: Additional keyword arguments passed to the
constructor of the automatically created
:class:`~kombu.Exchange`.
:keyword channel: Custom channel to use. If not specified the
connection default channel is used.
"""
from .simple import SimpleQueue
return SimpleQueue(channel or self, name, no_ack, queue_opts,
exchange_opts, **kwargs)
def SimpleBuffer(self, name, no_ack=None, queue_opts=None,
exchange_opts=None, channel=None, **kwargs):
"""Create new :class:`~kombu.simple.SimpleQueue` using a channel
from this connection.
Same as :meth:`SimpleQueue`, but configured with buffering
semantics. The resulting queue and exchange will not be durable, and
auto delete is enabled. Messages will be transient (not persistent),
and acknowledgements are disabled (``no_ack``).
"""
from .simple import SimpleBuffer
return SimpleBuffer(channel or self, name, no_ack, queue_opts,
exchange_opts, **kwargs)
def _establish_connection(self):
self._debug('establishing connection...')
conn = self.transport.establish_connection()
self._debug('connection established: %r', self)
return conn
def __repr__(self):
"""``x.__repr__() <==> repr(x)``"""
return '<Connection: {0} at {1:#x}>'.format(self.as_uri(), id(self))
def __copy__(self):
"""``x.__copy__() <==> copy(x)``"""
return self.clone()
def __reduce__(self):
return self.__class__, tuple(self.info().values()), None
def __enter__(self):
return self
def __exit__(self, *args):
self.release()
@property
def qos_semantics_matches_spec(self):
return self.transport.qos_semantics_matches_spec(self.connection)
@property
def connected(self):
"""Return true if the connection has been established."""
return (not self._closed and
self._connection is not None and
self.transport.verify_connection(self._connection))
@property
def connection(self):
"""The underlying connection object.
.. warning::
This instance is transport specific, so do not
depend on the interface of this object.
"""
if not self._closed:
if not self.connected:
self.declared_entities.clear()
self._default_channel = None
self._connection = self._establish_connection()
self._closed = False
return self._connection
@property
def default_channel(self):
"""Default channel, created upon access and closed when the connection
is closed.
Can be used for automatic channel handling when you only need one
channel, and it is also the channel implicitly used when a connection
is passed, instead of a channel, to functions that require a channel.
"""
# make sure we're still connected, and if not refresh.
self.connection
if self._default_channel is None:
self._default_channel = self.channel()
return self._default_channel
@property
def host(self):
"""The host as a host name/port pair separated by colon."""
return ':'.join([self.hostname, str(self.port)])
@property
def transport(self):
if self._transport is None:
self._transport = self.create_transport()
return self._transport
@cached_property
def manager(self):
"""Experimental manager that can be used to manage/monitor the broker
instance. Not available for all transports."""
return self.transport.manager
def get_manager(self, *args, **kwargs):
return self.transport.get_manager(*args, **kwargs)
@cached_property
def recoverable_connection_errors(self):
"""List of connection related exceptions that can be recovered from,
but where the connection must be closed and re-established first."""
try:
return self.transport.recoverable_connection_errors
except AttributeError:
# There was no such classification before,
# and all errors were assumed to be recoverable,
# so this is a fallback for transports that do
# not support the new recoverable/irrecoverable classes.
return self.connection_errors + self.channel_errors
@cached_property
def recoverable_channel_errors(self):
"""List of channel related exceptions that can be automatically
recovered from without re-establishing the connection."""
try:
return self.transport.recoverable_channel_errors
except AttributeError:
return ()
@cached_property
def connection_errors(self):
"""List of exceptions that may be raised by the connection."""
return self.transport.connection_errors
@cached_property
def channel_errors(self):
"""List of exceptions that may be raised by the channel."""
return self.transport.channel_errors
@property
def supports_heartbeats(self):
return self.transport.implements.heartbeats
@property
def is_evented(self):
return self.transport.implements.async
BrokerConnection = Connection
class Resource(object):
LimitExceeded = exceptions.LimitExceeded
def __init__(self, limit=None, preload=None):
self.limit = limit
self.preload = preload or 0
self._closed = False
self._resource = _LifoQueue()
self._dirty = set()
self.setup()
def setup(self):
raise NotImplementedError('subclass responsibility')
def _add_when_empty(self):
if self.limit and len(self._dirty) >= self.limit:
raise self.LimitExceeded(self.limit)
# All taken, put a new one on the queue and
# try to get again; this way the first in line
# will get the resource.
self._resource.put_nowait(self.new())
def acquire(self, block=False, timeout=None):
"""Acquire resource.
:keyword block: If the limit is exceeded,
block until there is an available item.
:keyword timeout: Timeout to wait
if ``block`` is true. Default is :const:`None` (forever).
:raises LimitExceeded: if block is false
and the limit has been exceeded.
"""
if self._closed:
raise RuntimeError('Acquire on closed pool')
if self.limit:
while 1:
try:
R = self._resource.get(block=block, timeout=timeout)
except Empty:
self._add_when_empty()
else:
try:
R = self.prepare(R)
except BaseException:
if isinstance(R, lazy):
# not evaluated yet, just put it back
self._resource.put_nowait(R)
else:
# evaluated, so must try to release/close first.
self.release(R)
raise
self._dirty.add(R)
break
else:
R = self.prepare(self.new())
def release():
"""Release resource so it can be used by another thread.
The caller is responsible for discarding the object,
and to never use the resource again. A new resource must
be acquired if so needed.
"""
self.release(R)
R.release = release
return R
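# Hedged usage sketch (caller code assumed, not from this file): a bounded pool
# pairs acquire with release, e.g.
#
#   pool = connection.Pool(limit=2)
#   conn = pool.acquire(block=True)
#   try:
#       pass  # use conn
#   finally:
#       conn.release()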
def prepare(self, resource):
return resource
def close_resource(self, resource):
resource.close()
def release_resource(self, resource):
pass
def replace(self, resource):
"""Replace resource with a new instance. This can be used in case
of defective resources."""
if self.limit:
self._dirty.discard(resource)
self.close_resource(resource)
def release(self, resource):
if self.limit:
self._dirty.discard(resource)
self._resource.put_nowait(resource)
self.release_resource(resource)
else:
self.close_resource(resource)
def collect_resource(self, resource):
pass
def force_close_all(self):
"""Close and remove all resources in the pool (also those in use).
Can be used to close resources from parent processes
after fork (e.g. sockets/connections).
"""
self._closed = True
dirty = self._dirty
resource = self._resource
while 1: # - acquired
try:
dres = dirty.pop()
except KeyError:
break
try:
self.collect_resource(dres)
except AttributeError: # Issue #78
pass
while 1: # - available
# deque supports '.clear', but lists do not, so for that
# reason we use pop here, so that the underlying object can
# be any object supporting '.pop' and '.append'.
try:
res = resource.queue.pop()
except IndexError:
break
try:
self.collect_resource(res)
except AttributeError:
pass # Issue #78
if os.environ.get('KOMBU_DEBUG_POOL'): # pragma: no cover
_orig_acquire = acquire
_orig_release = release
_next_resource_id = 0
def acquire(self, *args, **kwargs): # noqa
import traceback
id = self._next_resource_id = self._next_resource_id + 1
print('+{0} ACQUIRE {1}'.format(id, self.__class__.__name__))
r = self._orig_acquire(*args, **kwargs)
r._resource_id = id
print('-{0} ACQUIRE {1}'.format(id, self.__class__.__name__))
if not hasattr(r, 'acquired_by'):
r.acquired_by = []
r.acquired_by.append(traceback.format_stack())
return r
def release(self, resource): # noqa
id = resource._resource_id
print('+{0} RELEASE {1}'.format(id, self.__class__.__name__))
r = self._orig_release(resource)
print('-{0} RELEASE {1}'.format(id, self.__class__.__name__))
self._next_resource_id -= 1
return r
class ConnectionPool(Resource):
LimitExceeded = exceptions.ConnectionLimitExceeded
def __init__(self, connection, limit=None, preload=None):
self.connection = connection
super(ConnectionPool, self).__init__(limit=limit,
preload=preload)
def new(self):
return self.connection.clone()
def release_resource(self, resource):
try:
resource._debug('released')
except AttributeError:
pass
def close_resource(self, resource):
resource._close()
def collect_resource(self, resource, socket_timeout=0.1):
return resource.collect(socket_timeout)
@contextmanager
def acquire_channel(self, block=False):
with self.acquire(block=block) as connection:
yield connection, connection.default_channel
def setup(self):
if self.limit:
for i in range(self.limit):
if i < self.preload:
conn = self.new()
conn.connect()
else:
conn = lazy(self.new)
self._resource.put_nowait(conn)
def prepare(self, resource):
if callable(resource):
resource = resource()
resource._debug('acquired')
return resource
class ChannelPool(Resource):
LimitExceeded = exceptions.ChannelLimitExceeded
def __init__(self, connection, limit=None, preload=None):
self.connection = connection
super(ChannelPool, self).__init__(limit=limit,
preload=preload)
def new(self):
return lazy(self.connection.channel)
def setup(self):
channel = self.new()
if self.limit:
for i in range(self.limit):
self._resource.put_nowait(
i < self.preload and channel() or lazy(channel))
def prepare(self, channel):
if callable(channel):
channel = channel()
return channel
def maybe_channel(channel):
"""Return the default channel if argument is a connection instance,
otherwise just return the channel given."""
if isinstance(channel, Connection):
return channel.default_channel
return channel
def is_connection(obj):
return isinstance(obj, Connection)
| []
| []
| [
"KOMBU_LOG_CONNECTION",
"KOMBU_LOG_CHANNEL",
"KOMBU_DEBUG_POOL"
]
| [] | ["KOMBU_LOG_CONNECTION", "KOMBU_LOG_CHANNEL", "KOMBU_DEBUG_POOL"] | python | 3 | 0 | |
vendor/github.com/hashicorp/vault/plugins/database/postgresql/postgresql_test.go | package postgresql
import (
"database/sql"
"fmt"
"os"
"strings"
"sync"
"testing"
"time"
"github.com/hashicorp/vault/builtin/logical/database/dbplugin"
"github.com/hashicorp/vault/plugins/helper/database/connutil"
dockertest "gopkg.in/ory-am/dockertest.v3"
)
var (
testPostgresImagePull sync.Once
)
func preparePostgresTestContainer(t *testing.T) (cleanup func(), retURL string) {
if os.Getenv("PG_URL") != "" {
return func() {}, os.Getenv("PG_URL")
}
pool, err := dockertest.NewPool("")
if err != nil {
t.Fatalf("Failed to connect to docker: %s", err)
}
resource, err := pool.Run("postgres", "latest", []string{"POSTGRES_PASSWORD=secret", "POSTGRES_DB=database"})
if err != nil {
t.Fatalf("Could not start local PostgreSQL docker container: %s", err)
}
cleanup = func() {
err := pool.Purge(resource)
if err != nil {
t.Fatalf("Failed to cleanup local container: %s", err)
}
}
retURL = fmt.Sprintf("postgres://postgres:secret@localhost:%s/database?sslmode=disable", resource.GetPort("5432/tcp"))
// exponential backoff-retry
if err = pool.Retry(func() error {
var err error
var db *sql.DB
db, err = sql.Open("postgres", retURL)
if err != nil {
return err
}
return db.Ping()
}); err != nil {
t.Fatalf("Could not connect to PostgreSQL docker container: %s", err)
}
return
}
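// To point these tests at an existing server instead of Docker, set PG_URL before
// running them; the URL below is only an illustrative example:
//   PG_URL="postgres://postgres:secret@localhost:5432/database?sslmode=disable" go test ./...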
func TestPostgreSQL_Initialize(t *testing.T) {
cleanup, connURL := preparePostgresTestContainer(t)
defer cleanup()
connectionDetails := map[string]interface{}{
"connection_url": connURL,
}
dbRaw, _ := New()
db := dbRaw.(*PostgreSQL)
connProducer := db.ConnectionProducer.(*connutil.SQLConnectionProducer)
err := db.Initialize(connectionDetails, true)
if err != nil {
t.Fatalf("err: %s", err)
}
if !connProducer.Initialized {
t.Fatal("Database should be initalized")
}
err = db.Close()
if err != nil {
t.Fatalf("err: %s", err)
}
}
func TestPostgreSQL_CreateUser(t *testing.T) {
cleanup, connURL := preparePostgresTestContainer(t)
defer cleanup()
connectionDetails := map[string]interface{}{
"connection_url": connURL,
}
dbRaw, _ := New()
db := dbRaw.(*PostgreSQL)
err := db.Initialize(connectionDetails, true)
if err != nil {
t.Fatalf("err: %s", err)
}
// Test with no configured Creation Statement
_, _, err = db.CreateUser(dbplugin.Statements{}, "test", time.Now().Add(time.Minute))
if err == nil {
t.Fatal("Expected error when no creation statement is provided")
}
statements := dbplugin.Statements{
CreationStatements: testPostgresRole,
}
username, password, err := db.CreateUser(statements, "test", time.Now().Add(time.Minute))
if err != nil {
t.Fatalf("err: %s", err)
}
if err = testCredsExist(t, connURL, username, password); err != nil {
t.Fatalf("Could not connect with new credentials: %s", err)
}
statements.CreationStatements = testPostgresReadOnlyRole
username, password, err = db.CreateUser(statements, "test", time.Now().Add(time.Minute))
if err != nil {
t.Fatalf("err: %s", err)
}
if err = testCredsExist(t, connURL, username, password); err != nil {
t.Fatalf("Could not connect with new credentials: %s", err)
}
}
func TestPostgreSQL_RenewUser(t *testing.T) {
cleanup, connURL := preparePostgresTestContainer(t)
defer cleanup()
connectionDetails := map[string]interface{}{
"connection_url": connURL,
}
dbRaw, _ := New()
db := dbRaw.(*PostgreSQL)
err := db.Initialize(connectionDetails, true)
if err != nil {
t.Fatalf("err: %s", err)
}
statements := dbplugin.Statements{
CreationStatements: testPostgresRole,
}
username, password, err := db.CreateUser(statements, "test", time.Now().Add(2*time.Second))
if err != nil {
t.Fatalf("err: %s", err)
}
if err = testCredsExist(t, connURL, username, password); err != nil {
t.Fatalf("Could not connect with new credentials: %s", err)
}
err = db.RenewUser(statements, username, time.Now().Add(time.Minute))
if err != nil {
t.Fatalf("err: %s", err)
}
// Sleep longer than the initial expiration time
time.Sleep(2 * time.Second)
if err = testCredsExist(t, connURL, username, password); err != nil {
t.Fatalf("Could not connect with new credentials: %s", err)
}
}
func TestPostgreSQL_RevokeUser(t *testing.T) {
cleanup, connURL := preparePostgresTestContainer(t)
defer cleanup()
connectionDetails := map[string]interface{}{
"connection_url": connURL,
}
dbRaw, _ := New()
db := dbRaw.(*PostgreSQL)
err := db.Initialize(connectionDetails, true)
if err != nil {
t.Fatalf("err: %s", err)
}
statements := dbplugin.Statements{
CreationStatements: testPostgresRole,
}
username, password, err := db.CreateUser(statements, "test", time.Now().Add(2*time.Second))
if err != nil {
t.Fatalf("err: %s", err)
}
if err = testCredsExist(t, connURL, username, password); err != nil {
t.Fatalf("Could not connect with new credentials: %s", err)
}
// Test default revoke statements
err = db.RevokeUser(statements, username)
if err != nil {
t.Fatalf("err: %s", err)
}
if err := testCredsExist(t, connURL, username, password); err == nil {
t.Fatal("Credentials were not revoked")
}
username, password, err = db.CreateUser(statements, "test", time.Now().Add(2*time.Second))
if err != nil {
t.Fatalf("err: %s", err)
}
if err = testCredsExist(t, connURL, username, password); err != nil {
t.Fatalf("Could not connect with new credentials: %s", err)
}
// Test custom revoke statements
statements.RevocationStatements = defaultPostgresRevocationSQL
err = db.RevokeUser(statements, username)
if err != nil {
t.Fatalf("err: %s", err)
}
if err := testCredsExist(t, connURL, username, password); err == nil {
t.Fatal("Credentials were not revoked")
}
}
func testCredsExist(t testing.TB, connURL, username, password string) error {
// Log in with the new creds
connURL = strings.Replace(connURL, "postgres:secret", fmt.Sprintf("%s:%s", username, password), 1)
db, err := sql.Open("postgres", connURL)
if err != nil {
return err
}
defer db.Close()
return db.Ping()
}
const testPostgresRole = `
CREATE ROLE "{{name}}" WITH
LOGIN
PASSWORD '{{password}}'
VALID UNTIL '{{expiration}}';
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA public TO "{{name}}";
`
const testPostgresReadOnlyRole = `
CREATE ROLE "{{name}}" WITH
LOGIN
PASSWORD '{{password}}'
VALID UNTIL '{{expiration}}';
GRANT SELECT ON ALL TABLES IN SCHEMA public TO "{{name}}";
GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO "{{name}}";
`
const testPostgresBlockStatementRole = `
DO $$
BEGIN
IF NOT EXISTS (SELECT * FROM pg_catalog.pg_roles WHERE rolname='foo-role') THEN
CREATE ROLE "foo-role";
CREATE SCHEMA IF NOT EXISTS foo AUTHORIZATION "foo-role";
ALTER ROLE "foo-role" SET search_path = foo;
GRANT TEMPORARY ON DATABASE "postgres" TO "foo-role";
GRANT ALL PRIVILEGES ON SCHEMA foo TO "foo-role";
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA foo TO "foo-role";
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA foo TO "foo-role";
GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA foo TO "foo-role";
END IF;
END
$$
CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}';
GRANT "foo-role" TO "{{name}}";
ALTER ROLE "{{name}}" SET search_path = foo;
GRANT CONNECT ON DATABASE "postgres" TO "{{name}}";
`
var testPostgresBlockStatementRoleSlice = []string{
`
DO $$
BEGIN
IF NOT EXISTS (SELECT * FROM pg_catalog.pg_roles WHERE rolname='foo-role') THEN
CREATE ROLE "foo-role";
CREATE SCHEMA IF NOT EXISTS foo AUTHORIZATION "foo-role";
ALTER ROLE "foo-role" SET search_path = foo;
GRANT TEMPORARY ON DATABASE "postgres" TO "foo-role";
GRANT ALL PRIVILEGES ON SCHEMA foo TO "foo-role";
GRANT ALL PRIVILEGES ON ALL TABLES IN SCHEMA foo TO "foo-role";
GRANT ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA foo TO "foo-role";
GRANT ALL PRIVILEGES ON ALL FUNCTIONS IN SCHEMA foo TO "foo-role";
END IF;
END
$$
`,
`CREATE ROLE "{{name}}" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}';`,
`GRANT "foo-role" TO "{{name}}";`,
`ALTER ROLE "{{name}}" SET search_path = foo;`,
`GRANT CONNECT ON DATABASE "postgres" TO "{{name}}";`,
}
const defaultPostgresRevocationSQL = `
REVOKE ALL PRIVILEGES ON ALL TABLES IN SCHEMA public FROM "{{name}}";
REVOKE ALL PRIVILEGES ON ALL SEQUENCES IN SCHEMA public FROM "{{name}}";
REVOKE USAGE ON SCHEMA public FROM "{{name}}";
DROP ROLE IF EXISTS "{{name}}";
`
| [
"\"PG_URL\"",
"\"PG_URL\""
]
| []
| [
"PG_URL"
]
| [] | ["PG_URL"] | go | 1 | 0 | |
great_expectations/cli/v012/toolkit.py | import datetime
import os
import subprocess
import sys
import warnings
from typing import Optional, Union
import click
from ruamel.yaml import YAML
from ruamel.yaml.compat import StringIO
from great_expectations import exceptions as ge_exceptions
from great_expectations.checkpoint import Checkpoint, LegacyCheckpoint
from great_expectations.cli.v012.cli_messages import SECTION_SEPARATOR
from great_expectations.cli.v012.datasource import get_batch_kwargs
from great_expectations.cli.v012.docs import build_docs
from great_expectations.cli.v012.upgrade_helpers import GE_UPGRADE_HELPER_VERSION_MAP
from great_expectations.cli.v012.util import cli_colorize_string, cli_message
from great_expectations.core.batch import Batch
from great_expectations.core.expectation_suite import ExpectationSuite
from great_expectations.core.id_dict import BatchKwargs
from great_expectations.core.usage_statistics.util import send_usage_message
from great_expectations.data_asset import DataAsset
from great_expectations.data_context.data_context import DataContext
from great_expectations.data_context.types.base import CURRENT_GE_CONFIG_VERSION
from great_expectations.data_context.types.resource_identifiers import (
ExpectationSuiteIdentifier,
RunIdentifier,
ValidationResultIdentifier,
)
from great_expectations.datasource import Datasource
from great_expectations.profile import BasicSuiteBuilderProfiler
EXIT_UPGRADE_CONTINUATION_MESSAGE = (
"\nOk, exiting now. To upgrade at a later time, use the following command: "
"<cyan>great_expectations project upgrade</cyan>\n\nTo learn more about the upgrade "
"process, visit "
"<cyan>https://docs.greatexpectations.io/en/latest/how_to_guides/migrating_versions.html"
"</cyan>.\n"
)
class MyYAML(YAML):
# copied from https://yaml.readthedocs.io/en/latest/example.html#output-of-dump-as-a-string
def dump(self, data, stream=None, **kw):
inefficient = False
if stream is None:
inefficient = True
stream = StringIO()
YAML.dump(self, data, stream, **kw)
if inefficient:
return stream.getvalue()
yaml = MyYAML() # or typ='safe'/'unsafe' etc
yaml.indent(mapping=2, sequence=4, offset=2)
yaml.default_flow_style = False
def create_expectation_suite(
context,
datasource_name=None,
batch_kwargs_generator_name=None,
generator_asset=None,
batch_kwargs=None,
expectation_suite_name=None,
additional_batch_kwargs=None,
empty_suite=False,
show_intro_message=False,
flag_build_docs=True,
open_docs=False,
profiler_configuration="demo",
data_asset_name=None,
):
"""
Create a new expectation suite.
WARNING: the flow and name of this method and its interaction with _profile_to_create_a_suite
require a serious revisiting.
:return: a tuple: (success, suite name, profiling_results)
"""
if generator_asset:
warnings.warn(
"The 'generator_asset' argument will be deprecated and renamed to 'data_asset_name'. "
"Please update code accordingly.",
DeprecationWarning,
)
data_asset_name = generator_asset
if show_intro_message and not empty_suite:
cli_message(
"\n<cyan>========== Create sample Expectations ==========</cyan>\n\n"
)
data_source = select_datasource(context, datasource_name=datasource_name)
if data_source is None:
# select_datasource takes care of displaying an error message, so all that is left to do here is exit.
sys.exit(1)
datasource_name = data_source.name
if expectation_suite_name in context.list_expectation_suite_names():
tell_user_suite_exists(expectation_suite_name)
sys.exit(1)
if (
batch_kwargs_generator_name is None
or data_asset_name is None
or batch_kwargs is None
):
(
datasource_name,
batch_kwargs_generator_name,
data_asset_name,
batch_kwargs,
) = get_batch_kwargs(
context,
datasource_name=datasource_name,
batch_kwargs_generator_name=batch_kwargs_generator_name,
data_asset_name=data_asset_name,
additional_batch_kwargs=additional_batch_kwargs,
)
# In this case, we have "consumed" the additional_batch_kwargs
additional_batch_kwargs = {}
if expectation_suite_name is None:
default_expectation_suite_name = _get_default_expectation_suite_name(
batch_kwargs, data_asset_name
)
while True:
expectation_suite_name = click.prompt(
"\nName the new Expectation Suite",
default=default_expectation_suite_name,
)
if expectation_suite_name in context.list_expectation_suite_names():
tell_user_suite_exists(expectation_suite_name)
else:
break
if empty_suite:
create_empty_suite(context, expectation_suite_name, batch_kwargs)
return True, expectation_suite_name, None
profiling_results = _profile_to_create_a_suite(
additional_batch_kwargs,
batch_kwargs,
batch_kwargs_generator_name,
context,
datasource_name,
expectation_suite_name,
data_asset_name,
profiler_configuration,
)
if flag_build_docs:
build_docs(context, view=False)
if open_docs:
attempt_to_open_validation_results_in_data_docs(context, profiling_results)
return True, expectation_suite_name, profiling_results
def _profile_to_create_a_suite(
additional_batch_kwargs,
batch_kwargs,
batch_kwargs_generator_name,
context,
datasource_name,
expectation_suite_name,
data_asset_name,
profiler_configuration,
):
cli_message(
"""
Great Expectations will choose a couple of columns and generate expectations about them
to demonstrate some examples of assertions you can make about your data.
Great Expectations will store these expectations in a new Expectation Suite '{:s}' here:
{:s}
""".format(
expectation_suite_name,
context.stores[
context.expectations_store_name
].store_backend.get_url_for_key(
ExpectationSuiteIdentifier(
expectation_suite_name=expectation_suite_name
).to_tuple()
),
)
)
confirm_proceed_or_exit()
# TODO this may not apply
cli_message("\nGenerating example Expectation Suite...")
run_id = datetime.datetime.now(datetime.timezone.utc).strftime("%Y%m%dT%H%M%S.%fZ")
profiling_results = context.profile_data_asset(
datasource_name,
batch_kwargs_generator_name=batch_kwargs_generator_name,
data_asset_name=data_asset_name,
batch_kwargs=batch_kwargs,
profiler=BasicSuiteBuilderProfiler,
profiler_configuration=profiler_configuration,
expectation_suite_name=expectation_suite_name,
run_id=RunIdentifier(run_name=run_id),
additional_batch_kwargs=additional_batch_kwargs,
)
if not profiling_results["success"]:
_raise_profiling_errors(profiling_results)
cli_message("\nDone generating example Expectation Suite")
return profiling_results
def _raise_profiling_errors(profiling_results):
if (
profiling_results["error"]["code"]
== DataContext.PROFILING_ERROR_CODE_SPECIFIED_DATA_ASSETS_NOT_FOUND
):
raise ge_exceptions.DataContextError(
"""Some of the data assets you specified were not found: {:s}
""".format(
",".join(profiling_results["error"]["not_found_data_assets"])
)
)
raise ge_exceptions.DataContextError(
"Unknown profiling error code: " + profiling_results["error"]["code"]
)
def attempt_to_open_validation_results_in_data_docs(context, profiling_results):
try:
# TODO this is really brittle and not covered in tests
validation_result = profiling_results["results"][0][1]
validation_result_identifier = ValidationResultIdentifier.from_object(
validation_result
)
context.open_data_docs(resource_identifier=validation_result_identifier)
except (KeyError, IndexError):
context.open_data_docs()
def _get_default_expectation_suite_name(batch_kwargs, data_asset_name):
if data_asset_name:
suite_name = f"{data_asset_name}.warning"
elif "query" in batch_kwargs:
suite_name = "query.warning"
elif "path" in batch_kwargs:
try:
# Try guessing a filename
filename = os.path.split(os.path.normpath(batch_kwargs["path"]))[1]
# Take all but the last part after the period
filename = ".".join(filename.split(".")[:-1])
suite_name = str(filename) + ".warning"
except (OSError, IndexError):
suite_name = "warning"
else:
suite_name = "warning"
return suite_name
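# Illustrative examples (assumed inputs, not part of the module): a batch built from
# the path "/data/npi.csv" yields the default suite name "npi.warning", a query-based
# batch yields "query.warning", and anything else falls back to plain "warning".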
def tell_user_suite_exists(suite_name: str) -> None:
cli_message(
f"""<red>An expectation suite named `{suite_name}` already exists.</red>
- If you intend to edit the suite please use `great_expectations suite edit {suite_name}`."""
)
def create_empty_suite(
context: DataContext, expectation_suite_name: str, batch_kwargs
) -> None:
cli_message(
"""
Great Expectations will create a new Expectation Suite '{:s}' and store it here:
{:s}
""".format(
expectation_suite_name,
context.stores[
context.expectations_store_name
].store_backend.get_url_for_key(
ExpectationSuiteIdentifier(
expectation_suite_name=expectation_suite_name
).to_tuple()
),
)
)
suite = context.create_expectation_suite(expectation_suite_name)
suite.add_citation(comment="New suite added via CLI", batch_kwargs=batch_kwargs)
context.save_expectation_suite(suite, expectation_suite_name)
def launch_jupyter_notebook(notebook_path: str) -> None:
jupyter_command_override = os.getenv("GE_JUPYTER_CMD", None)
if jupyter_command_override:
subprocess.call(f"{jupyter_command_override} {notebook_path}", shell=True)
else:
subprocess.call(["jupyter", "notebook", notebook_path])
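# Usage sketch (hypothetical values): exporting GE_JUPYTER_CMD swaps the launcher,
# e.g. GE_JUPYTER_CMD="jupyter lab" makes the notebook open via
# `jupyter lab <notebook_path>` instead of the default `jupyter notebook` call.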
def load_batch(
context: DataContext,
suite: Union[str, ExpectationSuite],
batch_kwargs: Union[dict, BatchKwargs],
) -> Union[Batch, DataAsset]:
batch: Union[Batch, DataAsset] = context.get_batch(batch_kwargs, suite)
assert isinstance(batch, DataAsset) or isinstance(
batch, Batch
), "Batch failed to load. Please check your batch_kwargs"
return batch
def load_expectation_suite(
# TODO consolidate all the myriad CLI tests into this
context: DataContext,
suite_name: str,
usage_event: str,
) -> ExpectationSuite:
"""
Load an expectation suite from a given context.
Handles a suite name with or without `.json`
:param usage_event:
"""
if suite_name.endswith(".json"):
suite_name = suite_name[:-5]
try:
suite = context.get_expectation_suite(suite_name)
return suite
except ge_exceptions.DataContextError as e:
exit_with_failure_message_and_stats(
context,
usage_event,
f"<red>Could not find a suite named `{suite_name}`.</red> Please check "
"the name by running `great_expectations suite list` and try again.",
)
def exit_with_failure_message_and_stats(
context: DataContext, usage_event: str, message: str
) -> None:
cli_message(message)
send_usage_message(
data_context=context,
event=usage_event,
api_version="v2",
success=False,
)
sys.exit(1)
def load_checkpoint(
context: DataContext,
checkpoint_name: str,
usage_event: str,
) -> Union[Checkpoint, LegacyCheckpoint]:
"""Load a checkpoint or raise helpful errors."""
try:
checkpoint: Union[Checkpoint, LegacyCheckpoint] = context.get_checkpoint(
name=checkpoint_name
)
return checkpoint
except (
ge_exceptions.CheckpointNotFoundError,
ge_exceptions.InvalidCheckpointConfigError,
):
exit_with_failure_message_and_stats(
context,
usage_event,
f"""\
<red>Could not find checkpoint `{checkpoint_name}`.</red> Try running:
- `<green>great_expectations checkpoint list</green>` to verify your checkpoint exists
- `<green>great_expectations checkpoint new</green>` to configure a new checkpoint""",
)
except ge_exceptions.CheckpointError as e:
exit_with_failure_message_and_stats(context, usage_event, f"<red>{e}</red>")
def select_datasource(context: DataContext, datasource_name: Optional[str] = None) -> Optional[Datasource]:
"""Select a datasource interactively."""
# TODO consolidate all the myriad CLI tests into this
data_source = None
if datasource_name is None:
data_sources = sorted(context.list_datasources(), key=lambda x: x["name"])
if len(data_sources) == 0:
cli_message(
"<red>No datasources found in the context. To add a datasource, run `great_expectations datasource new`</red>"
)
elif len(data_sources) == 1:
datasource_name = data_sources[0]["name"]
else:
choices = "\n".join(
[
" {}. {}".format(i, data_source["name"])
for i, data_source in enumerate(data_sources, 1)
]
)
option_selection = click.prompt(
"Select a datasource" + "\n" + choices + "\n",
type=click.Choice(
[str(i) for i, data_source in enumerate(data_sources, 1)]
),
show_choices=False,
)
datasource_name = data_sources[int(option_selection) - 1]["name"]
if datasource_name is not None:
data_source = context.get_datasource(datasource_name)
return data_source
def load_data_context_with_error_handling(
directory: str, from_cli_upgrade_command: bool = False
) -> DataContext:
"""Return a DataContext with good error handling and exit codes."""
try:
context: DataContext = DataContext(context_root_dir=directory)
ge_config_version: int = context.get_config().config_version
if (
from_cli_upgrade_command
and int(ge_config_version) < CURRENT_GE_CONFIG_VERSION
):
directory = directory or context.root_directory
(
increment_version,
exception_occurred,
) = upgrade_project_up_to_one_version_increment(
context_root_dir=directory,
ge_config_version=ge_config_version,
continuation_message=EXIT_UPGRADE_CONTINUATION_MESSAGE,
from_cli_upgrade_command=from_cli_upgrade_command,
)
if not exception_occurred and increment_version:
context = DataContext(context_root_dir=directory)
return context
except ge_exceptions.UnsupportedConfigVersionError as err:
directory = directory or DataContext.find_context_root_dir()
ge_config_version = DataContext.get_ge_config_version(
context_root_dir=directory
)
upgrade_helper_class = (
GE_UPGRADE_HELPER_VERSION_MAP.get(int(ge_config_version))
if ge_config_version
else None
)
if upgrade_helper_class and ge_config_version < CURRENT_GE_CONFIG_VERSION:
upgrade_project(
context_root_dir=directory,
ge_config_version=ge_config_version,
from_cli_upgrade_command=from_cli_upgrade_command,
)
else:
cli_message(f"<red>{err.message}</red>")
sys.exit(1)
except (
ge_exceptions.ConfigNotFoundError,
ge_exceptions.InvalidConfigError,
) as err:
cli_message(f"<red>{err.message}</red>")
sys.exit(1)
except ge_exceptions.PluginModuleNotFoundError as err:
cli_message(err.cli.v012_colored_message)
sys.exit(1)
except ge_exceptions.PluginClassNotFoundError as err:
cli_message(err.cli.v012_colored_message)
sys.exit(1)
except ge_exceptions.InvalidConfigurationYamlError as err:
cli_message(f"<red>{str(err)}</red>")
sys.exit(1)
def upgrade_project(
context_root_dir, ge_config_version, from_cli_upgrade_command=False
):
if from_cli_upgrade_command:
message = (
f"<red>\nYour project appears to have an out-of-date config version ({ge_config_version}) - "
f"the version "
f"number must be at least {CURRENT_GE_CONFIG_VERSION}.</red>"
)
else:
message = (
f"<red>\nYour project appears to have an out-of-date config version ({ge_config_version}) - "
f"the version "
f"number must be at least {CURRENT_GE_CONFIG_VERSION}.\nIn order to proceed, "
f"your project must be upgraded.</red>"
)
cli_message(message)
upgrade_prompt = (
"\nWould you like to run the Upgrade Helper to bring your project up-to-date?"
)
confirm_proceed_or_exit(
confirm_prompt=upgrade_prompt,
continuation_message=EXIT_UPGRADE_CONTINUATION_MESSAGE,
)
cli_message(SECTION_SEPARATOR)
# use loop in case multiple upgrades need to take place
while ge_config_version < CURRENT_GE_CONFIG_VERSION:
(
increment_version,
exception_occurred,
) = upgrade_project_up_to_one_version_increment(
context_root_dir=context_root_dir,
ge_config_version=ge_config_version,
continuation_message=EXIT_UPGRADE_CONTINUATION_MESSAGE,
from_cli_upgrade_command=from_cli_upgrade_command,
)
if exception_occurred or not increment_version:
break
ge_config_version += 1
cli_message(SECTION_SEPARATOR)
upgrade_success_message = "<green>Upgrade complete. Exiting...</green>\n"
upgrade_incomplete_message = f"""\
<red>The Upgrade Helper was unable to perform a complete project upgrade. Next steps:</red>
- Please perform any manual steps outlined in the Upgrade Overview and/or Upgrade Report above
- When complete, increment the config_version key in your <cyan>great_expectations.yml</cyan> to <cyan>{
ge_config_version + 1}</cyan>\n
To learn more about the upgrade process, visit \
<cyan>https://docs.greatexpectations.io/en/latest/how_to_guides/migrating_versions.html</cyan>
"""
if ge_config_version < CURRENT_GE_CONFIG_VERSION:
cli_message(upgrade_incomplete_message)
else:
cli_message(upgrade_success_message)
sys.exit(0)
def upgrade_project_up_to_one_version_increment(
context_root_dir: str,
ge_config_version: float,
continuation_message: str,
from_cli_upgrade_command: bool = False,
) -> Tuple[bool, bool]: # Returns increment_version, exception_occurred
upgrade_helper_class = GE_UPGRADE_HELPER_VERSION_MAP.get(int(ge_config_version))
if not upgrade_helper_class:
return False, False
target_ge_config_version = int(ge_config_version) + 1
# set version temporarily to CURRENT_GE_CONFIG_VERSION to get functional DataContext
DataContext.set_ge_config_version(
config_version=CURRENT_GE_CONFIG_VERSION,
context_root_dir=context_root_dir,
)
upgrade_helper = upgrade_helper_class(context_root_dir=context_root_dir)
upgrade_overview, confirmation_required = upgrade_helper.get_upgrade_overview()
if confirmation_required or from_cli_upgrade_command:
upgrade_confirmed = confirm_proceed_or_exit(
confirm_prompt=upgrade_overview,
continuation_message=continuation_message,
exit_on_no=False,
)
else:
upgrade_confirmed = True
if upgrade_confirmed:
cli_message("\nUpgrading project...")
cli_message(SECTION_SEPARATOR)
# run upgrade and get report of what was done, if version number should be incremented
(
upgrade_report,
increment_version,
exception_occurred,
) = upgrade_helper.upgrade_project()
# display report to user
cli_message(upgrade_report)
if exception_occurred:
# restore version number to current number
DataContext.set_ge_config_version(
ge_config_version, context_root_dir, validate_config_version=False
)
# display report to user
return False, True
# set config version to target version
if increment_version:
DataContext.set_ge_config_version(
target_ge_config_version,
context_root_dir,
validate_config_version=False,
)
return True, False
# restore version number to current number
DataContext.set_ge_config_version(
ge_config_version, context_root_dir, validate_config_version=False
)
return False, False
# restore version number to current number
DataContext.set_ge_config_version(
ge_config_version, context_root_dir, validate_config_version=False
)
cli_message(continuation_message)
sys.exit(0)
def confirm_proceed_or_exit(
confirm_prompt: str = "Would you like to proceed?",
continuation_message: str = "Ok, exiting now. You can always read more at https://docs.greatexpectations.io/ !",
exit_on_no: bool = True,
exit_code: int = 0,
) -> Optional[bool]:
"""
Every CLI command that starts a potentially lengthy (>1 sec) computation
or modifies some resources (e.g., edits the config file, adds objects
to the stores) must follow this pattern:
1. Explain which resources will be created/modified/deleted
2. Use this method to ask for user's confirmation
The goal of this standardization is for the users to expect consistency -
if you saw one command, you know what to expect from all others.
If the user does not confirm, the program should exit. The purpose of the exit_on_no parameter is to provide
the option to perform cleanup actions before exiting outside of the function.
"""
confirm_prompt_colorized = cli_colorize_string(confirm_prompt)
continuation_message_colorized = cli_colorize_string(continuation_message)
if not click.confirm(confirm_prompt_colorized, default=True):
if exit_on_no:
cli_message(continuation_message_colorized)
sys.exit(exit_code)
else:
return False
return True
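# Illustrative call pattern (assumed caller, not part of this module): a command that
# is about to modify resources first explains the change, then gates on this helper:
#
#   cli_message("This will add a new datasource to great_expectations.yml")
#   confirm_proceed_or_exit(
#       confirm_prompt="Would you like to proceed?",
#       continuation_message="Ok, exiting now. Nothing was changed.",
#   )
#
# With exit_on_no=False the caller receives False instead of an immediate exit and can
# run its own cleanup before terminating.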
| [] | [] | ["GE_JUPYTER_CMD"] | [] | ["GE_JUPYTER_CMD"] | python | 1 | 0 | |
mmdb_test.go
// Copyright (c) 2018, Janoš Guljaš <[email protected]>
// All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package mmdb
import (
"context"
"crypto/md5"
"fmt"
"io"
"io/ioutil"
"os"
"testing"
)
var licenseKey = os.Getenv("GO_TEST_MMDB_LICENSE_KEY")
func init() {
setTestM5Filename = func(md5Filename string) {
testMD5Filename = md5Filename
}
}
func TestUpdateGeoLite2Country(t *testing.T) {
testUpdate(t, UpdateGeoLite2Country)
}
func TestUpdateGeoLite2City(t *testing.T) {
testUpdate(t, UpdateGeoLite2City)
}
func TestUpdateGeoLite2ASN(t *testing.T) {
testUpdate(t, UpdateGeoLite2ASN)
}
func testUpdate(t *testing.T, f func(ctx context.Context, filename, licenseKey string) (saved bool, err error)) {
dir, err := ioutil.TempDir("", "mmdb_"+t.Name())
if err != nil {
t.Fatal(err)
}
file, err := ioutil.TempFile(dir, "")
if err != nil {
t.Fatal(err)
}
filename := file.Name()
defer os.RemoveAll(dir)
// download a new file
saved, err := f(context.Background(), filename, licenseKey)
if err != nil {
t.Fatal(err)
}
if !saved {
t.Error("expected file to be saved, but it is not")
}
if testMD5Filename == "" {
t.Error("expected testMD5Filename to be set, but it is not")
}
fileStat, err := os.Stat(filename)
if err != nil {
t.Fatal(err)
}
md5FileStat, err := os.Stat(testMD5Filename)
if err != nil {
t.Fatal(err)
}
// do not download a new file
saved, err = f(context.Background(), filename, licenseKey)
if err != nil {
t.Fatal(err)
}
if saved {
t.Error("expected file not to be saved, but it is")
}
newFileStat, err := os.Stat(filename)
if err != nil {
t.Fatal(err)
}
newMD5FileStat, err := os.Stat(testMD5Filename)
if err != nil {
t.Fatal(err)
}
if !fileStat.ModTime().Equal(newFileStat.ModTime()) {
t.Error("expected file not to be changed, but it is")
}
if !md5FileStat.ModTime().Equal(newMD5FileStat.ModTime()) {
t.Error("expected file not to be changed, but it is")
}
fileHash := fileMD5(t, filename)
md5Hash := fileMD5(t, testMD5Filename)
// simulate update by changing saved files
if err := ioutil.WriteFile(filename, []byte("data"), 0666); err != nil {
t.Fatal(err)
}
if err := ioutil.WriteFile(testMD5Filename, []byte("hash"), 0666); err != nil {
t.Fatal(err)
}
// update
saved, err = f(context.Background(), filename, licenseKey)
if err != nil {
t.Fatal(err)
}
if !saved {
t.Error("expected file to be saved, but it is not")
}
if testMD5Filename == "" {
t.Error("expected testMD5Filename to be set, but it is not")
}
newFileHash := fileMD5(t, filename)
newMD5Hash := fileMD5(t, testMD5Filename)
if fileHash != newFileHash {
t.Error("file hash and updated file hash are not the same")
}
if md5Hash != newMD5Hash {
t.Error("md5 file hash and updated md5 file hash are not the same")
}
}
func fileMD5(t *testing.T, filename string) (hash string) {
t.Helper()
file, err := os.Open(filename)
if err != nil {
t.Fatal(err)
}
defer file.Close()
h := md5.New()
_, err = io.Copy(h, file)
if err != nil {
t.Fatal(err)
}
return fmt.Sprintf("%x", h.Sum(nil))
}
| ["\"GO_TEST_MMDB_LICENSE_KEY\""] | [] | ["GO_TEST_MMDB_LICENSE_KEY"] | [] | ["GO_TEST_MMDB_LICENSE_KEY"] | go | 1 | 0 | |
dm/master/server_test.go
// Copyright 2019 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// See the License for the specific language governing permissions and
// limitations under the License.
package master
import (
"bytes"
"context"
"database/sql"
"fmt"
"io/ioutil"
"os"
"sort"
"strings"
"sync"
"testing"
"time"
sqlmock "github.com/DATA-DOG/go-sqlmock"
"github.com/golang/mock/gomock"
"github.com/pingcap/check"
"github.com/pingcap/errors"
"github.com/pingcap/parser"
"github.com/pingcap/parser/ast"
"github.com/pingcap/parser/model"
toolutils "github.com/pingcap/tidb-tools/pkg/utils"
tiddl "github.com/pingcap/tidb/ddl"
"github.com/pingcap/tidb/sessionctx"
tidbmock "github.com/pingcap/tidb/util/mock"
"github.com/tikv/pd/pkg/tempurl"
"go.etcd.io/etcd/clientv3"
"go.etcd.io/etcd/integration"
"github.com/pingcap/dm/checker"
"github.com/pingcap/dm/dm/config"
"github.com/pingcap/dm/dm/ctl/common"
"github.com/pingcap/dm/dm/master/scheduler"
"github.com/pingcap/dm/dm/master/shardddl"
"github.com/pingcap/dm/dm/master/workerrpc"
"github.com/pingcap/dm/dm/pb"
"github.com/pingcap/dm/dm/pbmock"
"github.com/pingcap/dm/pkg/conn"
"github.com/pingcap/dm/pkg/cputil"
"github.com/pingcap/dm/pkg/etcdutil"
"github.com/pingcap/dm/pkg/ha"
"github.com/pingcap/dm/pkg/log"
"github.com/pingcap/dm/pkg/shardddl/optimism"
"github.com/pingcap/dm/pkg/shardddl/pessimism"
"github.com/pingcap/dm/pkg/terror"
"github.com/pingcap/dm/pkg/utils"
)
// use task config from integration test `sharding`
var taskConfig = `---
name: test
task-mode: all
is-sharding: true
shard-mode: ""
meta-schema: "dm_meta"
enable-heartbeat: true
timezone: "Asia/Shanghai"
ignore-checking-items: ["all"]
target-database:
host: "127.0.0.1"
port: 4000
user: "root"
password: ""
mysql-instances:
- source-id: "mysql-replica-01"
block-allow-list: "instance"
route-rules: ["sharding-route-rules-table", "sharding-route-rules-schema"]
column-mapping-rules: ["instance-1"]
mydumper-config-name: "global"
loader-config-name: "global"
syncer-config-name: "global"
- source-id: "mysql-replica-02"
block-allow-list: "instance"
route-rules: ["sharding-route-rules-table", "sharding-route-rules-schema"]
column-mapping-rules: ["instance-2"]
mydumper-config-name: "global"
loader-config-name: "global"
syncer-config-name: "global"
block-allow-list:
instance:
do-dbs: ["~^sharding[\\d]+"]
do-tables:
- db-name: "~^sharding[\\d]+"
tbl-name: "~^t[\\d]+"
routes:
sharding-route-rules-table:
schema-pattern: sharding*
table-pattern: t*
target-schema: db_target
target-table: t_target
sharding-route-rules-schema:
schema-pattern: sharding*
target-schema: db_target
column-mappings:
instance-1:
schema-pattern: "sharding*"
table-pattern: "t*"
expression: "partition id"
source-column: "id"
target-column: "id"
arguments: ["1", "sharding", "t"]
instance-2:
schema-pattern: "sharding*"
table-pattern: "t*"
expression: "partition id"
source-column: "id"
target-column: "id"
arguments: ["2", "sharding", "t"]
mydumpers:
global:
threads: 4
chunk-filesize: 64
skip-tz-utc: true
extra-args: "--regex '^sharding.*'"
loaders:
global:
pool-size: 16
dir: "./dumped_data"
syncers:
global:
worker-count: 16
batch: 100
`
var (
errGRPCFailed = "test grpc request failed"
errGRPCFailedReg = fmt.Sprintf("(?m).*%s.*", errGRPCFailed)
errCheckSyncConfig = "(?m).*check sync config with error.*"
errCheckSyncConfigReg = fmt.Sprintf("(?m).*%s.*", errCheckSyncConfig)
testEtcdCluster *integration.ClusterV3
keepAliveTTL = int64(10)
etcdTestCli *clientv3.Client
)
func TestMaster(t *testing.T) {
err := log.InitLogger(&log.Config{})
if err != nil {
t.Fatal(err)
}
testEtcdCluster = integration.NewClusterV3(t, &integration.ClusterConfig{Size: 1})
defer testEtcdCluster.Terminate(t)
etcdTestCli = testEtcdCluster.RandClient()
check.TestingT(t)
}
type testMaster struct {
workerClients map[string]workerrpc.Client
saveMaxRetryNum int
}
var _ = check.Suite(&testMaster{})
func (t *testMaster) SetUpSuite(c *check.C) {
err := log.InitLogger(&log.Config{})
c.Assert(err, check.IsNil)
t.workerClients = make(map[string]workerrpc.Client)
clearEtcdEnv(c)
t.saveMaxRetryNum = maxRetryNum
maxRetryNum = 2
}
func (t *testMaster) TearDownSuite(c *check.C) {
maxRetryNum = t.saveMaxRetryNum
}
func (t *testMaster) TearDownTest(c *check.C) {
clearEtcdEnv(c)
}
func newMockRPCClient(client pb.WorkerClient) workerrpc.Client {
c, _ := workerrpc.NewGRPCClientWrap(nil, client)
return c
}
func defaultWorkerSource() ([]string, []string) {
return []string{
"mysql-replica-01",
"mysql-replica-02",
}, []string{
"127.0.0.1:8262",
"127.0.0.1:8263",
}
}
func clearEtcdEnv(c *check.C) {
c.Assert(ha.ClearTestInfoOperation(etcdTestCli), check.IsNil)
}
func clearSchedulerEnv(c *check.C, cancel context.CancelFunc, wg *sync.WaitGroup) {
cancel()
wg.Wait()
clearEtcdEnv(c)
}
func makeNilWorkerClients(workers []string) map[string]workerrpc.Client {
nilWorkerClients := make(map[string]workerrpc.Client, len(workers))
for _, worker := range workers {
nilWorkerClients[worker] = nil
}
return nilWorkerClients
}
func makeWorkerClientsForHandle(ctrl *gomock.Controller, taskName string, sources []string, workers []string, reqs ...interface{}) map[string]workerrpc.Client {
workerClients := make(map[string]workerrpc.Client, len(workers))
for i := range workers {
mockWorkerClient := pbmock.NewMockWorkerClient(ctrl)
for _, req := range reqs {
mockRevelantWorkerClient(mockWorkerClient, taskName, sources[i], req)
}
workerClients[workers[i]] = newMockRPCClient(mockWorkerClient)
}
return workerClients
}
func testDefaultMasterServer(c *check.C) *Server {
cfg := NewConfig()
err := cfg.Parse([]string{"-config=./dm-master.toml"})
c.Assert(err, check.IsNil)
cfg.DataDir = c.MkDir()
server := NewServer(cfg)
server.leader.Set(oneselfLeader)
go server.ap.Start(context.Background())
return server
}
func testMockScheduler(ctx context.Context, wg *sync.WaitGroup, c *check.C, sources, workers []string, password string, workerClients map[string]workerrpc.Client) (*scheduler.Scheduler, []context.CancelFunc) {
logger := log.L()
scheduler2 := scheduler.NewScheduler(&logger, config.Security{})
err := scheduler2.Start(ctx, etcdTestCli)
c.Assert(err, check.IsNil)
cancels := make([]context.CancelFunc, 0, 2)
for i := range workers {
// add worker to scheduler's workers map
name := workers[i]
c.Assert(scheduler2.AddWorker(name, workers[i]), check.IsNil)
scheduler2.SetWorkerClientForTest(name, workerClients[workers[i]])
// operate mysql config on this worker
cfg := config.NewSourceConfig()
cfg.SourceID = sources[i]
cfg.From.Password = password
c.Assert(scheduler2.AddSourceCfg(*cfg), check.IsNil, check.Commentf("all sources: %v", sources))
wg.Add(1)
ctx1, cancel1 := context.WithCancel(ctx)
cancels = append(cancels, cancel1)
go func(ctx context.Context, workerName string) {
defer wg.Done()
c.Assert(ha.KeepAlive(ctx, etcdTestCli, workerName, keepAliveTTL), check.IsNil)
}(ctx1, name)
c.Assert(utils.WaitSomething(30, 100*time.Millisecond, func() bool {
w := scheduler2.GetWorkerBySource(sources[i])
return w != nil && w.BaseInfo().Name == name
}), check.IsTrue)
}
return scheduler2, cancels
}
func (t *testMaster) TestQueryStatus(c *check.C) {
ctrl := gomock.NewController(c)
defer ctrl.Finish()
server := testDefaultMasterServer(c)
sources, workers := defaultWorkerSource()
// test query all workers
for _, worker := range workers {
mockWorkerClient := pbmock.NewMockWorkerClient(ctrl)
mockWorkerClient.EXPECT().QueryStatus(
gomock.Any(),
&pb.QueryStatusRequest{},
).Return(&pb.QueryStatusResponse{
Result: true,
SourceStatus: &pb.SourceStatus{},
}, nil)
t.workerClients[worker] = newMockRPCClient(mockWorkerClient)
}
var wg sync.WaitGroup
ctx, cancel := context.WithCancel(context.Background())
server.scheduler, _ = testMockScheduler(ctx, &wg, c, sources, workers, "", t.workerClients)
resp, err := server.QueryStatus(context.Background(), &pb.QueryStatusListRequest{})
c.Assert(err, check.IsNil)
c.Assert(resp.Result, check.IsTrue)
clearSchedulerEnv(c, cancel, &wg)
// query specified sources
for _, worker := range workers {
mockWorkerClient := pbmock.NewMockWorkerClient(ctrl)
mockWorkerClient.EXPECT().QueryStatus(
gomock.Any(),
&pb.QueryStatusRequest{},
).Return(&pb.QueryStatusResponse{
Result: true,
SourceStatus: &pb.SourceStatus{},
}, nil)
t.workerClients[worker] = newMockRPCClient(mockWorkerClient)
}
ctx, cancel = context.WithCancel(context.Background())
server.scheduler, _ = testMockScheduler(ctx, &wg, c, sources, workers, "", t.workerClients)
resp, err = server.QueryStatus(context.Background(), &pb.QueryStatusListRequest{
Sources: sources,
})
c.Assert(err, check.IsNil)
c.Assert(resp.Result, check.IsTrue)
// query with invalid dm-worker[s]
resp, err = server.QueryStatus(context.Background(), &pb.QueryStatusListRequest{
Sources: []string{"invalid-source1", "invalid-source2"},
})
c.Assert(err, check.IsNil)
c.Assert(resp.Result, check.IsFalse)
c.Assert(resp.Msg, check.Matches, ".*relevant worker-client not found")
// query with invalid task name
resp, err = server.QueryStatus(context.Background(), &pb.QueryStatusListRequest{
Name: "invalid-task-name",
})
c.Assert(err, check.IsNil)
c.Assert(resp.Result, check.IsFalse)
c.Assert(resp.Msg, check.Matches, "task .* has no source or not exist")
clearSchedulerEnv(c, cancel, &wg)
// TODO: test query with correct task name, this needs to add task first
}
func (t *testMaster) TestCheckTask(c *check.C) {
ctrl := gomock.NewController(c)
defer ctrl.Finish()
server := testDefaultMasterServer(c)
sources, workers := defaultWorkerSource()
t.workerClients = makeNilWorkerClients(workers)
var wg sync.WaitGroup
ctx, cancel := context.WithCancel(context.Background())
server.scheduler, _ = testMockScheduler(ctx, &wg, c, sources, workers, "", t.workerClients)
mock := t.initVersionDB(c)
defer func() {
conn.DefaultDBProvider = &conn.DefaultDBProviderImpl{}
}()
mock.ExpectQuery("SHOW GLOBAL VARIABLES LIKE 'version'").WillReturnRows(sqlmock.NewRows([]string{"Variable_name", "Value"}).
AddRow("version", "5.7.25-TiDB-v4.0.2"))
resp, err := server.CheckTask(context.Background(), &pb.CheckTaskRequest{
Task: taskConfig,
})
c.Assert(err, check.IsNil)
c.Assert(resp.Result, check.IsTrue)
// decode task with error
resp, err = server.CheckTask(context.Background(), &pb.CheckTaskRequest{
Task: "invalid toml config",
})
c.Assert(err, check.IsNil)
c.Assert(resp.Result, check.IsFalse)
clearSchedulerEnv(c, cancel, &wg)
// simulate an invalid password returned from the scheduler; since the config also supports plaintext MySQL passwords, cfg.SubTaskConfigs will still succeed
ctx, cancel = context.WithCancel(context.Background())
server.scheduler, _ = testMockScheduler(ctx, &wg, c, sources, workers, "invalid-encrypt-password", t.workerClients)
mock = t.initVersionDB(c)
mock.ExpectQuery("SHOW GLOBAL VARIABLES LIKE 'version'").WillReturnRows(sqlmock.NewRows([]string{"Variable_name", "Value"}).
AddRow("version", "5.7.25-TiDB-v4.0.2"))
resp, err = server.CheckTask(context.Background(), &pb.CheckTaskRequest{
Task: taskConfig,
})
c.Assert(err, check.IsNil)
c.Assert(resp.Result, check.IsTrue)
clearSchedulerEnv(c, cancel, &wg)
}
func (t *testMaster) TestStartTask(c *check.C) {
ctrl := gomock.NewController(c)
defer ctrl.Finish()
server := testDefaultMasterServer(c)
sources, workers := defaultWorkerSource()
// s.generateSubTask with error
resp, err := server.StartTask(context.Background(), &pb.StartTaskRequest{
Task: "invalid toml config",
})
c.Assert(err, check.IsNil)
c.Assert(resp.Result, check.IsFalse)
// test start task successfully
var wg sync.WaitGroup
// taskName must match the task name defined in taskConfig
taskName := "test"
ctx, cancel := context.WithCancel(context.Background())
req := &pb.StartTaskRequest{
Task: taskConfig,
Sources: sources,
}
server.scheduler, _ = testMockScheduler(ctx, &wg, c, sources, workers, "",
makeWorkerClientsForHandle(ctrl, taskName, sources, workers, req))
mock := t.initVersionDB(c)
defer func() {
conn.DefaultDBProvider = &conn.DefaultDBProviderImpl{}
}()
mock.ExpectQuery("SHOW GLOBAL VARIABLES LIKE 'version'").WillReturnRows(sqlmock.NewRows([]string{"Variable_name", "Value"}).
AddRow("version", "5.7.25-TiDB-v4.0.2"))
resp, err = server.StartTask(context.Background(), req)
c.Assert(err, check.IsNil)
c.Assert(resp.Result, check.IsTrue)
for _, source := range sources {
t.subTaskStageMatch(c, server.scheduler, taskName, source, pb.Stage_Running)
tcm, _, err2 := ha.GetSubTaskCfg(etcdTestCli, source, taskName, 0)
c.Assert(err2, check.IsNil)
c.Assert(tcm, check.HasKey, taskName)
c.Assert(tcm[taskName].Name, check.Equals, taskName)
c.Assert(tcm[taskName].SourceID, check.Equals, source)
}
// check start-task with an invalid source
invalidSource := "invalid-source"
mock = t.initVersionDB(c)
mock.ExpectQuery("SHOW GLOBAL VARIABLES LIKE 'version'").WillReturnRows(sqlmock.NewRows([]string{"Variable_name", "Value"}).
AddRow("version", "5.7.25-TiDB-v4.0.2"))
resp, err = server.StartTask(context.Background(), &pb.StartTaskRequest{
Task: taskConfig,
Sources: []string{invalidSource},
})
c.Assert(err, check.IsNil)
c.Assert(resp.Result, check.IsFalse)
c.Assert(resp.Sources, check.HasLen, 1)
c.Assert(resp.Sources[0].Result, check.IsFalse)
c.Assert(resp.Sources[0].Source, check.Equals, invalidSource)
// test start task, but the first step check-task fails
bakCheckSyncConfigFunc := checker.CheckSyncConfigFunc
checker.CheckSyncConfigFunc = func(_ context.Context, _ []*config.SubTaskConfig) error {
return errors.New(errCheckSyncConfig)
}
defer func() {
checker.CheckSyncConfigFunc = bakCheckSyncConfigFunc
}()
mock = t.initVersionDB(c)
mock.ExpectQuery("SHOW GLOBAL VARIABLES LIKE 'version'").WillReturnRows(sqlmock.NewRows([]string{"Variable_name", "Value"}).
AddRow("version", "5.7.25-TiDB-v4.0.2"))
resp, err = server.StartTask(context.Background(), &pb.StartTaskRequest{
Task: taskConfig,
Sources: sources,
})
c.Assert(err, check.IsNil)
c.Assert(resp.Result, check.IsFalse)
c.Assert(resp.Msg, check.Matches, errCheckSyncConfigReg)
clearSchedulerEnv(c, cancel, &wg)
}
// db is used for removing data
// verDB is used for showing the version
type mockDBProvider struct {
verDB *sql.DB
db *sql.DB
}
// return db if verDB was closed
func (d *mockDBProvider) Apply(config config.DBConfig) (*conn.BaseDB, error) {
if err := d.verDB.Ping(); err != nil {
return conn.NewBaseDB(d.db, func() {}), nil
}
return conn.NewBaseDB(d.verDB, func() {}), nil
}
func (t *testMaster) initVersionDB(c *check.C) sqlmock.Sqlmock {
db, mock, err := sqlmock.New()
c.Assert(err, check.IsNil)
if mdbp, ok := conn.DefaultDBProvider.(*mockDBProvider); ok {
mdbp.verDB = db
} else {
conn.DefaultDBProvider = &mockDBProvider{verDB: db}
}
return mock
}
func (t *testMaster) initMockDB(c *check.C) sqlmock.Sqlmock {
db, mock, err := sqlmock.New()
c.Assert(err, check.IsNil)
if mdbp, ok := conn.DefaultDBProvider.(*mockDBProvider); ok {
mdbp.db = db
} else {
conn.DefaultDBProvider = &mockDBProvider{db: db}
}
return mock
}
func (t *testMaster) TestStartTaskWithRemoveMeta(c *check.C) {
ctrl := gomock.NewController(c)
defer ctrl.Finish()
server := testDefaultMasterServer(c)
sources, workers := defaultWorkerSource()
server.etcdClient = etcdTestCli
// test start task successfully
var wg sync.WaitGroup
// taskName is taken from the name defined in taskConfig
cfg := config.NewTaskConfig()
err := cfg.Decode(taskConfig)
c.Assert(err, check.IsNil)
taskName := cfg.Name
ctx, cancel := context.WithCancel(context.Background())
logger := log.L()
// test remove meta with pessimist
cfg.ShardMode = config.ShardPessimistic
req := &pb.StartTaskRequest{
Task: strings.ReplaceAll(taskConfig, `shard-mode: ""`, fmt.Sprintf(`shard-mode: "%s"`, cfg.ShardMode)),
Sources: sources,
RemoveMeta: true,
}
server.scheduler, _ = testMockScheduler(ctx, &wg, c, sources, workers, "",
makeWorkerClientsForHandle(ctrl, taskName, sources, workers, req))
server.pessimist = shardddl.NewPessimist(&logger, func(task string) []string { return sources })
server.optimist = shardddl.NewOptimist(&logger)
var (
DDLs = []string{"ALTER TABLE bar ADD COLUMN c1 INT"}
schema, table = "foo", "bar"
ID = fmt.Sprintf("%s-`%s`.`%s`", taskName, schema, table)
i11 = pessimism.NewInfo(taskName, sources[0], schema, table, DDLs)
op2 = pessimism.NewOperation(ID, taskName, sources[0], DDLs, true, false)
)
_, err = pessimism.PutInfo(etcdTestCli, i11)
c.Assert(err, check.IsNil)
_, succ, err := pessimism.PutOperations(etcdTestCli, false, op2)
c.Assert(succ, check.IsTrue)
c.Assert(err, check.IsNil)
c.Assert(server.pessimist.Start(ctx, etcdTestCli), check.IsNil)
c.Assert(server.optimist.Start(ctx, etcdTestCli), check.IsNil)
verMock := t.initVersionDB(c)
defer func() {
conn.DefaultDBProvider = &conn.DefaultDBProviderImpl{}
}()
verMock.ExpectQuery("SHOW GLOBAL VARIABLES LIKE 'version'").WillReturnRows(sqlmock.NewRows([]string{"Variable_name", "Value"}).
AddRow("version", "5.7.25-TiDB-v4.0.2"))
mock := t.initMockDB(c)
mock.ExpectBegin()
mock.ExpectExec(fmt.Sprintf("DROP TABLE IF EXISTS `%s`.`%s`", cfg.MetaSchema, cputil.LoaderCheckpoint(cfg.Name))).WillReturnResult(sqlmock.NewResult(1, 1))
mock.ExpectExec(fmt.Sprintf("DROP TABLE IF EXISTS `%s`.`%s`", cfg.MetaSchema, cputil.SyncerCheckpoint(cfg.Name))).WillReturnResult(sqlmock.NewResult(1, 1))
mock.ExpectExec(fmt.Sprintf("DROP TABLE IF EXISTS `%s`.`%s`", cfg.MetaSchema, cputil.SyncerShardMeta(cfg.Name))).WillReturnResult(sqlmock.NewResult(1, 1))
mock.ExpectExec(fmt.Sprintf("DROP TABLE IF EXISTS `%s`.`%s`", cfg.MetaSchema, cputil.SyncerOnlineDDL(cfg.Name))).WillReturnResult(sqlmock.NewResult(1, 1))
mock.ExpectCommit()
c.Assert(len(server.pessimist.Locks()), check.Greater, 0)
resp, err := server.StartTask(context.Background(), req)
wg.Add(1)
go func() {
defer wg.Done()
time.Sleep(10 * time.Microsecond)
// start another same task at the same time, should get err
verMock2 := t.initVersionDB(c)
verMock2.ExpectQuery("SHOW GLOBAL VARIABLES LIKE 'version'").WillReturnRows(sqlmock.NewRows([]string{"Variable_name", "Value"}).
AddRow("version", "5.7.25-TiDB-v4.0.2"))
resp1, err1 := server.StartTask(context.Background(), req)
c.Assert(err1, check.IsNil)
c.Assert(resp1.Result, check.IsFalse)
c.Assert(resp1.Msg, check.Equals, terror.Annotate(terror.ErrSchedulerSubTaskExist.Generate(cfg.Name, sources),
"while remove-meta is true").Error())
}()
c.Assert(err, check.IsNil)
if !resp.Result {
c.Errorf("start task failed: %s", resp.Msg)
}
for _, source := range sources {
t.subTaskStageMatch(c, server.scheduler, taskName, source, pb.Stage_Running)
tcm, _, err2 := ha.GetSubTaskCfg(etcdTestCli, source, taskName, 0)
c.Assert(err2, check.IsNil)
c.Assert(tcm, check.HasKey, taskName)
c.Assert(tcm[taskName].Name, check.Equals, taskName)
c.Assert(tcm[taskName].SourceID, check.Equals, source)
}
c.Assert(server.pessimist.Locks(), check.HasLen, 0)
if err = mock.ExpectationsWereMet(); err != nil {
c.Errorf("db unfulfilled expectations: %s", err)
}
ifm, _, err := pessimism.GetAllInfo(etcdTestCli)
c.Assert(err, check.IsNil)
c.Assert(ifm, check.HasLen, 0)
opm, _, err := pessimism.GetAllOperations(etcdTestCli)
c.Assert(err, check.IsNil)
c.Assert(opm, check.HasLen, 0)
clearSchedulerEnv(c, cancel, &wg)
// test remove meta with optimist
ctx, cancel = context.WithCancel(context.Background())
cfg.ShardMode = config.ShardOptimistic
req = &pb.StartTaskRequest{
Task: strings.ReplaceAll(taskConfig, `shard-mode: ""`, fmt.Sprintf(`shard-mode: "%s"`, cfg.ShardMode)),
Sources: sources,
RemoveMeta: true,
}
server.scheduler, _ = testMockScheduler(ctx, &wg, c, sources, workers, "",
makeWorkerClientsForHandle(ctrl, taskName, sources, workers, req))
server.pessimist = shardddl.NewPessimist(&logger, func(task string) []string { return sources })
server.optimist = shardddl.NewOptimist(&logger)
var (
p = parser.New()
se = tidbmock.NewContext()
tblID int64 = 111
st1 = optimism.NewSourceTables(taskName, sources[0])
DDLs1 = []string{"ALTER TABLE bar ADD COLUMN c1 INT"}
tiBefore = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY)`)
tiAfter1 = createTableInfo(c, p, se, tblID, `CREATE TABLE bar (id INT PRIMARY KEY, c1 TEXT)`)
info1 = optimism.NewInfo(taskName, sources[0], "foo-1", "bar-1", schema, table, DDLs1, tiBefore, tiAfter1)
op1 = optimism.NewOperation(ID, taskName, sources[0], info1.UpSchema, info1.UpTable, DDLs1, optimism.ConflictNone, false)
)
_, err = optimism.PutSourceTables(etcdTestCli, st1)
c.Assert(err, check.IsNil)
_, err = optimism.PutInfo(etcdTestCli, info1)
c.Assert(err, check.IsNil)
_, succ, err = optimism.PutOperation(etcdTestCli, false, op1)
c.Assert(succ, check.IsTrue)
c.Assert(err, check.IsNil)
err = server.pessimist.Start(ctx, etcdTestCli)
c.Assert(err, check.IsNil)
err = server.optimist.Start(ctx, etcdTestCli)
c.Assert(err, check.IsNil)
verMock = t.initVersionDB(c)
verMock.ExpectQuery("SHOW GLOBAL VARIABLES LIKE 'version'").WillReturnRows(sqlmock.NewRows([]string{"Variable_name", "Value"}).
AddRow("version", "5.7.25-TiDB-v4.0.2"))
mock = t.initMockDB(c)
mock.ExpectBegin()
mock.ExpectExec(fmt.Sprintf("DROP TABLE IF EXISTS `%s`.`%s`", cfg.MetaSchema, cputil.LoaderCheckpoint(cfg.Name))).WillReturnResult(sqlmock.NewResult(1, 1))
mock.ExpectExec(fmt.Sprintf("DROP TABLE IF EXISTS `%s`.`%s`", cfg.MetaSchema, cputil.SyncerCheckpoint(cfg.Name))).WillReturnResult(sqlmock.NewResult(1, 1))
mock.ExpectExec(fmt.Sprintf("DROP TABLE IF EXISTS `%s`.`%s`", cfg.MetaSchema, cputil.SyncerShardMeta(cfg.Name))).WillReturnResult(sqlmock.NewResult(1, 1))
mock.ExpectExec(fmt.Sprintf("DROP TABLE IF EXISTS `%s`.`%s`", cfg.MetaSchema, cputil.SyncerOnlineDDL(cfg.Name))).WillReturnResult(sqlmock.NewResult(1, 1))
mock.ExpectCommit()
c.Assert(len(server.optimist.Locks()), check.Greater, 0)
resp, err = server.StartTask(context.Background(), req)
wg.Add(1)
go func() {
defer wg.Done()
time.Sleep(10 * time.Microsecond)
// start another same task at the same time, should get err
vermock2 := t.initVersionDB(c)
vermock2.ExpectQuery("SHOW GLOBAL VARIABLES LIKE 'version'").WillReturnRows(sqlmock.NewRows([]string{"Variable_name", "Value"}).
AddRow("version", "5.7.25-TiDB-v4.0.2"))
resp1, err1 := server.StartTask(context.Background(), req)
c.Assert(err1, check.IsNil)
c.Assert(resp1.Result, check.IsFalse)
c.Assert(resp1.Msg, check.Equals, terror.Annotate(terror.ErrSchedulerSubTaskExist.Generate(cfg.Name, sources),
"while remove-meta is true").Error())
}()
c.Assert(err, check.IsNil)
c.Assert(resp.Result, check.IsTrue)
for _, source := range sources {
t.subTaskStageMatch(c, server.scheduler, taskName, source, pb.Stage_Running)
tcm, _, err2 := ha.GetSubTaskCfg(etcdTestCli, source, taskName, 0)
c.Assert(err2, check.IsNil)
c.Assert(tcm, check.HasKey, taskName)
c.Assert(tcm[taskName].Name, check.Equals, taskName)
c.Assert(tcm[taskName].SourceID, check.Equals, source)
}
c.Assert(server.optimist.Locks(), check.HasLen, 0)
if err = mock.ExpectationsWereMet(); err != nil {
c.Errorf("db unfulfilled expectations: %s", err)
}
ifm2, _, err := optimism.GetAllInfo(etcdTestCli)
c.Assert(err, check.IsNil)
c.Assert(ifm2, check.HasLen, 0)
opm2, _, err := optimism.GetAllOperations(etcdTestCli)
c.Assert(err, check.IsNil)
c.Assert(opm2, check.HasLen, 0)
tbm, _, err := optimism.GetAllSourceTables(etcdTestCli)
c.Assert(err, check.IsNil)
c.Assert(tbm, check.HasLen, 0)
clearSchedulerEnv(c, cancel, &wg)
}
func (t *testMaster) TestOperateTask(c *check.C) {
var (
taskName = "unit-test-task"
pauseOp = pb.TaskOp_Pause
)
ctrl := gomock.NewController(c)
defer ctrl.Finish()
server := testDefaultMasterServer(c)
sources, workers := defaultWorkerSource()
// test operate-task with invalid task name
resp, err := server.OperateTask(context.Background(), &pb.OperateTaskRequest{
Op: pauseOp,
Name: taskName,
})
c.Assert(err, check.IsNil)
c.Assert(resp.Result, check.IsFalse)
c.Assert(resp.Msg, check.Equals, fmt.Sprintf("task %s has no source or not exist, please check the task name and status", taskName))
// 1. start task
taskName = "test"
var wg sync.WaitGroup
ctx, cancel := context.WithCancel(context.Background())
startReq := &pb.StartTaskRequest{
Task: taskConfig,
Sources: sources,
}
pauseReq := &pb.OperateTaskRequest{
Op: pauseOp,
Name: taskName,
}
resumeReq := &pb.OperateTaskRequest{
Op: pb.TaskOp_Resume,
Name: taskName,
}
stopReq1 := &pb.OperateTaskRequest{
Op: pb.TaskOp_Stop,
Name: taskName,
Sources: []string{sources[0]},
}
stopReq2 := &pb.OperateTaskRequest{
Op: pb.TaskOp_Stop,
Name: taskName,
}
sourceResps := []*pb.CommonWorkerResponse{{Result: true, Source: sources[0]}, {Result: true, Source: sources[1]}}
server.scheduler, _ = testMockScheduler(ctx, &wg, c, sources, workers, "",
makeWorkerClientsForHandle(ctrl, taskName, sources, workers, startReq, pauseReq, resumeReq, stopReq1, stopReq2))
mock := t.initVersionDB(c)
defer func() {
conn.DefaultDBProvider = &conn.DefaultDBProviderImpl{}
}()
mock.ExpectQuery("SHOW GLOBAL VARIABLES LIKE 'version'").WillReturnRows(sqlmock.NewRows([]string{"Variable_name", "Value"}).
AddRow("version", "5.7.25-TiDB-v4.0.2"))
stResp, err := server.StartTask(context.Background(), startReq)
c.Assert(err, check.IsNil)
c.Assert(stResp.Result, check.IsTrue)
for _, source := range sources {
t.subTaskStageMatch(c, server.scheduler, taskName, source, pb.Stage_Running)
}
c.Assert(stResp.Sources, check.DeepEquals, sourceResps)
// 2. pause task
resp, err = server.OperateTask(context.Background(), pauseReq)
c.Assert(err, check.IsNil)
c.Assert(resp.Result, check.IsTrue)
for _, source := range sources {
t.subTaskStageMatch(c, server.scheduler, taskName, source, pb.Stage_Paused)
}
c.Assert(resp.Sources, check.DeepEquals, sourceResps)
// 3. resume task
resp, err = server.OperateTask(context.Background(), resumeReq)
c.Assert(err, check.IsNil)
c.Assert(resp.Result, check.IsTrue)
for _, source := range sources {
t.subTaskStageMatch(c, server.scheduler, taskName, source, pb.Stage_Running)
}
c.Assert(resp.Sources, check.DeepEquals, sourceResps)
// 4. test stop task successfully, remove partial sources
resp, err = server.OperateTask(context.Background(), stopReq1)
c.Assert(err, check.IsNil)
c.Assert(resp.Result, check.IsTrue)
c.Assert(server.getTaskResources(taskName), check.DeepEquals, []string{sources[1]})
c.Assert(resp.Sources, check.DeepEquals, []*pb.CommonWorkerResponse{{Result: true, Source: sources[0]}})
// 5. test stop task successfully, remove all workers
resp, err = server.OperateTask(context.Background(), stopReq2)
c.Assert(err, check.IsNil)
c.Assert(resp.Result, check.IsTrue)
c.Assert(len(server.getTaskResources(taskName)), check.Equals, 0)
c.Assert(resp.Sources, check.DeepEquals, []*pb.CommonWorkerResponse{{Result: true, Source: sources[1]}})
clearSchedulerEnv(c, cancel, &wg)
}
func (t *testMaster) TestPurgeWorkerRelay(c *check.C) {
ctrl := gomock.NewController(c)
defer ctrl.Finish()
server := testDefaultMasterServer(c)
sources, workers := defaultWorkerSource()
var (
now = time.Now().Unix()
filename = "mysql-bin.000005"
)
// mock PurgeRelay request
mockPurgeRelay := func(rpcSuccess bool) {
for i, worker := range workers {
rets := []interface{}{
nil,
errors.New(errGRPCFailed),
}
if rpcSuccess {
rets = []interface{}{
&pb.CommonWorkerResponse{
Result: true,
Source: sources[i],
},
nil,
}
}
mockWorkerClient := pbmock.NewMockWorkerClient(ctrl)
mockWorkerClient.EXPECT().PurgeRelay(
gomock.Any(),
&pb.PurgeRelayRequest{
Time: now,
Filename: filename,
},
).Return(rets...)
t.workerClients[worker] = newMockRPCClient(mockWorkerClient)
}
}
// test PurgeWorkerRelay with invalid dm-worker[s]
resp, err := server.PurgeWorkerRelay(context.Background(), &pb.PurgeWorkerRelayRequest{
Sources: []string{"invalid-source1", "invalid-source2"},
Time: now,
Filename: filename,
})
c.Assert(err, check.IsNil)
c.Assert(resp.Result, check.IsTrue)
c.Assert(resp.Sources, check.HasLen, 2)
for _, w := range resp.Sources {
c.Assert(w.Result, check.IsFalse)
c.Assert(w.Msg, check.Matches, ".*relevant worker-client not found")
}
var wg sync.WaitGroup
ctx, cancel := context.WithCancel(context.Background())
// test PurgeWorkerRelay successfully
mockPurgeRelay(true)
server.scheduler, _ = testMockScheduler(ctx, &wg, c, sources, workers, "", t.workerClients)
resp, err = server.PurgeWorkerRelay(context.Background(), &pb.PurgeWorkerRelayRequest{
Sources: sources,
Time: now,
Filename: filename,
})
c.Assert(err, check.IsNil)
c.Assert(resp.Result, check.IsTrue)
c.Assert(resp.Sources, check.HasLen, 2)
for _, w := range resp.Sources {
c.Assert(w.Result, check.IsTrue)
}
clearSchedulerEnv(c, cancel, &wg)
ctx, cancel = context.WithCancel(context.Background())
// test PurgeWorkerRelay with error response
mockPurgeRelay(false)
server.scheduler, _ = testMockScheduler(ctx, &wg, c, sources, workers, "", t.workerClients)
resp, err = server.PurgeWorkerRelay(context.Background(), &pb.PurgeWorkerRelayRequest{
Sources: sources,
Time: now,
Filename: filename,
})
c.Assert(err, check.IsNil)
c.Assert(resp.Result, check.IsTrue)
c.Assert(resp.Sources, check.HasLen, 2)
for _, w := range resp.Sources {
c.Assert(w.Result, check.IsFalse)
c.Assert(w.Msg, check.Matches, errGRPCFailedReg)
}
clearSchedulerEnv(c, cancel, &wg)
}
func (t *testMaster) TestOperateWorkerRelayTask(c *check.C) {
ctrl := gomock.NewController(c)
defer ctrl.Finish()
server := testDefaultMasterServer(c)
sources, workers := defaultWorkerSource()
var wg sync.WaitGroup
ctx, cancel := context.WithCancel(context.Background())
pauseReq := &pb.OperateWorkerRelayRequest{
Sources: sources,
Op: pb.RelayOp_PauseRelay,
}
resumeReq := &pb.OperateWorkerRelayRequest{
Sources: sources,
Op: pb.RelayOp_ResumeRelay,
}
server.scheduler, _ = testMockScheduler(ctx, &wg, c, sources, workers, "",
makeWorkerClientsForHandle(ctrl, "", sources, workers, pauseReq, resumeReq))
// test OperateWorkerRelayTask with invalid dm-worker[s]
resp, err := server.OperateWorkerRelayTask(context.Background(), &pb.OperateWorkerRelayRequest{
Sources: []string{"invalid-source1", "invalid-source2"},
Op: pb.RelayOp_PauseRelay,
})
c.Assert(err, check.IsNil)
c.Assert(resp.Result, check.IsFalse)
c.Assert(resp.Msg, check.Matches, `[\s\S]*need to update expectant relay stage not exist[\s\S]*`)
sourceResps := []*pb.CommonWorkerResponse{{Result: true, Source: sources[0]}, {Result: true, Source: sources[1]}}
// 1. test pause-relay successfully
resp, err = server.OperateWorkerRelayTask(context.Background(), pauseReq)
c.Assert(err, check.IsNil)
c.Assert(resp.Result, check.IsTrue)
for _, source := range sources {
t.relayStageMatch(c, server.scheduler, source, pb.Stage_Paused)
}
c.Assert(resp.Sources, check.DeepEquals, sourceResps)
// 2. test resume-relay successfully
resp, err = server.OperateWorkerRelayTask(context.Background(), resumeReq)
c.Assert(err, check.IsNil)
c.Assert(resp.Result, check.IsTrue)
for _, source := range sources {
t.relayStageMatch(c, server.scheduler, source, pb.Stage_Running)
}
c.Assert(resp.Sources, check.DeepEquals, sourceResps)
clearSchedulerEnv(c, cancel, &wg)
}
func (t *testMaster) TestServer(c *check.C) {
cfg := NewConfig()
c.Assert(cfg.Parse([]string{"-config=./dm-master.toml"}), check.IsNil)
cfg.PeerUrls = "http://127.0.0.1:8294"
cfg.DataDir = c.MkDir()
cfg.MasterAddr = tempurl.Alloc()[len("http://"):]
s := NewServer(cfg)
ctx, cancel := context.WithCancel(context.Background())
err1 := s.Start(ctx)
c.Assert(err1, check.IsNil)
t.testHTTPInterface(c, fmt.Sprintf("http://%s/status", cfg.MasterAddr), []byte(utils.GetRawInfo()))
t.testHTTPInterface(c, fmt.Sprintf("http://%s/debug/pprof/", cfg.MasterAddr), []byte("Types of profiles available"))
// HTTP API in this unit test is unstable, but we test it in `http_apis` in integration test.
//t.testHTTPInterface(c, fmt.Sprintf("http://%s/apis/v1alpha1/status/test-task", cfg.MasterAddr), []byte("task test-task has no source or not exist"))
dupServer := NewServer(cfg)
err := dupServer.Start(ctx)
c.Assert(terror.ErrMasterStartEmbedEtcdFail.Equal(err), check.IsTrue)
c.Assert(err.Error(), check.Matches, ".*bind: address already in use.*")
// close
cancel()
s.Close()
c.Assert(utils.WaitSomething(30, 100*time.Millisecond, func() bool {
return s.closed.Get()
}), check.IsTrue)
}
func (t *testMaster) TestMasterTLS(c *check.C) {
masterAddr := tempurl.Alloc()[len("http://"):]
peerAddr := tempurl.Alloc()[len("http://"):]
// all with `https://` prefix
cfg := NewConfig()
c.Assert(cfg.Parse([]string{
"--name=master-tls",
fmt.Sprintf("--data-dir=%s", c.MkDir()),
fmt.Sprintf("--master-addr=https://%s", masterAddr),
fmt.Sprintf("--advertise-addr=https://%s", masterAddr),
fmt.Sprintf("--peer-urls=https://%s", peerAddr),
fmt.Sprintf("--advertise-peer-urls=https://%s", peerAddr),
fmt.Sprintf("--initial-cluster=master-tls=https://%s", peerAddr),
"--ssl-ca=./tls_for_test/ca.pem",
"--ssl-cert=./tls_for_test/dm.pem",
"--ssl-key=./tls_for_test/dm.key",
}), check.IsNil)
t.testTLSPrefix(c, cfg)
c.Assert(cfg.MasterAddr, check.Equals, masterAddr)
c.Assert(cfg.AdvertiseAddr, check.Equals, masterAddr)
c.Assert(cfg.PeerUrls, check.Equals, "https://"+peerAddr)
c.Assert(cfg.AdvertisePeerUrls, check.Equals, "https://"+peerAddr)
c.Assert(cfg.InitialCluster, check.Equals, "master-tls=https://"+peerAddr)
// no `https://` prefix for `--master-addr`
cfg = NewConfig()
c.Assert(cfg.Parse([]string{
"--name=master-tls",
fmt.Sprintf("--data-dir=%s", c.MkDir()),
fmt.Sprintf("--master-addr=%s", masterAddr),
fmt.Sprintf("--advertise-addr=https://%s", masterAddr),
fmt.Sprintf("--peer-urls=https://%s", peerAddr),
fmt.Sprintf("--advertise-peer-urls=https://%s", peerAddr),
fmt.Sprintf("--initial-cluster=master-tls=https://%s", peerAddr),
"--ssl-ca=./tls_for_test/ca.pem",
"--ssl-cert=./tls_for_test/dm.pem",
"--ssl-key=./tls_for_test/dm.key",
}), check.IsNil)
t.testTLSPrefix(c, cfg)
// no `https://` prefix for `--master-addr` and `--advertise-addr`
cfg = NewConfig()
c.Assert(cfg.Parse([]string{
"--name=master-tls",
fmt.Sprintf("--data-dir=%s", c.MkDir()),
fmt.Sprintf("--master-addr=%s", masterAddr),
fmt.Sprintf("--advertise-addr=%s", masterAddr),
fmt.Sprintf("--peer-urls=https://%s", peerAddr),
fmt.Sprintf("--advertise-peer-urls=https://%s", peerAddr),
fmt.Sprintf("--initial-cluster=master-tls=https://%s", peerAddr),
"--ssl-ca=./tls_for_test/ca.pem",
"--ssl-cert=./tls_for_test/dm.pem",
"--ssl-key=./tls_for_test/dm.key",
}), check.IsNil)
t.testTLSPrefix(c, cfg)
// no `https://` prefix for `--master-addr`, `--advertise-addr` and `--peer-urls`
cfg = NewConfig()
c.Assert(cfg.Parse([]string{
"--name=master-tls",
fmt.Sprintf("--data-dir=%s", c.MkDir()),
fmt.Sprintf("--master-addr=%s", masterAddr),
fmt.Sprintf("--advertise-addr=%s", masterAddr),
fmt.Sprintf("--peer-urls=%s", peerAddr),
fmt.Sprintf("--advertise-peer-urls=https://%s", peerAddr),
fmt.Sprintf("--initial-cluster=master-tls=https://%s", peerAddr),
"--ssl-ca=./tls_for_test/ca.pem",
"--ssl-cert=./tls_for_test/dm.pem",
"--ssl-key=./tls_for_test/dm.key",
}), check.IsNil)
t.testTLSPrefix(c, cfg)
// no `https://` prefix for `--master-addr`, `--advertise-addr`, `--peer-urls` and `--advertise-peer-urls`
cfg = NewConfig()
c.Assert(cfg.Parse([]string{
"--name=master-tls",
fmt.Sprintf("--data-dir=%s", c.MkDir()),
fmt.Sprintf("--master-addr=%s", masterAddr),
fmt.Sprintf("--advertise-addr=%s", masterAddr),
fmt.Sprintf("--peer-urls=%s", peerAddr),
fmt.Sprintf("--advertise-peer-urls=%s", peerAddr),
fmt.Sprintf("--initial-cluster=master-tls=https://%s", peerAddr),
"--ssl-ca=./tls_for_test/ca.pem",
"--ssl-cert=./tls_for_test/dm.pem",
"--ssl-key=./tls_for_test/dm.key",
}), check.IsNil)
t.testTLSPrefix(c, cfg)
// all without `https://`/`http://` prefix
cfg = NewConfig()
c.Assert(cfg.Parse([]string{
"--name=master-tls",
fmt.Sprintf("--data-dir=%s", c.MkDir()),
fmt.Sprintf("--master-addr=%s", masterAddr),
fmt.Sprintf("--advertise-addr=%s", masterAddr),
fmt.Sprintf("--peer-urls=%s", peerAddr),
fmt.Sprintf("--advertise-peer-urls=%s", peerAddr),
fmt.Sprintf("--initial-cluster=master-tls=%s", peerAddr),
"--ssl-ca=./tls_for_test/ca.pem",
"--ssl-cert=./tls_for_test/dm.pem",
"--ssl-key=./tls_for_test/dm.key",
}), check.IsNil)
t.testTLSPrefix(c, cfg)
c.Assert(cfg.MasterAddr, check.Equals, masterAddr)
c.Assert(cfg.AdvertiseAddr, check.Equals, masterAddr)
c.Assert(cfg.PeerUrls, check.Equals, "https://"+peerAddr)
c.Assert(cfg.AdvertisePeerUrls, check.Equals, "https://"+peerAddr)
c.Assert(cfg.InitialCluster, check.Equals, "master-tls=https://"+peerAddr)
// all with `http://` prefix, but with TLS enabled.
cfg = NewConfig()
c.Assert(cfg.Parse([]string{
"--name=master-tls",
fmt.Sprintf("--data-dir=%s", c.MkDir()),
fmt.Sprintf("--master-addr=http://%s", masterAddr),
fmt.Sprintf("--advertise-addr=http://%s", masterAddr),
fmt.Sprintf("--peer-urls=http://%s", peerAddr),
fmt.Sprintf("--advertise-peer-urls=http://%s", peerAddr),
fmt.Sprintf("--initial-cluster=master-tls=http://%s", peerAddr),
"--ssl-ca=./tls_for_test/ca.pem",
"--ssl-cert=./tls_for_test/dm.pem",
"--ssl-key=./tls_for_test/dm.key",
}), check.IsNil)
c.Assert(cfg.MasterAddr, check.Equals, masterAddr)
c.Assert(cfg.AdvertiseAddr, check.Equals, masterAddr)
c.Assert(cfg.PeerUrls, check.Equals, "https://"+peerAddr)
c.Assert(cfg.AdvertisePeerUrls, check.Equals, "https://"+peerAddr)
c.Assert(cfg.InitialCluster, check.Equals, "master-tls=https://"+peerAddr)
// different prefix for `--peer-urls` and `--initial-cluster`
cfg = NewConfig()
c.Assert(cfg.Parse([]string{
"--name=master-tls",
fmt.Sprintf("--data-dir=%s", c.MkDir()),
fmt.Sprintf("--master-addr=https://%s", masterAddr),
fmt.Sprintf("--advertise-addr=https://%s", masterAddr),
fmt.Sprintf("--peer-urls=https://%s", peerAddr),
fmt.Sprintf("--advertise-peer-urls=https://%s", peerAddr),
fmt.Sprintf("--initial-cluster=master-tls=http://%s", peerAddr),
"--ssl-ca=./tls_for_test/ca.pem",
"--ssl-cert=./tls_for_test/dm.pem",
"--ssl-key=./tls_for_test/dm.key",
}), check.IsNil)
c.Assert(cfg.MasterAddr, check.Equals, masterAddr)
c.Assert(cfg.AdvertiseAddr, check.Equals, masterAddr)
c.Assert(cfg.PeerUrls, check.Equals, "https://"+peerAddr)
c.Assert(cfg.AdvertisePeerUrls, check.Equals, "https://"+peerAddr)
c.Assert(cfg.InitialCluster, check.Equals, "master-tls=https://"+peerAddr)
t.testTLSPrefix(c, cfg)
}
func (t *testMaster) testTLSPrefix(c *check.C, cfg *Config) {
s := NewServer(cfg)
ctx, cancel := context.WithCancel(context.Background())
err1 := s.Start(ctx)
c.Assert(err1, check.IsNil)
t.testHTTPInterface(c, fmt.Sprintf("https://%s/status", cfg.AdvertiseAddr), []byte(utils.GetRawInfo()))
t.testHTTPInterface(c, fmt.Sprintf("https://%s/debug/pprof/", cfg.AdvertiseAddr), []byte("Types of profiles available"))
// close
cancel()
s.Close()
c.Assert(utils.WaitSomething(30, 100*time.Millisecond, func() bool {
return s.closed.Get()
}), check.IsTrue)
}
func (t *testMaster) testHTTPInterface(c *check.C, url string, contain []byte) {
// we use HTTPS in some test cases.
tls, err := toolutils.NewTLS("./tls_for_test/ca.pem", "./tls_for_test/dm.pem", "./tls_for_test/dm.key", url, []string{})
c.Assert(err, check.IsNil)
cli := toolutils.ClientWithTLS(tls.TLSConfig())
resp, err := cli.Get(url)
c.Assert(err, check.IsNil)
defer resp.Body.Close()
c.Assert(resp.StatusCode, check.Equals, 200)
body, err := ioutil.ReadAll(resp.Body)
c.Assert(err, check.IsNil)
c.Assert(bytes.Contains(body, contain), check.IsTrue)
}
func (t *testMaster) TestJoinMember(c *check.C) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
// create a new cluster
cfg1 := NewConfig()
c.Assert(cfg1.Parse([]string{"-config=./dm-master.toml"}), check.IsNil)
cfg1.Name = "dm-master-1"
cfg1.DataDir = c.MkDir()
cfg1.MasterAddr = tempurl.Alloc()[len("http://"):]
cfg1.PeerUrls = tempurl.Alloc()
cfg1.AdvertisePeerUrls = cfg1.PeerUrls
cfg1.InitialCluster = fmt.Sprintf("%s=%s", cfg1.Name, cfg1.AdvertisePeerUrls)
s1 := NewServer(cfg1)
c.Assert(s1.Start(ctx), check.IsNil)
defer s1.Close()
// wait for the first one to become the leader
c.Assert(utils.WaitSomething(30, 100*time.Millisecond, func() bool {
return s1.election.IsLeader()
}), check.IsTrue)
// join to an existing cluster
cfg2 := NewConfig()
c.Assert(cfg2.Parse([]string{"-config=./dm-master.toml"}), check.IsNil)
cfg2.Name = "dm-master-2"
cfg2.DataDir = c.MkDir()
cfg2.MasterAddr = tempurl.Alloc()[len("http://"):]
cfg2.PeerUrls = tempurl.Alloc()
cfg2.AdvertisePeerUrls = cfg2.PeerUrls
cfg2.Join = cfg1.MasterAddr // join to an existing cluster
s2 := NewServer(cfg2)
c.Assert(s2.Start(ctx), check.IsNil)
defer s2.Close()
client, err := etcdutil.CreateClient(strings.Split(cfg1.AdvertisePeerUrls, ","), nil)
c.Assert(err, check.IsNil)
defer client.Close()
// verify members
listResp, err := etcdutil.ListMembers(client)
c.Assert(err, check.IsNil)
c.Assert(listResp.Members, check.HasLen, 2)
names := make(map[string]struct{}, len(listResp.Members))
for _, m := range listResp.Members {
names[m.Name] = struct{}{}
}
_, ok := names[cfg1.Name]
c.Assert(ok, check.IsTrue)
_, ok = names[cfg2.Name]
c.Assert(ok, check.IsTrue)
// s1 is still the leader
_, leaderID, _, err := s2.election.LeaderInfo(ctx)
c.Assert(err, check.IsNil)
c.Assert(leaderID, check.Equals, cfg1.Name)
cancel()
clearEtcdEnv(c)
}
func (t *testMaster) TestOperateSource(c *check.C) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
ctrl := gomock.NewController(c)
defer ctrl.Finish()
// create a new cluster
cfg1 := NewConfig()
c.Assert(cfg1.Parse([]string{"-config=./dm-master.toml"}), check.IsNil)
cfg1.Name = "dm-master-1"
cfg1.DataDir = c.MkDir()
cfg1.MasterAddr = tempurl.Alloc()[len("http://"):]
cfg1.AdvertiseAddr = tempurl.Alloc()[len("http://"):]
cfg1.PeerUrls = tempurl.Alloc()
cfg1.AdvertisePeerUrls = cfg1.PeerUrls
cfg1.InitialCluster = fmt.Sprintf("%s=%s", cfg1.Name, cfg1.AdvertisePeerUrls)
s1 := NewServer(cfg1)
s1.leader.Set(oneselfLeader)
c.Assert(s1.Start(ctx), check.IsNil)
defer s1.Close()
mysqlCfg := config.NewSourceConfig()
c.Assert(mysqlCfg.LoadFromFile("./source.yaml"), check.IsNil)
mysqlCfg.From.Password = os.Getenv("MYSQL_PSWD")
task, err := mysqlCfg.Yaml()
c.Assert(err, check.IsNil)
sourceID := mysqlCfg.SourceID
// 1. wait for scheduler to start
time.Sleep(3 * time.Second)
// 2. try to add a new mysql source
req := &pb.OperateSourceRequest{Op: pb.SourceOp_StartSource, Config: []string{task}}
resp, err := s1.OperateSource(ctx, req)
c.Assert(err, check.IsNil)
c.Assert(resp.Result, check.Equals, true)
c.Assert(resp.Sources, check.DeepEquals, []*pb.CommonWorkerResponse{{
Result: true,
Msg: "source is added but there is no free worker to bound",
Source: sourceID,
}})
unBoundSources := s1.scheduler.UnboundSources()
c.Assert(unBoundSources, check.HasLen, 1)
c.Assert(unBoundSources[0], check.Equals, sourceID)
// 3. try to add multiple sources
// 3.1 duplicated source id
sourceID2 := "mysql-replica-02"
mysqlCfg.SourceID = sourceID2
task2, err := mysqlCfg.Yaml()
c.Assert(err, check.IsNil)
req = &pb.OperateSourceRequest{Op: pb.SourceOp_StartSource, Config: []string{task2, task2}}
resp, err = s1.OperateSource(ctx, req)
c.Assert(err, check.IsNil)
c.Assert(resp.Result, check.Equals, false)
c.Assert(resp.Msg, check.Matches, ".*source config with ID "+sourceID2+" already exists.*")
// 3.2 run the same command after correcting the duplicated ID
sourceID3 := "mysql-replica-03"
mysqlCfg.SourceID = sourceID3
task3, err := mysqlCfg.Yaml()
c.Assert(err, check.IsNil)
req = &pb.OperateSourceRequest{Op: pb.SourceOp_StartSource, Config: []string{task2, task3}}
resp, err = s1.OperateSource(ctx, req)
c.Assert(err, check.IsNil)
c.Assert(resp.Result, check.Equals, true)
sort.Slice(resp.Sources, func(i, j int) bool {
return resp.Sources[i].Source < resp.Sources[j].Source
})
c.Assert(resp.Sources, check.DeepEquals, []*pb.CommonWorkerResponse{{
Result: true,
Msg: "source is added but there is no free worker to bound",
Source: sourceID2,
}, {
Result: true,
Msg: "source is added but there is no free worker to bound",
Source: sourceID3,
}})
unBoundSources = s1.scheduler.UnboundSources()
c.Assert(unBoundSources, check.HasLen, 3)
c.Assert(unBoundSources[0], check.Equals, sourceID)
c.Assert(unBoundSources[1], check.Equals, sourceID2)
c.Assert(unBoundSources[2], check.Equals, sourceID3)
// 4. try to stop a non-existent source
req.Op = pb.SourceOp_StopSource
mysqlCfg.SourceID = "not-exist-source"
task4, err := mysqlCfg.Yaml()
c.Assert(err, check.IsNil)
req.Config = []string{task4}
resp, err = s1.OperateSource(ctx, req)
c.Assert(err, check.IsNil)
c.Assert(resp.Result, check.Equals, false)
c.Assert(resp.Msg, check.Matches, `[\s\S]*source config with ID `+mysqlCfg.SourceID+` not exists[\s\S]*`)
// 5. start workers, the unbound sources should be bound
var wg sync.WaitGroup
ctx1, cancel1 := context.WithCancel(ctx)
ctx2, cancel2 := context.WithCancel(ctx)
ctx3, cancel3 := context.WithCancel(ctx)
workerName1 := "worker1"
workerName2 := "worker2"
workerName3 := "worker3"
defer func() {
clearSchedulerEnv(c, cancel1, &wg)
clearSchedulerEnv(c, cancel2, &wg)
clearSchedulerEnv(c, cancel3, &wg)
}()
c.Assert(s1.scheduler.AddWorker(workerName1, "172.16.10.72:8262"), check.IsNil)
wg.Add(1)
go func(ctx context.Context, workerName string) {
defer wg.Done()
c.Assert(ha.KeepAlive(ctx, s1.etcdClient, workerName, keepAliveTTL), check.IsNil)
}(ctx1, workerName1)
c.Assert(s1.scheduler.AddWorker(workerName2, "172.16.10.72:8263"), check.IsNil)
wg.Add(1)
go func(ctx context.Context, workerName string) {
defer wg.Done()
c.Assert(ha.KeepAlive(ctx, s1.etcdClient, workerName, keepAliveTTL), check.IsNil)
}(ctx2, workerName2)
c.Assert(s1.scheduler.AddWorker(workerName3, "172.16.10.72:8264"), check.IsNil)
wg.Add(1)
go func(ctx context.Context, workerName string) {
defer wg.Done()
c.Assert(ha.KeepAlive(ctx, s1.etcdClient, workerName, keepAliveTTL), check.IsNil)
}(ctx3, workerName3)
c.Assert(utils.WaitSomething(30, 100*time.Millisecond, func() bool {
w := s1.scheduler.GetWorkerBySource(sourceID)
return w != nil
}), check.IsTrue)
// 6. stop sources
req.Config = []string{task, task2, task3}
req.Op = pb.SourceOp_StopSource
mockWorkerClient := pbmock.NewMockWorkerClient(ctrl)
mockRevelantWorkerClient(mockWorkerClient, "", sourceID, req)
s1.scheduler.SetWorkerClientForTest(workerName1, newMockRPCClient(mockWorkerClient))
mockWorkerClient2 := pbmock.NewMockWorkerClient(ctrl)
mockRevelantWorkerClient(mockWorkerClient2, "", sourceID2, req)
s1.scheduler.SetWorkerClientForTest(workerName2, newMockRPCClient(mockWorkerClient2))
mockWorkerClient3 := pbmock.NewMockWorkerClient(ctrl)
mockRevelantWorkerClient(mockWorkerClient3, "", sourceID3, req)
s1.scheduler.SetWorkerClientForTest(workerName3, newMockRPCClient(mockWorkerClient3))
resp, err = s1.OperateSource(ctx, req)
c.Assert(err, check.IsNil)
c.Assert(resp.Result, check.Equals, true)
c.Assert(resp.Sources, check.DeepEquals, []*pb.CommonWorkerResponse{{
Result: true,
Source: sourceID,
}, {
Result: true,
Source: sourceID2,
}, {
Result: true,
Source: sourceID3,
}})
scm, _, err := ha.GetSourceCfg(etcdTestCli, sourceID, 0)
c.Assert(err, check.IsNil)
c.Assert(scm, check.HasLen, 0)
clearSchedulerEnv(c, cancel, &wg)
}
func generateServerConfig(c *check.C, name string) *Config {
// create a new cluster
cfg1 := NewConfig()
c.Assert(cfg1.Parse([]string{"-config=./dm-master.toml"}), check.IsNil)
cfg1.Name = name
cfg1.DataDir = c.MkDir()
cfg1.MasterAddr = tempurl.Alloc()[len("http://"):]
cfg1.AdvertiseAddr = cfg1.MasterAddr
cfg1.PeerUrls = tempurl.Alloc()
cfg1.AdvertisePeerUrls = cfg1.PeerUrls
cfg1.InitialCluster = fmt.Sprintf("%s=%s", cfg1.Name, cfg1.AdvertisePeerUrls)
return cfg1
}
func (t *testMaster) TestOfflineMember(c *check.C) {
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()
cfg1 := generateServerConfig(c, "dm-master-1")
cfg2 := generateServerConfig(c, "dm-master-2")
cfg3 := generateServerConfig(c, "dm-master-3")
initialCluster := fmt.Sprintf("%s=%s", cfg1.Name, cfg1.AdvertisePeerUrls) + "," +
fmt.Sprintf("%s=%s", cfg2.Name, cfg2.AdvertisePeerUrls) + "," +
fmt.Sprintf("%s=%s", cfg3.Name, cfg3.AdvertisePeerUrls)
cfg1.InitialCluster = initialCluster
cfg2.InitialCluster = initialCluster
cfg3.InitialCluster = initialCluster
var wg sync.WaitGroup
s1 := NewServer(cfg1)
defer s1.Close()
wg.Add(1)
go func() {
c.Assert(s1.Start(ctx), check.IsNil)
wg.Done()
}()
s2 := NewServer(cfg2)
defer s2.Close()
wg.Add(1)
go func() {
c.Assert(s2.Start(ctx), check.IsNil)
wg.Done()
}()
ctx3, cancel3 := context.WithCancel(ctx)
s3 := NewServer(cfg3)
c.Assert(s3.Start(ctx3), check.IsNil)
defer s3.Close()
defer cancel3()
wg.Wait()
var leaderID string
// ensure s2 has the right leader info, because it will be used for `OfflineMember`.
c.Assert(utils.WaitSomething(30, 100*time.Millisecond, func() bool {
s2.RLock()
leader := s2.leader.Get()
s2.RUnlock()
if leader == "" {
return false
}
if leader == oneselfLeader {
leaderID = s2.cfg.Name
} else {
leaderID = s2.leader.Get()
}
return true
}), check.IsTrue)
// master related operations
req := &pb.OfflineMemberRequest{
Type: "masters",
Name: "xixi",
}
// test offline member with wrong type
resp, err := s2.OfflineMember(ctx, req)
c.Assert(err, check.IsNil)
c.Assert(resp.Result, check.IsFalse)
c.Assert(resp.Msg, check.Equals, terror.ErrMasterInvalidOfflineType.Generate(req.Type).Error())
// test offline member with invalid master name
req.Type = common.Master
resp, err = s2.OfflineMember(ctx, req)
c.Assert(err, check.IsNil)
c.Assert(resp.Result, check.IsFalse)
c.Assert(resp.Msg, check.Matches, `[\s\S]*dm-master with name `+req.Name+` not exists[\s\S]*`)
// test offline member with correct master name
cli := s2.etcdClient
listResp, err := etcdutil.ListMembers(cli)
c.Assert(err, check.IsNil)
c.Assert(listResp.Members, check.HasLen, 3)
// make sure s3 is not the leader, otherwise it will take some time to campaign a new leader after s3 is closed, which may cause a timeout
c.Assert(utils.WaitSomething(20, 500*time.Millisecond, func() bool {
_, leaderID, _, err = s1.election.LeaderInfo(ctx)
if err != nil {
return false
}
if leaderID == s3.cfg.Name {
_, err = s3.OperateLeader(ctx, &pb.OperateLeaderRequest{
Op: pb.LeaderOp_EvictLeaderOp,
})
c.Assert(err, check.IsNil)
}
return leaderID != s3.cfg.Name
}), check.IsTrue)
cancel3()
s3.Close()
req.Name = s3.cfg.Name
resp, err = s2.OfflineMember(ctx, req)
c.Assert(err, check.IsNil)
c.Assert(resp.Msg, check.Equals, "")
c.Assert(resp.Result, check.IsTrue)
listResp, err = etcdutil.ListMembers(cli)
c.Assert(err, check.IsNil)
c.Assert(listResp.Members, check.HasLen, 2)
if listResp.Members[0].Name == cfg2.Name {
listResp.Members[0], listResp.Members[1] = listResp.Members[1], listResp.Members[0]
}
c.Assert(listResp.Members[0].Name, check.Equals, cfg1.Name)
c.Assert(listResp.Members[1].Name, check.Equals, cfg2.Name)
_, leaderID2, _, err := s1.election.LeaderInfo(ctx)
c.Assert(err, check.IsNil)
if leaderID == cfg3.Name {
// s3 was the leader before, so a new leader should be campaigned
c.Assert(leaderID != leaderID2, check.IsTrue)
} else {
// s3 wasn't the leader before, so the leader should stay the same
c.Assert(leaderID, check.Equals, leaderID2)
}
// worker related operations
ectx, canc := context.WithTimeout(ctx, time.Second)
defer canc()
req1 := &pb.RegisterWorkerRequest{
Name: "xixi",
Address: "127.0.0.1:1000",
}
regReq, err := s1.RegisterWorker(ectx, req1)
c.Assert(err, check.IsNil)
c.Assert(regReq.Result, check.IsTrue)
req2 := &pb.OfflineMemberRequest{
Type: common.Worker,
Name: "haha",
}
{
res, err := s1.OfflineMember(ectx, req2)
c.Assert(err, check.IsNil)
c.Assert(res.Result, check.IsFalse)
c.Assert(res.Msg, check.Matches, `[\s\S]*dm-worker with name `+req2.Name+` not exists[\s\S]*`)
}
{
req2.Name = "xixi"
res, err := s1.OfflineMember(ectx, req2)
c.Assert(err, check.IsNil)
c.Assert(res.Result, check.IsTrue)
}
{
// register offline worker again. TICASE-962, 963
resp, err := s1.RegisterWorker(ectx, req1)
c.Assert(err, check.IsNil)
c.Assert(resp.Result, check.IsTrue)
}
clearSchedulerEnv(c, cancel, &wg)
}
func (t *testMaster) TestGetTaskCfg(c *check.C) {
ctrl := gomock.NewController(c)
defer ctrl.Finish()
server := testDefaultMasterServer(c)
sources, workers := defaultWorkerSource()
var wg sync.WaitGroup
taskName := "test"
ctx, cancel := context.WithCancel(context.Background())
req := &pb.StartTaskRequest{
Task: taskConfig,
Sources: sources,
}
server.scheduler, _ = testMockScheduler(ctx, &wg, c, sources, workers, "",
makeWorkerClientsForHandle(ctrl, taskName, sources, workers, req))
// start task
mock := t.initVersionDB(c)
defer func() {
conn.DefaultDBProvider = &conn.DefaultDBProviderImpl{}
}()
mock.ExpectQuery("SHOW GLOBAL VARIABLES LIKE 'version'").WillReturnRows(sqlmock.NewRows([]string{"Variable_name", "Value"}).
AddRow("version", "5.7.25-TiDB-v4.0.2"))
resp, err := server.StartTask(context.Background(), req)
c.Assert(err, check.IsNil)
c.Assert(resp.Result, check.IsTrue)
// get task config
req1 := &pb.GetTaskCfgRequest{
Name: taskName,
}
resp1, err := server.GetTaskCfg(context.Background(), req1)
c.Assert(err, check.IsNil)
c.Assert(resp1.Result, check.IsTrue)
c.Assert(strings.Contains(resp1.Cfg, "name: test"), check.IsTrue)
// wrong task name
req2 := &pb.GetTaskCfgRequest{
Name: "haha",
}
resp2, err := server.GetTaskCfg(context.Background(), req2)
c.Assert(err, check.IsNil)
c.Assert(resp2.Result, check.IsFalse)
// test recover from etcd
server.scheduler.Close()
c.Assert(server.scheduler.Start(ctx, etcdTestCli), check.IsNil)
resp3, err := server.GetTaskCfg(context.Background(), req1)
c.Assert(err, check.IsNil)
c.Assert(resp3.Result, check.IsTrue)
c.Assert(resp3.Cfg, check.Equals, resp1.Cfg)
clearSchedulerEnv(c, cancel, &wg)
}
func stageDeepEqualExcludeRev(c *check.C, stage, expectStage ha.Stage) {
expectStage.Revision = stage.Revision
c.Assert(stage, check.DeepEquals, expectStage)
}
func (t *testMaster) relayStageMatch(c *check.C, s *scheduler.Scheduler, source string, expectStage pb.Stage) {
stage := ha.NewRelayStage(expectStage, source)
stageDeepEqualExcludeRev(c, s.GetExpectRelayStage(source), stage)
eStage, _, err := ha.GetRelayStage(etcdTestCli, source)
c.Assert(err, check.IsNil)
switch expectStage {
case pb.Stage_Running, pb.Stage_Paused:
stageDeepEqualExcludeRev(c, eStage, stage)
}
}
func (t *testMaster) subTaskStageMatch(c *check.C, s *scheduler.Scheduler, task, source string, expectStage pb.Stage) {
stage := ha.NewSubTaskStage(expectStage, source, task)
c.Assert(s.GetExpectSubTaskStage(task, source), check.DeepEquals, stage)
eStageM, _, err := ha.GetSubTaskStage(etcdTestCli, source, task)
c.Assert(err, check.IsNil)
switch expectStage {
case pb.Stage_Running, pb.Stage_Paused:
c.Assert(eStageM, check.HasLen, 1)
stageDeepEqualExcludeRev(c, eStageM[task], stage)
default:
c.Assert(eStageM, check.HasLen, 0)
}
}
func mockRevelantWorkerClient(mockWorkerClient *pbmock.MockWorkerClient, taskName, sourceID string, masterReq interface{}) {
var expect pb.Stage
switch req := masterReq.(type) {
case *pb.OperateSourceRequest:
switch req.Op {
case pb.SourceOp_StartSource, pb.SourceOp_UpdateSource:
expect = pb.Stage_Running
case pb.SourceOp_StopSource:
expect = pb.Stage_Stopped
}
case *pb.StartTaskRequest, *pb.UpdateTaskRequest:
expect = pb.Stage_Running
case *pb.OperateTaskRequest:
switch req.Op {
case pb.TaskOp_Resume:
expect = pb.Stage_Running
case pb.TaskOp_Pause:
expect = pb.Stage_Paused
case pb.TaskOp_Stop:
expect = pb.Stage_Stopped
}
case *pb.OperateWorkerRelayRequest:
switch req.Op {
case pb.RelayOp_ResumeRelay:
expect = pb.Stage_Running
case pb.RelayOp_PauseRelay:
expect = pb.Stage_Paused
case pb.RelayOp_StopRelay:
expect = pb.Stage_Stopped
}
}
queryResp := &pb.QueryStatusResponse{
Result: true,
SourceStatus: &pb.SourceStatus{},
}
switch masterReq.(type) {
case *pb.OperateSourceRequest:
switch expect {
case pb.Stage_Running:
queryResp.SourceStatus = &pb.SourceStatus{Source: sourceID}
case pb.Stage_Stopped:
queryResp.SourceStatus = &pb.SourceStatus{Source: ""}
}
case *pb.StartTaskRequest, *pb.UpdateTaskRequest, *pb.OperateTaskRequest:
queryResp.SubTaskStatus = []*pb.SubTaskStatus{{}}
if expect == pb.Stage_Stopped {
queryResp.SubTaskStatus[0].Status = &pb.SubTaskStatus_Msg{
Msg: fmt.Sprintf("no sub task with name %s has started", taskName),
}
} else {
queryResp.SubTaskStatus[0].Name = taskName
queryResp.SubTaskStatus[0].Stage = expect
}
case *pb.OperateWorkerRelayRequest:
queryResp.SourceStatus = &pb.SourceStatus{RelayStatus: &pb.RelayStatus{Stage: expect}}
}
mockWorkerClient.EXPECT().QueryStatus(
gomock.Any(),
&pb.QueryStatusRequest{
Name: taskName,
},
).Return(queryResp, nil).MaxTimes(maxRetryNum)
}
func createTableInfo(c *check.C, p *parser.Parser, se sessionctx.Context, tableID int64, sql string) *model.TableInfo {
node, err := p.ParseOneStmt(sql, "utf8mb4", "utf8mb4_bin")
if err != nil {
c.Fatalf("fail to parse stmt, %v", err)
}
createStmtNode, ok := node.(*ast.CreateTableStmt)
if !ok {
c.Fatalf("%s is not a CREATE TABLE statement", sql)
}
info, err := tiddl.MockTableInfo(se, createStmtNode, tableID)
if err != nil {
c.Fatalf("fail to create table info, %v", err)
}
return info
}
| ["\"MYSQL_PSWD\""] | [] | ["MYSQL_PSWD"] | [] | ["MYSQL_PSWD"] | go | 1 | 0 |
main.go | package main
import (
gocontext "context"
"io/fs"
"net/http"
"os"
"github.com/flamego/flamego"
"github.com/robfig/cron/v3"
log "unknwon.dev/clog/v2"
"github.com/asoul-sig/asoul-video/frontend"
"github.com/asoul-sig/asoul-video/internal/context"
"github.com/asoul-sig/asoul-video/internal/db"
"github.com/asoul-sig/asoul-video/internal/route"
)
var (
BuildTime string
BuildCommit string
)
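// BuildTime and BuildCommit are meant to be injected at build time via the
// linker; the exact flags are an assumption (not taken from this repository's
// build scripts), but a typical invocation would look like:
//
//	go build -ldflags "-X main.BuildTime=$(date -u +%FT%TZ) -X main.BuildCommit=$(git rev-parse HEAD)"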
func main() {
defer log.Stop()
err := log.NewConsole()
if err != nil {
panic(err)
}
if err := db.Init(); err != nil {
log.Fatal("Failed to connect to database: %v", err)
}
// Register cron task.
ctx := gocontext.Background()
if err := db.Videos.Refresh(ctx); err != nil {
log.Error("Failed to refresh materialized views: %v", err)
}
c := cron.New()
if _, err := c.AddFunc("@every 1h", func() {
if err := db.Videos.Refresh(ctx); err != nil {
log.Error("Failed to refresh materialized views: %v", err)
}
log.Trace("Refresh materialized views.")
}); err != nil {
log.Fatal("Failed to add cron function: %v", err)
}
c.Start()
f := flamego.Classic()
f.Use(func(ctx flamego.Context) {
ctx.ResponseWriter().Header().Set("Access-Control-Allow-Methods", http.MethodGet)
ctx.ResponseWriter().Header().Set("Access-Control-Max-Age", "600")
ctx.ResponseWriter().Header().Set("Access-Control-Allow-Origin", "*")
})
f.Use(context.Contexter())
fe, err := fs.Sub(frontend.FS, "dist")
if err != nil {
log.Fatal("Failed to sub filesystem: %v", err)
}
f.Use(flamego.Static(flamego.StaticOptions{
FileSystem: http.FS(fe),
}))
f.Group("/api", func() {
member := route.NewMemberHandler()
f.Get("/members", member.List)
f.Get("/member/{secUID}", member.GetBySecUID)
video := route.NewVideoHandler()
f.Get("/videos", video.List)
f.Group("/video", func() {
f.Get("/{id}", video.GetByID)
f.Get("/random", video.Random)
})
})
// Crawler report service.
source := route.NewSourceHandler()
f.Group("/source", func() {
f.Post("/report", source.Report)
f.Get("/video_urls", source.VideoURLs)
f.Get("/video_ids", source.VideoIDs)
}, source.VerifyKey(os.Getenv("SOURCE_REPORT_KEY")))
f.Get("/ping", func(ctx context.Context) {
ctx.Success(map[string]interface{}{
"build_time": BuildTime,
"build_commit": BuildCommit,
})
})
f.Run()
}
| ["\"SOURCE_REPORT_KEY\""] | [] | ["SOURCE_REPORT_KEY"] | [] | ["SOURCE_REPORT_KEY"] | go | 1 | 0 |
vendor/github.com/spf13/cobra/doc/man_docs.go | // Copyright 2015 Red Hat Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package doc
import (
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
"github.com/cpuguy83/go-md2man/md2man"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
// GenManTree will generate a man page for this command and all descendants
// in the directory given. The header may be nil. This function may not work
// correctly if your command names have `-` in them. If you have `cmd` with two
// subcmds, `sub` and `sub-third`, and `sub` has a subcommand called `third`
// it is undefined which help output will be in the file `cmd-sub-third.1`.
func GenManTree(cmd *cobra.Command, header *GenManHeader, dir string) error {
return GenManTreeFromOpts(cmd, GenManTreeOptions{
Header: header,
Path: dir,
CommandSeparator: "-",
})
}
// GenManTreeFromOpts generates a man page for the command and all descendants.
// The pages are written to the opts.Path directory.
func GenManTreeFromOpts(cmd *cobra.Command, opts GenManTreeOptions) error {
header := opts.Header
if header == nil {
header = &GenManHeader{}
}
for _, c := range cmd.Commands() {
if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
continue
}
if err := GenManTreeFromOpts(c, opts); err != nil {
return err
}
}
section := "1"
if header.Section != "" {
section = header.Section
}
separator := "_"
if opts.CommandSeparator != "" {
separator = opts.CommandSeparator
}
basename := strings.Replace(cmd.CommandPath(), " ", separator, -1)
filename := filepath.Join(opts.Path, basename+"."+section)
f, err := os.Create(filename)
if err != nil {
return err
}
defer f.Close()
headerCopy := *header
return GenMan(cmd, &headerCopy, f)
}
// GenManTreeOptions is the options for generating the man pages.
// Used only in GenManTreeFromOpts.
type GenManTreeOptions struct {
Header *GenManHeader
Path string
CommandSeparator string
}
// GenManHeader is a lot like the .TH header at the start of man pages. These
// include the title, section, date, source, and manual. We will use the
// current time if Date is unset and will use "Auto generated by spf13/cobra"
// if the Source is unset.
type GenManHeader struct {
Title string
Section string
Date *time.Time
date string
Source string
Manual string
}
// GenMan will generate a man page for the given command and write it to
// w. The header argument may be nil, however obviously w may not.
func GenMan(cmd *cobra.Command, header *GenManHeader, w io.Writer) error {
if header == nil {
header = &GenManHeader{}
}
if err := fillHeader(header, cmd.CommandPath()); err != nil {
return err
}
b := genMan(cmd, header)
_, err := w.Write(md2man.Render(b))
return err
}
func fillHeader(header *GenManHeader, name string) error {
if header.Title == "" {
header.Title = strings.ToUpper(strings.Replace(name, " ", "\\-", -1))
}
if header.Section == "" {
header.Section = "1"
}
if header.Date == nil {
now := time.Now()
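// SOURCE_DATE_EPOCH is a Unix timestamp (in seconds) used by reproducible
// builds to pin the generated date instead of using the current time.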
if epoch := os.Getenv("SOURCE_DATE_EPOCH"); epoch != "" {
unixEpoch, err := strconv.ParseInt(epoch, 10, 64)
if err != nil {
return fmt.Errorf("invalid SOURCE_DATE_EPOCH: %v", err)
}
now = time.Unix(unixEpoch, 0)
}
header.Date = &now
}
header.date = (*header.Date).Format("Jan 2006")
if header.Source == "" {
header.Source = "Auto generated by spf13/cobra"
}
return nil
}
func manPreamble(buf *bytes.Buffer, header *GenManHeader, cmd *cobra.Command, dashedName string) {
description := cmd.Long
if len(description) == 0 {
description = cmd.Short
}
buf.WriteString(fmt.Sprintf(`%% %s(%s)%s
%% %s
%% %s
# NAME
`, header.Title, header.Section, header.date, header.Source, header.Manual))
buf.WriteString(fmt.Sprintf("%s \\- %s\n\n", dashedName, cmd.Short))
buf.WriteString("# SYNOPSIS\n")
buf.WriteString(fmt.Sprintf("**%s**\n\n", cmd.UseLine()))
buf.WriteString("# DESCRIPTION\n")
buf.WriteString(description + "\n\n")
}
func manPrintFlags(buf *bytes.Buffer, flags *pflag.FlagSet) {
flags.VisitAll(func(flag *pflag.Flag) {
if len(flag.Deprecated) > 0 || flag.Hidden {
return
}
format := ""
if len(flag.Shorthand) > 0 && len(flag.ShorthandDeprecated) == 0 {
format = fmt.Sprintf("**-%s**, **--%s**", flag.Shorthand, flag.Name)
} else {
format = fmt.Sprintf("**--%s**", flag.Name)
}
if len(flag.NoOptDefVal) > 0 {
format += "["
}
if flag.Value.Type() == "string" {
// put quotes on the value
format += "=%q"
} else {
format += "=%s"
}
if len(flag.NoOptDefVal) > 0 {
format += "]"
}
format += "\n\t%s\n\n"
buf.WriteString(fmt.Sprintf(format, flag.DefValue, flag.Usage))
})
}
func manPrintOptions(buf *bytes.Buffer, command *cobra.Command) {
flags := command.NonInheritedFlags()
if flags.HasAvailableFlags() {
buf.WriteString("# OPTIONS\n")
manPrintFlags(buf, flags)
buf.WriteString("\n")
}
flags = command.InheritedFlags()
if flags.HasAvailableFlags() {
buf.WriteString("# OPTIONS INHERITED FROM PARENT COMMANDS\n")
manPrintFlags(buf, flags)
buf.WriteString("\n")
}
}
func genMan(cmd *cobra.Command, header *GenManHeader) []byte {
cmd.InitDefaultHelpCmd()
cmd.InitDefaultHelpFlag()
// something like `rootcmd-subcmd1-subcmd2`
dashCommandName := strings.Replace(cmd.CommandPath(), " ", "-", -1)
buf := new(bytes.Buffer)
manPreamble(buf, header, cmd, dashCommandName)
manPrintOptions(buf, cmd)
if len(cmd.Example) > 0 {
buf.WriteString("# EXAMPLE\n")
buf.WriteString(fmt.Sprintf("```\n%s\n```\n", cmd.Example))
}
if hasSeeAlso(cmd) {
buf.WriteString("# SEE ALSO\n")
seealsos := make([]string, 0)
if cmd.HasParent() {
parentPath := cmd.Parent().CommandPath()
dashParentPath := strings.Replace(parentPath, " ", "-", -1)
seealso := fmt.Sprintf("**%s(%s)**", dashParentPath, header.Section)
seealsos = append(seealsos, seealso)
cmd.VisitParents(func(c *cobra.Command) {
if c.DisableAutoGenTag {
cmd.DisableAutoGenTag = c.DisableAutoGenTag
}
})
}
children := cmd.Commands()
sort.Sort(byName(children))
for _, c := range children {
if !c.IsAvailableCommand() || c.IsAdditionalHelpTopicCommand() {
continue
}
seealso := fmt.Sprintf("**%s-%s(%s)**", dashCommandName, c.Name(), header.Section)
seealsos = append(seealsos, seealso)
}
buf.WriteString(strings.Join(seealsos, ", ") + "\n")
}
if !cmd.DisableAutoGenTag {
buf.WriteString(fmt.Sprintf("# HISTORY\n%s Auto generated by spf13/cobra\n", header.Date.Format("2-Jan-2006")))
}
return buf.Bytes()
}
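// genManTreeExample is an illustrative sketch added for clarity; it is not part
// of the upstream cobra package. It shows how an application's documentation
// generator would typically call GenManTree. The header fields, the root
// command variable and the "./man" output directory are assumptions.
func genManTreeExample(rootCmd *cobra.Command) error {
	header := &GenManHeader{
		Title:   "MYAPP",
		Section: "1",
		Source:  "myapp docs generator",
	}
	// One man page per (sub)command is written into ./man, e.g. "myapp-serve.1".
	return GenManTree(rootCmd, header, "./man")
}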
| ["\"SOURCE_DATE_EPOCH\""] | [] | ["SOURCE_DATE_EPOCH"] | [] | ["SOURCE_DATE_EPOCH"] | go | 1 | 0 |
lib/googlecloudsdk/core/util/execution_utils.py | # Copyright 2013 Google Inc. All Rights Reserved.
"""Functions to help with shelling out to other commands."""
import os
import signal
import subprocess
import sys
from googlecloudsdk.core import config
from googlecloudsdk.core import log
def GetPythonExecutable():
"""Gets the path to the Python interpreter that should be used."""
cloudsdk_python = os.environ.get('CLOUDSDK_PYTHON')
if cloudsdk_python:
return cloudsdk_python
python_bin = sys.executable
if not python_bin:
raise ValueError('Could not find Python executable.')
return python_bin
def GetShellExecutable():
"""Gets the path to the Shell that should be used."""
shell = os.getenv('SHELL', None)
shells = ['/bin/bash', '/bin/sh']
if shell:
shells.insert(0, shell)
for s in shells:
if os.path.isfile(s):
return s
raise ValueError("You must set your 'SHELL' environment variable to a "
"valid shell executable to use this tool.")
def _GetToolArgs(interpreter, interpreter_args, executable_path, *args):
tool_args = []
if interpreter:
tool_args.append(interpreter)
if interpreter_args:
tool_args.extend(interpreter_args)
tool_args.append(executable_path)
tool_args.extend(list(args))
return tool_args
def ArgsForPythonTool(executable_path, *args):
"""Constructs an argument list for calling the Python interpreter.
Args:
executable_path: str, The full path to the Python main file.
*args: args for the command
Returns:
An argument list to execute the Python interpreter
"""
python_executable = GetPythonExecutable()
python_args_str = os.environ.get('CLOUDSDK_PYTHON_ARGS', '')
python_args = python_args_str.split()
return _GetToolArgs(
python_executable, python_args, executable_path, *args)
def ArgsForShellTool(executable_path, *args):
"""Constructs an argument list for calling the bash interpreter.
Args:
executable_path: str, The full path to the shell script.
*args: args for the command
Returns:
An argument list to execute the bash interpreter
"""
shell_bin = GetShellExecutable()
return _GetToolArgs(shell_bin, [], executable_path, *args)
def ArgsForCMDTool(executable_path, *args):
"""Constructs an argument list for calling the cmd interpreter.
Args:
executable_path: str, The full path to the cmd script.
*args: args for the command
Returns:
An argument list to execute the cmd interpreter
"""
return _GetToolArgs('cmd', ['/c'], executable_path, *args)
def ArgsForBinaryTool(executable_path, *args):
"""Constructs an argument list for calling a native binary.
Args:
executable_path: str, The full path to the binary.
*args: args for the command
Returns:
An argument list to execute the native binary
"""
return _GetToolArgs(None, None, executable_path, *args)
class _ProcessHolder(object):
PROCESS = None
@staticmethod
# pylint: disable=unused-argument
def Handler(signum, frame):
if _ProcessHolder.PROCESS:
_ProcessHolder.PROCESS.terminate()
ret_val = _ProcessHolder.PROCESS.wait()
sys.exit(ret_val)
def Exec(args, env=None, no_exit=False):
"""Emulates the os.exec* set of commands, but uses subprocess.
This executes the given command, waits for it to finish, and then exits this
process with the exit code of the child process.
Args:
args: [str], The arguments to execute. The first argument is the command.
env: {str: str}, An optional environment for the child process.
no_exit: bool, True to just return the exit code of the child instead of
exiting.
Returns:
int, The exit code of the child if no_exit is True, else this method does
not return.
"""
# We use subprocess instead of execv because windows does not support process
# replacement. The result of execv on windows is that a new processes is
# started and the original is killed. When running in a shell, the prompt
# returns as soon as the parent is killed even though the child is still
# running. subprocess waits for the new process to finish before returning.
signal.signal(signal.SIGTERM, _ProcessHolder.Handler)
p = subprocess.Popen(args, env=env)
_ProcessHolder.PROCESS = p
ret_val = p.wait()
if no_exit:
return ret_val
sys.exit(ret_val)
def RestartGcloud():
"""Calls gcloud again with the same arguments as this invocation and exit."""
gcloud = config.Paths().gcloud_path
gcloud_args = sys.argv[1:]
args = ArgsForPythonTool(gcloud, *tuple(gcloud_args))
log.status.Print('Restarting gcloud command:\n $ gcloud {args}'.format(
args=' '.join(gcloud_args)))
log.debug('Restarting gcloud: %s', args)
log.out.flush()
log.err.flush()
Exec(args)
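# Illustrative sketch (not part of the original module): composing
# ArgsForPythonTool with Exec to re-run a bundled Python tool. The tool path
# and the flag below are assumptions.
def _example_run_bundled_tool():
    # Resolves the interpreter from CLOUDSDK_PYTHON (or sys.executable) and
    # prepends any CLOUDSDK_PYTHON_ARGS before the tool path and its arguments.
    args = ArgsForPythonTool('/path/to/tool.py', '--verbosity', 'info')
    # With no_exit=True, Exec returns the child's exit code instead of exiting.
    return Exec(args, no_exit=True)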
| [] | [] | ["SHELL", "CLOUDSDK_PYTHON_ARGS", "CLOUDSDK_PYTHON"] | [] | ["SHELL", "CLOUDSDK_PYTHON_ARGS", "CLOUDSDK_PYTHON"] | python | 3 | 0 |
bin/plot_W.py | import argparse
from matplotlib import pyplot as plt
plt.switch_backend('agg')
import torch
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "6,7"
parser = argparse.ArgumentParser(description='Plot the W from a CRF model')
parser.add_argument('ckpt_path', default=None, metavar='CKPT_PATH', type=str,
help='Path to the ckpt file of a CRF model')
def main():
args = parser.parse_args()
ckpt = torch.load(args.ckpt_path, map_location={'cuda:6':'cuda:7'})
W = ckpt['state_dict']['crf.W'].cpu().numpy()[0].reshape((3, 3, 3, 3))
weight = [W[0, 0],W[0, 1],W[0, 2],W[1, 0],W[1, 1],W[1, 2],W[2, 0],W[2, 1],W[2, 2]]
fig,axes = plt.subplots(nrows=3,ncols=3)
i=0
for ax in axes.flat:
im = ax.imshow(weight[i], vmin=-1, vmax=1, cmap='seismic')
i+=1
fig.colorbar(im, ax=axes.ravel().tolist())
# plt.subplot(331)
# plt.imshow(W[0, 0], vmin=-1, vmax=1, cmap='seismic')
# plt.subplot(332)
# plt.imshow(W[0, 1], vmin=-1, vmax=1, cmap='seismic')
# plt.subplot(333)
# plt.imshow(W[0, 2], vmin=-1, vmax=1, cmap='seismic')
#
# plt.subplot(334)
# plt.imshow(W[1, 0], vmin=-1, vmax=1, cmap='seismic')
# plt.subplot(335)
# plt.imshow(W[1, 1], vmin=-1, vmax=1, cmap='seismic')
# plt.subplot(336)
# plt.imshow(W[1, 2], vmin=-1, vmax=1, cmap='seismic')
#
# plt.subplot(337)
# plt.imshow(W[2, 0], vmin=-1, vmax=1, cmap='seismic')
# plt.subplot(338)
# plt.imshow(W[2, 1], vmin=-1, vmax=1, cmap='seismic')
# plt.subplot(339)
# plt.imshow(W[2, 2], vmin=-1, vmax=1, cmap='seismic')
##
# fig.colorbar(plt, ax=axes.ravel().tolist())
# plt.subplots_adjust(right=0.8)
# plt.colorbar()
# plt.show()
plt.savefig('/mnt/lustre/yuxian/Code/NCRF-master/CKPT_PATH/Camelyon16/crf/plot_W.png')
if __name__ == '__main__':
main()
| [] | [] | ["CUDA_VISIBLE_DEVICES"] | [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 |
mist/lang/cmd.py | import base64
import os
import re
import abc
import json
import time
import shlex
import hashlib
import tempfile
import subprocess
import asyncio
from typing import Tuple
from functools import lru_cache
input_file_regex = re.compile(r'''(\{)(infile-[\w\.\-\d]+)(\})''')
output_file_regex = re.compile(r'''(\{)(outfile-[\w\.\-\d]+)(\})''')
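# Commands may embed placeholders of the form "{infile-<name>}" and
# "{outfile-<name>}"; these regexes detect them so that execution._load_files
# can map each placeholder to a temporary file path before the command runs.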
def _extract_files(text: str, input_or_output: str) -> list:
if input_or_output == "input":
regex = input_file_regex
else:
regex = output_file_regex
files = []
if found := regex.findall(text):
for _, file_name, _ in found:
files.append(file_name)
return files
extract_input_files = lambda x: _extract_files(x, "input")
extract_output_files = lambda x: _extract_files(x, "output")
class Executor(object):
def __init__(self,
command: str,
environment: dict,
input_files: dict,
output_files :dict,
interactive: bool):
self.environment = environment or {}
self.command = command
self.input_files = input_files
self.output_files = output_files
self.interactive = interactive
self._console_output = []
self._console_stderr = []
self.error_code = None
@abc.abstractmethod
def run_ctx(self):
pass
async def run(self):
# run_ctx is an async generator, so consume it with `async for`; finish_run
# sets error_code once the generator is exhausted.
async for _ in self.run_ctx():
pass
@lru_cache(1)
def console_output(self) -> str:
return "\n".join(self._console_output)
@lru_cache(1)
def stderr_output(self) -> str:
return "\n".join(self._console_stderr)
def status(self):
return self.error_code
def status_text(self):
return True if self.error_code == 0 else False
def __enter__(self):
return self.run_ctx()
def __exit__(self, exc_type, exc_val, exc_tb):
pass
class LocalExecutor(Executor):
def _replace_files_in_command_(self) -> str:
return self.command.format(**{**self.output_files, **self.input_files})
async def finish_run(self, process, wait=True):
if wait:
await process.communicate()
self.error_code = process.returncode
async def run_ctx(self):
start_time = time.time()
new_command = self._replace_files_in_command_()
run_env = os.environ.copy()
run_env.update(self.environment)
command = shlex.split(new_command)
# process = await asyncio.create_subprocess_shell(new_command,
# env=run_env,
# stdout=asyncio.subprocess.PIPE,
# stderr=asyncio.subprocess.PIPE,
# universal_newlines=False)
process = await asyncio.create_subprocess_exec(command[0],
*command[1:],
env=run_env,
stdin=self.interactive,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
universal_newlines=False)
if self.interactive:
yield "InteractiveInit", process
interactive_processes.append((self, row_id, process))
return
while True:
output = await process.stdout.readline()
output = output.decode('utf-8')
if output == '':
break
line = output.strip()
self._console_output.append(output.strip())
yield line, process
while True:
output = await process.stderr.readline()
output = output.decode('utf-8')
if output == '':
break
line = output.strip()
self._console_stderr.append(output.strip())
await self.finish_run(process)
class execution(object):
def __init__(self, command: str, metadata: dict = None, environment: dict = None, interactive=False):
self.command = command
self.metadata = metadata or {}
self.environment = environment or {}
self.interactive = asyncio.subprocess.PIPE if interactive else None
self.input_files = {}
self.output_files = {}
self._tmp_files = []
self._load_files()
def _load_files(self):
for via, fn in (
("out", extract_output_files), ("in", extract_input_files)
):
for f in fn(self.command):
tmp_file = tempfile.NamedTemporaryFile()
self._tmp_files.append(tmp_file)
if via == "out":
self.output_files[f] = tmp_file.name
else:
self.input_files[f] = tmp_file.name
def __enter__(self) -> Tuple[Executor, dict, dict]:
executor = LocalExecutor(
self.command,
self.environment,
self.input_files,
self.output_files,
self.interactive
)
return executor, self.input_files, self.output_files
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def __del__(self):
# Destructor
for f in self._tmp_files:
f.close()
interactive_processes = []
__all__ = ("execution", "interactive_processes")
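# Illustrative sketch (not part of the original module): driving `execution`
# and `LocalExecutor` by hand, e.g. via asyncio.run(_example_run()). The shell
# command and the "outfile-log" placeholder name are assumptions; the
# placeholder is replaced with a temporary file path before execution.
async def _example_run():
    with execution("sh -c 'uname -a > {outfile-log}'") as (executor, in_files, out_files):
        # run_ctx yields each stdout line together with the process handle.
        async for line, process in executor.run_ctx():
            pass
        print(executor.status_text(), out_files["outfile-log"])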
| [] | [] | [] | [] | [] | python | 0 | 0 |
source/paramiko_src/demos/demo.py | #!/usr/bin/env python
# Copyright (C) 2003-2007 Robey Pointer <[email protected]>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
import base64
from binascii import hexlify
import getpass
import os
import select
import socket
import sys
import time
import traceback
from paramiko.py3compat import input
import paramiko
try:
import interactive
except ImportError:
from . import interactive
def agent_auth(transport, username):
"""
Attempt to authenticate to the given transport using any of the private
keys available from an SSH agent.
"""
agent = paramiko.Agent()
agent_keys = agent.get_keys()
if len(agent_keys) == 0:
return
for key in agent_keys:
print('Trying ssh-agent key %s' % hexlify(key.get_fingerprint()))
try:
transport.auth_publickey(username, key)
print('... success!')
return
except paramiko.SSHException:
print('... nope.')
def manual_auth(username, hostname):
default_auth = 'p'
auth = input('Auth by (p)assword, (r)sa key, or (d)ss key? [%s] ' % default_auth)
if len(auth) == 0:
auth = default_auth
if auth == 'r':
default_path = os.path.join(os.environ['HOME'], '.ssh', 'id_rsa')
path = input('RSA key [%s]: ' % default_path)
if len(path) == 0:
path = default_path
try:
key = paramiko.RSAKey.from_private_key_file(path)
except paramiko.PasswordRequiredException:
password = getpass.getpass('RSA key password: ')
key = paramiko.RSAKey.from_private_key_file(path, password)
t.auth_publickey(username, key)
elif auth == 'd':
default_path = os.path.join(os.environ['HOME'], '.ssh', 'id_dsa')
path = input('DSS key [%s]: ' % default_path)
if len(path) == 0:
path = default_path
try:
key = paramiko.DSSKey.from_private_key_file(path)
except paramiko.PasswordRequiredException:
password = getpass.getpass('DSS key password: ')
key = paramiko.DSSKey.from_private_key_file(path, password)
t.auth_publickey(username, key)
else:
pw = getpass.getpass('Password for %s@%s: ' % (username, hostname))
t.auth_password(username, pw)
# setup logging
paramiko.util.log_to_file('demo.log')
username = ''
if len(sys.argv) > 1:
hostname = sys.argv[1]
if hostname.find('@') >= 0:
username, hostname = hostname.split('@')
else:
hostname = input('Hostname: ')
if len(hostname) == 0:
print('*** Hostname required.')
sys.exit(1)
port = 22
if hostname.find(':') >= 0:
hostname, portstr = hostname.split(':')
port = int(portstr)
# now connect
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((hostname, port))
except Exception as e:
print('*** Connect failed: ' + str(e))
traceback.print_exc()
sys.exit(1)
try:
t = paramiko.Transport(sock)
try:
t.start_client()
except paramiko.SSHException:
print('*** SSH negotiation failed.')
sys.exit(1)
try:
keys = paramiko.util.load_host_keys(os.path.expanduser('~/.ssh/known_hosts'))
except IOError:
try:
keys = paramiko.util.load_host_keys(os.path.expanduser('~/ssh/known_hosts'))
except IOError:
print('*** Unable to open host keys file')
keys = {}
# check server's host key -- this is important.
key = t.get_remote_server_key()
if hostname not in keys:
print('*** WARNING: Unknown host key!')
elif key.get_name() not in keys[hostname]:
print('*** WARNING: Unknown host key!')
elif keys[hostname][key.get_name()] != key:
print('*** WARNING: Host key has changed!!!')
sys.exit(1)
else:
print('*** Host key OK.')
# get username
if username == '':
default_username = getpass.getuser()
username = input('Username [%s]: ' % default_username)
if len(username) == 0:
username = default_username
agent_auth(t, username)
if not t.is_authenticated():
manual_auth(username, hostname)
if not t.is_authenticated():
print('*** Authentication failed. :(')
t.close()
sys.exit(1)
chan = t.open_session()
chan.get_pty()
chan.invoke_shell()
print('*** Here we go!\n')
interactive.interactive_shell(chan)
chan.close()
t.close()
except Exception as e:
print('*** Caught exception: ' + str(e.__class__) + ': ' + str(e))
traceback.print_exc()
try:
t.close()
except:
pass
sys.exit(1)
| [] | [] | ["HOME"] | [] | ["HOME"] | python | 1 | 0 |