filename | content | environment | variablearg | constarg | variableargjson | constargjson | lang | constargcount | variableargcount | sentence
---|---|---|---|---|---|---|---|---|---|---|
debian/anytask.wsgi | import os
import sys
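# Activate the deployment's virtualenv so the WSGI app resolves packages from its site-packages.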
activate_this = os.path.join("/usr/share/python/anytask", "bin/activate_this.py")
open(activate_this)
execfile(activate_this, dict(__file__=activate_this))
os.environ['DJANGO_SETTINGS_MODULE'] = "anytask.settings_production"
os.environ['PYTHON_EGG_CACHE'] = "/tmp/anytask_egg_cache"
os.environ['HOME'] = "/usr/share/python/anytask/lib/python2.7/site-packages/Anytask-0.0.0-py2.7.egg/anytask"
os.environ['PYTHONPATH'] = '/usr/share/python/anytask/lib/python2.7/site-packages/Anytask-0.0.0-py2.7.egg/anytask:/usr/share/python/anytask/lib/python2.7/site-packages/Anytask-0.0.0-py2.7.egg:' + os.environ.get('PYTHONPATH', '')
sys.path = ['/usr/share/python/anytask/lib/python2.7/site-packages/Anytask-0.0.0-py2.7.egg/anytask', '/usr/share/python/anytask/lib/python2.7/site-packages/Anytask-0.0.0-py2.7.egg'] + sys.path
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| [] | [] | ["PYTHON_EGG_CACHE", "HOME", "DJANGO_SETTINGS_MODULE", "PYTHONPATH"] | [] | ["PYTHON_EGG_CACHE", "HOME", "DJANGO_SETTINGS_MODULE", "PYTHONPATH"] | python | 4 | 0 | |
ocr.py | from array import array
import os
import sys
import json
import requests
import time
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
from PIL import Image
from io import BytesIO
import csv
import logging
from pathlib import Path
import requests_helper
from PyPDF2 import PdfFileWriter, PdfFileReader
import shutil
import sqlite3
def main():
# Add your Computer Vision subscription key to your environment variables.
if 'COMPUTER_VISION_KEY' in os.environ:
subscription_key = os.environ['COMPUTER_VISION_KEY']
else:
print("\nSet the COMPUTER_VISION_KEY environment variable.\n**Restart your shell or IDE for changes to take effect.**")
sys.exit()
# Add your Computer Vision endpoint to your environment variables.
if 'COMPUTER_VISION_ENDPOINT' in os.environ:
endpoint = os.environ['COMPUTER_VISION_ENDPOINT']
else:
print("\nSet the COMPUTER_VISION_ENDPOINT environment variable.\n**Restart your shell or IDE for changes to take effect.**")
sys.exit()
src_dir = 'src'
out_dir = '_output'
db_location = out_dir + "/ocr-results.db"
conn = sqlite3.connect(db_location)
db = conn.cursor()
create_result_table_if_not_exists(db)
create_error_table_if_not_exists(db)
do_work(src_dir, out_dir,subscription_key, endpoint, db, conn)
conn.close()
# def show_plot(image_path):
# # Display the image and overlay it with the extracted text.
# image = Image.open(image_path)
# ax = plt.imshow(image)
# for polygon in polygons:
# vertices = [(polygon[0][i], polygon[0][i+1])
# for i in range(0, len(polygon[0]), 2)]
# text = polygon[1]
# patch = Polygon(vertices, closed=True, fill=False, linewidth=2, color='y')
# ax.axes.add_patch(patch)
# plt.text(vertices[0][0], vertices[0][1], text, fontsize=20, va="top")
# plt.show()
def check_multi_page(input_pdf):
""" checks to see if the pdf is multipaged returns true or false
"""
is_multi_page: bool
f = open(input_pdf, "rb")
inputpdf = PdfFileReader(f)
is_multi_page = inputpdf.numPages > 1
f.close()
return is_multi_page
def check_size_pdf(input_pdf):
"""
- checks to see if pdf is over 50 mb
"""
return input_pdf.stat().st_size > 49000000
def handle_multi_page(src_dir, input_pdf):
print("input_pdf is multiple pages")
pdf = PdfFileReader(open(input_pdf, "rb"))
num_pages = pdf.numPages
create_dir = "{}/{}".format(src_dir, input_pdf.stem)
if not os.path.exists(create_dir):
os.mkdir(create_dir)
new_dir = Path(create_dir)
if num_pages > 1:
for i in range(num_pages):
output = PdfFileWriter()
output.addPage(pdf.getPage(i))
with open("{}/{}-{}.pdf".format(new_dir, input_pdf.stem, str(i+1).zfill(3)), "wb") as outputStream:
output.write(outputStream)
return new_dir
def post_and_request(f, out, url, headers):
# get name without file extensions
kvp = []
kvp.append(f.stem)
page_text = ""
post_response = requests_helper.post_image(url=url, headers=headers, image=open(f, 'rb'))
if post_response["status_code"] == 202:
result = {}
poll = True
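# Poll the asynchronous Read operation until it succeeds or reports no text, backing off on HTTP 429 throttling.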
while poll:
get_response = requests_helper.get_read_result(url=post_response["response"].headers["Operation-Location"], headers=headers)
if get_response["status_code"] == 200:
json_response = get_response["response"].json()
if json_response["status"] == "notStarted":
print("sleep 0.5s - notStarted")
time.sleep(0.5)
elif json_response["status"] == "running":
print("sleep 0.5s - running")
time.sleep(0.5)
elif json_response["status"] == "succeeded":
print("{} succeeded".format(f.stem))
text = []
if ("analyzeResult" in json_response):
text = json_response["analyzeResult"]["readResults"][0]["lines"]
# change this to a string, rather than a file
# add it as a row value to our output .csv
for line in text:
page_text += " " + line["text"] + "\n"
kvp.append(page_text)
poll = False
else:
kvp.append("No Text Found")
poll = False
elif get_response["status_code"] == 429:
print("sleep 10s - 429")
time.sleep(10)
else:
kvp.append("No Text Found")
raise Exception("unexpected status code {} while polling for read results".format(get_response["status_code"]))
else:
kvp.append("ocring failed")
raise Exception("unexpected status code {} when submitting the image for OCR".format(post_response["status_code"]))
return kvp
def do_work(src_dir, out_dir, subscription_key, endpoint, db, conn):
src = Path(src_dir)
timestamp = time.strftime("%Y-%m-%dT%H-%M-%S", time.localtime())
out = open('{}/{}.csv'.format(out_dir, timestamp), "w", encoding="utf-8")
# add column headers to .csv
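# chr(166) is the broken-bar character '¦', used as the field delimiter throughout the output CSV.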
out.write("Control Number {} Extracted Text\n".format(chr(166)))
url = endpoint + "/vision/v3.0/read/analyze"
headers = {
'Ocp-Apim-Subscription-Key': subscription_key,
'Content-Type': 'application/octet-stream'
}
logging.basicConfig(filename='{}/{}.log'.format(out_dir,timestamp), level=logging.INFO)
for f in src.iterdir():
try:
# only make the API call if the file has a supported suffix: .png, .jpg/.jpeg, .pdf, .bmp, .tiff
# keep a record in log if not
suffix = f.suffix.lower()
if suffix not in [".png", ".jpg", ".jpeg", ".pdf", ".bmp", ".tiff"]:
logging.warning("%s:UnsupportedFileType:%s", f.stem, f.suffix)
continue
if does_record_already_exist(db, conn, f.stem):
print("already exists")
continue
if (suffix == ".pdf" and check_multi_page(f)) and check_size_pdf(f):
new_dir = handle_multi_page(src_dir, f)
text_pages = []
for page in new_dir.iterdir():
text_pages.append(post_and_request(page, out, url, headers))
multi_page_str = ""
out.write("{} {} ".format(f.stem, chr(166))) # using ¦ as a delimiter for the .csv - works with RDC
count = 1
for text in text_pages:
out.write(text[1])
multi_page_str += text[1] + "\n ----------------- page {} -----------------\n".format(count)
count += 1
insert_result(db, conn, f.stem, multi_page_str)
out.write("\n")
shutil.rmtree(new_dir)  # os.rmdir would fail here because the per-page split PDFs are still inside
else:
kvp = post_and_request(f, out, url, headers)
insert_result(db, conn, kvp[0], kvp[1])
out.write("{} {} {}\n".format(kvp[0], chr(166), kvp[1]))
except Exception as e:
insert_error(db, conn, f.stem, str(e))
continue
out.close()
def create_result_table_if_not_exists(db: sqlite3.Cursor):
# create result table
db.execute('''CREATE TABLE IF NOT EXISTS results (
control_number text PRIMARY KEY,
azure_extracted_text text
)''')
def insert_result(db: sqlite3.Cursor, conn: sqlite3.Connection, control_number, azure_extracted_text):
db.execute('INSERT INTO results VALUES (?,?)', (control_number, azure_extracted_text))
conn.commit()
def create_error_table_if_not_exists(db: sqlite3.Cursor):
# create error table
db.execute('''CREATE TABLE IF NOT EXISTS errors (
row_id INTEGER PRIMARY KEY AUTOINCREMENT,
control_number TEXT,
message TEXT
)''')
def insert_error(db: sqlite3.Cursor, conn: sqlite3.Connection, control_number, message):
db.execute('INSERT INTO errors VALUES (?,?,?)', (None,control_number, message))
conn.commit()
def does_record_already_exist(db: sqlite3.Cursor, conn: sqlite3.Connection, control_number):
query = db.execute('SELECT * FROM results WHERE control_number=?', (str(control_number),))
result = query.fetchone()
if result is None:
return False
return True
if __name__ == "__main__":
main() | [] | [] | ["COMPUTER_VISION_ENDPOINT", "COMPUTER_VISION_KEY"] | [] | ["COMPUTER_VISION_ENDPOINT", "COMPUTER_VISION_KEY"] | python | 2 | 0 | |
services/filesstore/filesstore_test.go | // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package filesstore
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"testing"
"github.com/stretchr/testify/require"
"github.com/stretchr/testify/suite"
"github.com/mattermost/mattermost-server/v5/mlog"
"github.com/mattermost/mattermost-server/v5/model"
"github.com/mattermost/mattermost-server/v5/utils"
)
type FileBackendTestSuite struct {
suite.Suite
settings model.FileSettings
backend FileBackend
}
func TestLocalFileBackendTestSuite(t *testing.T) {
// Setup a global logger to catch tests logging outside of app context
// The global logger will be stomped by apps initializing but that's fine for testing. Ideally this won't happen.
mlog.InitGlobalLogger(mlog.NewLogger(&mlog.LoggerConfiguration{
EnableConsole: true,
ConsoleJson: true,
ConsoleLevel: "error",
EnableFile: false,
}))
dir, err := ioutil.TempDir("", "")
require.NoError(t, err)
defer os.RemoveAll(dir)
suite.Run(t, &FileBackendTestSuite{
settings: model.FileSettings{
DriverName: model.NewString(model.IMAGE_DRIVER_LOCAL),
Directory: &dir,
},
})
}
func TestS3FileBackendTestSuite(t *testing.T) {
runBackendTest(t, false)
}
func TestS3FileBackendTestSuiteWithEncryption(t *testing.T) {
runBackendTest(t, true)
}
func TestInternalEncryptedFileBackendTestSuiteWithEncryption(t *testing.T) {
suite.Run(t, &FileBackendTestSuite{
settings: model.FileSettings{
DriverName: model.NewString(model.IMAGE_DRIVER_INTERNAL_ENCRYPTED_STORE),
},
})
}
func runBackendTest(t *testing.T, encrypt bool) {
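// The S3 backend tests target a MinIO instance; CI_MINIO_HOST and CI_MINIO_PORT override the default localhost:9000 endpoint.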
s3Host := os.Getenv("CI_MINIO_HOST")
if s3Host == "" {
s3Host = "localhost"
}
s3Port := os.Getenv("CI_MINIO_PORT")
if s3Port == "" {
s3Port = "9000"
}
s3Endpoint := fmt.Sprintf("%s:%s", s3Host, s3Port)
suite.Run(t, &FileBackendTestSuite{
settings: model.FileSettings{
DriverName: model.NewString(model.IMAGE_DRIVER_S3),
AmazonS3AccessKeyId: model.NewString(model.MINIO_ACCESS_KEY),
AmazonS3SecretAccessKey: model.NewString(model.MINIO_SECRET_KEY),
AmazonS3Bucket: model.NewString(model.MINIO_BUCKET),
AmazonS3Region: model.NewString(""),
AmazonS3Endpoint: model.NewString(s3Endpoint),
AmazonS3PathPrefix: model.NewString(""),
AmazonS3SSL: model.NewBool(false),
AmazonS3SSE: model.NewBool(encrypt),
},
})
}
func (s *FileBackendTestSuite) SetupTest() {
utils.TranslationsPreInit()
backend, err := NewFileBackend(&s.settings, true)
require.Nil(s.T(), err)
s.backend = backend
// This is needed to create the bucket if it doesn't exist.
s.Nil(s.backend.TestConnection())
}
func (s *FileBackendTestSuite) TestConnection() {
s.Nil(s.backend.TestConnection())
}
func (s *FileBackendTestSuite) TestReadWriteFile() {
b := []byte("test")
path := "tests/" + model.NewId()
written, err := s.backend.WriteFile(bytes.NewReader(b), path)
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
defer s.backend.RemoveFile(path)
read, err := s.backend.ReadFile(path)
s.Nil(err)
readString := string(read)
s.EqualValues(readString, "test")
}
func (s *FileBackendTestSuite) TestReadWriteFileImage() {
b := []byte("testimage")
path := "tests/" + model.NewId() + ".png"
written, err := s.backend.WriteFile(bytes.NewReader(b), path)
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
defer s.backend.RemoveFile(path)
read, err := s.backend.ReadFile(path)
s.Nil(err)
readString := string(read)
s.EqualValues(readString, "testimage")
}
func (s *FileBackendTestSuite) TestFileExists() {
b := []byte("testimage")
path := "tests/" + model.NewId() + ".png"
_, err := s.backend.WriteFile(bytes.NewReader(b), path)
s.Nil(err)
defer s.backend.RemoveFile(path)
res, err := s.backend.FileExists(path)
s.Nil(err)
s.True(res)
res, err = s.backend.FileExists("tests/idontexist.png")
s.Nil(err)
s.False(res)
}
func (s *FileBackendTestSuite) TestCopyFile() {
b := []byte("test")
path1 := "tests/" + model.NewId()
path2 := "tests/" + model.NewId()
written, err := s.backend.WriteFile(bytes.NewReader(b), path1)
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
defer s.backend.RemoveFile(path1)
err = s.backend.CopyFile(path1, path2)
s.Nil(err)
defer s.backend.RemoveFile(path2)
data1, err := s.backend.ReadFile(path1)
s.Nil(err)
data2, err := s.backend.ReadFile(path2)
s.Nil(err)
s.Equal(b, data1)
s.Equal(b, data2)
}
func (s *FileBackendTestSuite) TestCopyFileToDirectoryThatDoesntExist() {
b := []byte("test")
path1 := "tests/" + model.NewId()
path2 := "tests/newdirectory/" + model.NewId()
written, err := s.backend.WriteFile(bytes.NewReader(b), path1)
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
defer s.backend.RemoveFile(path1)
err = s.backend.CopyFile(path1, path2)
s.Nil(err)
defer s.backend.RemoveFile(path2)
_, err = s.backend.ReadFile(path1)
s.Nil(err)
_, err = s.backend.ReadFile(path2)
s.Nil(err)
}
func (s *FileBackendTestSuite) TestMoveFile() {
b := []byte("test")
path1 := "tests/" + model.NewId()
path2 := "tests/" + model.NewId()
written, err := s.backend.WriteFile(bytes.NewReader(b), path1)
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
defer s.backend.RemoveFile(path1)
s.Nil(s.backend.MoveFile(path1, path2))
defer s.backend.RemoveFile(path2)
_, err = s.backend.ReadFile(path1)
s.Error(err)
data, err := s.backend.ReadFile(path2)
s.Nil(err)
s.Equal(b, data)
}
func (s *FileBackendTestSuite) TestRemoveFile() {
b := []byte("test")
path := "tests/" + model.NewId()
written, err := s.backend.WriteFile(bytes.NewReader(b), path)
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
s.Nil(s.backend.RemoveFile(path))
_, err = s.backend.ReadFile(path)
s.Error(err)
written, err = s.backend.WriteFile(bytes.NewReader(b), "tests2/foo")
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
written, err = s.backend.WriteFile(bytes.NewReader(b), "tests2/bar")
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
written, err = s.backend.WriteFile(bytes.NewReader(b), "tests2/asdf")
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
s.Nil(s.backend.RemoveDirectory("tests2"))
}
func (s *FileBackendTestSuite) TestListDirectory() {
b := []byte("test")
path1 := "19700101/" + model.NewId()
path2 := "19800101/" + model.NewId()
paths, err := s.backend.ListDirectory("19700101")
s.Nil(err)
s.Len(*paths, 0)
written, err := s.backend.WriteFile(bytes.NewReader(b), path1)
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
written, err = s.backend.WriteFile(bytes.NewReader(b), path2)
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
paths, err = s.backend.ListDirectory("19700101")
s.Nil(err)
s.Len(*paths, 1)
s.Equal(path1, (*paths)[0])
paths, err = s.backend.ListDirectory("19700101/")
s.Nil(err)
s.Len(*paths, 1)
s.Equal(path1, (*paths)[0])
paths, err = s.backend.ListDirectory("")
s.Nil(err)
found1 := false
found2 := false
for _, path := range *paths {
if path == "19700101" {
found1 = true
} else if path == "19800101" {
found2 = true
}
}
s.True(found1)
s.True(found2)
s.backend.RemoveFile(path1)
s.backend.RemoveFile(path2)
}
func (s *FileBackendTestSuite) TestRemoveDirectory() {
b := []byte("test")
written, err := s.backend.WriteFile(bytes.NewReader(b), "tests2/foo")
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
written, err = s.backend.WriteFile(bytes.NewReader(b), "tests2/bar")
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
written, err = s.backend.WriteFile(bytes.NewReader(b), "tests2/aaa")
s.Nil(err)
s.EqualValues(len(b), written, "expected given number of bytes to have been written")
s.Nil(s.backend.RemoveDirectory("tests2"))
_, err = s.backend.ReadFile("tests2/foo")
s.Error(err)
_, err = s.backend.ReadFile("tests2/bar")
s.Error(err)
_, err = s.backend.ReadFile("tests2/asdf")
s.Error(err)
}
func (s *FileBackendTestSuite) TestAppendFile() {
s.Run("should fail if target file is missing", func() {
path := "tests/" + model.NewId()
b := make([]byte, 1024)
written, err := s.backend.AppendFile(bytes.NewReader(b), path)
s.Error(err)
s.Zero(written)
})
s.Run("should correctly append the data", func() {
// First part needs to be at least 5MB for the S3 implementation to work.
size := 5 * 1024 * 1024
b := make([]byte, size)
for i := range b {
b[i] = 'A'
}
path := "tests/" + model.NewId()
written, err := s.backend.WriteFile(bytes.NewReader(b), path)
s.Nil(err)
s.EqualValues(len(b), written)
defer s.backend.RemoveFile(path)
b2 := make([]byte, 1024)
for i := range b2 {
b2[i] = 'B'
}
written, err = s.backend.AppendFile(bytes.NewReader(b2), path)
s.Nil(err)
s.EqualValues(int64(len(b2)), written)
read, err := s.backend.ReadFile(path)
s.Nil(err)
s.EqualValues(len(b)+len(b2), len(read))
s.EqualValues(append(b, b2...), read)
b3 := make([]byte, 1024)
for i := range b3 {
b3[i] = 'C'
}
written, err = s.backend.AppendFile(bytes.NewReader(b3), path)
s.Nil(err)
s.EqualValues(int64(len(b3)), written)
read, err = s.backend.ReadFile(path)
s.Nil(err)
s.EqualValues(len(b)+len(b2)+len(b3), len(read))
s.EqualValues(append(append(b, b2...), b3...), read)
})
}
| ["\"CI_MINIO_HOST\"", "\"CI_MINIO_PORT\""] | [] | ["CI_MINIO_PORT", "CI_MINIO_HOST"] | [] | ["CI_MINIO_PORT", "CI_MINIO_HOST"] | go | 2 | 0 | |
pkg/auth/auth.go | package auth
import (
"context"
"fmt"
"net/http"
"strings"
"os"
"github.com/gin-gonic/gin"
"github.com/new-adventure-areolite/grpc-app-server/pd/auth"
"google.golang.org/grpc/metadata"
)
func AuthMiddleWare(client *Client) gin.HandlerFunc {
return func(c *gin.Context) {
c.Writer.Header().Set("Access-Control-Allow-Origin", "*")
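// Expect an 'Authorization: bearer <token>' header; the second field is the raw ID token passed to the auth service.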
bearerToken := c.GetHeader("Authorization")
IDToken := strings.Split(bearerToken, " ")
if len(IDToken) != 2 {
if os.Getenv("DEBUG") != "" {
fmt.Println(bearerToken)
}
c.AbortWithStatusJSON(http.StatusUnauthorized, map[string]string{
"error": "failed to auth, expected header: 'Authorization: bearer <token>'",
})
return
}
email, isAdmin, err := client.ValidateAdmin(c.Request.Context(), IDToken[1])
if err != nil {
c.AbortWithStatusJSON(http.StatusUnauthorized, map[string]string{
"error": err.Error(),
})
return
}
c.Set("id", email)
if isAdmin {
c.Set("user-type", "admin")
} else {
c.Set("user-type", "normal")
}
newCtx := metadata.AppendToOutgoingContext(c.Request.Context(), "user-type", c.GetString("user-type"))
c.Request = c.Request.WithContext(newCtx)
c.Next()
}
}
func AdminAuthMiddleWare(client *Client) gin.HandlerFunc {
return func(c *gin.Context) {
c.Writer.Header().Set("Access-Control-Allow-Origin", "*")
bearerToken := c.GetHeader("Authorization")
IDToken := strings.Split(bearerToken, " ")
if len(IDToken) != 2 {
c.AbortWithStatusJSON(http.StatusUnauthorized, map[string]string{
"error": "failed to auth, expected header: 'Authorization: bearer <token>'",
})
return
}
ctxInterface, _ := c.Get("SpanContext")
ctx := ctxInterface.(context.Context)
email, isAdmin, err := client.ValidateAdmin(ctx, IDToken[1])
if err != nil {
c.AbortWithStatusJSON(http.StatusUnauthorized, map[string]string{
"error": err.Error(),
})
return
}
if !isAdmin {
c.AbortWithStatusJSON(http.StatusForbidden, gin.H{
"error": "this request needs admin access",
})
return
}
c.Set("id", email)
if isAdmin {
c.Set("user-type", "admin")
} else {
c.Set("user-type", "normal")
}
newCtx := metadata.AppendToOutgoingContext(c.Request.Context(), "user-type", c.GetString("user-type"))
c.Request = c.Request.WithContext(newCtx)
c.Next()
}
}
type Client struct {
authClient auth.AuthServiceClient
}
func New(client auth.AuthServiceClient) *Client {
return &Client{
authClient: client,
}
}
func (c *Client) ValidateAdmin(ctx context.Context, token string) (string, bool, error) {
resp, err := c.authClient.Validate(ctx, &auth.ValidateRequest{
RawIdToken: token,
ClaimNames: []string{"email", "groups"},
})
if err != nil {
return "", false, err
}
if resp.Email == "" {
return "", false, fmt.Errorf("email must not be empty")
}
var isAdmin = false
for i := range resp.Groups {
if strings.Contains(resp.Groups[i], "admin") {
isAdmin = true
}
}
return resp.Email, isAdmin, nil
}
| ["\"DEBUG\""] | [] | ["DEBUG"] | [] | ["DEBUG"] | go | 1 | 0 | |
scripts/cache_models.py | #!/usr/bin/env python
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir))))
from allennlp.commands.serve import DEFAULT_MODELS
from allennlp.common.file_utils import cached_path
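# Download and cache every default demo model when CACHE_MODELS is set to 'true'.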
value = os.environ.get('CACHE_MODELS', 'false')
if value.lower() == "true":
models = DEFAULT_MODELS.items()
print("CACHE_MODELS is '%s'. Downloading %i models." % (value, len(models)))
for i, (model, url) in enumerate(models):
print("Downloading '%s' model from %s" % (model, url))
print("Saved at %s" % cached_path(url))
else:
print("CACHE_MODELS is '%s'. Not caching models." % value)
| []
| []
| [
"CACHE_MODELS"
]
| [] | ["CACHE_MODELS"] | python | 1 | 0 | |
share/qt/extract_strings_qt.py | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from __future__ import division,print_function,unicode_literals
from subprocess import Popen, PIPE
import glob
import operator
import os
import sys
OUT_CPP="qt/quicknodesstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
exit(1)
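# Run xgettext over the given source files and parse its PO output into (msgid, msgstr) pairs.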
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *quicknodes_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("quicknodes-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
| [] | [] | ["XGETTEXT"] | [] | ["XGETTEXT"] | python | 1 | 0 | |
matminer/data_retrieval/tests/test_retrieve_Citrine.py | # coding: utf-8
from __future__ import division, unicode_literals, absolute_import
import os
import pandas as pd
import unittest
from matminer.data_retrieval.retrieve_Citrine import CitrineDataRetrieval
pd.set_option('display.width', 1000)
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
citrine_key = os.environ.get('CITRINATION_API_KEY', None)
@unittest.skipIf(citrine_key is None, "CITRINATION_API_KEY env variable not set.")
class CitrineDataRetrievalTest(unittest.TestCase):
def setUp(self):
self.cdr = CitrineDataRetrieval(citrine_key)
def test_get_data(self):
pifs_lst = self.cdr.get_data(formula="W", data_type='EXPERIMENTAL',
max_results=10)
self.assertEqual(len(pifs_lst), 10)
df = self.cdr.get_dataframe(criteria={'formula':'W',
'data_type':'EXPERIMENTAL',
'max_results':10},
print_properties_options=False)
self.assertEqual(df.shape[0], 10)
def test_multiple_items_in_list(self):
df = self.cdr.get_dataframe(criteria={'data_set_id': 114192,
'max_results':102},
print_properties_options=False)
self.assertEqual(df.shape[0], 102)
test_cols = {"Thermal conductivity_5-conditions", "Condition_1",
"Thermal conductivity_10"}
self.assertTrue(test_cols < set(df.columns))
if __name__ == "__main__":
unittest.main()
| [] | [] | ["CITRINATION_API_KEY"] | [] | ["CITRINATION_API_KEY"] | python | 1 | 0 | |
modelzoo/DIEN/train.py | import time
import argparse
import tensorflow as tf
import os
import sys
import math
import collections
from tensorflow.python.client import timeline
import json
from tensorflow.python.ops import partitioned_variables
from tensorflow.contrib.rnn.python.ops.core_rnn_cell import _Linear
from tensorflow.python.feature_column.feature_column import _LazyBuilder
from tensorflow.python.feature_column import utils as fc_utils
# Set to INFO for tracking training, default is WARN. ERROR for least messages
tf.logging.set_verbosity(tf.logging.INFO)
print("Using TensorFlow version %s" % (tf.__version__))
UNSEQ_COLUMNS = ['UID', 'ITEM', 'CATEGORY']
HIS_COLUMNS = ['HISTORY_ITEM', 'HISTORY_CATEGORY']
NEG_COLUMNS = ['NOCLK_HISTORY_ITEM', 'NOCLK_HISTORY_CATEGORY']
SEQ_COLUMNS = HIS_COLUMNS + NEG_COLUMNS
LABEL_COLUMN = ["CLICKED"]
TRAIN_DATA_COLUMNS = LABEL_COLUMN + UNSEQ_COLUMNS + SEQ_COLUMNS
EMBEDDING_DIM = 18
HIDDEN_SIZE = 18 * 2
ATTENTION_SIZE = 18 * 2
MAX_SEQ_LENGTH = 50
def add_layer_summary(value, tag):
tf.summary.scalar('%s/fraction_of_zero_values' % tag,
tf.nn.zero_fraction(value))
tf.summary.histogram('%s/activation' % tag, value)
def _assert_all_equal_and_return(tensors, name=None):
"""Asserts that all tensors are equal and returns the first one."""
with tf.name_scope(name, 'assert_all_equal', values=tensors):
if len(tensors) == 1:
return tensors[0]
assert_equal_ops = []
for t in tensors[1:]:
assert_equal_ops.append(tf.debugging.assert_equal(tensors[0], t))
with tf.control_dependencies(assert_equal_ops):
return tf.identity(tensors[0])
def generate_input_data(filename, batch_size, num_epochs):
def parse_csv(value, neg_value):
tf.logging.info('Parsing {}'.format(filename))
cate_defaults = [[" "] for i in range(0, 5)]
# cate_defaults = [[" "] for i in range(0, 7)]
label_defaults = [[0]]
column_headers = TRAIN_DATA_COLUMNS
record_defaults = label_defaults + cate_defaults
columns = tf.io.decode_csv(value,
record_defaults=record_defaults,
field_delim='\t')
neg_columns = tf.io.decode_csv(neg_value,
record_defaults=[[""], [""]],
field_delim='\t')
columns.extend(neg_columns)
all_columns = collections.OrderedDict(zip(column_headers, columns))
labels = all_columns.pop(LABEL_COLUMN[0])
features = all_columns
return features, labels
# Extract lines from input files using the Dataset API.
dataset = tf.data.TextLineDataset(filename)
dataset_neg_samples = tf.data.TextLineDataset(filename + '_neg')
dataset = tf.data.Dataset.zip((dataset, dataset_neg_samples))
dataset = dataset.shuffle(buffer_size=20000,
seed=2021) # set seed for reproducing
dataset = dataset.repeat(num_epochs)
dataset = dataset.prefetch(batch_size)
dataset = dataset.batch(batch_size)
dataset = dataset.map(parse_csv,
num_parallel_calls=tf.data.experimental.AUTOTUNE)
dataset = dataset.prefetch(1)
return dataset
def build_feature_cols(data_location=None):
# uid_file
uid_file = os.path.join(data_location, 'uid_voc.txt')
mid_file = os.path.join(data_location, 'mid_voc.txt')
cat_file = os.path.join(data_location, 'cat_voc.txt')
if (not os.path.exists(uid_file)) or (not os.path.exists(mid_file)) or (
not os.path.exists(cat_file)):
print(
"uid_voc.txt, mid_voc.txt or cat_voc.txt does not exist in the given data location.")
sys.exit()
# uid
uid_cate_column = tf.feature_column.categorical_column_with_vocabulary_file(
'UID', uid_file, default_value=0)
uid_emb_column = tf.feature_column.embedding_column(
uid_cate_column, dimension=EMBEDDING_DIM)
# item
item_cate_column = tf.feature_column.categorical_column_with_vocabulary_file(
'ITEM', mid_file, default_value=0)
category_cate_column = tf.feature_column.categorical_column_with_vocabulary_file(
'CATEGORY', cat_file, default_value=0)
# history behavior
his_item_cate_column = tf.feature_column.sequence_categorical_column_with_vocabulary_file(
'HISTORY_ITEM', mid_file, default_value=0)
his_category_cate_column = tf.feature_column.sequence_categorical_column_with_vocabulary_file(
'HISTORY_CATEGORY', cat_file, default_value=0)
# negative samples
noclk_his_item_cate_column = tf.feature_column.sequence_categorical_column_with_vocabulary_file(
'NOCLK_HISTORY_ITEM', mid_file, default_value=0)
noclk_his_category_cate_column = tf.feature_column.sequence_categorical_column_with_vocabulary_file(
'NOCLK_HISTORY_CATEGORY', cat_file, default_value=0)
return {
'uid_emb_column': uid_emb_column,
'item_cate_column': item_cate_column,
'category_cate_column': category_cate_column,
'his_item_cate_column': his_item_cate_column,
'his_category_cate_column': his_category_cate_column,
'noclk_his_item_cate_column': noclk_his_item_cate_column,
'noclk_his_category_cate_column': noclk_his_category_cate_column
}
class VecAttGRUCell(tf.nn.rnn_cell.RNNCell):
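# GRU cell whose update gate is scaled by an attention score (AUGRU), used in DIEN's interest-evolving layer.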
def __init__(self,
num_units,
activation=None,
reuse=None,
kernel_initializer=None,
bias_initializer=None):
super(VecAttGRUCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._activation = activation or tf.math.tanh
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._gate_linear = None
self._candidate_linear = None
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def __call__(self, inputs, state):
return self.call(inputs, state)
def call(self, inputs, state, att_score=None):
"""Gated recurrent unit (GRU) with nunits cells."""
_inputs = inputs[0]
att_score = inputs[1]
if self._gate_linear is None:
bias_ones = self._bias_initializer
if self._bias_initializer is None:
bias_ones = tf.constant_initializer(1.0, dtype=_inputs.dtype)
with tf.variable_scope("gates"): # Reset gate and update gate.
self._gate_linear = _Linear(
[_inputs, state],
2 * self._num_units,
True,
bias_initializer=bias_ones,
kernel_initializer=self._kernel_initializer)
value = tf.math.sigmoid(self._gate_linear([_inputs, state]))
r, u = tf.split(value=value, num_or_size_splits=2, axis=1)
r_state = r * state
if self._candidate_linear is None:
with tf.variable_scope("candidate"):
self._candidate_linear = _Linear(
[_inputs, r_state],
self._num_units,
True,
bias_initializer=self._bias_initializer,
kernel_initializer=self._kernel_initializer)
c = self._activation(self._candidate_linear([_inputs, r_state]))
u = (1.0 - att_score) * u
new_h = u * state + (1 - u) * c
return new_h, new_h
class DIEN():
def __init__(self,
feature_column=None,
learning_rate=0.001,
embedding_dim=18,
hidden_size=36,
attention_size=36,
inputs=None,
bf16=False,
input_layer_partitioner=None,
dense_layer_partitioner=None):
if not inputs:
raise ValueError('Dataset is not defined.')
if not feature_column:
raise ValueError('Dense column or sparse column is not defined.')
# self.feature_column = feature_column
self.uid_emb_column = feature_column['uid_emb_column']
self.item_cate_column = feature_column['item_cate_column']
self.his_item_cate_column = feature_column['his_item_cate_column']
self.category_cate_column = feature_column['category_cate_column']
self.his_category_cate_column = feature_column[
'his_category_cate_column']
self.noclk_his_item_cate_column = feature_column[
'noclk_his_item_cate_column']
self.noclk_his_category_cate_column = feature_column[
'noclk_his_category_cate_column']
self.learning_rate = learning_rate
self.input_layer_partitioner = input_layer_partitioner
self.dense_layer_partitioner = dense_layer_partitioner
self.feature = inputs[0]
self.label = inputs[1]
self.batch_size = tf.shape(self.label)[0]
self.embedding_dim = embedding_dim
self.hidden_size = hidden_size
self.attention_size = attention_size
self.bf16 = bf16
if self.bf16:
self.data_tpye = tf.bfloat16
else:
self.data_tpye = tf.float32
self.predict = self.prediction()
with tf.name_scope('head'):
self.train_op, self.loss = self.optimizer()
self.acc, self.acc_op = tf.metrics.accuracy(labels=self.label,
predictions=tf.round(
self.predict))
self.auc, self.auc_op = tf.metrics.auc(labels=self.label,
predictions=self.predict,
num_thresholds=1000)
tf.summary.scalar('eval_acc', self.acc)
tf.summary.scalar('eval_auc', self.auc)
def prelu(self, x, scope=''):
"""parametric ReLU activation"""
with tf.variable_scope(name_or_scope=scope, default_name="prelu"):
alpha = tf.get_variable("prelu_" + scope,
shape=x.get_shape()[-1],
dtype=x.dtype,
initializer=tf.constant_initializer(0.1))
pos = tf.nn.relu(x)
neg = alpha * (x - abs(x)) * tf.constant(0.5, dtype=x.dtype)
return pos + neg
def auxiliary_net(self, in_, stag='auxiliary_net'):
bn1 = tf.layers.batch_normalization(inputs=in_,
name='bn1' + stag,
reuse=tf.AUTO_REUSE)
dnn1 = tf.layers.dense(bn1,
100,
activation=None,
name='f1' + stag,
reuse=tf.AUTO_REUSE)
dnn1 = tf.nn.sigmoid(dnn1)
dnn2 = tf.layers.dense(dnn1,
50,
activation=None,
name='f2' + stag,
reuse=tf.AUTO_REUSE)
dnn2 = tf.nn.sigmoid(dnn2)
dnn3 = tf.layers.dense(dnn2,
2,
activation=None,
name='f3' + stag,
reuse=tf.AUTO_REUSE)
y_hat = tf.nn.softmax(dnn3) + tf.constant(0.00000001, dtype=dnn3.dtype)
return y_hat
def auxiliary_loss(self,
h_states,
click_seq,
noclick_seq,
mask,
dtype=tf.float32,
stag=None):
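# Auxiliary loss: each GRU hidden state should score the user's actual next click higher than the sampled negative item.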
mask = tf.cast(mask, dtype=dtype)
click_input_ = tf.concat([h_states, click_seq], -1)
noclick_input_ = tf.concat([h_states, noclick_seq], -1)
if dtype == tf.bfloat16:
with tf.variable_scope('auxiliary_net').keep_weights(dtype=tf.float32):
click_prop_ = self.auxiliary_net(click_input_, stag=stag)[:, :,
0]
noclick_prop_ = self.auxiliary_net(noclick_input_,
stag=stag)[:, :, 0]
else:
with tf.variable_scope('auxiliary_net'):
click_prop_ = self.auxiliary_net(click_input_, stag=stag)[:, :,
0]
noclick_prop_ = self.auxiliary_net(noclick_input_,
stag=stag)[:, :, 0]
click_loss_ = -tf.reshape(tf.log(click_prop_),
[-1, tf.shape(click_seq)[1]]) * mask
noclick_loss_ = -tf.reshape(tf.log(1.0 - noclick_prop_),
[-1, tf.shape(noclick_seq)[1]]) * mask
loss_ = tf.reduce_mean(click_loss_ + noclick_loss_)
return loss_
def attention(self,
query,
facts,
attention_size,
mask,
stag='null',
mode='SUM',
softmax_stag=1,
time_major=False,
return_alphas=False,
forCnn=False):
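# DIN-style attention: score every history position against the candidate item with a small MLP, mask padding, then return either the weighted sum ('SUM') or the per-step weights ('LIST').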
if isinstance(facts, tuple):
# In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
facts = tf.concat(facts, 2)
if len(facts.get_shape().as_list()) == 2:
facts = tf.expand_dims(facts, 1)
if time_major:
# (T,B,D) => (B,T,D)
facts = tf.array_ops.transpose(facts, [1, 0, 2])
# Trainable parameters
# mask = tf.equal(mask, tf.ones_like(mask))
facts_size = facts.get_shape().as_list()[
-1] # D value - hidden size of the RNN layer
querry_size = query.get_shape().as_list()[-1]
query = tf.layers.dense(query,
facts_size,
activation=None,
name='f1' + stag)
query = self.prelu(query)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries - facts, queries * facts],
axis=-1)
d_layer_1_all = tf.layers.dense(din_all,
80,
activation=tf.nn.sigmoid,
name='f1_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all,
40,
activation=tf.nn.sigmoid,
name='f2_att' + stag)
d_layer_3_all = tf.layers.dense(d_layer_2_all,
1,
activation=None,
name='f3_att' + stag)
d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
scores = d_layer_3_all
# Mask
# key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1]) # [B, T]
key_masks = tf.expand_dims(mask, 1) # [B, 1, T]
paddings = tf.ones_like(scores) * (-2**32 + 1)
if not forCnn:
scores = tf.where(key_masks, scores, paddings) # [B, 1, T]
# Scale
# scores = scores / (facts.get_shape().as_list()[-1] ** 0.5)
# Activation
if softmax_stag:
scores = tf.nn.softmax(scores) # [B, 1, T]
# Weighted sum
if mode == 'SUM':
output = tf.matmul(scores, facts) # [B, 1, H]
# output = tf.reshape(output, [-1, tf.shape(facts)[-1]])
else:
scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])
output = facts * tf.expand_dims(scores, -1)
output = tf.reshape(output, tf.shape(facts))
if return_alphas:
return output, scores
return output
def dice(self, _x, axis=-1, epsilon=0.000000001, name=''):
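# Dice activation: a PReLU variant whose gating probability is computed from batch statistics of the input.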
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
alphas = tf.get_variable('alpha' + name,
_x.get_shape()[-1],
initializer=tf.constant_initializer(0.0),
dtype=_x.dtype)
input_shape = list(_x.get_shape())
reduction_axes = list(range(len(input_shape)))
del reduction_axes[axis]
broadcast_shape = [1] * len(input_shape)
broadcast_shape[axis] = input_shape[axis]
# case: train mode (uses stats of the current batch)
mean = tf.reduce_mean(_x, axis=reduction_axes)
brodcast_mean = tf.reshape(mean, broadcast_shape)
std = tf.reduce_mean(tf.square(_x - brodcast_mean) + epsilon,
axis=reduction_axes)
std = tf.sqrt(std)
brodcast_std = tf.reshape(std, broadcast_shape)
x_normed = (_x - brodcast_mean) / (brodcast_std + epsilon)
# x_normed = tf.layers.batch_normalization(_x, center=False, scale=False)
x_p = tf.sigmoid(x_normed)
return alphas * (1.0 - x_p) * _x + x_p * _x
def embedding_input_layer(self,
builder,
feature_column,
embedding_table,
get_seq_len=False):
sparse_tensors = feature_column._get_sparse_tensors(builder)
sparse_tensors_ids = sparse_tensors.id_tensor
sparse_tensors_weights = sparse_tensors.weight_tensor
embedding = tf.nn.safe_embedding_lookup_sparse(
embedding_weights=embedding_table,
sparse_ids=sparse_tensors_ids,
sparse_weights=sparse_tensors_weights)
if get_seq_len:
sequence_length = fc_utils.sequence_length_from_sparse_tensor(
sparse_tensors_ids)
return embedding, sequence_length
else:
return embedding
def embedding_input(self):
for key in [
'HISTORY_ITEM', 'HISTORY_CATEGORY', 'NOCLK_HISTORY_ITEM',
'NOCLK_HISTORY_CATEGORY'
]:
self.feature[key] = tf.strings.split(self.feature[key], '')
self.feature[key] = tf.sparse.slice(
self.feature[key], [0, 0], [self.batch_size, MAX_SEQ_LENGTH])
# get uid embeddings
uid_emb = tf.feature_column.input_layer(self.feature,
self.uid_emb_column)
# get embeddings of item and category
# create embedding table
item_embedding_var = tf.get_variable(
'item_embedding_var',
[self.item_cate_column._num_buckets, self.embedding_dim])
category_embedding_var = tf.get_variable(
'category_embedding_var',
[self.category_cate_column._num_buckets, self.embedding_dim])
builder = _LazyBuilder(self.feature)
# get item embedding concat [item_id,category]
item_embedding = self.embedding_input_layer(builder,
self.item_cate_column,
item_embedding_var)
category_embedding = self.embedding_input_layer(
builder, self.category_cate_column, category_embedding_var)
item_emb = tf.concat([item_embedding, category_embedding], 1)
# get history item embedding concat [history_item_id,history_category] and sequence length
his_item_embedding, his_item_sequence_length = self.embedding_input_layer(
builder,
self.his_item_cate_column,
item_embedding_var,
get_seq_len=True)
his_category_embedding, his_category_sequence_length = self.embedding_input_layer(
builder,
self.his_category_cate_column,
category_embedding_var,
get_seq_len=True)
sequence_lengths = [
his_item_sequence_length, his_category_sequence_length
]
his_item_emb = tf.concat([his_item_embedding, his_category_embedding],
2)
sequence_length = _assert_all_equal_and_return(sequence_lengths)
# get negative samples item embedding
noclk_his_item_embedding = self.embedding_input_layer(
builder, self.noclk_his_item_cate_column, item_embedding_var)
noclk_his_category_embedding = self.embedding_input_layer(
builder, self.noclk_his_category_cate_column,
category_embedding_var)
noclk_his_item_emb = tf.concat(
[noclk_his_item_embedding, noclk_his_category_embedding], 2)
return uid_emb, item_emb, his_item_emb, noclk_his_item_emb, sequence_length
def top_fc_layer(self, inputs):
bn1 = tf.layers.batch_normalization(inputs=inputs, name='bn1')
dnn1 = tf.layers.dense(bn1, 200, activation=None, name='dnn1')
if args.norelu:
dnn1 = self.dice(dnn1, name='dice_1')
else:
dnn1 = tf.nn.relu(dnn1)
dnn2 = tf.layers.dense(dnn1, 80, activation=None, name='dnn2')
if args.norelu:
dnn2 = self.dice(dnn2, name='dice_2')
else:
dnn2 = tf.nn.relu(dnn2)
dnn3 = tf.layers.dense(dnn2, 2, activation=None, name='dnn3')
logits = tf.layers.dense(dnn3, 1, activation=None, name='logits')
add_layer_summary(dnn1, 'dnn1')
add_layer_summary(dnn2, 'dnn2')
add_layer_summary(dnn3, 'dnn3')
return logits
def prediction(self):
# input layer to get embedding of features
with tf.variable_scope('input_layer',
partitioner=self.input_layer_partitioner,
reuse=tf.AUTO_REUSE):
uid_emb, item_emb, his_item_emb, noclk_his_item_emb, sequence_length = self.embedding_input(
)
if self.bf16:
uid_emb = tf.cast(uid_emb, tf.bfloat16)
item_emb = tf.cast(item_emb, tf.bfloat16)
his_item_emb = tf.cast(his_item_emb, tf.bfloat16)
noclk_his_item_emb = tf.cast(noclk_his_item_emb, tf.bfloat16)
item_his_eb_sum = tf.reduce_sum(his_item_emb, 1)
# mask = tf.sequence_mask(sequence_length, maxlen=MAX_SEQ_LENGTH)
mask = tf.sequence_mask(sequence_length)
# RNN layer_1
with tf.variable_scope('rnn_1'):
run_output_1, _ = tf.nn.dynamic_rnn(
tf.nn.rnn_cell.GRUCell(self.hidden_size),
inputs=his_item_emb,
sequence_length=sequence_length,
dtype=self.data_tpye,
scope="gru1")
tf.summary.histogram('GRU_outputs', run_output_1)
# Aux loss
aux_loss_scope = tf.variable_scope(
'aux_loss', partitioner=self.dense_layer_partitioner)
with aux_loss_scope.keep_weights(dtype=tf.float32) if self.bf16 \
else aux_loss_scope:
self.aux_loss = self.auxiliary_loss(run_output_1[:, :-1, :],
his_item_emb[:, 1:, :],
noclk_his_item_emb[:, 1:, :],
mask[:, 1:],
dtype=self.data_tpye,
stag='gru')
if self.bf16:
self.aux_loss = tf.cast(self.aux_loss, tf.float32)
# Attention layer
attention_scope = tf.variable_scope('attention_layer')
with attention_scope.keep_weights(dtype=tf.float32) if self.bf16 \
else attention_scope:
_, alphas = self.attention(item_emb,
run_output_1,
self.attention_size,
mask,
softmax_stag=1,
stag='1_1',
mode='LIST',
return_alphas=True)
tf.summary.histogram('alpha_outputs', alphas)
# RNN layer_2
with tf.variable_scope('rnn_2'):
_, final_state2 = tf.nn.dynamic_rnn(
VecAttGRUCell(self.hidden_size),
inputs=[run_output_1, tf.expand_dims(alphas, -1)],
sequence_length=sequence_length,
dtype=self.data_tpye,
scope="gru2")
tf.summary.histogram('GRU2_Final_State', final_state2)
top_input = tf.concat([
uid_emb, item_emb, item_his_eb_sum, item_emb * item_his_eb_sum,
final_state2
], 1)
# Top MLP layer
top_mlp_scope = tf.variable_scope(
'top_mlp_layer',
partitioner=self.dense_layer_partitioner,
reuse=tf.AUTO_REUSE,
)
with top_mlp_scope.keep_weights(dtype=tf.float32) if self.bf16 \
else top_mlp_scope:
self.logits = self.top_fc_layer(top_input)
if self.bf16:
self.logits = tf.cast(self.logits, dtype=tf.float32)
predict = tf.math.sigmoid(self.logits)
return predict
def optimizer(self):
self.logits = tf.squeeze(self.logits)
self.crt_loss = tf.losses.sigmoid_cross_entropy(
self.label,
self.logits,
scope='loss',
reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)
loss = self.crt_loss + self.aux_loss
tf.summary.scalar('sigmoid_cross_entropy', self.crt_loss)
tf.summary.scalar('aux_loss', self.aux_loss)
tf.summary.scalar('loss', loss)
self.global_step = tf.train.get_or_create_global_step()
optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
gradients = optimizer.compute_gradients(loss)
clipped_gradients = [(tf.clip_by_norm(grad, 5), var)
for grad, var in gradients if grad is not None]
train_op = optimizer.apply_gradients(clipped_gradients,
global_step=self.global_step)
return train_op, loss
def get_arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--data_location',
help='Full path of train data',
required=False,
default='./data')
parser.add_argument('--steps',
help='set the number of steps on train dataset',
type=int,
default=0)
parser.add_argument('--batch_size',
help='Batch size to train. Default is 512',
type=int,
default=128)
parser.add_argument('--output_dir',
help='Full path to logs & model output directory',
required=False,
default='./result')
parser.add_argument('--checkpoint',
help='Full path to checkpoints input/output directory',
required=False)
parser.add_argument('--deep_dropout',
help='Dropout regularization for deep model',
type=float,
default=0.0)
parser.add_argument('--learning_rate',
help='Learning rate for model',
type=float,
default=0.001)
parser.add_argument('--save_steps',
help='set the number of steps on saving checkpoints',
type=int,
default=0)
parser.add_argument('--keep_checkpoint_max',
help='Maximum number of recent checkpoint to keep',
type=int,
default=1)
parser.add_argument('--bf16',
help='enable DeepRec BF16 in deep model. Default FP32',
action='store_true')
parser.add_argument('--no_eval',
help='not evaluate trained model by eval dataset.',
action='store_true')
parser.add_argument('--timeline',
help='number of steps on saving timeline. Default 0',
type=int,
default=0)
parser.add_argument("--protocol",
type=str,
choices=["grpc", "grpc++", "star_server"],
default="grpc")
parser.add_argument('--inter',
help='set inter op parallelism threads.',
type=int,
default=0)
parser.add_argument('--intra',
help='set inter op parallelism threads.',
type=int,
default=0)
parser.add_argument('--input_layer_partitioner',
help='slice size of input layer partitioner. units MB',
type=int,
default=0)
parser.add_argument('--dense_layer_partitioner',
help='slice size of dense layer partitioner. units KB',
type=int,
default=0)
parser.add_argument('--norelu', action='store_true')
return parser
def main(tf_config=None, server=None):
# check dataset
print('Checking dataset')
train_file = args.data_location + '/local_train_splitByUser'
test_file = args.data_location + '/local_test_splitByUser'
if (not os.path.exists(train_file)) or (not os.path.exists(test_file)) or (
not os.path.exists(train_file + '_neg')) or (
not os.path.exists(test_file + '_neg')):
print("Dataset does not exist in the given data_location.")
sys.exit()
no_of_training_examples = sum(1 for line in open(train_file))
no_of_test_examples = sum(1 for line in open(test_file))
print("Numbers of training dataset is {}".format(no_of_training_examples))
print("Numbers of test dataset is {}".format(no_of_test_examples))
# set params
# set batch size & steps
batch_size = args.batch_size
if args.steps == 0:
no_of_epochs = 3
train_steps = math.ceil(
(float(no_of_epochs) * no_of_training_examples) / batch_size)
else:
no_of_epochs = math.ceil(
(float(batch_size) * args.steps) / no_of_training_examples)
train_steps = args.steps
test_steps = math.ceil(float(no_of_test_examples) / batch_size)
# set fixed random seed
tf.set_random_seed(2021)
# set directory path
model_dir = os.path.join(args.output_dir,
'model_DIEN_' + str(int(time.time())))
checkpoint_dir = args.checkpoint if args.checkpoint else model_dir
print("Saving model checkpoints to " + checkpoint_dir)
# create data pipline
feature_column = build_feature_cols(args.data_location)
train_dataset = generate_input_data(train_file, batch_size, no_of_epochs)
test_dataset = generate_input_data(test_file, batch_size, 1)
iterator = tf.data.Iterator.from_structure(train_dataset.output_types,
test_dataset.output_shapes)
next_element = iterator.get_next()
train_init_op = iterator.make_initializer(train_dataset)
test_init_op = iterator.make_initializer(test_dataset)
# create variable partitioner for distributed training
num_ps_replicas = len(tf_config['ps_hosts']) if tf_config else 0
input_layer_partitioner = partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas,
min_slice_size=args.input_layer_partitioner <<
20) if args.input_layer_partitioner else None
dense_layer_partitioner = partitioned_variables.min_max_variable_partitioner(
max_partitions=num_ps_replicas,
min_slice_size=args.dense_layer_partitioner <<
10) if args.dense_layer_partitioner else None
# create model
model = DIEN(feature_column=feature_column,
learning_rate=args.learning_rate,
embedding_dim=EMBEDDING_DIM,
hidden_size=HIDDEN_SIZE,
attention_size=ATTENTION_SIZE,
bf16=args.bf16,
inputs=next_element,
input_layer_partitioner=input_layer_partitioner,
dense_layer_partitioner=dense_layer_partitioner)
sess_config = tf.ConfigProto()
if args.inter:
sess_config.inter_op_parallelism_threads = args.inter
if args.intra:
sess_config.intra_op_parallelism_threads = args.intra
hooks = []
if tf_config:
hooks.append(tf.train.StopAtStepHook(last_step=train_steps))
hooks.append(
tf.train.LoggingTensorHook(
{
'steps': model.global_step,
'loss': model.loss
},
every_n_iter=100))
scaffold = tf.train.Scaffold(local_init_op=tf.group(
tf.global_variables_initializer(),
tf.local_variables_initializer(),
tf.tables_initializer(),
train_init_op))
with tf.train.MonitoredTrainingSession(
master=server.target,
is_chief=tf_config['is_chief'],
checkpoint_dir=checkpoint_dir,
scaffold=scaffold,
hooks=hooks,
# save_checkpoint_steps=args.save_steps,
log_step_count_steps=100,
config=sess_config) as sess:
while not sess.should_stop():
_, train_loss = sess.run([model.train_op, model.loss])
else:
with tf.Session(config=sess_config) as sess:
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
sess.run(tf.tables_initializer())
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter(checkpoint_dir, sess.graph)
saver = tf.train.Saver(tf.global_variables(),
max_to_keep=args.keep_checkpoint_max)
options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
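# When --timeline is set, full run traces are collected every `timeline` steps and dumped as Chrome trace JSON for profiling.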
# train model
sess.run(train_init_op)
start = time.perf_counter()
for _in in range(0, train_steps):
if args.save_steps > 0 and (_in % args.save_steps == 0
or _in == train_steps - 1):
_, train_loss, events = sess.run(
[model.train_op, model.loss, merged])
writer.add_summary(events, _in)
checkpoint_path = saver.save(sess,
save_path=os.path.join(
checkpoint_dir,
'DIEN-checkpoint'),
global_step=_in)
print("Save checkpoint to %s" % checkpoint_path)
elif (args.timeline > 0 and _in % args.timeline == 0):
_, train_loss = sess.run([model.train_op, model.loss],
options=options,
run_metadata=run_metadata)
fetched_timeline = timeline.Timeline(
run_metadata.step_stats)
chrome_trace = fetched_timeline.generate_chrome_trace_format(
)
print("Save timeline to %s" % checkpoint_dir)
with open(
os.path.join(checkpoint_dir,
'timeline-%d.json' % _in), 'w') as f:
f.write(chrome_trace)
else:
_, train_loss = sess.run([model.train_op, model.loss])
# print training loss and time cost
if (_in % 100 == 0 or _in == train_steps - 1):
end = time.perf_counter()
cost_time = end - start
global_step_sec = (100 if _in % 100 == 0 else (train_steps - 1) % 100) / cost_time
print("global_step/sec: %0.4f" % global_step_sec)
print("loss = {}, steps = {}, cost time = {:0.2f}s".format(
train_loss, _in, cost_time))
start = time.perf_counter()
# eval model
if not args.no_eval:
writer = tf.summary.FileWriter(
os.path.join(checkpoint_dir, 'eval'))
sess.run(test_init_op)
sess.run(tf.local_variables_initializer())
for _in in range(1, test_steps + 1):
if (_in != test_steps):
sess.run(
[model.acc, model.acc_op, model.auc, model.auc_op])
if (_in % 1000 == 0):
print("Evaluation complate:[{}/{}]".format(
_in, test_steps))
else:
_, eval_acc, _, eval_auc, events = sess.run([
model.acc, model.acc_op, model.auc, model.auc_op,
merged
])
writer.add_summary(events, _in)
print("Evaluation complate:[{}/{}]".format(
_in, test_steps))
print("ACC = {}\nAUC = {}".format(eval_acc, eval_auc))
if __name__ == "__main__":
parser = get_arg_parser()
args = parser.parse_args()
TF_CONFIG = os.getenv('TF_CONFIG')
if not TF_CONFIG:
main()
else:
print(TF_CONFIG)
tf_config = json.loads(TF_CONFIG)
cluster_config = tf_config.get('cluster')
ps_hosts = []
worker_hosts = []
chief_hosts = []
for key, value in cluster_config.items():
if "ps" == key:
ps_hosts = value
elif "worker" == key:
worker_hosts = value
elif "chief" == key:
chief_hosts = value
if chief_hosts:
worker_hosts = chief_hosts + worker_hosts
if not ps_hosts or not worker_hosts:
print('TF_CONFIG ERROR')
sys.exit()
task_config = tf_config.get('task')
task_type = task_config.get('type')
task_index = task_config.get('index') + (1 if task_type == 'worker'
and chief_hosts else 0)
if task_type == 'chief':
task_type = 'worker'
is_chief = True if task_index == 0 else False
cluster = tf.train.ClusterSpec({
"ps": ps_hosts,
"worker": worker_hosts
})
server = tf.distribute.Server(cluster,
job_name=task_type,
task_index=task_index,
protocol=args.protocol)
if task_type == 'ps':
server.join()
elif task_type == 'worker':
with tf.device(
tf.train.replica_device_setter(
worker_device="/job:worker/task:%d" % task_index,
cluster=cluster)):
main(tf_config={
'ps_hosts': ps_hosts,
'worker_hosts': worker_hosts,
'type': task_type,
'index': task_index,
'is_chief': is_chief
},
server=server)
else:
print("Task type or index error.")
sys.exit() | [] | [] | ["TF_CONFIG"] | [] | ["TF_CONFIG"] | python | 1 | 0 | |
retention_data_pipeline/dao/edw.py | import os
import pyodbc
import pandas
from django.conf import settings
DB = "UWSDBDataStore"
def get_day1_enrollments(year, quarter):
"""
Returns a list of student system_keys enrolled on day one and EOP status
"""
campus = 0
db_query = """
SELECT *
FROM (
SELECT
CASE WHEN mm_spcl_program IN(1, 2, 13, 14, 16, 17, 31, 32, 33)
THEN CAST(1 AS BIT)
ELSE CAST(0 AS BIT)
END AS eop_student,
(mm.mm_year*10 + mm.mm_qtr) as yrq,
ROW_NUMBER() OVER
(PARTITION BY mm.mm_system_key ORDER BY mm.mm_system_key) AS rn,
mm_system_key, mm.mm_year, mm.mm_qtr, mm_deg_level, mm_major_abbr
FROM
sec.sr_mini_master mm
INNER JOIN sec.sr_mini_master_deg_program deg
ON deg.mm_student_no = mm.mm_student_no
AND deg.mm_year = mm.mm_year
AND deg.mm_qtr = mm.mm_qtr
WHERE
mm.mm_year = {}
AND mm.mm_qtr = {}
AND mm.mm_proc_ind = 2
AND deg.mm_branch = {}) as a
WHERE a.rn = 1
""".format(
year, quarter, campus
)
results = _run_query(DB, db_query)
return results
def get_ts_courses(year, quarter):
db_query = """
SELECT
ts_year,
ts_quarter,
course_no,
dept_abbrev,
section_id,
sln
FROM
sec.time_schedule
WHERE
ts_year = {}
AND ts_quarter = {}
""".format(
year, quarter
)
results = _run_query(DB, db_query)
return results
def get_registrations(year, quarter):
db_query = """
SELECT
system_key,
regis_yr,
regis_qtr,
sln
FROM
sec.registration_courses
WHERE
regis_yr = {}
AND regis_qtr = {}
AND request_status in ('A', 'C', 'R')
""".format(
year, quarter
)
results = _run_query(DB, db_query)
return results
def get_student_metadata():
db_query = """
SELECT
system_key,
uw_netid,
student_no,
student_name_lowc
FROM
sec.student_1
"""
results = _run_query(DB, db_query)
return results
def get_international_students():
db_query = """
SELECT
SDBSrcSystemKey,
InternationalStudentInd
FROM EDWPresentation.sec.dimStudent
WHERE
InternationalStudentInd = 'Y'
"""
results = _run_query(DB, db_query)
return results
def get_majors(year, quarter):
db_query = """
#TODO: Determine relationship w/ mini_master and write query
""".format(
year, quarter
)
results = _run_query(DB, db_query)
return results
def _run_query(database, query):
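# FREETDSCONF and ODBCSYSINI point pyodbc's FreeTDS/ODBC driver at the bundled db_config files before connecting.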
os.environ["FREETDSCONF"] = "db_config/freetds.conf"
os.environ["ODBCSYSINI"] = "db_config"
password = getattr(settings, "EDW_PASSWORD")
user = getattr(settings, "EDW_USER")
server = getattr(settings, "EDW_SERVER")
constring = (
"Driver={FreeTDS};"
f"SERVERNAME={server};"
f"Database={database};"
"Port=1433;"
"TDS_Version=7.2;"
f"UID={user};"
f"PWD={password}"
)
con = pyodbc.connect(constring)
df = pandas.read_sql(query, con)
return df
| [] | [] | ["FREETDSCONF", "ODBCSYSINI"] | [] | ["FREETDSCONF", "ODBCSYSINI"] | python | 2 | 0 | |
pkg/action.go | // author: github.com/jnpacker
package main
import (
"context"
"encoding/json"
"flag"
"fmt"
"os"
"strings"
hivev1 "github.com/openshift/hive/pkg/apis/hive/v1"
hiveclient "github.com/openshift/hive/pkg/client/clientset/versioned"
corev1 "k8s.io/api/core/v1"
eventv1beta1 "k8s.io/api/events/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
const HibernateSA = true
const ClusterInstallerSA = false
// patchStringValue specifies a json patch operation for a string.
type patchStringValue struct {
Op string `json:"op"`
Path string `json:"path"`
Value string `json:"value"`
}
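// A replace patch for the power state marshals to JSON like:
//   [{"op":"replace","path":"/spec/powerState","value":"Hibernating"}]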
// Simple error function
func checkError(err error) {
if err != nil {
fmt.Println(err.Error())
}
}
func powerStatePatch(clientSet *hiveclient.Clientset, clusterDeployment *hivev1.ClusterDeployment, powerState string) string {
patch := []patchStringValue{{
Op: "replace",
Path: "/spec/powerState",
Value: powerState,
}}
patchInBytes, _ := json.Marshal(patch)
changedCD, err := clientSet.
HiveV1().
ClusterDeployments(clusterDeployment.Namespace).
Patch(context.TODO(), clusterDeployment.Name, types.JSONPatchType, patchInBytes, v1.PatchOptions{})
checkError(err)
return string(changedCD.Spec.PowerState)
}
func powerStateUpdate(clientSet *hiveclient.Clientset, clusterDeployment *hivev1.ClusterDeployment, powerState string) string {
clusterDeployment.Spec.PowerState = hivev1.ClusterPowerState(powerState)
changedCD, err := clientSet.
HiveV1().
ClusterDeployments(clusterDeployment.Namespace).
Update(context.TODO(), clusterDeployment, v1.UpdateOptions{})
checkError(err)
return string(changedCD.Spec.PowerState)
}
// fireEvent creates or updates a Kubernetes Event recording a cluster hibernation action
// for the given ClusterDeployment.
func fireEvent(clientSet *kubernetes.Clientset, objRef *hivev1.ClusterDeployment, eventName string, message string, reason string, eType string) {
event, err := clientSet.EventsV1beta1().Events(objRef.Namespace).Get(context.TODO(), eventName, v1.GetOptions{})
if event != nil && event.Series == nil {
event.Series = new(eventv1beta1.EventSeries)
event.Series.Count = 1
event.Series.LastObservedTime = v1.NowMicro()
}
if err != nil {
fmt.Println(" |-> Event not found")
event = new(eventv1beta1.Event)
//event.TypeMeta.Kind = "Event"
//event.TypeMeta.APIVersion = "v1"
event.ObjectMeta.Name = eventName
event.ObjectMeta.Namespace = objRef.Namespace
event.EventTime = v1.NowMicro()
event.Action = "hibernating"
event.ReportingController = "hibernate-cronjob"
event.ReportingInstance = objRef.Namespace + "/" + objRef.Name
}
if event.Series != nil {
event.Series.Count = event.Series.Count + 1
event.Series.LastObservedTime = v1.NowMicro()
}
event.Type = eType
event.Reason = reason
event.Note = message
event.Regarding = corev1.ObjectReference{
Kind: objRef.Kind,
Namespace: objRef.Namespace,
Name: objRef.Name,
}
if err != nil {
_, err := clientSet.EventsV1beta1().Events(objRef.Namespace).Create(context.TODO(), event, v1.CreateOptions{})
fmt.Println(" \\-> Create a new event " + eventName + " for cluster " + objRef.Namespace + "/" + objRef.Name)
checkError(err)
} else {
_, err := clientSet.EventsV1beta1().Events(objRef.Namespace).Update(context.TODO(), event, v1.UpdateOptions{})
fmt.Println(" \\-> Update existing event "+eventName+", event count", event.Series.Count)
checkError(err)
}
}
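// main reads TAKE_ACTION ("hibernating" or "running", case-insensitive) from the environment;
// when OPT_IN is "true", only clusters labeled hibernate=true are acted on.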
func main() {
var kubeconfig *string
// Determine what action to take Hibernating or Running
var TakeAction = strings.ToLower(os.Getenv("TAKE_ACTION"))
var OptIn = os.Getenv("OPT_IN")
if TakeAction == "" || (TakeAction != "hibernating" && TakeAction != "running") {
panic("Environment variable TAKE_ACTION missing: " + TakeAction)
}
TakeAction = strings.ToUpper(TakeAction[0:1]) + TakeAction[1:]
homePath := os.Getenv("HOME") // Used to look for .kube/config
kubeconfig = flag.String("kubeconfig", homePath+"/.kube/config", "")
flag.Parse()
var config *rest.Config
var err error
	if _, statErr := os.Stat(homePath + "/.kube/config"); !os.IsNotExist(statErr) {
fmt.Println("Connecting with local kubeconfig")
config, err = clientcmd.BuildConfigFromFlags("", *kubeconfig)
} else {
fmt.Println("Connecting using In Cluster Config")
config, err = rest.InClusterConfig()
}
checkError(err)
// Create a client for the hiveV1 CustomResourceDefinitions
hiveset, err := hiveclient.NewForConfig(config)
checkError(err)
// Create a client for kubernetes to access events
kubeset, err := kubernetes.NewForConfig(config)
checkError(err)
podNamespace := os.Getenv("POD_NAMESPACE")
// When running inside the cluster namespace as cluster-installer, we only have access to Get & Update for ClusterDeployment
if podNamespace != "" {
clusterDeployment, err := hiveset.HiveV1().ClusterDeployments(podNamespace).Get(context.TODO(), podNamespace, v1.GetOptions{})
checkError(err)
takeAction(hiveset, kubeset, *clusterDeployment, TakeAction, ClusterInstallerSA)
fmt.Println(" \\-> Event supressed")
} else {
// Grab all ClusterDeployments to change the state
cds, err := hiveset.HiveV1().ClusterDeployments(podNamespace).List(context.TODO(), v1.ListOptions{})
checkError(err)
for _, clusterDeployment := range cds.Items {
if (OptIn == "true" && clusterDeployment.Labels["hibernate"] == "true") || (OptIn != "true" && clusterDeployment.Labels["hibernate"] != "skip") {
takeAction(hiveset, kubeset, clusterDeployment, TakeAction, HibernateSA)
} else {
fmt.Println("Skip : " + clusterDeployment.Name + " (currently " + string(clusterDeployment.Spec.PowerState) + ")")
fireEvent(kubeset, &clusterDeployment, "hibernating", "Skipping cluster "+clusterDeployment.Name, "skipAction", "Normal")
}
}
}
}
func takeAction(hiveset *hiveclient.Clientset, kubeset *kubernetes.Clientset, clusterDeployment hivev1.ClusterDeployment, takeAction string, hibernateSA bool) {
if string(clusterDeployment.Spec.PowerState) != takeAction {
fmt.Print(takeAction + ": " + clusterDeployment.Name)
var newPowerState string
if hibernateSA {
newPowerState = powerStatePatch(hiveset, &clusterDeployment, takeAction)
} else {
newPowerState = powerStateUpdate(hiveset, &clusterDeployment, takeAction)
}
// Check the new state and report a response
if newPowerState == takeAction {
fmt.Println(" ✓")
if hibernateSA {
fireEvent(kubeset, &clusterDeployment, "hibernating", "The cluster "+clusterDeployment.Name+" has powerState "+takeAction, takeAction, "Normal")
}
} else {
fmt.Println(" X")
if hibernateSA {
fireEvent(kubeset, &clusterDeployment, "hibernating", "The cluster "+clusterDeployment.Name+" did not set powerState to "+takeAction, "failedHibernating", "Warning")
}
}
} else {
fmt.Println("Skip : " + clusterDeployment.Name + " (currently " + string(clusterDeployment.Spec.PowerState) + ")")
if hibernateSA {
fireEvent(kubeset, &clusterDeployment, "hibernating", "Skipping cluster "+clusterDeployment.Name+", requested powerState "+takeAction+" equals current powerState "+string(clusterDeployment.Spec.PowerState), "skipHibernating", "Normal")
}
}
}
| [
"\"TAKE_ACTION\"",
"\"OPT_IN\"",
"\"HOME\"",
"\"POD_NAMESPACE\""
]
| []
| [
"POD_NAMESPACE",
"OPT_IN",
"TAKE_ACTION",
"HOME"
]
| [] | ["POD_NAMESPACE", "OPT_IN", "TAKE_ACTION", "HOME"] | go | 4 | 0 | |
util/log/clog.go | // Go support for leveled logs, analogous to https://code.google.com/p/google-clog/
//
// Copyright 2013 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Original version (c) Google.
// Author (fork from https://github.com/golang/glog): Tobias Schottdorf
package log
import (
"bufio"
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
stdLog "log"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/cockroachdb/cockroach/util/caller"
"github.com/cockroachdb/cockroach/util/encoding"
gogoproto "github.com/gogo/protobuf/proto"
)
// Severity identifies the sort of log: info, warning etc. It also implements
// the flag.Value interface. The -stderrthreshold flag is of type Severity and
// should be modified only through the flag.Value interface. The values match
// the corresponding constants in C++.
type Severity int32 // sync/atomic int32
// These constants identify the log levels in order of increasing Severity.
// A message written to a high-Severity log file is also written to each
// lower-Severity log file.
const (
InfoLog Severity = iota
WarningLog
ErrorLog
FatalLog
NumSeverity = 4
)
const severityChar = "IWEF"
// severityName provides a mapping from Severity level to a string.
var severityName = []string{
InfoLog: "INFO",
WarningLog: "WARNING",
ErrorLog: "ERROR",
FatalLog: "FATAL",
}
// get returns the value of the Severity.
func (s *Severity) get() Severity {
return Severity(atomic.LoadInt32((*int32)(s)))
}
// set sets the value of the Severity.
func (s *Severity) set(val Severity) {
atomic.StoreInt32((*int32)(s), int32(val))
}
// String is part of the flag.Value interface.
func (s *Severity) String() string {
return strconv.FormatInt(int64(*s), 10)
}
// Set is part of the flag.Value interface.
func (s *Severity) Set(value string) error {
var threshold Severity
// Is it a known name?
if v, ok := SeverityByName(value); ok {
threshold = v
} else {
v, err := strconv.Atoi(value)
if err != nil {
return err
}
threshold = Severity(v)
}
logging.stderrThreshold.set(threshold)
return nil
}
// Name returns the string representation of the severity (i.e. ERROR, INFO).
func (s *Severity) Name() string {
return severityName[s.get()]
}
// SeverityByName attempts to parse the passed in string into a severity. (i.e.
// ERROR, INFO). If it succeeds, the returned bool is set to true.
func SeverityByName(s string) (Severity, bool) {
s = strings.ToUpper(s)
for i, name := range severityName {
if name == s {
return Severity(i), true
}
}
return 0, false
}
// colorProfile defines escape sequences which provide color in
// terminals. Some terminals support 8 colors, some 256, others
// none at all.
type colorProfile struct {
infoPrefix []byte
warnPrefix []byte
errorPrefix []byte
timePrefix []byte
}
// For terms with 8-color support.
var colorProfile8 = &colorProfile{
infoPrefix: []byte("\033[0;36;49"),
warnPrefix: []byte("\033[0;33;49"),
errorPrefix: []byte("\033[0;31;49"),
timePrefix: []byte("\033[2;37;49"),
}
// For terms with 256-color support.
var colorProfile256 = &colorProfile{
infoPrefix: []byte("\033[38;5;33m"),
warnPrefix: []byte("\033[38;5;214m"),
errorPrefix: []byte("\033[38;5;160m"),
timePrefix: []byte("\033[38;5;246m"),
}
// OutputStats tracks the number of output lines and bytes written.
type outputStats struct {
lines int64
bytes int64
}
// Lines returns the number of lines written.
func (s *outputStats) Lines() int64 {
return atomic.LoadInt64(&s.lines)
}
// Bytes returns the number of bytes written.
func (s *outputStats) Bytes() int64 {
return atomic.LoadInt64(&s.bytes)
}
// Stats tracks the number of lines of output and number of bytes
// per severity level. Values must be read with atomic.LoadInt64.
var Stats struct {
Info, Warning, Error outputStats
}
var severityStats = [NumSeverity]*outputStats{
InfoLog: &Stats.Info,
WarningLog: &Stats.Warning,
ErrorLog: &Stats.Error,
}
// Level is exported because it appears in the arguments to V and is
// the type of the v flag, which can be set programmatically.
// It's a distinct type because we want to discriminate it from logType.
// Variables of type level are only changed under logging.mu.
// The --verbosity flag is read only with atomic ops, so the state of the logging
// module is consistent.
// Level is treated as a sync/atomic int32.
// Level specifies a level of verbosity for V logs. *Level implements
// flag.Value; the --verbosity flag is of type Level and should be modified
// only through the flag.Value interface.
type level int32
// get returns the value of the Level.
func (l *level) get() level {
return level(atomic.LoadInt32((*int32)(l)))
}
// set sets the value of the Level.
func (l *level) set(val level) {
atomic.StoreInt32((*int32)(l), int32(val))
}
// String is part of the flag.Value interface.
func (l *level) String() string {
return strconv.FormatInt(int64(*l), 10)
}
// Set is part of the flag.Value interface.
func (l *level) Set(value string) error {
v, err := strconv.Atoi(value)
if err != nil {
return err
}
logging.mu.Lock()
defer logging.mu.Unlock()
logging.setVState(level(v), logging.vmodule.filter, false)
return nil
}
// moduleSpec represents the setting of the --vmodule flag.
type moduleSpec struct {
filter []modulePat
}
// modulePat contains a filter for the --vmodule flag.
// It holds a verbosity level and a file pattern to match.
type modulePat struct {
pattern string
literal bool // The pattern is a literal string
level level
}
// match reports whether the file matches the pattern. It uses a string
// comparison if the pattern contains no metacharacters.
func (m *modulePat) match(file string) bool {
if m.literal {
return file == m.pattern
}
match, _ := filepath.Match(m.pattern, file)
return match
}
func (m *moduleSpec) String() string {
// Lock because the type is not atomic. TODO: clean this up.
logging.mu.Lock()
defer logging.mu.Unlock()
var b bytes.Buffer
for i, f := range m.filter {
if i > 0 {
b.WriteRune(',')
}
fmt.Fprintf(&b, "%s=%d", f.pattern, f.level)
}
return b.String()
}
var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N")
// Syntax: --vmodule=recordio=2,file=1,gfs*=3
func (m *moduleSpec) Set(value string) error {
var filter []modulePat
for _, pat := range strings.Split(value, ",") {
if len(pat) == 0 {
// Empty strings such as from a trailing comma can be ignored.
continue
}
patLev := strings.Split(pat, "=")
if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 {
return errVmoduleSyntax
}
pattern := patLev[0]
v, err := strconv.Atoi(patLev[1])
if err != nil {
return errors.New("syntax error: expect comma-separated list of filename=N")
}
if v < 0 {
return errors.New("negative value for vmodule level")
}
if v == 0 {
continue // Ignore. It's harmless but no point in paying the overhead.
}
// TODO: check syntax of filter?
filter = append(filter, modulePat{pattern, isLiteral(pattern), level(v)})
}
logging.mu.Lock()
defer logging.mu.Unlock()
logging.setVState(logging.verbosity, filter, true)
return nil
}
// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters
// that require filepath.Match to be called to match the pattern.
func isLiteral(pattern string) bool {
return !strings.ContainsAny(pattern, `\*?[]`)
}
// traceLocation represents the setting of the -log_backtrace_at flag.
type traceLocation struct {
file string
line int
}
// isSet reports whether the trace location has been specified.
// logging.mu is held.
func (t *traceLocation) isSet() bool {
return t.line > 0
}
// match reports whether the specified file and line matches the trace location.
// The argument file name is the full path, not the basename specified in the flag.
// logging.mu is held.
func (t *traceLocation) match(file string, line int) bool {
if t.line != line {
return false
}
if i := strings.LastIndex(file, "/"); i >= 0 {
file = file[i+1:]
}
return t.file == file
}
func (t *traceLocation) String() string {
// Lock because the type is not atomic. TODO: clean this up.
logging.mu.Lock()
defer logging.mu.Unlock()
return fmt.Sprintf("%s:%d", t.file, t.line)
}
var errTraceSyntax = errors.New("syntax error: expect file.go:234")
// Syntax: -log_backtrace_at=gopherflakes.go:234
// Note that unlike vmodule the file extension is included here.
func (t *traceLocation) Set(value string) error {
if value == "" {
// Unset.
logging.mu.Lock()
defer logging.mu.Unlock()
t.line = 0
t.file = ""
return nil
}
fields := strings.Split(value, ":")
if len(fields) != 2 {
return errTraceSyntax
}
file, line := fields[0], fields[1]
if !strings.Contains(file, ".") {
return errTraceSyntax
}
v, err := strconv.Atoi(line)
if err != nil {
return errTraceSyntax
}
if v <= 0 {
return errors.New("negative or zero value for level")
}
logging.mu.Lock()
defer logging.mu.Unlock()
t.line = v
t.file = file
return nil
}
// EntryDecoder reads successive encoded log entries from the input
// buffer. Each entry is preceded by a single big-endian uint32
// describing the next entry's length.
type EntryDecoder struct {
in io.Reader
}
// NewEntryDecoder creates a new instance of EntryDecoder.
func NewEntryDecoder(in io.Reader) *EntryDecoder {
return &EntryDecoder{in: in}
}
// Decode decodes the next log entry into the provided protobuf message.
func (lr *EntryDecoder) Decode(entry *LogEntry) error {
// Read the next log entry.
szBuf := make([]byte, 4)
n, err := lr.in.Read(szBuf)
if err != nil {
return err
}
_, sz := encoding.DecodeUint32(szBuf)
buf := make([]byte, sz)
n, err = lr.in.Read(buf)
if err != nil {
return err
}
if err := gogoproto.Unmarshal(buf[:n], entry); err != nil {
return err
}
return nil
}
type baseEntryReader struct {
buf []byte
ld *EntryDecoder
format func(entry *LogEntry) []byte
}
// Read implements the io.Reader interface.
func (hr *baseEntryReader) Read(p []byte) (int, error) {
var n int
for {
if len(hr.buf) != 0 {
copied := copy(p, hr.buf)
hr.buf = hr.buf[copied:]
n += copied
p = p[copied:]
if len(p) == 0 {
return n, nil
}
}
entry := &LogEntry{}
if err := hr.ld.Decode(entry); err != nil {
return n, err
}
hr.buf = hr.format(entry)
}
}
// NewTermEntryReader returns a reader for log files containing
// encoded entries for use from a terminal. If the --color flag is
// set, and the terminal supports colors, then log output will be
// colorized.
func NewTermEntryReader(reader io.Reader) io.Reader {
tr := &baseEntryReader{ld: NewEntryDecoder(reader)}
colors := logging.shouldColorize()
tr.format = func(entry *LogEntry) []byte { return formatLogEntry(entry, colors) }
return tr
}
// NewJSONEntryReader returns a reader for log files containing
// encoded entries in JSON format.
func NewJSONEntryReader(reader io.Reader) io.Reader {
jr := &baseEntryReader{ld: NewEntryDecoder(reader)}
jr.format = func(entry *LogEntry) []byte {
data, err := json.MarshalIndent(entry, "", " ")
if err != nil {
return []byte(fmt.Sprintf("{\"error\": %q}", err))
}
return data
}
return jr
}
// flushSyncWriter is the interface satisfied by logging destinations.
type flushSyncWriter interface {
Flush() error
Sync() error
io.Writer
}
// formatHeader formats a log header using the provided file name and
// line number. Log lines are colorized depending on severity.
//
// Log lines have this form:
// Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg...
// where the fields are defined as follows:
// L A single character, representing the log level (eg 'I' for INFO)
// mm The month (zero padded; ie May is '05')
// dd The day (zero padded)
// hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds
// threadid The space-padded thread ID as returned by GetTID()
// file The file name
// line The line number
// msg The user-supplied message
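// A rendered header therefore looks roughly like:
//   I0605 14:12:03.012345 1234 clog.go:100  message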
func formatHeader(s Severity, now time.Time, threadID int32, file string, line int32, colors *colorProfile) *buffer {
buf := logging.getBuffer()
if line < 0 {
line = 0 // not a real line number, but acceptable to someDigits
}
if s > FatalLog {
s = InfoLog // for safety.
}
tmp := buf.tmp[:len(buf.tmp)]
var n int
if colors != nil {
var prefix []byte
switch s {
case InfoLog:
prefix = colors.infoPrefix
case WarningLog:
prefix = colors.warnPrefix
case ErrorLog, FatalLog:
prefix = colors.errorPrefix
}
n += copy(tmp, prefix)
}
// Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
// It's worth about 3X. Fprintf is hard.
_, month, day := now.Date()
hour, minute, second := now.Clock()
// Lmmdd hh:mm:ss.uuuuuu threadid file:line]
tmp[n] = severityChar[s]
n++
n += buf.twoDigits(n, int(month))
n += buf.twoDigits(n, day)
if colors != nil {
n += copy(tmp[n:], colors.timePrefix) // gray for time, file & line
}
tmp[n] = ' '
n++
n += buf.twoDigits(n, hour)
tmp[n] = ':'
n++
n += buf.twoDigits(n, minute)
tmp[n] = ':'
n++
n += buf.twoDigits(n, second)
tmp[n] = '.'
n++
n += buf.nDigits(6, n, now.Nanosecond()/1000, '0')
tmp[n] = ' '
n++
n += buf.someDigits(n, int(threadID))
tmp[n] = ' '
n++
buf.Write(tmp[:n])
buf.WriteString(file)
tmp[0] = ':'
n = buf.someDigits(1, int(line))
n++
// Extra space between the header and the actual message for scannability.
tmp[n] = ' '
n++
if colors != nil {
n += copy(tmp[n:], []byte("\033[0m")) // reset
}
tmp[n] = ' '
n++
buf.Write(tmp[:n])
return buf
}
// Some custom tiny helper functions to print the log header efficiently.
const digits = "0123456789"
// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i].
// Returns two.
func (buf *buffer) twoDigits(i, d int) int {
buf.tmp[i+1] = digits[d%10]
d /= 10
buf.tmp[i] = digits[d%10]
return 2
}
// nDigits formats an n-digit integer at buf.tmp[i],
// padding with pad on the left.
// It assumes d >= 0. Returns n.
func (buf *buffer) nDigits(n, i, d int, pad byte) int {
j := n - 1
for ; j >= 0 && d > 0; j-- {
buf.tmp[i+j] = digits[d%10]
d /= 10
}
for ; j >= 0; j-- {
buf.tmp[i+j] = pad
}
return n
}
// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i].
func (buf *buffer) someDigits(i, d int) int {
// Print into the top, then copy down. We know there's space for at least
// a 10-digit number.
j := len(buf.tmp)
for {
j--
buf.tmp[j] = digits[d%10]
d /= 10
if d == 0 {
break
}
}
return copy(buf.tmp[i:], buf.tmp[j:])
}
func formatLogEntry(entry *LogEntry, colors *colorProfile) []byte {
buf := formatHeader(Severity(entry.Severity), time.Unix(entry.Time/1E9, entry.Time%1E9), entry.ThreadID, entry.File, entry.Line, colors)
var args []interface{}
for _, arg := range entry.Args {
args = append(args, arg.Str)
}
if len(entry.Format) == 0 {
buf.WriteString(fmt.Sprint(args...))
} else {
buf.WriteString(fmt.Sprintf(entry.Format, args...))
}
buf.WriteByte('\n')
if len(entry.Stacks) > 0 {
buf.Write(entry.Stacks)
}
defer logging.putBuffer(buf)
return buf.Bytes()
}
func init() {
// Default stderrThreshold is so high that nothing gets through.
logging.stderrThreshold = NumSeverity
if tmpStr := ""; true {
logDir = &tmpStr
}
logging.setVState(0, nil, false)
osExitFunc = os.Exit
go logging.flushDaemon()
}
// Flush flushes all pending log I/O.
func Flush() {
logging.lockAndFlushAll()
}
// loggingT collects all the global state of the logging setup.
type loggingT struct {
// Boolean flags. Not handled atomically because the flag.Value interface
// does not let us avoid the =true, and that shorthand is necessary for
// compatibility. TODO: does this matter enough to fix? Seems unlikely.
toStderr bool // The -logtostderr flag.
alsoToStderr bool // The -alsologtostderr flag.
color string // The -color flag.
hasColorProfile *bool // Non-nil if the color profile has been determined
colorProfile *colorProfile // Set via call to getTermColorProfile
// Level flag. Handled atomically.
stderrThreshold Severity // The -stderrthreshold flag.
// freeList is a list of byte buffers, maintained under freeListMu.
freeList *buffer
// freeListMu maintains the free list. It is separate from the main mutex
// so buffers can be grabbed and printed to without holding the main lock,
// for better parallelization.
freeListMu sync.Mutex
// mu protects the remaining elements of this structure and is
// used to synchronize logging.
mu sync.Mutex
// file holds writer for each of the log types.
file [NumSeverity]flushSyncWriter
// pcs is used in V to avoid an allocation when computing the caller's PC.
pcs [1]uintptr
// vmap is a cache of the V Level for each V() call site, identified by PC.
// It is wiped whenever the vmodule flag changes state.
vmap map[uintptr]level
// filterLength stores the length of the vmodule filter chain. If greater
// than zero, it means vmodule is enabled. It may be read safely
// using sync.LoadInt32, but is only modified under mu.
filterLength int32
// traceLocation is the state of the -log_backtrace_at flag.
traceLocation traceLocation
// These flags are modified only under lock, although verbosity may be fetched
// safely using atomic.LoadInt32.
vmodule moduleSpec // The state of the --vmodule flag.
verbosity level // V logging level, the value of the --verbosity flag/
}
// buffer holds a byte Buffer for reuse. The zero value is ready for use.
type buffer struct {
bytes.Buffer
tmp [64]byte // temporary byte array for creating headers.
next *buffer
}
var logging loggingT
// setVState sets a consistent state for V logging.
// l.mu is held.
func (l *loggingT) setVState(verbosity level, filter []modulePat, setFilter bool) {
// Turn verbosity off so V will not fire while we are in transition.
logging.verbosity.set(0)
// Ditto for filter length.
atomic.StoreInt32(&logging.filterLength, 0)
// Set the new filters and wipe the pc->Level map if the filter has changed.
if setFilter {
logging.vmodule.filter = filter
logging.vmap = make(map[uintptr]level)
}
// Things are consistent now, so enable filtering and verbosity.
// They are enabled in order opposite to that in V.
atomic.StoreInt32(&logging.filterLength, int32(len(filter)))
logging.verbosity.set(verbosity)
}
// getBuffer returns a new, ready-to-use buffer.
func (l *loggingT) getBuffer() *buffer {
l.freeListMu.Lock()
b := l.freeList
if b != nil {
l.freeList = b.next
}
l.freeListMu.Unlock()
if b == nil {
b = new(buffer)
} else {
b.next = nil
b.Reset()
}
return b
}
// putBuffer returns a buffer to the free list.
func (l *loggingT) putBuffer(b *buffer) {
if b.Len() >= 256 {
// Let big buffers die a natural death.
return
}
l.freeListMu.Lock()
b.next = l.freeList
l.freeList = b
l.freeListMu.Unlock()
}
var timeNow = time.Now // Stubbed out for testing.
func (l *loggingT) print(s Severity, args ...interface{}) {
file, line, _ := caller.Lookup(1)
entry := LogEntry{}
setLogEntry(nil, "", args, &entry)
l.outputLogEntry(s, file, line, false, &entry)
}
// outputLogEntry marshals a log entry proto into bytes, and writes
// the data to the log files. If a trace location is set, stack traces
// are added to the entry before marshaling.
func (l *loggingT) outputLogEntry(s Severity, file string, line int, alsoToStderr bool, entry *LogEntry) {
l.mu.Lock()
// Set additional details in log entry.
now := time.Now()
entry.Severity = int32(s)
entry.Time = now.UnixNano()
entry.ThreadID = int32(pid) // TODO: should be TID
entry.File = file
entry.Line = int32(line)
// On fatal log, set all stacks.
if s == FatalLog {
entry.Stacks = stacks(true)
logExitFunc = func(error) {} // If we get a write error, we'll still exit.
} else if l.traceLocation.isSet() {
if l.traceLocation.match(file, line) {
entry.Stacks = stacks(false)
}
}
if l.toStderr {
_, _ = os.Stderr.Write(l.processForStderr(entry))
} else {
if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() {
_, _ = os.Stderr.Write(l.processForStderr(entry))
}
if l.file[s] == nil {
if err := l.createFiles(s); err != nil {
_, _ = os.Stderr.Write(l.processForStderr(entry)) // Make sure the message appears somewhere.
l.exit(err)
}
}
data := encodeLogEntry(entry)
switch s {
case FatalLog:
l.file[FatalLog].Write(data)
fallthrough
case ErrorLog:
l.file[ErrorLog].Write(data)
fallthrough
case WarningLog:
l.file[WarningLog].Write(data)
fallthrough
case InfoLog:
l.file[InfoLog].Write(data)
}
if stats := severityStats[s]; stats != nil {
atomic.AddInt64(&stats.lines, 1)
atomic.AddInt64(&stats.bytes, int64(len(data)))
}
}
l.mu.Unlock()
// Flush and exit on fatal logging.
if s == FatalLog {
// If we got here via Exit rather than Fatal, print no stacks.
timeoutFlush(10 * time.Second)
if atomic.LoadUint32(&fatalNoStacks) > 0 {
osExitFunc(1)
} else {
osExitFunc(255) // C++ uses -1, which is silly because it's anded with 255 anyway.
}
}
}
func encodeLogEntry(entry *LogEntry) []byte {
// Marshal log entry.
entryData, err := gogoproto.Marshal(entry)
if err != nil {
panic(fmt.Sprintf("unable to marshal log entry: %s", err))
}
// Encode the length of the data first, followed by the encoded data.
data := encoding.EncodeUint32([]byte(nil), uint32(len(entryData)))
return append(data, entryData...)
}
// processForStderr formats a log entry for output to standard error.
func (l *loggingT) processForStderr(entry *LogEntry) []byte {
return formatLogEntry(entry, l.shouldColorize())
}
// shouldColorize returns whether output should be colorized.
func (l *loggingT) shouldColorize() *colorProfile {
if l.color == "auto" {
return l.getTermColorProfile()
}
return nil
}
// checkForColorTerm attempts to verify that stderr is a character
// device and if so, that the terminal supports color output.
func (l *loggingT) getTermColorProfile() *colorProfile {
if l.hasColorProfile == nil {
var color bool
fi, _ := os.Stderr.Stat() // get the FileInfo struct describing the standard input.
if (fi.Mode() & os.ModeCharDevice) != 0 {
term := os.Getenv("TERM")
switch term {
case "ansi", "xterm-color":
l.colorProfile = colorProfile8
color = true
case "xterm-256color":
l.colorProfile = colorProfile256
color = true
}
}
l.hasColorProfile = &color
}
return l.colorProfile
}
// timeoutFlush calls Flush and returns when it completes or after timeout
// elapses, whichever happens first. This is needed because the hooks invoked
// by Flush may deadlock when clog.Fatal is called from a hook that holds
// a lock.
func timeoutFlush(timeout time.Duration) {
done := make(chan bool, 1)
go func() {
Flush() // calls logging.lockAndFlushAll()
done <- true
}()
select {
case <-done:
case <-time.After(timeout):
fmt.Fprintln(os.Stderr, "clog: Flush took longer than", timeout)
}
}
// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines.
func stacks(all bool) []byte {
// We don't know how big the traces are, so grow a few times if they don't fit. Start large, though.
n := 10000
if all {
n = 100000
}
var trace []byte
for i := 0; i < 5; i++ {
trace = make([]byte, n)
nbytes := runtime.Stack(trace, all)
if nbytes < len(trace) {
return trace[:nbytes]
}
n *= 2
}
return trace
}
// logExitFunc provides a simple mechanism to override the default behavior
// of exiting on error. Used in testing and to guarantee we reach a required exit
// for fatal logs. Instead, exit could be a function rather than a method but that
// would make its use clumsier.
var logExitFunc func(error)
var osExitFunc func(int)
// exit is called if there is trouble creating or writing log files.
// It flushes the logs and exits the program; there's no point in hanging around.
// l.mu is held.
func (l *loggingT) exit(err error) {
fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err)
// If logExitFunc is set, we do that instead of exiting.
if logExitFunc != nil {
logExitFunc(err)
return
}
l.flushAll()
osExitFunc(2)
}
// syncBuffer joins a bufio.Writer to its underlying file, providing access to the
// file's Sync method and providing a wrapper for the Write method that provides log
// file rotation. There are conflicting methods, so the file cannot be embedded.
// l.mu is held for all its methods.
type syncBuffer struct {
logger *loggingT
*bufio.Writer
file *os.File
sev Severity
nbytes uint64 // The number of bytes written to this file
}
func (sb *syncBuffer) Sync() error {
return sb.file.Sync()
}
func (sb *syncBuffer) Write(p []byte) (n int, err error) {
if sb.nbytes+uint64(len(p)) >= MaxSize {
if err := sb.rotateFile(time.Now()); err != nil {
sb.logger.exit(err)
}
}
n, err = sb.Writer.Write(p)
sb.nbytes += uint64(n)
if err != nil {
sb.logger.exit(err)
}
return
}
// rotateFile closes the syncBuffer's file and starts a new one.
func (sb *syncBuffer) rotateFile(now time.Time) error {
if sb.file != nil {
if err := sb.Flush(); err != nil {
return err
}
if err := sb.file.Close(); err != nil {
return err
}
}
var err error
sb.file, _, err = create(sb.sev, now)
sb.nbytes = 0
if err != nil {
return err
}
sb.Writer = bufio.NewWriterSize(sb.file, bufferSize)
// Write header.
file, line, _ := caller.Lookup(0)
for _, format := range []string{
fmt.Sprintf("Running on machine: %s", host),
fmt.Sprintf("Binary: Built with %s %s for %s/%s",
runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH),
} {
entry := LogEntry{
Time: time.Now().UnixNano(),
File: file,
Line: int32(line),
Format: format,
}
n, err := sb.file.Write(encodeLogEntry(&entry))
if err != nil {
panic(err)
}
sb.nbytes += uint64(n)
}
return err
}
// bufferSize sizes the buffer associated with each log file. It's large
// so that log records can accumulate without the logging thread blocking
// on disk I/O. The flushDaemon will block instead.
const bufferSize = 256 * 1024
// removeFiles clears all the log files.
func (l *loggingT) removeFiles() error {
l.mu.Lock()
defer l.mu.Unlock()
return l.removeFilesLocked()
}
func (l *loggingT) removeFilesLocked() error {
for s := FatalLog; s >= InfoLog; s-- {
if sb, ok := l.file[s].(*syncBuffer); ok {
if err := sb.file.Close(); err != nil {
return err
}
if err := os.Remove(sb.file.Name()); err != nil {
return err
}
}
l.file[s] = nil
}
return nil
}
// createFiles creates all the log files for severity from sev down to InfoLog.
// l.mu is held.
func (l *loggingT) createFiles(sev Severity) error {
now := time.Now()
// Files are created in decreasing severity order, so as soon as we find one
// has already been created, we can stop.
for s := sev; s >= InfoLog && l.file[s] == nil; s-- {
sb := &syncBuffer{
logger: l,
sev: s,
}
if err := sb.rotateFile(now); err != nil {
return err
}
l.file[s] = sb
}
return nil
}
const flushInterval = 30 * time.Second
// flushDaemon periodically flushes the log file buffers.
func (l *loggingT) flushDaemon() {
// doesn't need to be Stop()'d as the loop never escapes
for range time.Tick(flushInterval) {
l.lockAndFlushAll()
}
}
// lockAndFlushAll is like flushAll but locks l.mu first.
func (l *loggingT) lockAndFlushAll() {
l.mu.Lock()
l.flushAll()
l.mu.Unlock()
}
// flushAll flushes all the logs and attempts to "sync" their data to disk.
// l.mu is held.
func (l *loggingT) flushAll() {
// Flush from fatal down, in case there's trouble flushing.
for s := FatalLog; s >= InfoLog; s-- {
file := l.file[s]
if file != nil {
_ = file.Flush() // ignore error
_ = file.Sync() // ignore error
}
}
}
// CopyStandardLogTo arranges for messages written to the Go "log" package's
// default logs to also appear in the Google logs for the named and lower
// severities. Subsequent changes to the standard log's default output location
// or format may break this behavior.
//
// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not
// recognized, CopyStandardLogTo panics.
func CopyStandardLogTo(name string) {
sev, ok := SeverityByName(name)
if !ok {
panic(fmt.Sprintf("CopyStandardLogTo(%q): unrecognized Severity name", name))
}
// Set a log format that captures the user's file and line:
// d.go:23: message
stdLog.SetFlags(stdLog.Lshortfile)
stdLog.SetOutput(logBridge(sev))
}
// logBridge provides the Write method that enables CopyStandardLogTo to connect
// Go's standard logs to the logs provided by this package.
type logBridge Severity
// Write parses the standard logging line and passes its components to the
// logger for Severity(lb).
func (lb logBridge) Write(b []byte) (n int, err error) {
var (
file = "???"
line = 1
text string
)
// Split "d.go:23: message" into "d.go", "23", and "message".
if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 {
text = fmt.Sprintf("bad log format: %s", b)
} else {
file = string(parts[0])
text = string(parts[2][1 : len(parts[2])-1]) // skip leading space and trailing newline
line, err = strconv.Atoi(string(parts[1]))
if err != nil {
text = fmt.Sprintf("bad line number: %s", b)
line = 1
}
}
// printWithFileLine with alsoToStderr=true, so standard log messages
// always appear on standard error.
entry := &LogEntry{
Format: text,
}
logging.outputLogEntry(Severity(lb), file, line, true, entry)
return len(b), nil
}
// setV computes and remembers the V level for a given PC
// when vmodule is enabled.
// File pattern matching takes the basename of the file, stripped
// of its .go suffix, and uses filepath.Match, which is a little more
// general than the *? matching used in C++.
// l.mu is held.
func (l *loggingT) setV(pc uintptr) level {
fn := runtime.FuncForPC(pc)
file, _ := fn.FileLine(pc)
// The file is something like /a/b/c/d.go. We want just the d.
if strings.HasSuffix(file, ".go") {
file = file[:len(file)-3]
}
if slash := strings.LastIndex(file, "/"); slash >= 0 {
file = file[slash+1:]
}
for _, filter := range l.vmodule.filter {
if filter.match(file) {
l.vmap[pc] = filter.level
return filter.level
}
}
l.vmap[pc] = 0
return 0
}
func v(level level) bool {
return VDepth(level, 1)
}
// VDepth reports whether verbosity at the call site is at least the requested
// level.
func VDepth(level level, depth int) bool {
// This function tries hard to be cheap unless there's work to do.
// The fast path is two atomic loads and compares.
// Here is a cheap but safe test to see if V logging is enabled globally.
if logging.verbosity.get() >= level {
return true
}
// It's off globally but it vmodule may still be set.
// Here is another cheap but safe test to see if vmodule is enabled.
if atomic.LoadInt32(&logging.filterLength) > 0 {
// Now we need a proper lock to use the logging structure. The pcs field
// is shared so we must lock before accessing it. This is fairly expensive,
// but if V logging is enabled we're slow anyway.
logging.mu.Lock()
defer logging.mu.Unlock()
if runtime.Callers(2+depth, logging.pcs[:]) == 0 {
return false
}
v, ok := logging.vmap[logging.pcs[0]]
if !ok {
v = logging.setV(logging.pcs[0])
}
return v >= level
}
return false
}
// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks.
// It allows Exit and relatives to use the Fatal logs.
var fatalNoStacks uint32
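// Illustrative use of the entry readers defined above (error handling omitted;
// path is a placeholder for a log file written by this package):
//   f, _ := os.Open(path)
//   _, _ = io.Copy(os.Stdout, NewTermEntryReader(f))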
| [
"\"TERM\""
]
| []
| [
"TERM"
]
| [] | ["TERM"] | go | 1 | 0 | |
src/seashore/shell.py | # Copyright (c) Shopkick 2017
# See LICENSE for details.
'''
Shell
-----
Running subprocesses with a shell-like interface.
'''
import contextlib
import os
import tempfile
import time
import signal
import subprocess
import attr
class ProcessError(Exception):
"""
A process has exited with non-zero status.
"""
def __init__(self, *args):
super(ProcessError, self).__init__()
self._args = args
self.returncode = args[0]
if len(self._args) > 1:
self.output = args[1]
if len(self._args) > 2:
            self.error = args[2]
def __repr__(self):
return 'ProcessError{}'.format(repr(self._args))
__str__ = __repr__
def __getitem__(self, i):
return self._args[i]
def __iter__(self):
return iter(self._args) # pragma: no cover
@attr.s
class Shell(object):
"""
Run subprocesses.
Init arguments:
:param cwd: current working directory (default is process's current working directory)
:param env: environment variables dict (default is a copy of the process's environment)
"""
_procs = attr.ib(init=False, default=attr.Factory(list))
_cwd = attr.ib(init=False, default=attr.Factory(os.getcwd))
_env = attr.ib(init=False, default=attr.Factory(lambda: dict(os.environ)))
def redirect(self, command, outfp, errfp, cwd=None):
"""
Run a process, while its standard error and output go to pre-existing files
:param command: list of arguments
:param outfp: output file object
:param errfp: error file object
:param cwd: current working directory (default is to use the internal working directory)
:raises: :code:`ProcessError` with return code
"""
proc = self.popen(command, stdin=subprocess.PIPE, stdout=outfp, stderr=errfp, cwd=cwd)
proc.communicate('')
retcode = proc.wait()
if retcode != 0:
raise ProcessError(retcode)
def batch(self, command, cwd=None):
"""
Run a process, wait until it ends and return the output and error
:param command: list of arguments
:param cwd: current working directory (default is to use the internal working directory)
:returns: pair of standard output, standard error
:raises: :code:`ProcessError` with (return code, standard output, standard error)
"""
with tempfile.NamedTemporaryFile() as stdout, \
tempfile.NamedTemporaryFile() as stderr:
proc = self.popen(command, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr, cwd=cwd)
proc.communicate('')
retcode = proc.wait()
self._procs.remove(proc)
stdout.seek(0)
stderr.seek(0)
stdout_contents = stdout.read()
stderr_contents = stderr.read()
## Log contents of stdout, stderr
if retcode != 0:
raise ProcessError(retcode, stdout_contents, stderr_contents)
else:
return stdout_contents, stderr_contents
def interactive(self, command, cwd=None):
"""
Run a process, while its standard output and error go directly to ours.
:param command: list of arguments
:param cwd: current working directory (default is to use the internal working directory)
        :raises: :code:`ProcessError` with the return code
"""
proc = self.popen(command, cwd=cwd)
retcode = proc.wait()
self._procs.remove(proc)
if retcode != 0:
raise ProcessError(retcode)
def popen(self, command, **kwargs):
"""
Run a process, giving direct access to the :code:`subprocess.Popen` arguments.
:param command: list of arguments
:param kwargs: keyword arguments passed to :code:`subprocess.Popen`
:returns: a :code:`Process`
"""
if kwargs.get('cwd') is None:
kwargs['cwd'] = self._cwd
if kwargs.get('env') is None:
kwargs['env'] = self._env
proc = subprocess.Popen(command, **kwargs)
self._procs.append(proc)
return proc
def setenv(self, key, val):
"""
Set internal environment variable.
Changes internal environment in which subprocesses will be run.
Does not change the process's own environment.
:param key: name of variable
:param value: value of variable
"""
if val is None:
if key in self._env:
del self._env[key]
return
key = str(key) # keys must be strings
val = str(val) # vals must be strings
self._env[key] = val
def getenv(self, key):
"""
Get internal environment variable.
Return value of variable in internal environment in which subprocesses will be run.
:param key: name of variable
:returns: value of variable
:raises: :code:`KeyError` if key is not in environment
"""
return self._env[key]
def chdir(self, path):
"""
Change internal current working directory.
Changes internal directory in which subprocesses will be run.
Does not change the process's own current working directory.
:param path: new working directory
"""
self._cwd = os.path.join(self._cwd, path)
def reap_all(self):
"""
Kill, as gently as possible, all processes.
Loop through all processes and try to kill them with
a sequence of :code:`SIGINT`, :code:`SIGTERM` and
:code:`SIGKILL`.
"""
for proc in self._procs:
ret_code = proc.poll()
if ret_code is None:
proc.send_signal(signal.SIGINT)
time.sleep(3)
ret_code = ret_code or proc.poll()
if ret_code is None: # pragma: no coverage
proc.terminate()
time.sleep(3)
ret_code = ret_code or proc.poll() # pragma: no coverage
if ret_code is None: # pragma: no coverage
proc.kill()
def clone(self):
"""
Clone the shell object.
:returns: a new Shell object with a copy of the environment dictionary
"""
return attr.assoc(self, _env=dict(self._env), _procs=[])
@contextlib.contextmanager
def autoexit_code():
"""
Context manager that translates :code:`ProcessError` to immediate process exit.
"""
try:
yield
except ProcessError as pexc:
raise SystemExit(pexc[0])
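# Illustrative usage:
#   sh = Shell()
#   sh.setenv('GIT_PAGER', 'cat')
#   out, err = sh.batch(['echo', 'hello'])   # raises ProcessError on a non-zero exit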
| []
| []
| []
| [] | [] | python | 0 | 0 | |
pkg/env/dev.go | package env
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"fmt"
"net"
"os"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/sirupsen/logrus"
"github.com/Azure/ARO-RP/pkg/util/clientauthorizer"
"github.com/Azure/ARO-RP/pkg/util/refreshable"
"github.com/Azure/ARO-RP/pkg/util/version"
)
type dev struct {
*prod
}
func newDev(ctx context.Context, stop <-chan struct{}, log *logrus.Entry) (Interface, error) {
for _, key := range []string{
"PROXY_HOSTNAME",
} {
if _, found := os.LookupEnv(key); !found {
return nil, fmt.Errorf("environment variable %q unset", key)
}
}
d := &dev{}
var err error
d.prod, err = newProd(ctx, stop, log)
if err != nil {
return nil, err
}
for _, feature := range []Feature{
FeatureDisableDenyAssignments,
FeatureDisableSignedCertificates,
FeatureRequireD2sV3Workers,
FeatureDisableReadinessDelay,
} {
d.features[feature] = true
}
d.prod.clusterGenevaLoggingAccount = version.DevClusterGenevaLoggingAccount
d.prod.clusterGenevaLoggingConfigVersion = version.DevClusterGenevaLoggingConfigVersion
d.prod.clusterGenevaLoggingEnvironment = version.DevGenevaLoggingEnvironment
d.prod.clusterGenevaLoggingNamespace = version.DevClusterGenevaLoggingNamespace
// ugh: run this again after RP_MODE=development has caused the feature flag
// to be set.
d.prod.ARMHelper, err = newARMHelper(ctx, log, d)
if err != nil {
return nil, err
}
return d, nil
}
func (d *dev) InitializeAuthorizers() error {
d.armClientAuthorizer = clientauthorizer.NewAll()
d.adminClientAuthorizer = clientauthorizer.NewAll()
return nil
}
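// AROOperatorImage honours an ARO_IMAGE environment variable override, e.g. to point the RP
// at a locally built operator image during development.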
func (d *dev) AROOperatorImage() string {
override := os.Getenv("ARO_IMAGE")
if override != "" {
return override
}
return fmt.Sprintf("%s/aro:%s", d.ACRDomain(), version.GitCommit)
}
func (d *dev) Listen() (net.Listener, error) {
// in dev mode there is no authentication, so for safety we only listen on
// localhost
return net.Listen("tcp", "localhost:8443")
}
func (d *dev) FPAuthorizer(tenantID, resource string) (refreshable.Authorizer, error) {
oauthConfig, err := adal.NewOAuthConfig(d.Environment().ActiveDirectoryEndpoint, tenantID)
if err != nil {
return nil, err
}
fpPrivateKey, fpCertificates := d.fpCertificateRefresher.GetCertificates()
sp, err := adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, d.fpClientID, fpCertificates[0], fpPrivateKey, resource)
if err != nil {
return nil, err
}
return refreshable.NewAuthorizer(sp), nil
}
| [
"\"ARO_IMAGE\""
]
| []
| [
"ARO_IMAGE"
]
| [] | ["ARO_IMAGE"] | go | 1 | 0 | |
awx/plugins/inventory/tower.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
"""
Ansible Tower/AWX dynamic inventory script
==========================================
Generates dynamic inventory for Tower
Author: Matthew Jones (@matburt)
"""
import argparse
import re
import os
import sys
import json
import requests
from requests.auth import HTTPBasicAuth
from urlparse import urljoin
def parse_configuration():
"""
    Parse command line arguments and environment configuration for the Ansible Tower dynamic inventory script.
"""
parser = argparse.ArgumentParser(
description='Ansible dynamic inventory script for Ansible Tower.',
)
parser.add_argument(
'--list',
action='store_true',
default=True,
help='Return all hosts known to Tower given a particular inventory',
)
parser.parse_args()
host_name = os.environ.get("TOWER_HOST", None)
username = os.environ.get("TOWER_USERNAME", None)
password = os.environ.get("TOWER_PASSWORD", None)
ignore_ssl = False
ssl_negative_var = os.environ.get("TOWER_IGNORE_SSL", None)
if ssl_negative_var:
ignore_ssl = ssl_negative_var.lower() in ("1", "yes", "true")
else:
ssl_positive_var = os.environ.get("TOWER_VERIFY_SSL", None)
if ssl_positive_var:
ignore_ssl = ssl_positive_var.lower() not in ('true', '1', 't', 'y', 'yes')
inventory = os.environ.get("TOWER_INVENTORY", None)
license_type = os.environ.get("TOWER_LICENSE_TYPE", "enterprise")
errors = []
if not host_name:
errors.append("Missing TOWER_HOST in environment")
if not username:
errors.append("Missing TOWER_USERNAME in environment")
if not password:
errors.append("Missing TOWER_PASSWORD in environment")
if not inventory:
errors.append("Missing TOWER_INVENTORY in environment")
if errors:
raise RuntimeError("\n".join(errors))
return dict(tower_host=host_name,
tower_user=username,
tower_pass=password,
tower_inventory=inventory,
tower_license_type=license_type,
ignore_ssl=ignore_ssl)
def read_tower_inventory(tower_host, tower_user, tower_pass, inventory, license_type, ignore_ssl=False):
if not re.match('(?:http|https)://', tower_host):
tower_host = "https://{}".format(tower_host)
inventory_url = urljoin(tower_host, "/api/v2/inventories/{}/script/?hostvars=1&towervars=1&all=1".format(inventory.replace('/', '')))
config_url = urljoin(tower_host, "/api/v2/config/")
reason = None
try:
if license_type != "open":
config_response = requests.get(config_url,
auth=HTTPBasicAuth(tower_user, tower_pass),
verify=not ignore_ssl)
if config_response.ok:
source_type = config_response.json()['license_info']['license_type']
if not source_type == license_type:
raise RuntimeError("Tower server licenses must match: source: {} local: {}".format(source_type,
license_type))
else:
raise RuntimeError("Failed to validate the license of the remote Tower: {}".format(config_response.data))
response = requests.get(inventory_url,
auth=HTTPBasicAuth(tower_user, tower_pass),
verify=not ignore_ssl)
try:
json_response = response.json()
except (ValueError, TypeError) as e:
reason = "Failed to parse json from host: {}".format(e)
if response.ok:
return json_response
if not reason:
reason = json_response.get('detail', 'Retrieving Tower Inventory Failed')
except requests.ConnectionError as e:
reason = "Connection to remote host failed: {}".format(e)
raise RuntimeError(reason)
def main():
config = parse_configuration()
inventory_hosts = read_tower_inventory(config['tower_host'],
config['tower_user'],
config['tower_pass'],
config['tower_inventory'],
config['tower_license_type'],
ignore_ssl=config['ignore_ssl'])
print(
json.dumps(
inventory_hosts
)
)
if __name__ == '__main__':
main()
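# Illustrative invocation (all values are placeholders):
#   TOWER_HOST=https://tower.example.com TOWER_USERNAME=admin TOWER_PASSWORD=secret \
#   TOWER_INVENTORY=1 ./tower.py --list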
| []
| []
| [
"TOWER_LICENSE_TYPE",
"TOWER_USERNAME",
"TOWER_INVENTORY",
"TOWER_PASSWORD",
"TOWER_HOST",
"TOWER_VERIFY_SSL",
"TOWER_IGNORE_SSL"
]
| [] | ["TOWER_LICENSE_TYPE", "TOWER_USERNAME", "TOWER_INVENTORY", "TOWER_PASSWORD", "TOWER_HOST", "TOWER_VERIFY_SSL", "TOWER_IGNORE_SSL"] | python | 7 | 0 | |
core/lager/lager_test.go | package lager_test
import (
"github.com/go-chassis/go-chassis/core/lager"
//"github.com/go-chassis/go-chassis/core/config"
"os"
"path/filepath"
"testing"
"time"
)
func TestInitialize1(t *testing.T) {
path := os.Getenv("GOPATH")
logDir := filepath.Join(path, "src", "github.com", "go-chassis", "go-chassis", "examples", "discovery", "server")
os.Setenv("CHASSIS_HOME", logDir)
t.Log("Initializing lager")
t.Log("creating log/chassis.log")
lager.Init(&lager.Options{
LoggerFile: filepath.Join("log", "chassis.log"),
})
if _, err := os.Stat(logDir); err != nil {
if os.IsNotExist(err) {
t.Error(err)
}
}
t.Log("duplicate initialization")
lager.Init(&lager.Options{})
}
func TestInitialize2(t *testing.T) {
path := os.Getenv("GOPATH")
logDir := filepath.Join(path, "src", "github.com", "go-chassis", "go-chassis", "examples", "discovery", "server")
os.Setenv("CHASSIS_HOME", logDir)
	//initializing config to set up the PassLagerDefinition variable
	t.Log("initializing config to set up the PassLagerDefinition variable")
//Initializing lager
t.Log("Initializing lager")
lager.Init(&lager.Options{})
if _, err := os.Stat(logDir); err != nil {
if os.IsNotExist(err) {
t.Error(err)
}
}
time.Sleep(1 * time.Second)
}
| [
"\"GOPATH\"",
"\"GOPATH\""
]
| []
| [
"GOPATH"
]
| [] | ["GOPATH"] | go | 1 | 0 | |
internal/vcs/git/commits.go | package git
import (
"bytes"
"context"
"fmt"
"os"
"strconv"
"strings"
"time"
"github.com/cockroachdb/errors"
"github.com/sourcegraph/sourcegraph/internal/actor"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/gitserver/gitdomain"
"github.com/sourcegraph/sourcegraph/internal/honey"
"github.com/sourcegraph/sourcegraph/internal/lazyregexp"
"github.com/sourcegraph/sourcegraph/internal/trace"
"github.com/sourcegraph/sourcegraph/internal/trace/ot"
)
// CommitsOptions specifies options for (Repository).Commits and (Repository).CommitCount.
type CommitsOptions struct {
Range string // commit range (revspec, "A..B", "A...B", etc.)
N uint // limit the number of returned commits to this many (0 means no limit)
Skip uint // skip this many commits at the beginning
MessageQuery string // include only commits whose commit message contains this substring
Author string // include only commits whose author matches this
After string // include only commits after this date
Before string // include only commits before this date
Reverse bool // Whether or not commits should be given in reverse order (optional)
DateOrder bool // Whether or not commits should be sorted by date (optional)
Path string // only commits modifying the given path are selected (optional)
// When true we opt out of attempting to fetch missing revisions
NoEnsureRevision bool
}
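// For example, CommitsOptions{Range: "HEAD", N: 10, Author: "alice"} selects the ten most
// recent commits reachable from HEAD whose author matches "alice".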
// logEntryPattern is the regexp pattern that matches entries in the output of the `git shortlog
// -sne` command.
var logEntryPattern = lazyregexp.New(`^\s*([0-9]+)\s+(.*)$`)
var recordGetCommitQueries = os.Getenv("RECORD_GET_COMMIT_QUERIES") == "1"
// getCommit returns the commit with the given id.
func getCommit(ctx context.Context, repo api.RepoName, id api.CommitID, opt ResolveRevisionOptions) (_ *gitdomain.Commit, err error) {
if Mocks.GetCommit != nil {
return Mocks.GetCommit(id)
}
if honey.Enabled() && recordGetCommitQueries {
defer func() {
ev := honey.NewEvent("getCommit")
ev.SetSampleRate(10) // 1 in 10
ev.AddField("repo", repo)
ev.AddField("commit", id)
ev.AddField("no_ensure_revision", opt.NoEnsureRevision)
ev.AddField("actor", actor.FromContext(ctx).UIDString())
q, _ := ctx.Value(trace.GraphQLQueryKey).(string)
ev.AddField("query", q)
if err != nil {
ev.AddField("error", err.Error())
}
_ = ev.Send()
}()
}
if err := checkSpecArgSafety(string(id)); err != nil {
return nil, err
}
commitOptions := CommitsOptions{
Range: string(id),
N: 1,
NoEnsureRevision: opt.NoEnsureRevision,
}
commits, err := commitLog(ctx, repo, commitOptions)
if err != nil {
return nil, err
}
if len(commits) != 1 {
return nil, errors.Errorf("git log: expected 1 commit, got %d", len(commits))
}
return commits[0], nil
}
// GetCommit returns the commit with the given commit ID, or a RevisionNotFoundError if no such
// commit exists.
func GetCommit(ctx context.Context, repo api.RepoName, id api.CommitID, opt ResolveRevisionOptions) (*gitdomain.Commit, error) {
span, ctx := ot.StartSpanFromContext(ctx, "Git: GetCommit")
span.SetTag("Commit", id)
defer span.Finish()
return getCommit(ctx, repo, id, opt)
}
// Commits returns all commits matching the options.
func Commits(ctx context.Context, repo api.RepoName, opt CommitsOptions) ([]*gitdomain.Commit, error) {
if Mocks.Commits != nil {
return Mocks.Commits(repo, opt)
}
span, ctx := ot.StartSpanFromContext(ctx, "Git: Commits")
span.SetTag("Opt", opt)
defer span.Finish()
if err := checkSpecArgSafety(opt.Range); err != nil {
return nil, err
}
return commitLog(ctx, repo, opt)
}
// HasCommitAfter indicates the staleness of a repository. It returns a boolean indicating if a repository
// contains a commit past a specified date.
func HasCommitAfter(ctx context.Context, repo api.RepoName, date string, revspec string) (bool, error) {
span, ctx := ot.StartSpanFromContext(ctx, "Git: HasCommitAfter")
span.SetTag("Date", date)
span.SetTag("RevSpec", revspec)
defer span.Finish()
if revspec == "" {
revspec = "HEAD"
}
commitid, err := ResolveRevision(ctx, repo, revspec, ResolveRevisionOptions{NoEnsureRevision: true})
if err != nil {
return false, err
}
n, err := CommitCount(ctx, repo, CommitsOptions{
N: 1,
After: date,
Range: string(commitid),
})
return n > 0, err
}
func isBadObjectErr(output, obj string) bool {
return output == "fatal: bad object "+obj
}
// commitLog returns a list of commits.
//
// The caller is responsible for doing checkSpecArgSafety on opt.Head and opt.Base.
func commitLog(ctx context.Context, repo api.RepoName, opt CommitsOptions) (commits []*gitdomain.Commit, err error) {
args, err := commitLogArgs([]string{"log", logFormatWithoutRefs}, opt)
if err != nil {
return nil, err
}
cmd := gitserver.DefaultClient.Command("git", args...)
cmd.Repo = repo
if !opt.NoEnsureRevision {
cmd.EnsureRevision = opt.Range
}
return runCommitLog(ctx, cmd, opt)
}
// runCommitLog sends the git command to gitserver. It interprets missing
// revision responses and converts them into RevisionNotFoundError.
// It is declared as a variable so that we can swap it out in tests
var runCommitLog = func(ctx context.Context, cmd *gitserver.Cmd, opt CommitsOptions) ([]*gitdomain.Commit, error) {
data, stderr, err := cmd.DividedOutput(ctx)
if err != nil {
data = bytes.TrimSpace(data)
if isBadObjectErr(string(stderr), opt.Range) {
return nil, &gitdomain.RevisionNotFoundError{Repo: cmd.Repo, Spec: opt.Range}
}
return nil, errors.WithMessage(err, fmt.Sprintf("git command %v failed (output: %q)", cmd.Args, data))
}
allParts := bytes.Split(data, []byte{'\x00'})
numCommits := len(allParts) / partsPerCommit
commits := make([]*gitdomain.Commit, 0, numCommits)
for len(data) > 0 {
var commit *gitdomain.Commit
var err error
commit, _, data, err = parseCommitFromLog(data)
if err != nil {
return nil, err
}
commits = append(commits, commit)
}
return commits, nil
}
func commitLogArgs(initialArgs []string, opt CommitsOptions) (args []string, err error) {
if err := checkSpecArgSafety(opt.Range); err != nil {
return nil, err
}
args = initialArgs
if opt.N != 0 {
args = append(args, "-n", strconv.FormatUint(uint64(opt.N), 10))
}
if opt.Skip != 0 {
args = append(args, "--skip="+strconv.FormatUint(uint64(opt.Skip), 10))
}
if opt.Author != "" {
args = append(args, "--fixed-strings", "--author="+opt.Author)
}
if opt.After != "" {
args = append(args, "--after="+opt.After)
}
if opt.Before != "" {
args = append(args, "--before="+opt.Before)
}
if opt.Reverse {
args = append(args, "--reverse")
}
if opt.DateOrder {
args = append(args, "--date-order")
}
if opt.MessageQuery != "" {
args = append(args, "--fixed-strings", "--regexp-ignore-case", "--grep="+opt.MessageQuery)
}
if opt.Range != "" {
args = append(args, opt.Range)
}
if opt.Path != "" {
args = append(args, "--", opt.Path)
}
return args, nil
}
// CommitCount returns the number of commits that would be returned by Commits.
func CommitCount(ctx context.Context, repo api.RepoName, opt CommitsOptions) (uint, error) {
span, ctx := ot.StartSpanFromContext(ctx, "Git: CommitCount")
span.SetTag("Opt", opt)
defer span.Finish()
args, err := commitLogArgs([]string{"rev-list", "--count"}, opt)
if err != nil {
return 0, err
}
cmd := gitserver.DefaultClient.Command("git", args...)
cmd.Repo = repo
if opt.Path != "" {
// This doesn't include --follow flag because rev-list doesn't support it, so the number may be slightly off.
cmd.Args = append(cmd.Args, "--", opt.Path)
}
out, err := cmd.Output(ctx)
if err != nil {
return 0, errors.WithMessage(err, fmt.Sprintf("git command %v failed (output: %q)", cmd.Args, out))
}
out = bytes.TrimSpace(out)
n, err := strconv.ParseUint(string(out), 10, 64)
return uint(n), err
}
// FirstEverCommit returns the first commit ever made to the repository.
func FirstEverCommit(ctx context.Context, repo api.RepoName) (*gitdomain.Commit, error) {
span, ctx := ot.StartSpanFromContext(ctx, "Git: FirstEverCommit")
defer span.Finish()
args := []string{"rev-list", "--max-count=1", "--max-parents=0", "HEAD"}
cmd := gitserver.DefaultClient.Command("git", args...)
cmd.Repo = repo
out, err := cmd.Output(ctx)
if err != nil {
return nil, errors.WithMessage(err, fmt.Sprintf("git command %v failed (output: %q)", args, out))
}
id := api.CommitID(bytes.TrimSpace(out))
return GetCommit(ctx, repo, id, ResolveRevisionOptions{NoEnsureRevision: true})
}
const (
partsPerCommit = 10 // number of \x00-separated fields per commit
// don't include refs (faster, should be used if refs are not needed)
logFormatWithoutRefs = "--format=format:%H%x00%x00%aN%x00%aE%x00%at%x00%cN%x00%cE%x00%ct%x00%B%x00%P%x00"
)
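
// Field order produced by logFormatWithoutRefs (and consumed by parseCommitFromLog below):
// 0 commit hash, 1 refs (always empty for this format), 2 author name, 3 author email,
// 4 author time, 5 committer name, 6 committer email, 7 committer time, 8 message body,
// 9 parent hashes.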
// parseCommitFromLog parses the next commit from data and returns the commit and the remaining
// data. The data arg is a byte array that contains NUL-separated log fields as formatted by
// logFormatWithoutRefs.
func parseCommitFromLog(data []byte) (commit *gitdomain.Commit, refs []string, rest []byte, err error) {
parts := bytes.SplitN(data, []byte{'\x00'}, partsPerCommit+1)
if len(parts) < partsPerCommit {
return nil, nil, nil, errors.Errorf("invalid commit log entry: %q", parts)
}
	// log outputs are newline separated, so the commit ID part of every entry
	// except the first has an erroneous leading newline.
parts[0] = bytes.TrimPrefix(parts[0], []byte{'\n'})
commitID := api.CommitID(parts[0])
authorTime, err := strconv.ParseInt(string(parts[4]), 10, 64)
if err != nil {
return nil, nil, nil, errors.Errorf("parsing git commit author time: %s", err)
}
committerTime, err := strconv.ParseInt(string(parts[7]), 10, 64)
if err != nil {
return nil, nil, nil, errors.Errorf("parsing git commit committer time: %s", err)
}
var parents []api.CommitID
if parentPart := parts[9]; len(parentPart) > 0 {
parentIDs := bytes.Split(parentPart, []byte{' '})
parents = make([]api.CommitID, len(parentIDs))
for i, id := range parentIDs {
parents[i] = api.CommitID(id)
}
}
if len(parts[1]) > 0 {
refs = strings.Split(string(parts[1]), ", ")
}
commit = &gitdomain.Commit{
ID: commitID,
Author: gitdomain.Signature{Name: string(parts[2]), Email: string(parts[3]), Date: time.Unix(authorTime, 0).UTC()},
Committer: &gitdomain.Signature{Name: string(parts[5]), Email: string(parts[6]), Date: time.Unix(committerTime, 0).UTC()},
Message: gitdomain.Message(strings.TrimSuffix(string(parts[8]), "\n")),
Parents: parents,
}
if len(parts) == partsPerCommit+1 {
rest = parts[10]
}
return commit, refs, rest, nil
}
| [
"\"RECORD_GET_COMMIT_QUERIES\""
]
| []
| [
"RECORD_GET_COMMIT_QUERIES"
]
| [] | ["RECORD_GET_COMMIT_QUERIES"] | go | 1 | 0 | |
cmd/abapEnvironmentCreateSystem_generated.go | // Code generated by piper's step-generator. DO NOT EDIT.
package cmd
import (
"fmt"
"os"
"time"
"github.com/SAP/jenkins-library/pkg/config"
"github.com/SAP/jenkins-library/pkg/log"
"github.com/SAP/jenkins-library/pkg/splunk"
"github.com/SAP/jenkins-library/pkg/telemetry"
"github.com/spf13/cobra"
)
type abapEnvironmentCreateSystemOptions struct {
CfAPIEndpoint string `json:"cfApiEndpoint,omitempty"`
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
CfOrg string `json:"cfOrg,omitempty"`
CfSpace string `json:"cfSpace,omitempty"`
CfService string `json:"cfService,omitempty"`
CfServicePlan string `json:"cfServicePlan,omitempty"`
CfServiceInstance string `json:"cfServiceInstance,omitempty"`
ServiceManifest string `json:"serviceManifest,omitempty"`
AbapSystemAdminEmail string `json:"abapSystemAdminEmail,omitempty"`
AbapSystemDescription string `json:"abapSystemDescription,omitempty"`
AbapSystemIsDevelopmentAllowed bool `json:"abapSystemIsDevelopmentAllowed,omitempty"`
AbapSystemID string `json:"abapSystemID,omitempty"`
AbapSystemSizeOfPersistence int `json:"abapSystemSizeOfPersistence,omitempty"`
AbapSystemSizeOfRuntime int `json:"abapSystemSizeOfRuntime,omitempty"`
AddonDescriptorFileName string `json:"addonDescriptorFileName,omitempty"`
IncludeAddon bool `json:"includeAddon,omitempty"`
}
// AbapEnvironmentCreateSystemCommand Creates a SAP Cloud Platform ABAP Environment system (aka Steampunk system)
func AbapEnvironmentCreateSystemCommand() *cobra.Command {
const STEP_NAME = "abapEnvironmentCreateSystem"
metadata := abapEnvironmentCreateSystemMetadata()
var stepConfig abapEnvironmentCreateSystemOptions
var startTime time.Time
var logCollector *log.CollectorHook
var createAbapEnvironmentCreateSystemCmd = &cobra.Command{
Use: STEP_NAME,
Short: "Creates a SAP Cloud Platform ABAP Environment system (aka Steampunk system)",
Long: `This step creates a SAP Cloud Platform ABAP Environment system (aka Steampunk system) via the cloud foundry command line interface (cf CLI). This can be done by providing a service manifest as a configuration file (parameter ` + "`" + `serviceManifest` + "`" + `) or by passing the configuration values directly via the other parameters of this step.`,
PreRunE: func(cmd *cobra.Command, _ []string) error {
startTime = time.Now()
log.SetStepName(STEP_NAME)
log.SetVerbose(GeneralConfig.Verbose)
path, _ := os.Getwd()
fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path}
log.RegisterHook(fatalHook)
err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile)
if err != nil {
log.SetErrorCategory(log.ErrorConfiguration)
return err
}
log.RegisterSecret(stepConfig.Username)
log.RegisterSecret(stepConfig.Password)
if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 {
sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID)
log.RegisterHook(&sentryHook)
}
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
logCollector = &log.CollectorHook{CorrelationID: GeneralConfig.CorrelationID}
log.RegisterHook(logCollector)
}
return nil
},
Run: func(_ *cobra.Command, _ []string) {
telemetryData := telemetry.CustomData{}
telemetryData.ErrorCode = "1"
handler := func() {
config.RemoveVaultSecretFiles()
telemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds())
telemetryData.ErrorCategory = log.GetErrorCategory().String()
telemetry.Send(&telemetryData)
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunk.Send(&telemetryData, logCollector)
}
}
log.DeferExitHandler(handler)
defer handler()
telemetry.Initialize(GeneralConfig.NoTelemetry, STEP_NAME)
if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 {
splunk.Initialize(GeneralConfig.CorrelationID,
GeneralConfig.HookConfig.SplunkConfig.Dsn,
GeneralConfig.HookConfig.SplunkConfig.Token,
GeneralConfig.HookConfig.SplunkConfig.Index,
GeneralConfig.HookConfig.SplunkConfig.SendLogs)
}
abapEnvironmentCreateSystem(stepConfig, &telemetryData)
telemetryData.ErrorCode = "0"
log.Entry().Info("SUCCESS")
},
}
addAbapEnvironmentCreateSystemFlags(createAbapEnvironmentCreateSystemCmd, &stepConfig)
return createAbapEnvironmentCreateSystemCmd
}
func addAbapEnvironmentCreateSystemFlags(cmd *cobra.Command, stepConfig *abapEnvironmentCreateSystemOptions) {
cmd.Flags().StringVar(&stepConfig.CfAPIEndpoint, "cfApiEndpoint", `https://api.cf.eu10.hana.ondemand.com`, "Cloud Foundry API endpoint")
cmd.Flags().StringVar(&stepConfig.Username, "username", os.Getenv("PIPER_username"), "User or E-Mail for CF")
cmd.Flags().StringVar(&stepConfig.Password, "password", os.Getenv("PIPER_password"), "Password for Cloud Foundry User")
cmd.Flags().StringVar(&stepConfig.CfOrg, "cfOrg", os.Getenv("PIPER_cfOrg"), "Cloud Foundry org")
cmd.Flags().StringVar(&stepConfig.CfSpace, "cfSpace", os.Getenv("PIPER_cfSpace"), "Cloud Foundry Space")
cmd.Flags().StringVar(&stepConfig.CfService, "cfService", os.Getenv("PIPER_cfService"), "Parameter for Cloud Foundry Service to be used for creating Cloud Foundry Service")
cmd.Flags().StringVar(&stepConfig.CfServicePlan, "cfServicePlan", os.Getenv("PIPER_cfServicePlan"), "Parameter for Cloud Foundry Service Plan to be used when creating a Cloud Foundry Service")
cmd.Flags().StringVar(&stepConfig.CfServiceInstance, "cfServiceInstance", os.Getenv("PIPER_cfServiceInstance"), "Parameter for naming the Service Instance when creating a Cloud Foundry Service")
cmd.Flags().StringVar(&stepConfig.ServiceManifest, "serviceManifest", os.Getenv("PIPER_serviceManifest"), "Path to Cloud Foundry Service Manifest in YAML format for multiple service creations that are being passed to a Create-Service-Push Cloud Foundry cli plugin")
cmd.Flags().StringVar(&stepConfig.AbapSystemAdminEmail, "abapSystemAdminEmail", os.Getenv("PIPER_abapSystemAdminEmail"), "Admin E-Mail address for the initial administrator of the system")
cmd.Flags().StringVar(&stepConfig.AbapSystemDescription, "abapSystemDescription", `Test system created by an automated pipeline`, "Description for the ABAP Environment system")
cmd.Flags().BoolVar(&stepConfig.AbapSystemIsDevelopmentAllowed, "abapSystemIsDevelopmentAllowed", true, "This parameter determines, if development is allowed on the system")
cmd.Flags().StringVar(&stepConfig.AbapSystemID, "abapSystemID", `H02`, "The three character name of the system - maps to 'sapSystemName'")
cmd.Flags().IntVar(&stepConfig.AbapSystemSizeOfPersistence, "abapSystemSizeOfPersistence", 0, "The size of the persistence")
cmd.Flags().IntVar(&stepConfig.AbapSystemSizeOfRuntime, "abapSystemSizeOfRuntime", 0, "The size of the runtime")
cmd.Flags().StringVar(&stepConfig.AddonDescriptorFileName, "addonDescriptorFileName", os.Getenv("PIPER_addonDescriptorFileName"), "The file name of the addonDescriptor")
cmd.Flags().BoolVar(&stepConfig.IncludeAddon, "includeAddon", false, "Must be set to true to install the addon provided via 'addonDescriptorFileName'")
cmd.MarkFlagRequired("cfApiEndpoint")
cmd.MarkFlagRequired("username")
cmd.MarkFlagRequired("password")
cmd.MarkFlagRequired("cfOrg")
cmd.MarkFlagRequired("cfSpace")
}
// retrieve step metadata
func abapEnvironmentCreateSystemMetadata() config.StepData {
var theMetaData = config.StepData{
Metadata: config.StepMetadata{
Name: "abapEnvironmentCreateSystem",
Aliases: []config.Alias{},
Description: "Creates a SAP Cloud Platform ABAP Environment system (aka Steampunk system)",
},
Spec: config.StepSpec{
Inputs: config.StepInputs{
Parameters: []config.StepParameters{
{
Name: "cfApiEndpoint",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "cloudFoundry/apiEndpoint"}},
},
{
Name: "username",
ResourceRef: []config.ResourceReference{
{
Name: "cfCredentialsId",
Param: "username",
Type: "secret",
},
{
Name: "",
Paths: []string{"$(vaultPath)/cloudfoundry-$(org)-$(space)", "$(vaultBasePath)/$(vaultPipelineName)/cloudfoundry-$(org)-$(space)", "$(vaultBasePath)/GROUP-SECRETS/cloudfoundry-$(org)-$(space)"},
Type: "vaultSecret",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
{
Name: "password",
ResourceRef: []config.ResourceReference{
{
Name: "cfCredentialsId",
Param: "password",
Type: "secret",
},
{
Name: "",
Paths: []string{"$(vaultPath)/cloudfoundry-$(org)-$(space)", "$(vaultBasePath)/$(vaultPipelineName)/cloudfoundry-$(org)-$(space)", "$(vaultBasePath)/GROUP-SECRETS/cloudfoundry-$(org)-$(space)"},
Type: "vaultSecret",
},
},
Scope: []string{"PARAMETERS", "STAGES", "STEPS"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{},
},
{
Name: "cfOrg",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "cloudFoundry/org"}},
},
{
Name: "cfSpace",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: true,
Aliases: []config.Alias{{Name: "cloudFoundry/space"}},
},
{
Name: "cfService",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/service"}},
},
{
Name: "cfServicePlan",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/servicePlan"}},
},
{
Name: "cfServiceInstance",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/serviceInstance"}},
},
{
Name: "serviceManifest",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{{Name: "cloudFoundry/serviceManifest"}, {Name: "cfServiceManifest"}},
},
{
Name: "abapSystemAdminEmail",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "abapSystemDescription",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "abapSystemIsDevelopmentAllowed",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "abapSystemID",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "abapSystemSizeOfPersistence",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "int",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "abapSystemSizeOfRuntime",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "int",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "addonDescriptorFileName",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"},
Type: "string",
Mandatory: false,
Aliases: []config.Alias{},
},
{
Name: "includeAddon",
ResourceRef: []config.ResourceReference{},
Scope: []string{"PARAMETERS", "STAGES"},
Type: "bool",
Mandatory: false,
Aliases: []config.Alias{},
},
},
},
Containers: []config.Container{
{Name: "cf", Image: "ppiper/cf-cli:7"},
},
},
}
return theMetaData
}
| [
"\"PIPER_username\"",
"\"PIPER_password\"",
"\"PIPER_cfOrg\"",
"\"PIPER_cfSpace\"",
"\"PIPER_cfService\"",
"\"PIPER_cfServicePlan\"",
"\"PIPER_cfServiceInstance\"",
"\"PIPER_serviceManifest\"",
"\"PIPER_abapSystemAdminEmail\"",
"\"PIPER_addonDescriptorFileName\""
]
| []
| [
"PIPER_cfSpace",
"PIPER_cfServicePlan",
"PIPER_password",
"PIPER_cfService",
"PIPER_username",
"PIPER_addonDescriptorFileName",
"PIPER_cfServiceInstance",
"PIPER_cfOrg",
"PIPER_abapSystemAdminEmail",
"PIPER_serviceManifest"
]
| [] | ["PIPER_cfSpace", "PIPER_cfServicePlan", "PIPER_password", "PIPER_cfService", "PIPER_username", "PIPER_addonDescriptorFileName", "PIPER_cfServiceInstance", "PIPER_cfOrg", "PIPER_abapSystemAdminEmail", "PIPER_serviceManifest"] | go | 10 | 0 | |
omaha/site_scons/site_tools/atlmfc_vc12_0.py | # Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
"""Windows ATL MFC for VC12 (Visual Studio 2013) tool for SCons.
Note that ATL MFC requires the commercial (non-free) version of Visual Studio
2013.
"""
import os
def _FindLocalInstall():
"""Returns the directory containing the local install of the tool.
Returns:
Path to tool (as a string), or None if not found.
"""
default_dir = os.environ['VCINSTALLDIR'] + 'atlmfc'
if os.path.exists(default_dir):
return default_dir
else:
return None
def generate(env):
# NOTE: SCons requires the use of this name, which fails gpylint.
"""SCons entry point for this tool."""
if not env.get('ATLMFC_VC12_0_DIR'):
env['ATLMFC_VC12_0_DIR'] = _FindLocalInstall()
env.AppendENVPath('INCLUDE', env.Dir('$ATLMFC_VC12_0_DIR/include').abspath)
env.AppendENVPath('LIB', env.Dir('$ATLMFC_VC12_0_DIR/lib').abspath)
| []
| []
| [
"VCINSTALLDIR"
]
| [] | ["VCINSTALLDIR"] | python | 1 | 0 | |
mesonbuild/modules/python.py | # Copyright 2018 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import json
import shutil
import typing as T
from pathlib import Path
from .. import mesonlib
from ..mesonlib import MachineChoice, MesonException
from . import ExtensionModule
from mesonbuild.modules import ModuleReturnValue
from ..interpreterbase import (
noPosargs, noKwargs, permittedKwargs,
InvalidArguments,
FeatureNew, FeatureNewKwargs, disablerIfNotFound
)
from ..interpreter import ExternalProgramHolder, extract_required_kwarg, permitted_kwargs
from ..build import known_shmod_kwargs
from .. import mlog
from ..environment import detect_cpu_family
from ..dependencies.base import (
DependencyMethods, ExternalDependency,
ExternalProgram, PkgConfigDependency,
NonExistingExternalProgram
)
mod_kwargs = set(['subdir'])
mod_kwargs.update(known_shmod_kwargs)
mod_kwargs -= set(['name_prefix', 'name_suffix'])
class PythonDependency(ExternalDependency):
def __init__(self, python_holder, environment, kwargs):
super().__init__('python', environment, kwargs)
self.name = 'python'
self.static = kwargs.get('static', False)
self.embed = kwargs.get('embed', False)
self.version = python_holder.version
self.platform = python_holder.platform
self.pkgdep = None
self.variables = python_holder.variables
self.paths = python_holder.paths
self.link_libpython = python_holder.link_libpython
if mesonlib.version_compare(self.version, '>= 3.0'):
self.major_version = 3
else:
self.major_version = 2
# We first try to find the necessary python variables using pkgconfig
if DependencyMethods.PKGCONFIG in self.methods and not python_holder.is_pypy:
pkg_version = self.variables.get('LDVERSION') or self.version
pkg_libdir = self.variables.get('LIBPC')
pkg_embed = '-embed' if self.embed and mesonlib.version_compare(self.version, '>=3.8') else ''
pkg_name = 'python-{}{}'.format(pkg_version, pkg_embed)
# If python-X.Y.pc exists in LIBPC, we will try to use it
if pkg_libdir is not None and Path(os.path.join(pkg_libdir, '{}.pc'.format(pkg_name))).is_file():
old_pkg_libdir = os.environ.get('PKG_CONFIG_LIBDIR')
old_pkg_path = os.environ.get('PKG_CONFIG_PATH')
os.environ.pop('PKG_CONFIG_PATH', None)
if pkg_libdir:
os.environ['PKG_CONFIG_LIBDIR'] = pkg_libdir
try:
self.pkgdep = PkgConfigDependency(pkg_name, environment, kwargs)
mlog.debug('Found "{}" via pkgconfig lookup in LIBPC ({})'.format(pkg_name, pkg_libdir))
py_lookup_method = 'pkgconfig'
except MesonException as e:
mlog.debug('"{}" could not be found in LIBPC ({})'.format(pkg_name, pkg_libdir))
mlog.debug(e)
if old_pkg_path is not None:
os.environ['PKG_CONFIG_PATH'] = old_pkg_path
if old_pkg_libdir is not None:
os.environ['PKG_CONFIG_LIBDIR'] = old_pkg_libdir
else:
os.environ.pop('PKG_CONFIG_LIBDIR', None)
else:
mlog.debug('"{}" could not be found in LIBPC ({}), this is likely due to a relocated python installation'.format(pkg_name, pkg_libdir))
# If lookup via LIBPC failed, try to use fallback PKG_CONFIG_LIBDIR/PKG_CONFIG_PATH mechanisms
if self.pkgdep is None or not self.pkgdep.found():
try:
self.pkgdep = PkgConfigDependency(pkg_name, environment, kwargs)
mlog.debug('Found "{}" via fallback pkgconfig lookup in PKG_CONFIG_LIBDIR/PKG_CONFIG_PATH'.format(pkg_name))
py_lookup_method = 'pkgconfig-fallback'
except MesonException as e:
mlog.debug('"{}" could not be found via fallback pkgconfig lookup in PKG_CONFIG_LIBDIR/PKG_CONFIG_PATH'.format(pkg_name))
mlog.debug(e)
if self.pkgdep and self.pkgdep.found():
self.compile_args = self.pkgdep.get_compile_args()
self.link_args = self.pkgdep.get_link_args()
self.is_found = True
self.pcdep = self.pkgdep
else:
self.pkgdep = None
# Finally, try to find python via SYSCONFIG as a final measure
if DependencyMethods.SYSCONFIG in self.methods:
if mesonlib.is_windows():
self._find_libpy_windows(environment)
else:
self._find_libpy(python_holder, environment)
if self.is_found:
mlog.debug('Found "python-{}" via SYSCONFIG module'.format(self.version))
py_lookup_method = 'sysconfig'
if self.is_found:
mlog.log('Dependency', mlog.bold(self.name), 'found:', mlog.green('YES ({})'.format(py_lookup_method)))
else:
mlog.log('Dependency', mlog.bold(self.name), 'found:', mlog.red('NO'))
def _find_libpy(self, python_holder, environment):
if python_holder.is_pypy:
if self.major_version == 3:
libname = 'pypy3-c'
else:
libname = 'pypy-c'
libdir = os.path.join(self.variables.get('base'), 'bin')
libdirs = [libdir]
else:
libname = 'python{}'.format(self.version)
if 'DEBUG_EXT' in self.variables:
libname += self.variables['DEBUG_EXT']
if 'ABIFLAGS' in self.variables:
libname += self.variables['ABIFLAGS']
libdirs = []
largs = self.clib_compiler.find_library(libname, environment, libdirs)
if largs is not None:
self.link_args = largs
self.is_found = largs is not None or self.link_libpython
inc_paths = mesonlib.OrderedSet([
self.variables.get('INCLUDEPY'),
self.paths.get('include'),
self.paths.get('platinclude')])
self.compile_args += ['-I' + path for path in inc_paths if path]
def get_windows_python_arch(self):
if self.platform == 'mingw':
pycc = self.variables.get('CC')
if pycc.startswith('x86_64'):
return '64'
elif pycc.startswith(('i686', 'i386')):
return '32'
else:
mlog.log('MinGW Python built with unknown CC {!r}, please file'
'a bug'.format(pycc))
return None
elif self.platform == 'win32':
return '32'
elif self.platform in ('win64', 'win-amd64'):
return '64'
mlog.log('Unknown Windows Python platform {!r}'.format(self.platform))
return None
def get_windows_link_args(self):
if self.platform.startswith('win'):
vernum = self.variables.get('py_version_nodot')
if self.static:
libpath = Path('libs') / 'libpython{}.a'.format(vernum)
else:
comp = self.get_compiler()
if comp.id == "gcc":
libpath = 'python{}.dll'.format(vernum)
else:
libpath = Path('libs') / 'python{}.lib'.format(vernum)
lib = Path(self.variables.get('base')) / libpath
elif self.platform == 'mingw':
if self.static:
libname = self.variables.get('LIBRARY')
else:
libname = self.variables.get('LDLIBRARY')
lib = Path(self.variables.get('LIBDIR')) / libname
if not lib.exists():
mlog.log('Could not find Python3 library {!r}'.format(str(lib)))
return None
return [str(lib)]
def _find_libpy_windows(self, env):
'''
Find python3 libraries on Windows and also verify that the arch matches
what we are building for.
'''
pyarch = self.get_windows_python_arch()
if pyarch is None:
self.is_found = False
return
arch = detect_cpu_family(env.coredata.compilers.host)
if arch == 'x86':
arch = '32'
elif arch == 'x86_64':
arch = '64'
else:
# We can't cross-compile Python 3 dependencies on Windows yet
mlog.log('Unknown architecture {!r} for'.format(arch),
mlog.bold(self.name))
self.is_found = False
return
# Pyarch ends in '32' or '64'
if arch != pyarch:
mlog.log('Need', mlog.bold(self.name), 'for {}-bit, but '
'found {}-bit'.format(arch, pyarch))
self.is_found = False
return
# This can fail if the library is not found
largs = self.get_windows_link_args()
if largs is None:
self.is_found = False
return
self.link_args = largs
# Compile args
inc_paths = mesonlib.OrderedSet([
self.variables.get('INCLUDEPY'),
self.paths.get('include'),
self.paths.get('platinclude')])
self.compile_args += ['-I' + path for path in inc_paths if path]
# https://sourceforge.net/p/mingw-w64/mailman/message/30504611/
if pyarch == '64' and self.major_version == 2:
self.compile_args += ['-DMS_WIN64']
self.is_found = True
@staticmethod
def get_methods():
if mesonlib.is_windows():
return [DependencyMethods.PKGCONFIG, DependencyMethods.SYSCONFIG]
elif mesonlib.is_osx():
return [DependencyMethods.PKGCONFIG, DependencyMethods.EXTRAFRAMEWORK]
else:
return [DependencyMethods.PKGCONFIG, DependencyMethods.SYSCONFIG]
def get_pkgconfig_variable(self, variable_name, kwargs):
if self.pkgdep:
return self.pkgdep.get_pkgconfig_variable(variable_name, kwargs)
else:
return super().get_pkgconfig_variable(variable_name, kwargs)
INTROSPECT_COMMAND = '''import sysconfig
import json
import sys
install_paths = sysconfig.get_paths(scheme='posix_prefix', vars={'base': '', 'platbase': '', 'installed_base': ''})
def links_against_libpython():
from distutils.core import Distribution, Extension
cmd = Distribution().get_command_obj('build_ext')
cmd.ensure_finalized()
return bool(cmd.get_libraries(Extension('dummy', [])))
print (json.dumps ({
'variables': sysconfig.get_config_vars(),
'paths': sysconfig.get_paths(),
'install_paths': install_paths,
'version': sysconfig.get_python_version(),
'platform': sysconfig.get_platform(),
'is_pypy': '__pypy__' in sys.builtin_module_names,
'link_libpython': links_against_libpython(),
}))
'''
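
# The introspection script above is run with the candidate interpreter; its JSON output
# (sysconfig variables and paths, install paths, version, platform, PyPy flag, libpython
# linkage) becomes the `info` dict handed to PythonInstallation by find_installation() below.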
class PythonInstallation(ExternalProgramHolder):
def __init__(self, interpreter, python, info):
ExternalProgramHolder.__init__(self, python, interpreter.subproject)
self.interpreter = interpreter
self.subproject = self.interpreter.subproject
prefix = self.interpreter.environment.coredata.get_builtin_option('prefix')
self.variables = info['variables']
self.paths = info['paths']
install_paths = info['install_paths']
self.platlib_install_path = os.path.join(prefix, install_paths['platlib'][1:])
self.purelib_install_path = os.path.join(prefix, install_paths['purelib'][1:])
self.version = info['version']
self.platform = info['platform']
self.is_pypy = info['is_pypy']
self.link_libpython = info['link_libpython']
self.methods.update({
'extension_module': self.extension_module_method,
'dependency': self.dependency_method,
'install_sources': self.install_sources_method,
'get_install_dir': self.get_install_dir_method,
'language_version': self.language_version_method,
'found': self.found_method,
'has_path': self.has_path_method,
'get_path': self.get_path_method,
'has_variable': self.has_variable_method,
'get_variable': self.get_variable_method,
'path': self.path_method,
})
@permittedKwargs(mod_kwargs)
def extension_module_method(self, args, kwargs):
if 'subdir' in kwargs and 'install_dir' in kwargs:
raise InvalidArguments('"subdir" and "install_dir" are mutually exclusive')
if 'subdir' in kwargs:
subdir = kwargs.pop('subdir', '')
if not isinstance(subdir, str):
raise InvalidArguments('"subdir" argument must be a string.')
kwargs['install_dir'] = os.path.join(self.platlib_install_path, subdir)
# On macOS and some Linux distros (Debian) distutils doesn't link
# extensions against libpython. We call into distutils and mirror its
# behavior. See https://github.com/mesonbuild/meson/issues/4117
if not self.link_libpython:
new_deps = []
for holder in mesonlib.extract_as_list(kwargs, 'dependencies'):
dep = holder.held_object
if isinstance(dep, PythonDependency):
holder = self.interpreter.holderify(dep.get_partial_dependency(compile_args=True))
new_deps.append(holder)
kwargs['dependencies'] = new_deps
suffix = self.variables.get('EXT_SUFFIX') or self.variables.get('SO') or self.variables.get('.so')
# msys2's python3 has "-cpython-36m.dll", we have to be clever
split = suffix.rsplit('.', 1)
suffix = split.pop(-1)
args[0] += ''.join(s for s in split)
kwargs['name_prefix'] = ''
kwargs['name_suffix'] = suffix
return self.interpreter.func_shared_module(None, args, kwargs)
@permittedKwargs(permitted_kwargs['dependency'])
@FeatureNewKwargs('python_installation.dependency', '0.53.0', ['embed'])
def dependency_method(self, args, kwargs):
if args:
mlog.warning('python_installation.dependency() does not take any '
'positional arguments. It always returns a Python '
'dependency. This will become an error in the future.',
location=self.interpreter.current_node)
dep = PythonDependency(self, self.interpreter.environment, kwargs)
return self.interpreter.holderify(dep)
@permittedKwargs(['pure', 'subdir'])
def install_sources_method(self, args, kwargs):
pure = kwargs.pop('pure', False)
if not isinstance(pure, bool):
raise InvalidArguments('"pure" argument must be a boolean.')
subdir = kwargs.pop('subdir', '')
if not isinstance(subdir, str):
raise InvalidArguments('"subdir" argument must be a string.')
if pure:
kwargs['install_dir'] = os.path.join(self.purelib_install_path, subdir)
else:
kwargs['install_dir'] = os.path.join(self.platlib_install_path, subdir)
return self.interpreter.holderify(self.interpreter.func_install_data(None, args, kwargs))
@noPosargs
@permittedKwargs(['pure', 'subdir'])
def get_install_dir_method(self, args, kwargs):
pure = kwargs.pop('pure', True)
if not isinstance(pure, bool):
raise InvalidArguments('"pure" argument must be a boolean.')
subdir = kwargs.pop('subdir', '')
if not isinstance(subdir, str):
raise InvalidArguments('"subdir" argument must be a string.')
if pure:
res = os.path.join(self.purelib_install_path, subdir)
else:
res = os.path.join(self.platlib_install_path, subdir)
return self.interpreter.module_method_callback(ModuleReturnValue(res, []))
@noPosargs
@noKwargs
def language_version_method(self, args, kwargs):
return self.interpreter.module_method_callback(ModuleReturnValue(self.version, []))
@noKwargs
def has_path_method(self, args, kwargs):
if len(args) != 1:
raise InvalidArguments('has_path takes exactly one positional argument.')
path_name = args[0]
if not isinstance(path_name, str):
raise InvalidArguments('has_path argument must be a string.')
return self.interpreter.module_method_callback(ModuleReturnValue(path_name in self.paths, []))
@noKwargs
def get_path_method(self, args, kwargs):
if len(args) not in (1, 2):
raise InvalidArguments('get_path must have one or two arguments.')
path_name = args[0]
if not isinstance(path_name, str):
raise InvalidArguments('get_path argument must be a string.')
try:
path = self.paths[path_name]
except KeyError:
if len(args) == 2:
path = args[1]
else:
raise InvalidArguments('{} is not a valid path name'.format(path_name))
return self.interpreter.module_method_callback(ModuleReturnValue(path, []))
@noKwargs
def has_variable_method(self, args, kwargs):
if len(args) != 1:
raise InvalidArguments('has_variable takes exactly one positional argument.')
var_name = args[0]
if not isinstance(var_name, str):
raise InvalidArguments('has_variable argument must be a string.')
return self.interpreter.module_method_callback(ModuleReturnValue(var_name in self.variables, []))
@noKwargs
def get_variable_method(self, args, kwargs):
if len(args) not in (1, 2):
raise InvalidArguments('get_variable must have one or two arguments.')
var_name = args[0]
if not isinstance(var_name, str):
raise InvalidArguments('get_variable argument must be a string.')
try:
var = self.variables[var_name]
except KeyError:
if len(args) == 2:
var = args[1]
else:
raise InvalidArguments('{} is not a valid variable name'.format(var_name))
return self.interpreter.module_method_callback(ModuleReturnValue(var, []))
@noPosargs
@noKwargs
@FeatureNew('Python module path method', '0.50.0')
def path_method(self, args, kwargs):
return super().path_method(args, kwargs)
class PythonModule(ExtensionModule):
@FeatureNew('Python Module', '0.46.0')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.snippets.add('find_installation')
# https://www.python.org/dev/peps/pep-0397/
def _get_win_pythonpath(self, name_or_path):
if name_or_path not in ['python2', 'python3']:
return None
if not shutil.which('py'):
# program not installed, return without an exception
return None
ver = {'python2': '-2', 'python3': '-3'}[name_or_path]
cmd = ['py', ver, '-c', "import sysconfig; print(sysconfig.get_config_var('BINDIR'))"]
_, stdout, _ = mesonlib.Popen_safe(cmd)
directory = stdout.strip()
if os.path.exists(directory):
return os.path.join(directory, 'python')
else:
return None
def _check_version(self, name_or_path, version):
if name_or_path == 'python2':
return mesonlib.version_compare(version, '< 3.0')
elif name_or_path == 'python3':
return mesonlib.version_compare(version, '>= 3.0')
return True
@FeatureNewKwargs('python.find_installation', '0.49.0', ['disabler'])
@FeatureNewKwargs('python.find_installation', '0.51.0', ['modules'])
@disablerIfNotFound
@permittedKwargs({'required', 'modules'})
def find_installation(self, interpreter, state, args, kwargs):
feature_check = FeatureNew('Passing "feature" option to find_installation', '0.48.0')
disabled, required, feature = extract_required_kwarg(kwargs, state.subproject, feature_check)
want_modules = mesonlib.extract_as_list(kwargs, 'modules') # type: T.List[str]
found_modules = [] # type: T.List[str]
missing_modules = [] # type: T.List[str]
if len(args) > 1:
raise InvalidArguments('find_installation takes zero or one positional argument.')
name_or_path = state.environment.lookup_binary_entry(MachineChoice.HOST, 'python')
if name_or_path is None and args:
name_or_path = args[0]
if not isinstance(name_or_path, str):
raise InvalidArguments('find_installation argument must be a string.')
if disabled:
mlog.log('Program', name_or_path or 'python', 'found:', mlog.red('NO'), '(disabled by:', mlog.bold(feature), ')')
return ExternalProgramHolder(NonExistingExternalProgram(), state.subproject)
if not name_or_path:
python = ExternalProgram('python3', mesonlib.python_command, silent=True)
else:
python = ExternalProgram.from_entry('python3', name_or_path)
if not python.found() and mesonlib.is_windows():
pythonpath = self._get_win_pythonpath(name_or_path)
if pythonpath is not None:
name_or_path = pythonpath
python = ExternalProgram(name_or_path, silent=True)
# Last ditch effort, python2 or python3 can be named python
# on various platforms, let's not give up just yet, if an executable
# named python is available and has a compatible version, let's use
# it
if not python.found() and name_or_path in ['python2', 'python3']:
python = ExternalProgram('python', silent=True)
if python.found() and want_modules:
for mod in want_modules:
p, out, err = mesonlib.Popen_safe(
python.command +
['-c', 'import {0}'.format(mod)])
if p.returncode != 0:
missing_modules.append(mod)
else:
found_modules.append(mod)
msg = ['Program', python.name]
if want_modules:
msg.append('({})'.format(', '.join(want_modules)))
msg.append('found:')
if python.found() and not missing_modules:
msg.extend([mlog.green('YES'), '({})'.format(' '.join(python.command))])
else:
msg.append(mlog.red('NO'))
if found_modules:
msg.append('modules:')
msg.append(', '.join(found_modules))
mlog.log(*msg)
if not python.found():
if required:
raise mesonlib.MesonException('{} not found'.format(name_or_path or 'python'))
res = ExternalProgramHolder(NonExistingExternalProgram(), state.subproject)
elif missing_modules:
if required:
raise mesonlib.MesonException('{} is missing modules: {}'.format(name_or_path or 'python', ', '.join(missing_modules)))
res = ExternalProgramHolder(NonExistingExternalProgram(), state.subproject)
else:
# Sanity check, we expect to have something that at least quacks in tune
try:
cmd = python.get_command() + ['-c', INTROSPECT_COMMAND]
p, stdout, stderr = mesonlib.Popen_safe(cmd)
info = json.loads(stdout)
except json.JSONDecodeError:
info = None
mlog.debug('Could not introspect Python (%s): exit code %d' % (str(p.args), p.returncode))
mlog.debug('Program stdout:\n')
mlog.debug(stdout)
mlog.debug('Program stderr:\n')
mlog.debug(stderr)
if isinstance(info, dict) and 'version' in info and self._check_version(name_or_path, info['version']):
res = PythonInstallation(interpreter, python, info)
else:
res = ExternalProgramHolder(NonExistingExternalProgram(), state.subproject)
if required:
raise mesonlib.MesonException('{} is not a valid python or it is missing setuptools'.format(python))
return res
def initialize(*args, **kwargs):
return PythonModule(*args, **kwargs)
| []
| []
| [
"PKG_CONFIG_LIBDIR",
"PKG_CONFIG_PATH"
]
| [] | ["PKG_CONFIG_LIBDIR", "PKG_CONFIG_PATH"] | python | 2 | 0 | |
bashbrew/go/src/bashbrew/main.go | package main
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/codegangsta/cli"
"github.com/docker-library/go-dockerlibrary/manifest"
)
// TODO somewhere, ensure that the Docker engine we're talking to is API version 1.22+ (Docker 1.10+)
// docker version --format '{{.Server.APIVersion}}'
var (
configPath string
flagsConfig *FlagsConfig
defaultLibrary string
defaultCache string
arch string
constraints []string
exclusiveConstraints bool
archNamespaces map[string]string
debugFlag = false
noSortFlag = false
// separated so that FlagsConfig.ApplyTo can access them
flagEnvVars = map[string]string{
"debug": "BASHBREW_DEBUG",
"arch": "BASHBREW_ARCH",
"config": "BASHBREW_CONFIG",
"library": "BASHBREW_LIBRARY",
"cache": "BASHBREW_CACHE",
"pull": "BASHBREW_PULL",
"constraint": "BASHBREW_CONSTRAINTS",
"arch-namespace": "BASHBREW_ARCH_NAMESPACES",
}
)
func initDefaultConfigPath() string {
xdgConfig := os.Getenv("XDG_CONFIG_HOME")
if xdgConfig == "" {
xdgConfig = filepath.Join(os.Getenv("HOME"), ".config")
}
return filepath.Join(xdgConfig, "bashbrew")
}
func initDefaultCachePath() string {
xdgCache := os.Getenv("XDG_CACHE_HOME")
if xdgCache == "" {
xdgCache = filepath.Join(os.Getenv("HOME"), ".cache")
}
return filepath.Join(xdgCache, "bashbrew")
}
func main() {
app := cli.NewApp()
app.Name = "bashbrew"
app.Usage = "canonical build tool for the official images"
app.Version = "dev"
app.HideVersion = true
app.EnableBashCompletion = true
// TODO add "Description" to app and commands (for longer-form description of their functionality)
cli.VersionFlag.Name = "version" // remove "-v" from VersionFlag
cli.HelpFlag.Name = "help, h, ?" // add "-?" to HelpFlag
app.Flags = []cli.Flag{
cli.BoolFlag{
Name: "debug",
EnvVar: flagEnvVars["debug"],
Usage: `enable more output (esp. all "docker build" output instead of only output on failure)`,
},
cli.BoolFlag{
Name: "no-sort",
Usage: "do not apply any sorting, even via --build-order",
},
cli.StringFlag{
Name: "arch",
Value: manifest.DefaultArchitecture,
EnvVar: flagEnvVars["arch"],
Usage: "the current platform architecture",
},
cli.StringSliceFlag{
Name: "constraint",
EnvVar: flagEnvVars["constraint"],
Usage: "build constraints (see Constraints in Manifest2822Entry)",
},
cli.BoolFlag{
Name: "exclusive-constraints",
Usage: "skip entries which do not have Constraints",
},
cli.StringSliceFlag{
Name: "arch-namespace",
EnvVar: flagEnvVars["arch-namespace"],
Usage: `architecture to push namespace mappings for creating indexes/manifest lists ("arch=namespace" ala "s390x=tianons390x")`,
},
cli.StringFlag{
Name: "config",
Value: initDefaultConfigPath(),
EnvVar: flagEnvVars["config"],
Usage: `where default "flags" configuration can be overridden more persistently`,
},
cli.StringFlag{
Name: "library",
Value: filepath.Join(os.Getenv("HOME"), "docker", "official-images", "library"),
EnvVar: flagEnvVars["library"],
Usage: "where the bodies are buried",
},
cli.StringFlag{
Name: "cache",
Value: initDefaultCachePath(),
EnvVar: flagEnvVars["cache"],
Usage: "where the git wizardry is stashed",
},
}
app.Before = func(c *cli.Context) error {
var err error
configPath, err = filepath.Abs(c.String("config"))
if err != nil {
return err
}
flagsConfig, err = ParseFlagsConfigFile(filepath.Join(configPath, "flags"))
if err != nil && !os.IsNotExist(err) {
return err
}
return nil
}
subcommandBeforeFactory := func(cmd string) cli.BeforeFunc {
return func(c *cli.Context) error {
err := flagsConfig.ApplyTo(cmd, c)
if err != nil {
return err
}
debugFlag = c.GlobalBool("debug")
noSortFlag = c.GlobalBool("no-sort")
arch = c.GlobalString("arch")
constraints = c.GlobalStringSlice("constraint")
exclusiveConstraints = c.GlobalBool("exclusive-constraints")
archNamespaces = map[string]string{}
for _, archMapping := range c.GlobalStringSlice("arch-namespace") {
splitArchMapping := strings.SplitN(archMapping, "=", 2)
splitArch, splitNamespace := strings.TrimSpace(splitArchMapping[0]), strings.TrimSpace(splitArchMapping[1])
archNamespaces[splitArch] = splitNamespace
}
defaultLibrary, err = filepath.Abs(c.GlobalString("library"))
if err != nil {
return err
}
defaultCache, err = filepath.Abs(c.GlobalString("cache"))
if err != nil {
return err
}
return nil
}
}
// define a few useful flags so their usage, etc can be consistent
commonFlags := map[string]cli.Flag{
"all": cli.BoolFlag{
Name: "all",
Usage: "act upon all repos listed in --library",
},
"uniq": cli.BoolFlag{
Name: "uniq, unique",
Usage: "only act upon the first tag of each entry",
},
"namespace": cli.StringFlag{
Name: "namespace",
Usage: "a repo namespace to act upon/in",
},
"apply-constraints": cli.BoolFlag{
Name: "apply-constraints",
Usage: "apply Constraints as if repos were building",
},
"depth": cli.IntFlag{
Name: "depth",
Value: 0,
Usage: "maximum number of levels to traverse (0 for unlimited)",
},
"dry-run": cli.BoolFlag{
Name: "dry-run",
Usage: "do everything except the final action (for testing whether actions will be performed)",
},
"force": cli.BoolFlag{
Name: "force",
Usage: "always push (skip the clever Hub API lookups that no-op things sooner if a push doesn't seem necessary)",
},
}
app.Commands = []cli.Command{
{
Name: "list",
Aliases: []string{"ls"},
Usage: "list repo:tag combinations for a given repo",
Flags: []cli.Flag{
commonFlags["all"],
commonFlags["uniq"],
commonFlags["apply-constraints"],
cli.BoolFlag{
Name: "build-order",
Usage: "sort by the order repos would need to build (topsort)",
},
cli.BoolFlag{
Name: "repos",
Usage: `list only repos, not repo:tag (unless "repo:tag" is explicitly specified)`,
},
},
Before: subcommandBeforeFactory("list"),
Action: cmdList,
},
{
Name: "build",
Usage: "build (and tag) repo:tag combinations for a given repo",
Flags: []cli.Flag{
commonFlags["all"],
commonFlags["uniq"],
commonFlags["namespace"],
cli.StringFlag{
Name: "pull",
Value: "missing",
EnvVar: flagEnvVars["pull"],
Usage: `pull FROM before building (always, missing, never)`,
},
commonFlags["dry-run"],
},
Before: subcommandBeforeFactory("build"),
Action: cmdBuild,
},
{
Name: "tag",
Usage: "tag repo:tag into a namespace (especially for pushing)",
Flags: []cli.Flag{
commonFlags["all"],
commonFlags["uniq"],
commonFlags["namespace"],
commonFlags["dry-run"],
},
Before: subcommandBeforeFactory("tag"),
Action: cmdTag,
},
{
Name: "push",
Usage: `push namespace/repo:tag (see also "tag")`,
Flags: []cli.Flag{
commonFlags["all"],
commonFlags["uniq"],
commonFlags["namespace"],
commonFlags["dry-run"],
commonFlags["force"],
},
Before: subcommandBeforeFactory("push"),
Action: cmdPush,
},
{
Name: "put-shared",
Usage: `update shared tags in the registry (and multi-architecture tags)`,
Flags: []cli.Flag{
commonFlags["all"],
commonFlags["namespace"],
commonFlags["dry-run"],
commonFlags["force"],
cli.BoolFlag{
Name: "single-arch",
Usage: `only act on the current architecture (for pushing "amd64/hello-world:latest", for example)`,
},
},
Before: subcommandBeforeFactory("put-shared"),
Action: cmdPutShared,
},
{
Name: "children",
Aliases: []string{
"offspring",
"descendants",
"progeny",
},
Usage: `print the repos built FROM a given repo or repo:tag`,
Flags: []cli.Flag{
commonFlags["apply-constraints"],
commonFlags["depth"],
},
Before: subcommandBeforeFactory("children"),
Action: cmdOffspring,
Category: "plumbing",
},
{
Name: "parents",
Aliases: []string{
"ancestors",
"progenitors",
},
Usage: `print the repos this repo or repo:tag is FROM`,
Flags: []cli.Flag{
commonFlags["apply-constraints"],
commonFlags["depth"],
},
Before: subcommandBeforeFactory("parents"),
Action: cmdParents,
Category: "plumbing",
},
{
Name: "cat",
Usage: "print manifest contents for repo or repo:tag",
Flags: []cli.Flag{
commonFlags["all"],
cli.StringFlag{
Name: "format, f",
Usage: "change the `FORMAT` of the output",
Value: DefaultCatFormat,
},
cli.StringFlag{
Name: "format-file, F",
Usage: "use the contents of `FILE` for \"--format\"",
},
},
Before: subcommandBeforeFactory("cat"),
Action: cmdCat,
Description: `see Go's "text/template" package (https://golang.org/pkg/text/template/) for details on the syntax expected in "--format"`,
Category: "plumbing",
},
{
Name: "from",
Usage: "print FROM for repo:tag",
Flags: []cli.Flag{
commonFlags["all"],
commonFlags["uniq"],
commonFlags["apply-constraints"],
},
Before: subcommandBeforeFactory("from"),
Action: cmdFrom,
Category: "plumbing",
},
}
err := app.Run(os.Args)
if err != nil {
fmt.Fprintf(os.Stderr, "error: %v\n", err)
os.Exit(1)
}
}
| [
"\"XDG_CONFIG_HOME\"",
"\"HOME\"",
"\"XDG_CACHE_HOME\"",
"\"HOME\"",
"\"HOME\""
]
| []
| [
"HOME",
"XDG_CONFIG_HOME",
"XDG_CACHE_HOME"
]
| [] | ["HOME", "XDG_CONFIG_HOME", "XDG_CACHE_HOME"] | go | 3 | 0 | |
programs/serum/queue_test.go | // Copyright 2020 dfuse Platform Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package serum
import (
"context"
"fmt"
"os"
"strings"
"testing"
"time"
bin "github.com/gagliardetto/binary"
"github.com/gagliardetto/solana-go"
"github.com/gagliardetto/solana-go/diff"
"github.com/gagliardetto/solana-go/rpc"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestDecoder_EventQueue_Diff(t *testing.T) {
//t.Skip("diff event queue test")
oldDataFile := "testdata/serum-event-queue-old.bin.zst"
newDataFile := "testdata/serum-event-queue-new.bin.zst"
olDataJSONFile := strings.ReplaceAll(oldDataFile, ".bin.zst", ".json")
newDataJSONFile := strings.ReplaceAll(newDataFile, ".bin.zst", ".json")
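
	// When TESTDATA_UPDATE=true, re-fetch the event queue account from mainnet RPC
	// (twice, ~900ms apart) and regenerate the compressed .bin.zst and .json fixtures.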
if os.Getenv("TESTDATA_UPDATE") == "true" {
client := rpc.New("http://api.mainnet-beta.solana.com:80/rpc")
ctx := context.Background()
account := solana.MustPublicKeyFromBase58("13iGJcA4w5hcJZDjJbJQor1zUiDLE4jv2rMW9HkD5Eo1")
info, err := client.GetAccountInfo(ctx, account)
require.NoError(t, err)
writeCompressedFile(t, oldDataFile, info.Value.Data.GetBinary())
oldQueue := &EventQueue{}
require.NoError(t, oldQueue.Decode(info.Value.Data.GetBinary()))
writeJSONFile(t, olDataJSONFile, oldQueue)
time.Sleep(900 * time.Millisecond)
info, err = client.GetAccountInfo(ctx, account)
require.NoError(t, err)
writeCompressedFile(t, newDataFile, info.Value.Data.GetBinary())
newQueue := &EventQueue{}
require.NoError(t, newQueue.Decode(info.Value.Data.GetBinary()))
writeJSONFile(t, newDataJSONFile, newQueue)
}
oldQueue := &EventQueue{}
require.NoError(t, oldQueue.Decode(readCompressedFile(t, oldDataFile)))
newQueue := &EventQueue{}
require.NoError(t, newQueue.Decode(readCompressedFile(t, newDataFile)))
fmt.Println("==>> All diff(s)")
diff.Diff(oldQueue, newQueue, diff.OnEvent(func(event diff.Event) { fmt.Println("Event " + event.String()) }))
}
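
// The event Flag field is a bitmask, as exercised by the table-driven cases below:
// bit 0 = fill, bit 1 = out, bit 2 = bid, bit 3 = maker.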
func Test_fill(t *testing.T) {
tests := []struct {
name string
e *Event
expectIsFill bool
expectIsOut bool
expectIsBid bool
expectIsMaker bool
}{
{
name: "Is Fill",
e: &Event{
Flag: 0b00000001,
},
expectIsFill: true,
expectIsOut: false,
expectIsBid: false,
expectIsMaker: false,
},
{
name: "Is Out",
e: &Event{
Flag: 0b00000010,
},
expectIsFill: false,
expectIsOut: true,
expectIsBid: false,
expectIsMaker: false,
},
{
name: "Is Fill & bid",
e: &Event{
Flag: 0b00000101,
},
expectIsFill: true,
expectIsOut: false,
expectIsBid: true,
expectIsMaker: false,
},
{
name: "Is Fill & bid & maker",
e: &Event{
Flag: 0b00001101,
},
expectIsFill: true,
expectIsOut: false,
expectIsBid: true,
expectIsMaker: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
assert.Equal(t, test.expectIsFill, test.e.Flag.IsFill())
assert.Equal(t, test.expectIsOut, test.e.Flag.IsOut())
assert.Equal(t, test.expectIsBid, test.e.Flag.IsBid())
assert.Equal(t, test.expectIsMaker, test.e.Flag.IsMaker())
})
}
}
func TestDecoder_EventQueue_DiffManual(t *testing.T) {
oldQueue := &EventQueue{
SerumPadding: [5]byte{},
Head: 120,
Count: 13,
SeqNum: 25,
Events: []*Event{
{OrderID: OrderID(bin.Uint128{Lo: 1})},
{OrderID: OrderID(bin.Uint128{Lo: 2})},
},
EndPadding: [7]byte{},
}
newQueue := &EventQueue{
Head: 120,
Count: 13,
SeqNum: 25,
Events: []*Event{
{OrderID: OrderID(bin.Uint128{Lo: 1})},
{OrderID: OrderID(bin.Uint128{Lo: 4})},
{OrderID: OrderID(bin.Uint128{Lo: 5})},
},
}
fmt.Println("All diff lines")
diff.Diff(oldQueue, newQueue, diff.OnEvent(func(event diff.Event) { fmt.Println("Event " + event.String()) }))
fmt.Println("")
fmt.Println("Processed diff lines")
diff.Diff(oldQueue, newQueue, diff.OnEvent(func(event diff.Event) {
if match, _ := event.Match("Events[#]"); match {
fmt.Printf("Event %s => %v\n", event.Kind, event.Element())
}
}))
}
| [
"\"TESTDATA_UPDATE\""
]
| []
| [
"TESTDATA_UPDATE"
]
| [] | ["TESTDATA_UPDATE"] | go | 1 | 0 | |
examples/Deeplab/experiments/deeplabv2.naked.fpn.standard.scale12.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# File: deeplabv2.py
# Author: Tao Hu <[email protected]>
import cv2
import tensorflow as tf
import argparse
from six.moves import zip
import os
import numpy as np
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
from tensorpack import *
from tensorpack.dataflow import dataset
from tensorpack.utils.gpu import get_nr_gpu
from tensorpack.utils.segmentation.segmentation import predict_slider, visualize_label, predict_scaler
from tensorpack.utils.stats import MIoUStatistics
from tensorpack.dataflow.imgaug.misc import RandomCropWithPadding
from tensorpack.utils import logger
from tensorpack.tfutils import optimizer
from tensorpack.tfutils.summary import add_moving_summary, add_param_summary
import tensorpack.tfutils.symbolic_functions as symbf
from tqdm import tqdm
from resnet_model_fpn_standard import (
preresnet_group, preresnet_basicblock, preresnet_bottleneck,
resnet_group, resnet_basicblock, resnet_bottleneck, se_resnet_bottleneck,
resnet_backbone)
CLASS_NUM = 21
CROP_SIZE = 512
IGNORE_LABEL = 255
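# CROP_SIZE is both the training crop and the sliding-window tile size at inference;
# IGNORE_LABEL (255) pads the random crops and marks void pixels, which the
# ignore-aware cross-entropy loss below is meant to skip.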
class Model(ModelDesc):
def _get_inputs(self):
## Set static shape so that tensorflow knows shape at compile time.
return [InputDesc(tf.float32, [None, CROP_SIZE, CROP_SIZE, 3], 'image'),
InputDesc(tf.int32, [None, CROP_SIZE, CROP_SIZE], 'gt')]
def _build_graph(self, inputs):
def resnet101(image, label):
mode = 'resnet'
depth = 101
basicblock = preresnet_basicblock if mode == 'preact' else resnet_basicblock
bottleneck = {
'resnet': resnet_bottleneck,
'preact': preresnet_bottleneck,
'se': se_resnet_bottleneck}[mode]
num_blocks, block_func = {
18: ([2, 2, 2, 2], basicblock),
34: ([3, 4, 6, 3], basicblock),
50: ([3, 4, 6, 3], bottleneck),
101: ([3, 4, 23, 3], bottleneck),
152: ([3, 8, 36, 3], bottleneck)
}[depth]
def get_logits(image):
with argscope([Conv2D, MaxPooling, GlobalAvgPooling, BatchNorm], data_format="NHWC"):
return resnet_backbone(
image, num_blocks,
preresnet_group if mode == 'preact' else resnet_group, block_func,CLASS_NUM)
return get_logits(image)
image, label = inputs
image = image - tf.constant([104, 116, 122], dtype='float32')
label = tf.identity(label, name="label")
predict = resnet101(image, label)
costs = []
prob = tf.nn.softmax(predict, name='prob')
label4d = tf.expand_dims(label, 3, name='label4d')
new_size = prob.get_shape()[1:3]
#label_resized = tf.image.resize_nearest_neighbor(label4d, new_size)
cost = symbf.softmax_cross_entropy_with_ignore_label(logits=predict, label=label4d,
class_num=CLASS_NUM)
prediction = tf.argmax(prob, axis=-1,name="prediction")
cost = tf.reduce_mean(cost, name='cross_entropy_loss') # the average cross-entropy loss
costs.append(cost)
if get_current_tower_context().is_training:
wd_w = tf.train.exponential_decay(2e-4, get_global_step_var(),
80000, 0.7, True)
wd_cost = tf.multiply(wd_w, regularize_cost('.*/W', tf.nn.l2_loss), name='wd_cost')
costs.append(wd_cost)
add_param_summary(('.*/W', ['histogram'])) # monitor W
self.cost = tf.add_n(costs, name='cost')
add_moving_summary(costs + [self.cost])
def _get_optimizer(self):
lr = tf.get_variable('learning_rate', initializer=2.5e-4, trainable=False)
opt = tf.train.AdamOptimizer(lr, epsilon=2.5e-4)
return optimizer.apply_grad_processors(
opt, [gradproc.ScaleGradient(
[('fpn.*W', 10),('fpn.*b',20)])])
def get_data(name, data_dir, meta_dir, batch_size):
isTrain = name == 'train'
ds = dataset.PascalVOC12(data_dir, meta_dir, name, shuffle=True)
if isTrain:
shape_aug = [
imgaug.RandomResize(xrange=(0.7, 1.5), yrange=(0.7, 1.5),
aspect_ratio_thres=0.15),
RandomCropWithPadding(CROP_SIZE,IGNORE_LABEL),
imgaug.Flip(horiz=True),
]
else:
shape_aug = []
pass
ds = AugmentImageComponents(ds, shape_aug, (0, 1), copy=False)
def f(ds):
return ds
if isTrain:
ds = MapData(ds, f)
ds = BatchData(ds, batch_size)
ds = PrefetchDataZMQ(ds, 1)
else:
ds = BatchData(ds, 1)
return ds
def view_data(data_dir, meta_dir, batch_size):
ds = RepeatedData(get_data('train',data_dir, meta_dir, batch_size), -1)
ds.reset_state()
for ims, labels in ds.get_data():
for im, label in zip(ims, labels):
#aa = visualize_label(label)
#pass
cv2.imshow("im", im / 255.0)
cv2.imshow("raw-label", label)
cv2.imshow("color-label", visualize_label(label))
cv2.waitKey(0)
def get_config(data_dir, meta_dir, batch_size):
logger.auto_set_dir()
dataset_train = get_data('train', data_dir, meta_dir, batch_size)
steps_per_epoch = dataset_train.size() * 12
dataset_val = get_data('val', data_dir, meta_dir, batch_size)
return TrainConfig(
dataflow=dataset_train,
callbacks=[
ModelSaver(),
ScheduledHyperParamSetter('learning_rate', [(2, 1e-4), (4, 1e-5), (6, 8e-6)]),
HumanHyperParamSetter('learning_rate'),
PeriodicTrigger(CalculateMIoU(CLASS_NUM), every_k_epochs=1),
ProgressBar(["cross_entropy_loss","cost","wd_cost"])#uncomment it to debug for every step
],
model=Model(),
steps_per_epoch=steps_per_epoch,
max_epoch=10,
)
def run(model_path, image_path, output):
pred_config = PredictConfig(
model=Model(),
session_init=get_model_loader(model_path),
input_names=['image'],
output_names=['output' + str(k) for k in range(1, 7)])
predictor = OfflinePredictor(pred_config)
im = cv2.imread(image_path)
assert im is not None
im = cv2.resize(
im, (im.shape[1] // 16 * 16, im.shape[0] // 16 * 16)
)[None, :, :, :].astype('float32')
outputs = predictor(im)
if output is None:
for k in range(6):
pred = outputs[k][0]
cv2.imwrite("out{}.png".format(
'-fused' if k == 5 else str(k + 1)), pred * 255)
else:
pred = outputs[5][0]
cv2.imwrite(output, pred * 255)
def proceed_validation(args, is_save = True, is_densecrf = False):
import cv2
ds = dataset.PascalVOC12Edge(args.data_dir, args.meta_dir, "val")
ds = BatchData(ds, 1)
pred_config = PredictConfig(
model=Model(),
session_init=get_model_loader(args.load),
input_names=['image'],
output_names=['prob'])
predictor = OfflinePredictor(pred_config)
i = 0
stat = MIoUStatistics(CLASS_NUM)
logger.info("start validation....")
for image, label in tqdm(ds.get_data()):
label = np.squeeze(label)
image = np.squeeze(image)
prediction = predict_scaler(image, predictor, scales=[0.9, 1, 1.1], classes=CLASS_NUM, tile_size=CROP_SIZE, is_densecrf = is_densecrf)
prediction = np.argmax(prediction, axis=2)
stat.feed(prediction, label)
if is_save:
cv2.imwrite("result/{}.png".format(i), np.concatenate((image, visualize_label(label), visualize_label(prediction)), axis=1))
i += 1
logger.info("mIoU: {}".format(stat.mIoU))
logger.info("mean_accuracy: {}".format(stat.mean_accuracy))
logger.info("accuracy: {}".format(stat.accuracy))
stat.print_confusion_matrix()
class CalculateMIoU(Callback):
def __init__(self, nb_class):
self.nb_class = nb_class
def _setup_graph(self):
self.pred = self.trainer.get_predictor(
['image'], ['prob'])
def _before_train(self):
pass
def _trigger(self):
global args
self.val_ds = get_data('val', args.data_dir, args.meta_dir, args.batch_size)
self.val_ds.reset_state()
self.stat = MIoUStatistics(self.nb_class)
for image, label in tqdm(self.val_ds.get_data()):
label = np.squeeze(label)
image = np.squeeze(image)
prediction = predict_scaler(image, self.pred, scales=[0.9, 1, 1.1], classes=CLASS_NUM, tile_size=CROP_SIZE,
is_densecrf=False)
prediction = np.argmax(prediction, axis=2)
self.stat.feed(prediction, label)
self.trainer.monitors.put_scalar("mIoU", self.stat.mIoU)
self.trainer.monitors.put_scalar("mean_accuracy", self.stat.mean_accuracy)
self.trainer.monitors.put_scalar("accuracy", self.stat.accuracy)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', default="5", help='comma separated list of GPU(s) to use.')
parser.add_argument('--data_dir', default="/data2/dataset/pascalvoc2012/VOC2012trainval/VOCdevkit/VOC2012",
help='dataset dir')
parser.add_argument('--meta_dir', default="../metadata/pascalvoc12", help='meta dir')
parser.add_argument('--load', default="../resnet101.npz", help='load model')
parser.add_argument('--view', help='view dataset', action='store_true')
parser.add_argument('--run', help='run model on images')
    parser.add_argument('--batch_size', type=int, default=10, help='batch_size')
parser.add_argument('--output', help='fused output filename. default to out-fused.png')
parser.add_argument('--validation', action='store_true', help='validate model on validation images')
args = parser.parse_args()
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
if args.view:
view_data(args.data_dir,args.meta_dir,args.batch_size)
elif args.run:
run(args.load, args.run, args.output)
elif args.validation:
proceed_validation(args)
else:
config = get_config(args.data_dir,args.meta_dir,args.batch_size)
if args.load:
config.session_init = get_model_loader(args.load)
launch_train_with_config(
config,
SyncMultiGPUTrainer(max(get_nr_gpu(), 1)))
| []
| []
| [
"CUDA_VISIBLE_DEVICES",
"TENSORPACK_TRAIN_API"
]
| [] | ["CUDA_VISIBLE_DEVICES", "TENSORPACK_TRAIN_API"] | python | 2 | 0 | |
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java | /**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.io.PrintWriter;
import java.net.InetAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.net.UnknownHostException;
import java.nio.charset.StandardCharsets;
import java.nio.file.FileSystems;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.jar.Attributes;
import java.util.jar.JarFile;
import java.util.jar.Manifest;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;
import org.apache.commons.compress.archivers.tar.TarArchiveEntry;
import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.tools.tar.TarEntry;
import org.apache.tools.tar.TarOutputStream;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class TestFileUtil {
private static final Logger LOG = LoggerFactory.getLogger(TestFileUtil.class);
@Rule
public TemporaryFolder testFolder = new TemporaryFolder();
private static final String FILE = "x";
private static final String LINK = "y";
private static final String DIR = "dir";
private static final String FILE_1_NAME = "file1";
private File del;
private File tmp;
private File dir1;
private File dir2;
private File partitioned;
private File xSubDir;
private File xSubSubDir;
private File ySubDir;
private File file2;
private File file22;
private File file3;
private File zlink;
private InetAddress inet1;
private InetAddress inet2;
private InetAddress inet3;
private InetAddress inet4;
private InetAddress inet5;
private InetAddress inet6;
private URI uri1;
private URI uri2;
private URI uri3;
private URI uri4;
private URI uri5;
private URI uri6;
private FileSystem fs1;
private FileSystem fs2;
private FileSystem fs3;
private FileSystem fs4;
private FileSystem fs5;
private FileSystem fs6;
/**
* Creates multiple directories for testing.
*
* Contents of them are
* dir:tmp:
* file: x
* dir:del:
* file: x
* dir: dir1 : file:x
* dir: dir2 : file:x
* link: y to tmp/x
* link: tmpDir to tmp
* dir:partitioned:
* file: part-r-00000, contents: "foo"
* file: part-r-00001, contents: "bar"
*/
@Before
public void setup() throws IOException {
del = testFolder.newFolder("del");
tmp = testFolder.newFolder("tmp");
partitioned = testFolder.newFolder("partitioned");
zlink = new File(del, "zlink");
xSubDir = new File(del, "xSubDir");
xSubSubDir = new File(xSubDir, "xSubSubDir");
ySubDir = new File(del, "ySubDir");
file2 = new File(xSubDir, "file2");
file22 = new File(xSubSubDir, "file22");
file3 = new File(ySubDir, "file3");
dir1 = new File(del, DIR + "1");
dir2 = new File(del, DIR + "2");
FileUtils.forceMkdir(dir1);
FileUtils.forceMkdir(dir2);
new File(del, FILE).createNewFile();
File tmpFile = new File(tmp, FILE);
tmpFile.createNewFile();
// create files
new File(dir1, FILE).createNewFile();
new File(dir2, FILE).createNewFile();
// create a symlink to file
File link = new File(del, LINK);
FileUtil.symLink(tmpFile.toString(), link.toString());
// create a symlink to dir
File linkDir = new File(del, "tmpDir");
FileUtil.symLink(tmp.toString(), linkDir.toString());
Assert.assertEquals(5, del.listFiles().length);
// create files in partitioned directories
createFile(partitioned, "part-r-00000", "foo");
createFile(partitioned, "part-r-00001", "bar");
// create a cycle using symlinks. Cycles should be handled
FileUtil.symLink(del.toString(), dir1.toString() + "/cycle");
}
@After
public void tearDown() throws IOException {
testFolder.delete();
}
/**
* Creates a new file in the specified directory, with the specified name and
* the specified file contents. This method will add a newline terminator to
* the end of the contents string in the destination file.
* @param directory File non-null destination directory.
* @param name String non-null file name.
* @param contents String non-null file contents.
* @throws IOException if an I/O error occurs.
*/
private File createFile(File directory, String name, String contents)
throws IOException {
File newFile = new File(directory, name);
PrintWriter pw = new PrintWriter(newFile);
try {
pw.println(contents);
}
finally {
pw.close();
}
return newFile;
}
@Test (timeout = 30000)
public void testListFiles() throws IOException {
//Test existing files case
File[] files = FileUtil.listFiles(partitioned);
Assert.assertEquals(2, files.length);
//Test existing directory with no files case
File newDir = new File(tmp.getPath(),"test");
newDir.mkdir();
Assert.assertTrue("Failed to create test dir", newDir.exists());
files = FileUtil.listFiles(newDir);
Assert.assertEquals(0, files.length);
newDir.delete();
Assert.assertFalse("Failed to delete test dir", newDir.exists());
//Test non-existing directory case, this throws
//IOException
try {
files = FileUtil.listFiles(newDir);
Assert.fail("IOException expected on listFiles() for non-existent dir "
+ newDir.toString());
} catch(IOException ioe) {
//Expected an IOException
}
}
@Test (timeout = 30000)
public void testListAPI() throws IOException {
//Test existing files case
String[] files = FileUtil.list(partitioned);
Assert.assertEquals("Unexpected number of pre-existing files", 2, files.length);
//Test existing directory with no files case
File newDir = new File(tmp.getPath(),"test");
newDir.mkdir();
Assert.assertTrue("Failed to create test dir", newDir.exists());
files = FileUtil.list(newDir);
Assert.assertEquals("New directory unexpectedly contains files", 0, files.length);
newDir.delete();
Assert.assertFalse("Failed to delete test dir", newDir.exists());
//Test non-existing directory case, this throws
//IOException
try {
files = FileUtil.list(newDir);
Assert.fail("IOException expected on list() for non-existent dir "
+ newDir.toString());
} catch(IOException ioe) {
//Expected an IOException
}
}
@Test (timeout = 30000)
public void testFullyDelete() throws IOException {
boolean ret = FileUtil.fullyDelete(del);
Assert.assertTrue(ret);
Assert.assertFalse(del.exists());
validateTmpDir();
}
/**
* Tests if fullyDelete deletes
* (a) symlink to file only and not the file pointed to by symlink.
* (b) symlink to dir only and not the dir pointed to by symlink.
* @throws IOException
*/
@Test (timeout = 30000)
public void testFullyDeleteSymlinks() throws IOException {
File link = new File(del, LINK);
Assert.assertEquals(5, del.list().length);
// Since tmpDir is symlink to tmp, fullyDelete(tmpDir) should not
// delete contents of tmp. See setupDirs for details.
boolean ret = FileUtil.fullyDelete(link);
Assert.assertTrue(ret);
Assert.assertFalse(link.exists());
Assert.assertEquals(4, del.list().length);
validateTmpDir();
File linkDir = new File(del, "tmpDir");
// Since tmpDir is symlink to tmp, fullyDelete(tmpDir) should not
// delete contents of tmp. See setupDirs for details.
ret = FileUtil.fullyDelete(linkDir);
Assert.assertTrue(ret);
Assert.assertFalse(linkDir.exists());
Assert.assertEquals(3, del.list().length);
validateTmpDir();
}
/**
* Tests if fullyDelete deletes
* (a) dangling symlink to file properly
* (b) dangling symlink to directory properly
* @throws IOException
*/
@Test (timeout = 30000)
public void testFullyDeleteDanglingSymlinks() throws IOException {
// delete the directory tmp to make tmpDir a dangling link to dir tmp and
// to make y as a dangling link to file tmp/x
boolean ret = FileUtil.fullyDelete(tmp);
Assert.assertTrue(ret);
Assert.assertFalse(tmp.exists());
// dangling symlink to file
File link = new File(del, LINK);
Assert.assertEquals(5, del.list().length);
// Even though 'y' is dangling symlink to file tmp/x, fullyDelete(y)
// should delete 'y' properly.
ret = FileUtil.fullyDelete(link);
Assert.assertTrue(ret);
Assert.assertEquals(4, del.list().length);
// dangling symlink to directory
File linkDir = new File(del, "tmpDir");
// Even though tmpDir is dangling symlink to tmp, fullyDelete(tmpDir) should
// delete tmpDir properly.
ret = FileUtil.fullyDelete(linkDir);
Assert.assertTrue(ret);
Assert.assertEquals(3, del.list().length);
}
@Test (timeout = 30000)
public void testFullyDeleteContents() throws IOException {
boolean ret = FileUtil.fullyDeleteContents(del);
Assert.assertTrue(ret);
Assert.assertTrue(del.exists());
Assert.assertEquals(0, del.listFiles().length);
validateTmpDir();
}
private void validateTmpDir() {
Assert.assertTrue(tmp.exists());
Assert.assertEquals(1, tmp.listFiles().length);
Assert.assertTrue(new File(tmp, FILE).exists());
}
/**
* Creates a directory which can not be deleted completely.
*
* Directory structure. The naming is important in that {@link MyFile}
* is used to return them in alphabetical order when listed.
*
* del(+w)
* |
* .---------------------------------------,
* | | | |
* file1(!w) xSubDir(-rwx) ySubDir(+w) zlink
* | | |
* | file2(-rwx) file3
* |
* xSubSubDir(-rwx)
* |
* file22(-rwx)
*
* @throws IOException
*/
private void setupDirsAndNonWritablePermissions() throws IOException {
new MyFile(del, FILE_1_NAME).createNewFile();
// "file1" is non-deletable by default, see MyFile.delete().
xSubDir.mkdirs();
file2.createNewFile();
xSubSubDir.mkdirs();
file22.createNewFile();
revokePermissions(file22);
revokePermissions(xSubSubDir);
revokePermissions(file2);
revokePermissions(xSubDir);
ySubDir.mkdirs();
file3.createNewFile();
File tmpFile = new File(tmp, FILE);
tmpFile.createNewFile();
FileUtil.symLink(tmpFile.toString(), zlink.toString());
}
private static void grantPermissions(final File f) {
FileUtil.setReadable(f, true);
FileUtil.setWritable(f, true);
FileUtil.setExecutable(f, true);
}
private static void revokePermissions(final File f) {
FileUtil.setWritable(f, false);
FileUtil.setExecutable(f, false);
FileUtil.setReadable(f, false);
}
// Validates the return value.
// Validates the existence of the file "file1"
private void validateAndSetWritablePermissions(
final boolean expectedRevokedPermissionDirsExist, final boolean ret) {
grantPermissions(xSubDir);
grantPermissions(xSubSubDir);
Assert.assertFalse("The return value should have been false.", ret);
Assert.assertTrue("The file file1 should not have been deleted.",
new File(del, FILE_1_NAME).exists());
Assert.assertEquals(
"The directory xSubDir *should* not have been deleted.",
expectedRevokedPermissionDirsExist, xSubDir.exists());
Assert.assertEquals("The file file2 *should* not have been deleted.",
expectedRevokedPermissionDirsExist, file2.exists());
Assert.assertEquals(
"The directory xSubSubDir *should* not have been deleted.",
expectedRevokedPermissionDirsExist, xSubSubDir.exists());
Assert.assertEquals("The file file22 *should* not have been deleted.",
expectedRevokedPermissionDirsExist, file22.exists());
Assert.assertFalse("The directory ySubDir should have been deleted.",
ySubDir.exists());
Assert.assertFalse("The link zlink should have been deleted.",
zlink.exists());
}
@Test (timeout = 30000)
public void testFailFullyDelete() throws IOException {
// Windows Dir.setWritable(false) does not work for directories
assumeNotWindows();
LOG.info("Running test to verify failure of fullyDelete()");
setupDirsAndNonWritablePermissions();
boolean ret = FileUtil.fullyDelete(new MyFile(del));
validateAndSetWritablePermissions(true, ret);
}
@Test (timeout = 30000)
public void testFailFullyDeleteGrantPermissions() throws IOException {
setupDirsAndNonWritablePermissions();
boolean ret = FileUtil.fullyDelete(new MyFile(del), true);
// this time the directories with revoked permissions *should* be deleted:
validateAndSetWritablePermissions(false, ret);
}
/**
* Extend {@link File}. Same as {@link File} except for two things: (1) This
* treats file1Name as a very special file which is not delete-able
   * irrespective of its parent-dir's permissions, a peculiar file instance for
* testing. (2) It returns the files in alphabetically sorted order when
* listed.
*
*/
public static class MyFile extends File {
private static final long serialVersionUID = 1L;
public MyFile(File f) {
super(f.getAbsolutePath());
}
public MyFile(File parent, String child) {
super(parent, child);
}
/**
* Same as {@link File#delete()} except for file1Name which will never be
* deleted (hard-coded)
*/
@Override
public boolean delete() {
LOG.info("Trying to delete myFile " + getAbsolutePath());
boolean bool = false;
if (getName().equals(FILE_1_NAME)) {
bool = false;
} else {
bool = super.delete();
}
if (bool) {
LOG.info("Deleted " + getAbsolutePath() + " successfully");
} else {
LOG.info("Cannot delete " + getAbsolutePath());
}
return bool;
}
/**
* Return the list of files in an alphabetically sorted order
*/
@Override
public File[] listFiles() {
final File[] files = super.listFiles();
if (files == null) {
return null;
}
List<File> filesList = Arrays.asList(files);
Collections.sort(filesList);
File[] myFiles = new MyFile[files.length];
int i=0;
for(File f : filesList) {
myFiles[i++] = new MyFile(f);
}
return myFiles;
}
}
@Test (timeout = 30000)
public void testFailFullyDeleteContents() throws IOException {
// Windows Dir.setWritable(false) does not work for directories
assumeNotWindows();
LOG.info("Running test to verify failure of fullyDeleteContents()");
setupDirsAndNonWritablePermissions();
boolean ret = FileUtil.fullyDeleteContents(new MyFile(del));
validateAndSetWritablePermissions(true, ret);
}
@Test (timeout = 30000)
public void testFailFullyDeleteContentsGrantPermissions() throws IOException {
setupDirsAndNonWritablePermissions();
boolean ret = FileUtil.fullyDeleteContents(new MyFile(del), true);
// this time the directories with revoked permissions *should* be deleted:
validateAndSetWritablePermissions(false, ret);
}
/**
* Test that getDU is able to handle cycles caused due to symbolic links
* and that directory sizes are not added to the final calculated size
* @throws IOException
*/
@Test (timeout = 30000)
public void testGetDU() throws Exception {
long du = FileUtil.getDU(testFolder.getRoot());
// Only two files (in partitioned). Each has 3 characters + system-specific
// line separator.
final long expected = 2 * (3 + System.getProperty("line.separator").length());
Assert.assertEquals(expected, du);
// target file does not exist:
final File doesNotExist = new File(tmp, "QuickBrownFoxJumpsOverTheLazyDog");
long duDoesNotExist = FileUtil.getDU(doesNotExist);
assertEquals(0, duDoesNotExist);
// target file is not a directory:
File notADirectory = new File(partitioned, "part-r-00000");
long duNotADirectoryActual = FileUtil.getDU(notADirectory);
long duNotADirectoryExpected = 3 + System.getProperty("line.separator").length();
assertEquals(duNotADirectoryExpected, duNotADirectoryActual);
try {
// one of target files is not accessible, but the containing directory
// is accessible:
try {
FileUtil.chmod(notADirectory.getAbsolutePath(), "0000");
} catch (InterruptedException ie) {
// should never happen since that method never throws InterruptedException.
assertNull(ie);
}
assertFalse(FileUtil.canRead(notADirectory));
final long du3 = FileUtil.getDU(partitioned);
assertEquals(expected, du3);
// some target files and containing directory are not accessible:
try {
FileUtil.chmod(partitioned.getAbsolutePath(), "0000");
} catch (InterruptedException ie) {
// should never happen since that method never throws InterruptedException.
assertNull(ie);
}
assertFalse(FileUtil.canRead(partitioned));
final long du4 = FileUtil.getDU(partitioned);
assertEquals(0, du4);
} finally {
// Restore the permissions so that we can delete the folder
// in @After method:
FileUtil.chmod(partitioned.getAbsolutePath(), "0777", true/*recursive*/);
}
}
@Test (timeout = 30000)
public void testUnTar() throws IOException {
// make a simple tar:
final File simpleTar = new File(del, FILE);
OutputStream os = new FileOutputStream(simpleTar);
TarOutputStream tos = new TarOutputStream(os);
try {
TarEntry te = new TarEntry("/bar/foo");
byte[] data = "some-content".getBytes("UTF-8");
te.setSize(data.length);
tos.putNextEntry(te);
tos.write(data);
tos.closeEntry();
tos.flush();
tos.finish();
} finally {
tos.close();
}
// successfully untar it into an existing dir:
FileUtil.unTar(simpleTar, tmp);
// check result:
assertTrue(new File(tmp, "/bar/foo").exists());
assertEquals(12, new File(tmp, "/bar/foo").length());
final File regularFile = new File(tmp, "QuickBrownFoxJumpsOverTheLazyDog");
regularFile.createNewFile();
assertTrue(regularFile.exists());
try {
FileUtil.unTar(simpleTar, regularFile);
assertTrue("An IOException expected.", false);
} catch (IOException ioe) {
// okay
}
}
@Test (timeout = 30000)
public void testReplaceFile() throws IOException {
final File srcFile = new File(tmp, "src");
// src exists, and target does not exist:
srcFile.createNewFile();
assertTrue(srcFile.exists());
final File targetFile = new File(tmp, "target");
assertTrue(!targetFile.exists());
FileUtil.replaceFile(srcFile, targetFile);
assertTrue(!srcFile.exists());
assertTrue(targetFile.exists());
// src exists and target is a regular file:
srcFile.createNewFile();
assertTrue(srcFile.exists());
FileUtil.replaceFile(srcFile, targetFile);
assertTrue(!srcFile.exists());
assertTrue(targetFile.exists());
// src exists, and target is a non-empty directory:
srcFile.createNewFile();
assertTrue(srcFile.exists());
targetFile.delete();
targetFile.mkdirs();
File obstacle = new File(targetFile, "obstacle");
obstacle.createNewFile();
assertTrue(obstacle.exists());
assertTrue(targetFile.exists() && targetFile.isDirectory());
try {
FileUtil.replaceFile(srcFile, targetFile);
      fail("replaceFile should have thrown an IOException.");
} catch (IOException ioe) {
// okay
}
// check up the post-condition: nothing is deleted:
assertTrue(srcFile.exists());
assertTrue(targetFile.exists() && targetFile.isDirectory());
assertTrue(obstacle.exists());
}
@Test (timeout = 30000)
public void testCreateLocalTempFile() throws IOException {
final File baseFile = new File(tmp, "base");
File tmp1 = FileUtil.createLocalTempFile(baseFile, "foo", false);
File tmp2 = FileUtil.createLocalTempFile(baseFile, "foo", true);
assertFalse(tmp1.getAbsolutePath().equals(baseFile.getAbsolutePath()));
assertFalse(tmp2.getAbsolutePath().equals(baseFile.getAbsolutePath()));
assertTrue(tmp1.exists() && tmp2.exists());
assertTrue(tmp1.canWrite() && tmp2.canWrite());
assertTrue(tmp1.canRead() && tmp2.canRead());
tmp1.delete();
tmp2.delete();
assertTrue(!tmp1.exists() && !tmp2.exists());
}
@Test (timeout = 30000)
public void testUnZip() throws IOException {
    // make a simple zip
final File simpleZip = new File(del, FILE);
OutputStream os = new FileOutputStream(simpleZip);
ZipOutputStream tos = new ZipOutputStream(os);
try {
ZipEntry ze = new ZipEntry("foo");
byte[] data = "some-content".getBytes("UTF-8");
ze.setSize(data.length);
tos.putNextEntry(ze);
tos.write(data);
tos.closeEntry();
tos.flush();
tos.finish();
} finally {
tos.close();
}
// successfully unzip it into an existing dir:
FileUtil.unZip(simpleZip, tmp);
// check result:
assertTrue(new File(tmp, "foo").exists());
assertEquals(12, new File(tmp, "foo").length());
final File regularFile = new File(tmp, "QuickBrownFoxJumpsOverTheLazyDog");
regularFile.createNewFile();
assertTrue(regularFile.exists());
try {
FileUtil.unZip(simpleZip, regularFile);
assertTrue("An IOException expected.", false);
} catch (IOException ioe) {
// okay
}
}
@Test (timeout = 30000)
public void testUnZip2() throws IOException {
// make a simple zip
final File simpleZip = new File(del, FILE);
OutputStream os = new FileOutputStream(simpleZip);
try (ZipOutputStream tos = new ZipOutputStream(os)) {
// Add an entry that contains invalid filename
ZipEntry ze = new ZipEntry("../foo");
byte[] data = "some-content".getBytes(StandardCharsets.UTF_8);
ze.setSize(data.length);
tos.putNextEntry(ze);
tos.write(data);
tos.closeEntry();
tos.flush();
tos.finish();
}
// Unzip it into an existing dir
try {
FileUtil.unZip(simpleZip, tmp);
fail("unZip should throw IOException.");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains(
"would create file outside of", e);
}
}
@Test (timeout = 30000)
/*
* Test method copy(FileSystem srcFS, Path src, File dst, boolean deleteSource, Configuration conf)
*/
public void testCopy5() throws IOException {
URI uri = tmp.toURI();
Configuration conf = new Configuration();
FileSystem fs = FileSystem.newInstance(uri, conf);
final String content = "some-content";
File srcFile = createFile(tmp, "src", content);
Path srcPath = new Path(srcFile.toURI());
// copy regular file:
final File dest = new File(del, "dest");
boolean result = FileUtil.copy(fs, srcPath, dest, false, conf);
assertTrue(result);
assertTrue(dest.exists());
assertEquals(content.getBytes().length
+ System.getProperty("line.separator").getBytes().length, dest.length());
assertTrue(srcFile.exists()); // should not be deleted
// copy regular file, delete src:
dest.delete();
assertTrue(!dest.exists());
result = FileUtil.copy(fs, srcPath, dest, true, conf);
assertTrue(result);
assertTrue(dest.exists());
assertEquals(content.getBytes().length
+ System.getProperty("line.separator").getBytes().length, dest.length());
assertTrue(!srcFile.exists()); // should be deleted
// copy a dir:
dest.delete();
assertTrue(!dest.exists());
srcPath = new Path(partitioned.toURI());
result = FileUtil.copy(fs, srcPath, dest, true, conf);
assertTrue(result);
assertTrue(dest.exists() && dest.isDirectory());
File[] files = dest.listFiles();
assertTrue(files != null);
assertEquals(2, files.length);
for (File f: files) {
assertEquals(3
+ System.getProperty("line.separator").getBytes().length, f.length());
}
assertTrue(!partitioned.exists()); // should be deleted
}
@Test (timeout = 30000)
public void testStat2Paths1() {
assertNull(FileUtil.stat2Paths(null));
FileStatus[] fileStatuses = new FileStatus[0];
Path[] paths = FileUtil.stat2Paths(fileStatuses);
assertEquals(0, paths.length);
Path path1 = new Path("file://foo");
Path path2 = new Path("file://moo");
fileStatuses = new FileStatus[] {
new FileStatus(3, false, 0, 0, 0, path1),
new FileStatus(3, false, 0, 0, 0, path2)
};
paths = FileUtil.stat2Paths(fileStatuses);
assertEquals(2, paths.length);
assertEquals(paths[0], path1);
assertEquals(paths[1], path2);
}
@Test (timeout = 30000)
public void testStat2Paths2() {
Path defaultPath = new Path("file://default");
Path[] paths = FileUtil.stat2Paths(null, defaultPath);
assertEquals(1, paths.length);
assertEquals(defaultPath, paths[0]);
paths = FileUtil.stat2Paths(null, null);
assertTrue(paths != null);
assertEquals(1, paths.length);
assertEquals(null, paths[0]);
Path path1 = new Path("file://foo");
Path path2 = new Path("file://moo");
FileStatus[] fileStatuses = new FileStatus[] {
new FileStatus(3, false, 0, 0, 0, path1),
new FileStatus(3, false, 0, 0, 0, path2)
};
paths = FileUtil.stat2Paths(fileStatuses, defaultPath);
assertEquals(2, paths.length);
assertEquals(paths[0], path1);
assertEquals(paths[1], path2);
}
@Test (timeout = 30000)
public void testSymlink() throws Exception {
byte[] data = "testSymLink".getBytes();
File file = new File(del, FILE);
File link = new File(del, "_link");
//write some data to the file
FileOutputStream os = new FileOutputStream(file);
os.write(data);
os.close();
//create the symlink
FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath());
//ensure that symlink length is correctly reported by Java
Assert.assertEquals(data.length, file.length());
Assert.assertEquals(data.length, link.length());
//ensure that we can read from link.
FileInputStream in = new FileInputStream(link);
long len = 0;
while (in.read() > 0) {
len++;
}
in.close();
Assert.assertEquals(data.length, len);
}
/**
* Test that rename on a symlink works as expected.
*/
@Test (timeout = 30000)
public void testSymlinkRenameTo() throws Exception {
File file = new File(del, FILE);
file.createNewFile();
File link = new File(del, "_link");
// create the symlink
FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath());
Assert.assertTrue(file.exists());
Assert.assertTrue(link.exists());
File link2 = new File(del, "_link2");
// Rename the symlink
Assert.assertTrue(link.renameTo(link2));
// Make sure the file still exists
// (NOTE: this would fail on Java6 on Windows if we didn't
// copy the file in FileUtil#symlink)
Assert.assertTrue(file.exists());
Assert.assertTrue(link2.exists());
Assert.assertFalse(link.exists());
}
/**
* Test that deletion of a symlink works as expected.
*/
@Test (timeout = 30000)
public void testSymlinkDelete() throws Exception {
File file = new File(del, FILE);
file.createNewFile();
File link = new File(del, "_link");
// create the symlink
FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath());
Assert.assertTrue(file.exists());
Assert.assertTrue(link.exists());
// make sure that deleting a symlink works properly
Assert.assertTrue(link.delete());
Assert.assertFalse(link.exists());
Assert.assertTrue(file.exists());
}
/**
* Test that length on a symlink works as expected.
*/
@Test (timeout = 30000)
public void testSymlinkLength() throws Exception {
byte[] data = "testSymLinkData".getBytes();
File file = new File(del, FILE);
File link = new File(del, "_link");
// write some data to the file
FileOutputStream os = new FileOutputStream(file);
os.write(data);
os.close();
Assert.assertEquals(0, link.length());
// create the symlink
FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath());
// ensure that File#length returns the target file and link size
Assert.assertEquals(data.length, file.length());
Assert.assertEquals(data.length, link.length());
file.delete();
Assert.assertFalse(file.exists());
Assert.assertEquals(0, link.length());
link.delete();
Assert.assertFalse(link.exists());
}
/**
* This test validates the correctness of
* {@link FileUtil#symLink(String, String)} in case of null pointer inputs.
*
* @throws IOException
*/
@Test
public void testSymlinkWithNullInput() throws IOException {
File file = new File(del, FILE);
File link = new File(del, "_link");
// Create the same symbolic link
// The operation should fail and returns 1
int result = FileUtil.symLink(null, null);
Assert.assertEquals(1, result);
// Create the same symbolic link
// The operation should fail and returns 1
result = FileUtil.symLink(file.getAbsolutePath(), null);
Assert.assertEquals(1, result);
// Create the same symbolic link
// The operation should fail and returns 1
result = FileUtil.symLink(null, link.getAbsolutePath());
Assert.assertEquals(1, result);
}
/**
* This test validates the correctness of
* {@link FileUtil#symLink(String, String)} in case the file already exists.
*
* @throws IOException
*/
@Test
public void testSymlinkFileAlreadyExists() throws IOException {
File file = new File(del, FILE);
File link = new File(del, "_link");
// Create a symbolic link
// The operation should succeed
int result1 =
FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath());
Assert.assertEquals(0, result1);
// Create the same symbolic link
// The operation should fail and returns 1
result1 = FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath());
Assert.assertEquals(1, result1);
}
/**
* This test validates the correctness of
* {@link FileUtil#symLink(String, String)} in case the file and the link are
* the same file.
*
* @throws IOException
*/
@Test
public void testSymlinkSameFile() throws IOException {
File file = new File(del, FILE);
file.delete();
// Create a symbolic link
// The operation should succeed
int result =
FileUtil.symLink(file.getAbsolutePath(), file.getAbsolutePath());
Assert.assertEquals(0, result);
}
/**
* This test validates the correctness of
* {@link FileUtil#symLink(String, String)} in case we want to use a link for
* 2 different files.
*
* @throws IOException
*/
@Test
public void testSymlink2DifferentFile() throws IOException {
File file = new File(del, FILE);
File fileSecond = new File(del, FILE + "_1");
File link = new File(del, "_link");
// Create a symbolic link
// The operation should succeed
int result =
FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath());
Assert.assertEquals(0, result);
// The operation should fail and returns 1
result =
FileUtil.symLink(fileSecond.getAbsolutePath(), link.getAbsolutePath());
Assert.assertEquals(1, result);
}
/**
* This test validates the correctness of
* {@link FileUtil#symLink(String, String)} in case we want to use a 2
* different links for the same file.
*
* @throws IOException
*/
@Test
public void testSymlink2DifferentLinks() throws IOException {
File file = new File(del, FILE);
File link = new File(del, "_link");
File linkSecond = new File(del, "_link_1");
// Create a symbolic link
// The operation should succeed
int result =
FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath());
Assert.assertEquals(0, result);
// The operation should succeed
result =
FileUtil.symLink(file.getAbsolutePath(), linkSecond.getAbsolutePath());
Assert.assertEquals(0, result);
}
private void doUntarAndVerify(File tarFile, File untarDir)
throws IOException {
if (untarDir.exists() && !FileUtil.fullyDelete(untarDir)) {
throw new IOException("Could not delete directory '" + untarDir + "'");
}
FileUtil.unTar(tarFile, untarDir);
String parentDir = untarDir.getCanonicalPath() + Path.SEPARATOR + "name";
File testFile = new File(parentDir + Path.SEPARATOR + "version");
Assert.assertTrue(testFile.exists());
Assert.assertTrue(testFile.length() == 0);
String imageDir = parentDir + Path.SEPARATOR + "image";
testFile = new File(imageDir + Path.SEPARATOR + "fsimage");
Assert.assertTrue(testFile.exists());
Assert.assertTrue(testFile.length() == 157);
String currentDir = parentDir + Path.SEPARATOR + "current";
testFile = new File(currentDir + Path.SEPARATOR + "fsimage");
Assert.assertTrue(testFile.exists());
Assert.assertTrue(testFile.length() == 4331);
testFile = new File(currentDir + Path.SEPARATOR + "edits");
Assert.assertTrue(testFile.exists());
Assert.assertTrue(testFile.length() == 1033);
testFile = new File(currentDir + Path.SEPARATOR + "fstime");
Assert.assertTrue(testFile.exists());
Assert.assertTrue(testFile.length() == 8);
}
@Test (timeout = 30000)
public void testUntar() throws IOException {
String tarGzFileName = System.getProperty("test.cache.data",
"target/test/cache") + "/test-untar.tgz";
String tarFileName = System.getProperty("test.cache.data",
"build/test/cache") + "/test-untar.tar";
File dataDir = GenericTestUtils.getTestDir();
File untarDir = new File(dataDir, "untarDir");
doUntarAndVerify(new File(tarGzFileName), untarDir);
doUntarAndVerify(new File(tarFileName), untarDir);
}
@Test (timeout = 30000)
public void testCreateJarWithClassPath() throws Exception {
// create files expected to match a wildcard
List<File> wildcardMatches = Arrays.asList(new File(tmp, "wildcard1.jar"),
new File(tmp, "wildcard2.jar"), new File(tmp, "wildcard3.JAR"),
new File(tmp, "wildcard4.JAR"));
for (File wildcardMatch: wildcardMatches) {
Assert.assertTrue("failure creating file: " + wildcardMatch,
wildcardMatch.createNewFile());
}
// create non-jar files, which we expect to not be included in the classpath
Assert.assertTrue(new File(tmp, "text.txt").createNewFile());
Assert.assertTrue(new File(tmp, "executable.exe").createNewFile());
Assert.assertTrue(new File(tmp, "README").createNewFile());
// create classpath jar
String wildcardPath = tmp.getCanonicalPath() + File.separator + "*";
String nonExistentSubdir = tmp.getCanonicalPath() + Path.SEPARATOR + "subdir"
+ Path.SEPARATOR;
List<String> classPaths = Arrays.asList("", "cp1.jar", "cp2.jar", wildcardPath,
"cp3.jar", nonExistentSubdir);
String inputClassPath = StringUtils.join(File.pathSeparator, classPaths);
String[] jarCp = FileUtil.createJarWithClassPath(inputClassPath + File.pathSeparator + "unexpandedwildcard/*",
new Path(tmp.getCanonicalPath()), System.getenv());
String classPathJar = jarCp[0];
assertNotEquals("Unexpanded wildcard was not placed in extra classpath", jarCp[1].indexOf("unexpanded"), -1);
// verify classpath by reading manifest from jar file
JarFile jarFile = null;
try {
jarFile = new JarFile(classPathJar);
Manifest jarManifest = jarFile.getManifest();
Assert.assertNotNull(jarManifest);
Attributes mainAttributes = jarManifest.getMainAttributes();
Assert.assertNotNull(mainAttributes);
Assert.assertTrue(mainAttributes.containsKey(Attributes.Name.CLASS_PATH));
String classPathAttr = mainAttributes.getValue(Attributes.Name.CLASS_PATH);
Assert.assertNotNull(classPathAttr);
List<String> expectedClassPaths = new ArrayList<String>();
for (String classPath: classPaths) {
if (classPath.length() == 0) {
continue;
}
if (wildcardPath.equals(classPath)) {
// add wildcard matches
for (File wildcardMatch: wildcardMatches) {
expectedClassPaths.add(wildcardMatch.toURI().toURL()
.toExternalForm());
}
} else {
File fileCp = null;
if(!new Path(classPath).isAbsolute()) {
fileCp = new File(tmp, classPath);
}
else {
fileCp = new File(classPath);
}
if (nonExistentSubdir.equals(classPath)) {
// expect to maintain trailing path separator if present in input, even
// if directory doesn't exist yet
expectedClassPaths.add(fileCp.toURI().toURL()
.toExternalForm() + Path.SEPARATOR);
} else {
expectedClassPaths.add(fileCp.toURI().toURL()
.toExternalForm());
}
}
}
List<String> actualClassPaths = Arrays.asList(classPathAttr.split(" "));
Collections.sort(expectedClassPaths);
Collections.sort(actualClassPaths);
Assert.assertEquals(expectedClassPaths, actualClassPaths);
} finally {
if (jarFile != null) {
try {
jarFile.close();
} catch (IOException e) {
LOG.warn("exception closing jarFile: " + classPathJar, e);
}
}
}
}
@Test
public void testGetJarsInDirectory() throws Exception {
List<Path> jars = FileUtil.getJarsInDirectory("/foo/bar/bogus/");
assertTrue("no jars should be returned for a bogus path",
jars.isEmpty());
// create jar files to be returned
File jar1 = new File(tmp, "wildcard1.jar");
File jar2 = new File(tmp, "wildcard2.JAR");
List<File> matches = Arrays.asList(jar1, jar2);
for (File match: matches) {
assertTrue("failure creating file: " + match, match.createNewFile());
}
// create non-jar files, which we expect to not be included in the result
assertTrue(new File(tmp, "text.txt").createNewFile());
assertTrue(new File(tmp, "executable.exe").createNewFile());
assertTrue(new File(tmp, "README").createNewFile());
// pass in the directory
String directory = tmp.getCanonicalPath();
jars = FileUtil.getJarsInDirectory(directory);
assertEquals("there should be 2 jars", 2, jars.size());
for (Path jar: jars) {
URL url = jar.toUri().toURL();
assertTrue("the jar should match either of the jars",
url.equals(jar1.toURI().toURL()) || url.equals(jar2.toURI().toURL()));
}
}
@Ignore
public void setupCompareFs() {
// Set up Strings
String host1 = "1.2.3.4";
String host2 = "2.3.4.5";
int port1 = 7000;
int port2 = 7001;
String uris1 = "hdfs://" + host1 + ":" + Integer.toString(port1) + "/tmp/foo";
String uris2 = "hdfs://" + host1 + ":" + Integer.toString(port2) + "/tmp/foo";
String uris3 = "hdfs://" + host2 + ":" + Integer.toString(port2) + "/tmp/foo";
String uris4 = "hdfs://" + host2 + ":" + Integer.toString(port2) + "/tmp/foo";
String uris5 = "file:///" + host1 + ":" + Integer.toString(port1) + "/tmp/foo";
String uris6 = "hdfs:///" + host1 + "/tmp/foo";
// Set up URI objects
try {
uri1 = new URI(uris1);
uri2 = new URI(uris2);
uri3 = new URI(uris3);
uri4 = new URI(uris4);
uri5 = new URI(uris5);
uri6 = new URI(uris6);
} catch (URISyntaxException use) {
}
// Set up InetAddress
inet1 = mock(InetAddress.class);
when(inet1.getCanonicalHostName()).thenReturn(host1);
inet2 = mock(InetAddress.class);
when(inet2.getCanonicalHostName()).thenReturn(host1);
inet3 = mock(InetAddress.class);
when(inet3.getCanonicalHostName()).thenReturn(host2);
inet4 = mock(InetAddress.class);
when(inet4.getCanonicalHostName()).thenReturn(host2);
inet5 = mock(InetAddress.class);
when(inet5.getCanonicalHostName()).thenReturn(host1);
inet6 = mock(InetAddress.class);
when(inet6.getCanonicalHostName()).thenReturn(host1);
// Link of InetAddress to corresponding URI
try {
when(InetAddress.getByName(uris1)).thenReturn(inet1);
when(InetAddress.getByName(uris2)).thenReturn(inet2);
when(InetAddress.getByName(uris3)).thenReturn(inet3);
when(InetAddress.getByName(uris4)).thenReturn(inet4);
when(InetAddress.getByName(uris5)).thenReturn(inet5);
} catch (UnknownHostException ue) {
}
fs1 = mock(FileSystem.class);
when(fs1.getUri()).thenReturn(uri1);
fs2 = mock(FileSystem.class);
when(fs2.getUri()).thenReturn(uri2);
fs3 = mock(FileSystem.class);
when(fs3.getUri()).thenReturn(uri3);
fs4 = mock(FileSystem.class);
when(fs4.getUri()).thenReturn(uri4);
fs5 = mock(FileSystem.class);
when(fs5.getUri()).thenReturn(uri5);
fs6 = mock(FileSystem.class);
when(fs6.getUri()).thenReturn(uri6);
}
@Test
public void testCompareFsNull() throws Exception {
setupCompareFs();
assertEquals(FileUtil.compareFs(null,fs1),false);
assertEquals(FileUtil.compareFs(fs1,null),false);
}
@Test
public void testCompareFsDirectories() throws Exception {
setupCompareFs();
assertEquals(FileUtil.compareFs(fs1,fs1),true);
assertEquals(FileUtil.compareFs(fs1,fs2),false);
assertEquals(FileUtil.compareFs(fs1,fs5),false);
assertEquals(FileUtil.compareFs(fs3,fs4),true);
assertEquals(FileUtil.compareFs(fs1,fs6),false);
}
@Test(timeout = 8000)
public void testCreateSymbolicLinkUsingJava() throws IOException {
final File simpleTar = new File(del, FILE);
OutputStream os = new FileOutputStream(simpleTar);
TarArchiveOutputStream tos = new TarArchiveOutputStream(os);
File untarFile = null;
try {
// Files to tar
final String tmpDir = "tmp/test";
File tmpDir1 = new File(tmpDir, "dir1/");
File tmpDir2 = new File(tmpDir, "dir2/");
      // Create the directories to tar up (removed again in the finally block)
tmpDir1.mkdirs();
tmpDir2.mkdirs();
java.nio.file.Path symLink = FileSystems
.getDefault().getPath(tmpDir1.getPath() + "/sl");
// Create Symbolic Link
Files.createSymbolicLink(symLink,
FileSystems.getDefault().getPath(tmpDir2.getPath())).toString();
assertTrue(Files.isSymbolicLink(symLink.toAbsolutePath()));
// put entries in tar file
putEntriesInTar(tos, tmpDir1.getParentFile());
tos.close();
untarFile = new File(tmpDir, "2");
// Untar using java
FileUtil.unTarUsingJava(simpleTar, untarFile, false);
// Check symbolic link and other directories are there in untar file
assertTrue(Files.exists(untarFile.toPath()));
assertTrue(Files.exists(FileSystems.getDefault().getPath(untarFile
.getPath(), tmpDir)));
assertTrue(Files.isSymbolicLink(FileSystems.getDefault().getPath(untarFile
.getPath().toString(), symLink.toString())));
} finally {
FileUtils.deleteDirectory(new File("tmp"));
tos.close();
}
}
private void putEntriesInTar(TarArchiveOutputStream tos, File f)
throws IOException {
if (Files.isSymbolicLink(f.toPath())) {
TarArchiveEntry tarEntry = new TarArchiveEntry(f.getPath(),
TarArchiveEntry.LF_SYMLINK);
tarEntry.setLinkName(Files.readSymbolicLink(f.toPath()).toString());
tos.putArchiveEntry(tarEntry);
tos.closeArchiveEntry();
return;
}
if (f.isDirectory()) {
tos.putArchiveEntry(new TarArchiveEntry(f));
tos.closeArchiveEntry();
for (File child : f.listFiles()) {
putEntriesInTar(tos, child);
}
}
if (f.isFile()) {
tos.putArchiveEntry(new TarArchiveEntry(f));
BufferedInputStream origin = new BufferedInputStream(
new FileInputStream(f));
int count;
byte[] data = new byte[2048];
while ((count = origin.read(data)) != -1) {
tos.write(data, 0, count);
}
tos.flush();
tos.closeArchiveEntry();
origin.close();
}
}
/**
* This test validates the correctness of {@link FileUtil#readLink(File)} in
* case of null pointer inputs.
*/
@Test
public void testReadSymlinkWithNullInput() {
String result = FileUtil.readLink(null);
Assert.assertEquals("", result);
}
/**
* This test validates the correctness of {@link FileUtil#readLink(File)}.
*
* @throws IOException
*/
@Test
public void testReadSymlink() throws IOException {
File file = new File(del, FILE);
File link = new File(del, "_link");
// Create a symbolic link
FileUtil.symLink(file.getAbsolutePath(), link.getAbsolutePath());
String result = FileUtil.readLink(link);
Assert.assertEquals(file.getAbsolutePath(), result);
}
/**
* This test validates the correctness of {@link FileUtil#readLink(File)} when
* it gets a file in input.
*
* @throws IOException
*/
@Test
public void testReadSymlinkWithAFileAsInput() throws IOException {
File file = new File(del, FILE);
String result = FileUtil.readLink(file);
Assert.assertEquals("", result);
file.delete();
}
/**
* Test that bytes are written out correctly to the local file system.
*/
@Test
public void testWriteBytesFileSystem() throws IOException {
URI uri = tmp.toURI();
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(uri, conf);
Path testPath = new Path(new Path(uri), "writebytes.out");
byte[] write = new byte[] {0x00, 0x01, 0x02, 0x03};
FileUtil.write(fs, testPath, write);
byte[] read = FileUtils.readFileToByteArray(new File(testPath.toUri()));
assertArrayEquals(write, read);
}
/**
* Test that a Collection of Strings are written out correctly to the local
* file system.
*/
@Test
public void testWriteStringsFileSystem() throws IOException {
URI uri = tmp.toURI();
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(uri, conf);
Path testPath = new Path(new Path(uri), "writestrings.out");
Collection<String> write = Arrays.asList("over", "the", "lazy", "dog");
FileUtil.write(fs, testPath, write, StandardCharsets.UTF_8);
List<String> read =
FileUtils.readLines(new File(testPath.toUri()), StandardCharsets.UTF_8);
assertEquals(write, read);
}
/**
* Test that a String is written out correctly to the local file system.
*/
@Test
public void testWriteStringFileSystem() throws IOException {
URI uri = tmp.toURI();
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(uri, conf);
Path testPath = new Path(new Path(uri), "writestring.out");
String write = "A" + "\u00ea" + "\u00f1" + "\u00fc" + "C";
FileUtil.write(fs, testPath, write, StandardCharsets.UTF_8);
String read = FileUtils.readFileToString(new File(testPath.toUri()),
StandardCharsets.UTF_8);
assertEquals(write, read);
}
/**
* Test that a String is written out correctly to the local file system
* without specifying a character set.
*/
@Test
public void testWriteStringNoCharSetFileSystem() throws IOException {
URI uri = tmp.toURI();
Configuration conf = new Configuration();
FileSystem fs = FileSystem.get(uri, conf);
Path testPath = new Path(new Path(uri), "writestring.out");
String write = "A" + "\u00ea" + "\u00f1" + "\u00fc" + "C";
FileUtil.write(fs, testPath, write);
String read = FileUtils.readFileToString(new File(testPath.toUri()),
StandardCharsets.UTF_8);
assertEquals(write, read);
}
/**
* Test that bytes are written out correctly to the local file system.
*/
@Test
public void testWriteBytesFileContext() throws IOException {
URI uri = tmp.toURI();
Configuration conf = new Configuration();
FileContext fc = FileContext.getFileContext(uri, conf);
Path testPath = new Path(new Path(uri), "writebytes.out");
byte[] write = new byte[] {0x00, 0x01, 0x02, 0x03};
FileUtil.write(fc, testPath, write);
byte[] read = FileUtils.readFileToByteArray(new File(testPath.toUri()));
assertArrayEquals(write, read);
}
/**
* Test that a Collection of Strings are written out correctly to the local
* file system.
*/
@Test
public void testWriteStringsFileContext() throws IOException {
URI uri = tmp.toURI();
Configuration conf = new Configuration();
FileContext fc = FileContext.getFileContext(uri, conf);
Path testPath = new Path(new Path(uri), "writestrings.out");
Collection<String> write = Arrays.asList("over", "the", "lazy", "dog");
FileUtil.write(fc, testPath, write, StandardCharsets.UTF_8);
List<String> read =
FileUtils.readLines(new File(testPath.toUri()), StandardCharsets.UTF_8);
assertEquals(write, read);
}
/**
* Test that a String is written out correctly to the local file system.
*/
@Test
public void testWriteStringFileContext() throws IOException {
URI uri = tmp.toURI();
Configuration conf = new Configuration();
FileContext fc = FileContext.getFileContext(uri, conf);
Path testPath = new Path(new Path(uri), "writestring.out");
String write = "A" + "\u00ea" + "\u00f1" + "\u00fc" + "C";
FileUtil.write(fc, testPath, write, StandardCharsets.UTF_8);
String read = FileUtils.readFileToString(new File(testPath.toUri()),
StandardCharsets.UTF_8);
assertEquals(write, read);
}
/**
* Test that a String is written out correctly to the local file system
* without specifying a character set.
*/
@Test
public void testWriteStringNoCharSetFileContext() throws IOException {
URI uri = tmp.toURI();
Configuration conf = new Configuration();
FileContext fc = FileContext.getFileContext(uri, conf);
Path testPath = new Path(new Path(uri), "writestring.out");
String write = "A" + "\u00ea" + "\u00f1" + "\u00fc" + "C";
FileUtil.write(fc, testPath, write);
String read = FileUtils.readFileToString(new File(testPath.toUri()),
StandardCharsets.UTF_8);
assertEquals(write, read);
}
/**
* The size of FileSystem cache.
*/
public static int getCacheSize() {
return FileSystem.cacheSize();
}
}
| []
| []
| []
| [] | [] | java | 0 | 0 | |
swift/common/utils.py | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Miscellaneous utility functions for use with Swift."""
from __future__ import print_function
import base64
import binascii
import bisect
import collections
import errno
import fcntl
import grp
import hashlib
import hmac
import json
import math
import operator
import os
import pwd
import re
import string
import struct
import sys
import time
import uuid
import functools
import platform
import email.parser
from random import random, shuffle
from contextlib import contextmanager, closing
import ctypes
import ctypes.util
from optparse import OptionParser
import traceback
import warnings
from tempfile import gettempdir, mkstemp, NamedTemporaryFile
import glob
import itertools
import stat
import datetime
import eventlet
import eventlet.debug
import eventlet.greenthread
import eventlet.patcher
import eventlet.semaphore
import pkg_resources
from eventlet import GreenPool, sleep, Timeout
from eventlet.event import Event
from eventlet.green import socket, threading
import eventlet.hubs
import eventlet.queue
import netifaces
import codecs
utf8_decoder = codecs.getdecoder('utf-8')
utf8_encoder = codecs.getencoder('utf-8')
import six
if six.PY2:
from eventlet.green import httplib as green_http_client
else:
from eventlet.green.http import client as green_http_client
utf16_decoder = codecs.getdecoder('utf-16')
utf16_encoder = codecs.getencoder('utf-16')
from six.moves import cPickle as pickle
from six.moves import configparser
from six.moves.configparser import (ConfigParser, NoSectionError,
NoOptionError, RawConfigParser)
from six.moves import range, http_client
from six.moves.urllib.parse import quote as _quote, unquote
from six.moves.urllib.parse import urlparse
from six.moves import UserList
from swift import gettext_ as _
import swift.common.exceptions
from swift.common.http import is_server_error
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.linkat import linkat
# For backwards compatibility with 3rd party middlewares
from swift.common.registry import register_swift_info, get_swift_info # noqa
# logging doesn't import patched as cleanly as one would like
from logging.handlers import SysLogHandler
import logging
logging.thread = eventlet.green.thread
logging.threading = eventlet.green.threading
logging._lock = logging.threading.RLock()
# setup notice level logging
NOTICE = 25
logging.addLevelName(NOTICE, 'NOTICE')
SysLogHandler.priority_map['NOTICE'] = 'notice'
# These are lazily pulled from libc elsewhere
_sys_fallocate = None
_posix_fadvise = None
_libc_socket = None
_libc_bind = None
_libc_accept = None
# see man -s 2 setpriority
_libc_setpriority = None
# see man -s 2 syscall
_posix_syscall = None
# If set to non-zero, fallocate routines will fail based on free space
# available being at or below this amount, in bytes.
FALLOCATE_RESERVE = 0
# Indicates if FALLOCATE_RESERVE is the percentage of free space (True) or
# the number of bytes (False).
FALLOCATE_IS_PERCENT = False
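# For illustration only: a fallocate_reserve setting of "10737418240" leaves
# FALLOCATE_IS_PERCENT False and reserves 10 GiB of free space, while "1%"
# flips FALLOCATE_IS_PERCENT to True with FALLOCATE_RESERVE = 1 (the parsing
# of that option lives elsewhere in Swift).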
# from /usr/include/linux/falloc.h
FALLOC_FL_KEEP_SIZE = 1
FALLOC_FL_PUNCH_HOLE = 2
# from /usr/src/linux-headers-*/include/uapi/linux/resource.h
PRIO_PROCESS = 0
# /usr/include/x86_64-linux-gnu/asm/unistd_64.h defines syscalls; there
# are many like it, but this one is mine (see man -s 2 ioprio_set)
def NR_ioprio_set():
"""Give __NR_ioprio_set value for your system."""
architecture = os.uname()[4]
arch_bits = platform.architecture()[0]
    # check for a supported system; currently x86_64 and AArch64 are handled
if architecture == 'x86_64' and arch_bits == '64bit':
return 251
elif architecture == 'aarch64' and arch_bits == '64bit':
return 30
raise OSError("Swift doesn't support ionice priority for %s %s" %
(architecture, arch_bits))
# This syscall number probably only works on x86_64 Linux systems; you
# can check whether it is correct on yours with something like this:
"""
#include <stdio.h>
#include <sys/syscall.h>
int main(int argc, const char* argv[]) {
printf("%d\n", __NR_ioprio_set);
return 0;
}
"""
# this is the value for "which" that says our who value will be a pid
# pulled out of /usr/src/linux-headers-*/include/linux/ioprio.h
IOPRIO_WHO_PROCESS = 1
IO_CLASS_ENUM = {
'IOPRIO_CLASS_RT': 1,
'IOPRIO_CLASS_BE': 2,
'IOPRIO_CLASS_IDLE': 3,
}
# the IOPRIO_PRIO_VALUE "macro" is also pulled from
# /usr/src/linux-headers-*/include/linux/ioprio.h
IOPRIO_CLASS_SHIFT = 13
def IOPRIO_PRIO_VALUE(class_, data):
return (((class_) << IOPRIO_CLASS_SHIFT) | data)
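# A worked example using the constants above (purely illustrative):
# IOPRIO_PRIO_VALUE(IO_CLASS_ENUM['IOPRIO_CLASS_BE'], 4)
#     == (2 << 13) | 4 == 16388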
# Used by hash_path to offer a bit more security when generating hashes for
# paths. It simply appends this value to all paths; guessing the hash a path
# will end up with would also require knowing this suffix.
HASH_PATH_SUFFIX = b''
HASH_PATH_PREFIX = b''
SWIFT_CONF_FILE = '/etc/swift/swift.conf'
# These constants are Linux-specific, and Python doesn't seem to know
# about them. We ask anyway just in case that ever gets fixed.
#
# The values were copied from the Linux 3.x kernel headers.
AF_ALG = getattr(socket, 'AF_ALG', 38)
F_SETPIPE_SZ = getattr(fcntl, 'F_SETPIPE_SZ', 1031)
O_TMPFILE = getattr(os, 'O_TMPFILE', 0o20000000 | os.O_DIRECTORY)
# Used by the parse_socket_string() function to validate IPv6 addresses
IPV6_RE = re.compile(r"^\[(?P<address>.*)\](:(?P<port>[0-9]+))?$")
MD5_OF_EMPTY_STRING = 'd41d8cd98f00b204e9800998ecf8427e'
RESERVED_BYTE = b'\x00'
RESERVED_STR = u'\x00'
RESERVED = '\x00'
LOG_LINE_DEFAULT_FORMAT = '{remote_addr} - - [{time.d}/{time.b}/{time.Y}' \
':{time.H}:{time.M}:{time.S} +0000] ' \
'"{method} {path}" {status} {content_length} ' \
'"{referer}" "{txn_id}" "{user_agent}" ' \
'{trans_time:.4f} "{additional_info}" {pid} ' \
'{policy_index}'
DEFAULT_LOCK_TIMEOUT = 10
class InvalidHashPathConfigError(ValueError):
def __str__(self):
return "[swift-hash]: both swift_hash_path_suffix and " \
"swift_hash_path_prefix are missing from %s" % SWIFT_CONF_FILE
def set_swift_dir(swift_dir):
"""
Sets the directory from which swift config files will be read. If the given
directory differs from that already set then the swift.conf file in the new
directory will be validated and storage policies will be reloaded from the
new swift.conf file.
:param swift_dir: non-default directory to read swift.conf from
"""
global HASH_PATH_SUFFIX
global HASH_PATH_PREFIX
global SWIFT_CONF_FILE
if (swift_dir is not None and
swift_dir != os.path.dirname(SWIFT_CONF_FILE)):
SWIFT_CONF_FILE = os.path.join(
swift_dir, os.path.basename(SWIFT_CONF_FILE))
HASH_PATH_PREFIX = b''
HASH_PATH_SUFFIX = b''
validate_configuration()
return True
return False
def validate_hash_conf():
global HASH_PATH_SUFFIX
global HASH_PATH_PREFIX
if not HASH_PATH_SUFFIX and not HASH_PATH_PREFIX:
hash_conf = ConfigParser()
if six.PY3:
# Use Latin1 to accept arbitrary bytes in the hash prefix/suffix
with open(SWIFT_CONF_FILE, encoding='latin1') as swift_conf_file:
hash_conf.readfp(swift_conf_file)
else:
with open(SWIFT_CONF_FILE) as swift_conf_file:
hash_conf.readfp(swift_conf_file)
try:
HASH_PATH_SUFFIX = hash_conf.get('swift-hash',
'swift_hash_path_suffix')
if six.PY3:
HASH_PATH_SUFFIX = HASH_PATH_SUFFIX.encode('latin1')
except (NoSectionError, NoOptionError):
pass
try:
HASH_PATH_PREFIX = hash_conf.get('swift-hash',
'swift_hash_path_prefix')
if six.PY3:
HASH_PATH_PREFIX = HASH_PATH_PREFIX.encode('latin1')
except (NoSectionError, NoOptionError):
pass
if not HASH_PATH_SUFFIX and not HASH_PATH_PREFIX:
raise InvalidHashPathConfigError()
try:
validate_hash_conf()
except (InvalidHashPathConfigError, IOError):
# could get monkey patched or lazy loaded
pass
def get_hmac(request_method, path, expires, key, digest="sha1",
ip_range=None):
"""
Returns the hexdigest string of the HMAC (see RFC 2104) for
the request.
:param request_method: Request method to allow.
:param path: The path to the resource to allow access to.
:param expires: Unix timestamp as an int for when the URL
expires.
:param key: HMAC shared secret.
:param digest: constructor or the string name for the digest to use in
calculating the HMAC
Defaults to SHA1
:param ip_range: The ip range from which the resource is allowed
to be accessed. We need to put the ip_range as the
first argument to hmac to avoid manipulation of the path
due to newlines being valid in paths
e.g. /v1/a/c/o\\n127.0.0.1
:returns: hexdigest str of the HMAC for the request using the specified
digest algorithm.
"""
# These are the three mandatory fields.
parts = [request_method, str(expires), path]
formats = [b"%s", b"%s", b"%s"]
if ip_range:
parts.insert(0, ip_range)
formats.insert(0, b"ip=%s")
if not isinstance(key, six.binary_type):
key = key.encode('utf8')
message = b'\n'.join(
fmt % (part if isinstance(part, six.binary_type)
else part.encode("utf-8"))
for fmt, part in zip(formats, parts))
if six.PY2 and isinstance(digest, six.string_types):
digest = getattr(hashlib, digest)
return hmac.new(key, message, digest).hexdigest()
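# Illustrative usage sketch for get_hmac(); the path, expiry and key below are
# made-up example values, not anything the module defines.
def _example_get_hmac():
    # HMAC-SHA256 over "GET\n1609459200\n/v1/AUTH_test/c/o" keyed with b'secret'
    return get_hmac('GET', '/v1/AUTH_test/c/o', expires=1609459200,
                    key=b'secret', digest='sha256')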
def backward(f, blocksize=4096):
"""
A generator returning lines from a file starting with the last line,
then the second last line, etc. i.e., it reads lines backwards.
Stops when the first line (if any) is read.
This is useful when searching for recent activity in very
large files.
:param f: file object to read
    :param blocksize: number of bytes to step backwards at each block
"""
f.seek(0, os.SEEK_END)
if f.tell() == 0:
return
last_row = b''
while f.tell() != 0:
try:
f.seek(-blocksize, os.SEEK_CUR)
except IOError:
blocksize = f.tell()
f.seek(-blocksize, os.SEEK_CUR)
block = f.read(blocksize)
f.seek(-blocksize, os.SEEK_CUR)
rows = block.split(b'\n')
rows[-1] = rows[-1] + last_row
while rows:
last_row = rows.pop(-1)
if rows and last_row:
yield last_row
yield last_row
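# Illustrative usage sketch for backward(); the file contents are made-up
# example data. A real caller would pass a log file opened in binary mode.
def _example_backward():
    import tempfile
    with tempfile.TemporaryFile() as f:
        f.write(b'first\nsecond\nthird\n')
        return list(backward(f))  # -> [b'third', b'second', b'first']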
# Used when reading config values
TRUE_VALUES = set(('true', '1', 'yes', 'on', 't', 'y'))
def non_negative_float(value):
"""
Check that the value casts to a float and is non-negative.
:param value: value to check
:raises ValueError: if the value cannot be cast to a float or is negative.
:return: a float
"""
value = float(value)
if value < 0:
raise ValueError
return value
def non_negative_int(value):
"""
Check that the value casts to an int and is a whole number.
:param value: value to check
:raises ValueError: if the value cannot be cast to an int or does not
represent a whole number.
:return: an int
"""
int_value = int(value)
if int_value != non_negative_float(value):
raise ValueError
return int_value
def config_true_value(value):
"""
Returns True if the value is either True or a string in TRUE_VALUES.
Returns False otherwise.
"""
return value is True or \
(isinstance(value, six.string_types) and value.lower() in TRUE_VALUES)
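# Illustrative sketch for config_true_value(); the inputs are arbitrary
# example values.
def _example_config_true_value():
    return (config_true_value('yes'),   # True
            config_true_value('0'),     # False
            config_true_value(True))    # True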
def config_positive_int_value(value):
"""
    Returns a positive int value if it can be cast by int() and it is an
    integer greater than 0 (zero is not accepted). Raises ValueError otherwise.
"""
try:
result = int(value)
if result < 1:
raise ValueError()
except (TypeError, ValueError):
raise ValueError(
            'Config option must be a positive int number, not "%s".' % value)
return result
def config_float_value(value, minimum=None, maximum=None):
try:
val = float(value)
if minimum is not None and val < minimum:
raise ValueError()
if maximum is not None and val > maximum:
raise ValueError()
return val
except (TypeError, ValueError):
min_ = ', greater than %s' % minimum if minimum is not None else ''
max_ = ', less than %s' % maximum if maximum is not None else ''
raise ValueError('Config option must be a number%s%s, not "%s".' %
(min_, max_, value))
def config_auto_int_value(value, default):
"""
Returns default if value is None or 'auto'.
Returns value as an int or raises ValueError otherwise.
"""
if value is None or \
(isinstance(value, six.string_types) and value.lower() == 'auto'):
return default
try:
value = int(value)
except (TypeError, ValueError):
raise ValueError('Config option must be an integer or the '
'string "auto", not "%s".' % value)
return value
def config_percent_value(value):
try:
return config_float_value(value, 0, 100) / 100.0
except ValueError as err:
raise ValueError("%s: %s" % (str(err), value))
def config_request_node_count_value(value):
try:
value_parts = value.lower().split()
rnc_value = int(value_parts[0])
except (ValueError, AttributeError):
pass
else:
if len(value_parts) == 1:
return lambda replicas: rnc_value
elif (len(value_parts) == 3 and
value_parts[1] == '*' and
value_parts[2] == 'replicas'):
return lambda replicas: rnc_value * replicas
raise ValueError(
'Invalid request_node_count value: %r' % value)
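# Illustrative sketch of the two accepted forms of the request_node_count
# option; the numbers are arbitrary example values.
def _example_request_node_count():
    fixed = config_request_node_count_value('3')
    scaled = config_request_node_count_value('2 * replicas')
    return fixed(3), scaled(3)  # -> (3, 6)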
def append_underscore(prefix):
if prefix and not prefix.endswith('_'):
prefix += '_'
return prefix
def config_read_reseller_options(conf, defaults):
"""
Read reseller_prefix option and associated options from configuration
Reads the reseller_prefix option, then reads options that may be
associated with a specific reseller prefix. Reads options such that an
option without a prefix applies to all reseller prefixes unless an option
has an explicit prefix.
:param conf: the configuration
:param defaults: a dict of default values. The key is the option
name. The value is either an array of strings or a string
:return: tuple of an array of reseller prefixes and a dict of option values
"""
reseller_prefix_opt = conf.get('reseller_prefix', 'AUTH').split(',')
reseller_prefixes = []
for prefix in [pre.strip() for pre in reseller_prefix_opt if pre.strip()]:
if prefix == "''":
prefix = ''
prefix = append_underscore(prefix)
if prefix not in reseller_prefixes:
reseller_prefixes.append(prefix)
if len(reseller_prefixes) == 0:
reseller_prefixes.append('')
# Get prefix-using config options
associated_options = {}
for prefix in reseller_prefixes:
associated_options[prefix] = dict(defaults)
associated_options[prefix].update(
config_read_prefixed_options(conf, '', defaults))
prefix_name = prefix if prefix != '' else "''"
associated_options[prefix].update(
config_read_prefixed_options(conf, prefix_name, defaults))
return reseller_prefixes, associated_options
def config_read_prefixed_options(conf, prefix_name, defaults):
"""
Read prefixed options from configuration
:param conf: the configuration
:param prefix_name: the prefix (including, if needed, an underscore)
:param defaults: a dict of default values. The dict supplies the
option name and type (string or comma separated string)
:return: a dict containing the options
"""
params = {}
for option_name in defaults.keys():
value = conf.get('%s%s' % (prefix_name, option_name))
if value:
if isinstance(defaults.get(option_name), list):
params[option_name] = []
for role in value.lower().split(','):
params[option_name].append(role.strip())
else:
params[option_name] = value.strip()
return params
def eventlet_monkey_patch():
"""
Install the appropriate Eventlet monkey patches.
"""
# NOTE(sileht):
# monkey-patching thread is required by python-keystoneclient;
# monkey-patching select is required by oslo.messaging pika driver
# if thread is monkey-patched.
eventlet.patcher.monkey_patch(all=False, socket=True, select=True,
thread=True)
# Trying to log threads while monkey-patched can lead to deadlocks; see
# https://bugs.launchpad.net/swift/+bug/1895739
logging.logThreads = 0
def noop_libc_function(*args):
return 0
def validate_configuration():
try:
validate_hash_conf()
except InvalidHashPathConfigError as e:
sys.exit("Error: %s" % e)
def load_libc_function(func_name, log_error=True,
fail_if_missing=False, errcheck=False):
"""
Attempt to find the function in libc, otherwise return a no-op func.
:param func_name: name of the function to pull from libc.
:param log_error: log an error when a function can't be found
:param fail_if_missing: raise an exception when a function can't be found.
Default behavior is to return a no-op function.
:param errcheck: boolean, if true install a wrapper on the function
to check for a return values of -1 and call
ctype.get_errno and raise an OSError
"""
try:
libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
func = getattr(libc, func_name)
except AttributeError:
if fail_if_missing:
raise
if log_error:
logging.warning(_("Unable to locate %s in libc. Leaving as a "
"no-op."), func_name)
return noop_libc_function
if errcheck:
def _errcheck(result, f, args):
if result == -1:
errcode = ctypes.get_errno()
raise OSError(errcode, os.strerror(errcode))
return result
func.errcheck = _errcheck
return func
def generate_trans_id(trans_id_suffix):
return 'tx%s-%010x%s' % (
uuid.uuid4().hex[:21], int(time.time()), quote(trans_id_suffix))
def get_policy_index(req_headers, res_headers):
"""
Returns the appropriate index of the storage policy for the request from
a proxy server
:param req_headers: dict of the request headers.
:param res_headers: dict of the response headers.
:returns: string index of storage policy, or None
"""
header = 'X-Backend-Storage-Policy-Index'
policy_index = res_headers.get(header, req_headers.get(header))
if isinstance(policy_index, six.binary_type) and not six.PY2:
policy_index = policy_index.decode('ascii')
return str(policy_index) if policy_index is not None else None
class _UTC(datetime.tzinfo):
"""
A tzinfo class for datetime objects that returns a 0 timedelta (UTC time)
"""
def dst(self, dt):
return datetime.timedelta(0)
utcoffset = dst
def tzname(self, dt):
return 'UTC'
UTC = _UTC()
class LogStringFormatter(string.Formatter):
def __init__(self, default='', quote=False):
super(LogStringFormatter, self).__init__()
self.default = default
self.quote = quote
def format_field(self, value, spec):
if not value:
return self.default
else:
log = super(LogStringFormatter, self).format_field(value, spec)
if self.quote:
return quote(log, ':/{}')
else:
return log
class StrAnonymizer(str):
"""
    Class that allows a string to be obtained either anonymized or simply
    quoted.
"""
def __new__(cls, data, method, salt):
method = method.lower()
if method not in (hashlib.algorithms if six.PY2 else
hashlib.algorithms_guaranteed):
raise ValueError('Unsupported hashing method: %r' % method)
s = str.__new__(cls, data or '')
s.method = method
s.salt = salt
return s
@property
def anonymized(self):
if not self:
return self
else:
if self.method == 'md5':
h = md5(usedforsecurity=False)
else:
h = getattr(hashlib, self.method)()
if self.salt:
h.update(six.b(self.salt))
h.update(six.b(self))
return '{%s%s}%s' % ('S' if self.salt else '', self.method.upper(),
h.hexdigest())
class StrFormatTime(object):
"""
    Class that exposes formatted representations or components of a time.
"""
def __init__(self, ts):
self.time = ts
self.time_struct = time.gmtime(ts)
def __str__(self):
return "%.9f" % self.time
def __getattr__(self, attr):
if attr not in ['a', 'A', 'b', 'B', 'c', 'd', 'H',
'I', 'j', 'm', 'M', 'p', 'S', 'U',
'w', 'W', 'x', 'X', 'y', 'Y', 'Z']:
raise ValueError(("The attribute %s is not a correct directive "
"for time.strftime formater.") % attr)
return datetime.datetime(*self.time_struct[:-2],
tzinfo=UTC).strftime('%' + attr)
@property
def asctime(self):
return time.asctime(self.time_struct)
@property
def datetime(self):
return time.strftime('%d/%b/%Y/%H/%M/%S', self.time_struct)
@property
def iso8601(self):
return time.strftime('%Y-%m-%dT%H:%M:%S', self.time_struct)
@property
def ms(self):
return self.__str__().split('.')[1][:3]
@property
def us(self):
return self.__str__().split('.')[1][:6]
@property
def ns(self):
return self.__str__().split('.')[1]
@property
def s(self):
return self.__str__().split('.')[0]
def get_log_line(req, res, trans_time, additional_info, fmt,
anonymization_method, anonymization_salt):
"""
Make a line for logging that matches the documented log line format
for backend servers.
:param req: the request.
:param res: the response.
:param trans_time: the time the request took to complete, a float.
:param additional_info: a string to log at the end of the line
:returns: a properly formatted line for logging.
"""
policy_index = get_policy_index(req.headers, res.headers)
if req.path.startswith('/'):
disk, partition, account, container, obj = split_path(req.path, 0, 5,
True)
else:
disk, partition, account, container, obj = (None, ) * 5
replacements = {
'remote_addr': StrAnonymizer(req.remote_addr, anonymization_method,
anonymization_salt),
'time': StrFormatTime(time.time()),
'method': req.method,
'path': StrAnonymizer(req.path, anonymization_method,
anonymization_salt),
'disk': disk,
'partition': partition,
'account': StrAnonymizer(account, anonymization_method,
anonymization_salt),
'container': StrAnonymizer(container, anonymization_method,
anonymization_salt),
'object': StrAnonymizer(obj, anonymization_method,
anonymization_salt),
'status': res.status.split()[0],
'content_length': res.content_length,
'referer': StrAnonymizer(req.referer, anonymization_method,
anonymization_salt),
'txn_id': req.headers.get('x-trans-id'),
'user_agent': StrAnonymizer(req.user_agent, anonymization_method,
anonymization_salt),
'trans_time': trans_time,
'additional_info': additional_info,
'pid': os.getpid(),
'policy_index': policy_index,
}
return LogStringFormatter(default='-').format(fmt, **replacements)
def get_trans_id_time(trans_id):
if len(trans_id) >= 34 and \
trans_id.startswith('tx') and trans_id[23] == '-':
try:
return int(trans_id[24:34], 16)
except ValueError:
pass
return None
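# Illustrative round trip between generate_trans_id() and get_trans_id_time();
# the suffix is an arbitrary example value.
def _example_trans_id_round_trip():
    trans_id = generate_trans_id('-example')
    # the embedded creation time is recoverable, roughly int(time.time())
    return get_trans_id_time(trans_id)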
def config_fallocate_value(reserve_value):
"""
Returns fallocate reserve_value as an int or float.
Returns is_percent as a boolean.
    Raises a ValueError on an invalid fallocate value.
"""
try:
if str(reserve_value[-1:]) == '%':
reserve_value = float(reserve_value[:-1])
is_percent = True
else:
reserve_value = int(reserve_value)
is_percent = False
except ValueError:
raise ValueError('Error: %s is an invalid value for fallocate'
'_reserve.' % reserve_value)
return reserve_value, is_percent
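# Illustrative sketch of the two accepted forms of fallocate_reserve - an
# absolute byte count and a percentage; the values are arbitrary examples.
def _example_config_fallocate_value():
    return (config_fallocate_value('10737418240'),  # -> (10737418240, False)
            config_fallocate_value('2%'))           # -> (2.0, True)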
class FileLikeIter(object):
def __init__(self, iterable):
"""
Wraps an iterable to behave as a file-like object.
The iterable must be a byte string or yield byte strings.
"""
if isinstance(iterable, bytes):
iterable = (iterable, )
self.iterator = iter(iterable)
self.buf = None
self.closed = False
def __iter__(self):
return self
def next(self):
"""
next(x) -> the next value, or raise StopIteration
"""
if self.closed:
raise ValueError('I/O operation on closed file')
if self.buf:
rv = self.buf
self.buf = None
return rv
else:
return next(self.iterator)
__next__ = next
def read(self, size=-1):
"""
read([size]) -> read at most size bytes, returned as a bytes string.
If the size argument is negative or omitted, read until EOF is reached.
Notice that when in non-blocking mode, less data than what was
requested may be returned, even if no size parameter was given.
"""
if self.closed:
raise ValueError('I/O operation on closed file')
if size < 0:
return b''.join(self)
elif not size:
chunk = b''
elif self.buf:
chunk = self.buf
self.buf = None
else:
try:
chunk = next(self.iterator)
except StopIteration:
return b''
if len(chunk) > size:
self.buf = chunk[size:]
chunk = chunk[:size]
return chunk
def readline(self, size=-1):
"""
readline([size]) -> next line from the file, as a bytes string.
Retain newline. A non-negative size argument limits the maximum
number of bytes to return (an incomplete line may be returned then).
Return an empty string at EOF.
"""
if self.closed:
raise ValueError('I/O operation on closed file')
data = b''
while b'\n' not in data and (size < 0 or len(data) < size):
if size < 0:
chunk = self.read(1024)
else:
chunk = self.read(size - len(data))
if not chunk:
break
data += chunk
if b'\n' in data:
data, sep, rest = data.partition(b'\n')
data += sep
if self.buf:
self.buf = rest + self.buf
else:
self.buf = rest
return data
def readlines(self, sizehint=-1):
"""
readlines([size]) -> list of bytes strings, each a line from the file.
Call readline() repeatedly and return a list of the lines so read.
The optional size argument, if given, is an approximate bound on the
total number of bytes in the lines returned.
"""
if self.closed:
raise ValueError('I/O operation on closed file')
lines = []
while True:
line = self.readline(sizehint)
if not line:
break
lines.append(line)
if sizehint >= 0:
sizehint -= len(line)
if sizehint <= 0:
break
return lines
def close(self):
"""
close() -> None or (perhaps) an integer. Close the file.
Sets data attribute .closed to True. A closed file cannot be used for
further I/O operations. close() may be called more than once without
error. Some kinds of file objects (for example, opened by popen())
may return an exit status upon closing.
"""
self.iterator = None
self.closed = True
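# Illustrative usage sketch for FileLikeIter; the chunk contents are made-up
# example data.
def _example_file_like_iter():
    chunks = iter([b'hello ', b'world\n', b'bye'])
    f = FileLikeIter(chunks)
    first = f.readline()  # -> b'hello world\n'
    rest = f.read()       # -> b'bye'
    return first, rest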
def fs_has_free_space(fs_path, space_needed, is_percent):
"""
Check to see whether or not a filesystem has the given amount of space
free. Unlike fallocate(), this does not reserve any space.
:param fs_path: path to a file or directory on the filesystem; typically
the path to the filesystem's mount point
:param space_needed: minimum bytes or percentage of free space
:param is_percent: if True, then space_needed is treated as a percentage
of the filesystem's capacity; if False, space_needed is a number of
free bytes.
:returns: True if the filesystem has at least that much free space,
False otherwise
:raises OSError: if fs_path does not exist
"""
st = os.statvfs(fs_path)
free_bytes = st.f_frsize * st.f_bavail
if is_percent:
size_bytes = st.f_frsize * st.f_blocks
free_percent = float(free_bytes) / float(size_bytes) * 100
return free_percent >= space_needed
else:
return free_bytes >= space_needed
class _LibcWrapper(object):
"""
A callable object that forwards its calls to a C function from libc.
These objects are lazy. libc will not be checked until someone tries to
either call the function or check its availability.
_LibcWrapper objects have an "available" property; if true, then libc
has the function of that name. If false, then calls will fail with a
NotImplementedError.
"""
def __init__(self, func_name):
self._func_name = func_name
self._func_handle = None
self._loaded = False
def _ensure_loaded(self):
if not self._loaded:
func_name = self._func_name
try:
# Keep everything in this try-block in local variables so
# that a typo in self.some_attribute_name doesn't raise a
# spurious AttributeError.
func_handle = load_libc_function(
func_name, fail_if_missing=True)
self._func_handle = func_handle
except AttributeError:
# We pass fail_if_missing=True to load_libc_function and
# then ignore the error. It's weird, but otherwise we have
# to check if self._func_handle is noop_libc_function, and
# that's even weirder.
pass
self._loaded = True
@property
def available(self):
self._ensure_loaded()
return bool(self._func_handle)
def __call__(self, *args):
if self.available:
return self._func_handle(*args)
else:
raise NotImplementedError(
"No function %r found in libc" % self._func_name)
_fallocate_enabled = True
_fallocate_warned_about_missing = False
_sys_fallocate = _LibcWrapper('fallocate')
_sys_posix_fallocate = _LibcWrapper('posix_fallocate')
def disable_fallocate():
global _fallocate_enabled
_fallocate_enabled = False
def fallocate(fd, size, offset=0):
"""
Pre-allocate disk space for a file.
This function can be disabled by calling disable_fallocate(). If no
suitable C function is available in libc, this function is a no-op.
:param fd: file descriptor
:param size: size to allocate (in bytes)
"""
global _fallocate_enabled
if not _fallocate_enabled:
return
if size < 0:
size = 0 # Done historically; not really sure why
if size >= (1 << 63):
raise ValueError('size must be less than 2 ** 63')
if offset < 0:
raise ValueError('offset must be non-negative')
if offset >= (1 << 63):
raise ValueError('offset must be less than 2 ** 63')
# Make sure there's some (configurable) amount of free space in
# addition to the number of bytes we're allocating.
if FALLOCATE_RESERVE:
st = os.fstatvfs(fd)
free = st.f_frsize * st.f_bavail - size
if FALLOCATE_IS_PERCENT:
free = (float(free) / float(st.f_frsize * st.f_blocks)) * 100
if float(free) <= float(FALLOCATE_RESERVE):
raise OSError(
errno.ENOSPC,
'FALLOCATE_RESERVE fail %g <= %g' %
(free, FALLOCATE_RESERVE))
if _sys_fallocate.available:
# Parameters are (fd, mode, offset, length).
#
# mode=FALLOC_FL_KEEP_SIZE pre-allocates invisibly (without
# affecting the reported file size).
ret = _sys_fallocate(
fd, FALLOC_FL_KEEP_SIZE, ctypes.c_uint64(offset),
ctypes.c_uint64(size))
err = ctypes.get_errno()
elif _sys_posix_fallocate.available:
# Parameters are (fd, offset, length).
ret = _sys_posix_fallocate(fd, ctypes.c_uint64(offset),
ctypes.c_uint64(size))
err = ctypes.get_errno()
else:
# No suitable fallocate-like function is in our libc. Warn about it,
# but just once per process, and then do nothing.
global _fallocate_warned_about_missing
if not _fallocate_warned_about_missing:
logging.warning(_("Unable to locate fallocate, posix_fallocate in "
"libc. Leaving as a no-op."))
_fallocate_warned_about_missing = True
return
if ret and err not in (0, errno.ENOSYS, errno.EOPNOTSUPP,
errno.EINVAL):
raise OSError(err, 'Unable to fallocate(%s)' % size)
def punch_hole(fd, offset, length):
"""
De-allocate disk space in the middle of a file.
:param fd: file descriptor
:param offset: index of first byte to de-allocate
:param length: number of bytes to de-allocate
"""
if offset < 0:
raise ValueError('offset must be non-negative')
if offset >= (1 << 63):
raise ValueError('offset must be less than 2 ** 63')
if length <= 0:
raise ValueError('length must be positive')
if length >= (1 << 63):
raise ValueError('length must be less than 2 ** 63')
if _sys_fallocate.available:
# Parameters are (fd, mode, offset, length).
ret = _sys_fallocate(
fd,
FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
ctypes.c_uint64(offset),
ctypes.c_uint64(length))
err = ctypes.get_errno()
if ret and err:
mode_str = "FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE"
raise OSError(err, "Unable to fallocate(%d, %s, %d, %d)" % (
fd, mode_str, offset, length))
else:
raise OSError(errno.ENOTSUP,
'No suitable C function found for hole punching')
def fsync(fd):
"""
Sync modified file data and metadata to disk.
:param fd: file descriptor
"""
if hasattr(fcntl, 'F_FULLSYNC'):
try:
fcntl.fcntl(fd, fcntl.F_FULLSYNC)
except IOError as e:
raise OSError(e.errno, 'Unable to F_FULLSYNC(%s)' % fd)
else:
os.fsync(fd)
def fdatasync(fd):
"""
Sync modified file data to disk.
:param fd: file descriptor
"""
try:
os.fdatasync(fd)
except AttributeError:
fsync(fd)
def fsync_dir(dirpath):
"""
Sync directory entries to disk.
:param dirpath: Path to the directory to be synced.
"""
dirfd = None
try:
dirfd = os.open(dirpath, os.O_DIRECTORY | os.O_RDONLY)
fsync(dirfd)
except OSError as err:
if err.errno == errno.ENOTDIR:
# Raise error if someone calls fsync_dir on a non-directory
raise
logging.warning(_('Unable to perform fsync() on directory %(dir)s:'
' %(err)s'),
{'dir': dirpath, 'err': os.strerror(err.errno)})
finally:
if dirfd:
os.close(dirfd)
def drop_buffer_cache(fd, offset, length):
"""
Drop 'buffer' cache for the given range of the given file.
:param fd: file descriptor
:param offset: start offset
:param length: length
"""
global _posix_fadvise
if _posix_fadvise is None:
_posix_fadvise = load_libc_function('posix_fadvise64')
# 4 means "POSIX_FADV_DONTNEED"
ret = _posix_fadvise(fd, ctypes.c_uint64(offset),
ctypes.c_uint64(length), 4)
if ret != 0:
logging.warning("posix_fadvise64(%(fd)s, %(offset)s, %(length)s, 4) "
"-> %(ret)s", {'fd': fd, 'offset': offset,
'length': length, 'ret': ret})
NORMAL_FORMAT = "%016.05f"
INTERNAL_FORMAT = NORMAL_FORMAT + '_%016x'
SHORT_FORMAT = NORMAL_FORMAT + '_%x'
MAX_OFFSET = (16 ** 16) - 1
PRECISION = 1e-5
# Setting this to True will cause the internal format to always display
# extended digits - even when the value is equivalent to the normalized form.
# This isn't ideal during an upgrade when some servers might not understand
# the new time format - but flipping it to True works great for testing.
FORCE_INTERNAL = False # or True
@functools.total_ordering
class Timestamp(object):
"""
Internal Representation of Swift Time.
The normalized form of the X-Timestamp header looks like a float
with a fixed width to ensure stable string sorting - normalized
timestamps look like "1402464677.04188"
    To support overwrites of existing data without modifying the original
    timestamp, but still maintain consistency, a second internal offset vector
    is appended to the normalized timestamp form; it compares and sorts
    greater than the fixed width float format but less than a newer timestamp.
The internalized format of timestamps looks like
"1402464677.04188_0000000000000000" - the portion after the underscore is
the offset and is a formatted hexadecimal integer.
The internalized form is not exposed to clients in responses from
Swift. Normal client operations will not create a timestamp with an
offset.
The Timestamp class in common.utils supports internalized and
normalized formatting of timestamps and also comparison of timestamp
values. When the offset value of a Timestamp is 0 - it's considered
insignificant and need not be represented in the string format; to
support backwards compatibility during a Swift upgrade the
internalized and normalized form of a Timestamp with an
insignificant offset are identical. When a timestamp includes an
offset it will always be represented in the internalized form, but
is still excluded from the normalized form. Timestamps with an
equivalent timestamp portion (the float part) will compare and order
by their offset. Timestamps with a greater timestamp portion will
always compare and order greater than a Timestamp with a lesser
    timestamp regardless of its offset. String comparison and ordering
is guaranteed for the internalized string format, and is backwards
compatible for normalized timestamps which do not include an offset.
"""
def __init__(self, timestamp, offset=0, delta=0, check_bounds=True):
"""
Create a new Timestamp.
:param timestamp: time in seconds since the Epoch, may be any of:
* a float or integer
* normalized/internalized string
* another instance of this class (offset is preserved)
:param offset: the second internal offset vector, an int
:param delta: deca-microsecond difference from the base timestamp
param, an int
"""
if isinstance(timestamp, bytes):
timestamp = timestamp.decode('ascii')
if isinstance(timestamp, six.string_types):
base, base_offset = timestamp.partition('_')[::2]
self.timestamp = float(base)
if '_' in base_offset:
raise ValueError('invalid literal for int() with base 16: '
'%r' % base_offset)
if base_offset:
self.offset = int(base_offset, 16)
else:
self.offset = 0
else:
self.timestamp = float(timestamp)
self.offset = getattr(timestamp, 'offset', 0)
# increment offset
if offset >= 0:
self.offset += offset
else:
raise ValueError('offset must be non-negative')
if self.offset > MAX_OFFSET:
raise ValueError('offset must be smaller than %d' % MAX_OFFSET)
self.raw = int(round(self.timestamp / PRECISION))
# add delta
if delta:
self.raw = self.raw + delta
if self.raw <= 0:
raise ValueError(
'delta must be greater than %d' % (-1 * self.raw))
self.timestamp = float(self.raw * PRECISION)
if check_bounds:
if self.timestamp < 0:
raise ValueError('timestamp cannot be negative')
if self.timestamp >= 10000000000:
raise ValueError('timestamp too large')
@classmethod
def now(cls, offset=0, delta=0):
return cls(time.time(), offset=offset, delta=delta)
def __repr__(self):
return INTERNAL_FORMAT % (self.timestamp, self.offset)
def __str__(self):
raise TypeError('You must specify which string format is required')
def __float__(self):
return self.timestamp
def __int__(self):
return int(self.timestamp)
def __nonzero__(self):
return bool(self.timestamp or self.offset)
def __bool__(self):
return self.__nonzero__()
@property
def normal(self):
return NORMAL_FORMAT % self.timestamp
@property
def internal(self):
if self.offset or FORCE_INTERNAL:
return INTERNAL_FORMAT % (self.timestamp, self.offset)
else:
return self.normal
@property
def short(self):
if self.offset or FORCE_INTERNAL:
return SHORT_FORMAT % (self.timestamp, self.offset)
else:
return self.normal
@property
def isoformat(self):
"""
Get an isoformat string representation of the 'normal' part of the
Timestamp with microsecond precision and no trailing timezone, for
example:
1970-01-01T00:00:00.000000
:return: an isoformat string
"""
t = float(self.normal)
if six.PY3:
            # On Python 3, round manually using the ROUND_HALF_EVEN rounding
            # method, to match the rounding used by Python 2. Python 3
            # initially used a different method, but Python 3.4.4 and 3.5.1
            # use ROUND_HALF_EVEN again, as Python 2 does.
# See https://bugs.python.org/issue23517
frac, t = math.modf(t)
us = round(frac * 1e6)
if us >= 1000000:
t += 1
us -= 1000000
elif us < 0:
t -= 1
us += 1000000
dt = datetime.datetime.utcfromtimestamp(t)
dt = dt.replace(microsecond=us)
else:
dt = datetime.datetime.utcfromtimestamp(t)
isoformat = dt.isoformat()
# python isoformat() doesn't include msecs when zero
if len(isoformat) < len("1970-01-01T00:00:00.000000"):
isoformat += ".000000"
return isoformat
@classmethod
def from_isoformat(cls, date_string):
"""
Parse an isoformat string representation of time to a Timestamp object.
        :param date_string: a string formatted as per a Timestamp.isoformat
property.
:return: an instance of this class.
"""
start = datetime.datetime.strptime(date_string, "%Y-%m-%dT%H:%M:%S.%f")
delta = start - EPOCH
# This calculation is based on Python 2.7's Modules/datetimemodule.c,
# function delta_to_microseconds(), but written in Python.
return cls(delta.total_seconds())
def ceil(self):
"""
Return the 'normal' part of the timestamp rounded up to the nearest
integer number of seconds.
This value should be used whenever the second-precision Last-Modified
time of a resource is required.
:return: a float value with second precision.
"""
return math.ceil(float(self))
def __eq__(self, other):
if other is None:
return False
if not isinstance(other, Timestamp):
try:
other = Timestamp(other, check_bounds=False)
except ValueError:
return False
return self.internal == other.internal
def __ne__(self, other):
return not (self == other)
def __lt__(self, other):
if other is None:
return False
if not isinstance(other, Timestamp):
other = Timestamp(other, check_bounds=False)
if other.timestamp < 0:
return False
if other.timestamp >= 10000000000:
return True
return self.internal < other.internal
def __hash__(self):
return hash(self.internal)
def __invert__(self):
if self.offset:
raise ValueError('Cannot invert timestamps with offsets')
return Timestamp((999999999999999 - self.raw) * PRECISION)
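# Illustrative sketch of the normalized and internalized string forms, and of
# how an offset affects only the latter; the timestamp value is a made-up
# example.
def _example_timestamp_forms():
    t = Timestamp(1402464677.04188)
    t_offset = Timestamp(1402464677.04188, offset=3)
    return (t.normal,           # '1402464677.04188'
            t.internal,         # '1402464677.04188'
            t_offset.internal)  # '1402464677.04188_0000000000000003'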
def encode_timestamps(t1, t2=None, t3=None, explicit=False):
"""
Encode up to three timestamps into a string. Unlike a Timestamp object, the
    encoded string does NOT use fixed width fields and consequently no
relative chronology of the timestamps can be inferred from lexicographic
sorting of encoded timestamp strings.
The format of the encoded string is:
<t1>[<+/-><t2 - t1>[<+/-><t3 - t2>]]
i.e. if t1 = t2 = t3 then just the string representation of t1 is returned,
otherwise the time offsets for t2 and t3 are appended. If explicit is True
then the offsets for t2 and t3 are always appended even if zero.
Note: any offset value in t1 will be preserved, but offsets on t2 and t3
are not preserved. In the anticipated use cases for this method (and the
inverse decode_timestamps method) the timestamps passed as t2 and t3 are
not expected to have offsets as they will be timestamps associated with a
POST request. In the case where the encoding is used in a container objects
table row, t1 could be the PUT or DELETE time but t2 and t3 represent the
content type and metadata times (if different from the data file) i.e.
correspond to POST timestamps. In the case where the encoded form is used
in a .meta file name, t1 and t2 both correspond to POST timestamps.
"""
form = '{0}'
values = [t1.short]
if t2 is not None:
t2_t1_delta = t2.raw - t1.raw
explicit = explicit or (t2_t1_delta != 0)
values.append(t2_t1_delta)
if t3 is not None:
t3_t2_delta = t3.raw - t2.raw
explicit = explicit or (t3_t2_delta != 0)
values.append(t3_t2_delta)
if explicit:
form += '{1:+x}'
if t3 is not None:
form += '{2:+x}'
return form.format(*values)
def decode_timestamps(encoded, explicit=False):
"""
Parses a string of the form generated by encode_timestamps and returns
a tuple of the three component timestamps. If explicit is False, component
timestamps that are not explicitly encoded will be assumed to have zero
delta from the previous component and therefore take the value of the
previous component. If explicit is True, component timestamps that are
not explicitly encoded will be returned with value None.
"""
    # TODO: some tests, e.g. in test_replicator, put float timestamp values
    # into container dbs, hence this defensive check, but in the real world
    # this may never happen.
if not isinstance(encoded, six.string_types):
ts = Timestamp(encoded)
return ts, ts, ts
parts = []
signs = []
pos_parts = encoded.split('+')
for part in pos_parts:
# parse time components and their signs
# e.g. x-y+z --> parts = [x, y, z] and signs = [+1, -1, +1]
neg_parts = part.split('-')
parts = parts + neg_parts
signs = signs + [1] + [-1] * (len(neg_parts) - 1)
t1 = Timestamp(parts[0])
t2 = t3 = None
if len(parts) > 1:
t2 = t1
delta = signs[1] * int(parts[1], 16)
# if delta = 0 we want t2 = t3 = t1 in order to
# preserve any offset in t1 - only construct a distinct
# timestamp if there is a non-zero delta.
if delta:
t2 = Timestamp((t1.raw + delta) * PRECISION)
elif not explicit:
t2 = t1
if len(parts) > 2:
t3 = t2
delta = signs[2] * int(parts[2], 16)
if delta:
t3 = Timestamp((t2.raw + delta) * PRECISION)
elif not explicit:
t3 = t2
return t1, t2, t3
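# Illustrative round trip through the variable-width encoding used in
# container db rows and .meta file names; the timestamps are made-up values.
def _example_encode_decode_timestamps():
    t1 = Timestamp(1402464677.04188)
    t2 = Timestamp(1402464677.04190)
    encoded = encode_timestamps(t1, t2)  # -> '1402464677.04188+2'
    return decode_timestamps(encoded)    # -> (t1, t2, t3) with t3 == t2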
def normalize_timestamp(timestamp):
"""
Format a timestamp (string or numeric) into a standardized
xxxxxxxxxx.xxxxx (10.5) format.
Note that timestamps using values greater than or equal to November 20th,
2286 at 17:46 UTC will use 11 digits to represent the number of
seconds.
:param timestamp: unix timestamp
:returns: normalized timestamp as a string
"""
return Timestamp(timestamp).normal
EPOCH = datetime.datetime(1970, 1, 1)
def last_modified_date_to_timestamp(last_modified_date_str):
"""
Convert a last modified date (like you'd get from a container listing,
e.g. 2014-02-28T23:22:36.698390) to a float.
"""
return Timestamp.from_isoformat(last_modified_date_str)
def normalize_delete_at_timestamp(timestamp, high_precision=False):
"""
Format a timestamp (string or numeric) into a standardized
xxxxxxxxxx (10) or xxxxxxxxxx.xxxxx (10.5) format.
Note that timestamps less than 0000000000 are raised to
0000000000 and values greater than November 20th, 2286 at
17:46:39 UTC will be capped at that date and time, resulting in
no return value exceeding 9999999999.99999 (or 9999999999 if
using low-precision).
This cap is because the expirer is already working through a
sorted list of strings that were all a length of 10. Adding
another digit would mess up the sort and cause the expirer to
break from processing early. By 2286, this problem will need to
be fixed, probably by creating an additional .expiring_objects
account to work from with 11 (or more) digit container names.
:param timestamp: unix timestamp
:returns: normalized timestamp as a string
"""
fmt = '%016.5f' if high_precision else '%010d'
return fmt % min(max(0, float(timestamp)), 9999999999.99999)
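# Illustrative sketch of the low- and high-precision forms of an
# expiring-object timestamp; 42 is an arbitrary example value.
def _example_normalize_delete_at():
    return (normalize_delete_at_timestamp(42),        # -> '0000000042'
            normalize_delete_at_timestamp(42, True))  # -> '0000000042.00000'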
def mkdirs(path):
"""
Ensures the path is a directory or makes it if not. Errors if the path
exists but is a file or on permissions failure.
:param path: path to create
"""
if not os.path.isdir(path):
try:
os.makedirs(path)
except OSError as err:
if err.errno != errno.EEXIST or not os.path.isdir(path):
raise
def makedirs_count(path, count=0):
"""
Same as os.makedirs() except that this method returns the number of
new directories that had to be created.
    Also, this does not raise an error if the target directory already exists.
This behaviour is similar to Python 3.x's os.makedirs() called with
exist_ok=True. Also similar to swift.common.utils.mkdirs()
https://hg.python.org/cpython/file/v3.4.2/Lib/os.py#l212
"""
head, tail = os.path.split(path)
if not tail:
head, tail = os.path.split(head)
if head and tail and not os.path.exists(head):
count = makedirs_count(head, count)
if tail == os.path.curdir:
return
try:
os.mkdir(path)
except OSError as e:
# EEXIST may also be raised if path exists as a file
# Do not let that pass.
if e.errno != errno.EEXIST or not os.path.isdir(path):
raise
else:
count += 1
return count
def renamer(old, new, fsync=True):
"""
Attempt to fix / hide race conditions like empty object directories
being removed by backend processes during uploads, by retrying.
The containing directory of 'new' and of all newly created directories are
fsync'd by default. This _will_ come at a performance penalty. In cases
where these additional fsyncs are not necessary, it is expected that the
caller of renamer() turn it off explicitly.
:param old: old path to be renamed
:param new: new path to be renamed to
:param fsync: fsync on containing directory of new and also all
the newly created directories.
"""
dirpath = os.path.dirname(new)
try:
count = makedirs_count(dirpath)
os.rename(old, new)
except OSError:
count = makedirs_count(dirpath)
os.rename(old, new)
if fsync:
# If count=0, no new directories were created. But we still need to
# fsync leaf dir after os.rename().
# If count>0, starting from leaf dir, fsync parent dirs of all
# directories created by makedirs_count()
for i in range(0, count + 1):
fsync_dir(dirpath)
dirpath = os.path.dirname(dirpath)
def link_fd_to_path(fd, target_path, dirs_created=0, retries=2, fsync=True):
"""
    Creates a link to the file descriptor at the specified target_path. This
    method does not close the fd for you. Unlike rename, linkat() cannot
    overwrite target_path if it exists, so we unlink and try again.
Attempts to fix / hide race conditions like empty object directories
being removed by backend processes during uploads, by retrying.
:param fd: File descriptor to be linked
:param target_path: Path in filesystem where fd is to be linked
    :param dirs_created: Number of newly created directories that need to
be fsync'd.
:param retries: number of retries to make
:param fsync: fsync on containing directory of target_path and also all
the newly created directories.
"""
dirpath = os.path.dirname(target_path)
for _junk in range(0, retries):
try:
linkat(linkat.AT_FDCWD, "/proc/self/fd/%d" % (fd),
linkat.AT_FDCWD, target_path, linkat.AT_SYMLINK_FOLLOW)
break
except IOError as err:
if err.errno == errno.ENOENT:
dirs_created = makedirs_count(dirpath)
elif err.errno == errno.EEXIST:
try:
os.unlink(target_path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
else:
raise
if fsync:
for i in range(0, dirs_created + 1):
fsync_dir(dirpath)
dirpath = os.path.dirname(dirpath)
def split_path(path, minsegs=1, maxsegs=None, rest_with_last=False):
"""
Validate and split the given HTTP request path.
**Examples**::
['a'] = split_path('/a')
['a', None] = split_path('/a', 1, 2)
['a', 'c'] = split_path('/a/c', 1, 2)
['a', 'c', 'o/r'] = split_path('/a/c/o/r', 1, 3, True)
:param path: HTTP Request path to be split
:param minsegs: Minimum number of segments to be extracted
:param maxsegs: Maximum number of segments to be extracted
:param rest_with_last: If True, trailing data will be returned as part
of last segment. If False, and there is
trailing data, raises ValueError.
:returns: list of segments with a length of maxsegs (non-existent
segments will return as None)
:raises ValueError: if given an invalid path
"""
if not maxsegs:
maxsegs = minsegs
if minsegs > maxsegs:
raise ValueError('minsegs > maxsegs: %d > %d' % (minsegs, maxsegs))
if rest_with_last:
segs = path.split('/', maxsegs)
minsegs += 1
maxsegs += 1
count = len(segs)
if (segs[0] or count < minsegs or count > maxsegs or
'' in segs[1:minsegs]):
raise ValueError('Invalid path: %s' % quote(path))
else:
minsegs += 1
maxsegs += 1
segs = path.split('/', maxsegs)
count = len(segs)
if (segs[0] or count < minsegs or count > maxsegs + 1 or
'' in segs[1:minsegs] or
(count == maxsegs + 1 and segs[maxsegs])):
raise ValueError('Invalid path: %s' % quote(path))
segs = segs[1:maxsegs]
segs.extend([None] * (maxsegs - 1 - len(segs)))
return segs
def validate_device_partition(device, partition):
"""
Validate that a device and a partition are valid and won't lead to
directory traversal when used.
:param device: device to validate
:param partition: partition to validate
:raises ValueError: if given an invalid device or partition
"""
if not device or '/' in device or device in ['.', '..']:
raise ValueError('Invalid device: %s' % quote(device or ''))
if not partition or '/' in partition or partition in ['.', '..']:
raise ValueError('Invalid partition: %s' % quote(partition or ''))
class RateLimitedIterator(object):
"""
Wrap an iterator to only yield elements at a rate of N per second.
:param iterable: iterable to wrap
:param elements_per_second: the rate at which to yield elements
:param limit_after: rate limiting kicks in only after yielding
this many elements; default is 0 (rate limit
immediately)
"""
def __init__(self, iterable, elements_per_second, limit_after=0,
ratelimit_if=lambda _junk: True):
self.iterator = iter(iterable)
self.elements_per_second = elements_per_second
self.limit_after = limit_after
self.rate_limiter = EventletRateLimiter(elements_per_second)
self.ratelimit_if = ratelimit_if
def __iter__(self):
return self
def next(self):
next_value = next(self.iterator)
if self.ratelimit_if(next_value):
if self.limit_after > 0:
self.limit_after -= 1
else:
self.rate_limiter.wait()
return next_value
__next__ = next
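# Illustrative usage sketch for RateLimitedIterator; the element count and
# rate are arbitrary example values.
def _example_rate_limited_iterator():
    # the first ten elements come out immediately, the rest at 2 per second
    slow = RateLimitedIterator(range(12), elements_per_second=2,
                               limit_after=10)
    return list(slow)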
class GreenthreadSafeIterator(object):
"""
Wrap an iterator to ensure that only one greenthread is inside its next()
method at a time.
This is useful if an iterator's next() method may perform network IO, as
that may trigger a greenthread context switch (aka trampoline), which can
give another greenthread a chance to call next(). At that point, you get
an error like "ValueError: generator already executing". By wrapping calls
to next() with a mutex, we avoid that error.
"""
def __init__(self, unsafe_iterable):
self.unsafe_iter = iter(unsafe_iterable)
self.semaphore = eventlet.semaphore.Semaphore(value=1)
def __iter__(self):
return self
def next(self):
with self.semaphore:
return next(self.unsafe_iter)
__next__ = next
class NullLogger(object):
"""A no-op logger for eventlet wsgi."""
def write(self, *args):
# "Logs" the args to nowhere
pass
def exception(self, *args):
pass
def critical(self, *args):
pass
def error(self, *args):
pass
def warning(self, *args):
pass
def info(self, *args):
pass
def debug(self, *args):
pass
def log(self, *args):
pass
class LoggerFileObject(object):
# Note: this is greenthread-local storage
_cls_thread_local = threading.local()
def __init__(self, logger, log_type='STDOUT'):
self.logger = logger
self.log_type = log_type
def write(self, value):
# We can get into a nasty situation when logs are going to syslog
# and syslog dies.
#
# It's something like this:
#
# (A) someone logs something
#
# (B) there's an exception in sending to /dev/log since syslog is
# not working
#
# (C) logging takes that exception and writes it to stderr (see
# logging.Handler.handleError)
#
# (D) stderr was replaced with a LoggerFileObject at process start,
# so the LoggerFileObject takes the provided string and tells
# its logger to log it (to syslog, naturally).
#
# Then, steps B through D repeat until we run out of stack.
if getattr(self._cls_thread_local, 'already_called_write', False):
return
self._cls_thread_local.already_called_write = True
try:
value = value.strip()
if value:
if 'Connection reset by peer' in value:
self.logger.error(
_('%s: Connection reset by peer'), self.log_type)
else:
self.logger.error(_('%(type)s: %(value)s'),
{'type': self.log_type, 'value': value})
finally:
self._cls_thread_local.already_called_write = False
def writelines(self, values):
if getattr(self._cls_thread_local, 'already_called_writelines', False):
return
self._cls_thread_local.already_called_writelines = True
try:
self.logger.error(_('%(type)s: %(value)s'),
{'type': self.log_type,
'value': '#012'.join(values)})
finally:
self._cls_thread_local.already_called_writelines = False
def close(self):
pass
def flush(self):
pass
def __iter__(self):
return self
def next(self):
raise IOError(errno.EBADF, 'Bad file descriptor')
__next__ = next
def read(self, size=-1):
raise IOError(errno.EBADF, 'Bad file descriptor')
def readline(self, size=-1):
raise IOError(errno.EBADF, 'Bad file descriptor')
def tell(self):
return 0
def xreadlines(self):
return self
class StatsdClient(object):
def __init__(self, host, port, base_prefix='', tail_prefix='',
default_sample_rate=1, sample_rate_factor=1, logger=None):
self._host = host
self._port = port
self._base_prefix = base_prefix
self._set_prefix(tail_prefix)
self._default_sample_rate = default_sample_rate
self._sample_rate_factor = sample_rate_factor
self.random = random
self.logger = logger
# Determine if host is IPv4 or IPv6
addr_info = None
try:
addr_info = socket.getaddrinfo(host, port, socket.AF_INET)
self._sock_family = socket.AF_INET
except socket.gaierror:
try:
addr_info = socket.getaddrinfo(host, port, socket.AF_INET6)
self._sock_family = socket.AF_INET6
except socket.gaierror:
# Don't keep the server from starting from what could be a
# transient DNS failure. Any hostname will get re-resolved as
# necessary in the .sendto() calls.
# However, we don't know if we're IPv4 or IPv6 in this case, so
# we assume legacy IPv4.
self._sock_family = socket.AF_INET
# NOTE: we use the original host value, not the DNS-resolved one
# because if host is a hostname, we don't want to cache the DNS
# resolution for the entire lifetime of this process. Let standard
# name resolution caching take effect. This should help operators use
# DNS trickery if they want.
if addr_info is not None:
# addr_info is a list of 5-tuples with the following structure:
# (family, socktype, proto, canonname, sockaddr)
# where sockaddr is the only thing of interest to us, and we only
# use the first result. We want to use the originally supplied
# host (see note above) and the remainder of the variable-length
# sockaddr: IPv4 has (address, port) while IPv6 has (address,
# port, flow info, scope id).
sockaddr = addr_info[0][-1]
self._target = (host,) + (sockaddr[1:])
else:
self._target = (host, port)
def _set_prefix(self, tail_prefix):
"""
Modifies the prefix that is added to metric names. The resulting prefix
is the concatenation of the component parts `base_prefix` and
`tail_prefix`. Only truthy components are included. Each included
component is followed by a period, e.g.::
<base_prefix>.<tail_prefix>.
<tail_prefix>.
<base_prefix>.
<the empty string>
Note: this method is expected to be called from the constructor only,
but exists to provide backwards compatible functionality for the
deprecated set_prefix() method.
:param tail_prefix: The new value of tail_prefix
"""
if tail_prefix and self._base_prefix:
self._prefix = '.'.join([self._base_prefix, tail_prefix, ''])
elif tail_prefix:
self._prefix = tail_prefix + '.'
elif self._base_prefix:
self._prefix = self._base_prefix + '.'
else:
self._prefix = ''
def set_prefix(self, tail_prefix):
"""
This method is deprecated; use the ``tail_prefix`` argument of the
constructor when instantiating the class instead.
"""
warnings.warn(
'set_prefix() is deprecated; use the ``tail_prefix`` argument of '
'the constructor when instantiating the class instead.',
DeprecationWarning
)
self._set_prefix(tail_prefix)
def _send(self, m_name, m_value, m_type, sample_rate):
if sample_rate is None:
sample_rate = self._default_sample_rate
sample_rate = sample_rate * self._sample_rate_factor
parts = ['%s%s:%s' % (self._prefix, m_name, m_value), m_type]
if sample_rate < 1:
if self.random() < sample_rate:
parts.append('@%s' % (sample_rate,))
else:
return
if six.PY3:
parts = [part.encode('utf-8') for part in parts]
# Ideally, we'd cache a sending socket in self, but that
# results in a socket getting shared by multiple green threads.
with closing(self._open_socket()) as sock:
try:
return sock.sendto(b'|'.join(parts), self._target)
except IOError as err:
if self.logger:
self.logger.warning(
_('Error sending UDP message to %(target)r: %(err)s'),
{'target': self._target, 'err': err})
def _open_socket(self):
return socket.socket(self._sock_family, socket.SOCK_DGRAM)
def update_stats(self, m_name, m_value, sample_rate=None):
return self._send(m_name, m_value, 'c', sample_rate)
def increment(self, metric, sample_rate=None):
return self.update_stats(metric, 1, sample_rate)
def decrement(self, metric, sample_rate=None):
return self.update_stats(metric, -1, sample_rate)
def timing(self, metric, timing_ms, sample_rate=None):
return self._send(metric, timing_ms, 'ms', sample_rate)
def timing_since(self, metric, orig_time, sample_rate=None):
return self.timing(metric, (time.time() - orig_time) * 1000,
sample_rate)
def transfer_rate(self, metric, elapsed_time, byte_xfer, sample_rate=None):
if byte_xfer:
return self.timing(metric,
elapsed_time * 1000 / byte_xfer * 1000,
sample_rate)
def timing_stats(**dec_kwargs):
"""
Returns a decorator that logs timing events or errors for public methods in
swift's wsgi server controllers, based on response code.
"""
def decorating_func(func):
method = func.__name__
@functools.wraps(func)
def _timing_stats(ctrl, *args, **kwargs):
start_time = time.time()
resp = func(ctrl, *args, **kwargs)
# .timing is for successful responses *or* error codes that are
# not Swift's fault. For example, 500 is definitely the server's
# fault, but 412 is an error code (4xx are all errors) that is
# due to a header the client sent.
#
# .errors.timing is for failures that *are* Swift's fault.
# Examples include 507 for an unmounted drive or 500 for an
# unhandled exception.
if not is_server_error(resp.status_int):
ctrl.logger.timing_since(method + '.timing',
start_time, **dec_kwargs)
else:
ctrl.logger.timing_since(method + '.errors.timing',
start_time, **dec_kwargs)
return resp
return _timing_stats
return decorating_func
class SwiftLoggerAdapter(logging.LoggerAdapter):
"""
A logging.LoggerAdapter subclass that also passes through StatsD method
calls.
Like logging.LoggerAdapter, you have to subclass this and override the
process() method to accomplish anything useful.
"""
def get_metric_name(self, metric):
# subclasses may override this method to annotate the metric name
return metric
def update_stats(self, metric, *a, **kw):
return self.logger.update_stats(self.get_metric_name(metric), *a, **kw)
def increment(self, metric, *a, **kw):
return self.logger.increment(self.get_metric_name(metric), *a, **kw)
def decrement(self, metric, *a, **kw):
return self.logger.decrement(self.get_metric_name(metric), *a, **kw)
def timing(self, metric, *a, **kw):
return self.logger.timing(self.get_metric_name(metric), *a, **kw)
def timing_since(self, metric, *a, **kw):
return self.logger.timing_since(self.get_metric_name(metric), *a, **kw)
def transfer_rate(self, metric, *a, **kw):
return self.logger.transfer_rate(
self.get_metric_name(metric), *a, **kw)
@property
def thread_locals(self):
return self.logger.thread_locals
@thread_locals.setter
def thread_locals(self, thread_locals):
self.logger.thread_locals = thread_locals
def exception(self, msg, *a, **kw):
# We up-call to exception() where stdlib uses error() so we can get
# some of the traceback suppression from LogAdapter, below
self.logger.exception(msg, *a, **kw)
class PrefixLoggerAdapter(SwiftLoggerAdapter):
"""
Adds an optional prefix to all its log messages. When the prefix has not
been set, messages are unchanged.
"""
def set_prefix(self, prefix):
self.extra['prefix'] = prefix
def exception(self, msg, *a, **kw):
if 'prefix' in self.extra:
msg = self.extra['prefix'] + msg
super(PrefixLoggerAdapter, self).exception(msg, *a, **kw)
def process(self, msg, kwargs):
msg, kwargs = super(PrefixLoggerAdapter, self).process(msg, kwargs)
if 'prefix' in self.extra:
msg = self.extra['prefix'] + msg
return (msg, kwargs)
class MetricsPrefixLoggerAdapter(SwiftLoggerAdapter):
"""
Adds a prefix to all Statsd metrics' names.
"""
def __init__(self, logger, extra, metric_prefix):
"""
:param logger: an instance of logging.Logger
:param extra: a dict-like object
:param metric_prefix: A prefix that will be added to the start of each
metric name such that the metric name is transformed to:
``<metric_prefix>.<metric name>``. Note that the logger's
StatsdClient also adds its configured prefix to metric names.
"""
super(MetricsPrefixLoggerAdapter, self).__init__(logger, extra)
self.metric_prefix = metric_prefix
def get_metric_name(self, metric):
return '%s.%s' % (self.metric_prefix, metric)
# double inheritance to support property with setter
class LogAdapter(logging.LoggerAdapter, object):
"""
    A Logger-like object which performs some reformatting on calls to
:meth:`exception`. Can be used to store a threadlocal transaction id and
client ip.
"""
_cls_thread_local = threading.local()
def __init__(self, logger, server):
logging.LoggerAdapter.__init__(self, logger, {})
self.server = server
self.warn = self.warning
# There are a few properties needed for py35; see
# - https://bugs.python.org/issue31457
# - https://github.com/python/cpython/commit/1bbd482
# - https://github.com/python/cpython/commit/0b6a118
# - https://github.com/python/cpython/commit/ce9e625
def _log(self, level, msg, args, exc_info=None, extra=None,
stack_info=False):
"""
Low-level log implementation, proxied to allow nested logger adapters.
"""
return self.logger._log(
level,
msg,
args,
exc_info=exc_info,
extra=extra,
stack_info=stack_info,
)
@property
def manager(self):
return self.logger.manager
@manager.setter
def manager(self, value):
self.logger.manager = value
@property
def name(self):
return self.logger.name
@property
def txn_id(self):
if hasattr(self._cls_thread_local, 'txn_id'):
return self._cls_thread_local.txn_id
@txn_id.setter
def txn_id(self, value):
self._cls_thread_local.txn_id = value
@property
def client_ip(self):
if hasattr(self._cls_thread_local, 'client_ip'):
return self._cls_thread_local.client_ip
@client_ip.setter
def client_ip(self, value):
self._cls_thread_local.client_ip = value
@property
def thread_locals(self):
return (self.txn_id, self.client_ip)
@thread_locals.setter
def thread_locals(self, value):
self.txn_id, self.client_ip = value
def getEffectiveLevel(self):
return self.logger.getEffectiveLevel()
def process(self, msg, kwargs):
"""
Add extra info to message
"""
kwargs['extra'] = {'server': self.server, 'txn_id': self.txn_id,
'client_ip': self.client_ip}
return msg, kwargs
def notice(self, msg, *args, **kwargs):
"""
Convenience function for syslog priority LOG_NOTICE. The python
logging lvl is set to 25, just above info. SysLogHandler is
monkey patched to map this log lvl to the LOG_NOTICE syslog
priority.
"""
self.log(NOTICE, msg, *args, **kwargs)
def _exception(self, msg, *args, **kwargs):
logging.LoggerAdapter.exception(self, msg, *args, **kwargs)
def exception(self, msg, *args, **kwargs):
_junk, exc, _junk = sys.exc_info()
call = self.error
emsg = ''
if isinstance(exc, (OSError, socket.error)):
if exc.errno in (errno.EIO, errno.ENOSPC):
emsg = str(exc)
elif exc.errno == errno.ECONNREFUSED:
emsg = _('Connection refused')
elif exc.errno == errno.ECONNRESET:
emsg = _('Connection reset')
elif exc.errno == errno.EHOSTUNREACH:
emsg = _('Host unreachable')
elif exc.errno == errno.ENETUNREACH:
emsg = _('Network unreachable')
elif exc.errno == errno.ETIMEDOUT:
emsg = _('Connection timeout')
elif exc.errno == errno.EPIPE:
emsg = _('Broken pipe')
else:
call = self._exception
elif isinstance(exc, (http_client.BadStatusLine,
green_http_client.BadStatusLine)):
# Use error(); not really exceptional
emsg = '%s: %s' % (exc.__class__.__name__, exc.line)
elif isinstance(exc, eventlet.Timeout):
emsg = exc.__class__.__name__
if hasattr(exc, 'seconds'):
emsg += ' (%ss)' % exc.seconds
if isinstance(exc, swift.common.exceptions.MessageTimeout):
if exc.msg:
emsg += ' %s' % exc.msg
else:
call = self._exception
call('%s: %s' % (msg, emsg), *args, **kwargs)
def set_statsd_prefix(self, prefix):
"""
This method is deprecated. Callers should use the
``statsd_tail_prefix`` argument of ``get_logger`` when instantiating a
logger.
The StatsD client prefix defaults to the "name" of the logger. This
method may override that default with a specific value. Currently used
in the proxy-server to differentiate the Account, Container, and Object
controllers.
"""
if self.logger.statsd_client:
self.logger.statsd_client.set_prefix(prefix)
def statsd_delegate(statsd_func_name):
"""
Factory to create methods which delegate to methods on
self.logger.statsd_client (an instance of StatsdClient). The
created methods conditionally delegate to a method whose name is given
in 'statsd_func_name'. The created delegate methods are a no-op when
StatsD logging is not configured.
:param statsd_func_name: the name of a method on StatsdClient.
"""
func = getattr(StatsdClient, statsd_func_name)
@functools.wraps(func)
def wrapped(self, *a, **kw):
if getattr(self.logger, 'statsd_client'):
return func(self.logger.statsd_client, *a, **kw)
return wrapped
update_stats = statsd_delegate('update_stats')
increment = statsd_delegate('increment')
decrement = statsd_delegate('decrement')
timing = statsd_delegate('timing')
timing_since = statsd_delegate('timing_since')
transfer_rate = statsd_delegate('transfer_rate')
class SwiftLogFormatter(logging.Formatter):
"""
Custom logging.Formatter will append txn_id to a log message if the
record has one and the message does not. Optionally it can shorten
overly long log lines.
"""
def __init__(self, fmt=None, datefmt=None, max_line_length=0):
logging.Formatter.__init__(self, fmt=fmt, datefmt=datefmt)
self.max_line_length = max_line_length
def format(self, record):
if not hasattr(record, 'server'):
# Catch log messages that were not initiated by swift
# (for example, the keystone auth middleware)
record.server = record.name
# Included from Python's logging.Formatter and then altered slightly to
# replace \n with #012
record.message = record.getMessage()
if self._fmt.find('%(asctime)') >= 0:
record.asctime = self.formatTime(record, self.datefmt)
msg = (self._fmt % record.__dict__).replace('\n', '#012')
if record.exc_info:
# Cache the traceback text to avoid converting it multiple times
# (it's constant anyway)
if not record.exc_text:
record.exc_text = self.formatException(
record.exc_info).replace('\n', '#012')
if record.exc_text:
if not msg.endswith('#012'):
msg = msg + '#012'
msg = msg + record.exc_text
if (hasattr(record, 'txn_id') and record.txn_id and
record.txn_id not in msg):
msg = "%s (txn: %s)" % (msg, record.txn_id)
if (hasattr(record, 'client_ip') and record.client_ip and
record.levelno != logging.INFO and
record.client_ip not in msg):
msg = "%s (client_ip: %s)" % (msg, record.client_ip)
if self.max_line_length > 0 and len(msg) > self.max_line_length:
if self.max_line_length < 7:
msg = msg[:self.max_line_length]
else:
approxhalf = (self.max_line_length - 5) // 2
msg = msg[:approxhalf] + " ... " + msg[-approxhalf:]
return msg
class LogLevelFilter(object):
"""
Drop messages for the logger based on level.
This is useful when dependencies log too much information.
:param level: All messages at or below this level are dropped
(DEBUG < INFO < WARN < ERROR < CRITICAL|FATAL)
Default: DEBUG
"""
def __init__(self, level=logging.DEBUG):
self.level = level
def filter(self, record):
if record.levelno <= self.level:
return 0
return 1
def get_logger(conf, name=None, log_to_console=False, log_route=None,
fmt="%(server)s: %(message)s", statsd_tail_prefix=None):
"""
Get the current system logger using config settings.
**Log config and defaults**::
log_facility = LOG_LOCAL0
log_level = INFO
log_name = swift
log_max_line_length = 0
log_udp_host = (disabled)
log_udp_port = logging.handlers.SYSLOG_UDP_PORT
log_address = /dev/log
log_statsd_host = (disabled)
log_statsd_port = 8125
log_statsd_default_sample_rate = 1.0
log_statsd_sample_rate_factor = 1.0
log_statsd_metric_prefix = (empty-string)
:param conf: Configuration dict to read settings from
:param name: This value is used to populate the ``server`` field in the log
format, as the prefix for statsd messages, and as the default
value for ``log_route``; defaults to the ``log_name`` value in
``conf``, if it exists, or to 'swift'.
:param log_to_console: Add handler which writes to console on stderr
:param log_route: Route for the logging, not emitted to the log, just used
to separate logging configurations; defaults to the value
of ``name`` or whatever ``name`` defaults to. This value
is used as the name attribute of the
``logging.LogAdapter`` that is returned.
:param fmt: Override log format
:param statsd_tail_prefix: tail prefix to pass to statsd client; if None
then the tail prefix defaults to the value of ``name``.
:return: an instance of ``LogAdapter``
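
    Example (illustrative; the conf values shown are placeholders)::

        logger = get_logger({'log_level': 'DEBUG'}, name='object-server',
                            log_to_console=True)
        logger.info('object-server starting')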
"""
# note: log_name is typically specified in conf (i.e. defined by
# operators), whereas log_route is typically hard-coded in callers of
# get_logger (i.e. defined by developers)
if not conf:
conf = {}
if name is None:
name = conf.get('log_name', 'swift')
if not log_route:
log_route = name
logger = logging.getLogger(log_route)
logger.propagate = False
# all new handlers will get the same formatter
formatter = SwiftLogFormatter(
fmt=fmt, max_line_length=int(conf.get('log_max_line_length', 0)))
# get_logger will only ever add one SysLog Handler to a logger
if not hasattr(get_logger, 'handler4logger'):
get_logger.handler4logger = {}
if logger in get_logger.handler4logger:
logger.removeHandler(get_logger.handler4logger[logger])
# facility for this logger will be set by last call wins
facility = getattr(SysLogHandler, conf.get('log_facility', 'LOG_LOCAL0'),
SysLogHandler.LOG_LOCAL0)
udp_host = conf.get('log_udp_host')
if udp_host:
udp_port = int(conf.get('log_udp_port',
logging.handlers.SYSLOG_UDP_PORT))
handler = ThreadSafeSysLogHandler(address=(udp_host, udp_port),
facility=facility)
else:
log_address = conf.get('log_address', '/dev/log')
handler = None
try:
mode = os.stat(log_address).st_mode
if stat.S_ISSOCK(mode):
handler = ThreadSafeSysLogHandler(address=log_address,
facility=facility)
except (OSError, socket.error) as e:
# If either /dev/log isn't a UNIX socket or it does not exist at
# all then py2 would raise an error
if e.errno not in [errno.ENOTSOCK, errno.ENOENT]:
raise
if handler is None:
# fallback to default UDP
handler = ThreadSafeSysLogHandler(facility=facility)
handler.setFormatter(formatter)
logger.addHandler(handler)
get_logger.handler4logger[logger] = handler
# setup console logging
if log_to_console or hasattr(get_logger, 'console_handler4logger'):
# remove pre-existing console handler for this logger
if not hasattr(get_logger, 'console_handler4logger'):
get_logger.console_handler4logger = {}
if logger in get_logger.console_handler4logger:
logger.removeHandler(get_logger.console_handler4logger[logger])
console_handler = logging.StreamHandler(sys.__stderr__)
console_handler.setFormatter(formatter)
logger.addHandler(console_handler)
get_logger.console_handler4logger[logger] = console_handler
# set the level for the logger
logger.setLevel(
getattr(logging, conf.get('log_level', 'INFO').upper(), logging.INFO))
# Setup logger with a StatsD client if so configured
statsd_host = conf.get('log_statsd_host')
if statsd_host:
statsd_port = int(conf.get('log_statsd_port', 8125))
base_prefix = conf.get('log_statsd_metric_prefix', '')
default_sample_rate = float(conf.get(
'log_statsd_default_sample_rate', 1))
sample_rate_factor = float(conf.get(
'log_statsd_sample_rate_factor', 1))
if statsd_tail_prefix is None:
statsd_tail_prefix = name
statsd_client = StatsdClient(statsd_host, statsd_port, base_prefix,
statsd_tail_prefix, default_sample_rate,
sample_rate_factor, logger=logger)
logger.statsd_client = statsd_client
else:
logger.statsd_client = None
adapted_logger = LogAdapter(logger, name)
other_handlers = conf.get('log_custom_handlers', None)
if other_handlers:
log_custom_handlers = [s.strip() for s in other_handlers.split(',')
if s.strip()]
for hook in log_custom_handlers:
try:
mod, fnc = hook.rsplit('.', 1)
logger_hook = getattr(__import__(mod, fromlist=[fnc]), fnc)
logger_hook(conf, name, log_to_console, log_route, fmt,
logger, adapted_logger)
except (AttributeError, ImportError):
print('Error calling custom handler [%s]' % hook,
file=sys.stderr)
except ValueError:
print('Invalid custom handler format [%s]' % hook,
file=sys.stderr)
return adapted_logger
def get_hub():
"""
Checks whether poll is available and falls back
on select if it isn't.
Note about epoll:
Review: https://review.opendev.org/#/c/18806/
There was a problem where once out of every 30 quadrillion
connections, a coroutine wouldn't wake up when the client
closed its end. Epoll was not reporting the event or it was
getting swallowed somewhere. Then when that file descriptor
was re-used, eventlet would freak right out because it still
thought it was waiting for activity from it in some other coro.
Another note about epoll: it's hard to use when forking. epoll works
like so:
* create an epoll instance: efd = epoll_create(...)
* register file descriptors of interest with epoll_ctl(efd,
EPOLL_CTL_ADD, fd, ...)
* wait for events with epoll_wait(efd, ...)
If you fork, you and all your child processes end up using the same
epoll instance, and everyone becomes confused. It is possible to use
epoll and fork and still have a correct program as long as you do the
right things, but eventlet doesn't do those things. Really, it can't
even try to do those things since it doesn't get notified of forks.
In contrast, both poll() and select() specify the set of interesting
file descriptors with each call, so there's no problem with forking.
As eventlet monkey patching is now done before call get_hub() in wsgi.py
if we use 'import select' we get the eventlet version, but since version
0.20.0 eventlet removed select.poll() function in patched select (see:
http://eventlet.net/doc/changelog.html and
https://github.com/eventlet/eventlet/commit/614a20462).
We use eventlet.patcher.original function to get python select module
to test if poll() is available on platform.
"""
try:
select = eventlet.patcher.original('select')
if hasattr(select, "poll"):
return "poll"
return "selects"
except ImportError:
return None
def drop_privileges(user):
"""
Sets the userid/groupid of the current process, get session leader, etc.
:param user: User name to change privileges to
"""
if os.geteuid() == 0:
groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
os.setgroups(groups)
user = pwd.getpwnam(user)
os.setgid(user[3])
os.setuid(user[2])
os.environ['HOME'] = user[5]
def clean_up_daemon_hygiene():
try:
os.setsid()
except OSError:
pass
os.chdir('/') # in case you need to rmdir on where you started the daemon
os.umask(0o22) # ensure files are created with the correct privileges
def capture_stdio(logger, **kwargs):
"""
Log unhandled exceptions, close stdio, capture stdout and stderr.
    :param logger: Logger object to use
"""
# log uncaught exceptions
sys.excepthook = lambda * exc_info: \
logger.critical(_('UNCAUGHT EXCEPTION'), exc_info=exc_info)
# collect stdio file desc not in use for logging
stdio_files = [sys.stdin, sys.stdout, sys.stderr]
console_fds = [h.stream.fileno() for _junk, h in getattr(
get_logger, 'console_handler4logger', {}).items()]
stdio_files = [f for f in stdio_files if f.fileno() not in console_fds]
with open(os.devnull, 'r+b') as nullfile:
# close stdio (excludes fds open for logging)
for f in stdio_files:
# some platforms throw an error when attempting an stdin flush
try:
f.flush()
except IOError:
pass
try:
os.dup2(nullfile.fileno(), f.fileno())
except OSError:
pass
# redirect stdio
if kwargs.pop('capture_stdout', True):
sys.stdout = LoggerFileObject(logger)
if kwargs.pop('capture_stderr', True):
sys.stderr = LoggerFileObject(logger, 'STDERR')
def parse_options(parser=None, once=False, test_args=None):
"""Parse standard swift server/daemon options with optparse.OptionParser.
:param parser: OptionParser to use. If not sent one will be created.
:param once: Boolean indicating the "once" option is available
:param test_args: Override sys.argv; used in testing
:returns: Tuple of (config, options); config is an absolute path to the
config file, options is the parser options as a dictionary.
:raises SystemExit: First arg (CONFIG) is required, file must exist
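
    Example (illustrative)::

        conf_file, options = parse_options(once=True)
        if options.get('verbose'):
            print('using config %s' % conf_file)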
"""
if not parser:
parser = OptionParser(usage="%prog CONFIG [options]")
parser.add_option("-v", "--verbose", default=False, action="store_true",
help="log to console")
if once:
parser.add_option("-o", "--once", default=False, action="store_true",
help="only run one pass of daemon")
# if test_args is None, optparse will use sys.argv[:1]
options, args = parser.parse_args(args=test_args)
if not args:
parser.print_usage()
print(_("Error: missing config path argument"))
sys.exit(1)
config = os.path.abspath(args.pop(0))
if not os.path.exists(config):
parser.print_usage()
print(_("Error: unable to locate %s") % config)
sys.exit(1)
extra_args = []
# if any named options appear in remaining args, set the option to True
for arg in args:
if arg in options.__dict__:
setattr(options, arg, True)
else:
extra_args.append(arg)
options = vars(options)
if extra_args:
options['extra_args'] = extra_args
return config, options
def is_valid_ip(ip):
"""
Return True if the provided ip is a valid IP-address
"""
return is_valid_ipv4(ip) or is_valid_ipv6(ip)
def is_valid_ipv4(ip):
"""
Return True if the provided ip is a valid IPv4-address
"""
try:
socket.inet_pton(socket.AF_INET, ip)
except socket.error: # not a valid IPv4 address
return False
return True
def is_valid_ipv6(ip):
"""
Returns True if the provided ip is a valid IPv6-address
"""
try:
socket.inet_pton(socket.AF_INET6, ip)
except socket.error: # not a valid IPv6 address
return False
return True
def expand_ipv6(address):
"""
Expand ipv6 address.
:param address: a string indicating valid ipv6 address
:returns: a string indicating fully expanded ipv6 address
"""
packed_ip = socket.inet_pton(socket.AF_INET6, address)
return socket.inet_ntop(socket.AF_INET6, packed_ip)
def whataremyips(ring_ip=None):
"""
Get "our" IP addresses ("us" being the set of services configured by
one `*.conf` file). If our REST listens on a specific address, return it.
    Otherwise, if listening on '0.0.0.0' or '::', return all addresses,
    including the loopback.
:param str ring_ip: Optional ring_ip/bind_ip from a config file; may be
IP address or hostname.
:returns: list of Strings of ip addresses
"""
if ring_ip:
# See if bind_ip is '0.0.0.0'/'::'
try:
_, _, _, _, sockaddr = socket.getaddrinfo(
ring_ip, None, 0, socket.SOCK_STREAM, 0,
socket.AI_NUMERICHOST)[0]
if sockaddr[0] not in ('0.0.0.0', '::'):
return [ring_ip]
except socket.gaierror:
pass
addresses = []
for interface in netifaces.interfaces():
try:
iface_data = netifaces.ifaddresses(interface)
for family in iface_data:
if family not in (netifaces.AF_INET, netifaces.AF_INET6):
continue
for address in iface_data[family]:
addr = address['addr']
# If we have an ipv6 address remove the
# %ether_interface at the end
if family == netifaces.AF_INET6:
addr = expand_ipv6(addr.split('%')[0])
addresses.append(addr)
except ValueError:
pass
return addresses
def parse_socket_string(socket_string, default_port):
"""
Given a string representing a socket, returns a tuple of (host, port).
Valid strings are DNS names, IPv4 addresses, or IPv6 addresses, with an
optional port. If an IPv6 address is specified it **must** be enclosed in
[], like *[::1]* or *[::1]:11211*. This follows the accepted prescription
for `IPv6 host literals`_.
Examples::
server.org
server.org:1337
127.0.0.1:1337
[::1]:1337
[::1]
.. _IPv6 host literals: https://tools.ietf.org/html/rfc3986#section-3.2.2
"""
port = default_port
# IPv6 addresses must be between '[]'
if socket_string.startswith('['):
match = IPV6_RE.match(socket_string)
if not match:
raise ValueError("Invalid IPv6 address: %s" % socket_string)
host = match.group('address')
port = match.group('port') or port
else:
if ':' in socket_string:
tokens = socket_string.split(':')
if len(tokens) > 2:
raise ValueError("IPv6 addresses must be between '[]'")
host, port = tokens
else:
host = socket_string
return (host, port)
def storage_directory(datadir, partition, name_hash):
"""
Get the storage directory
:param datadir: Base data directory
:param partition: Partition
:param name_hash: Account, container or object name hash
:returns: Storage directory
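
    Example (illustrative)::

        storage_directory('objects', 1234, 'df0808a9dd30a6b371e1525bd2a045cd')
        # -> 'objects/1234/5cd/df0808a9dd30a6b371e1525bd2a045cd'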
"""
return os.path.join(datadir, str(partition), name_hash[-3:], name_hash)
def hash_path(account, container=None, object=None, raw_digest=False):
"""
Get the canonical hash for an account/container/object
:param account: Account
:param container: Container
:param object: Object
:param raw_digest: If True, return the raw version rather than a hex digest
:returns: hash string
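
    Example (illustrative; the actual digest depends on the configured
    HASH_PATH_PREFIX/HASH_PATH_SUFFIX)::

        hash_path('AUTH_test', 'container', 'obj')
        # -> a 32-character hex MD5 digest string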
"""
if object and not container:
raise ValueError('container is required if object is provided')
paths = [account if isinstance(account, six.binary_type)
else account.encode('utf8')]
if container:
paths.append(container if isinstance(container, six.binary_type)
else container.encode('utf8'))
if object:
paths.append(object if isinstance(object, six.binary_type)
else object.encode('utf8'))
if raw_digest:
return md5(HASH_PATH_PREFIX + b'/' + b'/'.join(paths)
+ HASH_PATH_SUFFIX, usedforsecurity=False).digest()
else:
return md5(HASH_PATH_PREFIX + b'/' + b'/'.join(paths)
+ HASH_PATH_SUFFIX, usedforsecurity=False).hexdigest()
def get_zero_indexed_base_string(base, index):
"""
This allows the caller to make a list of things with indexes, where the
first item (zero indexed) is just the bare base string, and subsequent
indexes are appended '-1', '-2', etc.
e.g.::
'lock', None => 'lock'
'lock', 0 => 'lock'
'lock', 1 => 'lock-1'
'object', 2 => 'object-2'
:param base: a string, the base string; when ``index`` is 0 (or None) this
is the identity function.
:param index: a digit, typically an integer (or None); for values other
than 0 or None this digit is appended to the base string
separated by a hyphen.
"""
if index == 0 or index is None:
return_string = base
else:
return_string = base + "-%d" % int(index)
return return_string
def _get_any_lock(fds):
for fd in fds:
try:
fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
return True
except IOError as err:
if err.errno != errno.EAGAIN:
raise
return False
@contextmanager
def lock_path(directory, timeout=None, timeout_class=None,
limit=1, name=None):
"""
Context manager that acquires a lock on a directory. This will block until
the lock can be acquired, or the timeout time has expired (whichever occurs
first).
    To lock exclusively, a file or directory has to be opened in write mode.
    Python doesn't allow directories to be opened in write mode, so we
    work around this by locking a hidden file in the directory.
:param directory: directory to be locked
:param timeout: timeout (in seconds). If None, defaults to
DEFAULT_LOCK_TIMEOUT
:param timeout_class: The class of the exception to raise if the
lock cannot be granted within the timeout. Will be
constructed as timeout_class(timeout, lockpath). Default:
LockTimeout
:param limit: The maximum number of locks that may be held concurrently on
the same directory at the time this method is called. Note that this
limit is only applied during the current call to this method and does
not prevent subsequent calls giving a larger limit. Defaults to 1.
    :param name: A string to distinguish different types of locks in a
directory
:raises TypeError: if limit is not an int.
:raises ValueError: if limit is less than 1.
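
    Example (illustrative; the path and helper name are placeholders)::

        with lock_path('/srv/node/sda1/tmp', timeout=10):
            do_exclusive_work()   # the directory's hidden lock file is held here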
"""
if timeout is None:
timeout = DEFAULT_LOCK_TIMEOUT
if timeout_class is None:
timeout_class = swift.common.exceptions.LockTimeout
if limit < 1:
raise ValueError('limit must be greater than or equal to 1')
mkdirs(directory)
lockpath = '%s/.lock' % directory
if name:
lockpath += '-%s' % str(name)
fds = [os.open(get_zero_indexed_base_string(lockpath, i),
os.O_WRONLY | os.O_CREAT)
for i in range(limit)]
sleep_time = 0.01
slower_sleep_time = max(timeout * 0.01, sleep_time)
slowdown_at = timeout * 0.01
time_slept = 0
try:
with timeout_class(timeout, lockpath):
while True:
if _get_any_lock(fds):
break
if time_slept > slowdown_at:
sleep_time = slower_sleep_time
sleep(sleep_time)
time_slept += sleep_time
yield True
finally:
for fd in fds:
os.close(fd)
@contextmanager
def lock_file(filename, timeout=None, append=False, unlink=True):
"""
Context manager that acquires a lock on a file. This will block until
the lock can be acquired, or the timeout time has expired (whichever occurs
first).
:param filename: file to be locked
:param timeout: timeout (in seconds). If None, defaults to
DEFAULT_LOCK_TIMEOUT
:param append: True if file should be opened in append mode
:param unlink: True if the file should be unlinked at the end
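
    Example (illustrative; the path is a placeholder)::

        with lock_file('/var/cache/swift/object.recon', unlink=False) as fp:
            data = fp.read()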
"""
if timeout is None:
timeout = DEFAULT_LOCK_TIMEOUT
flags = os.O_CREAT | os.O_RDWR
if append:
flags |= os.O_APPEND
mode = 'a+b'
else:
mode = 'r+b'
while True:
fd = os.open(filename, flags)
file_obj = os.fdopen(fd, mode)
try:
with swift.common.exceptions.LockTimeout(timeout, filename):
while True:
try:
fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
break
except IOError as err:
if err.errno != errno.EAGAIN:
raise
sleep(0.01)
try:
if os.stat(filename).st_ino != os.fstat(fd).st_ino:
continue
except OSError as err:
if err.errno == errno.ENOENT:
continue
raise
yield file_obj
if unlink:
os.unlink(filename)
break
finally:
file_obj.close()
def lock_parent_directory(filename, timeout=None):
"""
Context manager that acquires a lock on the parent directory of the given
file path. This will block until the lock can be acquired, or the timeout
time has expired (whichever occurs first).
:param filename: file path of the parent directory to be locked
:param timeout: timeout (in seconds). If None, defaults to
DEFAULT_LOCK_TIMEOUT
"""
return lock_path(os.path.dirname(filename), timeout=timeout)
def get_time_units(time_amount):
"""
    Get a normalized length of time in the largest unit of time (hours,
minutes, or seconds.)
:param time_amount: length of time in seconds
    :returns: A tuple of (length of time, unit of time) where unit of time is
one of ('h', 'm', 's')
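
    Example (illustrative)::

        get_time_units(90)     # -> (1.5, 'm') under Python 3
        get_time_units(7200)   # -> (2.0, 'h')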
"""
time_unit = 's'
if time_amount > 60:
time_amount /= 60
time_unit = 'm'
if time_amount > 60:
time_amount /= 60
time_unit = 'h'
return time_amount, time_unit
def compute_eta(start_time, current_value, final_value):
"""
    Compute an ETA. Now, if only we could also have a progress bar...
:param start_time: Unix timestamp when the operation began
:param current_value: Current value
:param final_value: Final value
:returns: ETA as a tuple of (length of time, unit of time) where unit of
time is one of ('h', 'm', 's')
"""
elapsed = time.time() - start_time
completion = (float(current_value) / final_value) or 0.00001
return get_time_units(1.0 / completion * elapsed - elapsed)
def unlink_older_than(path, mtime):
"""
Remove any file in a given path that was last modified before mtime.
:param path: path to remove file from
:param mtime: timestamp of oldest file to keep
"""
filepaths = map(functools.partial(os.path.join, path), listdir(path))
return unlink_paths_older_than(filepaths, mtime)
def unlink_paths_older_than(filepaths, mtime):
"""
Remove any files from the given list that were
last modified before mtime.
:param filepaths: a list of strings, the full paths of files to check
:param mtime: timestamp of oldest file to keep
"""
for fpath in filepaths:
try:
if os.path.getmtime(fpath) < mtime:
os.unlink(fpath)
except OSError:
pass
def item_from_env(env, item_name, allow_none=False):
"""
Get a value from the wsgi environment
:param env: wsgi environment dict
:param item_name: name of item to get
:returns: the value from the environment
"""
item = env.get(item_name, None)
if item is None and not allow_none:
logging.error("ERROR: %s could not be found in env!", item_name)
return item
def cache_from_env(env, allow_none=False):
"""
Get memcache connection pool from the environment (which had been
    previously set by the memcache middleware)
:param env: wsgi environment dict
:returns: swift.common.memcached.MemcacheRing from environment
"""
return item_from_env(env, 'swift.cache', allow_none)
def read_conf_dir(parser, conf_dir):
conf_files = []
for f in os.listdir(conf_dir):
if f.endswith('.conf') and not f.startswith('.'):
conf_files.append(os.path.join(conf_dir, f))
return parser.read(sorted(conf_files))
if six.PY2:
NicerInterpolation = None # just don't cause ImportErrors over in wsgi.py
else:
class NicerInterpolation(configparser.BasicInterpolation):
def before_get(self, parser, section, option, value, defaults):
if '%(' not in value:
return value
return super(NicerInterpolation, self).before_get(
parser, section, option, value, defaults)
def readconf(conf_path, section_name=None, log_name=None, defaults=None,
raw=False):
"""
Read config file(s) and return config items as a dict
:param conf_path: path to config file/directory, or a file-like object
(hasattr readline)
:param section_name: config section to read (will return all sections if
not defined)
:param log_name: name to be used with logging (will use section_name if
not defined)
:param defaults: dict of default values to pre-populate the config with
:returns: dict of config items
:raises ValueError: if section_name does not exist
:raises IOError: if reading the file failed
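
    Example (illustrative; the path and section name are placeholders)::

        conf = readconf('/etc/swift/object-server.conf', 'app:object-server')
        workers = int(conf.get('workers', 1))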
"""
if defaults is None:
defaults = {}
if raw:
c = RawConfigParser(defaults)
else:
if six.PY2:
c = ConfigParser(defaults)
else:
# In general, we haven't really thought much about interpolation
# in configs. Python's default ConfigParser has always supported
            # it, though, so *we* got it "for free". Unfortunately, since we
# "supported" interpolation, we have to assume there are
# deployments in the wild that use it, and try not to break them.
# So, do what we can to mimic the py2 behavior of passing through
# values like "1%" (which we want to support for
# fallocate_reserve).
c = ConfigParser(defaults, interpolation=NicerInterpolation())
if hasattr(conf_path, 'readline'):
if hasattr(conf_path, 'seek'):
conf_path.seek(0)
c.readfp(conf_path)
else:
if os.path.isdir(conf_path):
# read all configs in directory
success = read_conf_dir(c, conf_path)
else:
success = c.read(conf_path)
if not success:
raise IOError(_("Unable to read config from %s") %
conf_path)
if section_name:
if c.has_section(section_name):
conf = dict(c.items(section_name))
else:
raise ValueError(
_("Unable to find %(section)s config section in %(conf)s") %
{'section': section_name, 'conf': conf_path})
if "log_name" not in conf:
if log_name is not None:
conf['log_name'] = log_name
else:
conf['log_name'] = section_name
else:
conf = {}
for s in c.sections():
conf.update({s: dict(c.items(s))})
if 'log_name' not in conf:
conf['log_name'] = log_name
conf['__file__'] = conf_path
return conf
def parse_prefixed_conf(conf_file, prefix):
"""
Search the config file for any common-prefix sections and load those
sections to a dict mapping the after-prefix reference to options.
:param conf_file: the file name of the config to parse
:param prefix: the common prefix of the sections
:return: a dict mapping policy reference -> dict of policy options
:raises ValueError: if a policy config section has an invalid name
"""
ret_config = {}
all_conf = readconf(conf_file)
for section, options in all_conf.items():
if not section.startswith(prefix):
continue
target_ref = section[len(prefix):]
ret_config[target_ref] = options
return ret_config
def write_pickle(obj, dest, tmp=None, pickle_protocol=0):
"""
    Ensure that a pickle file gets written to disk. The file
    is first written to a tmp location, synced to disk, and then
    moved to its final location.
:param obj: python object to be pickled
:param dest: path of final destination file
:param tmp: path to tmp to use, defaults to None
:param pickle_protocol: protocol to pickle the obj with, defaults to 0
"""
if tmp is None:
tmp = os.path.dirname(dest)
mkdirs(tmp)
fd, tmppath = mkstemp(dir=tmp, suffix='.tmp')
with os.fdopen(fd, 'wb') as fo:
pickle.dump(obj, fo, pickle_protocol)
fo.flush()
os.fsync(fd)
renamer(tmppath, dest)
def search_tree(root, glob_match, ext='', exts=None, dir_ext=None):
"""Look in root, for any files/dirs matching glob, recursively traversing
any found directories looking for files ending with ext
:param root: start of search path
:param glob_match: glob to match in root, matching dirs are traversed with
os.walk
:param ext: only files that end in ext will be returned
:param exts: a list of file extensions; only files that end in one of these
extensions will be returned; if set this list overrides any
extension specified using the 'ext' param.
:param dir_ext: if present directories that end with dir_ext will not be
traversed and instead will be returned as a matched path
:returns: list of full paths to matching files, sorted
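
    Example (illustrative; the arguments shown are placeholders)::

        search_tree('/etc/swift', 'object-server*', '.conf', dir_ext='conf.d')
        # -> sorted list of matching config files and conf.d directories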
"""
exts = exts or [ext]
found_files = []
for path in glob.glob(os.path.join(root, glob_match)):
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
if dir_ext and root.endswith(dir_ext):
found_files.append(root)
# the root is a config dir, descend no further
break
for file_ in files:
if any(exts) and not any(file_.endswith(e) for e in exts):
continue
found_files.append(os.path.join(root, file_))
found_dir = False
for dir_ in dirs:
if dir_ext and dir_.endswith(dir_ext):
found_dir = True
found_files.append(os.path.join(root, dir_))
if found_dir:
# do not descend further into matching directories
break
else:
if ext and not path.endswith(ext):
continue
found_files.append(path)
return sorted(found_files)
def write_file(path, contents):
"""Write contents to file at path
:param path: any path, subdirs will be created as needed
:param contents: data to write to file, will be converted to string
"""
dirname, name = os.path.split(path)
if not os.path.exists(dirname):
try:
os.makedirs(dirname)
except OSError as err:
if err.errno == errno.EACCES:
sys.exit('Unable to create %s. Running as '
'non-root?' % dirname)
with open(path, 'w') as f:
f.write('%s' % contents)
def remove_file(path):
"""Quiet wrapper for os.unlink, OSErrors are suppressed
:param path: first and only argument passed to os.unlink
"""
try:
os.unlink(path)
except OSError:
pass
def remove_directory(path):
"""Wrapper for os.rmdir, ENOENT and ENOTEMPTY are ignored
:param path: first and only argument passed to os.rmdir
"""
try:
os.rmdir(path)
except OSError as e:
if e.errno not in (errno.ENOENT, errno.ENOTEMPTY):
raise
def is_file_older(path, age):
"""
Test if a file mtime is older than the given age, suppressing any OSErrors.
:param path: first and only argument passed to os.stat
:param age: age in seconds
:return: True if age is less than or equal to zero or if the file mtime is
more than ``age`` in the past; False if age is greater than zero and
the file mtime is less than or equal to ``age`` in the past or if there
is an OSError while stat'ing the file.
"""
if age <= 0:
return True
try:
return time.time() - os.stat(path).st_mtime > age
except OSError:
return False
def audit_location_generator(devices, datadir, suffix='',
mount_check=True, logger=None,
devices_filter=None, partitions_filter=None,
suffixes_filter=None, hashes_filter=None,
hook_pre_device=None, hook_post_device=None,
hook_pre_partition=None, hook_post_partition=None,
hook_pre_suffix=None, hook_post_suffix=None,
hook_pre_hash=None, hook_post_hash=None,
error_counter=None, yield_hash_dirs=False):
"""
Given a devices path and a data directory, yield (path, device,
partition) for all files in that directory
(devices|partitions|suffixes|hashes)_filter are meant to modify the list of
elements that will be iterated. eg: they can be used to exclude some
elements based on a custom condition defined by the caller.
hook_pre_(device|partition|suffix|hash) are called before yielding the
    element, hook_post_(device|partition|suffix|hash) are called after the
element was yielded. They are meant to do some pre/post processing.
eg: saving a progress status.
:param devices: parent directory of the devices to be audited
:param datadir: a directory located under self.devices. This should be
one of the DATADIR constants defined in the account,
container, and object servers.
:param suffix: path name suffix required for all names returned
(ignored if yield_hash_dirs is True)
:param mount_check: Flag to check if a mount check should be performed
on devices
:param logger: a logger object
:param devices_filter: a callable taking (devices, [list of devices]) as
parameters and returning a [list of devices]
:param partitions_filter: a callable taking (datadir_path, [list of parts])
as parameters and returning a [list of parts]
:param suffixes_filter: a callable taking (part_path, [list of suffixes])
as parameters and returning a [list of suffixes]
:param hashes_filter: a callable taking (suff_path, [list of hashes]) as
parameters and returning a [list of hashes]
:param hook_pre_device: a callable taking device_path as parameter
:param hook_post_device: a callable taking device_path as parameter
:param hook_pre_partition: a callable taking part_path as parameter
:param hook_post_partition: a callable taking part_path as parameter
:param hook_pre_suffix: a callable taking suff_path as parameter
:param hook_post_suffix: a callable taking suff_path as parameter
:param hook_pre_hash: a callable taking hash_path as parameter
:param hook_post_hash: a callable taking hash_path as parameter
:param error_counter: a dictionary used to accumulate error counts; may
add keys 'unmounted' and 'unlistable_partitions'
:param yield_hash_dirs: if True, yield hash dirs instead of individual
files
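
    Example (illustrative; the paths and the ``audit`` helper are
    placeholders)::

        locations = audit_location_generator('/srv/node', 'objects',
                                             suffix='.data',
                                             mount_check=False)
        for path, device, partition in locations:
            audit(path)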
"""
device_dir = listdir(devices)
# randomize devices in case of process restart before sweep completed
shuffle(device_dir)
if devices_filter:
device_dir = devices_filter(devices, device_dir)
for device in device_dir:
if mount_check and not ismount(os.path.join(devices, device)):
if error_counter is not None:
error_counter.setdefault('unmounted', [])
error_counter['unmounted'].append(device)
if logger:
logger.warning(
_('Skipping %s as it is not mounted'), device)
continue
if hook_pre_device:
hook_pre_device(os.path.join(devices, device))
datadir_path = os.path.join(devices, device, datadir)
try:
partitions = listdir(datadir_path)
except OSError as e:
# NB: listdir ignores non-existent datadir_path
if error_counter is not None:
error_counter.setdefault('unlistable_partitions', [])
error_counter['unlistable_partitions'].append(datadir_path)
if logger:
logger.warning(_('Skipping %(datadir)s because %(err)s'),
{'datadir': datadir_path, 'err': e})
continue
if partitions_filter:
partitions = partitions_filter(datadir_path, partitions)
for partition in partitions:
part_path = os.path.join(datadir_path, partition)
if hook_pre_partition:
hook_pre_partition(part_path)
try:
suffixes = listdir(part_path)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
if suffixes_filter:
suffixes = suffixes_filter(part_path, suffixes)
for asuffix in suffixes:
suff_path = os.path.join(part_path, asuffix)
if hook_pre_suffix:
hook_pre_suffix(suff_path)
try:
hashes = listdir(suff_path)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
if hashes_filter:
hashes = hashes_filter(suff_path, hashes)
for hsh in hashes:
hash_path = os.path.join(suff_path, hsh)
if hook_pre_hash:
hook_pre_hash(hash_path)
if yield_hash_dirs:
if os.path.isdir(hash_path):
yield hash_path, device, partition
else:
try:
files = sorted(listdir(hash_path), reverse=True)
except OSError as e:
if e.errno != errno.ENOTDIR:
raise
continue
for fname in files:
if suffix and not fname.endswith(suffix):
continue
path = os.path.join(hash_path, fname)
yield path, device, partition
if hook_post_hash:
hook_post_hash(hash_path)
if hook_post_suffix:
hook_post_suffix(suff_path)
if hook_post_partition:
hook_post_partition(part_path)
if hook_post_device:
hook_post_device(os.path.join(devices, device))
class AbstractRateLimiter(object):
# 1,000 milliseconds = 1 second
clock_accuracy = 1000.0
def __init__(self, max_rate, rate_buffer=5, burst_after_idle=False,
running_time=0):
"""
:param max_rate: The maximum rate per second allowed for the process.
Must be > 0 to engage rate-limiting behavior.
:param rate_buffer: Number of seconds the rate counter can drop and be
allowed to catch up (at a faster than listed rate). A larger number
will result in larger spikes in rate but better average accuracy.
:param burst_after_idle: If False (the default) then the rate_buffer
allowance is lost after the rate limiter has not been called for
more than rate_buffer seconds. If True then the rate_buffer
allowance is preserved during idle periods which means that a burst
of requests may be granted immediately after the idle period.
:param running_time: The running time in milliseconds of the next
allowable request. Setting this to any time in the past will cause
the rate limiter to immediately allow requests; setting this to a
future time will cause the rate limiter to deny requests until that
time. If ``burst_after_idle`` is True then this can
be set to current time (ms) to avoid an initial burst, or set to
running_time < (current time - rate_buffer ms) to allow an initial
burst.
"""
self.max_rate = max_rate
self.rate_buffer_ms = rate_buffer * self.clock_accuracy
self.burst_after_idle = burst_after_idle
self.running_time = running_time
self.time_per_incr = (self.clock_accuracy / self.max_rate
if self.max_rate else 0)
def _sleep(self, seconds):
# subclasses should override to implement a sleep
raise NotImplementedError
def is_allowed(self, incr_by=1, now=None, block=False):
"""
Check if the calling process is allowed to proceed according to the
rate limit.
:param incr_by: How much to increment the counter. Useful if you want
to ratelimit 1024 bytes/sec and have differing sizes
of requests. Must be > 0 to engage rate-limiting
behavior.
:param now: The time in seconds; defaults to time.time()
:param block: if True, the call will sleep until the calling process
is allowed to proceed; otherwise the call returns immediately.
        :return: True if the calling process is allowed to proceed, False
otherwise.
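
        Example (illustrative, using the EventletRateLimiter subclass defined
        below; ``process`` is a placeholder)::

            limiter = EventletRateLimiter(max_rate=100)
            for item in work_items:
                limiter.wait()    # sleep just enough to stay under max_rate
                process(item)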
"""
if self.max_rate <= 0 or incr_by <= 0:
return True
now = now or time.time()
# Convert seconds to milliseconds
now = now * self.clock_accuracy
# Calculate time per request in milliseconds
time_per_request = self.time_per_incr * float(incr_by)
# Convert rate_buffer to milliseconds and compare
if now - self.running_time > self.rate_buffer_ms:
self.running_time = now
if self.burst_after_idle:
self.running_time -= self.rate_buffer_ms
if now >= self.running_time:
self.running_time += time_per_request
allowed = True
elif block:
sleep_time = (self.running_time - now) / self.clock_accuracy
# increment running time before sleeping in case the sleep allows
# another thread to inspect the rate limiter state
self.running_time += time_per_request
# Convert diff to a floating point number of seconds and sleep
self._sleep(sleep_time)
allowed = True
else:
allowed = False
return allowed
def wait(self, incr_by=1, now=None):
self.is_allowed(incr_by=incr_by, now=now, block=True)
class EventletRateLimiter(AbstractRateLimiter):
def __init__(self, max_rate, rate_buffer=5, running_time=0,
burst_after_idle=False):
super(EventletRateLimiter, self).__init__(
max_rate, rate_buffer=rate_buffer, running_time=running_time,
burst_after_idle=burst_after_idle)
def _sleep(self, seconds):
eventlet.sleep(seconds)
def ratelimit_sleep(running_time, max_rate, incr_by=1, rate_buffer=5):
"""
Will eventlet.sleep() for the appropriate time so that the max_rate
is never exceeded. If max_rate is 0, will not ratelimit. The
maximum recommended rate should not exceed (1000 * incr_by) a second
as eventlet.sleep() does involve some overhead. Returns running_time
that should be used for subsequent calls.
:param running_time: the running time in milliseconds of the next
allowable request. Best to start at zero.
:param max_rate: The maximum rate per second allowed for the process.
:param incr_by: How much to increment the counter. Useful if you want
to ratelimit 1024 bytes/sec and have differing sizes
of requests. Must be > 0 to engage rate-limiting
behavior.
:param rate_buffer: Number of seconds the rate counter can drop and be
allowed to catch up (at a faster than listed rate).
A larger number will result in larger spikes in rate
but better average accuracy. Must be > 0 to engage
rate-limiting behavior.
:return: The absolute time for the next interval in milliseconds; note
that time could have passed well beyond that point, but the next call
will catch that and skip the sleep.
"""
warnings.warn(
'ratelimit_sleep() is deprecated; use the ``EventletRateLimiter`` '
'class instead.', DeprecationWarning
)
rate_limit = EventletRateLimiter(max_rate, rate_buffer=rate_buffer,
running_time=running_time)
rate_limit.wait(incr_by=incr_by)
return rate_limit.running_time
class ContextPool(GreenPool):
"""GreenPool subclassed to kill its coros when it gets gc'ed"""
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
for coro in list(self.coroutines_running):
coro.kill()
class GreenAsyncPileWaitallTimeout(Timeout):
pass
DEAD = object()
class GreenAsyncPile(object):
"""
Runs jobs in a pool of green threads, and the results can be retrieved by
using this object as an iterator.
This is very similar in principle to eventlet.GreenPile, except it returns
results as they become available rather than in the order they were
launched.
Correlating results with jobs (if necessary) is left to the caller.
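
    Example (illustrative; ``make_request`` is a placeholder)::

        pile = GreenAsyncPile(10)
        for node in nodes:
            pile.spawn(make_request, node)
        responses = list(pile)   # results arrive in completion order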
"""
def __init__(self, size_or_pool):
"""
:param size_or_pool: thread pool size or a pool to use
"""
if isinstance(size_or_pool, GreenPool):
self._pool = size_or_pool
size = self._pool.size
else:
self._pool = GreenPool(size_or_pool)
size = size_or_pool
self._responses = eventlet.queue.LightQueue(size)
self._inflight = 0
self._pending = 0
def _run_func(self, func, args, kwargs):
try:
self._responses.put(func(*args, **kwargs))
except Exception:
if eventlet.hubs.get_hub().debug_exceptions:
traceback.print_exception(*sys.exc_info())
self._responses.put(DEAD)
finally:
self._inflight -= 1
@property
def inflight(self):
return self._inflight
def spawn(self, func, *args, **kwargs):
"""
Spawn a job in a green thread on the pile.
"""
self._pending += 1
self._inflight += 1
self._pool.spawn(self._run_func, func, args, kwargs)
def waitfirst(self, timeout):
"""
Wait up to timeout seconds for first result to come in.
:param timeout: seconds to wait for results
:returns: first item to come back, or None
"""
for result in self._wait(timeout, first_n=1):
return result
def waitall(self, timeout):
"""
Wait timeout seconds for any results to come in.
:param timeout: seconds to wait for results
:returns: list of results accrued in that time
"""
return self._wait(timeout)
def _wait(self, timeout, first_n=None):
results = []
try:
with GreenAsyncPileWaitallTimeout(timeout):
while True:
results.append(next(self))
if first_n and len(results) >= first_n:
break
except (GreenAsyncPileWaitallTimeout, StopIteration):
pass
return results
def __iter__(self):
return self
def next(self):
while True:
try:
rv = self._responses.get_nowait()
except eventlet.queue.Empty:
if self._inflight == 0:
raise StopIteration()
rv = self._responses.get()
self._pending -= 1
if rv is DEAD:
continue
return rv
__next__ = next
class StreamingPile(GreenAsyncPile):
"""
Runs jobs in a pool of green threads, spawning more jobs as results are
retrieved and worker threads become available.
When used as a context manager, has the same worker-killing properties as
:class:`ContextPool`.
"""
def __init__(self, size):
""":param size: number of worker threads to use"""
self.pool = ContextPool(size)
super(StreamingPile, self).__init__(self.pool)
def asyncstarmap(self, func, args_iter):
"""
This is the same as :func:`itertools.starmap`, except that *func* is
executed in a separate green thread for each item, and results won't
necessarily have the same order as inputs.
"""
args_iter = iter(args_iter)
# Initialize the pile
for args in itertools.islice(args_iter, self.pool.size):
self.spawn(func, *args)
# Keep populating the pile as greenthreads become available
for args in args_iter:
yield next(self)
self.spawn(func, *args)
# Drain the pile
for result in self:
yield result
def __enter__(self):
self.pool.__enter__()
return self
def __exit__(self, type, value, traceback):
self.pool.__exit__(type, value, traceback)
def validate_sync_to(value, allowed_sync_hosts, realms_conf):
"""
Validates an X-Container-Sync-To header value, returning the
validated endpoint, realm, and realm_key, or an error string.
:param value: The X-Container-Sync-To header value to validate.
:param allowed_sync_hosts: A list of allowed hosts in endpoints,
if realms_conf does not apply.
:param realms_conf: An instance of
swift.common.container_sync_realms.ContainerSyncRealms to
validate against.
:returns: A tuple of (error_string, validated_endpoint, realm,
              realm_key). The error_string will be None if the rest of the
values have been validated. The validated_endpoint will be
the validated endpoint to sync to. The realm and realm_key
will be set if validation was done through realms_conf.
"""
orig_value = value
value = value.rstrip('/')
if not value:
return (None, None, None, None)
if value.startswith('//'):
if not realms_conf:
return (None, None, None, None)
data = value[2:].split('/')
if len(data) != 4:
return (
_('Invalid X-Container-Sync-To format %r') % orig_value,
None, None, None)
realm, cluster, account, container = data
realm_key = realms_conf.key(realm)
if not realm_key:
return (_('No realm key for %r') % realm, None, None, None)
endpoint = realms_conf.endpoint(realm, cluster)
if not endpoint:
return (
_('No cluster endpoint for %(realm)r %(cluster)r')
% {'realm': realm, 'cluster': cluster},
None, None, None)
return (
None,
'%s/%s/%s' % (endpoint.rstrip('/'), account, container),
realm.upper(), realm_key)
p = urlparse(value)
if p.scheme not in ('http', 'https'):
return (
_('Invalid scheme %r in X-Container-Sync-To, must be "//", '
'"http", or "https".') % p.scheme,
None, None, None)
if not p.path:
return (_('Path required in X-Container-Sync-To'), None, None, None)
if p.params or p.query or p.fragment:
return (
_('Params, queries, and fragments not allowed in '
'X-Container-Sync-To'),
None, None, None)
if p.hostname not in allowed_sync_hosts:
return (
_('Invalid host %r in X-Container-Sync-To') % p.hostname,
None, None, None)
return (None, value, None, None)
def affinity_key_function(affinity_str):
"""Turns an affinity config value into a function suitable for passing to
sort(). After doing so, the array will be sorted with respect to the given
ordering.
For example, if affinity_str is "r1=1, r2z7=2, r2z8=2", then the array
will be sorted with all nodes from region 1 (r1=1) first, then all the
nodes from region 2 zones 7 and 8 (r2z7=2 and r2z8=2), then everything
else.
Note that the order of the pieces of affinity_str is irrelevant; the
priority values are what comes after the equals sign.
If affinity_str is empty or all whitespace, then the resulting function
will not alter the ordering of the nodes.
:param affinity_str: affinity config value, e.g. "r1z2=3"
or "r1=1, r2z1=2, r2z2=2"
:returns: single-argument function
:raises ValueError: if argument invalid
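
    Example (illustrative)::

        keyfn = affinity_key_function("r1=1, r2z7=2, r2z8=2")
        nodes.sort(key=keyfn)   # r1 first, then r2z7/r2z8, then everything else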
"""
affinity_str = affinity_str.strip()
if not affinity_str:
return lambda x: 0
priority_matchers = []
pieces = [s.strip() for s in affinity_str.split(',')]
for piece in pieces:
# matches r<number>=<number> or r<number>z<number>=<number>
match = re.match(r"r(\d+)(?:z(\d+))?=(\d+)$", piece)
if match:
region, zone, priority = match.groups()
region = int(region)
priority = int(priority)
zone = int(zone) if zone else None
matcher = {'region': region, 'priority': priority}
if zone is not None:
matcher['zone'] = zone
priority_matchers.append(matcher)
else:
raise ValueError("Invalid affinity value: %r" % affinity_str)
priority_matchers.sort(key=operator.itemgetter('priority'))
def keyfn(ring_node):
for matcher in priority_matchers:
if (matcher['region'] == ring_node['region']
and ('zone' not in matcher
or matcher['zone'] == ring_node['zone'])):
return matcher['priority']
return 4294967296 # 2^32, i.e. "a big number"
return keyfn
def affinity_locality_predicate(write_affinity_str):
"""
Turns a write-affinity config value into a predicate function for nodes.
The returned value will be a 1-arg function that takes a node dictionary
and returns a true value if it is "local" and a false value otherwise. The
definition of "local" comes from the affinity_str argument passed in here.
For example, if affinity_str is "r1, r2z2", then only nodes where region=1
or where (region=2 and zone=2) are considered local.
If affinity_str is empty or all whitespace, then the resulting function
will consider everything local
:param write_affinity_str: affinity config value, e.g. "r1z2"
or "r1, r2z1, r2z2"
:returns: single-argument function, or None if affinity_str is empty
:raises ValueError: if argument invalid
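
    Example (illustrative)::

        is_local = affinity_locality_predicate("r1, r2z2")
        local_nodes = [n for n in nodes if is_local(n)]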
"""
affinity_str = write_affinity_str.strip()
if not affinity_str:
return None
matchers = []
pieces = [s.strip() for s in affinity_str.split(',')]
for piece in pieces:
# matches r<number> or r<number>z<number>
match = re.match(r"r(\d+)(?:z(\d+))?$", piece)
if match:
region, zone = match.groups()
region = int(region)
zone = int(zone) if zone else None
matcher = {'region': region}
if zone is not None:
matcher['zone'] = zone
matchers.append(matcher)
else:
raise ValueError("Invalid write-affinity value: %r" % affinity_str)
def is_local(ring_node):
for matcher in matchers:
if (matcher['region'] == ring_node['region']
and ('zone' not in matcher
or matcher['zone'] == ring_node['zone'])):
return True
return False
return is_local
def get_remote_client(req):
# remote host for zeus
client = req.headers.get('x-cluster-client-ip')
if not client and 'x-forwarded-for' in req.headers:
# remote host for other lbs
client = req.headers['x-forwarded-for'].split(',')[0].strip()
if not client:
client = req.remote_addr
return client
def human_readable(value):
"""
Returns the number in a human readable format; for example 1048576 = "1Mi".
"""
value = float(value)
index = -1
suffixes = 'KMGTPEZY'
while value >= 1024 and index + 1 < len(suffixes):
index += 1
value = round(value / 1024)
if index == -1:
return '%d' % value
return '%d%si' % (round(value), suffixes[index])
def put_recon_cache_entry(cache_entry, key, item):
"""
Update a recon cache entry item.
If ``item`` is an empty dict then any existing ``key`` in ``cache_entry``
will be deleted. Similarly if ``item`` is a dict and any of its values are
    empty dicts then the corresponding key will be deleted from the nested dict
in ``cache_entry``.
We use nested recon cache entries when the object auditor
runs in parallel or else in 'once' mode with a specified subset of devices.
:param cache_entry: a dict of existing cache entries
:param key: key for item to update
:param item: value for item to update
"""
if isinstance(item, dict):
if not item:
cache_entry.pop(key, None)
return
if key not in cache_entry or key in cache_entry and not \
isinstance(cache_entry[key], dict):
cache_entry[key] = {}
for k, v in item.items():
if v == {}:
cache_entry[key].pop(k, None)
else:
cache_entry[key][k] = v
else:
cache_entry[key] = item
def dump_recon_cache(cache_dict, cache_file, logger, lock_timeout=2,
set_owner=None):
"""Update recon cache values
:param cache_dict: Dictionary of cache key/value pairs to write out
:param cache_file: cache file to update
:param logger: the logger to use to log an encountered error
:param lock_timeout: timeout (in seconds)
:param set_owner: Set owner of recon cache file
"""
try:
with lock_file(cache_file, lock_timeout, unlink=False) as cf:
cache_entry = {}
try:
existing_entry = cf.readline()
if existing_entry:
cache_entry = json.loads(existing_entry)
except ValueError:
# file doesn't have a valid entry, we'll recreate it
pass
for cache_key, cache_value in cache_dict.items():
put_recon_cache_entry(cache_entry, cache_key, cache_value)
tf = None
try:
with NamedTemporaryFile(dir=os.path.dirname(cache_file),
delete=False) as tf:
cache_data = json.dumps(cache_entry, ensure_ascii=True,
sort_keys=True)
tf.write(cache_data.encode('ascii') + b'\n')
if set_owner:
os.chown(tf.name, pwd.getpwnam(set_owner).pw_uid, -1)
renamer(tf.name, cache_file, fsync=False)
finally:
if tf is not None:
try:
os.unlink(tf.name)
except OSError as err:
if err.errno != errno.ENOENT:
raise
except (Exception, Timeout) as err:
logger.exception('Exception dumping recon cache: %s' % err)
def load_recon_cache(cache_file):
"""
Load a recon cache file. Treats missing file as empty.
"""
try:
with open(cache_file) as fh:
return json.load(fh)
except IOError as e:
if e.errno == errno.ENOENT:
return {}
else:
raise
except ValueError: # invalid JSON
return {}
def listdir(path):
try:
return os.listdir(path)
except OSError as err:
if err.errno != errno.ENOENT:
raise
return []
def streq_const_time(s1, s2):
"""Constant-time string comparison.
    :param s1: the first string
    :param s2: the second string
:return: True if the strings are equal.
This function takes two strings and compares them. It is intended to be
used when doing a comparison for authentication purposes to help guard
against timing attacks.
"""
if len(s1) != len(s2):
return False
result = 0
for (a, b) in zip(s1, s2):
result |= ord(a) ^ ord(b)
return result == 0
def pairs(item_list):
"""
Returns an iterator of all pairs of elements from item_list.
:param item_list: items (no duplicates allowed)
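
    Example (illustrative)::

        list(pairs([1, 2, 3]))   # -> [(1, 2), (1, 3), (2, 3)]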
"""
for i, item1 in enumerate(item_list):
for item2 in item_list[(i + 1):]:
yield (item1, item2)
def replication(func):
"""
Decorator to declare which methods are accessible for different
type of servers:
* If option replication_server is None then this decorator
doesn't matter.
* If option replication_server is True then ONLY decorated with
this decorator methods will be started.
* If option replication_server is False then decorated with this
decorator methods will NOT be started.
:param func: function to mark accessible for replication
"""
func.replication = True
return func
def public(func):
"""
Decorator to declare which methods are publicly accessible as HTTP
requests
:param func: function to make public
"""
func.publicly_accessible = True
return func
def private(func):
"""
Decorator to declare which methods are privately accessible as HTTP
requests with an ``X-Backend-Allow-Private-Methods: True`` override
:param func: function to make private
"""
func.privately_accessible = True
return func
def majority_size(n):
return (n // 2) + 1
def quorum_size(n):
"""
quorum size as it applies to services that use 'replication' for data
integrity (Account/Container services). Object quorum_size is defined
on a storage policy basis.
Number of successful backend requests needed for the proxy to consider
the client request successful.
"""
return (n + 1) // 2
def rsync_ip(ip):
"""
Transform ip string to an rsync-compatible form
Will return ipv4 addresses unchanged, but will nest ipv6 addresses
inside square brackets.
:param ip: an ip string (ipv4 or ipv6)
:returns: a string ip address
"""
return '[%s]' % ip if is_valid_ipv6(ip) else ip
def rsync_module_interpolation(template, device):
"""
Interpolate devices variables inside a rsync module template
:param template: rsync module template as a string
:param device: a device from a ring
:returns: a string with all variables replaced by device attributes
"""
replacements = {
'ip': rsync_ip(device.get('ip', '')),
'port': device.get('port', ''),
'replication_ip': rsync_ip(device.get('replication_ip', '')),
'replication_port': device.get('replication_port', ''),
'region': device.get('region', ''),
'zone': device.get('zone', ''),
'device': device.get('device', ''),
'meta': device.get('meta', ''),
}
try:
module = template.format(**replacements)
except KeyError as e:
raise ValueError('Cannot interpolate rsync_module, invalid variable: '
'%s' % e)
return module
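# A minimal interpolation sketch; the template below is an arbitrary example,
# not a recommended rsync configuration:
#
#   template = '{replication_ip}::object_{device}'
#   device = {'replication_ip': '::1', 'device': 'sda1'}
#   rsync_module_interpolation(template, device)  # -> '[::1]::object_sda1'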
def get_valid_utf8_str(str_or_unicode):
"""
Get valid parts of utf-8 str from str, unicode and even invalid utf-8 str
:param str_or_unicode: a string or an unicode which can be invalid utf-8
"""
if six.PY2:
if isinstance(str_or_unicode, six.text_type):
(str_or_unicode, _len) = utf8_encoder(str_or_unicode, 'replace')
(valid_unicode_str, _len) = utf8_decoder(str_or_unicode, 'replace')
else:
# Apparently under py3 we need to go to utf-16 to collapse surrogates?
if isinstance(str_or_unicode, six.binary_type):
try:
(str_or_unicode, _len) = utf8_decoder(str_or_unicode,
'surrogatepass')
except UnicodeDecodeError:
(str_or_unicode, _len) = utf8_decoder(str_or_unicode,
'replace')
(str_or_unicode, _len) = utf16_encoder(str_or_unicode, 'surrogatepass')
(valid_unicode_str, _len) = utf16_decoder(str_or_unicode, 'replace')
return valid_unicode_str.encode('utf-8')
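# For example (Python 3 behaviour; invalid bytes become U+FFFD, encoded as
# b'\xef\xbf\xbd'):
#
#   get_valid_utf8_str(u'caf\u00e9')  # -> b'caf\xc3\xa9'
#   get_valid_utf8_str(b'caf\xc3')    # -> b'caf\xef\xbf\xbd'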
class Everything(object):
"""
A container that contains everything. If "e" is an instance of
Everything, then "x in e" is true for all x.
"""
def __contains__(self, element):
return True
def list_from_csv(comma_separated_str):
"""
Splits the str given and returns a properly stripped list of the comma
separated values.
"""
if comma_separated_str:
return [v.strip() for v in comma_separated_str.split(',') if v.strip()]
return []
def csv_append(csv_string, item):
"""
Appends an item to a comma-separated string.
If the comma-separated string is empty/None, just returns item.
"""
if csv_string:
return ",".join((csv_string, item))
else:
return item
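# For example:
#
#   list_from_csv(' a, b,,c ')  # -> ['a', 'b', 'c']
#   csv_append('a,b', 'c')      # -> 'a,b,c'
#   csv_append('', 'c')         # -> 'c'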
class CloseableChain(object):
"""
Like itertools.chain, but with a close method that will attempt to invoke
its sub-iterators' close methods, if any.
"""
def __init__(self, *iterables):
self.iterables = iterables
self.chained_iter = itertools.chain(*self.iterables)
def __iter__(self):
return self
def __next__(self):
return next(self.chained_iter)
next = __next__ # py2
def close(self):
for it in self.iterables:
close_if_possible(it)
def reiterate(iterable):
"""
Consume the first truthy item from an iterator, then re-chain it to the
rest of the iterator. This is useful when you want to make sure the
prologue to downstream generators has been executed before continuing.
:param iterable: an iterable object
"""
if isinstance(iterable, (list, tuple)):
return iterable
else:
iterator = iter(iterable)
try:
chunk = next(iterator)
while not chunk:
chunk = next(iterator)
return CloseableChain([chunk], iterator)
except StopIteration:
close_if_possible(iterable)
return iter([])
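# For example, leading falsy chunks are consumed and the rest is re-chained:
#
#   list(reiterate(iter(['', '', 'a', 'b'])))  # -> ['a', 'b']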
class InputProxy(object):
"""
File-like object that counts bytes read.
To be swapped in for wsgi.input for accounting purposes.
"""
def __init__(self, wsgi_input):
"""
:param wsgi_input: file-like object to wrap the functionality of
"""
self.wsgi_input = wsgi_input
self.bytes_received = 0
self.client_disconnect = False
def read(self, *args, **kwargs):
"""
Pass read request to the underlying file-like object and
add bytes read to total.
"""
try:
chunk = self.wsgi_input.read(*args, **kwargs)
except Exception:
self.client_disconnect = True
raise
self.bytes_received += len(chunk)
return chunk
def readline(self, *args, **kwargs):
"""
Pass readline request to the underlying file-like object and
add bytes read to total.
"""
try:
line = self.wsgi_input.readline(*args, **kwargs)
except Exception:
self.client_disconnect = True
raise
self.bytes_received += len(line)
return line
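# A sketch of the intended use in accounting middleware (the WSGI environ
# handling here is illustrative only):
#
#   proxy_input = InputProxy(env['wsgi.input'])
#   env['wsgi.input'] = proxy_input
#   # ... dispatch the request ...
#   bytes_in = proxy_input.bytes_received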
class LRUCache(object):
"""
Decorator for size/time bound memoization that evicts the least
recently used members.
"""
PREV, NEXT, KEY, CACHED_AT, VALUE = 0, 1, 2, 3, 4 # link fields
def __init__(self, maxsize=1000, maxtime=3600):
self.maxsize = maxsize
self.maxtime = maxtime
self.reset()
def reset(self):
self.mapping = {}
self.head = [None, None, None, None, None] # oldest
self.tail = [self.head, None, None, None, None] # newest
self.head[self.NEXT] = self.tail
def set_cache(self, value, *key):
while len(self.mapping) >= self.maxsize:
old_next, old_key = self.head[self.NEXT][self.NEXT:self.NEXT + 2]
self.head[self.NEXT], old_next[self.PREV] = old_next, self.head
del self.mapping[old_key]
last = self.tail[self.PREV]
link = [last, self.tail, key, time.time(), value]
self.mapping[key] = last[self.NEXT] = self.tail[self.PREV] = link
return value
def get_cached(self, link, *key):
link_prev, link_next, key, cached_at, value = link
if cached_at + self.maxtime < time.time():
raise KeyError('%r has timed out' % (key,))
link_prev[self.NEXT] = link_next
link_next[self.PREV] = link_prev
last = self.tail[self.PREV]
last[self.NEXT] = self.tail[self.PREV] = link
link[self.PREV] = last
link[self.NEXT] = self.tail
return value
def __call__(self, f):
class LRUCacheWrapped(object):
@functools.wraps(f)
def __call__(im_self, *key):
link = self.mapping.get(key, self.head)
if link is not self.head:
try:
return self.get_cached(link, *key)
except KeyError:
pass
value = f(*key)
self.set_cache(value, *key)
return value
def size(im_self):
"""
Return the size of the cache
"""
return len(self.mapping)
def reset(im_self):
return self.reset()
def get_maxsize(im_self):
return self.maxsize
def set_maxsize(im_self, i):
self.maxsize = i
def get_maxtime(im_self):
return self.maxtime
def set_maxtime(im_self, i):
self.maxtime = i
maxsize = property(get_maxsize, set_maxsize)
maxtime = property(get_maxtime, set_maxtime)
def __repr__(im_self):
return '<%s %r>' % (im_self.__class__.__name__, f)
return LRUCacheWrapped()
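# Memoization sketch using the decorator above; the cache bounds and the
# decorated function are arbitrary examples:
#
#   @LRUCache(maxsize=100, maxtime=60)
#   def lookup(key):
#       return expensive_computation(key)   # hypothetical helper
#
#   lookup('x')   # computed and cached
#   lookup('x')   # served from cache until it expires or is evicted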
class Spliterator(object):
"""
Takes an iterator yielding sliceable things (e.g. strings or lists) and
yields subiterators, each yielding up to the requested number of items
from the source.
>>> si = Spliterator(["abcde", "fg", "hijkl"])
>>> ''.join(si.take(4))
"abcd"
>>> ''.join(si.take(3))
"efg"
>>> ''.join(si.take(1))
"h"
>>> ''.join(si.take(3))
"ijk"
>>> ''.join(si.take(3))
"l" # shorter than requested; this can happen with the last iterator
"""
def __init__(self, source_iterable):
self.input_iterator = iter(source_iterable)
self.leftovers = None
self.leftovers_index = 0
self._iterator_in_progress = False
def take(self, n):
if self._iterator_in_progress:
raise ValueError(
"cannot call take() again until the first iterator is"
" exhausted (has raised StopIteration)")
self._iterator_in_progress = True
try:
if self.leftovers:
# All this string slicing is a little awkward, but it's for
# a good reason. Consider a length N string that someone is
# taking k bytes at a time.
#
# With this implementation, we create one new string of
# length k (copying the bytes) on each call to take(). Once
# the whole input has been consumed, each byte has been
# copied exactly once, giving O(N) bytes copied.
#
# If, instead of this, we were to set leftovers =
# leftovers[k:] and omit leftovers_index, then each call to
# take() would copy k bytes to create the desired substring,
# then copy all the remaining bytes to reset leftovers,
# resulting in an overall O(N^2) bytes copied.
llen = len(self.leftovers) - self.leftovers_index
if llen <= n:
n -= llen
to_yield = self.leftovers[self.leftovers_index:]
self.leftovers = None
self.leftovers_index = 0
yield to_yield
else:
to_yield = self.leftovers[
self.leftovers_index:(self.leftovers_index + n)]
self.leftovers_index += n
n = 0
yield to_yield
while n > 0:
try:
chunk = next(self.input_iterator)
except StopIteration:
return
cl = len(chunk)
if cl <= n:
n -= cl
yield chunk
else:
self.leftovers = chunk
self.leftovers_index = n
yield chunk[:n]
n = 0
finally:
self._iterator_in_progress = False
def ismount(path):
"""
Test whether a path is a mount point. This will catch any
exceptions and translate them into a False return value
Use ismount_raw to have the exceptions raised instead.
"""
try:
return ismount_raw(path)
except OSError:
return False
def ismount_raw(path):
"""
Test whether a path is a mount point. Whereas ismount will catch
any exceptions and just return False, this raw version will not
catch exceptions.
This is code hijacked from C Python 2.6.8, adapted to remove the extra
lstat() system call.
"""
try:
s1 = os.lstat(path)
except os.error as err:
if err.errno == errno.ENOENT:
# It doesn't exist -- so not a mount point :-)
return False
raise
if stat.S_ISLNK(s1.st_mode):
# Some environments (like vagrant-swift-all-in-one) use a symlink at
# the device level but could still provide a stubfile in the target
# to indicate that it should be treated as a mount point for swift's
# purposes.
if os.path.isfile(os.path.join(path, ".ismount")):
return True
# Otherwise, a symlink can never be a mount point
return False
s2 = os.lstat(os.path.join(path, '..'))
dev1 = s1.st_dev
dev2 = s2.st_dev
if dev1 != dev2:
# path/.. on a different device as path
return True
ino1 = s1.st_ino
ino2 = s2.st_ino
if ino1 == ino2:
# path/.. is the same i-node as path
return True
# Device and inode checks are not properly working inside containerized
# environments, therefore using a workaround to check if there is a
# stubfile placed by an operator
if os.path.isfile(os.path.join(path, ".ismount")):
return True
return False
def close_if_possible(maybe_closable):
close_method = getattr(maybe_closable, 'close', None)
if callable(close_method):
return close_method()
@contextmanager
def closing_if_possible(maybe_closable):
"""
Like contextlib.closing(), but doesn't crash if the object lacks a close()
method.
PEP 333 (WSGI) says: "If the iterable returned by the application has a
close() method, the server or gateway must call that method upon
completion of the current request[.]" This function makes that easier.
"""
try:
yield maybe_closable
finally:
close_if_possible(maybe_closable)
def drain_and_close(response_or_app_iter):
"""
Drain and close a swob or WSGI response.
This ensures we don't log a 499 in the proxy just because we realized we
don't care about the body of an error.
"""
app_iter = getattr(response_or_app_iter, 'app_iter', response_or_app_iter)
if app_iter is None: # for example, if we used the Response.body property
return
for _chunk in app_iter:
pass
close_if_possible(app_iter)
_rfc_token = r'[^()<>@,;:\"/\[\]?={}\x00-\x20\x7f]+'
_rfc_extension_pattern = re.compile(
r'(?:\s*;\s*(' + _rfc_token + r")\s*(?:=\s*(" + _rfc_token +
r'|"(?:[^"\\]|\\.)*"))?)')
_content_range_pattern = re.compile(r'^bytes (\d+)-(\d+)/(\d+)$')
def parse_content_range(content_range):
"""
Parse a content-range header into (first_byte, last_byte, total_size).
See RFC 7233 section 4.2 for details on the header format, but it's
basically "Content-Range: bytes ${start}-${end}/${total}".
:param content_range: Content-Range header value to parse,
e.g. "bytes 100-1249/49004"
:returns: 3-tuple (start, end, total)
:raises ValueError: if malformed
"""
found = re.search(_content_range_pattern, content_range)
if not found:
raise ValueError("malformed Content-Range %r" % (content_range,))
return tuple(int(x) for x in found.groups())
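# For example:
#
#   parse_content_range('bytes 100-1249/49004')  # -> (100, 1249, 49004)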
def parse_content_type(content_type):
"""
Parse a content-type and its parameters into values.
RFC 2616 sec 14.17 and 3.7 are pertinent.
**Examples**::
'text/plain; charset=UTF-8' -> ('text/plain', [('charset', 'UTF-8')])
'text/plain; charset=UTF-8; level=1' ->
('text/plain', [('charset', 'UTF-8'), ('level', '1')])
:param content_type: content_type to parse
:returns: a tuple containing (content type, list of k, v parameter tuples)
"""
parm_list = []
if ';' in content_type:
content_type, parms = content_type.split(';', 1)
parms = ';' + parms
for m in _rfc_extension_pattern.findall(parms):
key = m[0].strip()
value = m[1].strip()
parm_list.append((key, value))
return content_type, parm_list
def extract_swift_bytes(content_type):
"""
Parse a content-type and return a tuple containing:
- the content_type string minus any swift_bytes param,
- the swift_bytes value or None if the param was not found
:param content_type: a content-type string
:return: a tuple of (content-type, swift_bytes or None)
"""
content_type, params = parse_content_type(content_type)
swift_bytes = None
for k, v in params:
if k == 'swift_bytes':
swift_bytes = v
else:
content_type += ';%s=%s' % (k, v)
return content_type, swift_bytes
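# For example:
#
#   extract_swift_bytes('text/plain; swift_bytes=2048')
#   # -> ('text/plain', '2048')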
def override_bytes_from_content_type(listing_dict, logger=None):
"""
Takes a dict from a container listing and overrides the content_type,
bytes fields if swift_bytes is set.
"""
listing_dict['content_type'], swift_bytes = extract_swift_bytes(
listing_dict['content_type'])
if swift_bytes is not None:
try:
listing_dict['bytes'] = int(swift_bytes)
except ValueError:
if logger:
logger.exception(_("Invalid swift_bytes"))
def clean_content_type(value):
if ';' in value:
left, right = value.rsplit(';', 1)
if right.lstrip().startswith('swift_bytes='):
return left
return value
def quote(value, safe='/'):
"""
Patched version of urllib.quote that encodes utf-8 strings before quoting
"""
quoted = _quote(get_valid_utf8_str(value), safe)
if isinstance(value, six.binary_type):
quoted = quoted.encode('utf-8')
return quoted
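# For example:
#
#   quote('/v1/AUTH_test/c/o with space')
#   # -> '/v1/AUTH_test/c/o%20with%20space'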
def get_expirer_container(x_delete_at, expirer_divisor, acc, cont, obj):
"""
Returns an expiring object container name for given X-Delete-At and
(native string) a/c/o.
"""
shard_int = int(hash_path(acc, cont, obj), 16) % 100
return normalize_delete_at_timestamp(
int(x_delete_at) // expirer_divisor * expirer_divisor - shard_int)
class _MultipartMimeFileLikeObject(object):
def __init__(self, wsgi_input, boundary, input_buffer, read_chunk_size):
self.no_more_data_for_this_file = False
self.no_more_files = False
self.wsgi_input = wsgi_input
self.boundary = boundary
self.input_buffer = input_buffer
self.read_chunk_size = read_chunk_size
def read(self, length=None):
if not length:
length = self.read_chunk_size
if self.no_more_data_for_this_file:
return b''
# read enough data to know whether we're going to run
# into a boundary in next [length] bytes
if len(self.input_buffer) < length + len(self.boundary) + 2:
to_read = length + len(self.boundary) + 2
while to_read > 0:
try:
chunk = self.wsgi_input.read(to_read)
except (IOError, ValueError) as e:
raise swift.common.exceptions.ChunkReadError(str(e))
to_read -= len(chunk)
self.input_buffer += chunk
if not chunk:
self.no_more_files = True
break
boundary_pos = self.input_buffer.find(self.boundary)
# boundary does not exist in the next (length) bytes
if boundary_pos == -1 or boundary_pos > length:
ret = self.input_buffer[:length]
self.input_buffer = self.input_buffer[length:]
# if it does, just return data up to the boundary
else:
ret, self.input_buffer = self.input_buffer.split(self.boundary, 1)
self.no_more_files = self.input_buffer.startswith(b'--')
self.no_more_data_for_this_file = True
self.input_buffer = self.input_buffer[2:]
return ret
def readline(self):
if self.no_more_data_for_this_file:
return b''
boundary_pos = newline_pos = -1
while newline_pos < 0 and boundary_pos < 0:
try:
chunk = self.wsgi_input.read(self.read_chunk_size)
except (IOError, ValueError) as e:
raise swift.common.exceptions.ChunkReadError(str(e))
self.input_buffer += chunk
newline_pos = self.input_buffer.find(b'\r\n')
boundary_pos = self.input_buffer.find(self.boundary)
if not chunk:
self.no_more_files = True
break
# found a newline
if newline_pos >= 0 and \
(boundary_pos < 0 or newline_pos < boundary_pos):
# Use self.read to ensure any logic there happens...
ret = b''
to_read = newline_pos + 2
while to_read > 0:
chunk = self.read(to_read)
# Should never happen since we're reading from input_buffer,
# but just for completeness...
if not chunk:
break
to_read -= len(chunk)
ret += chunk
return ret
else: # no newlines, just return up to next boundary
return self.read(len(self.input_buffer))
def iter_multipart_mime_documents(wsgi_input, boundary, read_chunk_size=4096):
"""
Given a multi-part-mime-encoded input file object and boundary,
yield file-like objects for each part. Note that this does not
split each part into headers and body; the caller is responsible
for doing that if necessary.
:param wsgi_input: The file-like object to read from.
:param boundary: The mime boundary to separate new file-like objects on.
:returns: A generator of file-like objects for each part.
:raises MimeInvalid: if the document is malformed
"""
boundary = b'--' + boundary
blen = len(boundary) + 2 # \r\n
try:
got = wsgi_input.readline(blen)
while got == b'\r\n':
got = wsgi_input.readline(blen)
except (IOError, ValueError) as e:
raise swift.common.exceptions.ChunkReadError(str(e))
if got.strip() != boundary:
raise swift.common.exceptions.MimeInvalid(
'invalid starting boundary: wanted %r, got %r' % (boundary, got))
boundary = b'\r\n' + boundary
input_buffer = b''
done = False
while not done:
it = _MultipartMimeFileLikeObject(wsgi_input, boundary, input_buffer,
read_chunk_size)
yield it
done = it.no_more_files
input_buffer = it.input_buffer
def parse_mime_headers(doc_file):
"""
Takes a file-like object containing a MIME document and returns a
HeaderKeyDict containing the headers. The body of the message is not
consumed: the position in doc_file is left at the beginning of the body.
This function was inspired by the Python standard library's
http.client.parse_headers.
:param doc_file: binary file-like object containing a MIME document
:returns: a swift.common.swob.HeaderKeyDict containing the headers
"""
headers = []
while True:
line = doc_file.readline()
done = line in (b'\r\n', b'\n', b'')
if six.PY3:
try:
line = line.decode('utf-8')
except UnicodeDecodeError:
line = line.decode('latin1')
headers.append(line)
if done:
break
if six.PY3:
header_string = ''.join(headers)
else:
header_string = b''.join(headers)
headers = email.parser.Parser().parsestr(header_string)
return HeaderKeyDict(headers)
def mime_to_document_iters(input_file, boundary, read_chunk_size=4096):
"""
Takes a file-like object containing a multipart MIME document and
returns an iterator of (headers, body-file) tuples.
:param input_file: file-like object with the MIME doc in it
:param boundary: MIME boundary, sans dashes
(e.g. "divider", not "--divider")
:param read_chunk_size: size of strings read via input_file.read()
"""
if six.PY3 and isinstance(boundary, str):
# Since the boundary is in client-supplied headers, it can contain
# garbage that trips us up, and we don't like client-induced 500s.
boundary = boundary.encode('latin-1', errors='replace')
doc_files = iter_multipart_mime_documents(input_file, boundary,
read_chunk_size)
for i, doc_file in enumerate(doc_files):
# this consumes the headers and leaves just the body in doc_file
headers = parse_mime_headers(doc_file)
yield (headers, doc_file)
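# A small parsing sketch; the boundary and payload are made up for
# illustration:
#
#   from io import BytesIO
#   doc = BytesIO(b'--bnd\r\n'
#                 b'Content-Type: text/plain\r\n\r\n'
#                 b'hello\r\n'
#                 b'--bnd--')
#   for headers, body in mime_to_document_iters(doc, 'bnd'):
#       headers['Content-Type'], body.read()   # 'text/plain', b'hello'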
def maybe_multipart_byteranges_to_document_iters(app_iter, content_type):
"""
Takes an iterator that may or may not contain a multipart MIME document
as well as content type and returns an iterator of body iterators.
:param app_iter: iterator that may contain a multipart MIME document
:param content_type: content type of the app_iter, used to determine
whether it contains a multipart document and, if
so, what the boundary is between documents
"""
content_type, params_list = parse_content_type(content_type)
if content_type != 'multipart/byteranges':
yield app_iter
return
body_file = FileLikeIter(app_iter)
boundary = dict(params_list)['boundary']
for _headers, body in mime_to_document_iters(body_file, boundary):
yield (chunk for chunk in iter(lambda: body.read(65536), b''))
def document_iters_to_multipart_byteranges(ranges_iter, boundary):
"""
Takes an iterator of range iters and yields a multipart/byteranges MIME
document suitable for sending as the body of a multi-range 206 response.
See document_iters_to_http_response_body for parameter descriptions.
"""
if not isinstance(boundary, bytes):
boundary = boundary.encode('ascii')
divider = b"--" + boundary + b"\r\n"
terminator = b"--" + boundary + b"--"
for range_spec in ranges_iter:
start_byte = range_spec["start_byte"]
end_byte = range_spec["end_byte"]
entity_length = range_spec.get("entity_length", "*")
content_type = range_spec["content_type"]
part_iter = range_spec["part_iter"]
if not isinstance(content_type, bytes):
content_type = str(content_type).encode('utf-8')
if not isinstance(entity_length, bytes):
entity_length = str(entity_length).encode('utf-8')
part_header = b''.join((
divider,
b"Content-Type: ", content_type, b"\r\n",
b"Content-Range: ", b"bytes %d-%d/%s\r\n" % (
start_byte, end_byte, entity_length),
b"\r\n"
))
yield part_header
for chunk in part_iter:
yield chunk
yield b"\r\n"
yield terminator
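# Each range_spec consumed above is a dict shaped roughly like this (values
# are illustrative):
#
#   {'start_byte': 0, 'end_byte': 3, 'entity_length': 10,
#    'content_type': 'text/plain', 'part_iter': iter([b'abcd'])}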
def document_iters_to_http_response_body(ranges_iter, boundary, multipart,
logger):
"""
Takes an iterator of range iters and turns it into an appropriate
HTTP response body, whether that's multipart/byteranges or not.
This is almost, but not quite, the inverse of
request_helpers.http_response_to_document_iters(). This function only
yields chunks of the body, not any headers.
:param ranges_iter: an iterator of dictionaries, one per range.
Each dictionary must contain at least the following key:
"part_iter": iterator yielding the bytes in the range
Additionally, if multipart is True, then the following other keys
are required:
"start_byte": index of the first byte in the range
"end_byte": index of the last byte in the range
"content_type": value for the range's Content-Type header
Finally, there is one optional key that is used in the
multipart/byteranges case:
"entity_length": length of the requested entity (not necessarily
equal to the response length). If omitted, "*" will be used.
Each part_iter will be exhausted prior to calling next(ranges_iter).
:param boundary: MIME boundary to use, sans dashes (e.g. "boundary", not
"--boundary").
:param multipart: True if the response should be multipart/byteranges,
False otherwise. This should be True if and only if you have 2 or
more ranges.
:param logger: a logger
"""
if multipart:
return document_iters_to_multipart_byteranges(ranges_iter, boundary)
else:
try:
response_body_iter = next(ranges_iter)['part_iter']
except StopIteration:
return ''
# We need to make sure ranges_iter does not get garbage-collected
# before response_body_iter is exhausted. The reason is that
# ranges_iter has a finally block that calls close_swift_conn, and
# so if that finally block fires before we read response_body_iter,
# there's nothing there.
def string_along(useful_iter, useless_iter_iter, logger):
with closing_if_possible(useful_iter):
for x in useful_iter:
yield x
try:
next(useless_iter_iter)
except StopIteration:
pass
else:
logger.warning(
_("More than one part in a single-part response?"))
return string_along(response_body_iter, ranges_iter, logger)
def multipart_byteranges_to_document_iters(input_file, boundary,
read_chunk_size=4096):
"""
Takes a file-like object containing a multipart/byteranges MIME document
(see RFC 7233, Appendix A) and returns an iterator of (first-byte,
last-byte, length, document-headers, body-file) 5-tuples.
:param input_file: file-like object with the MIME doc in it
:param boundary: MIME boundary, sans dashes
(e.g. "divider", not "--divider")
:param read_chunk_size: size of strings read via input_file.read()
"""
for headers, body in mime_to_document_iters(input_file, boundary,
read_chunk_size):
first_byte, last_byte, length = parse_content_range(
headers.get('content-range'))
yield (first_byte, last_byte, length, headers.items(), body)
#: Regular expression to match form attributes.
ATTRIBUTES_RE = re.compile(r'(\w+)=(".*?"|[^";]+)(; ?|$)')
def parse_content_disposition(header):
"""
Given the value of a header like:
Content-Disposition: form-data; name="somefile"; filename="test.html"
Return data like
("form-data", {"name": "somefile", "filename": "test.html"})
:param header: Value of a header (the part after the ': ').
:returns: (value name, dict) of the attribute data parsed (see above).
"""
attributes = {}
attrs = ''
if ';' in header:
header, attrs = [x.strip() for x in header.split(';', 1)]
m = True
while m:
m = ATTRIBUTES_RE.match(attrs)
if m:
attrs = attrs[len(m.group(0)):]
attributes[m.group(1)] = m.group(2).strip('"')
return header, attributes
class sockaddr_alg(ctypes.Structure):
_fields_ = [("salg_family", ctypes.c_ushort),
("salg_type", ctypes.c_ubyte * 14),
("salg_feat", ctypes.c_uint),
("salg_mask", ctypes.c_uint),
("salg_name", ctypes.c_ubyte * 64)]
_bound_md5_sockfd = None
def get_md5_socket():
"""
Get an MD5 socket file descriptor. One can MD5 data with it by writing it
to the socket with os.write, then os.read the 16 bytes of the checksum out
later.
NOTE: It is the caller's responsibility to ensure that os.close() is
called on the returned file descriptor. This is a bare file descriptor,
not a Python object. It doesn't close itself.
"""
# Linux's AF_ALG sockets work like this:
#
# First, initialize a socket with socket() and bind(). This tells the
# socket what algorithm to use, as well as setting up any necessary bits
# like crypto keys. Of course, MD5 doesn't need any keys, so it's just the
# algorithm name.
#
# Second, to hash some data, get a second socket by calling accept() on
# the first socket. Write data to the socket, then when finished, read the
# checksum from the socket and close it. This lets you checksum multiple
# things without repeating all the setup code each time.
#
# Since we only need to bind() one socket, we do that here and save it for
# future re-use. That way, we only use one file descriptor to get an MD5
# socket instead of two, and we also get to save some syscalls.
global _bound_md5_sockfd
global _libc_socket
global _libc_bind
global _libc_accept
if _libc_accept is None:
_libc_accept = load_libc_function('accept', fail_if_missing=True)
if _libc_socket is None:
_libc_socket = load_libc_function('socket', fail_if_missing=True)
if _libc_bind is None:
_libc_bind = load_libc_function('bind', fail_if_missing=True)
# Do this at first call rather than at import time so that we don't use a
# file descriptor on systems that aren't using any MD5 sockets.
if _bound_md5_sockfd is None:
sockaddr_setup = sockaddr_alg(
AF_ALG,
(ord('h'), ord('a'), ord('s'), ord('h'), 0),
0, 0,
(ord('m'), ord('d'), ord('5'), 0))
hash_sockfd = _libc_socket(ctypes.c_int(AF_ALG),
ctypes.c_int(socket.SOCK_SEQPACKET),
ctypes.c_int(0))
if hash_sockfd < 0:
raise IOError(ctypes.get_errno(),
"Failed to initialize MD5 socket")
bind_result = _libc_bind(ctypes.c_int(hash_sockfd),
ctypes.pointer(sockaddr_setup),
ctypes.c_int(ctypes.sizeof(sockaddr_alg)))
if bind_result < 0:
os.close(hash_sockfd)
raise IOError(ctypes.get_errno(), "Failed to bind MD5 socket")
_bound_md5_sockfd = hash_sockfd
md5_sockfd = _libc_accept(ctypes.c_int(_bound_md5_sockfd), None, 0)
if md5_sockfd < 0:
raise IOError(ctypes.get_errno(), "Failed to accept MD5 socket")
return md5_sockfd
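# Usage sketch, per the docstring above (Linux AF_ALG only; error handling
# omitted):
#
#   fd = get_md5_socket()
#   os.write(fd, b'some data')
#   digest = os.read(fd, 16)   # raw 16-byte MD5 digest
#   os.close(fd)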
try:
_test_md5 = hashlib.md5(usedforsecurity=False) # nosec
def md5(string=b'', usedforsecurity=True):
"""Return an md5 hashlib object using usedforsecurity parameter
For python distributions that support the usedforsecurity keyword
parameter, this passes the parameter through as expected.
See https://bugs.python.org/issue9216
"""
return hashlib.md5(string, usedforsecurity=usedforsecurity) # nosec
except TypeError:
def md5(string=b'', usedforsecurity=True):
"""Return an md5 hashlib object without usedforsecurity parameter
For python distributions that do not yet support this keyword
parameter, we drop the parameter
"""
return hashlib.md5(string) # nosec
class ShardRangeOuterBound(object):
"""
A custom singleton type to be subclassed for the outer bounds of
ShardRanges.
"""
_singleton = None
def __new__(cls):
if cls is ShardRangeOuterBound:
raise TypeError('ShardRangeOuterBound is an abstract class; '
'only subclasses should be instantiated')
if cls._singleton is None:
cls._singleton = super(ShardRangeOuterBound, cls).__new__(cls)
return cls._singleton
def __str__(self):
return ''
def __repr__(self):
return type(self).__name__
def __bool__(self):
return False
__nonzero__ = __bool__
class ShardRange(object):
"""
A ShardRange encapsulates sharding state related to a container including
lower and upper bounds that define the object namespace for which the
container is responsible.
Shard ranges may be persisted in a container database. Timestamps
associated with subsets of the shard range attributes are used to resolve
conflicts when a shard range needs to be merged with an existing shard
range record and the most recent version of an attribute should be
persisted.
:param name: the name of the shard range; this should take the form of a
path to a container i.e. <account_name>/<container_name>.
:param timestamp: a timestamp that represents the time at which the
shard range's ``lower``, ``upper`` or ``deleted`` attributes were
last modified.
:param lower: the lower bound of object names contained in the shard range;
the lower bound *is not* included in the shard range namespace.
:param upper: the upper bound of object names contained in the shard range;
the upper bound *is* included in the shard range namespace.
:param object_count: the number of objects in the shard range; defaults to
zero.
:param bytes_used: the number of bytes in the shard range; defaults to
zero.
:param meta_timestamp: a timestamp that represents the time at which the
shard range's ``object_count`` and ``bytes_used`` were last updated;
defaults to the value of ``timestamp``.
:param deleted: a boolean; if True the shard range is considered to be
deleted.
:param state: the state; must be one of ShardRange.STATES; defaults to
CREATED.
:param state_timestamp: a timestamp that represents the time at which
``state`` was forced to its current value; defaults to the value of
``timestamp``. This timestamp is typically not updated with every
change of ``state`` because in general conflicts in ``state``
attributes are resolved by choosing the larger ``state`` value.
However, when this rule does not apply, for example when changing state
from ``SHARDED`` to ``ACTIVE``, the ``state_timestamp`` may be advanced
so that the new ``state`` value is preferred over any older ``state``
value.
:param epoch: optional epoch timestamp which represents the time at which
sharding was enabled for a container.
:param reported: optional indicator that this shard and its stats have
been reported to the root container.
:param tombstones: the number of tombstones in the shard range; defaults to
-1 to indicate that the value is unknown.
"""
FOUND = 10
CREATED = 20
CLEAVED = 30
ACTIVE = 40
SHRINKING = 50
SHARDING = 60
SHARDED = 70
SHRUNK = 80
STATES = {FOUND: 'found',
CREATED: 'created',
CLEAVED: 'cleaved',
ACTIVE: 'active',
SHRINKING: 'shrinking',
SHARDING: 'sharding',
SHARDED: 'sharded',
SHRUNK: 'shrunk'}
STATES_BY_NAME = dict((v, k) for k, v in STATES.items())
@functools.total_ordering
class MaxBound(ShardRangeOuterBound):
# singleton for maximum bound
def __ge__(self, other):
return True
@functools.total_ordering
class MinBound(ShardRangeOuterBound):
# singleton for minimum bound
def __le__(self, other):
return True
MIN = MinBound()
MAX = MaxBound()
def __init__(self, name, timestamp, lower=MIN, upper=MAX,
object_count=0, bytes_used=0, meta_timestamp=None,
deleted=False, state=None, state_timestamp=None, epoch=None,
reported=False, tombstones=-1):
self.account = self.container = self._timestamp = \
self._meta_timestamp = self._state_timestamp = self._epoch = None
self._lower = ShardRange.MIN
self._upper = ShardRange.MAX
self._deleted = False
self._state = None
self.name = name
self.timestamp = timestamp
self.lower = lower
self.upper = upper
self.deleted = deleted
self.object_count = object_count
self.bytes_used = bytes_used
self.meta_timestamp = meta_timestamp
self.state = self.FOUND if state is None else state
self.state_timestamp = state_timestamp
self.epoch = epoch
self.reported = reported
self.tombstones = tombstones
@classmethod
def sort_key(cls, sr):
# defines the sort order for shard ranges
# note if this ever changes to *not* sort by upper first then it breaks
# a key assumption for bisect, which is used by utils.find_shard_range
return sr.upper, sr.state, sr.lower, sr.name
@classmethod
def _encode(cls, value):
if six.PY2 and isinstance(value, six.text_type):
return value.encode('utf-8')
if six.PY3 and isinstance(value, six.binary_type):
# This should never fail -- the value should always be coming from
# valid swift paths, which means UTF-8
return value.decode('utf-8')
return value
def _encode_bound(self, bound):
if isinstance(bound, ShardRangeOuterBound):
return bound
if not (isinstance(bound, six.text_type) or
isinstance(bound, six.binary_type)):
raise TypeError('must be a string type')
return self._encode(bound)
@classmethod
def _make_container_name(cls, root_container, parent_container, timestamp,
index):
if not isinstance(parent_container, bytes):
parent_container = parent_container.encode('utf-8')
return "%s-%s-%s-%s" % (root_container,
md5(parent_container,
usedforsecurity=False).hexdigest(),
cls._to_timestamp(timestamp).internal,
index)
@classmethod
def make_path(cls, shards_account, root_container, parent_container,
timestamp, index):
"""
Returns a path for a shard container that is valid to use as a name
when constructing a :class:`~swift.common.utils.ShardRange`.
:param shards_account: the hidden internal account to which the shard
container belongs.
:param root_container: the name of the root container for the shard.
:param parent_container: the name of the parent container for the
shard; for initial first generation shards this should be the same
as ``root_container``; for shards of shards this should be the name
of the sharding shard container.
:param timestamp: an instance of :class:`~swift.common.utils.Timestamp`
:param index: a unique index that will distinguish the path from any
other path generated using the same combination of
``shards_account``, ``root_container``, ``parent_container`` and
``timestamp``.
:return: a string of the form <account_name>/<container_name>
"""
shard_container = cls._make_container_name(
root_container, parent_container, timestamp, index)
return '%s/%s' % (shards_account, shard_container)
@classmethod
def _to_timestamp(cls, timestamp):
if timestamp is None or isinstance(timestamp, Timestamp):
return timestamp
return Timestamp(timestamp)
@property
def name(self):
return '%s/%s' % (self.account, self.container)
@name.setter
def name(self, path):
path = self._encode(path)
if not path or len(path.split('/')) != 2 or not all(path.split('/')):
raise ValueError(
"Name must be of the form '<account>/<container>', got %r" %
path)
self.account, self.container = path.split('/')
@property
def timestamp(self):
return self._timestamp
@timestamp.setter
def timestamp(self, ts):
if ts is None:
raise TypeError('timestamp cannot be None')
self._timestamp = self._to_timestamp(ts)
@property
def meta_timestamp(self):
if self._meta_timestamp is None:
return self.timestamp
return self._meta_timestamp
@meta_timestamp.setter
def meta_timestamp(self, ts):
self._meta_timestamp = self._to_timestamp(ts)
@property
def lower(self):
return self._lower
@property
def lower_str(self):
return str(self.lower)
@lower.setter
def lower(self, value):
with warnings.catch_warnings():
warnings.simplefilter('ignore', UnicodeWarning)
if value in (None, b'', u''):
value = ShardRange.MIN
try:
value = self._encode_bound(value)
except TypeError as err:
raise TypeError('lower %s' % err)
if value > self._upper:
raise ValueError(
'lower (%r) must be less than or equal to upper (%r)' %
(value, self.upper))
self._lower = value
@property
def end_marker(self):
return self.upper_str + '\x00' if self.upper else ''
@property
def upper(self):
return self._upper
@property
def upper_str(self):
return str(self.upper)
@upper.setter
def upper(self, value):
with warnings.catch_warnings():
warnings.simplefilter('ignore', UnicodeWarning)
if value in (None, b'', u''):
value = ShardRange.MAX
try:
value = self._encode_bound(value)
except TypeError as err:
raise TypeError('upper %s' % err)
if value < self._lower:
raise ValueError(
'upper (%r) must be greater than or equal to lower (%r)' %
(value, self.lower))
self._upper = value
@property
def object_count(self):
return self._count
@object_count.setter
def object_count(self, count):
count = int(count)
if count < 0:
raise ValueError('object_count cannot be < 0')
self._count = count
@property
def bytes_used(self):
return self._bytes
@bytes_used.setter
def bytes_used(self, bytes_used):
bytes_used = int(bytes_used)
if bytes_used < 0:
raise ValueError('bytes_used cannot be < 0')
self._bytes = bytes_used
@property
def tombstones(self):
return self._tombstones
@tombstones.setter
def tombstones(self, tombstones):
self._tombstones = int(tombstones)
@property
def row_count(self):
"""
Returns the total number of rows in the shard range i.e. the sum of
objects and tombstones.
:return: the row count
"""
return self.object_count + max(self.tombstones, 0)
def update_meta(self, object_count, bytes_used, meta_timestamp=None):
"""
Set the object stats metadata to the given values and update the
meta_timestamp to the current time.
:param object_count: should be an integer
:param bytes_used: should be an integer
:param meta_timestamp: timestamp for metadata; if not given the
current time will be set.
:raises ValueError: if ``object_count`` or ``bytes_used`` cannot be
cast to an int, or if meta_timestamp is neither None nor can be
cast to a :class:`~swift.common.utils.Timestamp`.
"""
if self.object_count != int(object_count):
self.object_count = int(object_count)
self.reported = False
if self.bytes_used != int(bytes_used):
self.bytes_used = int(bytes_used)
self.reported = False
if meta_timestamp is None:
self.meta_timestamp = Timestamp.now()
else:
self.meta_timestamp = meta_timestamp
def update_tombstones(self, tombstones, meta_timestamp=None):
"""
Set the tombstones metadata to the given values and update the
meta_timestamp to the current time.
:param tombstones: should be an integer
:param meta_timestamp: timestamp for metadata; if not given the
current time will be set.
:raises ValueError: if ``tombstones`` cannot be cast to an int, or
if meta_timestamp is neither None nor can be cast to a
:class:`~swift.common.utils.Timestamp`.
"""
tombstones = int(tombstones)
if 0 <= tombstones != self.tombstones:
self.tombstones = tombstones
self.reported = False
if meta_timestamp is None:
self.meta_timestamp = Timestamp.now()
else:
self.meta_timestamp = meta_timestamp
def increment_meta(self, object_count, bytes_used):
"""
Increment the object stats metadata by the given values and update the
meta_timestamp to the current time.
:param object_count: should be an integer
:param bytes_used: should be an integer
:raises ValueError: if ``object_count`` or ``bytes_used`` cannot be
cast to an int.
"""
self.update_meta(self.object_count + int(object_count),
self.bytes_used + int(bytes_used))
@classmethod
def resolve_state(cls, state):
"""
Given a value that may be either the name or the number of a state
return a tuple of (state number, state name).
:param state: Either a string state name or an integer state number.
:return: A tuple (state number, state name)
:raises ValueError: if ``state`` is neither a valid state name nor a
valid state number.
"""
try:
try:
# maybe it's a number
float_state = float(state)
state_num = int(float_state)
if state_num != float_state:
raise ValueError('Invalid state %r' % state)
state_name = cls.STATES[state_num]
except (ValueError, TypeError):
# maybe it's a state name
state_name = state.lower()
state_num = cls.STATES_BY_NAME[state_name]
except (KeyError, AttributeError):
raise ValueError('Invalid state %r' % state)
return state_num, state_name
@property
def state(self):
return self._state
@state.setter
def state(self, state):
self._state = self.resolve_state(state)[0]
@property
def state_text(self):
return self.STATES[self.state]
@property
def state_timestamp(self):
if self._state_timestamp is None:
return self.timestamp
return self._state_timestamp
@state_timestamp.setter
def state_timestamp(self, ts):
self._state_timestamp = self._to_timestamp(ts)
@property
def epoch(self):
return self._epoch
@epoch.setter
def epoch(self, epoch):
self._epoch = self._to_timestamp(epoch)
@property
def reported(self):
return self._reported
@reported.setter
def reported(self, value):
self._reported = bool(value)
def update_state(self, state, state_timestamp=None):
"""
Set state to the given value and optionally update the state_timestamp
to the given time.
:param state: new state, should be an integer
:param state_timestamp: timestamp for state; if not given the
state_timestamp will not be changed.
:return: True if the state or state_timestamp was changed, False
otherwise
"""
if state_timestamp is None and self.state == state:
return False
self.state = state
if state_timestamp is not None:
self.state_timestamp = state_timestamp
self.reported = False
return True
@property
def deleted(self):
return self._deleted
@deleted.setter
def deleted(self, value):
self._deleted = bool(value)
def set_deleted(self, timestamp=None):
"""
Mark the shard range deleted and set timestamp to the current time.
:param timestamp: optional timestamp to set; if not given the
current time will be set.
:return: True if the deleted attribute or timestamp was changed, False
otherwise
"""
if timestamp is None and self.deleted:
return False
self.deleted = True
self.timestamp = timestamp or Timestamp.now()
return True
def __contains__(self, item):
# test if the given item is within the namespace
if item == '':
return False
item = self._encode_bound(item)
return self.lower < item <= self.upper
def __lt__(self, other):
# a ShardRange is less than other if its entire namespace is less than
# other; if other is another ShardRange that implies that this
# ShardRange's upper must be less than or equal to the other
# ShardRange's lower
if self.upper == ShardRange.MAX:
return False
if isinstance(other, ShardRange):
return self.upper <= other.lower
elif other is None:
return True
else:
return self.upper < self._encode(other)
def __gt__(self, other):
# a ShardRange is greater than other if its entire namespace is greater
# than other; if other is another ShardRange that implies that this
# ShardRange's lower must be greater than or equal to the other
# ShardRange's upper
if self.lower == ShardRange.MIN:
return False
if isinstance(other, ShardRange):
return self.lower >= other.upper
elif other is None:
return False
else:
return self.lower >= self._encode(other)
def __eq__(self, other):
# test for equality of range bounds only
if not isinstance(other, ShardRange):
return False
return self.lower == other.lower and self.upper == other.upper
# A by-the-book implementation should probably hash the value, which
# in our case would be account+container+lower+upper (+timestamp ?).
# But we seem to be okay with just the identity.
def __hash__(self):
return id(self)
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return '%s<%r to %r as of %s, (%d, %d) as of %s, %s as of %s>' % (
self.__class__.__name__, self.lower, self.upper,
self.timestamp.internal, self.object_count, self.bytes_used,
self.meta_timestamp.internal, self.state_text,
self.state_timestamp.internal)
def entire_namespace(self):
"""
Returns True if the ShardRange includes the entire namespace, False
otherwise.
"""
return (self.lower == ShardRange.MIN and
self.upper == ShardRange.MAX)
def overlaps(self, other):
"""
Returns True if the ShardRange namespace overlaps with the other
ShardRange's namespace.
:param other: an instance of :class:`~swift.common.utils.ShardRange`
"""
if not isinstance(other, ShardRange):
return False
return max(self.lower, other.lower) < min(self.upper, other.upper)
def includes(self, other):
"""
Returns True if this namespace includes the whole of the other
namespace, False otherwise.
:param other: an instance of :class:`~swift.common.utils.ShardRange`
"""
return (self.lower <= other.lower) and (other.upper <= self.upper)
def __iter__(self):
yield 'name', self.name
yield 'timestamp', self.timestamp.internal
yield 'lower', str(self.lower)
yield 'upper', str(self.upper)
yield 'object_count', self.object_count
yield 'bytes_used', self.bytes_used
yield 'meta_timestamp', self.meta_timestamp.internal
yield 'deleted', 1 if self.deleted else 0
yield 'state', self.state
yield 'state_timestamp', self.state_timestamp.internal
yield 'epoch', self.epoch.internal if self.epoch is not None else None
yield 'reported', 1 if self.reported else 0
yield 'tombstones', self.tombstones
def copy(self, timestamp=None, **kwargs):
"""
Creates a copy of the ShardRange.
:param timestamp: (optional) If given, the returned ShardRange will
have all of its timestamps set to this value. Otherwise the
returned ShardRange will have the original timestamps.
:return: an instance of :class:`~swift.common.utils.ShardRange`
"""
new = ShardRange.from_dict(dict(self, **kwargs))
if timestamp:
new.timestamp = timestamp
new.meta_timestamp = new.state_timestamp = None
return new
@classmethod
def from_dict(cls, params):
"""
Return an instance constructed using the given dict of params. This
method is deliberately less flexible than the class `__init__()` method
and requires all of the `__init__()` args to be given in the dict of
params.
:param params: a dict of parameters
:return: an instance of this class
"""
return cls(
params['name'], params['timestamp'], params['lower'],
params['upper'], params['object_count'], params['bytes_used'],
params['meta_timestamp'], params['deleted'], params['state'],
params['state_timestamp'], params['epoch'],
params.get('reported', 0), params.get('tombstones', -1))
def expand(self, donors):
"""
Expands the bounds as necessary to match the minimum and maximum bounds
of the given donors.
:param donors: A list of :class:`~swift.common.utils.ShardRange`
:return: True if the bounds have been modified, False otherwise.
"""
modified = False
new_lower = self.lower
new_upper = self.upper
for donor in donors:
new_lower = min(new_lower, donor.lower)
new_upper = max(new_upper, donor.upper)
if self.lower > new_lower or self.upper < new_upper:
self.lower = new_lower
self.upper = new_upper
modified = True
return modified
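# A construction/membership sketch; the name and bounds below are arbitrary:
#
#   sr = ShardRange('.shards_a/c_f0', Timestamp.now(), lower='b', upper='m')
#   'cat' in sr   # True: 'b' < 'cat' <= 'm'
#   'n' in sr     # False: 'n' is above the upper bound 'm'
#   dict(sr)      # JSON-friendly dict of the persisted attributes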
class ShardRangeList(UserList):
"""
This class provides some convenience functions for working with lists of
:class:`~swift.common.utils.ShardRange`.
This class does not enforce ordering or continuity of the list items:
callers should ensure that items are added in order as appropriate.
"""
def __getitem__(self, index):
# workaround for py3 - not needed for py2.7,py3.8
result = self.data[index]
return ShardRangeList(result) if type(result) == list else result
@property
def lower(self):
"""
Returns the lower bound of the first item in the list. Note: this will
only be equal to the lowest bound of all items in the list if the
list has previously been sorted.
:return: lower bound of first item in the list, or ShardRange.MIN
if the list is empty.
"""
if not self:
# empty list has range MIN->MIN
return ShardRange.MIN
return self[0].lower
@property
def upper(self):
"""
Returns the upper bound of the last item in the list. Note: this will
only be equal to the uppermost bound of all items in the list if the
list has previously been sorted.
:return: upper bound of the last item in the list, or ShardRange.MIN
if the list is empty.
"""
if not self:
# empty list has range MIN->MIN
return ShardRange.MIN
return self[-1].upper
@property
def object_count(self):
"""
Returns the total number of objects of all items in the list.
:return: total object count
"""
return sum(sr.object_count for sr in self)
@property
def row_count(self):
"""
Returns the total number of rows of all items in the list.
:return: total row count
"""
return sum(sr.row_count for sr in self)
@property
def bytes_used(self):
"""
Returns the total number of bytes in all items in the list.
:return: total bytes used
"""
return sum(sr.bytes_used for sr in self)
@property
def timestamps(self):
return set(sr.timestamp for sr in self)
@property
def states(self):
return set(sr.state for sr in self)
def includes(self, other):
"""
Check if another ShardRange namespace is enclosed between the list's
``lower`` and ``upper`` properties. Note: the list's ``lower`` and
``upper`` properties will only equal the outermost bounds of all items
in the list if the list has previously been sorted.
Note: the list does not need to contain an item matching ``other`` for
this method to return True, although if the list has been sorted and
does contain an item matching ``other`` then the method will return
True.
:param other: an instance of :class:`~swift.common.utils.ShardRange`
:return: True if other's namespace is enclosed, False otherwise.
"""
return self.lower <= other.lower and self.upper >= other.upper
def filter(self, includes=None, marker=None, end_marker=None):
"""
Filter the list for those shard ranges whose namespace includes the
``includes`` name or any part of the namespace between ``marker`` and
``end_marker``. If none of ``includes``, ``marker`` or ``end_marker``
are specified then all shard ranges will be returned.
:param includes: a string; if not empty then only the shard range, if
any, whose namespace includes this string will be returned, and
``marker`` and ``end_marker`` will be ignored.
:param marker: if specified then only shard ranges whose upper bound is
greater than this value will be returned.
:param end_marker: if specified then only shard ranges whose lower
bound is less than this value will be returned.
:return: A new instance of :class:`~swift.common.utils.ShardRangeList`
containing the filtered shard ranges.
"""
return ShardRangeList(
filter_shard_ranges(self, includes, marker, end_marker))
def find_lower(self, condition):
"""
Finds the first shard range that satisfies the given condition and returns
its lower bound.
:param condition: A function that must accept a single argument of type
:class:`~swift.common.utils.ShardRange` and return True if the
shard range satisfies the condition or False otherwise.
:return: The lower bound of the first shard range to satisfy the
condition, or the ``upper`` value of this list if no such shard
range is found.
"""
for sr in self:
if condition(sr):
return sr.lower
return self.upper
def find_shard_range(item, ranges):
"""
Find a ShardRange in given list of ``shard_ranges`` whose namespace
contains ``item``.
:param item: The item for a which a ShardRange is to be found.
:param ranges: a sorted list of ShardRanges.
:return: the ShardRange whose namespace contains ``item``, or None if
no suitable range is found.
"""
index = bisect.bisect_left(ranges, item)
if index != len(ranges) and item in ranges[index]:
return ranges[index]
return None
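# For example, given ranges sorted by upper bound (bounds are arbitrary):
#
#   ranges = [ShardRange('a/c_1', Timestamp.now(), '', 'm'),
#             ShardRange('a/c_2', Timestamp.now(), 'm', '')]
#   find_shard_range('apple', ranges)   # -> the 'a/c_1' range
#   find_shard_range('zebra', ranges)   # -> the 'a/c_2' range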
def filter_shard_ranges(shard_ranges, includes, marker, end_marker):
"""
Filter the given shard ranges to those whose namespace includes the
``includes`` name or any part of the namespace between ``marker`` and
``end_marker``. If none of ``includes``, ``marker`` or ``end_marker`` are
specified then all shard ranges will be returned.
:param shard_ranges: A list of :class:`~swift.common.utils.ShardRange`.
:param includes: a string; if not empty then only the shard range, if any,
whose namespace includes this string will be returned, and ``marker``
and ``end_marker`` will be ignored.
:param marker: if specified then only shard ranges whose upper bound is
greater than this value will be returned.
:param end_marker: if specified then only shard ranges whose lower bound is
less than this value will be returned.
:return: A filtered list of :class:`~swift.common.utils.ShardRange`.
"""
if includes:
shard_range = find_shard_range(includes, shard_ranges)
return [shard_range] if shard_range else []
def shard_range_filter(sr):
end = start = True
if end_marker:
end = end_marker > sr.lower
if marker:
start = marker < sr.upper
return start and end
if marker or end_marker:
return list(filter(shard_range_filter, shard_ranges))
if marker == ShardRange.MAX or end_marker == ShardRange.MIN:
# MIN and MAX are both Falsy so not handled by shard_range_filter
return []
return shard_ranges
def modify_priority(conf, logger):
"""
Modify priority by nice and ionice.
"""
global _libc_setpriority
if _libc_setpriority is None:
_libc_setpriority = load_libc_function('setpriority',
errcheck=True)
def _setpriority(nice_priority):
"""
setpriority for this pid
:param nice_priority: valid values are -19 to 20
"""
try:
_libc_setpriority(PRIO_PROCESS, os.getpid(),
int(nice_priority))
except (ValueError, OSError):
print(_("WARNING: Unable to modify scheduling priority of process."
" Keeping unchanged! Check logs for more info. "))
logger.exception('Unable to modify nice priority')
else:
logger.debug('set nice priority to %s' % nice_priority)
nice_priority = conf.get('nice_priority')
if nice_priority is not None:
_setpriority(nice_priority)
global _posix_syscall
if _posix_syscall is None:
_posix_syscall = load_libc_function('syscall', errcheck=True)
def _ioprio_set(io_class, io_priority):
"""
ioprio_set for this process
:param io_class: the I/O class component, can be
IOPRIO_CLASS_RT, IOPRIO_CLASS_BE,
or IOPRIO_CLASS_IDLE
:param io_priority: priority value in the I/O class
"""
try:
io_class = IO_CLASS_ENUM[io_class]
io_priority = int(io_priority)
_posix_syscall(NR_ioprio_set(),
IOPRIO_WHO_PROCESS,
os.getpid(),
IOPRIO_PRIO_VALUE(io_class, io_priority))
except (KeyError, ValueError, OSError):
print(_("WARNING: Unable to modify I/O scheduling class "
"and priority of process. Keeping unchanged! "
"Check logs for more info."))
logger.exception("Unable to modify ionice priority")
else:
logger.debug('set ionice class %s priority %s',
io_class, io_priority)
io_class = conf.get("ionice_class")
if io_class is None:
return
io_priority = conf.get("ionice_priority", 0)
_ioprio_set(io_class, io_priority)
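# A configuration sketch; these keys would normally come from a daemon's conf
# section and the logger is assumed to exist:
#
#   modify_priority({'nice_priority': '10',
#                    'ionice_class': 'IOPRIO_CLASS_IDLE'}, logger)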
def o_tmpfile_in_path_supported(dirpath):
fd = None
try:
fd = os.open(dirpath, os.O_WRONLY | O_TMPFILE)
return True
except OSError as e:
if e.errno in (errno.EINVAL, errno.EISDIR, errno.EOPNOTSUPP):
return False
else:
raise Exception("Error on '%(path)s' while checking "
"O_TMPFILE: '%(ex)s'" %
{'path': dirpath, 'ex': e})
finally:
if fd is not None:
os.close(fd)
def o_tmpfile_in_tmpdir_supported():
return o_tmpfile_in_path_supported(gettempdir())
def safe_json_loads(value):
if value:
try:
return json.loads(value)
except (TypeError, ValueError):
pass
return None
def strict_b64decode(value, allow_line_breaks=False):
'''
Validate and decode Base64-encoded data.
The stdlib base64 module silently discards bad characters, but we often
want to treat them as an error.
:param value: some base64-encoded data
:param allow_line_breaks: if True, ignore carriage returns and newlines
:returns: the decoded data
:raises ValueError: if ``value`` is not a string, contains invalid
characters, or has insufficient padding
'''
if isinstance(value, bytes):
try:
value = value.decode('ascii')
except UnicodeDecodeError:
raise ValueError
if not isinstance(value, six.text_type):
raise ValueError
# b64decode will silently discard bad characters, but we want to
# treat them as an error
valid_chars = string.digits + string.ascii_letters + '/+'
strip_chars = '='
if allow_line_breaks:
valid_chars += '\r\n'
strip_chars += '\r\n'
if any(c not in valid_chars for c in value.strip(strip_chars)):
raise ValueError
try:
return base64.b64decode(value)
except (TypeError, binascii.Error): # (py2 error, py3 error)
raise ValueError
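# For example:
#
#   strict_b64decode('aGVsbG8=')   # -> b'hello'
#   strict_b64decode('aGVsbG8')    # raises ValueError (insufficient padding)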
MD5_BLOCK_READ_BYTES = 4096
def md5_hash_for_file(fname):
"""
Get the MD5 checksum of a file.
:param fname: path to file
:returns: MD5 checksum, hex encoded
"""
with open(fname, 'rb') as f:
md5sum = md5(usedforsecurity=False)
for block in iter(lambda: f.read(MD5_BLOCK_READ_BYTES), b''):
md5sum.update(block)
return md5sum.hexdigest()
def get_partition_for_hash(hex_hash, part_power):
"""
Return partition number for given hex hash and partition power.
:param hex_hash: A hash string
:param part_power: partition power
:returns: partition number
"""
raw_hash = binascii.unhexlify(hex_hash)
part_shift = 32 - int(part_power)
return struct.unpack_from('>I', raw_hash)[0] >> part_shift
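# For example, with a 32-hex-digit hash whose first four bytes are 0x00ff0000
# and a partition power of 16:
#
#   get_partition_for_hash('00ff0000' + '0' * 24, 16)   # -> 255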
def get_partition_from_path(devices, path):
"""
:param devices: directory where devices are mounted (e.g. /srv/node)
:param path: full path to an object file or hashdir
:returns: the (integer) partition from the path
"""
offset_parts = devices.rstrip(os.sep).split(os.sep)
path_components = path.split(os.sep)
if offset_parts == path_components[:len(offset_parts)]:
offset = len(offset_parts)
else:
raise ValueError('Path %r is not under device dir %r' % (
path, devices))
return int(path_components[offset + 2])
def replace_partition_in_path(devices, path, part_power):
"""
Takes a path and a partition power and returns the same path, but with the
correct partition number. Most useful when increasing the partition power.
:param devices: directory where devices are mounted (e.g. /srv/node)
:param path: full path to an object file or hashdir
:param part_power: partition power to compute correct partition number
:returns: Path with re-computed partition power
"""
offset_parts = devices.rstrip(os.sep).split(os.sep)
path_components = path.split(os.sep)
if offset_parts == path_components[:len(offset_parts)]:
offset = len(offset_parts)
else:
raise ValueError('Path %r is not under device dir %r' % (
path, devices))
part = get_partition_for_hash(path_components[offset + 4], part_power)
path_components[offset + 2] = "%d" % part
return os.sep.join(path_components)
def load_pkg_resource(group, uri):
if '#' in uri:
uri, name = uri.split('#', 1)
else:
name = uri
uri = 'egg:swift'
if ':' in uri:
scheme, dist = uri.split(':', 1)
scheme = scheme.lower()
else:
scheme = 'egg'
dist = uri
if scheme != 'egg':
raise TypeError('Unhandled URI scheme: %r' % scheme)
return pkg_resources.load_entry_point(dist, group, name)
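# Illustrative example (added; the entry-point names are assumptions): a
# paste.deploy-style URI of the form "egg:<dist>#<name>" loads the named entry
# point from that distribution, e.g.
#
#   app_factory = load_pkg_resource('paste.app_factory', 'egg:swift#proxy')
#
# while a bare name such as 'proxy' falls back to the 'egg:swift' distribution.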
class PipeMutex(object):
"""
Mutex using a pipe. Works across both greenlets and real threads, even
at the same time.
"""
def __init__(self):
self.rfd, self.wfd = os.pipe()
# You can't create a pipe in non-blocking mode; you must set it
# later.
rflags = fcntl.fcntl(self.rfd, fcntl.F_GETFL)
fcntl.fcntl(self.rfd, fcntl.F_SETFL, rflags | os.O_NONBLOCK)
os.write(self.wfd, b'-') # start unlocked
self.owner = None
self.recursion_depth = 0
# Usually, it's an error to have multiple greenthreads all waiting
# to read the same file descriptor. It's often a sign of inadequate
# concurrency control; for example, if you have two greenthreads
# trying to use the same memcache connection, they'll end up writing
# interleaved garbage to the socket or stealing part of each other's
# responses.
#
# In this case, we have multiple greenthreads waiting on the same
# file descriptor by design. This lets greenthreads in real thread A
# wait with greenthreads in real thread B for the same mutex.
# Therefore, we must turn off eventlet's multiple-reader detection.
#
# It would be better to turn off multiple-reader detection for only
# our calls to trampoline(), but eventlet does not support that.
eventlet.debug.hub_prevent_multiple_readers(False)
def acquire(self, blocking=True):
"""
Acquire the mutex.
If called with blocking=False, returns True if the mutex was
acquired and False if it wasn't. Otherwise, blocks until the mutex
is acquired and returns True.
This lock is recursive; the same greenthread may acquire it as many
times as it wants to, though it must then release it that many times
too.
"""
current_greenthread_id = id(eventlet.greenthread.getcurrent())
if self.owner == current_greenthread_id:
self.recursion_depth += 1
return True
while True:
try:
# If there is a byte available, this will read it and remove
# it from the pipe. If not, this will raise OSError with
# errno=EAGAIN.
os.read(self.rfd, 1)
self.owner = current_greenthread_id
return True
except OSError as err:
if err.errno != errno.EAGAIN:
raise
if not blocking:
return False
# Tell eventlet to suspend the current greenthread until
# self.rfd becomes readable. This will happen when someone
# else writes to self.wfd.
eventlet.hubs.trampoline(self.rfd, read=True)
def release(self):
"""
Release the mutex.
"""
current_greenthread_id = id(eventlet.greenthread.getcurrent())
if self.owner != current_greenthread_id:
raise RuntimeError("cannot release un-acquired lock")
if self.recursion_depth > 0:
self.recursion_depth -= 1
return
self.owner = None
os.write(self.wfd, b'X')
def close(self):
"""
Close the mutex. This releases its file descriptors.
You can't use a mutex after it's been closed.
"""
if self.wfd is not None:
os.close(self.rfd)
self.rfd = None
os.close(self.wfd)
self.wfd = None
self.owner = None
self.recursion_depth = 0
def __del__(self):
# We need this so we don't leak file descriptors. Otherwise, if you
# call get_logger() and don't explicitly dispose of it by calling
# logger.logger.handlers[0].lock.close() [1], the pipe file
# descriptors are leaked.
#
# This only really comes up in tests. Swift processes tend to call
# get_logger() once and then hang on to it until they exit, but the
# test suite calls get_logger() a lot.
#
# [1] and that's a completely ridiculous thing to expect callers to
# do, so nobody does it and that's okay.
self.close()
class ThreadSafeSysLogHandler(SysLogHandler):
def createLock(self):
self.lock = PipeMutex()
def round_robin_iter(its):
"""
Takes a list of iterators and yields an element from each in a round-robin
fashion until all of them are exhausted.
:param its: list of iterators
"""
while its:
for it in its:
try:
yield next(it)
except StopIteration:
its.remove(it)
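# Illustrative doctest-style example (added, not part of the original source):
#
#   >>> list(round_robin_iter([iter([1, 2]), iter(['a', 'b', 'c'])]))
#   [1, 'a', 2, 'b', 'c']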
OverrideOptions = collections.namedtuple(
'OverrideOptions', ['devices', 'partitions', 'policies'])
def parse_override_options(**kwargs):
"""
Figure out which policies, devices, and partitions we should operate on,
based on kwargs.
If 'override_policies' is already present in kwargs, then return that
value. This happens when using multiple worker processes; the parent
process supplies override_policies=X to each child process.
Otherwise, in run-once mode, look at the 'policies' keyword argument.
This is the value of the "--policies" command-line option. In
run-forever mode or if no --policies option was provided, an empty list
will be returned.
The procedures for devices and partitions are similar.
:returns: a named tuple with fields "devices", "partitions", and
"policies".
"""
run_once = kwargs.get('once', False)
if 'override_policies' in kwargs:
policies = kwargs['override_policies']
elif run_once:
policies = [
int(p) for p in list_from_csv(kwargs.get('policies'))]
else:
policies = []
if 'override_devices' in kwargs:
devices = kwargs['override_devices']
elif run_once:
devices = list_from_csv(kwargs.get('devices'))
else:
devices = []
if 'override_partitions' in kwargs:
partitions = kwargs['override_partitions']
elif run_once:
partitions = [
int(p) for p in list_from_csv(kwargs.get('partitions'))]
else:
partitions = []
return OverrideOptions(devices=devices, partitions=partitions,
policies=policies)
def distribute_evenly(items, num_buckets):
"""
Distribute items as evenly as possible into N buckets.
"""
out = [[] for _ in range(num_buckets)]
for index, item in enumerate(items):
out[index % num_buckets].append(item)
return out
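# Illustrative doctest-style example (added, not part of the original source):
#
#   >>> distribute_evenly([1, 2, 3, 4, 5], 2)
#   [[1, 3, 5], [2, 4]]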
def get_redirect_data(response):
"""
Extract a redirect location from a response's headers.
:param response: a response
:return: a tuple of (path, Timestamp) if a Location header is found,
otherwise None
:raises ValueError: if the Location header is found but a
X-Backend-Redirect-Timestamp is not found, or if there is a problem
with the format of either header
"""
headers = HeaderKeyDict(response.getheaders())
if 'Location' not in headers:
return None
location = urlparse(headers['Location']).path
if config_true_value(headers.get('X-Backend-Location-Is-Quoted',
'false')):
location = unquote(location)
account, container, _junk = split_path(location, 2, 3, True)
timestamp_val = headers.get('X-Backend-Redirect-Timestamp')
try:
timestamp = Timestamp(timestamp_val)
except (TypeError, ValueError):
raise ValueError('Invalid timestamp value: %s' % timestamp_val)
return '%s/%s' % (account, container), timestamp
def parse_db_filename(filename):
"""
Splits a db filename into three parts: the hash, the epoch, and the
extension.
>>> parse_db_filename("ab2134.db")
('ab2134', None, '.db')
>>> parse_db_filename("ab2134_1234567890.12345.db")
('ab2134', '1234567890.12345', '.db')
:param filename: A db file basename or path to a db file.
:return: A tuple of (hash, epoch, extension). ``epoch`` may be None.
:raises ValueError: if ``filename`` is not a path to a file.
"""
filename = os.path.basename(filename)
if not filename:
raise ValueError('Path to a file required.')
name, ext = os.path.splitext(filename)
parts = name.split('_')
hash_ = parts.pop(0)
epoch = parts[0] if parts else None
return hash_, epoch, ext
def make_db_file_path(db_path, epoch):
"""
Given a path to a db file, return a modified path whose filename part has
the given epoch.
A db filename takes the form ``<hash>[_<epoch>].db``; this method replaces
the ``<epoch>`` part of the given ``db_path`` with the given ``epoch``
value, or drops the epoch part if the given ``epoch`` is ``None``.
:param db_path: Path to a db file that does not necessarily exist.
:param epoch: A string (or ``None``) that will be used as the epoch
in the new path's filename; non-``None`` values will be
normalized to the normal string representation of a
:class:`~swift.common.utils.Timestamp`.
:return: A modified path to a db file.
:raises ValueError: if the ``epoch`` is not valid for constructing a
:class:`~swift.common.utils.Timestamp`.
"""
hash_, _, ext = parse_db_filename(db_path)
db_dir = os.path.dirname(db_path)
if epoch is None:
return os.path.join(db_dir, hash_ + ext)
epoch = Timestamp(epoch).normal
return os.path.join(db_dir, '%s_%s%s' % (hash_, epoch, ext))
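# Illustrative examples (added; the values mirror the parse_db_filename
# doctests above):
#
#   >>> make_db_file_path('/path/to/ab2134.db', '1234567890.12345')
#   '/path/to/ab2134_1234567890.12345.db'
#   >>> make_db_file_path('/path/to/ab2134_1234567890.12345.db', None)
#   '/path/to/ab2134.db'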
def get_db_files(db_path):
"""
Given the path to a db file, return a sorted list of all valid db files
that actually exist in that path's dir. A valid db filename has the form:
<hash>[_<epoch>].db
where <hash> matches the <hash> part of the given db_path as would be
parsed by :meth:`~swift.common.utils.parse_db_filename`.
:param db_path: Path to a db file that does not necessarily exist.
:return: List of valid db files that do exist in the dir of the
``db_path``. This list may be empty.
"""
db_dir, db_file = os.path.split(db_path)
try:
files = os.listdir(db_dir)
except OSError as err:
if err.errno == errno.ENOENT:
return []
raise
if not files:
return []
match_hash, epoch, ext = parse_db_filename(db_file)
results = []
for f in files:
hash_, epoch, ext = parse_db_filename(f)
if ext != '.db':
continue
if hash_ != match_hash:
continue
results.append(os.path.join(db_dir, f))
return sorted(results)
def systemd_notify(logger=None):
"""
Notify the service manager that started this process, if it is
systemd-compatible, that this process correctly started. To do so,
it communicates through a Unix socket stored in environment variable
NOTIFY_SOCKET. More information can be found in systemd documentation:
https://www.freedesktop.org/software/systemd/man/sd_notify.html
:param logger: a logger object
"""
msg = b'READY=1'
notify_socket = os.getenv('NOTIFY_SOCKET')
if notify_socket:
if notify_socket.startswith('@'):
# abstract namespace socket
notify_socket = '\0%s' % notify_socket[1:]
sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
with closing(sock):
try:
sock.connect(notify_socket)
sock.sendall(msg)
del os.environ['NOTIFY_SOCKET']
except EnvironmentError:
if logger:
logger.debug("Systemd notification failed", exc_info=True)
class Watchdog(object):
"""
Implements a watchdog to efficiently manage concurrent timeouts.
Compared to eventlet.timeouts.Timeout, it reduces the amount of context
switching in eventlet by avoiding scheduling actions (throwing an Exception)
and then unscheduling them when the timeouts are cancelled.
1. at T+0, request timeout(10)
=> watchdog greenlet sleeps 10 seconds
2. at T+1, request timeout(15)
=> the timeout will expire after the current one; no need to wake up the
watchdog greenlet
3. at T+2, request timeout(5)
=> the timeout will expire before the first timeout, wake up the
watchdog greenlet to calculate a new sleep period
4. at T+7, the 3rd timeout expires
=> the exception is raised, then the watchdog greenlet sleeps 3 seconds to
wake up for the 1st timeout expiration
"""
def __init__(self):
# key => (timeout, timeout_at, caller_greenthread, exception)
self._timeouts = dict()
self._evt = Event()
self._next_expiration = None
self._run_gth = None
def start(self, timeout, exc, timeout_at=None):
"""
Schedule a timeout action
:param timeout: duration before the timeout expires
:param exc: exception to throw when the timeout expires, must inherit
from eventlet.timeouts.Timeout
:param timeout_at: allows forcing the expiration timestamp
:return: id of the scheduled timeout, needed to cancel it
"""
if not timeout_at:
timeout_at = time.time() + timeout
gth = eventlet.greenthread.getcurrent()
timeout_definition = (timeout, timeout_at, gth, exc)
key = id(timeout_definition)
self._timeouts[key] = timeout_definition
# Wake up the watchdog loop only when there is a new shorter timeout
if (self._next_expiration is None
or self._next_expiration > timeout_at):
# There could be concurrency on .send(), so wrap it in a try
try:
if not self._evt.ready():
self._evt.send()
except AssertionError:
pass
return key
def stop(self, key):
"""
Cancel a scheduled timeout
:param key: timeout id, as returned by start()
"""
try:
if key in self._timeouts:
del(self._timeouts[key])
except KeyError:
pass
def spawn(self):
"""
Start the watchdog greenthread.
"""
if self._run_gth is None:
self._run_gth = eventlet.spawn(self.run)
def run(self):
while True:
self._run()
def _run(self):
now = time.time()
self._next_expiration = None
if self._evt.ready():
self._evt.reset()
for k, (timeout, timeout_at, gth, exc) in list(self._timeouts.items()):
if timeout_at <= now:
try:
if k in self._timeouts:
del(self._timeouts[k])
except KeyError:
pass
e = exc()
e.seconds = timeout
eventlet.hubs.get_hub().schedule_call_global(0, gth.throw, e)
else:
if (self._next_expiration is None
or self._next_expiration > timeout_at):
self._next_expiration = timeout_at
if self._next_expiration is None:
sleep_duration = self._next_expiration
else:
sleep_duration = self._next_expiration - now
self._evt.wait(sleep_duration)
class WatchdogTimeout(object):
"""
Context manager to schedule a timeout in a Watchdog instance
"""
def __init__(self, watchdog, timeout, exc, timeout_at=None):
"""
Schedule a timeout in a Watchdog instance
:param watchdog: Watchdog instance
:param timeout: duration before the timeout expires
:param exc: exception to throw when the timeout expires, must inherit
from eventlet.timeouts.Timeout
:param timeout_at: allows forcing the expiration timestamp
"""
self.watchdog = watchdog
self.key = watchdog.start(timeout, exc, timeout_at=timeout_at)
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
self.watchdog.stop(self.key)
| []
| []
| [
"HOME",
"NOTIFY_SOCKET"
]
| [] | ["HOME", "NOTIFY_SOCKET"] | python | 2 | 0 | |
pkg/webview2/browser.go | package webview2
import (
"errors"
"fmt"
"os"
"path/filepath"
"sync/atomic"
"syscall"
"unsafe"
"github.com/mattpodraza/webview2/v2/pkg/com"
"github.com/mattpodraza/webview2/v2/pkg/hresult"
"github.com/mattpodraza/webview2/v2/pkg/user32"
"golang.org/x/sys/windows"
)
type browserConfig struct {
initialURL string
builtInErrorPage bool
defaultContextMenus bool
defaultScriptDialogs bool
devtools bool
hostObjects bool
script bool
statusBar bool
webMessage bool
zoomControl bool
}
type browser struct {
hwnd windows.Handle
config *browserConfig
view *com.ICoreWebView2
controller *com.ICoreWebView2Controller
settings *com.ICoreWebView2Settings
controllerCompleted int32
}
func (wv *WebView) Browser() *browser {
return wv.browser
}
func (b *browser) embed(wv *WebView) error {
b.hwnd = wv.window.handle
exePath := make([]uint16, windows.MAX_PATH)
_, err := windows.GetModuleFileName(windows.Handle(0), &exePath[0], windows.MAX_PATH)
if err != nil {
return fmt.Errorf("failed to get module file name: %w", err)
}
dataPath := filepath.Join(os.Getenv("AppData"), filepath.Base(windows.UTF16ToString(exePath)))
r1, _, err := wv.dll.Call(0, uint64(uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(dataPath)))), 0, uint64(wv.environmentCompletedHandler()))
hr := hresult.HRESULT(r1)
if err != nil && err != errOK {
return fmt.Errorf("failed to call CreateCoreWebView2EnvironmentWithOptions: %w", err)
}
if hr > hresult.S_OK {
return fmt.Errorf("failed to call CreateCoreWebView2EnvironmentWithOptions: %s", hr)
}
for {
if atomic.LoadInt32(&b.controllerCompleted) != 0 {
break
}
msg, err := user32.GetMessageW()
if err != nil {
return err
}
if msg == nil {
break
}
err = user32.TranslateMessage(msg)
if err != nil {
return err
}
err = user32.DispatchMessageW(msg)
if err != nil {
return err
}
}
settings := new(com.ICoreWebView2Settings)
r, _, err := syscall.Syscall(b.view.VTBL.GetSettings, 2, uintptr(unsafe.Pointer(b.view)), uintptr(unsafe.Pointer(&settings)), 0)
if !errors.Is(err, errOK) {
return err
}
hr = hresult.HRESULT(r)
if hr > hresult.S_OK {
return fmt.Errorf("failed to get webview settings: %s", hr)
}
b.settings = settings
return nil
}
func (b *browser) resize() error {
if b.controller == nil {
return errors.New("nil controller")
}
bounds, err := user32.GetClientRect(b.hwnd)
if err != nil {
return fmt.Errorf("failed to get client rect: %w", err)
}
_, _, err = syscall.Syscall(
b.controller.VTBL.PutBounds, 2,
uintptr(unsafe.Pointer(b.controller)),
uintptr(unsafe.Pointer(bounds)),
0,
)
if !errors.Is(err, errOK) {
return fmt.Errorf("failed to put bounds: %w", err)
}
return nil
}
func (b *browser) Navigate(url string) error {
_, _, err := syscall.Syscall(
b.view.VTBL.Navigate, 3,
uintptr(unsafe.Pointer(b.view)),
uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(url))),
0,
)
if !errors.Is(err, errOK) {
return err
}
return nil
}
func (b *browser) AddScriptToExecuteOnDocumentCreated(script string) error {
_, _, err := syscall.Syscall(
b.view.VTBL.AddScriptToExecuteOnDocumentCreated, 3,
uintptr(unsafe.Pointer(b.view)),
uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(script))),
0,
)
if !errors.Is(err, errOK) {
return err
}
return nil
}
func (b *browser) ExecuteScript(script string) error {
_, _, err := syscall.Syscall(
b.view.VTBL.ExecuteScript, 3,
uintptr(unsafe.Pointer(b.view)),
uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(script))),
0,
)
if !errors.Is(err, errOK) {
return err
}
return nil
}
func (b *browser) saveSetting(setter uintptr, enabled bool) error {
var flag uintptr = 0
if enabled {
flag = 1
}
_, _, err := syscall.Syscall(
setter, 3,
uintptr(unsafe.Pointer(b.settings)),
flag,
0,
)
if !errors.Is(err, errOK) {
return fmt.Errorf("failed to save a setting: %w", err)
}
return nil
}
func (b *browser) saveSettings() error {
if err := b.saveSetting(b.settings.VTBL.PutIsBuiltInErrorPageEnabled, b.config.builtInErrorPage); err != nil {
return err
}
if err := b.saveSetting(b.settings.VTBL.PutAreDefaultContextMenusEnabled, b.config.defaultContextMenus); err != nil {
return err
}
if err := b.saveSetting(b.settings.VTBL.PutAreDefaultScriptDialogsEnabled, b.config.defaultScriptDialogs); err != nil {
return err
}
if err := b.saveSetting(b.settings.VTBL.PutAreDevToolsEnabled, b.config.devtools); err != nil {
return err
}
if err := b.saveSetting(b.settings.VTBL.PutAreHostObjectsAllowed, b.config.hostObjects); err != nil {
return err
}
if err := b.saveSetting(b.settings.VTBL.PutIsScriptEnabled, b.config.script); err != nil {
return err
}
if err := b.saveSetting(b.settings.VTBL.PutIsStatusBarEnabled, b.config.statusBar); err != nil {
return err
}
if err := b.saveSetting(b.settings.VTBL.PutIsWebMessageEnabled, b.config.webMessage); err != nil {
return err
}
return b.saveSetting(b.settings.VTBL.PutIsZoomControlEnabled, b.config.zoomControl)
}
func (wv *WebView) environmentCompletedHandler() uintptr {
h := &com.ICoreWebView2CreateCoreWebView2EnvironmentCompletedHandler{
VTBL: &com.ICoreWebView2CreateCoreWebView2EnvironmentCompletedHandlerVTBL{
Invoke: windows.NewCallback(func(i uintptr, p uintptr, createdEnvironment *com.ICoreWebView2Environment) uintptr {
_, _, _ = syscall.Syscall(createdEnvironment.VTBL.CreateCoreWebView2Controller, 3, uintptr(unsafe.Pointer(createdEnvironment)), uintptr(wv.window.handle), wv.controllerCompletedHandler())
return 0
}),
},
}
h.VTBL.BasicVTBL = com.NewBasicVTBL(&h.Basic)
return uintptr(unsafe.Pointer(h))
}
func (wv *WebView) controllerCompletedHandler() uintptr {
h := &com.ICoreWebView2CreateCoreWebView2ControllerCompletedHandler{
VTBL: &com.ICoreWebView2CreateCoreWebView2ControllerCompletedHandlerVTBL{
Invoke: windows.NewCallback(func(i *com.ICoreWebView2CreateCoreWebView2ControllerCompletedHandler, p uintptr, createdController *com.ICoreWebView2Controller) uintptr {
_, _, _ = syscall.Syscall(createdController.VTBL.AddRef, 1, uintptr(unsafe.Pointer(createdController)), 0, 0)
wv.browser.controller = createdController
createdWebView2 := new(com.ICoreWebView2)
_, _, _ = syscall.Syscall(createdController.VTBL.GetCoreWebView2, 2, uintptr(unsafe.Pointer(createdController)), uintptr(unsafe.Pointer(&createdWebView2)), 0)
wv.browser.view = createdWebView2
_, _, _ = syscall.Syscall(wv.browser.view.VTBL.AddRef, 1, uintptr(unsafe.Pointer(wv.browser.view)), 0, 0)
atomic.StoreInt32(&wv.browser.controllerCompleted, 1)
return 0
}),
},
}
h.VTBL.BasicVTBL = com.NewBasicVTBL(&h.Basic)
return uintptr(unsafe.Pointer(h))
}
| [
"\"AppData\""
]
| []
| [
"AppData"
]
| [] | ["AppData"] | go | 1 | 0 | |
tools/sirius/femlm/examples/stream/stream.py | import sst
import os
class RtrPorts:
def __init__(self):
self._next_addr = 0
def nextPort(self):
res = self._next_addr
self._next_addr = self._next_addr + 1
return res
def numPorts(self):
return self._next_addr
sst.setProgramOption("timebase", "1ns")
sst_root = os.getenv( "SST_ROOT" )
rtrInfo = RtrPorts()
pagesize = 4096
memoryperlevel = 2048
noc = sst.Component("noc", "merlin.hr_router")
noc.addParams({
"id" : 0,
"topology" : "merlin.singlerouter",
"link_bw" : "320GB/s",
"xbar_bw" : "512GB/s",
"input_latency" : "4ns",
"output_latency" : "4ns",
"input_buf_size" : "4KiB",
"output_buf_size" : "4KiB",
"flit_size" : "72B"
})
corecount = 4;
ariel = sst.Component("a0", "ariel.ariel")
ariel.addParams({
"verbose" : "1",
"clock" : "2GHz",
"maxcorequeue" : "256",
"maxissuepercycle" : "2",
"pipetimeout" : "0",
"corecount" : corecount,
"executable" : "/home/sdhammo/subversion/sst-simulator-org-trunk/tools/ariel/femlm/examples/stream/mlmstream",
"arielmode" : "1",
"arieltool" : "/home/sdhammo/subversion/sst-simulator-org-trunk/tools/ariel/femlm/femlmtool.so",
"memorylevels" : "2",
"pagecount0" : (memoryperlevel * 1024 * 1024) / pagesize,
"pagecount1" : (memoryperlevel * 1024 * 1024) / pagesize,
"defaultlevel" : os.getenv("ARIEL_OVERRIDE_POOL", 1)
})
for x in range(corecount) :
l1cache = sst.Component("l1cache" + str(x), "memHierarchy.Cache")
l1cache.addParams({
"cache_frequency" : "2GHz",
"cache_size" : "16 KB",
"coherence_protocol" : "MESI",
"replacement_policy" : "lru",
"associativity" : "2",
"access_latency_cycles" : "2",
"low_network_links" : "1",
"cache_line_size" : "64",
"L1" : "1",
"debug" : "0",
"statistics" : "1"
})
l2cache_net_addr = rtrInfo.nextPort()
l2cache = sst.Component("l2cache" + str(x), "memHierarchy.Cache")
l2cache.addParams({
"cache_frequency" : "1500MHz",
"cache_size" : "64 KB",
"coherence_protocol" : "MESI",
"replacement_policy" : "lru",
"associativity" : "16",
"access_latency_cycles" : "10",
"low_network_links" : "1",
"cache_line_size" : "64",
"directory_at_next_level" : 1,
"L1" : "0",
"debug" : "0",
"statistics" : "1",
"network_address" : l2cache_net_addr
})
ariel_core_link = sst.Link("cpu_cache_link_" + str(x))
ariel_core_link.connect( (ariel, "cache_link_" + str(x), "50ps"), (l1cache, "high_network_0", "50ps") )
l1tol2_link = sst.Link("l1_l2_link_" + str(x))
l1tol2_link.connect( (l2cache, "high_network_0", "50ps"), (l1cache, "low_network_0", "50ps") )
l2cache_link = sst.Link("l2cache_link_" + str(x))
l2cache_link.connect( (l2cache, "directory", "50ps"), (noc, "port" + str(l2cache_net_addr), "50ps") )
fast_memory = sst.Component("fast_memory", "memHierarchy.MemController")
fast_memory.addParams({
"coherence_protocol" : "MESI",
"access_time" : "60ns",
"mem_size" : memoryperlevel,
"rangeStart" : 0,
"rangeEnd" : memoryperlevel * 1024 * 1024,
"clock" : "2GHz",
"use_dramsim" : "0",
"device_ini" : "DDR3_micron_32M_8B_x4_sg125.ini",
"system_ini" : "system.ini",
"statistics" : 1
})
fast_dc_port = rtrInfo.nextPort()
fast_dc = sst.Component("fast_dc", "memHierarchy.DirectoryController")
fast_dc.addParams({
"coherence_protocol" : "MESI",
"network_bw" : "320GB/s",
"addr_range_start" : "0",
"addr_range_end" : memoryperlevel * 1024 * 1024,
"entry_cache_size" : 128 * 1024,
"clock" : "1GHz",
"statistics" : 1,
"network_address" : fast_dc_port
})
fast_dc_link = sst.Link("fast_dc_link")
fast_dc_link.connect( (fast_memory, "direct_link", "50ps") , (fast_dc, "memory", "50ps") )
fast_net_link = sst.Link("fast_dc_net_link")
fast_net_link.connect( (fast_dc, "network", "50ps") , (noc, "port" + str(fast_dc_port), "50ps") )
slow_memory = sst.Component("slow_memory", "memHierarchy.MemController")
slow_memory.addParams({
"coherence_protocol" : "MESI",
"access_time" : "60ns",
"mem_size" : memoryperlevel,
"rangeStart" : 0,
"rangeEnd" : memoryperlevel * 1024 * 1024,
"clock" : "300MHz",
"use_dramsim" : "0",
"device_ini" : "DDR3_micron_32M_8B_x4_sg125.ini",
"system_ini" : "system.ini",
"statistics" : 1
})
slow_dc_port = rtrInfo.nextPort()
slow_dc = sst.Component("slow_dc", "memHierarchy.DirectoryController")
slow_dc.addParams({
"coherence_protocol" : "MESI",
"network_bw" : "50GB/s",
"addr_range_start" : memoryperlevel * 1024 * 1024,
"addr_range_end" : memoryperlevel * 1024 * 1024 * 2,
"entry_cache_size" : 128 * 1024,
"clock" : "512MHz",
"statistics" : 1,
"network_address" : slow_dc_port
})
slow_dc_link = sst.Link("slow_dc_link")
slow_dc_link.connect( (slow_memory, "direct_link", "50ps") , (slow_dc, "memory", "50ps") )
slow_net_link = sst.Link("slow_dc_net_link")
slow_net_link.connect( (slow_dc, "network", "50ps") , (noc, "port" + str(slow_dc_port), "50ps") )
cpu_cache_link = sst.Link("cpu_cache_link")
cpu_cache_link.connect( (ariel, "cache_link_0", "50ps"), (l1cache, "high_network_0", "50ps") )
noc.addParam("num_ports", rtrInfo.numPorts())
print "Finished Configuration of SST"
| []
| []
| [
"ARIEL_OVERRIDE_POOL",
"SST_ROOT\""
]
| [] | ["ARIEL_OVERRIDE_POOL", "SST_ROOT\""] | python | 2 | 0 | |
chatbot/custom/custom_tracker.py | import json
import logging
import pickle
import typing
from typing import Iterator, Optional, Text, Iterable, Union, Dict, List
import itertools
import traceback
from time import sleep
from rasa.core.brokers.event_channel import EventChannel
from rasa.core.trackers import ActionExecuted, DialogueStateTracker, EventVerbosity
from rasa.core.tracker_store import TrackerStore
from rasa.core.domain import Domain
from rasa.core.events import SessionStarted
from datetime import datetime
from termcolor import colored
import inspect
import os
import Tracker4J
from dotenv import load_dotenv
load_dotenv()
class GridTrackerStore(TrackerStore):
def __init__(
self,
domain,
host=os.getenv("MONGO_URL") or "mongodb://localhost:27017",
db="rasa",
username=None,
password=None,
auth_source="admin",
collection="conversations",
neo4j_host=os.getenv("NEO4J_URL") or "localhost",
neo4j_http_port=int(os.getenv("NEO4J_PORT") or 7687),
neo4j_user=os.getenv("NEO4J_USER") or None,
neo4j_password=os.getenv("NEO4J_PASSWORD") or None,
event_broker=None,
):
from pymongo.database import Database
from pymongo import MongoClient
self.client = MongoClient(
host,
username=username,
password=password,
authSource=auth_source,
connect=False,
)
try:
self.Tracker4J = Tracker4J.Tracker4J(host=neo4j_host,
port=neo4j_http_port,
user=neo4j_user,
password=neo4j_password)
except Exception:
self.Tracker4J = None
self.db = Database(self.client, db)
self.collection = collection
super().__init__(domain, event_broker)
self._ensure_indices()
@property
def conversations(self):
return self.db[self.collection]
def _ensure_indices(self):
self.conversations.create_index("sender_id")
@staticmethod
def _current_tracker_state_without_events(tracker):
state = tracker.current_state(EventVerbosity.ALL)
state.pop("events", None)
return state
def save(self, tracker, timeout=None):
if self.event_broker:
self.stream_events(tracker)
additional_events = self._additional_events(tracker)
sender_id = tracker.sender_id
events = tracker.current_state(EventVerbosity.ALL)["events"]
self.conversations.update_one(
{"sender_id": tracker.sender_id},
{
"$set": self._current_tracker_state_without_events(tracker),
"$push": {
"events": {"$each": [e.as_dict() for e in additional_events]}
},
},
upsert=True,
)
try:
if self.Tracker4J is not None:
self.Tracker4J.CreateNodeFromEvents(events, sender_id)
except Exception:
pass
def _additional_events(self, tracker: DialogueStateTracker) -> Iterator:
"""Return events from the tracker which aren't currently stored.
Args:
tracker: Tracker to inspect.
Returns:
List of serialised events that aren't currently stored.
"""
stored = self.conversations.find_one({"sender_id": tracker.sender_id}) or {}
all_events = self._events_from_serialized_tracker(stored)
number_events_since_last_session = len(
self._events_since_last_session_start(all_events)
)
return itertools.islice(
tracker.events, number_events_since_last_session, len(tracker.events)
)
@staticmethod
def _events_from_serialized_tracker(serialised: Dict) -> List[Dict]:
return serialised.get("events", [])
@staticmethod
def _events_since_last_session_start(events: List[Dict]) -> List[Dict]:
"""Retrieve events since and including the latest `SessionStart` event.
Args:
events: All events for a conversation ID.
Returns:
List of serialised events since and including the latest `SessionStarted`
event. Returns all events if no such event is found.
"""
events_after_session_start = []
for event in reversed(events):
events_after_session_start.append(event)
if event["event"] == SessionStarted.type_name:
break
return list(reversed(events_after_session_start))
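# Illustrative doctest-style example (added; assumes Rasa's
# SessionStarted.type_name is the string "session_started"):
#
#   >>> events = [{"event": "action"},
#   ... {"event": "session_started"},
#   ... {"event": "user"}]
#   >>> GridTrackerStore._events_since_last_session_start(events)
#   [{'event': 'session_started'}, {'event': 'user'}]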
def retrieve(self, sender_id: Text) -> Optional[DialogueStateTracker]:
"""
Args:
sender_id: the message owner ID
Returns:
`DialogueStateTracker`
"""
stored = self.conversations.find_one({"sender_id": sender_id})
# look for conversations which have used an `int` sender_id in the past
# and update them.
if not stored and sender_id.isdigit():
from pymongo import ReturnDocument
stored = self.conversations.find_one_and_update(
{"sender_id": int(sender_id)},
{"$set": {"sender_id": str(sender_id)}},
return_document=ReturnDocument.AFTER,
)
if not stored:
return
events = self._events_from_serialized_tracker(stored)
if not self.load_events_from_previous_conversation_sessions:
events = self._events_since_last_session_start(events)
return DialogueStateTracker.from_dict(sender_id, events, self.domain.slots)
def keys(self) -> Iterable[Text]:
"""Returns sender_ids of the Mongo Tracker Store"""
return [c["sender_id"] for c in self.conversations.find()]
| []
| []
| [
"NEO4J_URL",
"NEO4J_PASSWORD",
"NEO4J_PORT",
"NEO4J_USER",
"MONGO_URL"
]
| [] | ["NEO4J_URL", "NEO4J_PASSWORD", "NEO4J_PORT", "NEO4J_USER", "MONGO_URL"] | python | 5 | 0 | |
client/starwhale/utils/venv.py | import os
import platform
import typing as t
from pathlib import Path
import shutil
from loguru import logger
import conda_pack # type: ignore
from rich import print as rprint
from rich.console import Console
from starwhale.utils.error import UnExpectedConfigFieldError
from starwhale.utils import (
get_python_run_env,
get_python_version,
is_conda,
is_venv,
is_darwin,
is_linux,
is_windows,
get_conda_env,
)
from starwhale.utils.error import NoSupportError
from starwhale.utils.fs import ensure_dir, ensure_file
from starwhale.utils.process import check_call
CONDA_ENV_TAR = "env.tar.gz"
DUMP_CONDA_ENV_FNAME = "env-lock.yaml"
DUMP_PIP_REQ_FNAME = "requirements-lock.txt"
DUMP_USER_PIP_REQ_FNAME = "requirements.txt"
SW_ACTIVATE_SCRIPT = "activate.sw"
SUPPORTED_PIP_REQ = [DUMP_USER_PIP_REQ_FNAME, "pip-req.txt", "pip3-req.txt"]
SW_PYPI_INDEX_URL = os.environ.get(
"SW_PYPI_INDEX_URL", "https://pypi.doubanio.com/simple/"
)
SW_PYPI_EXTRA_INDEX_URL = os.environ.get(
"SW_PYPI_EXTRA_INDEX_URL",
"https://pypi.tuna.tsinghua.edu.cn/simple/ http://pypi.mirrors.ustc.edu.cn/simple/ https://pypi.org/simple",
)
SW_PYPI_TRUSTED_HOST = os.environ.get(
"SW_PYPI_TRUSTED_HOST",
"pypi.tuna.tsinghua.edu.cn pypi.mirrors.ustc.edu.cn pypi.doubanio.com pypi.org",
)
def install_req(venvdir: t.Union[str, Path], req: t.Union[str, Path]) -> None:
# TODO: use custom pip source
venvdir = str(venvdir)
req = str(req)
cmd = [
os.path.join(venvdir, "bin", "pip"),
"install",
"--exists-action",
"w",
"--index-url",
SW_PYPI_INDEX_URL,
"--extra-index-url",
SW_PYPI_EXTRA_INDEX_URL,
"--trusted-host",
SW_PYPI_TRUSTED_HOST,
]
cmd += ["-r", req] if os.path.isfile(req) else [req]
check_call(cmd)
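# Illustrative usage (added; the paths are assumptions for the example only):
#
#   install_req("/tmp/my-venv", "requirements.txt")
#
# runs <venv>/bin/pip install -r requirements.txt against the SW_PYPI_* index,
# extra-index and trusted-host settings configured above.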
def venv_activate(venvdir: t.Union[str, Path]) -> None:
_fpath = Path(venvdir) / "bin" / "activate"
cmd = f"source {_fpath.absolute()}"
check_call(cmd, shell=True, executable="/bin/bash")
def venv_setup(venvdir: t.Union[str, Path]) -> None:
# TODO: define starwhale virtualenv.py
# TODO: use more elegant method to make venv portable
check_call(f"python3 -m venv {venvdir}", shell=True)
def pip_freeze(path: t.Union[str, Path]) -> None:
# TODO: add cmd timeout and error log
check_call(f"pip freeze > {path}", shell=True)
def conda_export(path: t.Union[str, Path], env: str = "") -> None:
# TODO: add cmd timeout
cmd = f"{get_conda_bin()} env export"
env = f"-n {env}" if env else ""
check_call(f"{cmd} {env} > {path}", shell=True)
def conda_restore(
env_fpath: t.Union[str, Path], target_env: t.Union[str, Path]
) -> None:
cmd = f"{get_conda_bin()} env update --file {env_fpath} --prefix {target_env}"
check_call(cmd, shell=True)
def conda_activate(env: t.Union[str, Path]) -> None:
cmd = f"{get_conda_bin()} activate {env}"
check_call(cmd, shell=True)
def conda_activate_render(env: t.Union[str, Path], path: Path) -> None:
content = """
_conda_hook="$(/opt/miniconda3/bin/conda shell.bash hook)"
cat >> /dev/stdout << EOF
$_conda_hook
conda activate /opt/starwhale/swmp/dep/conda/env
EOF
"""
_render_sw_activate(content, path)
def venv_activate_render(
venvdir: t.Union[str, Path], path: Path, relocate: bool = False
) -> None:
bin = f"{venvdir}/bin"
if relocate:
content = f"""
sed -i '1d' {bin}/starwhale {bin}/sw {bin}/swcli {bin}/pip* {bin}/virtualenv
sed -i '1i\#!{bin}/python3' {bin}/starwhale {bin}/sw {bin}/swcli {bin}/pip* {bin}/virtualenv
sed -i 's#^VIRTUAL_ENV=.*$#VIRTUAL_ENV={venvdir}#g' {bin}/activate
rm -rf {bin}/python3
ln -s /usr/bin/python3 {bin}/python3
echo 'source {bin}/activate'
"""
else:
content = f"""
echo 'source {venvdir}/bin/activate'
"""
_render_sw_activate(content, path)
def _render_sw_activate(content: str, path: Path) -> None:
ensure_file(path, content, mode=0o755)
rprint(f" :clap: {path.name} is generated at {path}")
rprint(" :compass: run cmd: ")
rprint(f" \t [bold red] $(sh {path}) [/]")
def get_conda_bin() -> str:
# TODO: add process cache
for _p in (
"/opt/miniconda3/bin/conda",
"/opt/anaconda3/bin/conda",
os.path.expanduser("~/miniconda3/bin/conda"),
os.path.expanduser("~/anaconda3/bin/conda"),
):
if os.path.exists(_p):
return _p
else:
return "conda"
def dump_python_dep_env(
dep_dir: t.Union[str, Path],
pip_req_fpath: str,
skip_gen_env: bool = False,
console: t.Optional[Console] = None,
expected_runtime: str = "",
) -> t.Dict[str, t.Any]:
# TODO: smart dump python dep by starwhale sdk-api, pip ast analysis?
dep_dir = Path(dep_dir)
console = console or Console()
pr_env = get_python_run_env()
sys_name = platform.system()
py_ver = get_python_version()
expected_runtime = expected_runtime.strip().lower()
if expected_runtime and not py_ver.startswith(expected_runtime):
raise UnExpectedConfigFieldError(
f"expected runtime({expected_runtime}) is not equal to detected runtime{py_ver}"
)
_manifest = dict(
env=pr_env,
system=sys_name,
python=py_ver,
local_gen_env=False,
venv=dict(use=not is_conda()),
conda=dict(use=is_conda()),
)
_conda_dir = dep_dir / "conda"
_python_dir = dep_dir / "python"
_venv_dir = _python_dir / "venv"
_pip_lock_req = _python_dir / DUMP_PIP_REQ_FNAME
_conda_lock_env = _conda_dir / DUMP_CONDA_ENV_FNAME
ensure_dir(_venv_dir)
ensure_dir(_conda_dir)
ensure_dir(_python_dir)
logger.info(f"[info:dep]python env({pr_env}), os({sys_name}, python({py_ver}))")
console.print(f":dizzy: python{py_ver}@{pr_env}, try to export environment...")
if os.path.exists(pip_req_fpath):
shutil.copyfile(pip_req_fpath, str(_python_dir / DUMP_USER_PIP_REQ_FNAME))
if is_conda():
logger.info(f"[info:dep]dump conda environment yaml: {_conda_lock_env}")
conda_export(_conda_lock_env)
elif is_venv():
logger.info(f"[info:dep]dump pip-req with freeze: {_pip_lock_req}")
pip_freeze(_pip_lock_req)
else:
# TODO: add other env tools
logger.warning(
"detect use system python, swcli does not pip freeze, only use custom pip-req"
)
if is_windows() or is_darwin() or skip_gen_env:
# TODO: win/osx will produce env in controller agent with task
logger.info(f"[info:dep]{sys_name} will skip conda/venv dump or generate")
elif is_linux():
# TODO: more design local or remote build venv
# TODO: ignore some pkg when dump, like notebook?
_manifest["local_gen_env"] = True # type: ignore
if is_conda():
cenv = get_conda_env()
dest = str(_conda_dir / CONDA_ENV_TAR)
if not cenv:
raise Exception("cannot get conda env value")
# TODO: add env/env-name into model.yaml, user can set custom vars.
logger.info("[info:dep]try to pack conda...")
conda_pack.pack(
name=cenv, force=True, output=dest, ignore_editable_packages=True
)
logger.info(f"[info:dep]finish conda pack {dest})")
console.print(f":beer_mug: conda pack @ [underline]{dest}[/]")
else:
# TODO: tune venv create performance, use clone?
logger.info(f"[info:dep]build venv dir: {_venv_dir}")
venv_setup(_venv_dir)
logger.info(
f"[info:dep]install pip freeze({_pip_lock_req}) to venv: {_venv_dir}"
)
install_req(_venv_dir, _pip_lock_req)
if os.path.exists(pip_req_fpath):
logger.info(
f"[info:dep]install custom pip({pip_req_fpath}) to venv: {_venv_dir}"
)
install_req(_venv_dir, pip_req_fpath)
console.print(f":beer_mug: venv @ [underline]{_venv_dir}[/]")
else:
raise NoSupportError(f"no support {sys_name} system")
return _manifest
def detect_pip_req(workdir: t.Union[str, Path], fname: str = "") -> str:
workdir = Path(workdir)
if fname and (workdir / fname).exists():
return str(workdir / fname)
else:
for p in SUPPORTED_PIP_REQ:
if (workdir / p).exists():
return str(workdir / p)
else:
return ""
| []
| []
| [
"SW_PYPI_TRUSTED_HOST",
"SW_PYPI_EXTRA_INDEX_URL",
"SW_PYPI_INDEX_URL"
]
| [] | ["SW_PYPI_TRUSTED_HOST", "SW_PYPI_EXTRA_INDEX_URL", "SW_PYPI_INDEX_URL"] | python | 3 | 0 | |
src/cmd/go/internal/vcs/vcs.go | // Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package vcs
import (
"encoding/json"
"errors"
"fmt"
exec "internal/execabs"
"internal/lazyregexp"
"internal/singleflight"
"io/fs"
"log"
urlpkg "net/url"
"os"
"path/filepath"
"regexp"
"strings"
"sync"
"cmd/go/internal/base"
"cmd/go/internal/cfg"
"cmd/go/internal/search"
"cmd/go/internal/str"
"cmd/go/internal/web"
"golang.org/x/mod/module"
)
// A Cmd describes how to use a version control system
// like Mercurial, Git, or Subversion.
type Cmd struct {
Name string
Cmd string // name of binary to invoke command
CreateCmd []string // commands to download a fresh copy of a repository
DownloadCmd []string // commands to download updates into an existing repository
TagCmd []tagCmd // commands to list tags
TagLookupCmd []tagCmd // commands to lookup tags before running tagSyncCmd
TagSyncCmd []string // commands to sync to specific tag
TagSyncDefault []string // commands to sync to default tag
Scheme []string
PingCmd string
RemoteRepo func(v *Cmd, rootDir string) (remoteRepo string, err error)
ResolveRepo func(v *Cmd, rootDir, remoteRepo string) (realRepo string, err error)
}
var defaultSecureScheme = map[string]bool{
"https": true,
"git+ssh": true,
"bzr+ssh": true,
"svn+ssh": true,
"ssh": true,
}
func (v *Cmd) IsSecure(repo string) bool {
u, err := urlpkg.Parse(repo)
if err != nil {
// If repo is not a URL, it's not secure.
return false
}
return v.isSecureScheme(u.Scheme)
}
func (v *Cmd) isSecureScheme(scheme string) bool {
switch v.Cmd {
case "git":
// GIT_ALLOW_PROTOCOL is an environment variable defined by Git. It is a
// colon-separated list of schemes that are allowed to be used with git
// fetch/clone. Any scheme not mentioned will be considered insecure.
if allow := os.Getenv("GIT_ALLOW_PROTOCOL"); allow != "" {
for _, s := range strings.Split(allow, ":") {
if s == scheme {
return true
}
}
return false
}
}
return defaultSecureScheme[scheme]
}
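// Illustrative examples (added; the URLs are placeholders): with the default
// table above, vcsGit.IsSecure("https://example.com/repo.git") reports true
// while vcsGit.IsSecure("http://example.com/repo.git") reports false, and
// setting GIT_ALLOW_PROTOCOL=https:ssh would make exactly the https and ssh
// schemes count as secure for git.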
// A tagCmd describes a command to list available tags
// that can be passed to tagSyncCmd.
type tagCmd struct {
cmd string // command to list tags
pattern string // regexp to extract tags from list
}
// vcsList lists the known version control systems
var vcsList = []*Cmd{
vcsHg,
vcsGit,
vcsSvn,
vcsBzr,
vcsFossil,
}
// vcsMod is a stub for the "mod" scheme. It's returned by
// repoRootForImportDynamic, but is otherwise not treated as a VCS command.
var vcsMod = &Cmd{Name: "mod"}
// vcsByCmd returns the version control system for the given
// command name (hg, git, svn, bzr).
func vcsByCmd(cmd string) *Cmd {
for _, vcs := range vcsList {
if vcs.Cmd == cmd {
return vcs
}
}
return nil
}
// vcsHg describes how to use Mercurial.
var vcsHg = &Cmd{
Name: "Mercurial",
Cmd: "hg",
CreateCmd: []string{"clone -U -- {repo} {dir}"},
DownloadCmd: []string{"pull"},
// We allow both tag and branch names as 'tags'
// for selecting a version. This lets people have
// a go.release.r60 branch and a go1 branch
// and make changes in both, without constantly
// editing .hgtags.
TagCmd: []tagCmd{
{"tags", `^(\S+)`},
{"branches", `^(\S+)`},
},
TagSyncCmd: []string{"update -r {tag}"},
TagSyncDefault: []string{"update default"},
Scheme: []string{"https", "http", "ssh"},
PingCmd: "identify -- {scheme}://{repo}",
RemoteRepo: hgRemoteRepo,
}
func hgRemoteRepo(vcsHg *Cmd, rootDir string) (remoteRepo string, err error) {
out, err := vcsHg.runOutput(rootDir, "paths default")
if err != nil {
return "", err
}
return strings.TrimSpace(string(out)), nil
}
// vcsGit describes how to use Git.
var vcsGit = &Cmd{
Name: "Git",
Cmd: "git",
CreateCmd: []string{"clone -- {repo} {dir}", "-go-internal-cd {dir} submodule update --init --recursive"},
DownloadCmd: []string{"pull --ff-only", "submodule update --init --recursive"},
TagCmd: []tagCmd{
// tags/xxx matches a git tag named xxx
// origin/xxx matches a git branch named xxx on the default remote repository
{"show-ref", `(?:tags|origin)/(\S+)$`},
},
TagLookupCmd: []tagCmd{
{"show-ref tags/{tag} origin/{tag}", `((?:tags|origin)/\S+)$`},
},
TagSyncCmd: []string{"checkout {tag}", "submodule update --init --recursive"},
// both createCmd and downloadCmd update the working dir.
// No need to do more here. We used to 'checkout master'
// but that doesn't work if the default branch is not named master.
// DO NOT add 'checkout master' here.
// See golang.org/issue/9032.
TagSyncDefault: []string{"submodule update --init --recursive"},
Scheme: []string{"git", "https", "http", "git+ssh", "ssh"},
// Leave out the '--' separator in the ls-remote command: git 2.7.4 does not
// support such a separator for that command, and this use should be safe
// without it because the {scheme} value comes from the predefined list above.
// See golang.org/issue/33836.
PingCmd: "ls-remote {scheme}://{repo}",
RemoteRepo: gitRemoteRepo,
}
// scpSyntaxRe matches the SCP-like addresses used by Git to access
// repositories by SSH.
var scpSyntaxRe = lazyregexp.New(`^([a-zA-Z0-9_]+)@([a-zA-Z0-9._-]+):(.*)$`)
func gitRemoteRepo(vcsGit *Cmd, rootDir string) (remoteRepo string, err error) {
cmd := "config remote.origin.url"
errParse := errors.New("unable to parse output of git " + cmd)
errRemoteOriginNotFound := errors.New("remote origin not found")
outb, err := vcsGit.run1(rootDir, cmd, nil, false)
if err != nil {
// if it doesn't output any message, it means the config argument is correct,
// but the config value itself doesn't exist
if outb != nil && len(outb) == 0 {
return "", errRemoteOriginNotFound
}
return "", err
}
out := strings.TrimSpace(string(outb))
var repoURL *urlpkg.URL
if m := scpSyntaxRe.FindStringSubmatch(out); m != nil {
// Match SCP-like syntax and convert it to a URL.
// Eg, "[email protected]:user/repo" becomes
// "ssh://[email protected]/user/repo".
repoURL = &urlpkg.URL{
Scheme: "ssh",
User: urlpkg.User(m[1]),
Host: m[2],
Path: m[3],
}
} else {
repoURL, err = urlpkg.Parse(out)
if err != nil {
return "", err
}
}
// Iterate over insecure schemes too, because this function simply
// reports the state of the repo. If we can't see insecure schemes then
// we can't report the actual repo URL.
for _, s := range vcsGit.Scheme {
if repoURL.Scheme == s {
return repoURL.String(), nil
}
}
return "", errParse
}
// vcsBzr describes how to use Bazaar.
var vcsBzr = &Cmd{
Name: "Bazaar",
Cmd: "bzr",
CreateCmd: []string{"branch -- {repo} {dir}"},
// Without --overwrite bzr will not pull tags that changed.
// Replace by --overwrite-tags after http://pad.lv/681792 goes in.
DownloadCmd: []string{"pull --overwrite"},
TagCmd: []tagCmd{{"tags", `^(\S+)`}},
TagSyncCmd: []string{"update -r {tag}"},
TagSyncDefault: []string{"update -r revno:-1"},
Scheme: []string{"https", "http", "bzr", "bzr+ssh"},
PingCmd: "info -- {scheme}://{repo}",
RemoteRepo: bzrRemoteRepo,
ResolveRepo: bzrResolveRepo,
}
func bzrRemoteRepo(vcsBzr *Cmd, rootDir string) (remoteRepo string, err error) {
outb, err := vcsBzr.runOutput(rootDir, "config parent_location")
if err != nil {
return "", err
}
return strings.TrimSpace(string(outb)), nil
}
func bzrResolveRepo(vcsBzr *Cmd, rootDir, remoteRepo string) (realRepo string, err error) {
outb, err := vcsBzr.runOutput(rootDir, "info "+remoteRepo)
if err != nil {
return "", err
}
out := string(outb)
// Expect:
// ...
// (branch root|repository branch): <URL>
// ...
found := false
for _, prefix := range []string{"\n branch root: ", "\n repository branch: "} {
i := strings.Index(out, prefix)
if i >= 0 {
out = out[i+len(prefix):]
found = true
break
}
}
if !found {
return "", fmt.Errorf("unable to parse output of bzr info")
}
i := strings.Index(out, "\n")
if i < 0 {
return "", fmt.Errorf("unable to parse output of bzr info")
}
out = out[:i]
return strings.TrimSpace(out), nil
}
// vcsSvn describes how to use Subversion.
var vcsSvn = &Cmd{
Name: "Subversion",
Cmd: "svn",
CreateCmd: []string{"checkout -- {repo} {dir}"},
DownloadCmd: []string{"update"},
// There is no tag command in subversion.
// The branch information is all in the path names.
Scheme: []string{"https", "http", "svn", "svn+ssh"},
PingCmd: "info -- {scheme}://{repo}",
RemoteRepo: svnRemoteRepo,
}
func svnRemoteRepo(vcsSvn *Cmd, rootDir string) (remoteRepo string, err error) {
outb, err := vcsSvn.runOutput(rootDir, "info")
if err != nil {
return "", err
}
out := string(outb)
// Expect:
//
// ...
// URL: <URL>
// ...
//
// Note that we're not using the Repository Root line,
// because svn allows checking out subtrees.
// The URL will be the URL of the subtree (what we used with 'svn co')
// while the Repository Root may be a much higher parent.
i := strings.Index(out, "\nURL: ")
if i < 0 {
return "", fmt.Errorf("unable to parse output of svn info")
}
out = out[i+len("\nURL: "):]
i = strings.Index(out, "\n")
if i < 0 {
return "", fmt.Errorf("unable to parse output of svn info")
}
out = out[:i]
return strings.TrimSpace(out), nil
}
// fossilRepoName is the name go get associates with a fossil repository. In the
// real world the file can be named anything.
const fossilRepoName = ".fossil"
// vcsFossil describes how to use Fossil (fossil-scm.org)
var vcsFossil = &Cmd{
Name: "Fossil",
Cmd: "fossil",
CreateCmd: []string{"-go-internal-mkdir {dir} clone -- {repo} " + filepath.Join("{dir}", fossilRepoName), "-go-internal-cd {dir} open .fossil"},
DownloadCmd: []string{"up"},
TagCmd: []tagCmd{{"tag ls", `(.*)`}},
TagSyncCmd: []string{"up tag:{tag}"},
TagSyncDefault: []string{"up trunk"},
Scheme: []string{"https", "http"},
RemoteRepo: fossilRemoteRepo,
}
func fossilRemoteRepo(vcsFossil *Cmd, rootDir string) (remoteRepo string, err error) {
out, err := vcsFossil.runOutput(rootDir, "remote-url")
if err != nil {
return "", err
}
return strings.TrimSpace(string(out)), nil
}
func (v *Cmd) String() string {
return v.Name
}
// run runs the command line cmd in the given directory.
// keyval is a list of key, value pairs. run expands
// instances of {key} in cmd into value, but only after
// splitting cmd into individual arguments.
// If an error occurs, run prints the command line and the
// command's combined stdout+stderr to standard error.
// Otherwise run discards the command's output.
func (v *Cmd) run(dir string, cmd string, keyval ...string) error {
_, err := v.run1(dir, cmd, keyval, true)
return err
}
// runVerboseOnly is like run but only generates error output to standard error in verbose mode.
func (v *Cmd) runVerboseOnly(dir string, cmd string, keyval ...string) error {
_, err := v.run1(dir, cmd, keyval, false)
return err
}
// runOutput is like run but returns the output of the command.
func (v *Cmd) runOutput(dir string, cmd string, keyval ...string) ([]byte, error) {
return v.run1(dir, cmd, keyval, true)
}
// run1 is the generalized implementation of run and runOutput.
func (v *Cmd) run1(dir string, cmdline string, keyval []string, verbose bool) ([]byte, error) {
m := make(map[string]string)
for i := 0; i < len(keyval); i += 2 {
m[keyval[i]] = keyval[i+1]
}
args := strings.Fields(cmdline)
for i, arg := range args {
args[i] = expand(m, arg)
}
if len(args) >= 2 && args[0] == "-go-internal-mkdir" {
var err error
if filepath.IsAbs(args[1]) {
err = os.Mkdir(args[1], fs.ModePerm)
} else {
err = os.Mkdir(filepath.Join(dir, args[1]), fs.ModePerm)
}
if err != nil {
return nil, err
}
args = args[2:]
}
if len(args) >= 2 && args[0] == "-go-internal-cd" {
if filepath.IsAbs(args[1]) {
dir = args[1]
} else {
dir = filepath.Join(dir, args[1])
}
args = args[2:]
}
_, err := exec.LookPath(v.Cmd)
if err != nil {
fmt.Fprintf(os.Stderr,
"go: missing %s command. See https://golang.org/s/gogetcmd\n",
v.Name)
return nil, err
}
cmd := exec.Command(v.Cmd, args...)
cmd.Dir = dir
cmd.Env = base.AppendPWD(os.Environ(), cmd.Dir)
if cfg.BuildX {
fmt.Fprintf(os.Stderr, "cd %s\n", dir)
fmt.Fprintf(os.Stderr, "%s %s\n", v.Cmd, strings.Join(args, " "))
}
out, err := cmd.Output()
if err != nil {
if verbose || cfg.BuildV {
fmt.Fprintf(os.Stderr, "# cd %s; %s %s\n", dir, v.Cmd, strings.Join(args, " "))
if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 {
os.Stderr.Write(ee.Stderr)
} else {
fmt.Fprint(os.Stderr, err.Error())
}
}
}
return out, err
}
// Ping pings to determine scheme to use.
func (v *Cmd) Ping(scheme, repo string) error {
return v.runVerboseOnly(".", v.PingCmd, "scheme", scheme, "repo", repo)
}
// Create creates a new copy of repo in dir.
// The parent of dir must exist; dir must not.
func (v *Cmd) Create(dir, repo string) error {
for _, cmd := range v.CreateCmd {
if err := v.run(".", cmd, "dir", dir, "repo", repo); err != nil {
return err
}
}
return nil
}
// Download downloads any new changes for the repo in dir.
func (v *Cmd) Download(dir string) error {
for _, cmd := range v.DownloadCmd {
if err := v.run(dir, cmd); err != nil {
return err
}
}
return nil
}
// Tags returns the list of available tags for the repo in dir.
func (v *Cmd) Tags(dir string) ([]string, error) {
var tags []string
for _, tc := range v.TagCmd {
out, err := v.runOutput(dir, tc.cmd)
if err != nil {
return nil, err
}
re := regexp.MustCompile(`(?m-s)` + tc.pattern)
for _, m := range re.FindAllStringSubmatch(string(out), -1) {
tags = append(tags, m[1])
}
}
return tags, nil
}
// TagSync syncs the repo in dir to the named tag,
// which either is a tag returned by Tags or is v.TagSyncDefault.
func (v *Cmd) TagSync(dir, tag string) error {
if v.TagSyncCmd == nil {
return nil
}
if tag != "" {
for _, tc := range v.TagLookupCmd {
out, err := v.runOutput(dir, tc.cmd, "tag", tag)
if err != nil {
return err
}
re := regexp.MustCompile(`(?m-s)` + tc.pattern)
m := re.FindStringSubmatch(string(out))
if len(m) > 1 {
tag = m[1]
break
}
}
}
if tag == "" && v.TagSyncDefault != nil {
for _, cmd := range v.TagSyncDefault {
if err := v.run(dir, cmd); err != nil {
return err
}
}
return nil
}
for _, cmd := range v.TagSyncCmd {
if err := v.run(dir, cmd, "tag", tag); err != nil {
return err
}
}
return nil
}
// A vcsPath describes how to convert an import path into a
// version control system and repository name.
type vcsPath struct {
pathPrefix string // prefix this description applies to
regexp *lazyregexp.Regexp // compiled pattern for import path
repo string // repository to use (expand with match of re)
vcs string // version control system to use (expand with match of re)
check func(match map[string]string) error // additional checks
schemelessRepo bool // if true, the repo pattern lacks a scheme
}
// FromDir inspects dir and its parents to determine the
// version control system and code repository to use.
// On return, root is the import path
// corresponding to the root of the repository.
func FromDir(dir, srcRoot string) (vcs *Cmd, root string, err error) {
// Clean and double-check that dir is in (a subdirectory of) srcRoot.
dir = filepath.Clean(dir)
srcRoot = filepath.Clean(srcRoot)
if len(dir) <= len(srcRoot) || dir[len(srcRoot)] != filepath.Separator {
return nil, "", fmt.Errorf("directory %q is outside source root %q", dir, srcRoot)
}
var vcsRet *Cmd
var rootRet string
origDir := dir
for len(dir) > len(srcRoot) {
for _, vcs := range vcsList {
if _, err := os.Stat(filepath.Join(dir, "."+vcs.Cmd)); err == nil {
root := filepath.ToSlash(dir[len(srcRoot)+1:])
// Record first VCS we find, but keep looking,
// to detect mistakes like one kind of VCS inside another.
if vcsRet == nil {
vcsRet = vcs
rootRet = root
continue
}
// Allow .git inside .git, which can arise due to submodules.
if vcsRet == vcs && vcs.Cmd == "git" {
continue
}
// Otherwise, we have one VCS inside a different VCS.
return nil, "", fmt.Errorf("directory %q uses %s, but parent %q uses %s",
filepath.Join(srcRoot, rootRet), vcsRet.Cmd, filepath.Join(srcRoot, root), vcs.Cmd)
}
}
// Move to parent.
ndir := filepath.Dir(dir)
if len(ndir) >= len(dir) {
// Shouldn't happen, but just in case, stop.
break
}
dir = ndir
}
if vcsRet != nil {
if err := checkGOVCS(vcsRet, rootRet); err != nil {
return nil, "", err
}
return vcsRet, rootRet, nil
}
return nil, "", fmt.Errorf("directory %q is not using a known version control system", origDir)
}
// A govcsRule is a single GOVCS rule like private:hg|svn.
type govcsRule struct {
pattern string
allowed []string
}
// A govcsConfig is a full GOVCS configuration.
type govcsConfig []govcsRule
func parseGOVCS(s string) (govcsConfig, error) {
s = strings.TrimSpace(s)
if s == "" {
return nil, nil
}
var cfg govcsConfig
have := make(map[string]string)
for _, item := range strings.Split(s, ",") {
item = strings.TrimSpace(item)
if item == "" {
return nil, fmt.Errorf("empty entry in GOVCS")
}
i := strings.Index(item, ":")
if i < 0 {
return nil, fmt.Errorf("malformed entry in GOVCS (missing colon): %q", item)
}
pattern, list := strings.TrimSpace(item[:i]), strings.TrimSpace(item[i+1:])
if pattern == "" {
return nil, fmt.Errorf("empty pattern in GOVCS: %q", item)
}
if list == "" {
return nil, fmt.Errorf("empty VCS list in GOVCS: %q", item)
}
if search.IsRelativePath(pattern) {
return nil, fmt.Errorf("relative pattern not allowed in GOVCS: %q", pattern)
}
if old := have[pattern]; old != "" {
return nil, fmt.Errorf("unreachable pattern in GOVCS: %q after %q", item, old)
}
have[pattern] = item
allowed := strings.Split(list, "|")
for i, a := range allowed {
a = strings.TrimSpace(a)
if a == "" {
return nil, fmt.Errorf("empty VCS name in GOVCS: %q", item)
}
allowed[i] = a
}
cfg = append(cfg, govcsRule{pattern, allowed})
}
return cfg, nil
}
func (c *govcsConfig) allow(path string, private bool, vcs string) bool {
for _, rule := range *c {
match := false
switch rule.pattern {
case "private":
match = private
case "public":
match = !private
default:
// Note: rule.pattern is known to be comma-free,
// so MatchPrefixPatterns is only matching a single pattern for us.
match = module.MatchPrefixPatterns(rule.pattern, path)
}
if !match {
continue
}
for _, allow := range rule.allowed {
if allow == vcs || allow == "all" {
return true
}
}
return false
}
// By default, nothing is allowed.
return false
}
var (
govcs govcsConfig
govcsErr error
govcsOnce sync.Once
)
// defaultGOVCS is the default setting for GOVCS.
// Setting GOVCS adds entries ahead of these but does not remove them.
// (They are appended to the parsed GOVCS setting.)
//
// The rationale behind allowing only Git and Mercurial is that
// these two systems have had the most attention to issues
// of being run as clients of untrusted servers. In contrast,
// Bazaar, Fossil, and Subversion have primarily been used
// in trusted, authenticated environments and are not as well
// scrutinized as attack surfaces.
//
// See golang.org/issue/41730 for details.
var defaultGOVCS = govcsConfig{
{"private", []string{"all"}},
{"public", []string{"git", "hg"}},
}
func checkGOVCS(vcs *Cmd, root string) error {
if vcs == vcsMod {
// Direct module (proxy protocol) fetches don't
// involve an external version control system
// and are always allowed.
return nil
}
govcsOnce.Do(func() {
govcs, govcsErr = parseGOVCS(os.Getenv("GOVCS"))
govcs = append(govcs, defaultGOVCS...)
})
if govcsErr != nil {
return govcsErr
}
private := module.MatchPrefixPatterns(cfg.GOPRIVATE, root)
if !govcs.allow(root, private, vcs.Cmd) {
what := "public"
if private {
what = "private"
}
return fmt.Errorf("GOVCS disallows using %s for %s %s; see 'go help vcs'", vcs.Cmd, what, root)
}
return nil
}
// CheckNested checks for an incorrectly-nested VCS-inside-VCS
// situation for dir, checking parents up until srcRoot.
func CheckNested(vcs *Cmd, dir, srcRoot string) error {
if len(dir) <= len(srcRoot) || dir[len(srcRoot)] != filepath.Separator {
return fmt.Errorf("directory %q is outside source root %q", dir, srcRoot)
}
otherDir := dir
for len(otherDir) > len(srcRoot) {
for _, otherVCS := range vcsList {
if _, err := os.Stat(filepath.Join(otherDir, "."+otherVCS.Cmd)); err == nil {
// Allow expected vcs in original dir.
if otherDir == dir && otherVCS == vcs {
continue
}
// Allow .git inside .git, which can arise due to submodules.
if otherVCS == vcs && vcs.Cmd == "git" {
continue
}
// Otherwise, we have one VCS inside a different VCS.
return fmt.Errorf("directory %q uses %s, but parent %q uses %s", dir, vcs.Cmd, otherDir, otherVCS.Cmd)
}
}
// Move to parent.
newDir := filepath.Dir(otherDir)
if len(newDir) >= len(otherDir) {
// Shouldn't happen, but just in case, stop.
break
}
otherDir = newDir
}
return nil
}
// RepoRoot describes the repository root for a tree of source code.
type RepoRoot struct {
Repo string // repository URL, including scheme
Root string // import path corresponding to root of repo
IsCustom bool // defined by served <meta> tags (as opposed to hard-coded pattern)
VCS *Cmd
}
func httpPrefix(s string) string {
for _, prefix := range [...]string{"http:", "https:"} {
if strings.HasPrefix(s, prefix) {
return prefix
}
}
return ""
}
// ModuleMode specifies whether to prefer modules when looking up code sources.
type ModuleMode int
const (
IgnoreMod ModuleMode = iota
PreferMod
)
// RepoRootForImportPath analyzes importPath to determine the
// version control system, and code repository to use.
func RepoRootForImportPath(importPath string, mod ModuleMode, security web.SecurityMode) (*RepoRoot, error) {
rr, err := repoRootFromVCSPaths(importPath, security, vcsPaths)
if err == errUnknownSite {
rr, err = repoRootForImportDynamic(importPath, mod, security)
if err != nil {
err = importErrorf(importPath, "unrecognized import path %q: %v", importPath, err)
}
}
if err != nil {
rr1, err1 := repoRootFromVCSPaths(importPath, security, vcsPathsAfterDynamic)
if err1 == nil {
rr = rr1
err = nil
}
}
// Should have been taken care of above, but make sure.
if err == nil && strings.Contains(importPath, "...") && strings.Contains(rr.Root, "...") {
// Do not allow wildcards in the repo root.
rr = nil
err = importErrorf(importPath, "cannot expand ... in %q", importPath)
}
return rr, err
}
var errUnknownSite = errors.New("dynamic lookup required to find mapping")
// repoRootFromVCSPaths attempts to map importPath to a repoRoot
// using the mappings defined in vcsPaths.
func repoRootFromVCSPaths(importPath string, security web.SecurityMode, vcsPaths []*vcsPath) (*RepoRoot, error) {
if str.HasPathPrefix(importPath, "example.net") {
// TODO(rsc): This should not be necessary, but it's required to keep
// tests like ../../testdata/script/mod_get_extra.txt from using the network.
// That script has everything it needs in the replacement set, but it is still
// doing network calls.
return nil, fmt.Errorf("no modules on example.net")
}
if importPath == "rsc.io" {
// This special case allows tests like ../../testdata/script/govcs.txt
// to avoid making any network calls. The module lookup for a path
// like rsc.io/nonexist.svn/foo needs to not make a network call for
// a lookup on rsc.io.
return nil, fmt.Errorf("rsc.io is not a module")
}
// A common error is to use https://packagepath because that's what
// hg and git require. Diagnose this helpfully.
if prefix := httpPrefix(importPath); prefix != "" {
// The importPath has been cleaned, so has only one slash. The pattern
// ignores the slashes; the error message puts them back on the RHS at least.
return nil, fmt.Errorf("%q not allowed in import path", prefix+"//")
}
for _, srv := range vcsPaths {
if !str.HasPathPrefix(importPath, srv.pathPrefix) {
continue
}
m := srv.regexp.FindStringSubmatch(importPath)
if m == nil {
if srv.pathPrefix != "" {
return nil, importErrorf(importPath, "invalid %s import path %q", srv.pathPrefix, importPath)
}
continue
}
// Build map of named subexpression matches for expand.
match := map[string]string{
"prefix": srv.pathPrefix + "/",
"import": importPath,
}
for i, name := range srv.regexp.SubexpNames() {
if name != "" && match[name] == "" {
match[name] = m[i]
}
}
if srv.vcs != "" {
match["vcs"] = expand(match, srv.vcs)
}
if srv.repo != "" {
match["repo"] = expand(match, srv.repo)
}
if srv.check != nil {
if err := srv.check(match); err != nil {
return nil, err
}
}
vcs := vcsByCmd(match["vcs"])
if vcs == nil {
return nil, fmt.Errorf("unknown version control system %q", match["vcs"])
}
if err := checkGOVCS(vcs, match["root"]); err != nil {
return nil, err
}
var repoURL string
if !srv.schemelessRepo {
repoURL = match["repo"]
} else {
scheme := vcs.Scheme[0] // default to first scheme
repo := match["repo"]
if vcs.PingCmd != "" {
// If we know how to test schemes, scan to find one.
for _, s := range vcs.Scheme {
if security == web.SecureOnly && !vcs.isSecureScheme(s) {
continue
}
if vcs.Ping(s, repo) == nil {
scheme = s
break
}
}
}
repoURL = scheme + "://" + repo
}
rr := &RepoRoot{
Repo: repoURL,
Root: match["root"],
VCS: vcs,
}
return rr, nil
}
return nil, errUnknownSite
}
// urlForImportPath returns a partially-populated URL for the given Go import path.
//
// The URL leaves the Scheme field blank so that web.Get will try any scheme
// allowed by the selected security mode.
func urlForImportPath(importPath string) (*urlpkg.URL, error) {
slash := strings.Index(importPath, "/")
if slash < 0 {
slash = len(importPath)
}
host, path := importPath[:slash], importPath[slash:]
if !strings.Contains(host, ".") {
return nil, errors.New("import path does not begin with hostname")
}
if len(path) == 0 {
path = "/"
}
return &urlpkg.URL{Host: host, Path: path, RawQuery: "go-get=1"}, nil
}
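// exampleURLForImportPath is an illustrative sketch, not part of the
// original file: the import path and resulting URL below are hypothetical.
func exampleURLForImportPath() {
	u, err := urlForImportPath("example.com/pkg/foo")
	if err == nil {
		_ = u.String() // "//example.com/pkg/foo?go-get=1" (scheme intentionally left blank)
	}
}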
// repoRootForImportDynamic finds a *RepoRoot for a custom domain that's not
// statically known by repoRootForImportPathStatic.
//
// This handles custom import paths like "name.tld/pkg/foo" or just "name.tld".
func repoRootForImportDynamic(importPath string, mod ModuleMode, security web.SecurityMode) (*RepoRoot, error) {
url, err := urlForImportPath(importPath)
if err != nil {
return nil, err
}
resp, err := web.Get(security, url)
if err != nil {
msg := "https fetch: %v"
if security == web.Insecure {
msg = "http/" + msg
}
return nil, fmt.Errorf(msg, err)
}
body := resp.Body
defer body.Close()
imports, err := parseMetaGoImports(body, mod)
if len(imports) == 0 {
if respErr := resp.Err(); respErr != nil {
// If the server's status was not OK, prefer to report that instead of
// an XML parse error.
return nil, respErr
}
}
if err != nil {
return nil, fmt.Errorf("parsing %s: %v", importPath, err)
}
// Find the matched meta import.
mmi, err := matchGoImport(imports, importPath)
if err != nil {
if _, ok := err.(ImportMismatchError); !ok {
return nil, fmt.Errorf("parse %s: %v", url, err)
}
return nil, fmt.Errorf("parse %s: no go-import meta tags (%s)", resp.URL, err)
}
if cfg.BuildV {
log.Printf("get %q: found meta tag %#v at %s", importPath, mmi, url)
}
// If the import was "uni.edu/bob/project", which said the
// prefix was "uni.edu" and the RepoRoot was "evilroot.com",
// make sure we don't trust Bob and check out evilroot.com to
// "uni.edu" yet (possibly overwriting/preempting another
// non-evil student). Instead, first verify the root and see
// if it matches Bob's claim.
if mmi.Prefix != importPath {
if cfg.BuildV {
log.Printf("get %q: verifying non-authoritative meta tag", importPath)
}
var imports []metaImport
url, imports, err = metaImportsForPrefix(mmi.Prefix, mod, security)
if err != nil {
return nil, err
}
metaImport2, err := matchGoImport(imports, importPath)
if err != nil || mmi != metaImport2 {
return nil, fmt.Errorf("%s and %s disagree about go-import for %s", resp.URL, url, mmi.Prefix)
}
}
if err := validateRepoRoot(mmi.RepoRoot); err != nil {
return nil, fmt.Errorf("%s: invalid repo root %q: %v", resp.URL, mmi.RepoRoot, err)
}
var vcs *Cmd
if mmi.VCS == "mod" {
vcs = vcsMod
} else {
vcs = vcsByCmd(mmi.VCS)
if vcs == nil {
return nil, fmt.Errorf("%s: unknown vcs %q", resp.URL, mmi.VCS)
}
}
if err := checkGOVCS(vcs, mmi.Prefix); err != nil {
return nil, err
}
rr := &RepoRoot{
Repo: mmi.RepoRoot,
Root: mmi.Prefix,
IsCustom: true,
VCS: vcs,
}
return rr, nil
}
// validateRepoRoot returns an error if repoRoot does not seem to be
// a valid URL with scheme.
func validateRepoRoot(repoRoot string) error {
url, err := urlpkg.Parse(repoRoot)
if err != nil {
return err
}
if url.Scheme == "" {
return errors.New("no scheme")
}
if url.Scheme == "file" {
return errors.New("file scheme disallowed")
}
return nil
}
var fetchGroup singleflight.Group
var (
fetchCacheMu sync.Mutex
fetchCache = map[string]fetchResult{} // key is metaImportsForPrefix's importPrefix
)
// metaImportsForPrefix takes a package's root import path as declared in a <meta> tag
// and returns its HTML discovery URL and the parsed metaImport lines
// found on the page.
//
// The importPath is of the form "golang.org/x/tools".
// It is an error if no imports are found.
// url will still be valid if err != nil.
// The returned url will be of the form "https://golang.org/x/tools?go-get=1"
func metaImportsForPrefix(importPrefix string, mod ModuleMode, security web.SecurityMode) (*urlpkg.URL, []metaImport, error) {
setCache := func(res fetchResult) (fetchResult, error) {
fetchCacheMu.Lock()
defer fetchCacheMu.Unlock()
fetchCache[importPrefix] = res
return res, nil
}
resi, _, _ := fetchGroup.Do(importPrefix, func() (resi interface{}, err error) {
fetchCacheMu.Lock()
if res, ok := fetchCache[importPrefix]; ok {
fetchCacheMu.Unlock()
return res, nil
}
fetchCacheMu.Unlock()
url, err := urlForImportPath(importPrefix)
if err != nil {
return setCache(fetchResult{err: err})
}
resp, err := web.Get(security, url)
if err != nil {
return setCache(fetchResult{url: url, err: fmt.Errorf("fetching %s: %v", importPrefix, err)})
}
body := resp.Body
defer body.Close()
imports, err := parseMetaGoImports(body, mod)
if len(imports) == 0 {
if respErr := resp.Err(); respErr != nil {
// If the server's status was not OK, prefer to report that instead of
// an XML parse error.
return setCache(fetchResult{url: url, err: respErr})
}
}
if err != nil {
return setCache(fetchResult{url: url, err: fmt.Errorf("parsing %s: %v", resp.URL, err)})
}
if len(imports) == 0 {
err = fmt.Errorf("fetching %s: no go-import meta tag found in %s", importPrefix, resp.URL)
}
return setCache(fetchResult{url: url, imports: imports, err: err})
})
res := resi.(fetchResult)
return res.url, res.imports, res.err
}
type fetchResult struct {
url *urlpkg.URL
imports []metaImport
err error
}
// metaImport represents the parsed <meta name="go-import"
// content="prefix vcs reporoot" /> tags from HTML files.
type metaImport struct {
Prefix, VCS, RepoRoot string
}
// An ImportMismatchError is returned when meta imports are present
// but none match our import path.
type ImportMismatchError struct {
importPath string
mismatches []string // the meta imports that were discarded for not matching our importPath
}
func (m ImportMismatchError) Error() string {
formattedStrings := make([]string, len(m.mismatches))
for i, pre := range m.mismatches {
formattedStrings[i] = fmt.Sprintf("meta tag %s did not match import path %s", pre, m.importPath)
}
return strings.Join(formattedStrings, ", ")
}
// matchGoImport returns the metaImport from imports matching importPath.
// An error is returned if there are multiple matches.
// An ImportMismatchError is returned if none match.
func matchGoImport(imports []metaImport, importPath string) (metaImport, error) {
match := -1
errImportMismatch := ImportMismatchError{importPath: importPath}
for i, im := range imports {
if !str.HasPathPrefix(importPath, im.Prefix) {
errImportMismatch.mismatches = append(errImportMismatch.mismatches, im.Prefix)
continue
}
if match >= 0 {
if imports[match].VCS == "mod" && im.VCS != "mod" {
// All the mod entries precede all the non-mod entries.
// We have a mod entry and don't care about the rest,
// matching or not.
break
}
return metaImport{}, fmt.Errorf("multiple meta tags match import path %q", importPath)
}
match = i
}
if match == -1 {
return metaImport{}, errImportMismatch
}
return imports[match], nil
}
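// exampleMatchGoImport is an illustrative sketch, not part of the original
// file: it shows the prefix-based selection with a hypothetical meta import.
func exampleMatchGoImport() {
	imports := []metaImport{
		{Prefix: "example.org/x", VCS: "git", RepoRoot: "https://example.org/x.git"},
	}
	mi, err := matchGoImport(imports, "example.org/x/sub/pkg")
	if err == nil {
		_ = mi.RepoRoot // "https://example.org/x.git"
	}
}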
// expand rewrites s to replace {k} with match[k] for each key k in match.
func expand(match map[string]string, s string) string {
// We want to replace each match exactly once, and the result of expansion
// must not depend on the iteration order through the map.
// A strings.Replacer has exactly the properties we're looking for.
oldNew := make([]string, 0, 2*len(match))
for k, v := range match {
oldNew = append(oldNew, "{"+k+"}", v)
}
return strings.NewReplacer(oldNew...).Replace(s)
}
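// exampleExpand is an illustrative sketch, not part of the original file:
// it shows the placeholder substitution performed by expand with a
// hypothetical match map.
func exampleExpand() {
	match := map[string]string{"root": "github.com/user/repo"}
	repoURL := expand(match, "https://{root}") // "https://github.com/user/repo"
	_ = repoURL
}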
// vcsPaths defines the meaning of import paths referring to
// commonly-used VCS hosting sites (github.com/user/dir)
// and import paths referring to a fully-qualified importPath
// containing a VCS type (foo.com/repo.git/dir)
var vcsPaths = []*vcsPath{
// GitHub
{
pathPrefix: "github.com",
regexp: lazyregexp.New(`^(?P<root>github\.com/[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+)(/[A-Za-z0-9_.\-]+)*$`),
vcs: "git",
repo: "https://{root}",
check: noVCSSuffix,
},
// Bitbucket
{
pathPrefix: "bitbucket.org",
regexp: lazyregexp.New(`^(?P<root>bitbucket\.org/(?P<bitname>[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+))(/[A-Za-z0-9_.\-]+)*$`),
repo: "https://{root}",
check: bitbucketVCS,
},
// IBM DevOps Services (JazzHub)
{
pathPrefix: "hub.jazz.net/git",
regexp: lazyregexp.New(`^(?P<root>hub\.jazz\.net/git/[a-z0-9]+/[A-Za-z0-9_.\-]+)(/[A-Za-z0-9_.\-]+)*$`),
vcs: "git",
repo: "https://{root}",
check: noVCSSuffix,
},
// Git at Apache
{
pathPrefix: "git.apache.org",
regexp: lazyregexp.New(`^(?P<root>git\.apache\.org/[a-z0-9_.\-]+\.git)(/[A-Za-z0-9_.\-]+)*$`),
vcs: "git",
repo: "https://{root}",
},
// Git at OpenStack
{
pathPrefix: "git.openstack.org",
regexp: lazyregexp.New(`^(?P<root>git\.openstack\.org/[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+)(\.git)?(/[A-Za-z0-9_.\-]+)*$`),
vcs: "git",
repo: "https://{root}",
},
// chiselapp.com for fossil
{
pathPrefix: "chiselapp.com",
regexp: lazyregexp.New(`^(?P<root>chiselapp\.com/user/[A-Za-z0-9]+/repository/[A-Za-z0-9_.\-]+)$`),
vcs: "fossil",
repo: "https://{root}",
},
// General syntax for any server.
// Must be last.
{
regexp: lazyregexp.New(`(?P<root>(?P<repo>([a-z0-9.\-]+\.)+[a-z0-9.\-]+(:[0-9]+)?(/~?[A-Za-z0-9_.\-]+)+?)\.(?P<vcs>bzr|fossil|git|hg|svn))(/~?[A-Za-z0-9_.\-]+)*$`),
schemelessRepo: true,
},
}
// vcsPathsAfterDynamic gives additional vcsPaths entries
// to try after the dynamic HTML check.
// This gives those sites a chance to introduce <meta> tags
// as part of a graceful transition away from the hard-coded logic.
var vcsPathsAfterDynamic = []*vcsPath{
// Launchpad. See golang.org/issue/11436.
{
pathPrefix: "launchpad.net",
regexp: lazyregexp.New(`^(?P<root>launchpad\.net/((?P<project>[A-Za-z0-9_.\-]+)(?P<series>/[A-Za-z0-9_.\-]+)?|~[A-Za-z0-9_.\-]+/(\+junk|[A-Za-z0-9_.\-]+)/[A-Za-z0-9_.\-]+))(/[A-Za-z0-9_.\-]+)*$`),
vcs: "bzr",
repo: "https://{root}",
check: launchpadVCS,
},
}
// noVCSSuffix checks that the repository name does not
// end in .foo for any version control system foo.
// The usual culprit is ".git".
func noVCSSuffix(match map[string]string) error {
repo := match["repo"]
for _, vcs := range vcsList {
if strings.HasSuffix(repo, "."+vcs.Cmd) {
return fmt.Errorf("invalid version control suffix in %s path", match["prefix"])
}
}
return nil
}
// bitbucketVCS determines the version control system for a
// Bitbucket repository, by using the Bitbucket API.
func bitbucketVCS(match map[string]string) error {
if err := noVCSSuffix(match); err != nil {
return err
}
var resp struct {
SCM string `json:"scm"`
}
url := &urlpkg.URL{
Scheme: "https",
Host: "api.bitbucket.org",
Path: expand(match, "/2.0/repositories/{bitname}"),
RawQuery: "fields=scm",
}
data, err := web.GetBytes(url)
if err != nil {
if httpErr, ok := err.(*web.HTTPError); ok && httpErr.StatusCode == 403 {
// this may be a private repository. If so, attempt to determine which
// VCS it uses. See issue 5375.
root := match["root"]
for _, vcs := range []string{"git", "hg"} {
if vcsByCmd(vcs).Ping("https", root) == nil {
resp.SCM = vcs
break
}
}
}
if resp.SCM == "" {
return err
}
} else {
if err := json.Unmarshal(data, &resp); err != nil {
return fmt.Errorf("decoding %s: %v", url, err)
}
}
if vcsByCmd(resp.SCM) != nil {
match["vcs"] = resp.SCM
if resp.SCM == "git" {
match["repo"] += ".git"
}
return nil
}
return fmt.Errorf("unable to detect version control system for bitbucket.org/ path")
}
// launchpadVCS solves the ambiguity for "lp.net/project/foo". In this case,
// "foo" could be a series name registered in Launchpad with its own branch,
// and it could also be the name of a directory within the main project
// branch one level up.
func launchpadVCS(match map[string]string) error {
if match["project"] == "" || match["series"] == "" {
return nil
}
url := &urlpkg.URL{
Scheme: "https",
Host: "code.launchpad.net",
Path: expand(match, "/{project}{series}/.bzr/branch-format"),
}
_, err := web.GetBytes(url)
if err != nil {
match["root"] = expand(match, "launchpad.net/{project}")
match["repo"] = expand(match, "https://{root}")
}
return nil
}
// importError is a copy of load.importError, made to avoid a dependency cycle
// on cmd/go/internal/load. It just needs to satisfy load.ImportPathError.
type importError struct {
importPath string
err error
}
func importErrorf(path, format string, args ...interface{}) error {
err := &importError{importPath: path, err: fmt.Errorf(format, args...)}
if errStr := err.Error(); !strings.Contains(errStr, path) {
panic(fmt.Sprintf("path %q not in error %q", path, errStr))
}
return err
}
func (e *importError) Error() string {
return e.err.Error()
}
func (e *importError) Unwrap() error {
// Don't return e.err directly, since we're only wrapping an error if %w
// was passed to ImportErrorf.
return errors.Unwrap(e.err)
}
func (e *importError) ImportPath() string {
return e.importPath
}
| [
"\"GIT_ALLOW_PROTOCOL\"",
"\"GOVCS\""
]
| []
| [
"GIT_ALLOW_PROTOCOL",
"GOVCS"
]
| [] | ["GIT_ALLOW_PROTOCOL", "GOVCS"] | go | 2 | 0 | |
src/npgru/upload.py | import gzip
import logging
import os
import pathlib
import shutil
from typing import Dict
import boto3
import numpy as np
import pandas as pd
import tensorflow.keras as keras
from dotenv import load_dotenv
def upload(project_dir: pathlib.Path, logger: logging.Logger) -> None:
load_dotenv()
model_dir = project_dir.joinpath("models")
data_dir = project_dir.joinpath("data")
classifier_dir = model_dir.joinpath("tensorflow")
logger.info("Load model to extract weights")
model = keras.models.load_model(filepath=str(model_dir.joinpath("tensorflow")))
logger.info("Extract weights from trained model")
weights = extract_model_weights(model)
logger.info("Save each set of weights as csv file")
weight_dir = model_dir.joinpath("weights")
weight_dir.mkdir(parents=True, exist_ok=True)
for weight_name in weights:
weight = weights[weight_name]
pd.DataFrame(weight).to_csv(weight_dir.joinpath(f"{weight_name}.csv"), index=False, header=False)
logger.info("Zip tensorflow model, weight directory and tokenizer file")
shutil.make_archive(classifier_dir, "zip", classifier_dir)
shutil.make_archive(weight_dir, "zip", weight_dir)
tokenizer_name = "tokenizer.model"
source = model_dir.joinpath(tokenizer_name)
target = model_dir.joinpath(tokenizer_name + ".gz")
with open(source, "rb") as source_file, gzip.open(target, "wb", compresslevel=1) as target_file:
shutil.copyfileobj(source_file, target_file)
logger.info("Send zipped files to S3 bucket")
bucket_name = os.environ.get("S3_BUCKET_NAME")
prefix = "gru-forward-numpy"
zipped_tokenizer_name = tokenizer_name + ".gz"
zipped_classifier_name = "tensorflow.zip"
zipped_weights_name = "weights.zip"
zipped_data_name = os.environ.get("ZIPFILE_NAME")
upload_to_s3(bucket_name, prefix, zipped_classifier_name, model_dir.joinpath(zipped_classifier_name))
upload_to_s3(bucket_name, prefix, zipped_tokenizer_name, model_dir.joinpath(zipped_tokenizer_name))
upload_to_s3(bucket_name, prefix, zipped_weights_name, model_dir.joinpath(zipped_weights_name))
upload_to_s3(bucket_name, prefix, zipped_data_name, data_dir.joinpath(zipped_data_name))
def extract_model_weights(model: keras.Sequential) -> Dict[str, np.array]:
embedding = model.weights[0].numpy()
input_kernel = model.weights[1].numpy()
input_bias = model.weights[3].numpy()[0, :]
return {
"embedding_affine": embedding @ input_kernel + np.outer(np.ones(embedding.shape[0]), input_bias),
"hidden_kernel": model.weights[2].numpy(),
"hidden_bias": model.weights[3].numpy()[1, :],
"dense_kernel": model.weights[4].numpy(),
"dense_bias": model.weights[5].numpy()
}
def upload_to_s3(bucket_name: str, prefix: str, file_name: str, local_file_path: pathlib.Path) -> None:
s3_client = boto3.client("s3")
s3_key = f"{prefix}/{file_name}"
    s3_client.upload_file(str(local_file_path), bucket_name, s3_key)  # upload the local file to the bucket under the "<prefix>/<file_name>" key
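# Illustrative sketch, not part of the original module: a hypothetical call
# showing how upload_to_s3 maps onto boto3's upload_file (bucket and paths
# below are made up).
def _example_upload_to_s3():
    upload_to_s3(
        bucket_name="my-example-bucket",
        prefix="gru-forward-numpy",
        file_name="weights.zip",
        local_file_path=pathlib.Path("models/weights.zip"),
    )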
| []
| []
| [
"S3_BUCKET_NAME",
"ZIPFILE_NAME"
]
| [] | ["S3_BUCKET_NAME", "ZIPFILE_NAME"] | python | 2 | 0 | |
python/paddle/distributed/fleet/meta_optimizers/ps_optimizer.py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle import fluid
import paddle.distributed.passes
from .meta_optimizer_base import MetaOptimizerBase
from paddle.fluid import core
import subprocess
import re
import os
import platform
from paddle.distributed.ps.utils.public import *
from paddle.distributed.passes import PassContext
from ..base.private_helper_function import wait_server_ready
from paddle.distributed.ps.utils.ps_factory import PsProgramBuilderFactory
class ParameterServerOptimizer(MetaOptimizerBase):
def __init__(self, optimizer):
super(ParameterServerOptimizer, self).__init__(optimizer)
self.inner_opt = optimizer
# we do not allow meta optimizer to be inner optimizer currently
self.meta_optimizers_white_list = []
def _set_basic_info(self, loss, role_maker, user_defined_optimizer,
user_defined_strategy):
super(ParameterServerOptimizer, self)._set_basic_info(
loss, role_maker, user_defined_optimizer, user_defined_strategy)
def _set_origin_programs(self, losses):
self.origin_main_programs = []
for loss in losses:
self.origin_main_programs.append(loss.block.program)
def _init_ps_pass_context(self, loss, startup_program):
self.pass_ctx = PassContext()
attrs = {}
# trainer
attrs["env"] = get_dist_env()
attrs['loss'] = loss
attrs['min_block_size'] = 81920
attrs['origin_main_program'] = loss.block.program
attrs['origin_startup_program'] = startup_program
attrs['origin_main_programs'] = self.origin_main_programs
attrs['cloned_main'] = attrs['origin_main_program'].clone()
attrs['cloned_startup'] = attrs['origin_startup_program'].clone()
attrs['user_defined_strategy'] = self.user_defined_strategy
attrs['valid_strategy'] = self.user_defined_strategy
attrs['trainer'] = TrainerRuntimeConfig(self.user_defined_strategy)
attrs['ps_mode'] = attrs['trainer'].mode
logger.info("ps_mode: {}".format(attrs['ps_mode']))
attrs['role_maker'] = self.role_maker
attrs[
'is_heter_ps_mode'] = self.role_maker._is_heter_parameter_server_mode
attrs['is_worker'] = self.role_maker._is_worker()
attrs['is_server'] = self.role_maker._is_server()
attrs['is_heter_worker'] = self.role_maker._is_heter_worker()
logger.info("this process is heter? {}".format(attrs[
'is_heter_worker']))
attrs['use_ps_gpu'] = self.user_defined_strategy.a_sync_configs[
"use_ps_gpu"]
attrs['lr_decay_steps'] = self.user_defined_strategy.a_sync_configs[
"lr_decay_steps"]
attrs['k_steps'] = self.user_defined_strategy.a_sync_configs["k_steps"]
attrs['launch_barrier'] = self.user_defined_strategy.a_sync_configs[
"launch_barrier"]
attrs['launch_barrier_flag'] = int(
os.getenv("FLAGS_LAUNCH_BARRIER", "1"))
build_var_distributed(attrs)
# server
attrs['_main_server'] = fluid.Program()
attrs['_startup_server'] = fluid.Program()
attrs['tensor_table'] = {}
self.pass_ctx._attrs = attrs
def _is_graph_out(self):
return False
def _can_apply(self):
if self.role_maker._is_collective:
return False
k_steps = self.user_defined_strategy.a_sync_configs["k_steps"]
return True if k_steps >= 0 else False
def minimize_impl(self,
loss,
startup_program=None,
parameter_list=None,
no_grad_set=None):
self.inner_opt.minimize(loss, startup_program, parameter_list,
no_grad_set)
if startup_program == None:
startup_program = paddle.static.default_startup_program()
print("program after inner optimizer minimize:",
str(loss.block.program))
self._set_origin_programs([loss])
self._init_ps_pass_context(loss, startup_program)
ps_builder = PsProgramBuilderFactory()._create_ps_program_builder(
self.pass_ctx)
ps_builder._build_programs()
return None, None
def minimize_losses_impl(self,
losses,
startup_program=None,
parameter_list=None,
no_grad_set=None):
if parameter_list is None:
parameter_list = [None] * len(losses)
for idx, loss in enumerate(losses):
startup_prog = startup_program[idx]
parameters = parameter_list[idx]
self.inner_opt.minimize(loss, startup_prog, parameters, no_grad_set)
self._set_origin_programs(losses)
for idx, loss in enumerate(losses):
print("ps_optimizer idx loss:", idx, loss)
startup_prog = startup_program[idx]
self._init_ps_pass_context(loss, startup_prog)
ps_builder = PsProgramBuilderFactory()._create_ps_program_builder(
self.pass_ctx)
ps_builder._build_programs()
startup_program[idx] = self.pass_ctx._attrs['cloned_startup']
return None, None
def _can_apply_geo(self, program):
def get_sys_free_mem():
plat = platform.system()
if platform.system() == "Darwin":
vm = subprocess.Popen(
['vm_stat'], stdout=subprocess.PIPE).communicate()[0]
# Process vm_stat
                vmLines = vm.decode().split('\n')  # Popen returns bytes; decode before splitting
sep = re.compile(r':[\s]+')
vmStats = {}
for row in range(1, len(vmLines) - 2):
rowText = vmLines[row].strip()
rowElements = sep.split(rowText)
vmStats[(rowElements[0]
)] = int(rowElements[1].strip(r'\.')) * 4096
return vmStats["Pages free"]
elif platform.system() == "Linux":
mems = {}
with open('/proc/meminfo', 'rb') as f:
for line in f:
fields = line.split()
mems[fields[0]] = int(fields[1]) * 1024
free = mems[b'MemFree:']
return free
else:
raise ValueError(
"%s platform is unsupported is parameter server optimizer" %
(platform.system()))
if not isinstance(self.inner_opt, fluid.optimizer.SGDOptimizer):
return False
free = get_sys_free_mem()
processed_var_names = set(["@EMPTY@"])
param_memory_size = 0
for varname in program.global_block().vars:
var = program.global_block().vars[varname]
if not var.persistable or var.desc.type(
) != core.VarDesc.VarType.LOD_TENSOR:
continue
set_var_lod_type(var)
param_memory_size += get_var_mem_size(var)
processed_var_names.add(varname)
upper_mem_use = param_memory_size * 5.0
program_tmp_vars = dict()
eval_batch_size = 1024
for op in program.global_block().ops:
for var_name in op.output_arg_names:
if var_name in processed_var_names:
continue
processed_var_names.add(var_name)
var = program.global_block().vars[var_name]
if var.desc.type() != core.VarDesc.VarType.LOD_TENSOR:
continue
data_count = 1
neg_dim_count = 0
for x in var.shape:
if x < 0:
if neg_dim_count >= 1:
raise ValueError(
"Var %s has more than one negative dim." %
(var_name))
neg_dim_count += 1
data_count *= (-x)
else:
data_count *= x
program_tmp_vars[var_name] = (
data_count, neg_dim_count,
vars_metatools.dtype_to_size[var.dtype])
for varname in program_tmp_vars:
data_count, neg_dim_count, type_size = program_tmp_vars[varname]
if neg_dim_count == 1:
data_count *= eval_batch_size
var_memory = data_count * type_size
upper_mem_use += var_memory
if upper_mem_use < free:
return True
else:
return False
def _enable_strategy(self, dist_strategy, context):
if dist_strategy.a_sync_configs["k_steps"] >= 0:
return
dist_strategy.a_sync = True
is_geo = self._can_apply_geo(context["origin_main_program"])
dist_strategy.a_sync_configs["k_steps"] = 800 if is_geo else 0
def _disable_strategy(self, dist_strategy):
dist_strategy.a_sync = False
dist_strategy.a_sync_configs["k_steps"] = -1
| []
| []
| [
"FLAGS_LAUNCH_BARRIER"
]
| [] | ["FLAGS_LAUNCH_BARRIER"] | python | 1 | 0 | |
lib/test/flags.go | /*
Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package test
import (
"flag"
"os"
)
// Flags holds the command line flags or defaults for settings in the user's environment.
// See ClientFlags for the list of supported fields.
var Flags = InitializeFlags()
// ClientFlags define the flags that are needed to run the e2e tests.
type ClientFlags struct {
DockerConfigJSON string
ReuseNamespace bool
}
// InitializeFlags initializes the client's flags
func InitializeFlags() *ClientFlags {
var f ClientFlags
dockerConfigJSON := os.Getenv("DOCKER_CONFIG_JSON")
flag.StringVar(&f.DockerConfigJSON, "dockerconfigjson", dockerConfigJSON,
"Provide the path to Docker configuration file in json format. Defaults to $DOCKER_CONFIG_JSON")
// Might be useful in restricted environments where namespaces need to be
// created by a user with increased privileges (admin).
flag.BoolVar(&f.ReuseNamespace, "reusenamespace", false, "Whether to re-use namespace for test if it already exists.")
return &f
}
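// Illustrative note, not part of the original file: once flag.Parse has run
// in the test binary, callers read the resolved settings through the
// package-level Flags variable, e.g. Flags.DockerConfigJSON and
// Flags.ReuseNamespace.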
| [
"\"DOCKER_CONFIG_JSON\""
]
| []
| [
"DOCKER_CONFIG_JSON"
]
| [] | ["DOCKER_CONFIG_JSON"] | go | 1 | 0 | |
osdsctl/cli/filesharesnapshot_test.go | // Copyright 2019 The OpenSDS Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cli
import (
"os"
"os/exec"
"testing"
c "github.com/opensds/opensds/client"
)
func init() {
client = c.NewFakeClient(&c.Config{Endpoint: c.TestEp})
}
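// Note (descriptive comment, not in the original file): TestFileShareSnapshotAction
// uses the "crasher" pattern: it re-executes the test binary with BE_CRASHER=1
// so that the os.Exit(1) triggered by fileShareSnapshotAction terminates only
// the child process, and the parent can assert on the exit status.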
func TestFileShareSnapshotAction(t *testing.T) {
beCrasher := os.Getenv("BE_CRASHER")
if beCrasher == "1" {
var args []string
fileShareSnapshotAction(fileShareSnapshotCommand, args)
return
}
cmd := exec.Command(os.Args[0], "-test.run=TestFileShareSnapshotAction")
cmd.Env = append(os.Environ(), "BE_CRASHER=1")
err := cmd.Run()
e, ok := err.(*exec.ExitError)
if ok && ("exit status 1" == e.Error()) {
return
}
t.Fatalf("process ran with %s, want exit status 1", e.Error())
}
func TestFileShareSnapshotCreateAction(t *testing.T) {
var args []string
args = append(args, "bd5b12a8-a101-11e7-941e-d77981b584d8")
fileShareSnapshotCreateAction(fileShareSnapshotCreateCommand, args)
}
func TestFileShareSnapshotShowAction(t *testing.T) {
var args []string
args = append(args, "3769855c-a102-11e7-b772-17b880d2f537")
fileShareSnapshotShowAction(fileShareSnapshotShowCommand, args)
}
func TestFileShareSnapshotListAction(t *testing.T) {
var args []string
fileShareSnapshotListAction(fileShareSnapshotListCommand, args)
}
func TestFileShareSnapshotDeleteAction(t *testing.T) {
var args []string
args = append(args, "3769855c-a102-11e7-b772-17b880d2f537")
fileShareSnapshotDeleteAction(fileShareSnapshotDeleteCommand, args)
}
func TestFileShareSnapshotUpdateAction(t *testing.T) {
var args []string
args = append(args, "3769855c-a102-11e7-b772-17b880d2f537")
fileShareSnapshotUpdateAction(fileShareSnapshotDeleteCommand, args)
}
| [
"\"BE_CRASHER\""
]
| []
| [
"BE_CRASHER"
]
| [] | ["BE_CRASHER"] | go | 1 | 0 | |
tests/director/gateway-integration/integration_system_scope_test.go | package gateway_integration
import (
"context"
"fmt"
"os"
"testing"
"github.com/kyma-incubator/compass/tests/director/pkg/ptr"
"github.com/kyma-incubator/compass/tests/director/pkg/idtokenprovider"
"github.com/kyma-incubator/compass/components/director/pkg/graphql"
"github.com/kyma-incubator/compass/tests/director/pkg/gql"
"github.com/stretchr/testify/require"
)
func TestIntegrationSystemScenario(t *testing.T) {
domain := os.Getenv("DOMAIN")
require.NotEmpty(t, domain)
tenant := os.Getenv("DEFAULT_TENANT")
require.NotEmpty(t, tenant)
ctx := context.Background()
t.Log("Get Dex id_token")
dexToken, err := idtokenprovider.GetDexToken()
require.NoError(t, err)
dexGraphQLClient := gql.NewAuthorizedGraphQLClient(dexToken)
t.Log("Register Integration System with Dex id token")
intSys := registerIntegrationSystem(t, ctx, dexGraphQLClient, tenant, "integration-system")
t.Log("Request Client Credentials for Integration System")
intSysOauthCredentialData := requestClientCredentialsForIntegrationSystem(t, ctx, dexGraphQLClient, tenant, intSys.ID)
t.Log("Issue a token with Client Credentials")
token := getAccessToken(t, intSysOauthCredentialData, integrationSystemScopes)
oauthGraphQLClient := gql.NewAuthorizedGraphQLClientWithCustomURL(token, fmt.Sprintf("https://compass-gateway-auth-oauth.%s/director/graphql", domain))
t.Run("Test application scopes", func(t *testing.T) {
t.Log("Register an application")
appInput := graphql.ApplicationRegisterInput{
Name: "app",
ProviderName: ptr.String("compass"),
IntegrationSystemID: &intSys.ID,
}
appByIntSys := registerApplicationFromInputWithinTenant(t, ctx, oauthGraphQLClient, tenant, appInput)
require.NotEmpty(t, appByIntSys.ID)
t.Log("Get application")
app := getApplication(t, ctx, oauthGraphQLClient, tenant, appByIntSys.ID)
require.NotEmpty(t, app.ID)
require.Equal(t, appByIntSys.ID, app.ID)
t.Log("Unregister application")
unregisterApplication(t, ctx, oauthGraphQLClient, tenant, appByIntSys.ID)
})
t.Run("Test application template scopes", func(t *testing.T) {
t.Log("Create an application template")
appTplInput := graphql.ApplicationTemplateInput{
Name: "test",
Description: nil,
ApplicationInput: &graphql.ApplicationRegisterInput{
Name: "test",
ProviderName: ptr.String("test"),
},
Placeholders: nil,
AccessLevel: "GLOBAL",
}
appTpl := createApplicationTemplate(t, ctx, oauthGraphQLClient, tenant, appTplInput)
require.NotEmpty(t, appTpl.ID)
t.Log("Get application template")
gqlAppTpl := getApplicationTemplate(t, ctx, oauthGraphQLClient, tenant, appTpl.ID)
require.NotEmpty(t, gqlAppTpl.ID)
require.Equal(t, appTpl.ID, gqlAppTpl.ID)
t.Log("Delete application template")
deleteApplicationTemplate(t, ctx, oauthGraphQLClient, tenant, appTpl.ID)
})
t.Run("Test runtime scopes", func(t *testing.T) {
t.Log("Register runtime")
runtimeInput := graphql.RuntimeInput{
Name: "test",
}
runtime := registerRuntimeFromInputWithinTenant(t, ctx, oauthGraphQLClient, tenant, &runtimeInput)
require.NotEmpty(t, runtime.ID)
t.Log("Get runtime")
gqlRuntime := getRuntime(t, ctx, oauthGraphQLClient, tenant, runtime.ID)
require.NotEmpty(t, gqlRuntime.ID)
require.Equal(t, runtime.ID, gqlRuntime.ID)
t.Log("Unregister runtime")
unregisterRuntimeWithinTenant(t, ctx, oauthGraphQLClient, tenant, runtime.ID)
})
t.Log("Unregister Integration System")
unregisterIntegrationSystem(t, ctx, dexGraphQLClient, tenant, intSys.ID)
}
| [
"\"DOMAIN\"",
"\"DEFAULT_TENANT\""
]
| []
| [
"DOMAIN",
"DEFAULT_TENANT"
]
| [] | ["DOMAIN", "DEFAULT_TENANT"] | go | 2 | 0 | |
ckan_ttnmapper_updater/__init__.py | import json
import logging
import os
import requests
import sys
from requests_toolbelt.multipart.encoder import MultipartEncoder
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
level=logging.INFO,
stream=sys.stdout)
TTNMAPPER_URL = "https://www.ttnmapper.org/geojson/"
def get_config():
try:
config_file = os.environ['CONFIG_FILE']
with open(config_file, 'r') as fhandle:
config_data = json.load(fhandle)
ckan_url = config_data['ckan_url']
api_key = config_data['api_key']
gateways = config_data['gateways']
except json.decoder.JSONDecodeError:
logging.error("Configuration file does not appear to be "
"valid JSON")
sys.exit(1)
except FileNotFoundError:
logging.error("Configuration file was not found at %s"
% config_file)
sys.exit(1)
except KeyError:
logging.error("Configuration file is missing required key. "
"Check you have url, api_key and gateways")
sys.exit(1)
return ckan_url, api_key, gateways
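# Illustrative sketch, not part of the original module: the JSON shape that
# get_config() expects at $CONFIG_FILE. All values below are hypothetical.
EXAMPLE_CONFIG = {
    "ckan_url": "https://ckan.example.org/",
    "api_key": "00000000-0000-0000-0000-000000000000",
    "gateways": [
        {
            "ttn_id": "eui-0000000000000000",
            "ckan_id": "00000000-0000-0000-0000-000000000000",
            "name": "Example Gateway"
        }
    ]
}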
def process_gateway(ckan_url, api_key, gateway):
logging.info("Fetching gateway's alphashape GeoJSON file")
gw_url = ('%s%s/alphashape.geojson'
% (TTNMAPPER_URL, gateway['ttn_id']))
geojson_resp = requests.get(gw_url)
if geojson_resp.status_code == 200:
logging.info("Fetched GeoJSON file successfully")
geojson_data = geojson_resp.text
else:
raise Exception("Unable to process gateway, couldn't fetch "
"GeoJSON data from TTNMapper service")
logging.info("Uploading acquired data to CKAN service")
multipart_data = MultipartEncoder(
fields={
            'upload': ('alphashape.geojson', geojson_data, 'text/plain'),
'format': 'GeoJSON',
'id': gateway['ckan_id'],
'name': gateway['name']
}
)
headers = {'Content-Type': multipart_data.content_type,
'Authorization': api_key}
ckan_resp = requests.post('%sapi/action/resource_update' % ckan_url,
data=multipart_data,
headers=headers)
if ckan_resp.status_code == 200:
logging.info("Successfully updated gateway within CKAN")
else:
raise Exception("Error occured uploading data to CKAN: %s"
% ckan_resp.text)
def run_updater():
logging.info("Running CKAN TTNMapper Updater")
logging.info("Loading configuration file")
ckan_url, api_key, gateways = get_config()
logging.info("Updating resources at %s" % ckan_url)
for gateway in gateways:
logging.info("Currently processing %s" % gateway['name'])
try:
process_gateway(ckan_url, api_key, gateway)
except Exception as err:
logging.error("Error occured processing gateway: %s" % str(err))
logging.info("Completed syncing data from TTNMapper to CKAN")
if __name__ == "__main__":
run_updater()
| []
| []
| [
"CONFIG_FILE"
]
| [] | ["CONFIG_FILE"] | python | 1 | 0 | |
kubetest/kops.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"bytes"
"context"
"encoding/json"
"errors"
"flag"
"fmt"
"io/ioutil"
"log"
"math/rand"
"net"
"os"
"os/exec"
"os/user"
"path/filepath"
"strconv"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
"golang.org/x/crypto/ssh"
"k8s.io/test-infra/kubetest/e2e"
"k8s.io/test-infra/kubetest/util"
)
// kopsAWSMasterSize is the default ec2 instance type for kops on aws
const kopsAWSMasterSize = "c5.large"
const externalIPMetadataURL = "http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/access-configs/0/external-ip"
var externalIPServiceURLs = []string{
"https://ip.jsb.workers.dev",
"https://v4.ifconfig.co",
}
var (
// kops specific flags.
kopsPath = flag.String("kops", "", "(kops only) Path to the kops binary. kops will be downloaded from kops-base-url if not set.")
kopsCluster = flag.String("kops-cluster", "", "(kops only) Deprecated. Cluster name for kops; if not set defaults to --cluster.")
kopsState = flag.String("kops-state", "", "(kops only) s3:// path to kops state store. Must be set.")
kopsSSHUser = flag.String("kops-ssh-user", os.Getenv("USER"), "(kops only) Username for SSH connections to nodes.")
kopsSSHKey = flag.String("kops-ssh-key", "", "(kops only) Path to ssh key-pair for each node (defaults '~/.ssh/kube_aws_rsa' if unset.)")
kopsSSHPublicKey = flag.String("kops-ssh-public-key", "", "(kops only) Path to ssh public key for each node (defaults to --kops-ssh-key value with .pub suffix if unset.)")
kopsKubeVersion = flag.String("kops-kubernetes-version", "", "(kops only) If set, the version of Kubernetes to deploy (can be a URL to a GCS path where the release is stored) (Defaults to kops default, latest stable release.).")
kopsZones = flag.String("kops-zones", "", "(kops only) zones for kops deployment, comma delimited.")
kopsNodes = flag.Int("kops-nodes", 2, "(kops only) Number of nodes to create.")
kopsUpTimeout = flag.Duration("kops-up-timeout", 20*time.Minute, "(kops only) Time limit between 'kops config / kops update' and a response from the Kubernetes API.")
kopsAdminAccess = flag.String("kops-admin-access", "", "(kops only) If set, restrict apiserver access to this CIDR range.")
kopsImage = flag.String("kops-image", "", "(kops only) Image (AMI) for nodes to use. (Defaults to kops default, a Debian image with a custom kubernetes kernel.)")
kopsArgs = flag.String("kops-args", "", "(kops only) Additional space-separated args to pass unvalidated to 'kops create cluster', e.g. '--kops-args=\"--dns private --node-size t2.micro\"'")
kopsPriorityPath = flag.String("kops-priority-path", "", "(kops only) Insert into PATH if set")
kopsBaseURL = flag.String("kops-base-url", "", "(kops only) Base URL for a prebuilt version of kops")
kopsVersion = flag.String("kops-version", "", "(kops only) URL to a file containing a valid kops-base-url")
kopsDiskSize = flag.Int("kops-disk-size", 48, "(kops only) Disk size to use for nodes and masters")
kopsPublish = flag.String("kops-publish", "", "(kops only) Publish kops version to the specified gs:// path on success")
kopsMasterSize = flag.String("kops-master-size", kopsAWSMasterSize, "(kops only) master instance type")
kopsMasterCount = flag.Int("kops-master-count", 1, "(kops only) Number of masters to run")
kopsDNSProvider = flag.String("kops-dns-provider", "", "(kops only) DNS Provider. CoreDNS or KubeDNS")
kopsEtcdVersion = flag.String("kops-etcd-version", "", "(kops only) Etcd Version")
kopsNetworkMode = flag.String("kops-network-mode", "", "(kops only) Networking mode to use. kubenet (default), classic, external, kopeio-vxlan (or kopeio), weave, flannel-vxlan (or flannel), flannel-udp, calico, canal, kube-router, romana, amazon-vpc-routed-eni, cilium.")
kopsOverrides = flag.String("kops-overrides", "", "(kops only) List of Kops cluster configuration overrides, comma delimited.")
kopsFeatureFlags = flag.String("kops-feature-flags", "", "(kops only) List of Kops feature flags to enable, comma delimited.")
kopsMultipleZones = flag.Bool("kops-multiple-zones", false, "(kops only) run tests in multiple zones")
awsRegions = []string{
"ap-south-1",
"eu-west-2",
"eu-west-1",
"ap-northeast-2",
"ap-northeast-1",
"sa-east-1",
"ca-central-1",
// not supporting Singapore since they do not seem to have capacity for c4.large
//"ap-southeast-1",
"ap-southeast-2",
"eu-central-1",
"us-east-1",
"us-east-2",
"us-west-1",
"us-west-2",
// not supporting Paris yet as AWS does not have all instance types available
//"eu-west-3",
}
)
type kops struct {
path string
kubeVersion string
zones []string
nodes int
adminAccess string
cluster string
image string
args string
kubecfg string
diskSize int
// sshUser is the username to use when SSHing to nodes (for example for log capture)
sshUser string
// sshPublicKey is the path to the SSH public key matching sshPrivateKey
sshPublicKey string
// sshPrivateKey is the path to the SSH private key matching sshPublicKey
sshPrivateKey string
// GCP project we should use
gcpProject string
// Cloud provider in use (gce, aws)
provider string
// kopsVersion is the version of kops we are running (used for publishing)
kopsVersion string
// kopsPublish is the path where we will publish kopsVersion, after a successful test
kopsPublish string
// masterCount denotes how many masters to start
masterCount int
// dnsProvider is the DNS Provider the cluster will use (CoreDNS or KubeDNS)
dnsProvider string
// etcdVersion is the etcd version to run
etcdVersion string
// masterSize is the EC2 instance type for the master
masterSize string
// networkMode is the networking mode to use for the cluster (e.g kubenet)
networkMode string
// overrides is a list of cluster configuration overrides, comma delimited
overrides string
// featureFlags is a list of feature flags to enable, comma delimited
featureFlags string
// multipleZones denotes using more than one zone
multipleZones bool
}
var _ deployer = kops{}
func migrateKopsEnv() error {
return util.MigrateOptions([]util.MigratedOption{
{
Env: "KOPS_STATE_STORE",
Option: kopsState,
Name: "--kops-state",
SkipPush: true,
},
{
Env: "AWS_SSH_KEY",
Option: kopsSSHKey,
Name: "--kops-ssh-key",
SkipPush: true,
},
{
Env: "PRIORITY_PATH",
Option: kopsPriorityPath,
Name: "--kops-priority-path",
SkipPush: true,
},
})
}
func newKops(provider, gcpProject, cluster string) (*kops, error) {
tmpdir, err := ioutil.TempDir("", "kops")
if err != nil {
return nil, err
}
if err := migrateKopsEnv(); err != nil {
return nil, err
}
if *kopsCluster != "" {
cluster = *kopsCluster
}
if cluster == "" {
return nil, fmt.Errorf("--cluster or --kops-cluster must be set to a valid cluster name for kops deployment")
}
if *kopsState == "" {
return nil, fmt.Errorf("--kops-state must be set to a valid S3 path for kops deployment")
}
if *kopsPriorityPath != "" {
if err := util.InsertPath(*kopsPriorityPath); err != nil {
return nil, err
}
}
// TODO(fejta): consider explicitly passing these env items where needed.
sshKey := *kopsSSHKey
if sshKey == "" {
usr, err := user.Current()
if err != nil {
return nil, err
}
sshKey = filepath.Join(usr.HomeDir, ".ssh/kube_aws_rsa")
}
if err := os.Setenv("KOPS_STATE_STORE", *kopsState); err != nil {
return nil, err
}
sshPublicKey := *kopsSSHPublicKey
if sshPublicKey == "" {
sshPublicKey = sshKey + ".pub"
}
sshUser := *kopsSSHUser
if sshUser != "" {
if err := os.Setenv("KUBE_SSH_USER", sshUser); err != nil {
return nil, err
}
}
// Repoint KUBECONFIG to an isolated kubeconfig in our temp directory
kubecfg := filepath.Join(tmpdir, "kubeconfig")
f, err := os.Create(kubecfg)
if err != nil {
return nil, err
}
defer f.Close()
if err := f.Chmod(0600); err != nil {
return nil, err
}
if err := os.Setenv("KUBECONFIG", kubecfg); err != nil {
return nil, err
}
// Set KUBERNETES_CONFORMANCE_TEST so the auth info is picked up
// from kubectl instead of bash inference.
if err := os.Setenv("KUBERNETES_CONFORMANCE_TEST", "yes"); err != nil {
return nil, err
}
// Set KUBERNETES_CONFORMANCE_PROVIDER to override the
// cloudprovider for KUBERNETES_CONFORMANCE_TEST.
// This value is set by the provider flag that is passed into kubetest.
// HACK: until we merge #7408, there's a bug in the ginkgo-e2e.sh script we have to work around
// TODO(justinsb): remove this hack once #7408 merges
// if err := os.Setenv("KUBERNETES_CONFORMANCE_PROVIDER", provider); err != nil {
if err := os.Setenv("KUBERNETES_CONFORMANCE_PROVIDER", "aws"); err != nil {
return nil, err
}
// AWS_SSH_KEY is required by the AWS e2e tests.
if err := os.Setenv("AWS_SSH_KEY", sshKey); err != nil {
return nil, err
}
// zones are required by the kops e2e tests.
var zones []string
// if zones is set to zero and gcp project is not set then pick random aws zone
if *kopsZones == "" && provider == "aws" {
zones, err = getRandomAWSZones(*kopsMasterCount, *kopsMultipleZones)
if err != nil {
return nil, err
}
} else {
zones = strings.Split(*kopsZones, ",")
}
// set ZONES for e2e.go
if err := os.Setenv("ZONE", zones[0]); err != nil {
return nil, err
}
if len(zones) == 0 {
return nil, errors.New("no zones found")
} else if zones[0] == "" {
return nil, errors.New("zone cannot be a empty string")
}
log.Printf("executing kops with zones: %q", zones)
// Set kops-base-url from kops-version
if *kopsVersion != "" {
if *kopsBaseURL != "" {
return nil, fmt.Errorf("cannot set --kops-version and --kops-base-url")
}
var b bytes.Buffer
if err := httpRead(*kopsVersion, &b); err != nil {
return nil, err
}
latest := strings.TrimSpace(b.String())
log.Printf("Got latest kops version from %v: %v", *kopsVersion, latest)
if latest == "" {
return nil, fmt.Errorf("version URL %v was empty", *kopsVersion)
}
*kopsBaseURL = latest
}
// kops looks at KOPS_BASE_URL env var, so export it here
if *kopsBaseURL != "" {
if err := os.Setenv("KOPS_BASE_URL", *kopsBaseURL); err != nil {
return nil, err
}
}
// Download kops from kopsBaseURL if kopsPath is not set
if *kopsPath == "" {
if *kopsBaseURL == "" {
return nil, errors.New("--kops or --kops-base-url must be set")
}
kopsBinURL := *kopsBaseURL + "/linux/amd64/kops"
log.Printf("Download kops binary from %s", kopsBinURL)
kopsBin := filepath.Join(tmpdir, "kops")
f, err := os.Create(kopsBin)
if err != nil {
return nil, fmt.Errorf("error creating file %q: %v", kopsBin, err)
}
defer f.Close()
if err := httpRead(kopsBinURL, f); err != nil {
return nil, err
}
if err := util.EnsureExecutable(kopsBin); err != nil {
return nil, err
}
*kopsPath = kopsBin
}
return &kops{
path: *kopsPath,
kubeVersion: *kopsKubeVersion,
sshPrivateKey: sshKey,
sshPublicKey: sshPublicKey,
sshUser: sshUser,
zones: zones,
nodes: *kopsNodes,
adminAccess: *kopsAdminAccess,
cluster: cluster,
image: *kopsImage,
args: *kopsArgs,
kubecfg: kubecfg,
provider: provider,
gcpProject: gcpProject,
diskSize: *kopsDiskSize,
kopsVersion: *kopsBaseURL,
kopsPublish: *kopsPublish,
masterCount: *kopsMasterCount,
dnsProvider: *kopsDNSProvider,
etcdVersion: *kopsEtcdVersion,
masterSize: *kopsMasterSize,
networkMode: *kopsNetworkMode,
overrides: *kopsOverrides,
featureFlags: *kopsFeatureFlags,
}, nil
}
func (k kops) isGoogleCloud() bool {
return k.provider == "gce"
}
func (k kops) Up() error {
// If we downloaded kubernetes, pass that version to kops
if k.kubeVersion == "" {
// TODO(justinsb): figure out a refactor that allows us to get this from acquireKubernetes cleanly
kubeReleaseURL := os.Getenv("KUBERNETES_RELEASE_URL")
kubeRelease := os.Getenv("KUBERNETES_RELEASE")
if kubeReleaseURL != "" && kubeRelease != "" {
if !strings.HasSuffix(kubeReleaseURL, "/") {
kubeReleaseURL += "/"
}
k.kubeVersion = kubeReleaseURL + kubeRelease
}
}
createArgs := []string{
"create", "cluster",
"--name", k.cluster,
"--ssh-public-key", k.sshPublicKey,
"--node-count", strconv.Itoa(k.nodes),
"--node-volume-size", strconv.Itoa(k.diskSize),
"--master-volume-size", strconv.Itoa(k.diskSize),
"--master-count", strconv.Itoa(k.masterCount),
"--zones", strings.Join(k.zones, ","),
}
var featureFlags []string
if k.featureFlags != "" {
featureFlags = append(featureFlags, k.featureFlags)
}
var overrides []string
if k.overrides != "" {
overrides = append(overrides, k.overrides)
}
// We are defaulting the master size to c5.large on AWS because it's cheapest non-throttled instance type.
// When we are using GCE, then we need to handle the flag differently.
// If we are not using gce then add the masters size flag, or if we are using gce, and the
// master size is not set to the aws default, then add the master size flag.
if !k.isGoogleCloud() || (k.isGoogleCloud() && k.masterSize != kopsAWSMasterSize) {
createArgs = append(createArgs, "--master-size", k.masterSize)
}
if k.kubeVersion != "" {
createArgs = append(createArgs, "--kubernetes-version", k.kubeVersion)
}
if k.adminAccess == "" {
externalIPRange, err := getExternalIPRange()
if err != nil {
return fmt.Errorf("external IP cannot be retrieved: %v", err)
}
log.Printf("Using external IP for admin access: %v", externalIPRange)
k.adminAccess = externalIPRange
}
createArgs = append(createArgs, "--admin-access", k.adminAccess)
// Since https://github.com/kubernetes/kubernetes/pull/80655 conformance now require node ports to be open to all nodes
overrides = append(overrides, "cluster.spec.nodePortAccess=0.0.0.0/0")
if k.image != "" {
createArgs = append(createArgs, "--image", k.image)
}
if k.gcpProject != "" {
createArgs = append(createArgs, "--project", k.gcpProject)
}
if k.isGoogleCloud() {
featureFlags = append(featureFlags, "AlphaAllowGCE")
createArgs = append(createArgs, "--cloud", "gce")
} else {
// append cloud type to allow for use of new regions without updates
createArgs = append(createArgs, "--cloud", "aws")
}
if k.networkMode != "" {
createArgs = append(createArgs, "--networking", k.networkMode)
}
if k.args != "" {
createArgs = append(createArgs, strings.Split(k.args, " ")...)
}
if k.dnsProvider != "" {
overrides = append(overrides, "spec.kubeDNS.provider="+k.dnsProvider)
}
if k.etcdVersion != "" {
overrides = append(overrides, "cluster.spec.etcdClusters[*].version="+k.etcdVersion)
}
if len(overrides) != 0 {
featureFlags = append(featureFlags, "SpecOverrideFlag")
createArgs = append(createArgs, "--override", strings.Join(overrides, ","))
}
if len(featureFlags) != 0 {
os.Setenv("KOPS_FEATURE_FLAGS", strings.Join(featureFlags, ","))
}
createArgs = append(createArgs, "--yes")
if err := control.FinishRunning(exec.Command(k.path, createArgs...)); err != nil {
return fmt.Errorf("kops create cluster failed: %v", err)
}
// TODO: Once this gets support for N checks in a row, it can replace the above node readiness check
if err := control.FinishRunning(exec.Command(k.path, "validate", "cluster", k.cluster, "--wait", "15m")); err != nil {
return fmt.Errorf("kops validate cluster failed: %v", err)
}
// We require repeated successes, so we know that the cluster is stable
// (e.g. in HA scenarios, or where we're using multiple DNS servers)
// We use a relatively high number as DNS can take a while to
// propagate across multiple servers / caches
requiredConsecutiveSuccesses := 10
// Wait for nodes to become ready
if err := waitForReadyNodes(k.nodes+1, *kopsUpTimeout, requiredConsecutiveSuccesses); err != nil {
return fmt.Errorf("kops nodes not ready: %v", err)
}
return nil
}
// getExternalIPRange returns the external IP range where the test job
// is running, e.g. 8.8.8.8/32, useful for restricting access to the
// apiserver and any other exposed endpoints.
func getExternalIPRange() (string, error) {
var b bytes.Buffer
err := httpReadWithHeaders(externalIPMetadataURL, map[string]string{"Metadata-Flavor": "Google"}, &b)
if err != nil {
// This often fails due to workload identity
log.Printf("failed to get external ip from metadata service: %v", err)
} else if ip := net.ParseIP(strings.TrimSpace(b.String())); ip != nil {
return ip.String() + "/32", nil
} else {
log.Printf("metadata service returned invalid ip %q", b.String())
}
for attempt := 0; attempt < 5; attempt++ {
for _, u := range externalIPServiceURLs {
b.Reset()
err = httpRead(u, &b)
if err != nil {
// The external service may well be down
log.Printf("failed to get external ip from %s: %v", u, err)
} else if ip := net.ParseIP(strings.TrimSpace(b.String())); ip != nil {
return ip.String() + "/32", nil
} else {
log.Printf("service %s returned invalid ip %q", u, b.String())
}
}
time.Sleep(2 * time.Second)
}
return "", fmt.Errorf("external IP cannot be retrieved")
}
func (k kops) IsUp() error {
return isUp(k)
}
func (k kops) DumpClusterLogs(localPath, gcsPath string) error {
privateKeyPath := k.sshPrivateKey
if strings.HasPrefix(privateKeyPath, "~/") {
privateKeyPath = filepath.Join(os.Getenv("HOME"), privateKeyPath[2:])
}
key, err := ioutil.ReadFile(privateKeyPath)
if err != nil {
return fmt.Errorf("error reading private key %q: %v", k.sshPrivateKey, err)
}
signer, err := ssh.ParsePrivateKey(key)
if err != nil {
return fmt.Errorf("error parsing private key %q: %v", k.sshPrivateKey, err)
}
sshConfig := &ssh.ClientConfig{
User: k.sshUser,
Auth: []ssh.AuthMethod{
ssh.PublicKeys(signer),
},
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
}
sshClientFactory := &sshClientFactoryImplementation{
sshConfig: sshConfig,
}
logDumper, err := newLogDumper(sshClientFactory, localPath)
if err != nil {
return err
}
// Capture sysctl settings
logDumper.DumpSysctls = true
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
finished := make(chan error)
go func() {
finished <- k.dumpAllNodes(ctx, logDumper)
}()
logDumper.dumpPods(ctx, "kube-system", []string{"k8s-app=kops-controller"})
for {
select {
case <-interrupt.C:
cancel()
case err := <-finished:
return err
}
}
}
// dumpAllNodes connects to every node and dumps the logs
func (k *kops) dumpAllNodes(ctx context.Context, d *logDumper) error {
// Make sure kubeconfig is set, in particular before calling DumpAllNodes, which calls kubectlGetNodes
if err := k.TestSetup(); err != nil {
return fmt.Errorf("error setting up kubeconfig: %v", err)
}
var additionalIPs []string
dump, err := k.runKopsDump()
if err != nil {
log.Printf("unable to get cluster status from kops: %v", err)
} else {
for _, instance := range dump.Instances {
name := instance.Name
if len(instance.PublicAddresses) == 0 {
log.Printf("ignoring instance in kops status with no public address: %v", name)
continue
}
additionalIPs = append(additionalIPs, instance.PublicAddresses[0])
}
}
if err := d.DumpAllNodes(ctx, additionalIPs); err != nil {
return err
}
return nil
}
func (k kops) TestSetup() error {
info, err := os.Stat(k.kubecfg)
if err != nil {
if os.IsNotExist(err) {
log.Printf("kubeconfig file %s not found", k.kubecfg)
} else {
return err
}
} else if info.Size() > 0 {
// Assume that if we already have it, it's good.
return nil
}
if err := control.FinishRunning(exec.Command(k.path, "export", "kubecfg", k.cluster)); err != nil {
return fmt.Errorf("failure from 'kops export kubecfg %s': %v", k.cluster, err)
}
// Double-check that the file was exported
info, err = os.Stat(k.kubecfg)
if err != nil {
return fmt.Errorf("kubeconfig file %s was not exported", k.kubecfg)
}
if info.Size() == 0 {
return fmt.Errorf("exported kubeconfig file %s was empty", k.kubecfg)
}
return nil
}
// BuildTester returns a standard ginkgo-script tester, except for GCE where we build an e2e.Tester
func (k kops) BuildTester(o *e2e.BuildTesterOptions) (e2e.Tester, error) {
kubecfg, err := parseKubeconfig(k.kubecfg)
if err != nil {
return nil, fmt.Errorf("error parsing kubeconfig %q: %v", k.kubecfg, err)
}
log.Printf("running ginkgo tests directly")
t := e2e.NewGinkgoTester(o)
t.KubeRoot = "."
t.Kubeconfig = k.kubecfg
t.Provider = k.provider
t.ClusterID = k.cluster
if len(kubecfg.Clusters) > 0 {
t.KubeMasterURL = kubecfg.Clusters[0].Cluster.Server
}
if k.provider == "gce" {
t.GCEProject = k.gcpProject
if len(k.zones) > 0 {
zone := k.zones[0]
t.GCEZone = zone
// us-central1-a => us-central1
lastDash := strings.LastIndex(zone, "-")
if lastDash == -1 {
return nil, fmt.Errorf("unexpected format for GCE zone: %q", zone)
}
t.GCERegion = zone[0:lastDash]
}
} else if k.provider == "aws" {
if len(k.zones) > 0 {
zone := k.zones[0]
// These GCE fields are actually provider-agnostic
t.GCEZone = zone
if zone == "" {
return nil, errors.New("zone cannot be a empty string")
}
// us-east-1a => us-east-1
t.GCERegion = zone[0 : len(zone)-1]
}
}
return t, nil
}
func (k kops) Down() error {
// We do a "kops get" first so the exit status of "kops delete" is
// more sensical in the case of a non-existent cluster. ("kops
// delete" will exit with status 1 on a non-existent cluster)
err := control.FinishRunning(exec.Command(k.path, "get", "clusters", k.cluster))
if err != nil {
// This is expected if the cluster doesn't exist.
return nil
}
return control.FinishRunning(exec.Command(k.path, "delete", "cluster", k.cluster, "--yes"))
}
func (k kops) GetClusterCreated(gcpProject string) (time.Time, error) {
return time.Time{}, errors.New("not implemented")
}
// kopsDump is the format of data as dumped by `kops toolbox dump -ojson`
type kopsDump struct {
Instances []*kopsDumpInstance `json:"instances"`
}
// String implements fmt.Stringer
func (o *kopsDump) String() string {
return util.JSONForDebug(o)
}
// kopsDumpInstance is the format of an instance (machine) in a kops dump
type kopsDumpInstance struct {
Name string `json:"name"`
PublicAddresses []string `json:"publicAddresses"`
}
// String implements fmt.Stringer
func (o *kopsDumpInstance) String() string {
return util.JSONForDebug(o)
}
// runKopsDump runs a kops toolbox dump to dump the status of the cluster
func (k *kops) runKopsDump() (*kopsDump, error) {
o, err := control.Output(exec.Command(k.path, "toolbox", "dump", "--name", k.cluster, "-ojson"))
if err != nil {
log.Printf("error running kops toolbox dump: %s\n%s", wrapError(err).Error(), string(o))
return nil, err
}
dump := &kopsDump{}
if err := json.Unmarshal(o, dump); err != nil {
return nil, fmt.Errorf("error parsing kops toolbox dump output: %v", err)
}
return dump, nil
}
// kops deployer implements publisher
var _ publisher = &kops{}
// kops deployer implements e2e.TestBuilder
var _ e2e.TestBuilder = &kops{}
// Publish will publish a success file, it is called if the tests were successful
func (k kops) Publish() error {
if k.kopsPublish == "" {
// No publish destination set
return nil
}
if k.kopsVersion == "" {
return errors.New("kops-version not set; cannot publish")
}
return control.XMLWrap(&suite, "Publish kops version", func() error {
log.Printf("Set %s version to %s", k.kopsPublish, k.kopsVersion)
return gcsWrite(k.kopsPublish, []byte(k.kopsVersion))
})
}
func (k kops) KubectlCommand() (*exec.Cmd, error) { return nil, nil }
// getRandomAWSZones looks up all regions, and the availability zones for those regions. A random
// region is then chosen and the AZs for that region are returned. At least masterCount zones will be
// returned, all in the same region.
func getRandomAWSZones(masterCount int, multipleZones bool) ([]string, error) {
// TODO(chrislovecnm): get the number of ec2 instances in the region and ensure that there are not too many running
for _, i := range rand.Perm(len(awsRegions)) {
ec2Session, err := getAWSEC2Session(awsRegions[i])
if err != nil {
return nil, err
}
// Look up the AZs for this region; the AWS Go API does not allow fetching them for all regions in a single call.
zoneResults, err := ec2Session.DescribeAvailabilityZones(&ec2.DescribeAvailabilityZonesInput{})
if err != nil {
return nil, fmt.Errorf("unable to call aws api DescribeAvailabilityZones for %q: %v", awsRegions[i], err)
}
var selectedZones []string
if len(zoneResults.AvailabilityZones) >= masterCount && multipleZones {
for _, z := range zoneResults.AvailabilityZones {
selectedZones = append(selectedZones, *z.ZoneName)
}
log.Printf("Launching cluster in region: %q", awsRegions[i])
return selectedZones, nil
} else if !multipleZones {
z := zoneResults.AvailabilityZones[rand.Intn(len(zoneResults.AvailabilityZones))]
selectedZones = append(selectedZones, *z.ZoneName)
log.Printf("Launching cluster in region: %q", awsRegions[i])
return selectedZones, nil
}
}
return nil, fmt.Errorf("unable to find region with %d zones", masterCount)
}
// getAWSEC2Session creates and returns an EC2 API session.
func getAWSEC2Session(region string) (*ec2.EC2, error) {
config := aws.NewConfig().WithRegion(region)
// This avoids a confusing error message when we fail to get credentials
config = config.WithCredentialsChainVerboseErrors(true)
s, err := session.NewSession(config)
if err != nil {
return nil, fmt.Errorf("unable to build aws API session with region: %q: %v", region, err)
}
return ec2.New(s, config), nil
}
// kubeconfig is a simplified version of the kubernetes Config type
type kubeconfig struct {
Clusters []struct {
Cluster struct {
Server string `json:"server"`
} `json:"cluster"`
} `json:"clusters"`
}
// parseKubeconfig uses kubectl to extract the current kubeconfig configuration
func parseKubeconfig(kubeconfigPath string) (*kubeconfig, error) {
cmd := "kubectl"
o, err := control.Output(exec.Command(cmd, "config", "view", "--minify", "-ojson", "--kubeconfig", kubeconfigPath))
if err != nil {
log.Printf("kubectl config view failed: %s\n%s", wrapError(err).Error(), string(o))
return nil, err
}
cfg := &kubeconfig{}
if err := json.Unmarshal(o, cfg); err != nil {
return nil, fmt.Errorf("error parsing kubectl config view output: %v", err)
}
return cfg, nil
}
| [
"\"USER\"",
"\"KUBERNETES_RELEASE_URL\"",
"\"KUBERNETES_RELEASE\"",
"\"HOME\""
]
| []
| [
"USER",
"HOME",
"KUBERNETES_RELEASE",
"KUBERNETES_RELEASE_URL"
]
| [] | ["USER", "HOME", "KUBERNETES_RELEASE", "KUBERNETES_RELEASE_URL"] | go | 4 | 0 | |
main.go | package main
import (
"bytes"
"crypto/tls"
"encoding/json"
"fmt"
"io"
"log"
"net"
"net/http"
"os"
"runtime"
"runtime/debug"
"strings"
"sync/atomic"
"github.com/millisecond/olb/admin"
"github.com/millisecond/olb/cert"
"github.com/millisecond/olb/config"
"github.com/millisecond/olb/exit"
"github.com/millisecond/olb/logger"
"github.com/millisecond/olb/metrics"
"github.com/millisecond/olb/proxy"
"github.com/millisecond/olb/proxy/tcp"
"github.com/millisecond/olb/route"
"github.com/pkg/profile"
dmp "github.com/sergi/go-diff/diffmatchpatch"
"github.com/millisecond/olb/model"
"github.com/millisecond/olb/route/picker"
)
// version contains the version number
//
// It is set by build/release.sh for tagged releases
// so that 'go get' just works.
//
// It is also set by the linker when olb
// is built via the Makefile or the build/docker.sh
// script to ensure the correct version number
var version = "0.1.0"
var shuttingDown int32
func main() {
cfg, err := config.Load(os.Args, os.Environ())
if err != nil {
exit.Fatalf("[FATAL] %s. %s", version, err)
}
if cfg == nil {
fmt.Println(version)
return
}
log.Printf("[INFO] Runtime config\n" + toJSON(cfg))
log.Printf("[INFO] Version %s starting", version)
log.Printf("[INFO] Go runtime is %s", runtime.Version())
// setup profiling if enabled
var prof interface {
Stop()
}
if cfg.ProfileMode != "" {
var mode func(*profile.Profile)
switch cfg.ProfileMode {
case "":
// do nothing
case "cpu":
mode = profile.CPUProfile
case "mem":
mode = profile.MemProfile
case "mutex":
mode = profile.MutexProfile
case "block":
mode = profile.BlockProfile
default:
log.Fatalf("[FATAL] Invalid profile mode %q", cfg.ProfileMode)
}
prof = profile.Start(mode, profile.ProfilePath(cfg.ProfilePath), profile.NoShutdownHook)
log.Printf("[INFO] Profile mode %q", cfg.ProfileMode)
log.Printf("[INFO] Profile path %q", cfg.ProfilePath)
}
exit.Listen(func(s os.Signal) {
atomic.StoreInt32(&shuttingDown, 1)
proxy.Shutdown(cfg.Proxy.ShutdownWait)
if prof != nil {
prof.Stop()
}
})
// init metrics early since that creates the global metric registries
// that are used by other parts of the code.
initMetrics(cfg)
initRuntime(cfg)
initConfigService(cfg)
startAdmin(cfg)
first := make(chan bool)
go watchBackend(cfg, first)
log.Print("[INFO] Waiting for first routing table")
<-first
// create proxies after metrics since they use the metrics registry.
startServers(cfg)
exit.Wait()
log.Print("[INFO] Down")
}
func newHTTPProxy(cfg *config.Config) http.Handler {
var w io.Writer
switch cfg.Log.AccessTarget {
case "":
log.Printf("[INFO] Access logging disabled")
case "stdout":
log.Printf("[INFO] Writing access log to stdout")
w = os.Stdout
default:
exit.Fatal("[FATAL] Invalid access log target ", cfg.Log.AccessTarget)
}
format := cfg.Log.AccessFormat
switch format {
case "common":
format = logger.CommonFormat
case "combined":
format = logger.CombinedFormat
}
l, err := logger.New(w, format)
if err != nil {
exit.Fatal("[FATAL] Invalid log format: ", err)
}
pick := picker.HTTPPickers[cfg.Proxy.Strategy]
match := route.Matcher[cfg.Proxy.Matcher]
notFound := metrics.DefaultRegistry.GetCounter("notfound")
log.Printf("[INFO] Using routing strategy %q", cfg.Proxy.Strategy)
log.Printf("[INFO] Using route matching %q", cfg.Proxy.Matcher)
newTransport := func(tlscfg *tls.Config) *http.Transport {
return &http.Transport{
ResponseHeaderTimeout: cfg.Proxy.ResponseHeaderTimeout,
MaxIdleConnsPerHost: cfg.Proxy.MaxConn,
Dial: (&net.Dialer{
Timeout: cfg.Proxy.DialTimeout,
KeepAlive: cfg.Proxy.KeepAliveTimeout,
}).Dial,
TLSClientConfig: tlscfg,
}
}
return &proxy.HTTPProxy{
Config: cfg.Proxy,
Transport: newTransport(nil),
InsecureTransport: newTransport(&tls.Config{InsecureSkipVerify: true}),
Lookup: func(w http.ResponseWriter, req *http.Request) *model.Target {
target := route.GetTable().LookupHTTP(w, req, req.Header.Get("trace"), pick, match)
if target == nil {
notFound.Inc(1)
log.Print("[WARN] No route for ", req.Host, req.URL)
}
return target
},
Requests: metrics.DefaultRegistry.GetTimer("requests"),
Noroute: metrics.DefaultRegistry.GetCounter("notfound"),
Logger: l,
}
}
func lookupHostFn(cfg *config.Config) func(string) string {
pick := picker.Pickers[cfg.Proxy.Strategy]
notFound := metrics.DefaultRegistry.GetCounter("notfound")
return func(host string) string {
t := route.GetTable().LookupHost(host, pick)
if t == nil {
notFound.Inc(1)
log.Print("[WARN] No route for ", host)
return ""
}
return t.URL.Host
}
}
func makeTLSConfig(l config.Listen) (*tls.Config, error) {
if l.CertSource.Name == "" {
return nil, nil
}
src, err := cert.NewSource(l.CertSource)
if err != nil {
return nil, fmt.Errorf("Failed to create cert source %s. %s", l.CertSource.Name, err)
}
tlscfg, err := cert.TLSConfig(src, l.StrictMatch, l.TLSMinVersion, l.TLSMaxVersion, l.TLSCiphers)
if err != nil {
return nil, fmt.Errorf("[FATAL] Failed to create TLS config for cert source %s. %s", l.CertSource.Name, err)
}
return tlscfg, nil
}
func startAdmin(cfg *config.Config) {
log.Printf("[INFO] Admin server access mode %q", cfg.UI.Access)
log.Printf("[INFO] Admin server listening on %q", cfg.UI.Listen.Addr)
go func() {
l := cfg.UI.Listen
tlscfg, err := makeTLSConfig(l)
if err != nil {
exit.Fatal("[FATAL] ", err)
}
srv := &admin.Server{
Access: cfg.UI.Access,
Color: cfg.UI.Color,
Title: cfg.UI.Title,
Version: version,
Commands: route.Commands,
Cfg: cfg,
}
if err := srv.ListenAndServe(l, tlscfg); err != nil {
exit.Fatal("[FATAL] ui: ", err)
}
}()
}
func startServers(cfg *config.Config) {
for _, l := range cfg.Listen {
l := l // capture loop var for go routines below
tlscfg, err := makeTLSConfig(l)
if err != nil {
exit.Fatal("[FATAL] ", err)
}
log.Printf("[INFO] %s proxy listening on %s", strings.ToUpper(l.Proto), l.Addr)
if tlscfg != nil && tlscfg.ClientAuth == tls.RequireAndVerifyClientCert {
log.Printf("[INFO] Client certificate authentication enabled on %s", l.Addr)
}
switch l.Proto {
case "http", "https":
go func() {
h := newHTTPProxy(cfg)
if err := proxy.ListenAndServeHTTP(l, h, tlscfg); err != nil {
exit.Fatal("[FATAL] ", err)
}
}()
case "tcp":
go func() {
h := &tcp.Proxy{cfg.Proxy.DialTimeout, lookupHostFn(cfg)}
if err := proxy.ListenAndServeTCP(l, h, tlscfg); err != nil {
exit.Fatal("[FATAL] ", err)
}
}()
case "tcp+sni":
go func() {
h := &tcp.SNIProxy{cfg.Proxy.DialTimeout, lookupHostFn(cfg)}
if err := proxy.ListenAndServeTCP(l, h, tlscfg); err != nil {
exit.Fatal("[FATAL] ", err)
}
}()
default:
exit.Fatal("[FATAL] Invalid protocol ", l.Proto)
}
}
}
func initMetrics(cfg *config.Config) {
if cfg.Metrics.Target == "" {
log.Printf("[INFO] Metrics disabled")
return
}
var err error
if metrics.DefaultRegistry, err = metrics.NewRegistry(cfg.Metrics); err != nil {
exit.Fatal("[FATAL] ", err)
}
if metrics.ServiceRegistry, err = metrics.NewRegistry(cfg.Metrics); err != nil {
exit.Fatal("[FATAL] ", err)
}
}
func initRuntime(cfg *config.Config) {
if os.Getenv("GOGC") == "" {
log.Print("[INFO] Setting GOGC=", cfg.Runtime.GOGC)
debug.SetGCPercent(cfg.Runtime.GOGC)
} else {
log.Print("[INFO] Using GOGC=", os.Getenv("GOGC"), " from env")
}
if os.Getenv("GOMAXPROCS") == "" {
log.Print("[INFO] Setting GOMAXPROCS=", cfg.Runtime.GOMAXPROCS)
runtime.GOMAXPROCS(cfg.Runtime.GOMAXPROCS)
} else {
log.Print("[INFO] Using GOMAXPROCS=", os.Getenv("GOMAXPROCS"), " from env")
}
}
func initConfigService(cfg *config.Config) {
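// NOTE: registry backend initialization is currently disabled; the previous implementation is kept below, commented out, for reference.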
//var deadline = time.Now().Add(cfg.Registry.Timeout)
//
//var err error
//for {
// switch cfg.Registry.Backend {
// case "file":
// registry.Default, err = file.NewBackend(cfg.Registry.File.Path)
// default:
// exit.Fatal("[FATAL] Unknown registry backend ", cfg.Registry.Backend)
// }
//
// if err == nil {
// if err = registry.Default.Register(); err == nil {
// return
// }
// }
// log.Print("[WARN] Error initializing backend. ", err)
//
// if time.Now().After(deadline) {
// exit.Fatal("[FATAL] Timeout registering backend.")
// }
//
// time.Sleep(cfg.Registry.Retry)
// if atomic.LoadInt32(&shuttingDown) > 0 {
// exit.Exit(1)
// }
//}
}
func watchBackend(cfg *config.Config, first chan bool) {
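// NOTE: the watch logic below is commented out, so the first channel is never closed and startup will block waiting for the first routing table.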
//var (
// last string
// svccfg string
// mancfg string
//
// once sync.Once
//)
//
//svc := registry.Default.WatchServices()
//man := registry.Default.WatchManual()
//
//for {
// select {
// case svccfg = <-svc:
// case mancfg = <-man:
// }
//
// // manual config overrides service config
// // order matters
// next := svccfg + "\n" + mancfg
// if next == last {
// continue
// }
//
// t, err := route.NewTable(next)
// if err != nil {
// log.Printf("[WARN] %s", err)
// continue
// }
// route.SetTable(t)
// logRoutes(last, next, cfg.Log.RoutesFormat)
// last = next
//
// once.Do(func() { close(first) })
//}
}
func logRoutes(last, next, format string) {
fmtDiff := func(diffs []dmp.Diff) string {
var b bytes.Buffer
for _, d := range diffs {
t := strings.TrimSpace(d.Text)
if t == "" {
continue
}
switch d.Type {
case dmp.DiffDelete:
b.WriteString("- ")
b.WriteString(strings.Replace(t, "\n", "\n- ", -1))
case dmp.DiffInsert:
b.WriteString("+ ")
b.WriteString(strings.Replace(t, "\n", "\n+ ", -1))
}
}
return b.String()
}
const defFormat = "delta"
switch format {
case "delta":
if delta := fmtDiff(dmp.New().DiffMain(last, next, true)); delta != "" {
log.Printf("[INFO] Config updates\n%s", delta)
}
case "all":
log.Printf("[INFO] Updated config to\n%s", next)
default:
log.Printf("[WARN] Invalid route format %q. Defaulting to %q", format, defFormat)
logRoutes(last, next, defFormat)
}
}
func toJSON(v interface{}) string {
data, err := json.MarshalIndent(v, "", " ")
if err != nil {
panic("json: " + err.Error())
}
return string(data)
}
| [
"\"GOGC\"",
"\"GOGC\"",
"\"GOMAXPROCS\"",
"\"GOMAXPROCS\""
]
| []
| [
"GOGC",
"GOMAXPROCS"
]
| [] | ["GOGC", "GOMAXPROCS"] | go | 2 | 0 | |
firebase-db/firebase_db_stream.go | package firebase_db
import (
"bufio"
"encoding/json"
"errors"
"fmt"
"net/http"
)
type EventType int
const (
PUT EventType = iota
PATCH
KEEP_ALIVE
CANCEL
AUTH_REVOKED
)
var EVENT_TYPE_LOOKUP map[EventType]string = map[EventType]string{
PUT: "put",
PATCH: "patch",
KEEP_ALIVE: "keep-alive",
CANCEL: "cancel",
AUTH_REVOKED: "auth_revoked",
}
// EVENT_TYPE_REVERSE_LOOKUP is dynamically generated.
var EVENT_TYPE_REVERSE_LOOKUP map[string]EventType = map[string]EventType{}
// See https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format.
type FirebaseDBStreamEvent struct {
Event EventType
Data []byte
}
type FirebaseDBEventData struct {
Path string `json:"path"`
Data []byte `json:"data"`
}
func (self *FirebaseDBStreamEvent) GetEventData() (FirebaseDBEventData, error) {
f := FirebaseDBEventData{}
if self.Event == AUTH_REVOKED {
return f, fmt.Errorf("Authorization revoked: %s", string(self.Data))
}
err := json.Unmarshal(self.Data, &f)
return f, err
}
func stream(
c *http.Client,
path string,
queryParameters map[string]string) (*bufio.Reader, int, error) {
path += paramToURL(queryParameters)
req, err := http.NewRequest(http.MethodGet, path, nil)
if err != nil {
return nil, 0, err
}
req.Header.Set("Accept", "text/event-stream")
resp, err := c.Do(req)
if err != nil {
return nil, 0, err
}
r := bufio.NewReader(resp.Body)
return r, resp.StatusCode, nil
}
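// doRead is currently an unimplemented stub.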
func doRead(r *bufio.Reader) {
}
func init() {
for eventType, eventTypeString := range EVENT_TYPE_LOOKUP {
EVENT_TYPE_REVERSE_LOOKUP[eventTypeString] = eventType
}
}
| []
| []
| []
| [] | [] | go | null | null | null |
test/kube2e/gateway/gateway_suite_test.go | package gateway_test
import (
"context"
"fmt"
"os"
"path/filepath"
"testing"
"time"
"github.com/solo-io/gloo/test/helpers"
"github.com/solo-io/gloo/test/kube2e"
"github.com/solo-io/go-utils/log"
"github.com/solo-io/k8s-utils/testutils/helper"
skhelpers "github.com/solo-io/solo-kit/test/helpers"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestGateway(t *testing.T) {
if os.Getenv("KUBE2E_TESTS") != "gateway" {
log.Warnf("This test is disabled. " +
"To enable, set KUBE2E_TESTS to 'gateway' in your env.")
return
}
helpers.RegisterGlooDebugLogPrintHandlerAndClearLogs()
skhelpers.RegisterCommonFailHandlers()
skhelpers.SetupLog()
RunSpecs(t, "Gateway Suite")
}
var testHelper *helper.SoloTestHelper
var ctx, cancel = context.WithCancel(context.Background())
var _ = BeforeSuite(StartTestHelper)
var _ = AfterSuite(TearDownTestHelper)
func StartTestHelper() {
cwd, err := os.Getwd()
Expect(err).NotTo(HaveOccurred())
randomNumber := time.Now().Unix() % 10000
testHelper, err = helper.NewSoloTestHelper(func(defaults helper.TestConfig) helper.TestConfig {
defaults.RootDir = filepath.Join(cwd, "../../..")
defaults.HelmChartName = "gloo"
defaults.InstallNamespace = "gateway-test-" + fmt.Sprintf("%d-%d", randomNumber, GinkgoParallelNode())
defaults.Verbose = true
return defaults
})
Expect(err).NotTo(HaveOccurred())
// Register additional fail handlers
skhelpers.RegisterPreFailHandler(helpers.KubeDumpOnFail(GinkgoWriter, "knative-serving", testHelper.InstallNamespace))
valueOverrideFile, cleanupFunc := kube2e.GetHelmValuesOverrideFile()
defer cleanupFunc()
err = testHelper.InstallGloo(ctx, helper.GATEWAY, 5*time.Minute, helper.ExtraArgs("--values", valueOverrideFile))
Expect(err).NotTo(HaveOccurred())
// Check that everything is OK
kube2e.GlooctlCheckEventuallyHealthy(1, testHelper, "90s")
// TODO(marco): explicitly enable strict validation, this can be removed once we enable validation by default
// See https://github.com/solo-io/gloo/issues/1374
kube2e.UpdateAlwaysAcceptSetting(ctx, false, testHelper.InstallNamespace)
// Ensure gloo reaches valid state and doesn't continually resync
// we can consider doing the same for leaking go-routines after resyncs
kube2e.EventuallyReachesConsistentState(testHelper.InstallNamespace)
}
func TearDownTestHelper() {
if os.Getenv("TEAR_DOWN") == "true" {
Expect(testHelper).ToNot(BeNil())
err := testHelper.UninstallGloo()
Expect(err).NotTo(HaveOccurred())
_, err = kube2e.MustKubeClient().CoreV1().Namespaces().Get(ctx, testHelper.InstallNamespace, metav1.GetOptions{})
Expect(apierrors.IsNotFound(err)).To(BeTrue())
cancel()
}
}
| [
"\"KUBE2E_TESTS\"",
"\"TEAR_DOWN\""
]
| []
| [
"KUBE2E_TESTS",
"TEAR_DOWN"
]
| [] | ["KUBE2E_TESTS", "TEAR_DOWN"] | go | 2 | 0 | |
GUI_Master.py | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 25 10:19:00 2021
@author: holmanf
"""
import os,sys
from typing_extensions import IntVar
## Resolve resource paths so they work both during development and inside a PyInstaller executable.
def resource_path(relative_path):
""" Get absolute path to resource, works for development and for PyInstaller """
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path)
os.environ["GDAL_DATA"] = resource_path('gdal')
os.environ["PROJ_LIB"] = resource_path('proj')
os.environ["USE_PATH_FOR_GDAL_PYTHON"] = 'YES'
os.environ['PATH'] = os.pathsep.join(os.environ['PATH'].split(os.pathsep)[:-1])
print(os.environ["PROJ_LIB"])
abspath = os.path.abspath(__file__)
dname = (os.path.dirname(abspath))
os.chdir(dname)
print(dname)
import math
import time
import shutil
import exifread
import threading
import subprocess
import numpy as np
import glob, re
import pandas as pd
import tkinter as tk
import tifffile as tiff
import fiona
import geopandas
import traceback
import xlsxwriter
import threading
import ctypes
import rasterio
import gc
import openpyxl
from random import randint
from rasterio.mask import mask
from datetime import datetime
from osgeo import ogr, gdal
from shapely.geometry import shape, mapping
from ttkwidgets import tooltips
from tkinter import ttk
from tkinter import filedialog
from tkinter import messagebox
from tkinter import font
from tkinter import PhotoImage
from tkinter.ttk import Style
from ttkwidgets.autocomplete import AutocompleteCombobox
from queue import Queue
from SonyImage_master import SonyImage_Master
from PlotShapfile_Extractor import shapefile_gen
from Data_Extractor import hyperspec_master
from mergingSony import orthoMerge
from itertools import count
from PIL import ImageTk, Image
from natsort import natsorted
##Set DPI awareness to improve display##
ctypes.windll.shcore.SetProcessDpiAwareness(1)
## Define style variables for Tkinter##
Large_Font=("Verdana",12,'bold')
Norm_Font=("Verdana",10)
Small_font=("Verdana",8)
## Define text files for saving locations of shapefiles##
fieldfile=open(resource_path('Requirements\\fieldshapefile.txt'),'r+')
fieldshape=fieldfile.read()
file=open(resource_path('Requirements\\plotshapefiles.txt'),'r+')
shapefolder=file.read()
groundfile=open(resource_path('Requirements\\groundDEM.txt'),'r+')
grnddem=groundfile.read()
##Define Graphics file locations##
info_button = resource_path('Graphics\\button_info.png')
home_button = resource_path('Graphics\\button_home.png')
exit_button = resource_path('Graphics\\button_exit.png')
back_button = resource_path('Graphics\\button_back.png')
sony_button = resource_path('Graphics\\button_camera.png')
geojson_button = resource_path('Graphics\\button_shapefile.png')
spectrum_button = resource_path('Graphics\\button_extractor.png')
layers_button = resource_path('Graphics\\button_layers.png')
class ImageLabel(tk.Label):
"""a label that displays images, and plays them if they are gifs"""
def load(self, im):
if isinstance(im, str):
im = Image.open(im)
self.loc = 0
self.frames = []
try:
for i in count(1):
self.frames.append(ImageTk.PhotoImage(im.copy()))
im.seek(i)
except EOFError:
pass
try:
self.delay = im.info['duration']
except:
self.delay = 100
if len(self.frames) == 1:
self.config(image=self.frames[0])
else:
self.next_frame()
def unload(self):
self.config(image="")
self.frames = None
def next_frame(self):
if self.frames:
self.loc += 1
self.loc %= len(self.frames)
self.config(image=self.frames[self.loc])
self.after(self.delay, self.next_frame)
## Software Class: Defines the Tkinter variables for the GUI software.
class software(tk.Tk):
def __init__(self,*args,**kwargs):
tk.Tk.__init__(self,*args,**kwargs)
# self.tk.call('tk', 'scaling', 3.0)
tk.Tk.wm_title(self,"UAV Data Tool")
container=tk.Frame(self)
container.pack(side='top',fill='both',expand=True)
container.grid_rowconfigure(0,weight=1)
container.grid_columnconfigure(0,weight=1)
self.frames={}
for F in (HomePage,ImageCalibrator,batchcalibrator,Shapefilegenerator,HyperSpecExtractor,OrthoMerging): #DataMerger,DataExtractor
frame=F(container,self)
self.frames[F]=frame
frame.grid(row=0,column=0,sticky='nsew')
self.show_frame(HomePage)
def show_frame(self,cont):
frame=self.frames[cont]
frame.tkraise()
def enditall(self):
global file
file.close()
self.destroy()
## Homepage Class: Defines the variables for the appearance and function of the software homepage.
class HomePage(ttk.Frame):
def popup_window(self):
win_x = self.winfo_rootx()+300
win_y = self.winfo_rooty()+100
window = tk.Toplevel()
window.geometry(f'+{win_x}+{win_y}')
window.title('UAV Data Tool - Help')
label = tk.Label(window, text='''
Image Calibration Tool: if you want to calibrate raw Sony imagery into reflectances.
Ortho Merger: if you want to merge multiple spatial rasters together into one file.
Shapefile Generator: if you want to process experiment shapefiles ready for data extraction.
Data Extractor: if you want to extract data values from spatial raster datasets.
''',justify='left')
label.pack(fill='x', padx=50, pady=50)
button_close = ttk.Button(window, text="Close", command=window.destroy)
button_close.pack(fill='x')
def __init__(self,parent,controller):
tk.Frame.__init__(self,parent)
self.grid_rowconfigure(1, weight=1)
self.grid_columnconfigure(0, weight=1)
self.topframe = tk.Frame(self)
self.midframe = tk.Frame(self)
self.btmframe = tk.Frame(self)
self.topframe.grid(row=0)
self.midframe.grid(row=1)
self.btmframe.grid(row=2)
label2 = tk.Label(self.topframe,text='UAV Data Tool: Version 1.1')
label2.pack(fill='x')
info_btn = PhotoImage(file=info_button,master=self).subsample(5,5)
ext_btn = PhotoImage(file=exit_button,master=self).subsample(5,5)
clb_btn = PhotoImage(file=sony_button,master=self).subsample(2,2)
shp_btn = PhotoImage(file=geojson_button,master=self).subsample(2,2)
spc_btn = PhotoImage(file=spectrum_button,master=self).subsample(2,2)
lyr_btn = PhotoImage(file=layers_button,master=self).subsample(2,2)
button1=ttk.Button(self.midframe,text='Image Calibration Tool',image=clb_btn,tooltip='Tool for calibrating Sony a6000 RAW imagery to calibrated reflectance' ,command=lambda: controller.show_frame(ImageCalibrator),compound='top')
button1.image = clb_btn
button1.grid(row=1,column=1,padx=15, pady=15)
button2=ttk.Button(self.midframe,text='Ortho Merger',image=lyr_btn,tooltip='Tool for merging orthomosaics from different sensors',command=lambda: controller.show_frame(OrthoMerging),compound='top')
button2.image = lyr_btn
button2.grid(row=1,column=2,padx=15, pady=15)
button4=ttk.Button(self.midframe,text='Shapefile Generator',image=shp_btn,tooltip='Tool for generating GeoJSON files from Area of Interest shapefiles',command=lambda: controller.show_frame(Shapefilegenerator),compound='top')
button4.image = shp_btn
button4.grid(row=2,column=1,padx=15, pady=15)
button3=ttk.Button(self.midframe,text='Data Extractor',image=spc_btn,tooltip='Tool for extracting statstics from spatial datasets',command=lambda: controller.show_frame(HyperSpecExtractor),compound='top')
button3.image = spc_btn
button3.grid(row=2,column=2,padx=15, pady=15)
button5=ttk.Button(self.btmframe,text='Quit',image=ext_btn,tooltip='Quit software and all running processes',command=controller.enditall,compound='top')
button5.image=ext_btn
button5.grid(row=1,column=3,padx=5,pady=10)
button6=ttk.Button(self.btmframe,text='Help',image=info_btn,tooltip='Get additional help',command=self.popup_window,compound='top')
button6.image=info_btn
button6.grid(row=1,column=1,padx=5,pady=10)
## Image Calibrator Class: defines the variables for the appearance and function of the Image Calibration Tool.
class ImageCalibrator(ttk.Frame):
def popup_window(self):
win_x = self.winfo_rootx()+300
win_y = self.winfo_rooty()+100
window = tk.Toplevel()
window.geometry(f'+{win_x}+{win_y}')
window.title('Image Calibrator - Help')
label = tk.Label(window, text='''
Raw imagery = Folder containing sony .ARW image files from data capture flight
Tec5 file = .xlsx file containing Tec5 irradiance data from data capture flight
Average Irradiance = Select to use a single mean irradiance value for reflectance corrections
Vignetting Models = Destination folder for band vignetting models produced during processing
Out folder = Destination folder for calibrated .tiff imagery''',justify='left')
label.pack(fill='x', padx=50, pady=50)
button_close = ttk.Button(window, text="Close", command=window.destroy)
button_close.pack(fill='x')
def get_raw(self):
self.button5.configure(state='enabled')
folder=tk.filedialog.askdirectory(initialdir = "/",title = 'Select Raw Folder')
self.rawfolder.set(folder+'/')
try:
self.t5file.set((glob.glob(os.path.abspath(os.path.join(self.rawfolder.get(),'../'))+'/'+'*ec5*'+'*.xlsx'))[0])
except:
tk.messagebox.showwarning(title='Warning',message='No Tec5 File found')
self.t5file.set('blank')
self.vigfolder.set(os.path.join(os.path.abspath(os.path.join(self.rawfolder.get(),"../")+'VIG_models\\'),''))
try:
self.cam.set(re.split('/',self.rawfolder.get())[re.split('/',self.rawfolder.get()).index('NIR')])
self.outfolder.set(os.path.join(os.path.abspath(os.path.join(self.rawfolder.get(),"../")+(re.split('/',self.rawfolder.get())[re.split('/',self.rawfolder.get()).index('NIR')])+'_AllCorrect'),''))
except:
self.cam.set(re.split('/',self.rawfolder.get())[re.split('/',self.rawfolder.get()).index('RGB')])
self.outfolder.set(os.path.join(os.path.abspath(os.path.join(self.rawfolder.get(),"../")+(re.split('/',self.rawfolder.get())[re.split('/',self.rawfolder.get()).index('RGB')])+'_AllCorrect'),''))
def get_t5(self):
try:
folder=tk.filedialog.askopenfilename(initialdir = os.path.abspath(os.path.join(self.rawfolder.get() ,"../")),title = 'Select Tec5 File',filetypes = (("excel files","*.xlsx"),("all files","*.*")))
self.t5file.set(folder)
except:
tk.messagebox.showwarning(title='Warning',message='No Tec5 File found')
self.t5file.set('blank')
def get_vig(self):
folder=tk.filedialog.askdirectory(initialdir = os.path.abspath(os.path.join(self.rawfolder.get() ,"../")),title = 'Select Vignette Model Folder')
self.vigfolder.set(folder+'/')
def get_out(self):
folder=tk.filedialog.askdirectory(initialdir = os.path.abspath(os.path.join(self.rawfolder.get() ,"../")),title = 'Select Output Folder')
self.outfolder.set(folder+'/')
def _toggle_state(self, state):
state = state if state in ('normal', 'disabled') else 'normal'
widgets = (self.button1, self.button2,self.button3,self.button4,self.button5,self.rgb,self.button8,self.button9,self.button10)
for widget in widgets:
widget.configure(state=state)
def monitor(self, thread):
if thread.is_alive():
# check the thread every 100ms
self.after(100, lambda: self.monitor(thread))
else:
tk.messagebox.showinfo("Processing Complete", "Processing Complete")
gc.collect()
self._toggle_state('enabled')
def click_run(self):
self._toggle_state('disabled')
try:
variables={'infolder':self.rawfolder.get(),'outfolder':self.outfolder.get(),'t5file':self.t5file.get(),'vigdest':self.vigfolder.get(),'camera':self.cam.get(),'average':self.average.get()}
gc.collect()
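# run the calibration in a worker thread so the GUI stays responsive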
thread_1 = threading.Thread(target=SonyImage_Master, args=(variables,))
thread_1.setDaemon(True)
thread_1.start()
self.monitor(thread_1)
except Exception as e:
tk.messagebox.showerror("Error", e)
traceback.print_exc()
self._toggle_state('normal')
def __init__(self,parent,controller):
tk.Frame.__init__(self,parent)
self.grid_rowconfigure(1, weight=1)
self.grid_columnconfigure(0, weight=1)
self.topframe = tk.Frame(self)
self.midframe = tk.Frame(self)
self.btmframe = tk.Frame(self)
self.topframe.grid(row=0)
self.midframe.grid(row=1)
self.btmframe.grid(row=2)
#---VARIABLES---#
self.rawfolder=tk.StringVar()
self.t5file=tk.StringVar()
self.vigfolder=tk.StringVar()
self.outfolder=tk.StringVar()
self.cam=tk.StringVar()
self.queue=Queue()
options=['Please Select*','NIR','RGB']
self.batch=[]
self.average=tk.IntVar()
info_btn = PhotoImage(file=info_button,master=self).subsample(5,5)
hme_btn = PhotoImage(file=home_button,master=self).subsample(5,5)
ext_btn = PhotoImage(file=exit_button,master=self).subsample(5,5)
#---LABELS---#
self.label=tk.Label(self.topframe,text='Image Calibration',font=Large_Font)
self.label.grid(row=0,column=2,padx=10)
self.label=tk.Label(self.topframe,font=Norm_Font,
text='Calibrate raw (.ARW) sony images to reflectance (%).')
self.label.grid(row=1,column=2,padx=10)
#---BUTTONS---#
self.button1=ttk.Button(self.midframe,text='Raw Imagery',command=self.get_raw,width=20)
self.button1.grid(row=3,column=1,pady=5)
self.button2=ttk.Button(self.midframe,text='Tec5 File',command=self.get_t5,width=20)
self.button2.grid(row=4,column=1,pady=5)
self.button3=ttk.Button(self.midframe,text='Vignetting Models',command=self.get_vig,width=20)
self.button3.grid(row=5,column=1,pady=5)
self.button4=ttk.Button(self.midframe,text='Out Folder',command=self.get_out,width=20)
self.button4.grid(row=6,column=1,pady=5)
self.rgb=ttk.OptionMenu(self.midframe,self.cam,*options)
self.rgb.grid(row=7,column=2,pady=5)
self.button5=ttk.Button(self.midframe,text='Run',command=self.click_run)
self.button5.configure(state='disabled')
self.button5.grid(row=8,column=2,pady=10)
self.button6=ttk.Button(self.midframe,text='Batch Processor',command=lambda: controller.show_frame(batchcalibrator))
self.button6.configure(state='enabled')
self.button6.grid(row=9,column=2)
self.button7=ttk.Checkbutton(self.midframe,text='Average Irradiance',variable=self.average)
self.button7.grid(row=4,column=3)
self.button8=ttk.Button(self.btmframe,image=hme_btn,text='Home',tooltip="Go Home (you're drunk)",command=lambda: controller.show_frame(HomePage),compound="top")
self.button8.image=hme_btn
self.button8.grid(row=1,column=1,padx=5,pady=10)
self.button9=ttk.Button(self.btmframe,image=info_btn,text='Help',command=self.popup_window,tooltip='Press for more help',compound="top")
self.button9.image=info_btn
self.button9.grid(row=1,column=2,padx=5,pady=10)
self.button10=ttk.Button(self.btmframe,text='Quit',image=ext_btn,tooltip='Quit software and all running processes',command=controller.enditall,compound="top")
self.button10.image=ext_btn
self.button10.grid(row=1,column=3,padx=5,pady=10)
#---ENTRIES---#
self.entry1=ttk.Entry(self.midframe,textvariable=self.rawfolder,width=75)
self.entry1.grid(row=3,column=2,padx=5)
self.entry2=ttk.Entry(self.midframe,textvariable=self.t5file,width=75)
self.entry2.grid(row=4,column=2,padx=5)
self.entry3=ttk.Entry(self.midframe,textvariable=self.vigfolder,width=75)
self.entry3.grid(row=5,column=2,padx=5)
self.entry4=ttk.Entry(self.midframe,textvariable=self.outfolder,width=75)
self.entry4.grid(row=6,column=2,padx=5)
## Image Calibrator Class: defines the variables for the appearance and function of the Batch Processing Image Calibration Tool.
class batchcalibrator(ttk.Frame):
def popup_window(self):
win_x = self.winfo_rootx()+300
win_y = self.winfo_rooty()+100
window = tk.Toplevel()
window.geometry(f'+{win_x}+{win_y}')
window.title('Batch Calibrator - Help')
label = tk.Label(window, text='''
Raw imagery = Folder containing sony .ARW image files from data capture flight
Tec5 file = .xlsx file containing Tec5 irradiance data from data capture flight
Average Irradiance = Select to use a single mean irradiance value for reflectance corrections
Vignetting Models = Destination folder for band vignetting models produced during processing
Out folder = Destination folder for calibrated .tiff imagery
Add/Delete = add or remove the current dataset to the batch processing queue
''',justify='left')
label.pack(fill='x', padx=50, pady=50)
button_close = ttk.Button(window, text="Close", command=window.destroy)
button_close.pack(fill='x')
def get_raw(self):
self.button5.configure(state='enabled')
folder=tk.filedialog.askdirectory(initialdir = "/",title = 'Select Raw Folder')
self.rawfolder.set(folder+'/')
try:
self.t5file.set((glob.glob(os.path.abspath(os.path.join(self.rawfolder.get(),'../'))+'/'+'*ec5*'+'*.xlsx'))[0])
except:
tk.messagebox.showinfo('Error','No Tec5 file found')
self.t5file.set('blank')
self.vigfolder.set(os.path.join(os.path.abspath(os.path.join(self.rawfolder.get(),"../")+'VIG_models\\'),''))
try:
self.cam.set(re.split('/',self.rawfolder.get())[re.split('/',self.rawfolder.get()).index('NIR')])
self.outfolder.set(os.path.join(os.path.abspath(os.path.join(self.rawfolder.get(),"../")+(re.split('/',self.rawfolder.get())[re.split('/',self.rawfolder.get()).index('NIR')])+'_AllCorrect'),''))
except:
self.cam.set(re.split('/',self.rawfolder.get())[re.split('/',self.rawfolder.get()).index('RGB')])
self.outfolder.set(os.path.join(os.path.abspath(os.path.join(self.rawfolder.get(),"../")+(re.split('/',self.rawfolder.get())[re.split('/',self.rawfolder.get()).index('RGB')])+'_AllCorrect'),''))
def get_t5(self):
try:
folder=tk.filedialog.askopenfilename(initialdir = os.path.abspath(os.path.join(self.rawfolder.get() ,"../")),title = 'Select Tec5 File',filetypes = (("excel files","*.xlsx"),("all files","*.*")))
self.t5file.set(folder)
except:
tk.messagebox.showinfo('Error','No Tec5 file found')
self.t5file.set('blank')
def get_vig(self):
folder=tk.filedialog.askdirectory(initialdir = os.path.abspath(os.path.join(self.rawfolder.get() ,"../")),title = 'Select Vignette Model Folder')
self.vigfolder.set(folder+'/')
def get_out(self):
folder=tk.filedialog.askdirectory(initialdir = os.path.abspath(os.path.join(self.rawfolder.get() ,"../")),title = 'Select Output Folder')
self.outfolder.set(folder+'/')
def add2batch(self):
batch_vars={'infolder':self.rawfolder.get(),'outfolder':self.outfolder.get(),'t5file':self.t5file.get(),'vigdest':self.vigfolder.get(),'camera':self.cam.get(),'average':self.average.get()}
self.batch.append(batch_vars)
self.batchbox.insert('end',str(batch_vars))
self.button9.configure(state='enabled')
def deletebatch(self):
self.batch.pop(-1)
self.batchbox.delete('1.0','end')
self.batchbox.insert('end',self.batch)
def _toggle_state(self, state):
state = state if state in ('normal', 'disabled') else 'normal'
widgets = (self.button1, self.button2,self.button3,self.button4,self.button5,self.rgb,self.button8,self.button9,self.button10)
for widget in widgets:
widget.configure(state=state)
def monitor(self, thread):
if thread.is_alive():
# check the thread every 100ms
self.after(100, lambda: self.monitor(thread))
else:
tk.messagebox.showinfo("Processing Complete", "Processing Complete")
gc.collect()
self._toggle_state('enabled')
def batchrun(self):
self._toggle_state('disabled')
for batch in self.batch:
print(batch)
gc.collect()
thread_1 = threading.Thread(target=SonyImage_Master, args=(batch,))
thread_1.setDaemon(True)
thread_1.start()
self.monitor(thread_1)
def __init__(self,parent,controller):
tk.Frame.__init__(self,parent)
self.grid_rowconfigure(1, weight=1)
self.grid_columnconfigure(0, weight=1)
self.topframe = tk.Frame(self)
self.midframe = tk.Frame(self)
self.btmframe = tk.Frame(self)
self.topframe.grid(row=0)
self.midframe.grid(row=1)
self.btmframe.grid(row=2)
#---VARIABLES---#
self.rawfolder=tk.StringVar()
self.t5file=tk.StringVar()
self.vigfolder=tk.StringVar()
self.outfolder=tk.StringVar()
self.cam=tk.StringVar()
self.queue=Queue()
options=['Please Select*','NIR','RGB']
self.batch=[]
self.average=tk.IntVar()
info_btn = PhotoImage(file=info_button,master=self).subsample(5,5)
hme_btn = PhotoImage(file=home_button,master=self).subsample(5,5)
ext_btn = PhotoImage(file=exit_button,master=self).subsample(5,5)
bck_btn = PhotoImage(file=back_button,master=self).subsample(5,5)
#---LABELS---#
self.label=tk.Label(self.topframe,text='Image Calibration',font=Large_Font)
self.label.grid(row=0,column=2,padx=10)
self.label1=tk.Label(self.topframe,font=Norm_Font,
text='Calibrate raw (.ARW) sony images to reflectance (%), but now in batches!')
self.label1.grid(row=1,column=2,padx=10)
#---BUTTONS---#
self.button1=ttk.Button(self.midframe,text='Raw Imagery',command=self.get_raw,width=20)
self.button1.grid(row=1,column=1,pady=5)
self.button2=ttk.Button(self.midframe,text='Tec5 File',command=self.get_t5,width=20)
self.button2.grid(row=2,column=1,pady=5)
self.button3=ttk.Button(self.midframe,text='Vignetting Models',command=self.get_vig,width=20)
self.button3.grid(row=3,column=1,pady=5)
self.button4=ttk.Button(self.midframe,text='Out Folder',command=self.get_out,width=20)
self.button4.grid(row=4,column=1,pady=5)
self.rgb=ttk.OptionMenu(self.midframe,self.cam,*options)
self.rgb.grid(row=5,column=2,pady=5,columnspan=2)
self.button5=ttk.Button(self.midframe,text='Batch Run',command=self.batchrun)
self.button5.configure(state='disabled')
self.button5.grid(row=9,column=2,columnspan=2)
self.button7=ttk.Button(self.midframe,text='Add',command=self.add2batch)
self.button7.grid(row=6,column=2,sticky='e')
self.button17=ttk.Button(self.midframe,text='Delete',command=self.deletebatch)
self.button17.grid(row=6,column=3,sticky='w')
self.button27=ttk.Checkbutton(self.midframe,text='Average Irradiance',variable=self.average)
self.button27.grid(row=2,column=4)
self.button8=ttk.Button(self.btmframe,image=hme_btn,text='Home',tooltip="Go Home (you're drunk)",command=lambda: controller.show_frame(HomePage),compound="top")
self.button8.image=hme_btn
self.button8.grid(row=1,column=1,padx=5,pady=10)
self.button9=ttk.Button(self.btmframe,image=info_btn,text='Help',command=self.popup_window,tooltip='Press for more help',compound="top")
self.button9.image=info_btn
self.button9.grid(row=1,column=2,padx=5,pady=10)
self.button10=ttk.Button(self.btmframe,text='Quit',image=ext_btn,tooltip='Quit software and all running processes',command=controller.enditall,compound="top")
self.button10.image=ext_btn
self.button10.grid(row=1,column=3,padx=5,pady=10)
self.button11=ttk.Button(self.btmframe,text='Back',image=bck_btn,command=lambda: controller.show_frame(ImageCalibrator),compound="top")
self.button11.image=bck_btn
self.button11.grid(row=0,column=2,pady=5)
#---ENTRIES---#
self.entry1=ttk.Entry(self.midframe,textvariable=self.rawfolder,width=75)
self.entry1.grid(row=1,column=2,padx=5,columnspan=2)
self.entry2=ttk.Entry(self.midframe,textvariable=self.t5file,width=75)
self.entry2.grid(row=2,column=2,padx=5,columnspan=2)
self.entry3=ttk.Entry(self.midframe,textvariable=self.vigfolder,width=75)
self.entry3.grid(row=3,column=2,padx=5,columnspan=2)
self.entry4=ttk.Entry(self.midframe,textvariable=self.outfolder,width=75)
self.entry4.grid(row=4,column=2,padx=5,columnspan=2)
self.batchbox=tk.Text(self.midframe,width=50,height=10)
self.batchbox.grid(row=8,column=2,columnspan=2, padx=10, pady=5)
## Shapefile Class: defines the variables for the appearance and function of the shapefile generator Tool.
class Shapefilegenerator(ttk.Frame):
def popup_window(self):
win_x = self.winfo_rootx()+300
win_y = self.winfo_rooty()+100
window = tk.Toplevel()
window.geometry(f'+{win_x}+{win_y}')
window.title('Shapefile Generator - Help')
label = tk.Label(window, text='''
Shapefile = the input shapefile generated using GIS software identifying plot names and locations.
Output file = the output file location and name of the generated GeoJSON file.
''',justify='left')
label.pack(fill='x', padx=50, pady=50)
button_close = ttk.Button(window, text="Close", command=window.destroy)
button_close.pack(fill='x')
def get_shapefile(self):
folder=tk.filedialog.askopenfilename(initialdir = "/",title = 'Shapefile',filetypes=(("shp","*.shp"),("all files","*.*")))
self.shapefile.set(folder)
def get_outfilename(self):
folder=tk.filedialog.asksaveasfilename(initialdir = os.path.abspath(os.path.join(self.shapefile.get(),'../')),title = 'Output file',filetypes=(("geojson","*.geojson"),("all files","*.*")))
self.out_file.set(folder)
self._toggle_state('normal')
def run(self):
if self.out_file.get() == 'blank':
tk.messagebox.showinfo("Select Output file", "Please define a file name and location")
else:
self._toggle_state('disabled')
try:
shapefile_gen(self.shapefile.get(),self.out_file.get())
tk.messagebox.showinfo("Processing Complete", "Processing Complete")
self._toggle_state('normal')
except Exception as e:
tk.messagebox.showerror("Error", e)
traceback.print_exc()
self._toggle_state('normal')
def _toggle_state(self, state):
state = state if state in ('normal', 'disabled') else 'normal'
widgets = (self.button1, self.button2,self.button3)
for widget in widgets:
widget.configure(state=state)
def __init__(self,parent,controller):
tk.Frame.__init__(self,parent)
self.grid_rowconfigure(1, weight=1)
self.grid_columnconfigure(0, weight=1)
self.topframe = tk.Frame(self)
self.midframe = tk.Frame(self)
self.btmframe = tk.Frame(self)
self.topframe.grid(row=0)
self.midframe.grid(row=1)
self.btmframe.grid(row=2)
#---VARIABLES---#
self.shapefile=tk.StringVar()
self.shapefile.set('blank')
self.out_file=tk.StringVar()
self.out_file.set('blank')
info_btn = PhotoImage(file=info_button,master=self).subsample(5,5)
hme_btn = PhotoImage(file=home_button,master=self).subsample(5,5)
ext_btn = PhotoImage(file=exit_button,master=self).subsample(5,5)
self.label=tk.Label(self.topframe,text='Shapefile to GeoJSON',font=Large_Font)
self.label.grid(row=0,column=2,padx=10)
self.label=tk.Label(self.topframe,text='Convert plot shapefiles (.shp) to GeoJSON for storage and further processing.',font=Norm_Font)
self.label.grid(row=1,column=2,padx=10)
self.button1=ttk.Button(self.midframe,text='Shapefile (.shp)',command=self.get_shapefile,width=20)
self.button1.grid(row=2,column=1,pady=10)
self.button2=ttk.Button(self.midframe,text='Output file (.geojson)',command=self.get_outfilename,width=20)
self.button2.grid(row=3,column=1,pady=10)
self.button3=ttk.Button(self.midframe,text='Run',command=self.run,width=15)
self.button3.configure(state='disabled')
self.button3.grid(row=10,column=2,pady=10)
self.button8=ttk.Button(self.btmframe,image=hme_btn,text='Home',tooltip="Go Home (you're drunk)",command=lambda: controller.show_frame(HomePage),compound="top")
self.button8.image=hme_btn
self.button8.grid(row=1,column=1,padx=5,pady=10)
self.button9=ttk.Button(self.btmframe,image=info_btn,text='Help',command=self.popup_window,tooltip='Press for more help',compound="top")
self.button9.image=info_btn
self.button9.grid(row=1,column=2,padx=5,pady=10)
self.button10=ttk.Button(self.btmframe,text='Quit',image=ext_btn,tooltip='Quit software and all running processes',command=controller.enditall,compound="top")
self.button10.image=ext_btn
self.button10.grid(row=1,column=3,padx=5,pady=10)
#---ENTRIES---#
self.entry1=ttk.Entry(self.midframe,textvariable=self.shapefile,width=75)
self.entry1.grid(row=2,column=2,padx=5)
self.entry2=ttk.Entry(self.midframe,textvariable=self.out_file,width=75)
self.entry2.grid(row=3,column=2,padx=5)
## HyperSpecExtractor Class: defines the variables for the appearance and function of the Hyperspec data extractor Tool.
class HyperSpecExtractor(ttk.Frame):
def popup_window(self):
win_x = self.winfo_rootx()+500
win_y = self.winfo_rooty()+150
window = tk.Toplevel()
window.geometry(f'+{win_x}+{win_y}')
window.title('UAV Data Extractor - Help')
label = ImageLabel(window)
label.pack(padx=10,pady=10)
label.load(resource_path('Graphics\\'+str(randint(1,5))+'.gif'))
button_close = ttk.Button(window, text="Close", command=window.destroy)
button_close.pack(fill='x')
def checkBands(self,window,file):
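"""Prompt the user for band names when the selected raster file has no band descriptions."""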
bands = []
def chkchk(bands, window):
if all([b.get() != '' for b in bands]):
self.bandnames.extend([bands[a].get() for a in range(0,len(bands))])
window.destroy()
else:
tk.messagebox.showerror("Error", "Band Name missing")
window.lift()
with rasterio.open(file) as f:
if any([a is None for a in f.descriptions]):
window.title('UAV Data Extractor - Help')
self.label = ttk.Label(window,text='Unknown bands, please provide band names')
self.label.grid(row=1,column=1,columnspan=2)
for a in range(1,f.count+1):
bands.append(tk.StringVar())
self.bndNM = AutocompleteCombobox(window,width=10, textvariable=bands[a-1], completevalues=self.band_opts)
self.bndNM.grid(row=a+1,column=2)
self.bnd = ttk.Label(window,text=f'Band {a}:')
self.bnd.grid(row=a+1,column=1)
self.button_sub = ttk.Button(window, text="Submit", width=20, command=lambda: chkchk(bands, window))
self.button_sub.grid(row=a+2,column=1,columnspan=2)
def get_data(self):
files=tk.filedialog.askopenfilenames(initialdir = "/",title = 'Source File')
self.data.set([a for a in files])
self.data_short.set([os.path.basename(b)+' ' for b in files])
for a in files:
with rasterio.open(a) as b:
if any([a is None for a in b.descriptions]):
win_x = self.winfo_rootx()+500
win_y = self.winfo_rooty()+150
pop = tk.Toplevel()
pop.geometry(f'+{win_x}+{win_y}')
pop.title('UAV Data Extractor - Help')
self.checkBands(pop,a)
self.wait_window(pop)
if os.path.isfile(files[0]):
self.button5=ttk.Button(self.midframe,text='Results Output file (.csv)',command=lambda: self.get_outfilename(),tooltip='Output CSV path.',width=20)
self.button5.grid(row=4,column=1,pady=10)
self.entry4=ttk.Entry(self.midframe,textvariable=self.out_file,width=75)
self.entry4.grid(row=4,column=2,columnspan=2,padx=5)
self.get_outfilename()
return(self.button5)
def get_shapefile(self):
folder=tk.filedialog.askopenfilename(initialdir = "/",title = 'Shapefile',filetypes=(("geojson","*.geojson"),("all files","*.*")))
self.shapefile.set(folder)
def getplots(self):
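"""Show or hide the pixel-value output folder controls based on the checkbox state."""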
def getfolder():
folder=tk.filedialog.askdirectory(initialdir = '/', title = 'Plot Value Folder')
self.outFolder.set(folder)
if self.checkPlotVals.instate(['selected']) == True:
folder=tk.filedialog.askdirectory(initialdir = '/', title = 'Plot Value Folder')
self.outFolder.set(folder)
self.plotVals.set(True)
self.button6=ttk.Button(self.midframe,text='Pixel Folder',command=lambda: getfolder(),width=20)
self.button6.grid(row=11,column=1,pady=10)
self.entry6=ttk.Entry(self.midframe,textvariable=self.outFolder,width=75)
self.entry6.grid(row=11,column=2,columnspan=2,padx=5)
if self.checkPlotVals.instate(['selected']) == False:
self.plotVals.set(False)
self.button6.grid_forget()
self.entry6.grid_forget()
def checkall(self):
all = [self.checkMean,self.checkMedian,self.checkStdev,self.checkCount,self.checkPrcnt99,self.checkPrcnt90]
if self.checkAll.instate(['selected']) == True:
for a in all:
if a.instate(['selected']) == False:
a.invoke()
elif self.checkAll.instate(['selected']) == False:
for a in all:
if a.instate(['selected']) == True:
a.invoke()
def get_outfilename(self):
folder=tk.filedialog.asksaveasfilename(initialdir = os.path.abspath(os.path.join(self.shapefile.get(),'../')),title = 'Output file',filetypes=(("csv","*.csv"),("all files","*.*")))
if '.csv' not in folder:
folder += '.csv'
self.out_file.set(folder)
self._toggle_state('normal')
def monitor(self, thread):
if thread.is_alive():
# check the thread every 100ms
self.after(100, lambda: self.monitor(thread))
else:
tk.messagebox.showinfo("Processing Complete", "Processing Complete")
gc.collect()
self._toggle_state('enabled')
def run(self):
if self.out_file.get() == '.csv':
tk.messagebox.showinfo("Select Output file", "Please define an output file name and location")
else:
all = [self.checkMean,self.checkMedian,self.checkStdev,self.checkCount,self.checkPrcnt99,self.checkPrcnt90]
samples = []
for a in all:
if a.instate(['selected']) == True:
samples.append(a.cget('text'))
self._toggle_state('disabled')
if self.checkPrcntC.instate(['selected']) == True:
if self.customPrcnt.get() == '':
tk.messagebox.showerror("Error", "Invalid percentile value")
self._toggle_state('normal')
return
else:
samples.append({'custom':self.customPrcnt.get()})
if not samples and not self.plotVals.get():
tk.messagebox.showinfo("Select Someting", "No outputs selected")
self._toggle_state('normal')
return
try:
variables = {'outfile':self.out_file.get(),'shapefile':self.shapefile.get(),'samples':samples,'bandnames':self.bandnames,'PlotValues':self.plotVals.get(),'outFolder':self.outFolder.get()}
layers = {'inFiles':self.data.get()}
gc.collect()
thread_1 = threading.Thread(target=hyperspec_master, args=(variables,layers))
thread_1.setDaemon(True)
thread_1.start()
self.monitor(thread_1)
except Exception as e:
tk.messagebox.showerror("Error", e)
traceback.print_exc()
self._toggle_state('normal')
def _toggle_state(self, state):
state = state if state in ('normal', 'disabled') else 'normal'
try:
widgets = (self.button1, self.button2, self.button4, self.button5, self.button8,self.button9,self.button10)
except:
widgets = (self.button1, self.button2, self.button4, self.button5, self.button8,self.button9,self.button10)
for widget in widgets:
widget.configure(state=state)
def __init__(self,parent,controller):
tk.Frame.__init__(self,parent)
self.grid_rowconfigure(1, weight=1)
self.grid_columnconfigure(0, weight=1)
self.topframe = tk.Frame(self)
self.midframe = tk.Frame(self)
self.btmframe = tk.Frame(self)
self.topframe.grid(row=0)
self.midframe.grid(row=1)
self.btmframe.grid(row=2)
#---VARIABLES---#
self.bandnames = []
self.data=tk.StringVar()
self.data.set('')
self.data_short=tk.StringVar()
self.data_short.set('')
self.outFolder=tk.StringVar()
self.outFolder.set('')
self.plotVals=tk.BooleanVar()
self.plotVals.set(False)
self.shapefile=tk.StringVar()
self.shapefile.set('')
self.out_file=tk.StringVar()
self.out_file.set('')
# self.out_swir=tk.StringVar()
# self.out_swir.set('')
self.xmean = tk.IntVar()
self.xmedian = tk.IntVar()
self.xstdev = tk.IntVar()
self.xcount = tk.IntVar()
self.xprcnt99 = tk.IntVar()
self.xprcnt90 = tk.IntVar()
self.xprcntcstm = tk.IntVar()
self.xall = tk.IntVar()
info_btn = PhotoImage(file=info_button,master=self).subsample(5,5)
hme_btn = PhotoImage(file=home_button,master=self).subsample(5,5)
ext_btn = PhotoImage(file=exit_button,master=self).subsample(5,5)
self.band_opts = ['Red','Green','Blue','Alpha','NIR','Thermal']
self.label=tk.Label(self.topframe,text='UAV Data Extractor',font=Large_Font)
self.label.grid(row=0,column=2,padx=10)
self.label=tk.Label(self.topframe,text='Extract and statistically sample areas of interest from spatial data.\n \n Hover over inputs/outputs for more info.',font=Norm_Font)
self.label.grid(row=1,column=2,padx=10)
self.button1=ttk.Button(self.midframe,text='Shapefile (.geojson)',command=self.get_shapefile,tooltip='GeoJSON file containing Area of Interest Polygons',width=20)
self.button1.grid(row=2,column=1,pady=10)
self.button2=ttk.Button(self.midframe,text='Input File',command=self.get_data,tooltip='Source file from which data will be extracted',width=20)
self.button2.grid(row=3,column=1,pady=10)
# self.button3=ttk.Button(self.midframe,text='SWIR',command=self.get_swir,tooltip='SWIR binary file from sensor (not .hdr!)',width=20)
# self.button3.grid(row=5,column=1,pady=10)
self.button4=ttk.Button(self.midframe,text='Run',command=self.run,width=20)
self.button4.configure(state='disabled')
self.button4.grid(row=12,column=2,pady=10,padx=75)
self.checkframe=tk.Frame(self.midframe)
self.checkframe.grid(row=10,column=2)
self.checkMean=ttk.Checkbutton(self.checkframe,text='Mean',variable=self.xmean,onvalue='Mean')
self.checkMean.grid(row=1,column=1)
self.checkMedian=ttk.Checkbutton(self.checkframe,text='Median',variable=self.xmedian,onvalue='Median')
self.checkMedian.grid(row=1,column=3)
self.checkStdev=ttk.Checkbutton(self.checkframe,text='StDev',variable=self.xstdev,onvalue='StDev')
self.checkStdev.grid(row=1,column=2)
self.checkCount=ttk.Checkbutton(self.checkframe,text='Count',variable=self.xcount,onvalue='Count')
self.checkCount.grid(row=2,column=1)
self.checkPrcnt99=ttk.Checkbutton(self.checkframe,text='99th%',variable=self.xprcnt99,onvalue='99th%')
self.checkPrcnt99.grid(row=2,column=2)
self.checkPrcnt90=ttk.Checkbutton(self.checkframe,text='90th%',variable=self.xprcnt90,onvalue='90th%')
self.checkPrcnt90.grid(row=2,column=3)
self.checkAll=ttk.Checkbutton(self.checkframe,text='All',command=self.checkall,variable=self.xall)
self.checkAll.grid(row=3,column=2)
self.checkPrcntC=ttk.Checkbutton(self.checkframe,text='Custom Percentile (0-100)',variable=self.xprcntcstm,onvalue='Cstm%')
self.checkPrcntC.grid(row=4,column=2)
self.customPrcnt=ttk.Combobox(self.checkframe,width=5,values=natsorted([str(a) for a in range(0,101)]))
self.customPrcnt.grid(row=4,column=3)
self.checkPlotVals=ttk.Checkbutton(self.checkframe,text='Extract Plot Pixel Values?',command=self.getplots,variable=self.plotVals)
self.checkPlotVals.grid(row=5,column=2)
        self.button8=ttk.Button(self.btmframe,image=hme_btn,text='Home',tooltip="Go Home (you're drunk)",command=lambda: controller.show_frame(HomePage),compound="top")
self.button8.image=hme_btn
self.button8.grid(row=1,column=1,padx=5,pady=10)
self.button9=ttk.Button(self.btmframe,image=info_btn,text='Help',command=self.popup_window,tooltip='Press for more help',compound="top")
self.button9.image=info_btn
self.button9.grid(row=1,column=2,padx=5,pady=10)
self.button10=ttk.Button(self.btmframe,text='Quit',image=ext_btn,tooltip='Quit software and all running processes',command=controller.enditall,compound="top")
self.button10.image=ext_btn
self.button10.grid(row=1,column=3,padx=5,pady=10)
#---ENTRIES---#
self.entry1=ttk.Entry(self.midframe,textvariable=self.shapefile,width=75)
self.entry1.grid(row=2,column=2,columnspan=2,padx=5)
self.entry2=ttk.Entry(self.midframe,textvariable=self.data_short,width=75)
self.entry2.grid(row=3,column=2,columnspan=2,padx=5)
# self.entry3=ttk.Entry(self.midframe,textvariable=self.swir_short,width=75)
# self.entry3.grid(row=5,column=2,columnspan=2,padx=5)
class OrthoMerging(ttk.Frame):
def popup_window(self):
win_x = self.winfo_rootx()+500
win_y = self.winfo_rooty()+150
window = tk.Toplevel()
window.geometry(f'+{win_x}+{win_y}')
        window.title('Orthophoto Merger - Help')
label = tk.Label(window, text='''
Inputs = orthomosaics for same field to be combined into one single multi-layer ortho.
Output will be compressed using LZW and predictor 2, please ensure enough space on disk for temporary files.
DEM (Digital Elevation Model) is the bare ground one, double check if you need to, but definitely bare ground.''',justify='left')
label.pack(fill='x', padx=50, pady=50)
button_close = ttk.Button(window, text="Close", command=window.destroy)
button_close.pack(fill='x')
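    # Read the raster's band descriptions; if any are missing, prompt the user to name each band.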
def checkBands(self,file,pop):
bands = []
def chkchk(bands, window):
if all([b.get() != '' for b in bands]):
self.bandnames.set(self.bandnames.get() + ', '.join(bands[a].get() for a in range(0,len(bands))))
# str([bands[a].get() for a in range(0,len(bands))]))
# self.label3.config(text=self.bandnames.get())
print(self.bandnames.get())
window.destroy()
else:
tk.messagebox.showerror("Error", "Band Name missing")
window.lift()
win_x = self.winfo_rootx()+500
win_y = self.winfo_rooty()+150
# pop = tk.Toplevel()
with rasterio.open(file) as f:
if any([a is None for a in f.descriptions]):
pop.geometry(f'+{win_x}+{win_y}')
pop.title('UAV Data Extractor - Help')
self.label = ttk.Label(pop,text='Unknown bands, please provide band names')
self.label.grid(row=1,column=1,columnspan=2)
for a in range(1,f.count+1):
bands.append(tk.StringVar())
self.bndNM = AutocompleteCombobox(pop,width=10, textvariable=bands[a-1], completevalues=self.band_opts)
self.bndNM.grid(row=a+1,column=2)
self.bnd = ttk.Label(pop,text=f'Band {a}:')
self.bnd.grid(row=a+1,column=1)
self.button_sub = ttk.Button(pop, text="Submit", width=20, command=lambda: chkchk(bands, pop))
self.button_sub.grid(row=a+2,column=1,columnspan=2)
else:
                self.bandnames.set(self.bandnames.get() + ', '.join(f.descriptions[a] for a in range(0,f.count)))
pop.destroy()
def get_rgb(self):
        files=tk.filedialog.askopenfilename(initialdir = "/",title = 'RGB',filetypes=(("tif","*.tif"),("all files","*.*")))
if files != '':
pop = tk.Toplevel()
self.checkBands(files,pop)
self.wait_window(pop)
self.rgb.set(files)
self.rgb_short.set(os.path.basename(files))
# return(self.button5)
def get_nir(self):
files=tk.filedialog.askopenfilename(initialdir = os.path.dirname(self.rgb.get()),title = 'NIR',filetypes=(("tif","*.tif"),("all files","*.*")))
self.nir.set(files)
self.nir_short.set(os.path.basename(files))
def get_DEM(self):
files=tk.filedialog.askopenfilename(initialdir = os.path.dirname(self.rgb.get()),title = 'Bare Ground DEM',filetypes=(("tif","*.tif"),("all files","*.*")))
self.DEM.set(files)
self.DEM_short.set(os.path.basename(files))
def get_DSM(self):
files=tk.filedialog.askopenfilename(initialdir = os.path.dirname(self.rgb.get()),title = 'DSM',filetypes=(("tif","*.tif"),("all files","*.*")))
self.DSM.set(files)
self.DSM_short.set(os.path.basename(files))
def get_Other(self):
files=tk.filedialog.askopenfilename(initialdir = os.path.dirname(self.rgb.get()),title = 'Other File',filetypes=(("tif","*.tif"),("all files","*.*")))
other_name=tk.simpledialog.askstring(title='Layer Name',prompt='Provide a name for this layer (e.g. Thermal)')
self.other_file.set(files)
self.other.set(other_name)
def get_outfilename(self):
folder=tk.filedialog.asksaveasfilename(initialdir = os.path.abspath(os.path.join(self.rgb.get(),'../')),title = 'Output file',filetypes=(("tif","*.tif"),("all files","*.*")))
if '.tif' not in folder:
folder += '.tif'
self.out_file.set(folder)
self._toggle_state('normal')
def monitor(self, thread):
if thread.is_alive():
# check the thread every 100ms
self.after(100, lambda: self.monitor(thread))
else:
tk.messagebox.showinfo("Processing Complete", "Processing Complete")
gc.collect()
self._toggle_state('enabled')
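    # Validate the selected inputs and launch orthoMerge in a background thread, monitoring it until completion.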
def run(self):
if self.rgb.get() == '':
tk.messagebox.showinfo("Select RGB file", "Please a RGB or Primary dataset")
if self.out_file.get() == '':
tk.messagebox.showinfo("Select Output file", "Please define a file name and location")
else:
self._toggle_state('disabled')
try:
print(self.bandnames.get())
others = (self.other.get(),self.other_file.get())
gc.collect()
thread_1 = threading.Thread(target=orthoMerge, args=(self.rgb.get(),self.nir.get(),self.DEM.get(),self.DSM.get(),others,self.out_file.get(),self.bandnames.get()))
thread_1.setDaemon(True)
thread_1.start()
self.monitor(thread_1)
except Exception as e:
tk.messagebox.showerror("Error", e)
traceback.print_exc()
self._toggle_state('normal')
def _toggle_state(self, state):
state = state if state in ('normal', 'disabled') else 'normal'
try:
widgets = (self.button1, self.button2, self.button3, self.button4, self.button5, self.button6, self.button7, self.button8,self.button9,self.button10)
except:
widgets = (self.button1, self.button2, self.button3, self.button4, self.button5, self.button8,self.button9,self.button10)
for widget in widgets:
widget.configure(state=state)
def clear(self):
a = [b for b in self.bandnames.get().split(', ')]
print(a)
print(self.bandnames.get())
self.bandnames.set('')
# self.entry7.config(text=self.bandnames.get())
def __init__(self,parent,controller):
tk.Frame.__init__(self,parent)
self.grid_rowconfigure(1, weight=1)
self.grid_columnconfigure(0, weight=1)
self.topframe = tk.Frame(self)
self.midframe = tk.Frame(self)
self.btmframe = tk.Frame(self)
self.topframe.grid(row=0)
self.midframe.grid(row=1)
self.btmframe.grid(row=2)
#---VARIABLES---#
self.bandnames = tk.StringVar()
self.rgb=tk.StringVar()
self.rgb.set('')
self.rgb_short=tk.StringVar()
self.rgb_short.set('')
self.nir=tk.StringVar()
self.nir.set('')
self.nir_short=tk.StringVar()
self.nir_short.set('')
self.DEM=tk.StringVar()
self.DEM.set('')
self.DEM_short=tk.StringVar()
self.DEM_short.set('')
self.DSM=tk.StringVar()
self.DSM.set('')
self.DSM_short=tk.StringVar()
self.DSM_short.set('')
self.other=tk.StringVar()
self.other.set('')
self.other_file=tk.StringVar()
self.other_file.set('')
self.out_file=tk.StringVar()
self.out_file.set('')
info_btn = PhotoImage(file=info_button,master=self).subsample(5,5)
hme_btn = PhotoImage(file=home_button,master=self).subsample(5,5)
ext_btn = PhotoImage(file=exit_button,master=self).subsample(5,5)
self.band_opts = ['Red','Green','Blue','Alpha','NIR','Thermal']
self.label1=tk.Label(self.topframe,text='Orthophoto Merger',font=Large_Font)
self.label1.grid(row=0,column=2,padx=10)
self.label2=tk.Label(self.topframe,text='Merge orthophotos into a single multilayer orthophoto, compressed using LZW(2).\n \n Hover over inputs/outputs for more info.\n \n DEM = Bare Ground; DSM = the one with crops in.',font=Norm_Font)
self.label2.grid(row=1,column=2,padx=10)
self.button1=ttk.Button(self.midframe,text='RGB',command=self.get_rgb,tooltip='RGB orthomosaic',width=20)
self.button1.grid(row=2,column=1,pady=10)
self.button2=ttk.Button(self.midframe,text='NIR',command=self.get_nir,tooltip='NIR orthomosaic',width=20)
self.button2.grid(row=3,column=1,pady=10)
self.button3=ttk.Button(self.midframe,text='DEM (BG)',command=self.get_DEM,tooltip='Bare Ground DEM',width=20)
self.button3.grid(row=4,column=1,pady=10)
self.button4=ttk.Button(self.midframe,text='DSM',command=self.get_DSM,tooltip='Digital Surface Model (with crops!)',width=20)
self.button4.grid(row=5,column=1,pady=10)
self.button5=ttk.Button(self.midframe,text='Other',command=self.get_Other,tooltip='Any other orthomosaic to include (e.g. Thermal)',width=20)
self.button5.grid(row=6,column=1,pady=10)
self.button6=ttk.Button(self.midframe,text='OutFile',command=self.get_outfilename,tooltip='Outpath for generated orthomosaic (.tif)',width=20)
self.button6.grid(row=7,column=1,pady=10)
self.button7=ttk.Button(self.midframe,text='Run',command=self.run,width=15)
self.button7.configure(state='disabled')
self.button7.grid(row=11,column=2,pady=10,padx=75)
self.button8=ttk.Button(self.midframe, text='clear',command=self.clear,tooltip='Clear Band Names',width=20)
self.button8.grid(row=8,column=1,pady=10)
        self.button9=ttk.Button(self.btmframe,image=hme_btn,text='Home',tooltip="Go Home (you're drunk)",command=lambda: controller.show_frame(HomePage),compound="top")
self.button9.image=hme_btn
self.button9.grid(row=1,column=1,padx=5,pady=10)
self.button10=ttk.Button(self.btmframe,image=info_btn,text='Help',command=self.popup_window,tooltip='Press for more help',compound="top")
self.button10.image=info_btn
self.button10.grid(row=1,column=2,padx=5,pady=10)
self.button11=ttk.Button(self.btmframe,text='Quit',image=ext_btn,tooltip='Quit software and all running processes',command=controller.enditall,compound="top")
self.button11.image=ext_btn
self.button11.grid(row=1,column=3,padx=5,pady=10)
#---ENTRIES---#
self.entry1=ttk.Entry(self.midframe,textvariable=self.rgb_short,width=75)
self.entry1.grid(row=2,column=2,columnspan=2,padx=5)
self.entry2=ttk.Entry(self.midframe,textvariable=self.nir_short,width=75)
self.entry2.grid(row=3,column=2,columnspan=2,padx=5)
self.entry3=ttk.Entry(self.midframe,textvariable=self.DEM_short,width=75)
self.entry3.grid(row=4,column=2,columnspan=2,padx=5)
self.entry4=ttk.Entry(self.midframe,textvariable=self.DSM_short,width=75)
self.entry4.grid(row=5,column=2,columnspan=2,padx=5)
self.entry5=ttk.Entry(self.midframe,textvariable=self.other_file,width=75)
self.entry5.grid(row=6,column=2,columnspan=2,padx=5)
self.entry6=ttk.Entry(self.midframe,textvariable=self.out_file,width=75)
self.entry6.grid(row=7,column=2,columnspan=2,padx=5)
self.entry7=ttk.Entry(self.midframe,textvariable=self.bandnames,width=75)
self.entry7.grid(row=8,column=2,columnspan=2)
if __name__ == "__main__":
app=software()
app.geometry("1200x900")
app.mainloop() | []
| []
| [
"USE_PATH_FOR_GDAL_PYTHON",
"GDAL_DATA",
"PROJ_LIB",
"PATH"
]
| [] | ["USE_PATH_FOR_GDAL_PYTHON", "GDAL_DATA", "PROJ_LIB", "PATH"] | python | 4 | 0 | |
frontend/config/parser.py | #
# Copyright (c) 2013-2018 Quarkslab.
# This file is part of IRMA project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the top-level directory
# of this distribution and at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# No part of the project, including this file, may be copied,
# modified, propagated, or distributed except according to the
# terms contained in the LICENSE file.
import os
import logging
import ssl
from kombu import Queue
from celery.schedules import crontab
from logging import BASIC_FORMAT, Formatter
from logging.handlers import SysLogHandler
from celery.log import redirect_stdouts_to_logger
from celery.signals import after_setup_task_logger, after_setup_logger
from irma.common.base.utils import common_celery_options
from irma.common.base.exceptions import IrmaConfigurationError
from irma.common.configuration.ini import TemplatedConfiguration
from irma.common.configuration.sql import SQLConf
from irma.common.ftp.sftp import IrmaSFTP
from irma.common.ftp.sftpv2 import IrmaSFTPv2
from irma.common.ftp.ftps import IrmaFTPS
# ==========
# Template
# ==========
template_frontend_config = {
'log': [
('syslog', TemplatedConfiguration.integer, 0),
('prefix', TemplatedConfiguration.string, "irma-frontend :"),
('debug', TemplatedConfiguration.boolean, False),
('sql_debug', TemplatedConfiguration.boolean, False),
],
'sqldb': [
('username', TemplatedConfiguration.string, None),
('password', TemplatedConfiguration.string, None),
('host', TemplatedConfiguration.string, None),
('port', TemplatedConfiguration.string, None),
('dbname', TemplatedConfiguration.string, None),
('tables_prefix', TemplatedConfiguration.string, None),
('sslmode', TemplatedConfiguration.string, "disable"),
('sslrootcert', TemplatedConfiguration.string, ""),
('sslcert', TemplatedConfiguration.string, ""),
('sslkey', TemplatedConfiguration.string, ""),
],
'samples_storage': [
('path', TemplatedConfiguration.string, None)
],
'celery_brain': [
('timeout', TemplatedConfiguration.integer, 60),
],
'celery_options': [
('concurrency', TemplatedConfiguration.integer, 0),
('soft_time_limit', TemplatedConfiguration.integer, 300),
('time_limit', TemplatedConfiguration.integer, 1500),
('beat_schedule', TemplatedConfiguration.string,
"/var/irma/frontend_beat_schedule"),
],
'broker_brain': [
('host', TemplatedConfiguration.string, None),
('port', TemplatedConfiguration.integer, 5672),
('vhost', TemplatedConfiguration.string, None),
('username', TemplatedConfiguration.string, None),
('password', TemplatedConfiguration.string, None),
('queue', TemplatedConfiguration.string, None),
],
'broker_frontend': [
('host', TemplatedConfiguration.string, None),
('port', TemplatedConfiguration.integer, 5672),
('vhost', TemplatedConfiguration.string, None),
('username', TemplatedConfiguration.string, None),
('password', TemplatedConfiguration.string, None),
('queue', TemplatedConfiguration.string, None),
],
'ftp': [
('protocol', TemplatedConfiguration.string, "sftpv2"),
],
'ftp_brain': [
('host', TemplatedConfiguration.string, None),
('auth', TemplatedConfiguration.string, "password"),
('key_path', TemplatedConfiguration.string, ""),
('port', TemplatedConfiguration.integer, 22),
('username', TemplatedConfiguration.string, None),
('password', TemplatedConfiguration.string, None),
],
'cron_clean_file_age': [
('clean_fs_max_age', TemplatedConfiguration.string, "0"),
('clean_fs_age_cron_hour', TemplatedConfiguration.string, "0"),
('clean_fs_age_cron_minute', TemplatedConfiguration.string, "0"),
('clean_fs_age_cron_day_of_week', TemplatedConfiguration.string, "*"),
],
'cron_clean_file_size': [
('clean_fs_max_size', TemplatedConfiguration.string, "0"),
('clean_fs_size_cron_hour', TemplatedConfiguration.string, "*"),
('clean_fs_size_cron_minute', TemplatedConfiguration.string, "0"),
('clean_fs_size_cron_day_of_week', TemplatedConfiguration.string, "*"),
],
'interprocess_lock': [
('path', TemplatedConfiguration.string,
"/var/run/lock/irma-frontend.lock"),
],
'ssl_config': [
('activate_ssl', TemplatedConfiguration.boolean, False),
('ca_certs', TemplatedConfiguration.string, None),
('keyfile', TemplatedConfiguration.string, None),
('certfile', TemplatedConfiguration.string, None),
],
'processing': [
('max_resubmit', TemplatedConfiguration.integer, 15),
],
}
config_path = os.environ.get('IRMA_FRONTEND_CFG_PATH')
if config_path is None:
    # Fall back to the default path, which is
    # the directory containing this module
config_path = os.path.abspath(os.path.dirname(__file__))
cfg_file = os.path.join(config_path, "frontend.ini")
frontend_config = TemplatedConfiguration(cfg_file, template_frontend_config)
# ===============
# Celery helper
# ===============
def get_celery_options(app, app_name):
concurrency = frontend_config.celery_options.concurrency
soft_time_limit = frontend_config.celery_options.soft_time_limit
time_limit = frontend_config.celery_options.time_limit
options = common_celery_options(app,
app_name,
concurrency,
soft_time_limit,
time_limit)
options.append("--beat")
beat_schedule = frontend_config.celery_options.beat_schedule
options.append("--schedule={}".format(beat_schedule))
return options
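# Configure a Celery app with the broker URL, JSON serialization, an optional result backend/queue and SSL options.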
def _conf_celery(app, broker, backend=None, queue=None):
app.conf.update(
BROKER_URL=broker,
CELERY_ACCEPT_CONTENT=['json'],
CELERY_TASK_SERIALIZER='json',
CELERY_RESULT_SERIALIZER='json'
)
if backend is not None:
app.conf.update(CELERY_RESULT_BACKEND=backend)
app.conf.update(CELERY_TASK_RESULT_EXPIRES=300) # 5 minutes
if queue is not None:
app.conf.update(
CELERY_DEFAULT_QUEUE=queue,
# delivery_mode=1 enable transient mode
# (don't survive to a server restart)
CELERY_QUEUES=(Queue(queue, routing_key=queue),)
)
if frontend_config.ssl_config.activate_ssl:
ca_certs_path = frontend_config.ssl_config.ca_certs
keyfile_path = frontend_config.ssl_config.keyfile
certfile_path = frontend_config.ssl_config.certfile
app.conf.update(
BROKER_USE_SSL={
'ca_certs': ca_certs_path,
'keyfile': keyfile_path,
'certfile': certfile_path,
'cert_reqs': ssl.CERT_REQUIRED
}
)
return
def conf_brain_celery(app):
broker = get_brain_broker_uri()
# default backend for brain celery
# is amqp
backend = get_brain_backend_uri()
queue = frontend_config.broker_brain.queue
_conf_celery(app, broker, backend=backend, queue=queue)
def conf_frontend_celery(app):
broker = get_frontend_broker_uri()
queue = frontend_config.broker_frontend.queue
_conf_celery(app, broker, queue=queue)
# add celerybeat conf only for frontend app
cron_age_cfg = frontend_config['cron_clean_file_age']
cron_size_cfg = frontend_config['cron_clean_file_size']
app.conf.update(
CELERYBEAT_SCHEDULE={
# File System clean according to file max age
'clean_fs_age': {
'task': 'frontend_app.clean_fs_age',
'schedule': crontab(
hour=cron_age_cfg['clean_fs_age_cron_hour'],
minute=cron_age_cfg['clean_fs_age_cron_minute'],
day_of_week=cron_age_cfg['clean_fs_age_cron_day_of_week']
),
'args': (),
},
# File System clean according to sum max size
'clean_fs_size': {
'task': 'frontend_app.clean_fs_size',
'schedule': crontab(
hour=cron_size_cfg['clean_fs_size_cron_hour'],
minute=cron_size_cfg['clean_fs_size_cron_minute'],
day_of_week=cron_size_cfg['clean_fs_size_cron_day_of_week']
),
'args': (),
},
},
CELERY_TIMEZONE='UTC'
)
def get_brain_celery_timeout():
return frontend_config.celery_brain.timeout
# ========================
# Broker/Backend helpers
# ========================
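# Build an amqp:// connection URI from a broker configuration section.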
def _get_amqp_uri(broker_config):
user = broker_config.username
pwd = broker_config.password
host = broker_config.host
port = broker_config.port
vhost = broker_config.vhost
return "amqp://{user}:{pwd}@{host}:{port}/{vhost}".format(user=user,
pwd=pwd,
host=host,
port=port,
vhost=vhost)
def get_brain_broker_uri():
return _get_amqp_uri(frontend_config.broker_brain)
def get_brain_backend_uri():
return _get_amqp_uri(frontend_config.broker_brain)
def get_frontend_broker_uri():
return _get_amqp_uri(frontend_config.broker_frontend)
# ================
# Syslog helpers
# ================
def configure_syslog(app):
if frontend_config.log.syslog:
app.conf.update(CELERYD_LOG_COLOR=False)
after_setup_logger.connect(setup_log)
after_setup_task_logger.connect(setup_log)
def setup_log(**args):
# redirect stdout and stderr to logger
redirect_stdouts_to_logger(args['logger'])
# logs to local syslog
hl = SysLogHandler('/dev/log',
facility=SysLogHandler.facility_names['syslog'])
# setting log level
hl.setLevel(args['loglevel'])
# setting log format
formatter = Formatter(frontend_config.log.prefix + BASIC_FORMAT)
hl.setFormatter(formatter)
# add new handler to logger
args['logger'].addHandler(hl)
def debug_enabled():
return frontend_config.log.debug
def sql_debug_enabled():
return frontend_config.log.sql_debug
def setup_debug_logger(logger):
log = logging.getLogger()
log.setLevel(logging.DEBUG)
FORMAT = "%(asctime)-15s %(name)s %(process)d %(filename)s:"
FORMAT += "%(lineno)d (%(funcName)s) %(message)s"
logging.basicConfig(format=FORMAT)
logger.setLevel(logging.DEBUG)
return
# ==================
# Database helpers
# ==================
sqldb = SQLConf(dbms="postgresql", dialect="psycopg2", **frontend_config.sqldb)
def get_samples_storage_path():
return os.path.abspath(frontend_config.samples_storage.path)
# =============
# FTP helpers
# =============
def get_ftp_class():
protocol = frontend_config.ftp.protocol
if protocol == "sftp":
key_path = frontend_config.ftp_brain.key_path
auth = frontend_config.ftp_brain.auth
if auth == "key" and not os.path.isfile(key_path):
msg = "You are using SFTP authentication by key but the path of " \
"the private key does not exist:['" + key_path + "']"
raise IrmaConfigurationError(msg)
return IrmaSFTP
elif protocol == "sftpv2":
auth = frontend_config.ftp_brain.auth
if auth == "key":
raise IrmaConfigurationError("SFTPv2 pubkey auth not implemented")
return IrmaSFTPv2
elif protocol == "ftps":
return IrmaFTPS
# =====================
# Concurrency helpers
# =====================
def get_lock_path():
return frontend_config.interprocess_lock.path
# ====================
# Processing helpers
# ====================
def get_max_resubmit_level():
return frontend_config.processing.max_resubmit
| []
| []
| [
"IRMA_FRONTEND_CFG_PATH"
]
| [] | ["IRMA_FRONTEND_CFG_PATH"] | python | 1 | 0 | |
vendor/github.com/mitchellh/packer/builder/profitbricks/config.go | package profitbricks
import (
"errors"
"github.com/mitchellh/mapstructure"
"github.com/mitchellh/packer/common"
"github.com/mitchellh/packer/helper/communicator"
"github.com/mitchellh/packer/helper/config"
"github.com/mitchellh/packer/packer"
"github.com/mitchellh/packer/template/interpolate"
"os"
)
type Config struct {
common.PackerConfig `mapstructure:",squash"`
Comm communicator.Config `mapstructure:",squash"`
PBUsername string `mapstructure:"username"`
PBPassword string `mapstructure:"password"`
PBUrl string `mapstructure:"url"`
Region string `mapstructure:"location"`
Image string `mapstructure:"image"`
SSHKey string
SnapshotName string `mapstructure:"snapshot_name"`
DiskSize int `mapstructure:"disk_size"`
DiskType string `mapstructure:"disk_type"`
Cores int `mapstructure:"cores"`
Ram int `mapstructure:"ram"`
Retries int `mapstructure:"retries"`
CommConfig communicator.Config `mapstructure:",squash"`
ctx interpolate.Context
}
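// NewConfig decodes the builder configuration, applies defaults (including PROFITBRICKS_* environment fallbacks) and validates the required fields.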
func NewConfig(raws ...interface{}) (*Config, []string, error) {
var c Config
var md mapstructure.Metadata
err := config.Decode(&c, &config.DecodeOpts{
Metadata: &md,
Interpolate: true,
InterpolateContext: &c.ctx,
InterpolateFilter: &interpolate.RenderFilter{
Exclude: []string{
"run_command",
},
},
}, raws...)
if err != nil {
return nil, nil, err
}
var errs *packer.MultiError
if c.Comm.SSHPassword == "" && c.Comm.SSHPrivateKey == "" {
errs = packer.MultiErrorAppend(
errs, errors.New("Either ssh private key path or ssh password must be set."))
}
if c.SnapshotName == "" {
def, err := interpolate.Render("packer-{{timestamp}}", nil)
if err != nil {
panic(err)
}
// Default to packer-{{ unix timestamp (utc) }}
c.SnapshotName = def
}
if c.PBUsername == "" {
c.PBUsername = os.Getenv("PROFITBRICKS_USERNAME")
}
if c.PBPassword == "" {
c.PBPassword = os.Getenv("PROFITBRICKS_PASSWORD")
}
if c.PBUrl == "" {
c.PBUrl = "https://api.profitbricks.com/rest/v2"
}
if c.Cores == 0 {
c.Cores = 4
}
if c.Ram == 0 {
c.Ram = 2048
}
if c.DiskSize == 0 {
c.DiskSize = 50
}
if c.Region == "" {
c.Region = "us/las"
}
if c.DiskType == "" {
c.DiskType = "HDD"
}
if es := c.Comm.Prepare(&c.ctx); len(es) > 0 {
errs = packer.MultiErrorAppend(errs, es...)
}
if c.Image == "" {
errs = packer.MultiErrorAppend(
errs, errors.New("ProfitBricks 'image' is required"))
}
if c.PBUsername == "" {
errs = packer.MultiErrorAppend(
errs, errors.New("ProfitBricks username is required"))
}
if c.PBPassword == "" {
errs = packer.MultiErrorAppend(
errs, errors.New("ProfitBricks password is required"))
}
if errs != nil && len(errs.Errors) > 0 {
return nil, nil, errs
}
common.ScrubConfig(c, c.PBUsername)
return &c, nil, nil
}
| [
"\"PROFITBRICKS_USERNAME\"",
"\"PROFITBRICKS_PASSWORD\""
]
| []
| [
"PROFITBRICKS_PASSWORD",
"PROFITBRICKS_USERNAME"
]
| [] | ["PROFITBRICKS_PASSWORD", "PROFITBRICKS_USERNAME"] | go | 2 | 0 | |
vendor/github.com/cloudfoundry/libbuildpack/cutlass/cf.go | package cutlass
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"github.com/blang/semver"
"github.com/tidwall/gjson"
)
var DefaultMemory string = ""
var DefaultDisk string = ""
var Cached bool = false
var DefaultStdoutStderr io.Writer = nil
type cfConfig struct {
SpaceFields struct {
GUID string
}
}
type cfApps struct {
Resources []struct {
Metadata struct {
GUID string `json:"guid"`
} `json:"metadata"`
} `json:"resources"`
}
type cfInstance struct {
State string `json:"state"`
}
type App struct {
Name string
Path string
Stack string
Buildpacks []string
Memory string
Disk string
StartCommand string
Stdout *Buffer
appGUID string
env map[string]string
logCmd *exec.Cmd
HealthCheck string
}
func New(fixture string) *App {
return &App{
Name: filepath.Base(fixture) + "-" + RandStringRunes(20),
Path: fixture,
Stack: os.Getenv("CF_STACK"),
Buildpacks: []string{},
Memory: DefaultMemory,
Disk: DefaultDisk,
StartCommand: "",
appGUID: "",
env: map[string]string{},
logCmd: nil,
HealthCheck: "",
}
}
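// ApiVersion returns the Cloud Foundry API version reported by /v2/info.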
func ApiVersion() (string, error) {
cmd := exec.Command("cf", "curl", "/v2/info")
cmd.Stderr = DefaultStdoutStderr
bytes, err := cmd.Output()
if err != nil {
return "", err
}
var info struct {
ApiVersion string `json:"api_version"`
}
if err := json.Unmarshal(bytes, &info); err != nil {
return "", err
}
return info.ApiVersion, nil
}
func ApiGreaterThan(version string) (bool, error) {
apiVersionString, err := ApiVersion()
if err != nil {
return false, err
}
apiVersion, err := semver.Make(apiVersionString)
if err != nil {
return false, err
}
reqVersion, err := semver.ParseRange(">= " + version)
if err != nil {
return false, err
}
return reqVersion(apiVersion), nil
}
func Stacks() ([]string, error) {
cmd := exec.Command("cf", "curl", "/v2/stacks")
cmd.Stderr = DefaultStdoutStderr
bytes, err := cmd.Output()
if err != nil {
return nil, err
}
var info struct {
Resources []struct {
Entity struct {
Name string `json:"name"`
} `json:"entity"`
} `json:"resources"`
}
if err := json.Unmarshal(bytes, &info); err != nil {
return nil, err
}
var out []string
for _, r := range info.Resources {
out = append(out, r.Entity.Name)
}
return out, nil
}
func DeleteOrphanedRoutes() error {
command := exec.Command("cf", "delete-orphaned-routes", "-f")
command.Stdout = DefaultStdoutStderr
command.Stderr = DefaultStdoutStderr
if err := command.Run(); err != nil {
return err
}
return nil
}
func DeleteBuildpack(language string) error {
command := exec.Command("cf", "delete-buildpack", "-f", fmt.Sprintf("%s_buildpack", language))
if data, err := command.CombinedOutput(); err != nil {
fmt.Println(string(data))
return err
}
return nil
}
func UpdateBuildpack(language, file, stack string) error {
command := exec.Command("cf", "update-buildpack", fmt.Sprintf("%s_buildpack", language), "-p", file, "--enable", "-s", stack)
if data, err := command.CombinedOutput(); err != nil {
return fmt.Errorf("Failed to update buildpack by running '%s':\n%s\n%v", strings.Join(command.Args, " "), string(data), err)
}
return nil
}
func createBuildpack(language, file string) error {
command := exec.Command("cf", "create-buildpack", fmt.Sprintf("%s_buildpack", language), file, "100", "--enable")
if data, err := command.CombinedOutput(); err != nil {
return fmt.Errorf("Failed to create buildpack by running '%s':\n%s\n%v", strings.Join(command.Args, " "), string(data), err)
}
return nil
}
func CountBuildpack(language string) (int, error) {
command := exec.Command("cf", "buildpacks")
targetBpname := fmt.Sprintf("%s_buildpack", language)
matches := 0
lines, err := command.CombinedOutput()
if err != nil {
return -1, err
}
for _, line := range strings.Split(string(lines), "\n") {
bpname := strings.SplitN(line, " ", 2)[0]
if bpname == targetBpname {
matches++
}
}
return matches, nil
}
func CreateOrUpdateBuildpack(language, file, stack string) error {
createBuildpack(language, file)
return UpdateBuildpack(language, file, stack)
}
func (a *App) ConfirmBuildpack(version string) error {
if !strings.Contains(a.Stdout.String(), fmt.Sprintf("Buildpack version %s\n", version)) {
var versionLine string
for _, line := range strings.Split(a.Stdout.String(), "\n") {
if versionLine == "" && strings.Contains(line, " Buildpack version ") {
versionLine = line
}
}
return fmt.Errorf("Wrong buildpack version. Expected '%s', but this was logged: %s", version, versionLine)
}
return nil
}
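// RunTask runs a one-off task against the app using 'cf run-task' and returns the command output.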
func (a *App) RunTask(command string) ([]byte, error) {
cmd := exec.Command("cf", "run-task", a.Name, command)
cmd.Stderr = DefaultStdoutStderr
bytes, err := cmd.Output()
if err != nil {
return bytes, err
}
return bytes, nil
}
func (a *App) Stop() error {
command := exec.Command("cf", "stop", a.Name)
command.Stdout = DefaultStdoutStderr
command.Stderr = DefaultStdoutStderr
if err := command.Run(); err != nil {
return err
}
return nil
}
func (a *App) Restart() error {
command := exec.Command("cf", "restart", a.Name)
command.Stdout = DefaultStdoutStderr
command.Stderr = DefaultStdoutStderr
if err := command.Run(); err != nil {
return err
}
return nil
}
func (a *App) SetEnv(key, value string) {
a.env[key] = value
}
func (a *App) SpaceGUID() (string, error) {
cfHome := os.Getenv("CF_HOME")
if cfHome == "" {
cfHome = os.Getenv("HOME")
}
bytes, err := ioutil.ReadFile(filepath.Join(cfHome, ".cf", "config.json"))
if err != nil {
return "", err
}
var config cfConfig
if err := json.Unmarshal(bytes, &config); err != nil {
return "", err
}
return config.SpaceFields.GUID, nil
}
func (a *App) AppGUID() (string, error) {
if a.appGUID != "" {
return a.appGUID, nil
}
guid, err := a.SpaceGUID()
if err != nil {
return "", err
}
cmd := exec.Command("cf", "curl", "/v2/apps?q=space_guid:"+guid+"&q=name:"+a.Name)
cmd.Stderr = DefaultStdoutStderr
bytes, err := cmd.Output()
if err != nil {
return "", err
}
var apps cfApps
if err := json.Unmarshal(bytes, &apps); err != nil {
return "", err
}
if len(apps.Resources) != 1 {
return "", fmt.Errorf("Expected one app, found %d", len(apps.Resources))
}
a.appGUID = apps.Resources[0].Metadata.GUID
return a.appGUID, nil
}
func (a *App) InstanceStates() ([]string, error) {
guid, err := a.AppGUID()
if err != nil {
return []string{}, err
}
cmd := exec.Command("cf", "curl", "/v2/apps/"+guid+"/instances")
cmd.Stderr = DefaultStdoutStderr
bytes, err := cmd.Output()
if err != nil {
return []string{}, err
}
var data map[string]cfInstance
if err := json.Unmarshal(bytes, &data); err != nil {
return []string{}, err
}
var states []string
for _, value := range data {
states = append(states, value.State)
}
return states, nil
}
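// PushNoStart pushes the app without starting it, applies the configured flags and environment variables, and begins streaming its logs.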
func (a *App) PushNoStart() error {
args := []string{"push", a.Name, "--no-start", "-p", a.Path}
if a.Stack != "" {
args = append(args, "-s", a.Stack)
}
for _, buildpack := range a.Buildpacks {
args = append(args, "-b", buildpack)
}
if _, err := os.Stat(filepath.Join(a.Path, "manifest.yml")); err == nil {
args = append(args, "-f", filepath.Join(a.Path, "manifest.yml"))
}
if a.Memory != "" {
args = append(args, "-m", a.Memory)
}
if a.Disk != "" {
args = append(args, "-k", a.Disk)
}
if a.StartCommand != "" {
args = append(args, "-c", a.StartCommand)
}
if a.HealthCheck != "" {
args = append(args, "-u", a.HealthCheck)
}
command := exec.Command("cf", args...)
command.Stdout = DefaultStdoutStderr
command.Stderr = DefaultStdoutStderr
if err := command.Run(); err != nil {
return err
}
for k, v := range a.env {
command := exec.Command("cf", "set-env", a.Name, k, v)
command.Stdout = DefaultStdoutStderr
command.Stderr = DefaultStdoutStderr
if err := command.Run(); err != nil {
return err
}
}
if a.logCmd == nil {
a.logCmd = exec.Command("cf", "logs", a.Name)
a.logCmd.Stderr = DefaultStdoutStderr
a.Stdout = &Buffer{}
a.logCmd.Stdout = a.Stdout
if err := a.logCmd.Start(); err != nil {
return err
}
}
return nil
}
func (a *App) V3Push() error {
if err := a.PushNoStart(); err != nil {
return err
}
args := []string{"v3-push", a.Name, "-p", a.Path}
if len(a.Buildpacks) > 1 {
for _, buildpack := range a.Buildpacks {
args = append(args, "-b", buildpack)
}
}
command := exec.Command("cf", args...)
command.Stdout = DefaultStdoutStderr
command.Stderr = DefaultStdoutStderr
if err := command.Run(); err != nil {
return err
}
return nil
}
func (a *App) Push() error {
if err := a.PushNoStart(); err != nil {
return err
}
command := exec.Command("cf", "start", a.Name)
command.Stdout = DefaultStdoutStderr
command.Stderr = DefaultStdoutStderr
if err := command.Run(); err != nil {
return err
}
return nil
}
func (a *App) GetUrl(path string) (string, error) {
guid, err := a.AppGUID()
if err != nil {
return "", err
}
cmd := exec.Command("cf", "curl", "/v2/apps/"+guid+"/summary")
cmd.Stderr = DefaultStdoutStderr
data, err := cmd.Output()
if err != nil {
return "", err
}
schema, found := os.LookupEnv("CUTLASS_SCHEMA")
if !found {
schema = "http"
}
host := gjson.Get(string(data), "routes.0.host").String()
domain := gjson.Get(string(data), "routes.0.domain.name").String()
return fmt.Sprintf("%s://%s.%s%s", schema, host, domain, path), nil
}
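// Get performs an HTTP GET against the app's route, honoring optional headers, basic auth and a "NoFollow" redirect flag, and returns the body and response headers.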
func (a *App) Get(path string, headers map[string]string) (string, map[string][]string, error) {
url, err := a.GetUrl(path)
if err != nil {
return "", map[string][]string{}, err
}
client := &http.Client{}
if headers["NoFollow"] == "true" {
client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}
delete(headers, "NoFollow")
}
req, _ := http.NewRequest("GET", url, nil)
for k, v := range headers {
req.Header.Add(k, v)
}
if headers["user"] != "" && headers["password"] != "" {
req.SetBasicAuth(headers["user"], headers["password"])
delete(headers, "user")
delete(headers, "password")
}
resp, err := client.Do(req)
if err != nil {
return "", map[string][]string{}, err
}
defer resp.Body.Close()
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", map[string][]string{}, err
}
resp.Header["StatusCode"] = []string{strconv.Itoa(resp.StatusCode)}
return string(data), resp.Header, err
}
func (a *App) GetBody(path string) (string, error) {
body, _, err := a.Get(path, map[string]string{})
// TODO: Non 200 ??
// if !(len(headers["StatusCode"]) == 1 && headers["StatusCode"][0] == "200") {
// return "", fmt.Errorf("non 200 status: %v", headers)
// }
return body, err
}
func (a *App) Files(path string) ([]string, error) {
cmd := exec.Command("cf", "ssh", a.Name, "-c", "find "+path)
cmd.Stderr = DefaultStdoutStderr
output, err := cmd.Output()
if err != nil {
return []string{}, err
}
return strings.Split(string(output), "\n"), nil
}
func (a *App) DownloadDroplet(path string) error {
guid, err := a.AppGUID()
if err != nil {
return err
}
cmd := exec.Command("cf", "curl", "/v2/apps/"+guid+"/droplet/download", "--output", path)
cmd.Stderr = DefaultStdoutStderr
_, err = cmd.Output()
return err
}
func (a *App) Destroy() error {
if a.logCmd != nil && a.logCmd.Process != nil {
if err := a.logCmd.Process.Kill(); err != nil {
return err
}
}
command := exec.Command("cf", "delete", "-f", a.Name)
command.Stdout = DefaultStdoutStderr
command.Stderr = DefaultStdoutStderr
return command.Run()
}
var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz")
func RandStringRunes(n int) string {
b := make([]rune, n)
for i := range b {
b[i] = letterRunes[rand.Intn(len(letterRunes))]
}
return string(b)
}
| [
"\"CF_STACK\"",
"\"CF_HOME\"",
"\"HOME\""
]
| []
| [
"CF_STACK",
"HOME",
"CF_HOME"
]
| [] | ["CF_STACK", "HOME", "CF_HOME"] | go | 3 | 0 | |
trading_app/asgi.py | """
ASGI config for trading_app project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "trading_app.settings")
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
pkg/helm/helm_template.go | package helm
import (
"bufio"
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"k8s.io/helm/pkg/chartutil"
"k8s.io/helm/pkg/proto/hapi/chart"
"github.com/jenkins-x/jx/pkg/kube"
"github.com/jenkins-x/jx/pkg/log"
"github.com/jenkins-x/jx/pkg/util"
"github.com/pkg/errors"
yaml "gopkg.in/yaml.v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
const (
// AnnotationChartName stores the chart name
AnnotationChartName = "jenkins.io/chart"
// AnnotationAppVersion stores the chart's app version
AnnotationAppVersion = "jenkins.io/chart-app-version"
// AnnotationAppDescription stores the chart's app version
AnnotationAppDescription = "jenkins.io/chart-description"
// AnnotationAppRepository stores the chart's app repository
AnnotationAppRepository = "jenkins.io/chart-repository"
// LabelReleaseName stores the chart release name
LabelReleaseName = "jenkins.io/chart-release"
// LabelNamespace stores the chart namespace for cluster wide resources
LabelNamespace = "jenkins.io/namespace"
// LabelReleaseChartVersion stores the version of a chart installation in a label
LabelReleaseChartVersion = "jenkins.io/version"
// LabelAppName stores the chart's app name
LabelAppName = "jenkins.io/app-name"
// LabelAppVersion stores the chart's app version
LabelAppVersion = "jenkins.io/app-version"
hookFailed = "hook-failed"
hookSucceeded = "hook-succeeded"
// resourcesSeparator is used to separate multiple objects stored in the same YAML file
resourcesSeparator = "---"
)
// HelmTemplate implements common helm actions but purely as client side operations
// delegating a separate Helmer such as HelmCLI for the client side operations
type HelmTemplate struct {
Client *HelmCLI
WorkDir string
CWD string
Binary string
Runner util.Commander
KubectlValidate bool
KubeClient kubernetes.Interface
Namespace string
}
// NewHelmTemplate creates a new HelmTemplate instance configured to the given client side Helmer
func NewHelmTemplate(client *HelmCLI, workDir string, kubeClient kubernetes.Interface, ns string) *HelmTemplate {
cli := &HelmTemplate{
Client: client,
WorkDir: workDir,
Runner: client.Runner,
Binary: "kubectl",
CWD: client.CWD,
KubectlValidate: false,
KubeClient: kubeClient,
Namespace: ns,
}
return cli
}
type HelmHook struct {
Kind string
Name string
File string
Hooks []string
HookDeletePolicies []string
}
// SetHost is used to point at a locally running tiller
func (h *HelmTemplate) SetHost(tillerAddress string) {
// NOOP
}
// SetCWD configures the common working directory of helm CLI
func (h *HelmTemplate) SetCWD(dir string) {
h.Client.SetCWD(dir)
h.CWD = dir
}
// HelmBinary return the configured helm CLI
func (h *HelmTemplate) HelmBinary() string {
return h.Client.HelmBinary()
}
// SetHelmBinary configure a new helm CLI
func (h *HelmTemplate) SetHelmBinary(binary string) {
h.Client.SetHelmBinary(binary)
}
// Init executes the helm init command according with the given flags
func (h *HelmTemplate) Init(clientOnly bool, serviceAccount string, tillerNamespace string, upgrade bool) error {
return h.Client.Init(true, serviceAccount, tillerNamespace, upgrade)
}
// AddRepo adds a new helm repo with the given name and URL
func (h *HelmTemplate) AddRepo(repo, URL, username, password string) error {
return h.Client.AddRepo(repo, URL, username, password)
}
// RemoveRepo removes the given repo from helm
func (h *HelmTemplate) RemoveRepo(repo string) error {
return h.Client.RemoveRepo(repo)
}
// ListRepos list the installed helm repos together with their URL
func (h *HelmTemplate) ListRepos() (map[string]string, error) {
return h.Client.ListRepos()
}
// SearchCharts searches for all the charts matching the given filter
func (h *HelmTemplate) SearchCharts(filter string, allVersions bool) ([]ChartSummary, error) {
return h.Client.SearchCharts(filter, false)
}
// IsRepoMissing checks if the repository with the given URL is missing from helm
func (h *HelmTemplate) IsRepoMissing(URL string) (bool, string, error) {
return h.Client.IsRepoMissing(URL)
}
// UpdateRepo updates the helm repositories
func (h *HelmTemplate) UpdateRepo() error {
return h.Client.UpdateRepo()
}
// RemoveRequirementsLock removes the requirements.lock file from the current working directory
func (h *HelmTemplate) RemoveRequirementsLock() error {
return h.Client.RemoveRequirementsLock()
}
// BuildDependency builds the helm dependencies of the helm chart from the current working directory
func (h *HelmTemplate) BuildDependency() error {
return h.Client.BuildDependency()
}
// ListReleases lists the releases in ns
func (h *HelmTemplate) ListReleases(ns string) (map[string]ReleaseSummary, []string, error) {
list, err := h.KubeClient.AppsV1().Deployments(ns).List(metav1.ListOptions{})
if err != nil {
return nil, nil, errors.WithStack(err)
}
charts := make(map[string]ReleaseSummary)
keys := make([]string, 0)
if list != nil {
for _, deploy := range list.Items {
labels := deploy.Labels
ann := deploy.Annotations
if labels != nil && ann != nil {
status := "ERROR"
if deploy.Status.Replicas > 0 {
if deploy.Status.UnavailableReplicas > 0 {
status = "PENDING"
} else {
status = "DEPLOYED"
}
}
updated := deploy.CreationTimestamp.Format("Mon Jan 2 15:04:05 2006")
chartName := ann[AnnotationChartName]
chartVersion := labels[LabelReleaseChartVersion]
releaseName := labels[LabelReleaseName]
keys = append(keys, releaseName)
charts[releaseName] = ReleaseSummary{
Chart: chartName,
ChartFullName: chartName + "-" + chartVersion,
Revision: strconv.FormatInt(deploy.Generation, 10),
Updated: updated,
Status: status,
ChartVersion: chartVersion,
ReleaseName: releaseName,
AppVersion: ann[AnnotationAppVersion],
Namespace: ns,
}
}
}
}
return charts, keys, nil
}
// FindChart find a chart in the current working directory, if no chart file is found an error is returned
func (h *HelmTemplate) FindChart() (string, error) {
return h.Client.FindChart()
}
// Lint lints the helm chart from the current working directory and returns the warnings in the output
func (h *HelmTemplate) Lint(valuesFiles []string) (string, error) {
return h.Client.Lint(valuesFiles)
}
// Env returns the environment variables for the helmer
func (h *HelmTemplate) Env() map[string]string {
return h.Client.Env()
}
// PackageChart packages the chart from the current working directory
func (h *HelmTemplate) PackageChart() error {
return h.Client.PackageChart()
}
// Version executes the helm version command and returns its output
func (h *HelmTemplate) Version(tls bool) (string, error) {
return h.Client.VersionWithArgs(tls, "--client")
}
// Template generates the YAML from the chart template to the given directory
func (h *HelmTemplate) Template(chart string, releaseName string, ns string, outDir string, upgrade bool, values []string,
valueFiles []string) error {
return h.Client.Template(chart, releaseName, ns, outDir, upgrade, values, valueFiles)
}
// Mutation API
// InstallChart installs a helm chart according with the given flags
func (h *HelmTemplate) InstallChart(chart string, releaseName string, ns string, version string, timeout int,
values []string, valueFiles []string, repo string, username string, password string) error {
err := h.clearOutputDir(releaseName)
if err != nil {
return err
}
	outputDir, _, chartsDir, err := h.getDirectories(releaseName)
	if err != nil {
		return err
	}
chartDir, err := h.fetchChart(chart, version, chartsDir, repo, username, password)
if err != nil {
return err
}
err = h.Client.Template(chartDir, releaseName, ns, outputDir, false, values, valueFiles)
if err != nil {
return err
}
// Skip the chart when no resources are generated by the template
if empty, err := util.IsEmpty(outputDir); empty || err != nil {
return nil
}
metadata, versionText, err := h.getChart(chartDir, version)
if err != nil {
return err
}
helmHooks, err := h.addLabelsToFiles(chart, releaseName, versionText, metadata, ns)
if err != nil {
return err
}
helmCrdPhase := "crd-install"
helmPrePhase := "pre-install"
helmPostPhase := "post-install"
wait := true
create := true
force := true
err = h.runHooks(helmHooks, helmCrdPhase, ns, chart, releaseName, wait, create, force)
if err != nil {
return err
}
err = h.runHooks(helmHooks, helmPrePhase, ns, chart, releaseName, wait, create, force)
if err != nil {
return err
}
err = h.kubectlApply(ns, releaseName, wait, create, force, outputDir)
if err != nil {
h.deleteHooks(helmHooks, helmPrePhase, hookFailed, ns)
return err
}
log.Logger().Info("")
h.deleteHooks(helmHooks, helmPrePhase, hookSucceeded, ns)
err = h.runHooks(helmHooks, helmPostPhase, ns, chart, releaseName, wait, create, force)
if err != nil {
h.deleteHooks(helmHooks, helmPostPhase, hookFailed, ns)
return err
}
err = h.deleteHooks(helmHooks, helmPostPhase, hookSucceeded, ns)
err2 := h.deleteOldResources(ns, releaseName, versionText, wait)
log.Logger().Info("")
return util.CombineErrors(err, err2)
}
// FetchChart fetches a Helm Chart
func (h *HelmTemplate) FetchChart(chart string, version string, untar bool, untardir string, repo string,
username string, password string) error {
_, err := h.fetchChart(chart, version, untardir, repo, username, password)
return err
}
// UpgradeChart upgrades a helm chart according with given helm flags
func (h *HelmTemplate) UpgradeChart(chart string, releaseName string, ns string, version string, install bool, timeout int, force bool, wait bool, values []string, valueFiles []string, repo string, username string, password string) error {
err := h.clearOutputDir(releaseName)
if err != nil {
return err
}
	outputDir, _, chartsDir, err := h.getDirectories(releaseName)
	if err != nil {
		return err
	}
// check if we are installing a chart from the filesystem
chartDir := filepath.Join(h.CWD, chart)
exists, err := util.FileExists(chartDir)
if err != nil {
return err
}
if !exists {
log.Logger().Debugf("Fetching chart: %s", chart)
chartDir, err = h.fetchChart(chart, version, chartsDir, repo, username, password)
if err != nil {
return err
}
}
err = h.Client.Template(chartDir, releaseName, ns, outputDir, false, values, valueFiles)
if err != nil {
return err
}
// Skip the chart when no resources are generated by the template
if empty, err := util.IsEmpty(outputDir); empty || err != nil {
return nil
}
metadata, versionText, err := h.getChart(chartDir, version)
if err != nil {
return err
}
helmHooks, err := h.addLabelsToFiles(chart, releaseName, versionText, metadata, ns)
if err != nil {
return err
}
helmCrdPhase := "crd-install"
helmPrePhase := "pre-upgrade"
helmPostPhase := "post-upgrade"
create := false
err = h.runHooks(helmHooks, helmCrdPhase, ns, chart, releaseName, wait, create, force)
if err != nil {
return err
}
err = h.runHooks(helmHooks, helmPrePhase, ns, chart, releaseName, wait, create, force)
if err != nil {
return err
}
err = h.kubectlApply(ns, releaseName, wait, create, force, outputDir)
if err != nil {
h.deleteHooks(helmHooks, helmPrePhase, hookFailed, ns)
return err
}
h.deleteHooks(helmHooks, helmPrePhase, hookSucceeded, ns)
err = h.runHooks(helmHooks, helmPostPhase, ns, chart, releaseName, wait, create, force)
if err != nil {
h.deleteHooks(helmHooks, helmPostPhase, hookFailed, ns)
return err
}
err = h.deleteHooks(helmHooks, helmPostPhase, hookSucceeded, ns)
err2 := h.deleteOldResources(ns, releaseName, versionText, wait)
return util.CombineErrors(err, err2)
}
func (h *HelmTemplate) DecryptSecrets(location string) error {
return h.Client.DecryptSecrets(location)
}
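// kubectlApply applies the rendered chart output via kubectl, handling an optional per-namespace subdirectory layout.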
func (h *HelmTemplate) kubectlApply(ns string, releaseName string, wait bool, create bool, force bool, dir string) error {
// does namespaces dir exist?
namespacesDir := filepath.Join(dir, "namespaces")
if _, err := os.Stat(namespacesDir); !os.IsNotExist(err) {
fileInfo, err := ioutil.ReadDir(namespacesDir)
if err != nil {
return errors.Wrapf(err, "unable to locate subdirs in %s", namespacesDir)
}
for _, path := range fileInfo {
namespace := filepath.Base(path.Name())
fullPath := filepath.Join(namespacesDir, path.Name())
log.Logger().Debugf("Applying generated chart '%s' YAML via kubectl in dir: %s to namespace %s", releaseName, fullPath, namespace)
command := "apply"
if create {
command = "create"
}
args := []string{command, "--recursive", "-f", fullPath, "-l", LabelReleaseName + "=" + releaseName}
applyNs := namespace
if applyNs == "" {
applyNs = ns
}
if applyNs != "" {
args = append(args, "--namespace", applyNs)
}
if wait && !create {
args = append(args, "--wait")
}
if !h.KubectlValidate {
args = append(args, "--validate=false")
}
err = h.runKubectl(args...)
if err != nil {
return err
}
log.Logger().Info("")
}
return err
}
log.Logger().Debugf("Applying generated chart '%s' YAML via kubectl in dir: %s to namespace %s", releaseName, dir, ns)
command := "apply"
if create {
command = "create"
}
args := []string{command, "--recursive", "-f", dir, "-l", LabelReleaseName + "=" + releaseName}
if ns != "" {
args = append(args, "--namespace", ns)
}
if wait && !create {
args = append(args, "--wait")
}
if force {
args = append(args, "--force")
}
if !h.KubectlValidate {
args = append(args, "--validate=false")
}
err := h.runKubectl(args...)
if err != nil {
return err
}
log.Logger().Info("")
return nil
}
func (h *HelmTemplate) kubectlApplyFile(ns string, helmHook string, wait bool, create bool, force bool, file string) error {
log.Logger().Debugf("Applying Helm hook %s YAML via kubectl in file: %s", helmHook, file)
command := "apply"
if create {
command = "create"
}
args := []string{command, "-f", file}
if ns != "" {
args = append(args, "--namespace", ns)
}
if wait && !create {
args = append(args, "--wait")
}
if force {
args = append(args, "--force")
}
if !h.KubectlValidate {
args = append(args, "--validate=false")
}
err := h.runKubectl(args...)
log.Logger().Info("")
return err
}
func (h *HelmTemplate) kubectlDeleteFile(ns string, file string) error {
log.Logger().Debugf("Deleting helm hook sources from file: %s", file)
return h.runKubectl("delete", "-f", file, "--namespace", ns, "--wait")
}
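// deleteOldResources removes resources labeled with earlier chart versions of the same release.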
func (h *HelmTemplate) deleteOldResources(ns string, releaseName string, versionText string, wait bool) error {
selector := LabelReleaseName + "=" + releaseName + "," + LabelReleaseChartVersion + "!=" + versionText
return h.deleteResourcesAndClusterResourcesBySelector(ns, selector, wait, "older releases")
}
func (h *HelmTemplate) deleteResourcesAndClusterResourcesBySelector(ns string, selector string, wait bool, message string) error {
kinds := []string{"all", "pvc", "configmap", "release", "sa", "role", "rolebinding", "secret"}
clusterKinds := []string{"clusterrole", "clusterrolebinding"}
errList := []error{}
log.Logger().Debugf("Removing Kubernetes resources from %s using selector: %s from %s", message, util.ColorInfo(selector), strings.Join(kinds, " "))
errs := h.deleteResourcesBySelector(ns, kinds, selector, wait)
errList = append(errList, errs...)
selector += "," + LabelNamespace + "=" + ns
log.Logger().Debugf("Removing Kubernetes resources from %s using selector: %s from %s", message, util.ColorInfo(selector), strings.Join(clusterKinds, " "))
errs = h.deleteResourcesBySelector("", clusterKinds, selector, wait)
errList = append(errList, errs...)
return util.CombineErrors(errList...)
}
func (h *HelmTemplate) deleteResourcesBySelector(ns string, kinds []string, selector string, wait bool) []error {
errList := []error{}
for _, kind := range kinds {
args := []string{"delete", kind, "--ignore-not-found", "-l", selector}
if ns != "" {
args = append(args, "--namespace", ns)
}
if wait {
args = append(args, "--wait")
}
output, err := h.runKubectlWithOutput(args...)
if err != nil {
errList = append(errList, err)
} else {
output = strings.TrimSpace(output)
if output != "No resources found" {
log.Logger().Info(output)
}
}
}
return errList
}
// isClusterKind returns true if the kind or resource name is a cluster wide resource
func isClusterKind(kind string) bool {
lower := strings.ToLower(kind)
return strings.HasPrefix(lower, "cluster") || strings.HasPrefix(lower, "namespace")
}
// DeleteRelease removes the given release
func (h *HelmTemplate) DeleteRelease(ns string, releaseName string, purge bool) error {
if ns == "" {
ns = h.Namespace
}
selector := LabelReleaseName + "=" + releaseName
return h.deleteResourcesAndClusterResourcesBySelector(ns, selector, true, fmt.Sprintf("release %s", releaseName))
}
// StatusRelease returns the output of the helm status command for a given release
func (h *HelmTemplate) StatusRelease(ns string, releaseName string) error {
releases, _, err := h.ListReleases(ns)
if err != nil {
return errors.Wrap(err, "listing current chart releases")
}
if _, ok := releases[releaseName]; ok {
return nil
}
return fmt.Errorf("chart release %q not found", releaseName)
}
// StatusReleaseWithOutput returns the output of the helm status command for a given release
func (h *HelmTemplate) StatusReleaseWithOutput(ns string, releaseName string, outputFormat string) (string, error) {
return h.Client.StatusReleaseWithOutput(ns, releaseName, outputFormat)
}
func (h *HelmTemplate) getDirectories(releaseName string) (string, string, string, error) {
if releaseName == "" {
return "", "", "", fmt.Errorf("No release name specified!")
}
if h.WorkDir == "" {
var err error
h.WorkDir, err = ioutil.TempDir("", "helm-template-workdir-")
if err != nil {
return "", "", "", errors.Wrap(err, "Failed to create temporary directory for helm template workdir")
}
}
workDir := h.WorkDir
outDir := filepath.Join(workDir, releaseName, "output")
helmHookDir := filepath.Join(workDir, releaseName, "helmHooks")
chartsDir := filepath.Join(workDir, releaseName, "chartFiles")
dirs := []string{outDir, helmHookDir, chartsDir}
for _, d := range dirs {
err := os.MkdirAll(d, util.DefaultWritePermissions)
if err != nil {
return "", "", "", err
}
}
return outDir, helmHookDir, chartsDir, nil
}
// clearOutputDir removes all files in the helm output dir
func (h *HelmTemplate) clearOutputDir(releaseName string) error {
dir, helmDir, chartsDir, err := h.getDirectories(releaseName)
if err != nil {
return err
}
return util.RecreateDirs(dir, helmDir, chartsDir)
}
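// fetchChart fetches and untars the chart into dir via `helm fetch` unless the chart path already exists
// locally, returning the directory containing the unpacked chart.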
func (h *HelmTemplate) fetchChart(chart string, version string, dir string, repo string, username string,
password string) (string, error) {
exists, err := util.FileExists(chart)
if err != nil {
return "", err
}
if exists {
log.Logger().Infof("Chart dir already exists: %s", dir)
return chart, nil
}
if dir == "" {
return "", fmt.Errorf("must specify dir for chart %s", chart)
}
args := []string{
"fetch", "-d", dir, "--untar", chart,
}
if repo != "" {
args = append(args, "--repo", repo)
}
if version != "" {
args = append(args, "--version", version)
}
if username != "" {
args = append(args, "--username", username)
}
if password != "" {
args = append(args, "--password", password)
}
err = h.Client.runHelm(args...)
if err != nil {
return "", err
}
answer := dir
files, err := ioutil.ReadDir(dir)
if err != nil {
return "", err
}
for _, f := range files {
if f.IsDir() {
answer = filepath.Join(dir, f.Name())
break
}
}
log.Logger().Debugf("Fetched chart %s to dir %s", chart, answer)
return answer, nil
}
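// addLabelsToFiles labels the generated output files for the given release and extracts any helm hook
// templates, returning the discovered hooks.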
func (h *HelmTemplate) addLabelsToFiles(chart string, releaseName string, version string, metadata *chart.Metadata, ns string) ([]*HelmHook, error) {
dir, helmHookDir, _, err := h.getDirectories(releaseName)
if err != nil {
return nil, err
}
return addLabelsToChartYaml(dir, helmHookDir, chart, releaseName, version, metadata, ns)
}
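// splitObjectsInFiles splits a multi-document YAML file into one file per object, grouping the part files
// by namespace under baseDir, and returns the generated file names.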
func splitObjectsInFiles(inputFile string, baseDir string, relativePath, defaultNamespace string) ([]string, error) {
result := make([]string, 0)
f, err := os.Open(inputFile)
if err != nil {
return result, errors.Wrapf(err, "opening inputFile %q", inputFile)
}
defer f.Close()
scanner := bufio.NewScanner(f)
var buf bytes.Buffer
fileName := filepath.Base(inputFile)
count := 0
for scanner.Scan() {
line := scanner.Text()
if line == resourcesSeparator {
// ensure that we actually have YAML in the buffer
data := buf.Bytes()
if isWhitespaceOrComments(data) {
buf.Reset()
continue
}
m := yaml.MapSlice{}
err = yaml.Unmarshal(data, &m)
namespace := getYamlValueString(&m, "metadata", "namespace")
if namespace == "" {
namespace = defaultNamespace
}
if err != nil {
return make([]string, 0), errors.Wrapf(err, "Failed to parse the following YAML from inputFile '%s':\n%s", inputFile, buf.String())
}
if len(m) == 0 {
buf.Reset()
continue
}
partFile, err := writeObjectInFile(&buf, baseDir, relativePath, namespace, fileName, count)
if err != nil {
return result, errors.Wrapf(err, "saving object")
}
result = append(result, partFile)
buf.Reset()
			count++
} else {
_, err := buf.WriteString(line)
if err != nil {
return result, errors.Wrapf(err, "writing line from inputFile %q into a buffer", inputFile)
}
_, err = buf.WriteString("\n")
if err != nil {
return result, errors.Wrapf(err, "writing a new line in the buffer")
}
}
}
if buf.Len() > 0 && !isWhitespaceOrComments(buf.Bytes()) {
data := buf.Bytes()
m := yaml.MapSlice{}
err = yaml.Unmarshal(data, &m)
namespace := getYamlValueString(&m, "metadata", "namespace")
if namespace == "" {
namespace = defaultNamespace
}
partFile, err := writeObjectInFile(&buf, baseDir, relativePath, namespace, fileName, count)
if err != nil {
return result, errors.Wrapf(err, "saving object")
}
result = append(result, partFile)
}
return result, nil
}
// isWhitespaceOrComments returns true if the data is empty, whitespace or comments only
func isWhitespaceOrComments(data []byte) bool {
if len(data) == 0 {
return true
}
lines := strings.Split(string(data), "\n")
for _, line := range lines {
t := strings.TrimSpace(line)
if t != "" && !strings.HasPrefix(t, "#") {
return false
}
}
return true
}
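// writeObjectInFile writes the buffered YAML object to a numbered part file under
// baseDir/namespaces/<namespace> and returns its absolute path.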
func writeObjectInFile(buf io.WriterTo, baseDir string, relativePath, namespace string, fileName string, count int) (string, error) {
relativeDir := filepath.Dir(relativePath)
const filePrefix = "part"
partFile := fmt.Sprintf("%s%d-%s", filePrefix, count, fileName)
absFile := filepath.Join(baseDir, "namespaces", namespace, relativeDir, partFile)
absFileDir := filepath.Dir(absFile)
log.Logger().Debugf("creating file: %s", absFile)
err := os.MkdirAll(absFileDir, os.ModePerm)
if err != nil {
return "", errors.Wrapf(err, "creating directory %q", absFileDir)
}
file, err := os.Create(absFile)
if err != nil {
return "", errors.Wrapf(err, "creating file %q", absFile)
}
log.Logger().Debugf("writing data to %s", absFile)
defer file.Close()
_, err = buf.WriteTo(file)
if err != nil {
return "", errors.Wrapf(err, "writing object to file %q", absFile)
}
return absFile, nil
}
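// addLabelsToChartYaml walks the YAML files under basedir, splits them into individual objects, moves helm
// hook templates into hooksDir and adds release labels and chart annotations to the remaining resources.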
func addLabelsToChartYaml(basedir string, hooksDir string, chart string, releaseName string, version string, metadata *chart.Metadata, ns string) ([]*HelmHook, error) {
helmHooks := []*HelmHook{}
log.Logger().Debugf("Searching for yaml files from basedir %s", basedir)
err := filepath.Walk(basedir, func(path string, f os.FileInfo, err error) error {
ext := filepath.Ext(path)
if ext == ".yaml" {
file := path
relativePath, err := filepath.Rel(basedir, file)
if err != nil {
return errors.Wrapf(err, "unable to determine relative path %q", file)
}
partFiles, err := splitObjectsInFiles(file, basedir, relativePath, ns)
if err != nil {
return errors.Wrapf(err, "splitting objects from file %q", file)
}
log.Logger().Debugf("part files list: %v", partFiles)
for _, partFile := range partFiles {
log.Logger().Debugf("processing part file: %s", partFile)
data, err := ioutil.ReadFile(partFile)
if err != nil {
return errors.Wrapf(err, "Failed to load partFile %s", partFile)
}
m := yaml.MapSlice{}
err = yaml.Unmarshal(data, &m)
if err != nil {
return errors.Wrapf(err, "Failed to parse YAML of partFile %s", partFile)
}
kind := getYamlValueString(&m, "kind")
helmHookType := getYamlValueString(&m, "metadata", "annotations", "helm.sh/hook")
if helmHookType != "" {
helmHook, err := getHelmHookFromFile(basedir, path, hooksDir, helmHookType, kind, &m, partFile)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("when getting helm hook from part file '%s'", partFile))
}
helmHooks = append(helmHooks, helmHook)
} else {
err := processChartResource(partFile, data, kind, ns, releaseName, &m, metadata, version, chart)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("when processing chart resource '%s'", partFile))
}
}
}
}
return nil
})
return helmHooks, err
}
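// getHelmHookFromFile moves a helm hook template from the output directory into the hooks directory and
// returns it as a HelmHook.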
func getHelmHookFromFile(basedir string, path string, hooksDir string, helmHook string, kind string, m *yaml.MapSlice, partFile string) (*HelmHook, error) {
	// let's move any helm hooks to the new partFile
relPath, err := filepath.Rel(basedir, path)
if err != nil {
return &HelmHook{}, err
}
if relPath == "" {
return &HelmHook{}, fmt.Errorf("Failed to find relative path of basedir %s and path %s", basedir, partFile)
}
// add the hook type into the directory structure
newPath := filepath.Join(hooksDir, relPath)
newDir, _ := filepath.Split(newPath)
err = os.MkdirAll(newDir, util.DefaultWritePermissions)
if err != nil {
return &HelmHook{}, errors.Wrap(err, fmt.Sprintf("when creating '%s'", newDir))
}
// copy the hook part file to the hooks path
_, hookFileName := filepath.Split(partFile)
hookFile := filepath.Join(newDir, hookFileName)
err = os.Rename(partFile, hookFile)
if err != nil {
return &HelmHook{}, errors.Wrap(err, fmt.Sprintf("when copying from '%s' to '%s'", partFile, hookFile))
}
name := getYamlValueString(m, "metadata", "name")
helmDeletePolicy := getYamlValueString(m, "metadata", "annotations", "helm.sh/hook-delete-policy")
return NewHelmHook(kind, name, hookFile, helmHook, helmDeletePolicy), nil
}
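// processChartResource adds the release name, namespace and chart version labels plus chart name and app
// version annotations to a single resource file and writes it back to disk.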
func processChartResource(partFile string, data []byte, kind string, ns string, releaseName string, m *yaml.MapSlice, metadata *chart.Metadata, version string, chart string) error {
err := setYamlValue(m, releaseName, "metadata", "labels", LabelReleaseName)
if err != nil {
return errors.Wrapf(err, "Failed to modify YAML of partFile %s", partFile)
}
if !isClusterKind(kind) {
err = setYamlValue(m, ns, "metadata", "labels", LabelNamespace)
if err != nil {
return errors.Wrapf(err, "Failed to modify YAML of partFile %s", partFile)
}
}
err = setYamlValue(m, version, "metadata", "labels", LabelReleaseChartVersion)
if err != nil {
return errors.Wrapf(err, "Failed to modify YAML of partFile %s", partFile)
}
chartName := ""
if metadata != nil {
chartName = metadata.GetName()
appVersion := metadata.GetAppVersion()
if appVersion != "" {
err = setYamlValue(m, appVersion, "metadata", "annotations", AnnotationAppVersion)
if err != nil {
return errors.Wrapf(err, "Failed to modify YAML of partFile %s", partFile)
}
}
}
if chartName == "" {
chartName = chart
}
err = setYamlValue(m, chartName, "metadata", "annotations", AnnotationChartName)
if err != nil {
return errors.Wrapf(err, "Failed to modify YAML of partFile %s", partFile)
}
data, err = yaml.Marshal(m)
if err != nil {
return errors.Wrapf(err, "Failed to marshal YAML of partFile %s", partFile)
}
err = ioutil.WriteFile(partFile, data, util.DefaultWritePermissions)
if err != nil {
return errors.Wrapf(err, "Failed to write YAML partFile %s", partFile)
}
return nil
}
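// getYamlValueString returns the string value at the given key path in the MapSlice, or an empty string if
// it is missing or not a string.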
func getYamlValueString(mapSlice *yaml.MapSlice, keys ...string) string {
value := getYamlValue(mapSlice, keys...)
answer, ok := value.(string)
if ok {
return answer
}
return ""
}
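// getYamlValue navigates the nested MapSlice along the given key path and returns the value found, or nil
// if any key is missing.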
func getYamlValue(mapSlice *yaml.MapSlice, keys ...string) interface{} {
if mapSlice == nil {
return nil
}
m := mapSlice
lastIdx := len(keys) - 1
for idx, k := range keys {
last := idx >= lastIdx
found := false
for _, mi := range *m {
if mi.Key == k {
found = true
if last {
return mi.Value
} else {
value := mi.Value
if value == nil {
return nil
} else {
v, ok := value.(yaml.MapSlice)
if ok {
m = &v
} else {
v2, ok := value.(*yaml.MapSlice)
if ok {
m = v2
} else {
return nil
}
}
}
}
}
}
if !found {
return nil
}
}
return nil
}
// setYamlValue navigates through the YAML object structure lazily creating or inserting new values
func setYamlValue(mapSlice *yaml.MapSlice, value string, keys ...string) error {
if mapSlice == nil {
return fmt.Errorf("No map input!")
}
m := mapSlice
lastIdx := len(keys) - 1
for idx, k := range keys {
last := idx >= lastIdx
found := false
for i, mi := range *m {
if mi.Key == k {
found = true
if last {
(*m)[i].Value = value
} else if i < len(*m) {
value := (*m)[i].Value
if value == nil {
v := &yaml.MapSlice{}
(*m)[i].Value = v
m = v
} else {
v, ok := value.(yaml.MapSlice)
if ok {
m2 := &yaml.MapSlice{}
*m2 = append(*m2, v...)
(*m)[i].Value = m2
m = m2
} else {
v2, ok := value.(*yaml.MapSlice)
if ok {
m2 := &yaml.MapSlice{}
*m2 = append(*m2, *v2...)
(*m)[i].Value = m2
m = m2
} else {
return fmt.Errorf("Could not convert key %s value %#v to a yaml.MapSlice", k, value)
}
}
}
}
}
}
if !found {
if last {
*m = append(*m, yaml.MapItem{
Key: k,
Value: value,
})
} else {
m2 := &yaml.MapSlice{}
*m = append(*m, yaml.MapItem{
Key: k,
Value: m2,
})
m = m2
}
}
}
return nil
}
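// runKubectl runs the configured kubectl binary with the given arguments, logging its output at debug level.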
func (h *HelmTemplate) runKubectl(args ...string) error {
h.Runner.SetDir(h.CWD)
h.Runner.SetName(h.Binary)
h.Runner.SetArgs(args)
output, err := h.Runner.RunWithoutRetry()
log.Logger().Debugf(output)
return err
}
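// runKubectlWithOutput runs the configured kubectl binary with the given arguments and returns its output.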
func (h *HelmTemplate) runKubectlWithOutput(args ...string) (string, error) {
h.Runner.SetDir(h.CWD)
h.Runner.SetName(h.Binary)
h.Runner.SetArgs(args)
return h.Runner.RunWithoutRetry()
}
// getChartNameAndVersion returns the chart name and version for the current chart folder
func (h *HelmTemplate) getChartNameAndVersion(chartDir string, version *string) (string, string, error) {
versionText := ""
if version != nil && *version != "" {
versionText = *version
}
file := filepath.Join(chartDir, ChartFileName)
if !filepath.IsAbs(chartDir) {
file = filepath.Join(h.Runner.CurrentDir(), file)
}
exists, err := util.FileExists(file)
if err != nil {
return "", versionText, err
}
if !exists {
return "", versionText, fmt.Errorf("No file %s found!", file)
}
chartName, versionText, err := LoadChartNameAndVersion(file)
return chartName, versionText, err
}
// getChart returns the chart metadata for the given dir
func (h *HelmTemplate) getChart(chartDir string, version string) (*chart.Metadata, string, error) {
file := filepath.Join(chartDir, ChartFileName)
if !filepath.IsAbs(chartDir) {
file = filepath.Join(h.Runner.CurrentDir(), file)
}
exists, err := util.FileExists(file)
if err != nil {
return nil, version, err
}
if !exists {
return nil, version, fmt.Errorf("no file %s found!", file)
}
metadata, err := chartutil.LoadChartfile(file)
if version == "" && metadata != nil {
version = metadata.GetVersion()
}
return metadata, version, err
}
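// runHooks applies all hook files matching the given helm hook phase to the cluster via kubectl.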
func (h *HelmTemplate) runHooks(hooks []*HelmHook, hookPhase string, ns string, chart string, releaseName string, wait bool, create bool, force bool) error {
matchingHooks := MatchingHooks(hooks, hookPhase, "")
for _, hook := range matchingHooks {
err := h.kubectlApplyFile(ns, hookPhase, wait, create, force, hook.File)
if err != nil {
return err
}
}
return nil
}
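// deleteHooks waits for any matching hook Jobs to complete and then deletes the hook resources, unless the
// JX_DISABLE_DELETE_HELM_HOOKS environment variable is set to true.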
func (h *HelmTemplate) deleteHooks(hooks []*HelmHook, hookPhase string, hookDeletePolicy string, ns string) error {
flag := os.Getenv("JX_DISABLE_DELETE_HELM_HOOKS")
matchingHooks := MatchingHooks(hooks, hookPhase, hookDeletePolicy)
for _, hook := range matchingHooks {
kind := hook.Kind
name := hook.Name
if kind == "Job" && name != "" {
log.Logger().Debugf("Waiting for helm %s hook Job %s to complete before removing it", hookPhase, name)
err := kube.WaitForJobToComplete(h.KubeClient, ns, name, time.Minute*30, false)
if err != nil {
log.Logger().Warnf("Job %s has not yet terminated for helm hook phase %s due to: %s so removing it anyway", name, hookPhase, err)
}
} else {
log.Logger().Warnf("Could not wait for hook resource to complete as it is kind %s and name %s for phase %s", kind, name, hookPhase)
}
if flag == "true" {
log.Logger().Infof("Not deleting the Job %s as we have the $JX_DISABLE_DELETE_HELM_HOOKS enabled", name)
continue
}
err := h.kubectlDeleteFile(ns, hook.File)
if err != nil {
return err
}
}
return nil
}
// NewHelmHook returns a newly created HelmHook
func NewHelmHook(kind string, name string, file string, hook string, hookDeletePolicy string) *HelmHook {
return &HelmHook{
Kind: kind,
Name: name,
File: file,
Hooks: strings.Split(hook, ","),
HookDeletePolicies: strings.Split(hookDeletePolicy, ","),
}
}
// MatchingHooks returns the hooks which declare the given hook name and, if hookDeletePolicy is not blank, the given hook delete policy too
func MatchingHooks(hooks []*HelmHook, hook string, hookDeletePolicy string) []*HelmHook {
answer := []*HelmHook{}
for _, h := range hooks {
if util.StringArrayIndex(h.Hooks, hook) >= 0 &&
(hookDeletePolicy == "" || util.StringArrayIndex(h.HookDeletePolicies, hookDeletePolicy) >= 0) {
answer = append(answer, h)
}
}
return answer
}
| [
"\"JX_DISABLE_DELETE_HELM_HOOKS\""
]
| []
| [
"JX_DISABLE_DELETE_HELM_HOOKS"
]
| [] | ["JX_DISABLE_DELETE_HELM_HOOKS"] | go | 1 | 0 | |
log_debug.go | //go:build debug
// +build debug
package amqp
import "log"
import "os"
import "strconv"
var (
debugLevel = 1
logger = log.New(os.Stderr, "", log.Lmicroseconds)
)
func init() {
level, err := strconv.Atoi(os.Getenv("DEBUG_LEVEL"))
if err != nil {
return
}
debugLevel = level
}
func debug(level int, format string, v ...interface{}) {
if level <= debugLevel {
logger.Printf(format, v...)
}
}
| [
"\"DEBUG_LEVEL\""
]
| []
| [
"DEBUG_LEVEL"
]
| [] | ["DEBUG_LEVEL"] | go | 1 | 0 | |
src/pretalx/settings.py | import os
import sys
from contextlib import suppress
from pathlib import Path
from urllib.parse import urlparse
from django.contrib.messages import constants as messages
from django.utils.crypto import get_random_string
from django.utils.translation import gettext_lazy as _
from pkg_resources import iter_entry_points
from pretalx import __version__
from pretalx.common.settings.config import build_config
from pretalx.common.settings.utils import log_initial
config, CONFIG_FILES = build_config()
CONFIG = config
##
# This settings file is rather lengthy. It follows this structure:
# Directories, Apps, Url, Security, Databases, Logging, Email, Caching (and Sessions)
# I18n, Auth, Middleware, Templates and Staticfiles, External Apps
#
# Search for "## {AREA} SETTINGS" to navigate this file
##
DEBUG = config.getboolean("site", "debug")
## DIRECTORY SETTINGS
BASE_DIR = Path(config.get("filesystem", "base"))
DATA_DIR = Path(
config.get(
"filesystem",
"data",
fallback=os.environ.get("PRETALX_DATA_DIR", BASE_DIR / "data"),
)
)
LOG_DIR = Path(config.get("filesystem", "logs", fallback=DATA_DIR / "logs"))
MEDIA_ROOT = Path(config.get("filesystem", "media", fallback=DATA_DIR / "media"))
STATIC_ROOT = Path(
config.get(
"filesystem",
"static",
fallback=BASE_DIR / "static.dist",
)
)
HTMLEXPORT_ROOT = Path(
config.get(
"filesystem",
"htmlexport",
fallback=DATA_DIR / "htmlexport",
)
)
for directory in (BASE_DIR, DATA_DIR, LOG_DIR, MEDIA_ROOT, HTMLEXPORT_ROOT):
directory.mkdir(parents=True, exist_ok=True)
## APP SETTINGS
DJANGO_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
]
EXTERNAL_APPS = [
"compressor",
"djangoformsetjs",
"django_filters",
"jquery",
"rest_framework.authtoken",
"rules",
]
LOCAL_APPS = [
"pretalx.api",
"pretalx.common",
"pretalx.event",
"pretalx.mail",
"pretalx.person",
"pretalx.schedule",
"pretalx.submission",
"pretalx.agenda",
"pretalx.cfp",
"pretalx.orga",
]
FALLBACK_APPS = [
"bootstrap4",
"django.forms",
"rest_framework",
]
INSTALLED_APPS = DJANGO_APPS + EXTERNAL_APPS + LOCAL_APPS + FALLBACK_APPS
PLUGINS = []
for entry_point in iter_entry_points(group="pretalx.plugin", name=None):
PLUGINS.append(entry_point.module_name)
INSTALLED_APPS.append(entry_point.module_name)
CORE_MODULES = LOCAL_APPS + [
module for module in config.get("site", "core_modules").split(",") if module
]
## PLUGIN SETTINGS
PLUGIN_SETTINGS = {}
for section in config.sections():
if section.startswith("plugin:"):
PLUGIN_SETTINGS[section[len("plugin:") :]] = dict(config.items(section))
## URL SETTINGS
SITE_URL = config.get("site", "url", fallback="http://localhost")
SITE_NETLOC = urlparse(SITE_URL).netloc
ALLOWED_HOSTS = [
"*"
] # We have our own security middleware to allow for custom event URLs
ROOT_URLCONF = "pretalx.urls"
STATIC_URL = config.get("site", "static")
MEDIA_URL = config.get("site", "media")
FILE_UPLOAD_DIRECTORY_PERMISSIONS = 0o755
FILE_UPLOAD_DEFAULT_LIMIT = 10 * 1024 * 1024
IMAGE_DEFAULT_MAX_WIDTH = 1920
IMAGE_DEFAULT_MAX_HEIGHT = 1080
## SECURITY SETTINGS
SECURE_BROWSER_XSS_FILTER = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_REFERRER_POLICY = "strict-origin-when-cross-origin"
CSP_DEFAULT_SRC = "'self'"
CSP_SCRIPT_SRC = ("'self'",)
CSP_STYLE_SRC = ("'self'", "'unsafe-inline'")
CSP_IMG_SRC = ("'self'", "data:")
CSP_BASE_URI = "'none'"
CSP_FORM_ACTION = "'self'"
CSRF_COOKIE_NAME = "pretalx_csrftoken"
CSRF_TRUSTED_ORIGINS = [urlparse(SITE_URL).hostname]
SESSION_COOKIE_NAME = "pretalx_session"
SESSION_COOKIE_HTTPONLY = True
if config.get("site", "cookie_domain"):
SESSION_COOKIE_DOMAIN = CSRF_COOKIE_DOMAIN = config.get("site", "cookie_domain")
SESSION_COOKIE_SECURE = config.getboolean(
"site", "https", fallback=SITE_URL.startswith("https:")
)
if config.has_option("site", "secret"):
SECRET_KEY = config.get("site", "secret")
else:
SECRET_FILE = DATA_DIR / ".secret"
if SECRET_FILE.exists():
with SECRET_FILE.open() as f:
SECRET_KEY = f.read().strip()
else:
chars = "abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)"
SECRET_KEY = get_random_string(50, chars)
with SECRET_FILE.open(mode="w") as f:
SECRET_FILE.chmod(0o600)
with suppress(Exception): # chown is not available on all platforms
os.chown(SECRET_FILE, os.getuid(), os.getgid())
f.write(SECRET_KEY)
## TASK RUNNER SETTINGS
HAS_CELERY = bool(config.get("celery", "broker", fallback=None))
if HAS_CELERY:
CELERY_BROKER_URL = config.get("celery", "broker")
CELERY_RESULT_BACKEND = config.get("celery", "backend")
else:
CELERY_TASK_ALWAYS_EAGER = True
## DATABASE SETTINGS
db_backend = config.get("database", "backend")
db_name = config.get("database", "name", fallback=str(DATA_DIR / "db.sqlite3"))
if db_backend == "mysql":
db_opts = {
"charset": "utf8mb4",
"use_unicode": True,
"init_command": "SET character_set_connection=utf8mb4,collation_connection=utf8mb4_unicode_ci;",
}
else:
db_opts = {}
DATABASES = {
"default": {
"ENGINE": "django.db.backends." + db_backend,
"NAME": db_name,
"USER": config.get("database", "user"),
"PASSWORD": config.get("database", "password"),
"HOST": config.get("database", "host"),
"PORT": config.get("database", "port"),
"CONN_MAX_AGE": 0 if db_backend == "sqlite3" or HAS_CELERY else 120,
"OPTIONS": db_opts,
"TEST": {
"CHARSET": "utf8mb4",
"COLLATION": "utf8mb4_unicode_ci",
}
if "mysql" in db_backend
else {},
}
}
## LOGGING SETTINGS
loglevel = "DEBUG" if DEBUG else "INFO"
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"default": {
"format": "%(levelname)s %(asctime)s %(name)s %(module)s %(message)s"
}
},
"handlers": {
"console": {
"level": loglevel,
"class": "logging.StreamHandler",
"formatter": "default",
},
"file": {
"level": loglevel,
"class": "logging.FileHandler",
"filename": LOG_DIR / "pretalx.log",
"formatter": "default",
},
"null": {
"class": "logging.NullHandler",
},
},
"loggers": {
"": {"handlers": ["file", "console"], "level": loglevel, "propagate": True},
"django.request": {
"handlers": ["file", "console"],
"level": loglevel,
"propagate": False,
},
"django.security": {
"handlers": ["file", "console"],
"level": loglevel,
"propagate": True,
},
"django.security.DisallowedHost": {
"handlers": ["null"],
"propagate": False,
},
"django.db.backends": {
"handlers": ["file", "console"],
"level": "INFO", # Do not output all the queries
"propagate": True,
},
},
}
email_level = config.get("logging", "email_level", fallback="ERROR") or "ERROR"
emails = config.get("logging", "email", fallback="").split(",")
DEFAULT_EXCEPTION_REPORTER = "pretalx.common.exceptions.PretalxExceptionReporter"
MANAGERS = ADMINS = [(email, email) for email in emails if email]
if ADMINS:
LOGGING["handlers"]["mail_admins"] = {
"level": email_level,
"class": "pretalx.common.exceptions.PretalxAdminEmailHandler",
}
LOGGING["loggers"]["django.request"]["handlers"].append("mail_admins")
## EMAIL SETTINGS
MAIL_FROM = SERVER_EMAIL = DEFAULT_FROM_EMAIL = config.get("mail", "from")
if DEBUG:
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
else:
EMAIL_HOST = config.get("mail", "host")
EMAIL_PORT = config.get("mail", "port")
EMAIL_HOST_USER = config.get("mail", "user")
EMAIL_HOST_PASSWORD = config.get("mail", "password")
EMAIL_USE_TLS = config.getboolean("mail", "tls")
EMAIL_USE_SSL = config.getboolean("mail", "ssl")
## CACHE SETTINGS
CACHES = {"default": {"BACKEND": "django.core.cache.backends.dummy.DummyCache"}}
REAL_CACHE_USED = False
SESSION_ENGINE = None
HAS_MEMCACHED = bool(os.getenv("PRETALX_MEMCACHE", ""))
if HAS_MEMCACHED:
REAL_CACHE_USED = True
CACHES["default"] = {
"BACKEND": "django.core.cache.backends.memcached.PyLibMCCache",
"LOCATION": os.getenv("PRETALX_MEMCACHE"),
}
HAS_REDIS = config.get("redis", "location") != "False"
if HAS_REDIS:
CACHES["redis"] = {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": config.get("redis", "location"),
"OPTIONS": {"CLIENT_CLASS": "django_redis.client.DefaultClient"},
}
CACHES["redis_sessions"] = {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": config.get("redis", "location"),
"TIMEOUT": 3600 * 24 * 30,
"OPTIONS": {"CLIENT_CLASS": "django_redis.client.DefaultClient"},
}
if not HAS_MEMCACHED:
CACHES["default"] = CACHES["redis"]
REAL_CACHE_USED = True
if config.getboolean("redis", "session"):
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_CACHE_ALIAS = "redis_sessions"
if not SESSION_ENGINE:
if REAL_CACHE_USED:
SESSION_ENGINE = "django.contrib.sessions.backends.cached_db"
else:
SESSION_ENGINE = "django.contrib.sessions.backends.db"
MESSAGE_STORAGE = "django.contrib.messages.storage.session.SessionStorage"
MESSAGE_TAGS = {
messages.INFO: "info",
messages.ERROR: "danger",
messages.WARNING: "warning",
messages.SUCCESS: "success",
}
## I18N SETTINGS
USE_I18N = True
USE_L10N = True
USE_TZ = True
TIME_ZONE = config.get("locale", "time_zone")
LOCALE_PATHS = (Path(__file__).resolve().parent / "locale",)
FORMAT_MODULE_PATH = ["pretalx.common.formats"]
LANGUAGE_CODE = config.get("locale", "language_code")
LANGUAGES_INFORMATION = {
"en": {
"name": _("English"),
"natural_name": "English",
"official": True,
"percentage": 100,
},
"de": {
"name": _("German"),
"natural_name": "Deutsch",
"official": True,
"percentage": 100,
},
"de-formal": {
"name": _("German (formal)"),
"natural_name": "Deutsch",
"official": True,
"percentage": 100,
"public_code": "de",
},
"fr": {
"name": _("French"),
"natural_name": "Français",
"official": False,
"percentage": 93,
},
"zh-tw": {
"name": _("Traditional Chinese (Taiwan)"),
"natural_name": "Traditional Chinese (Taiwan)",
"official": False,
"percentage": 73,
},
"ja-JP": {
"name": _("Japanese"),
"natural_name": "Japanese",
"official": False,
"percentage": 96,
"public_code": "jp",
},
}
for section in config.sections():
# Plugins can add languages, which will not be visible
# without the providing plugin being activated
if section.startswith("language:"):
language_code = section[len("language:") :]
LANGUAGES_INFORMATION[language_code] = {
"name": config.get(section, "name"),
"public_code": config.get(section, "public_code", fallback=None)
or language_code,
"natural_name": config.get(section, "name"),
"visible": False,
"official": False,
"percentage": None,
}
for code, language in LANGUAGES_INFORMATION.items():
language["code"] = code
LANGUAGES = [
(language["code"], language["name"]) for language in LANGUAGES_INFORMATION.values()
]
## AUTHENTICATION SETTINGS
AUTH_USER_MODEL = "person.User"
LOGIN_URL = "/orga/login"
AUTHENTICATION_BACKENDS = (
"rules.permissions.ObjectPermissionBackend",
"django.contrib.auth.backends.ModelBackend",
"pretalx.common.auth.AuthenticationTokenBackend",
)
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
## MIDDLEWARE SETTINGS
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware", # Security first
"whitenoise.middleware.WhiteNoiseMiddleware", # Next up: static files
"django.middleware.common.CommonMiddleware", # Set some sensible defaults, now, before responses are modified
"pretalx.common.middleware.SessionMiddleware", # Add session handling
"django.contrib.auth.middleware.AuthenticationMiddleware", # Uses sessions
"pretalx.common.auth.AuthenticationTokenMiddleware", # Make auth tokens work
"pretalx.common.middleware.MultiDomainMiddleware", # Check which host is used and if it is valid
"pretalx.common.middleware.EventPermissionMiddleware", # Sets locales, request.event, available events, etc.
"pretalx.common.middleware.CsrfViewMiddleware", # Protect against CSRF attacks before forms/data are processed
"django.contrib.messages.middleware.MessageMiddleware", # Uses sessions
"django.middleware.clickjacking.XFrameOptionsMiddleware", # Protects against clickjacking
"csp.middleware.CSPMiddleware", # Modifies/sets CSP headers
]
## TEMPLATE AND STATICFILES SETTINGS
template_loaders = (
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
)
if not DEBUG:
template_loaders = (("django.template.loaders.cached.Loader", template_loaders),)
FORM_RENDERER = "django.forms.renderers.TemplatesSetting"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [
DATA_DIR / "templates",
BASE_DIR / "templates",
],
"OPTIONS": {
"context_processors": [
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.request",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"pretalx.agenda.context_processors.is_html_export",
"pretalx.common.context_processors.add_events",
"pretalx.common.context_processors.locale_context",
"pretalx.common.context_processors.messages",
"pretalx.common.context_processors.system_information",
"pretalx.orga.context_processors.orga_events",
],
"loaders": template_loaders,
},
}
]
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
"compressor.finders.CompressorFinder",
)
static_path = BASE_DIR / "pretalx" / "static"
STATICFILES_DIRS = [static_path] if static_path.exists() else []
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
## EXTERNAL APP SETTINGS
with suppress(ImportError):
import django_extensions # noqa
INSTALLED_APPS.append("django_extensions")
if DEBUG:
with suppress(ImportError):
from debug_toolbar import settings as toolbar_settings # noqa
INTERNAL_IPS = ["127.0.0.1", "0.0.0.0", "::1"]
INSTALLED_APPS.append("debug_toolbar")
MIDDLEWARE.append("debug_toolbar.middleware.DebugToolbarMiddleware")
DEBUG_TOOLBAR_PATCH_SETTINGS = False
DEBUG_TOOLBAR_CONFIG = {
"JQUERY_URL": "",
"DISABLE_PANELS": toolbar_settings.PANELS_DEFAULTS,
}
BOOTSTRAP4 = {
"field_renderers": {
"default": "bootstrap4.renderers.FieldRenderer",
"inline": "bootstrap4.renderers.InlineFieldRenderer",
"event": "pretalx.common.forms.renderers.EventFieldRenderer",
"event-inline": "pretalx.common.forms.renderers.EventInlineFieldRenderer",
}
}
COMPRESS_ENABLED = COMPRESS_OFFLINE = not DEBUG
COMPRESS_PRECOMPILERS = (("text/x-scss", "django_libsass.SassCompiler"),)
COMPRESS_CSS_FILTERS = (
# CssAbsoluteFilter is incredibly slow, especially when dealing with our _flags.scss
    # However, we don't need it if we consistently use the static() function in Sass
# 'compressor.filters.css_default.CssAbsoluteFilter',
"compressor.filters.cssmin.CSSCompressorFilter",
)
REST_FRAMEWORK = {
"DEFAULT_RENDERER_CLASSES": (
"i18nfield.rest_framework.I18nJSONRenderer",
"rest_framework.renderers.BrowsableAPIRenderer",
),
"DEFAULT_AUTHENTICATION_CLASSES": (
"rest_framework.authentication.TokenAuthentication",
"rest_framework.authentication.SessionAuthentication",
),
"DEFAULT_PERMISSION_CLASSES": ("pretalx.api.permissions.ApiPermission",),
"DEFAULT_FILTER_BACKENDS": (
"rest_framework.filters.SearchFilter",
"django_filters.rest_framework.DjangoFilterBackend",
),
"DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.LimitOffsetPagination",
"PAGE_SIZE": 25,
"SEARCH_PARAM": "q",
"ORDERING_PARAM": "o",
"VERSIONING_PARAM": "v",
"DATETIME_FORMAT": "iso-8601",
}
if DEBUG:
REST_FRAMEWORK["COMPACT_JSON"] = False
WSGI_APPLICATION = "pretalx.wsgi.application"
PRETALX_VERSION = __version__
if DEBUG:
with suppress(Exception):
import subprocess
PRETALX_VERSION = (
subprocess.check_output(["/usr/bin/git", "describe", "--always"])
.decode()
.strip()
)
with suppress(ImportError):
from .override_settings import * # noqa
if "--no-pretalx-information" in sys.argv:
sys.argv.remove("--no-pretalx-information")
else:
log_initial(
debug=DEBUG,
config_files=CONFIG_FILES,
db_name=db_name,
db_backend=db_backend,
LOG_DIR=LOG_DIR,
plugins=PLUGINS,
)
| []
| []
| [
"PRETALX_MEMCACHE",
"PRETALX_DATA_DIR"
]
| [] | ["PRETALX_MEMCACHE", "PRETALX_DATA_DIR"] | python | 2 | 0 | |
LambdaHandler.go | // (C) Copyright 2019 Kira Systems
//
// This file contains the necessary code for an AWS lambda function
// that, when run, inspects a designated security group and deletes
// from it rules that
//
// a) have timestamps at the end of their description text that are
// older than the expiry duration relative to the point at which the
// code is invoked; or
//
// b) have missing or invalid timestamps in their descriptions.
//
// The security group is designated via the `SG_ID` environment
// variable, and the expiry duration is similarly supplied via the
// `EXPIRY` environment variable. The syntax for the duration is
// specified in the Go language's time.Duration documentation, but
// obvious permutations of things like "15m" and "1h10m30s" ought to
// work. The environment variable for the actual Lambda function are
// specified in the CloudFormation template.
//
package main
import (
"log"
"os"
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-lambda-go/lambda"
)
// The usual main function. Presently it has a simple check to see if
// it's being run from the commandline for debugging --- to set it in
// that mode, supply any argument on the commandline (its presence is
// what is checked-for, not its value).
func main() {
if len(os.Args) == 1 {
lambda.Start(LambdaHandler)
} else {
RunFromCommand()
}
}
// Primary function to set this code up to run as a Lambda function in
// AWS. Reads in the parameters from environment variables, creates
// an AWS API session from environment-attached credentials, and then
// calls the main logic.
func LambdaHandler() error {
sg_id := os.Getenv("SG_ID")
log.Printf("Starting cleanup for group %s", sg_id)
expiry := os.Getenv("EXPIRY")
duration, err := time.ParseDuration(expiry)
if err != nil {
log.Printf("Expiry string %s not a valid duration (see Golang time.Duration docs)", expiry)
return nil
}
svc := ec2.New(session.Must(session.NewSession()))
err = cleanSecurityGroup(svc, aws.String(sg_id), duration)
log.Printf("Finished cleanup for group %s", sg_id)
return err
}
// Secondary function to allow this code to be called on the
// commandline. Security group, expiry, region, and AWS credential
// profile are all presently hardcoded.
func RunFromCommand() {
sg_id := "sg-00112233445566778" // replace these...
expiry := "1m"
profile := "default"
region := "ca-central-1"
log.Printf("[Command Line Invocation] Starting cleanup for group %s", sg_id)
duration, err := time.ParseDuration(expiry)
if err != nil {
log.Printf("Expiry string %s not a valid duration (see Golang time.Duration docs)", expiry)
return
}
sess := session.Must(session.NewSession(&aws.Config{
Region: aws.String(region),
Credentials: credentials.NewSharedCredentials("", profile),
}))
svc := ec2.New(sess)
cleanSecurityGroup(svc, aws.String(sg_id), duration)
log.Printf("Finished cleanup for group %s", sg_id)
return
}
// The main logic that we want to run. This uses the AWS EC2 API to
// read in the details of the given security group, build a list of
// inbound rules that have expired, then delete those rules.
func cleanSecurityGroup(svc *ec2.EC2, sg_id *string, duration time.Duration) error {
// create parameter structure for the API call to restrict the
// result to only the security group that we're interested in
fetchParams := &ec2.DescribeSecurityGroupsInput{
GroupIds: []*string{sg_id},
}
result, err := svc.DescribeSecurityGroups(fetchParams)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case "InvalidGroupId.Malformed":
log.Printf("Security Group id %s is malformed: %s.", sg_id, aerr.Message())
case "InvalidGroup.NotFound":
log.Printf("Security Group id %s not found: %s.", sg_id, aerr.Message())
}
}
log.Printf("Unable to get description for security group %s, %v", sg_id, err)
return err
}
// Iterate through the inbound rules (the bare `IpPermissions`
// structure) of the first (and only) security group returned
deletable := []*ec2.IpPermission{}
for _, ipPerm := range result.SecurityGroups[0].IpPermissions {
// SG Rules are aggregated by port/protocol, with potentially multiple CIDR blocks associated as "child" structures
for _, ipRange := range ipPerm.IpRanges {
// guard checkExpired() from nil strings; lack of description is considered expired
if ipRange.Description == nil || checkExpired(*ipRange.Description, duration) {
if ipRange.Description == nil {
log.Printf("Will delete rule for %d/%s (no description)", *ipPerm.FromPort, *ipPerm.IpProtocol)
} else {
log.Printf("Will delete rule for %d/%s (%s)", *ipPerm.FromPort, *ipPerm.IpProtocol, *ipRange.Description)
}
deletable = append(deletable, &ec2.IpPermission{
FromPort: ipPerm.FromPort,
ToPort: ipPerm.ToPort,
IpProtocol: ipPerm.IpProtocol,
IpRanges: []*ec2.IpRange{ipRange},
})
}
}
}
// If there are things to delete, actually do so
if len(deletable) > 0 {
revokeInput := &ec2.RevokeSecurityGroupIngressInput{
GroupId: sg_id,
IpPermissions: deletable,
}
_, err := svc.RevokeSecurityGroupIngress(revokeInput)
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
switch aerr.Code() {
case "InvalidPermission.NotFound":
log.Printf("One or more of the rules targeted for deletion in %s was not found: %s.", sg_id, aerr.Message())
case "InvalidGroupId.Malformed":
log.Printf("Security Group id %s is malformed: %s.", sg_id, aerr.Message())
case "InvalidGroup.NotFound":
log.Printf("Security Group id %s not found: %s.", sg_id, aerr.Message())
}
}
log.Printf("Something failed revoking security group rules in %s, %v", sg_id, err)
return err
}
}
return nil
}
// checkExpired checks the description string from a security group rule to
// see if its timestamp has expired; missing or invalid-format timestamps are
// also treated as expired. Returns true when expired.
//
// The expected format is simply some text followed by a space then a
// RFC3339-format timestamp. For example,
// jcoleman 2019-03-25T19:26:09Z
// would be valid, but just
// 2019-03-25T19:26:09Z
// would not.
func checkExpired(desc string, duration time.Duration) bool {
if desc == "" {
return true
}
splits := strings.Split(desc, " ")
desc_ts := splits[len(splits)-1]
ts, err := time.Parse(time.RFC3339, desc_ts)
if err != nil {
// If the last element of the rule description is malformed, just delete the rule anyway
log.Printf("Malformed time \"%s\" in rule, considering it expired.", desc)
return true
}
return ts.Add(duration).Before(time.Now().UTC())
}
| [
"\"SG_ID\"",
"\"EXPIRY\""
]
| []
| [
"EXPIRY",
"SG_ID"
]
| [] | ["EXPIRY", "SG_ID"] | go | 2 | 0 | |
util/misc.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Misc functions, including distributed helpers.
Mostly copy-paste from torchvision references.
"""
import os
import subprocess
import time
from collections import defaultdict, deque
import datetime
import pickle
from typing import Optional, List
import torch
import torch.distributed as dist
from torch import Tensor
# needed due to empty tensor bug in pytorch and torchvision 0.5
import torchvision
if float(torchvision.__version__[:3]) < 0.7:
from torchvision.ops import _new_empty_tensor
from torchvision.ops.misc import _output_size
import wandb
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
# @property
# def measurable(self):
# return self.count > 0
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
# obtain Tensor size of each rank
local_size = torch.tensor([tensor.numel()], device="cuda")
size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda"))
if local_size != max_size:
padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda")
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
Reduce the values in the dictionary from all processes so that all processes
have the averaged results. Returns a dict with the same fields as
input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.all_reduce(values)
if average:
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
class MetricLogger(object):
def __init__(self, prefix, epoch, num_batches, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
self.prefix = prefix
self.epoch = epoch
self.num_batches = num_batches
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.4f}')
data_time = SmoothedValue(fmt='{avg:.4f}')
space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
if torch.cuda.is_available():
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}',
'max mem: {memory:.0f}'
])
else:
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
])
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
# Log every step to wandb
stats = {k: meter.global_avg for k, meter in self.meters.items()}
log_stats = {**{f'{self.prefix}_{k}': v for k, v in stats.items()},
'epoch': self.epoch,
'batch_step': i,
'step': self.epoch * self.num_batches + i
}
wandb.log(log_stats)
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('{} Total time: {} ({:.4f} s / it)'.format(
header, total_time_str, total_time / len(iterable)))
def get_sha():
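    """Return the current git sha, working tree status and branch as a log message."""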
cwd = os.path.dirname(os.path.abspath(__file__))
def _run(command):
return subprocess.check_output(command, cwd=cwd).decode('ascii').strip()
sha = 'N/A'
diff = "clean"
branch = 'N/A'
try:
sha = _run(['git', 'rev-parse', 'HEAD'])
subprocess.check_output(['git', 'diff'], cwd=cwd)
diff = _run(['git', 'diff-index', 'HEAD'])
diff = "has uncommited changes" if diff else "clean"
branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
except Exception:
pass
message = f"sha: {sha}, status: {diff}, branch: {branch}"
return message
def collate_fn(batch):
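    """Collate a batch of (image, target) pairs, padding the images into a NestedTensor."""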
batch = list(zip(*batch))
batch[0] = nested_tensor_from_tensor_list(batch[0])
return tuple(batch)
def _max_by_axis(the_list):
# type: (List[List[int]]) -> List[int]
maxes = the_list[0]
for sublist in the_list[1:]:
for index, item in enumerate(sublist):
maxes[index] = max(maxes[index], item)
return maxes
class NestedTensor(object):
def __init__(self, tensors, mask: Optional[Tensor]):
self.tensors = tensors
self.mask = mask
def to(self, device):
# type: (Device) -> NestedTensor # noqa
cast_tensor = self.tensors.to(device)
mask = self.mask
if mask is not None:
assert mask is not None
cast_mask = mask.to(device)
else:
cast_mask = None
return NestedTensor(cast_tensor, cast_mask)
def decompose(self):
return self.tensors, self.mask
def __repr__(self):
return str(self.tensors)
def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
# TODO make this more general
if tensor_list[0].ndim == 3:
if torchvision._is_tracing():
# nested_tensor_from_tensor_list() does not export well to ONNX
# call _onnx_nested_tensor_from_tensor_list() instead
return _onnx_nested_tensor_from_tensor_list(tensor_list)
# TODO make it support different-sized images
max_size = _max_by_axis([list(img.shape) for img in tensor_list])
# min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
batch_shape = [len(tensor_list)] + max_size
b, c, h, w = batch_shape
dtype = tensor_list[0].dtype
device = tensor_list[0].device
tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
for img, pad_img, m in zip(tensor_list, tensor, mask):
pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
m[: img.shape[1], :img.shape[2]] = False
else:
raise ValueError('not supported')
return NestedTensor(tensor, mask)
# _onnx_nested_tensor_from_tensor_list() is an implementation of
# nested_tensor_from_tensor_list() that is supported by ONNX tracing.
@torch.jit.unused
def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor:
max_size = []
for i in range(tensor_list[0].dim()):
max_size_i = torch.max(torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)).to(torch.int64)
max_size.append(max_size_i)
max_size = tuple(max_size)
# work around for
# pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
# m[: img.shape[1], :img.shape[2]] = False
# which is not yet supported in onnx
padded_imgs = []
padded_masks = []
for img in tensor_list:
padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]
padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0]))
padded_imgs.append(padded_img)
m = torch.zeros_like(img[0], dtype=torch.int, device=img.device)
padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1)
padded_masks.append(padded_mask.to(torch.bool))
tensor = torch.stack(padded_imgs)
mask = torch.stack(padded_masks)
return NestedTensor(tensor, mask=mask)
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
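    """Initialise torch.distributed from RANK/WORLD_SIZE/LOCAL_RANK or SLURM env vars, falling back to single-process mode."""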
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = args.rank % torch.cuda.device_count()
else:
print('Not using distributed mode')
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}'.format(
args.rank, args.dist_url), flush=True)
torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
torch.distributed.barrier()
setup_for_distributed(args.rank == 0)
@torch.no_grad()
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
if target.numel() == 0:
return [torch.zeros([], device=output.device)]
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None):
# type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor
"""
Equivalent to nn.functional.interpolate, but with support for empty batch sizes.
This will eventually be supported natively by PyTorch, and this
class can go away.
"""
if float(torchvision.__version__[:3]) < 0.7:
if input.numel() > 0:
return torch.nn.functional.interpolate(
input, size, scale_factor, mode, align_corners
)
output_shape = _output_size(2, input, size, scale_factor)
output_shape = list(input.shape[:-2]) + list(output_shape)
return _new_empty_tensor(input, output_shape)
else:
return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)
| []
| []
| [
"LOCAL_RANK",
"WORLD_SIZE",
"SLURM_PROCID",
"RANK"
]
| [] | ["LOCAL_RANK", "WORLD_SIZE", "SLURM_PROCID", "RANK"] | python | 4 | 0 | |
components/secrets-service/integration_test/global_test.go | //
// Author:: Salim Afiune <[email protected]>
// Copyright:: Copyright 2017, Chef Software Inc.
//
package integration_test
import (
"fmt"
"os"
"testing"
"github.com/chef/automate/components/secrets-service/config"
"github.com/chef/automate/components/secrets-service/dao"
"github.com/chef/automate/components/secrets-service/server"
)
// Global variables
var (
// The postgresql URL is coming from the environment variable POSTGRESQL_URL
postgresqlUrl = os.Getenv("POSTGRESQL_URL")
secretsDb = createDatabaseObject()
// This suite variable will be available for every single test as long as they
// belong to the 'integration_test' package.
suite = NewSuite(secretsDb)
	// A global secrets-service server instance so that any test can call
	// its rpc functions directly.
secretsServer = server.New(secretsDb)
)
func createDatabaseObject() *dao.DB {
connectionString := "postgresql://secrets@" + postgresqlUrl +
"/secrets_service?sslmode=verify-ca&sslcert=/hab/svc/secrets-service/config/service.crt&sslkey=/hab/svc/secrets-service/config/service.key&sslrootcert=/hab/svc/secrets-service/config/root_ca.crt"
postgresConfig := config.Postgres{ConnectionString: connectionString, MigrationsPath: "/src/components/secrets-service/dao/migration/sql"}
db, err := dao.New(&postgresConfig, "75e79c17ae62445e9771cd13fc4216f4")
if err != nil {
fmt.Printf("Could not create postgresql client from '%s': %s\n", connectionString, err)
os.Exit(1)
}
return db
}
// TestMain allows us to run a setup before running our tests and also
// tear everything down after we have finished testing.
//
// => Docs: https://golang.org/pkg/testing/#hdr-Main
func TestMain(m *testing.M) {
	// Global Setup hook: Here is where you can initialize anything you need
	// for your tests to run, things like: initializing ES indices, inserting
	// nodes or runs, etc.
suite.GlobalSetup()
// Execute the test suite and record the exit code
exitCode := m.Run()
// Teardown hook: It says it all, this hook should clean documents
// from ES so that the next test can run on a clean env.
suite.GlobalTeardown()
// call with result of m.Run()
os.Exit(exitCode)
}
| [
"\"POSTGRESQL_URL\""
]
| []
| [
"POSTGRESQL_URL"
]
| [] | ["POSTGRESQL_URL"] | go | 1 | 0 | |
main.go | package main
import (
"encoding/json"
"fmt"
"os"
"strings"
"github.com/actions-go/toolkit/core"
)
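// runMain reads the SECRETS JSON map and BRANCH_NAME from the environment and
// exposes the branch specific Split.io and Segment keys as action outputs,
// falling back to the *_DEVELOPMENT keys when no branch specific key exists.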
func runMain() {
secrets := os.Getenv("SECRETS")
if secrets == "" {
core.Error("secrets is not passed")
return
}
branch := os.Getenv("BRANCH_NAME")
if branch == "" {
core.Error("branch is not passed")
return
}
var secretsMap map[string]string
if err := json.Unmarshal([]byte(secrets), &secretsMap); err != nil {
core.Error(fmt.Sprintf("error reading in secrets map %s", err.Error()))
return
}
segmentIOValue := secretsMap[fmt.Sprintf("SEGMENT_IO_KEY_%s", strings.ToUpper(branch))]
if segmentIOValue == "" {
core.Warning("falling back SEGMENT_IO_KEY_DEVELOPMENT")
segmentIOValue = secretsMap["SEGMENT_IO_KEY_DEVELOPMENT"]
}
splitIOValue := secretsMap[fmt.Sprintf("SPLIT_IO_JS_%s", strings.ToUpper(branch))]
if splitIOValue == "" {
core.Warning("falling back SPLIT_IO_JS_DEVELOPMENT")
splitIOValue = secretsMap["SPLIT_IO_JS_DEVELOPMENT"]
}
core.SetOutput("FEATURE_FLAG_API_KEY", splitIOValue)
core.SetOutput("SEGMENT_IO_KEY", segmentIOValue)
}
func main() {
runMain()
}
| [
"\"SECRETS\"",
"\"BRANCH_NAME\""
]
| []
| [
"SECRETS",
"BRANCH_NAME"
]
| [] | ["SECRETS", "BRANCH_NAME"] | go | 2 | 0 | |
pkg/test/postgresql.go | package test
import (
"context"
"fmt"
"io/ioutil"
"os"
"path"
"runtime"
"github.com/gobuffalo/pop/v5"
"github.com/golang-migrate/migrate/v4"
"github.com/golang-migrate/migrate/v4/database/postgres"
_ "github.com/golang-migrate/migrate/v4/source/file"
"github.com/jackc/pgx/v4/pgxpool"
_ "github.com/jackc/pgx/v4/stdlib"
"github.com/jmoiron/sqlx"
configd "github.com/smiletrl/micro_ecommerce/pkg/config"
)
var (
dbpool *pgxpool.Pool
db *sqlx.DB
cfg configd.Config
migrator pop.FileMigrator
m *migrate.Migrate
)
func init() {
var err error
_, filename, _, _ := runtime.Caller(0)
dir := path.Join(path.Dir(filename), "..") + "/.."
err = os.Chdir(dir)
if err != nil {
panic(err)
}
stage := os.Getenv("STAGE")
if stage == "" {
stage = "local"
}
cfg, err = configd.Load(stage)
if err != nil {
panic(err)
}
// Only use this extra config DBConnString to make it work for github action test.
// Not sure why, but github action jobs can not parse the following fmt sprintf string.
connStr := cfg.PostgresqlConnString
if cfg.PostgresqlConnString == "" {
connStr = fmt.Sprintf("user=%s sslmode=%s host=%s password=%s port=%s dbname=%s", cfg.Postgresql.User, cfg.Postgresql.SSLMode, cfg.Postgresql.Host, cfg.Postgresql.Password, cfg.Postgresql.Port, cfg.Postgresql.Name)
}
dbpool, err = pgxpool.Connect(context.Background(), connStr)
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err)
os.Exit(1)
}
db, err = sqlx.Connect("pgx", connStr)
if err != nil {
panic(err)
}
driver, err := postgres.WithInstance(db.DB, &postgres.Config{})
if err != nil {
panic(err)
}
m, err = migrate.NewWithDatabaseInstance("file://migrations", "postgres", driver)
if err != nil {
panic(err)
}
}
// Postgres returns the postgres instance
func Postgres() (*pgxpool.Pool, configd.Config, error) {
if err := m.Down(); err != nil && err.Error() != "no change" {
return nil, cfg, err
}
if err := m.Up(); err != nil && err.Error() != "no change" {
return nil, cfg, err
}
content, err := ioutil.ReadFile("testdata/test_insert.sql")
if err != nil {
return nil, cfg, err
}
if _, err := db.Exec(string(content)); err != nil {
return nil, cfg, err
}
return dbpool, cfg, nil
}
| ["\"STAGE\""] | [] | ["STAGE"] | [] | ["STAGE"] | go | 1 | 0 | |
main.go | package main
import (
"context"
"errors"
"log"
"os"
"strings"
"github.com/perkbox/cloud-access-bot/commands"
"github.com/slack-go/slack"
"github.com/perkbox/cloud-access-bot/internal/settings"
"github.com/perkbox/cloud-access-bot/internal/messenger"
"github.com/slack-go/slack/socketmode"
"github.com/perkbox/cloud-access-bot/internal/identitydata"
"github.com/perkbox/cloud-access-bot/internal/policy"
"github.com/aws/aws-sdk-go-v2/config"
runtime "github.com/banzaicloud/logrus-runtime-formatter"
"github.com/joho/godotenv"
"github.com/perkbox/cloud-access-bot/internal"
"github.com/perkbox/cloud-access-bot/internal/awsproviderv2"
"github.com/perkbox/cloud-access-bot/internal/repository"
"github.com/sirupsen/logrus"
)
func init() {
formatter := runtime.Formatter{ChildFormatter: &logrus.JSONFormatter{}}
formatter.Line = true
logrus.SetFormatter(&formatter)
}
func main() {
//Load config from .env when running, works both locally and in production settings
_ = godotenv.Load()
//
if os.Getenv("BOT_CONFIG_S3_BUCKET") == "" || os.Getenv("BOT_CONFIG_S3_KEY") == "" {
logrus.Errorf("Missing Env Vars Err: Ensure both BOT_CONFIG_S3_BUCKET & BOT_CONFIG_S3_KEY are set. ")
os.Exit(1)
}
cfg, _ := config.LoadDefaultConfig(context.TODO(),
config.WithRegion("eu-west-1"),
)
settings, err := settings.NewS3Config(cfg, os.Getenv("BOT_CONFIG_S3_KEY"), os.Getenv("BOT_CONFIG_S3_BUCKET"))
if err != nil {
logrus.Errorf("Unable get Config. Err %s", err)
os.Exit(1)
}
client, err := connectToSlackViaSocketmode()
if err != nil {
logrus.Errorf("Unable to connect to slack. Err: %s", err)
os.Exit(1)
}
service := internal.NewService(
awsproviderv2.NewAwsResourceFinder(cfg, settings),
repository.NewDynamoDBRRepo(cfg, settings.GetDynamodbTable()),
policy.NewPolicyManager(cfg, settings, nil, nil),
identitydata.NewIamDefinitions(),
messenger.NewMessenger(client.GetApiClient()),
)
socketmodeHandler := socketmode.NewsSocketmodeHandler(client)
commands.NewRequestCommandHandler(settings, service, socketmodeHandler)
socketmodeHandler.RunEventLoop()
}
func connectToSlackViaSocketmode() (*socketmode.Client, error) {
appToken := os.Getenv("SLACK_APP_TOKEN")
if appToken == "" {
return nil, errors.New("SLACK_APP_TOKEN must be set")
}
if !strings.HasPrefix(appToken, "xapp-") {
return nil, errors.New("SLACK_APP_TOKEN must have the prefix \"xapp-\".")
}
botToken := os.Getenv("SLACK_BOT_TOKEN")
if botToken == "" {
return nil, errors.New("SLACK_BOT_TOKEN must be set.")
}
if !strings.HasPrefix(botToken, "xoxb-") {
return nil, errors.New("SLACK_BOT_TOKEN must have the prefix \"xoxb-\".")
}
api := slack.New(
botToken,
//slack.OptionDebug(true),
slack.OptionAppLevelToken(appToken),
slack.OptionLog(log.New(os.Stdout, "api: ", log.Lshortfile|log.LstdFlags)),
)
client := socketmode.New(
api,
//socketmode.OptionDebug(true),
socketmode.OptionLog(log.New(os.Stdout, "socketmode: ", log.Lshortfile|log.LstdFlags)),
)
return client, nil
}
| ["\"BOT_CONFIG_S3_BUCKET\"", "\"BOT_CONFIG_S3_KEY\"", "\"BOT_CONFIG_S3_KEY\"", "\"BOT_CONFIG_S3_BUCKET\"", "\"SLACK_APP_TOKEN\"", "\"SLACK_BOT_TOKEN\""] | [] | ["SLACK_APP_TOKEN", "BOT_CONFIG_S3_KEY", "BOT_CONFIG_S3_BUCKET", "SLACK_BOT_TOKEN"] | [] | ["SLACK_APP_TOKEN", "BOT_CONFIG_S3_KEY", "BOT_CONFIG_S3_BUCKET", "SLACK_BOT_TOKEN"] | go | 4 | 0 | |
cointop/cointop.go | package cointop
import (
"errors"
"fmt"
"io/ioutil"
"os"
"strings"
"sync"
"time"
"github.com/miguelmota/cointop/cointop/common/api"
"github.com/miguelmota/cointop/cointop/common/api/types"
"github.com/miguelmota/cointop/cointop/common/filecache"
"github.com/miguelmota/cointop/cointop/common/gizak/termui"
"github.com/miguelmota/cointop/cointop/common/humanize"
"github.com/miguelmota/cointop/cointop/common/table"
"github.com/miguelmota/gocui"
"github.com/patrickmn/go-cache"
)
// TODO: clean up and optimize codebase
// ErrInvalidAPIChoice is error for invalid API choice
var ErrInvalidAPIChoice = errors.New("Invalid API choice")
// Views are all views in cointop
type Views struct {
Chart *ChartView
Table *TableView
TableHeader *TableHeaderView
Marketbar *MarketbarView
SearchField *SearchFieldView
Statusbar *StatusbarView
Help *HelpView
ConvertMenu *ConvertMenuView
Input *InputView
PortfolioUpdateMenu *PortfolioUpdateMenuView
}
// State is the state preferences of cointop
type State struct {
allCoins []*Coin
allCoinsSlugMap sync.Map
coins []*Coin
chartPoints [][]termui.Cell
currencyConversion string
convertMenuVisible bool
defaultView string
// DEPRECATED: favorites by 'symbol' is deprecated because of collisions.
favoritesBySymbol map[string]bool
favorites map[string]bool
filterByFavorites bool
helpVisible bool
hideMarketbar bool
hideChart bool
hideStatusbar bool
lastSelectedRowIndex int
page int
perPage int
portfolio *Portfolio
portfolioVisible bool
portfolioUpdateMenuVisible bool
refreshRate time.Duration
searchFieldVisible bool
selectedCoin *Coin
selectedChartRange string
shortcutKeys map[string]string
sortDesc bool
sortBy string
onlyTable bool
chartHeight int
}
// Cointop cointop
type Cointop struct {
g *gocui.Gui
ActionsMap map[string]bool
apiKeys *APIKeys
cache *cache.Cache
config config // toml config
configFilepath string
api api.Interface
apiChoice string
chartRanges []string
chartRangesMap map[string]time.Duration
colorschemeName string
colorscheme *Colorscheme
debug bool
forceRefresh chan bool
limiter <-chan time.Time
maxTableWidth int
refreshMux sync.Mutex
refreshTicker *time.Ticker
saveMux sync.Mutex
State *State
table *table.Table
TableColumnOrder []string
Views *Views
}
// CoinMarketCap is API choice
var CoinMarketCap = "coinmarketcap"
// CoinGecko is API choice
var CoinGecko = "coingecko"
// PortfolioEntry is portfolio entry
type PortfolioEntry struct {
Coin string
Holdings float64
}
// Portfolio is portfolio structure
type Portfolio struct {
Entries map[string]*PortfolioEntry
}
// Config config options
type Config struct {
APIChoice string
Colorscheme string
ConfigFilepath string
CoinMarketCapAPIKey string
NoPrompts bool
HideMarketbar bool
HideChart bool
HideStatusbar bool
OnlyTable bool
RefreshRate *uint
}
// APIKeys is api keys structure
type APIKeys struct {
cmc string
}
var defaultConfigPath = "~/.cointop/config.toml"
var defaultColorscheme = "cointop"
// NewCointop initializes cointop
func NewCointop(config *Config) (*Cointop, error) {
var debug bool
if os.Getenv("DEBUG") != "" {
debug = true
}
configFilepath := defaultConfigPath
if config != nil {
if config.ConfigFilepath != "" {
configFilepath = config.ConfigFilepath
}
}
ct := &Cointop{
apiChoice: CoinGecko,
apiKeys: new(APIKeys),
forceRefresh: make(chan bool),
maxTableWidth: 175,
ActionsMap: ActionsMap(),
cache: cache.New(1*time.Minute, 2*time.Minute),
configFilepath: configFilepath,
chartRanges: chartRanges(),
debug: debug,
chartRangesMap: chartRangesMap(),
limiter: time.Tick(2 * time.Second),
State: &State{
allCoins: []*Coin{},
currencyConversion: "USD",
// DEPRECATED: favorites by 'symbol' is deprecated because of collisions. Kept for backward compatibility.
favoritesBySymbol: make(map[string]bool),
favorites: make(map[string]bool),
hideMarketbar: config.HideMarketbar,
hideChart: config.HideChart,
hideStatusbar: config.HideStatusbar,
onlyTable: config.OnlyTable,
refreshRate: 60 * time.Second,
selectedChartRange: "7D",
shortcutKeys: DefaultShortcuts(),
sortBy: "rank",
page: 0,
perPage: 100,
portfolio: &Portfolio{
Entries: make(map[string]*PortfolioEntry, 0),
},
chartHeight: 10,
},
TableColumnOrder: TableColumnOrder(),
Views: &Views{
Chart: NewChartView(),
Table: NewTableView(),
TableHeader: NewTableHeaderView(),
Marketbar: NewMarketbarView(),
SearchField: NewSearchFieldView(),
Statusbar: NewStatusbarView(),
Help: NewHelpView(),
ConvertMenu: NewConvertMenuView(),
Input: NewInputView(),
PortfolioUpdateMenu: NewPortfolioUpdateMenuView(),
},
}
err := ct.setupConfig()
if err != nil {
return nil, err
}
ct.cache.Set("onlyTable", ct.State.onlyTable, cache.NoExpiration)
ct.cache.Set("hideMarketbar", ct.State.hideMarketbar, cache.NoExpiration)
ct.cache.Set("hideChart", ct.State.hideChart, cache.NoExpiration)
ct.cache.Set("hideStatusbar", ct.State.hideStatusbar, cache.NoExpiration)
if config.RefreshRate != nil {
ct.State.refreshRate = time.Duration(*config.RefreshRate) * time.Second
}
if ct.State.refreshRate == 0 {
ct.refreshTicker = time.NewTicker(time.Duration(1))
ct.refreshTicker.Stop()
} else {
ct.refreshTicker = time.NewTicker(ct.State.refreshRate)
}
// prompt for CoinMarketCap api key if not found
if config.CoinMarketCapAPIKey != "" {
ct.apiKeys.cmc = config.CoinMarketCapAPIKey
if err := ct.saveConfig(); err != nil {
return nil, err
}
}
if config.Colorscheme != "" {
ct.colorschemeName = config.Colorscheme
}
colors, err := ct.getColorschemeColors()
if err != nil {
return nil, err
}
ct.colorscheme = NewColorscheme(colors)
if config.APIChoice != "" {
ct.apiChoice = config.APIChoice
if err := ct.saveConfig(); err != nil {
return nil, err
}
}
if ct.apiChoice == CoinMarketCap && ct.apiKeys.cmc == "" {
apiKey := os.Getenv("CMC_PRO_API_KEY")
if apiKey == "" {
if !config.NoPrompts {
apiKey, err = ct.ReadAPIKeyFromStdin("CoinMarketCap Pro")
if err != nil {
return nil, err
}
ct.apiKeys.cmc = apiKey
}
} else {
ct.apiKeys.cmc = apiKey
}
if err := ct.saveConfig(); err != nil {
return nil, err
}
}
if ct.apiChoice == CoinGecko {
ct.State.selectedChartRange = "1Y"
}
if ct.apiChoice == CoinMarketCap {
ct.api = api.NewCMC(ct.apiKeys.cmc)
} else if ct.apiChoice == CoinGecko {
ct.api = api.NewCG()
} else {
return nil, ErrInvalidAPIChoice
}
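// Restore the previously cached coin list from the on-disk file cache into the
// in-memory slug map and coin slice.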
allCoinsSlugMap := make(map[string]*Coin)
coinscachekey := ct.CacheKey("allCoinsSlugMap")
filecache.Get(coinscachekey, &allCoinsSlugMap)
for k, v := range allCoinsSlugMap {
ct.State.allCoinsSlugMap.Store(k, v)
}
ct.State.allCoinsSlugMap.Range(func(key, value interface{}) bool {
if coin, ok := value.(*Coin); ok {
ct.State.allCoins = append(ct.State.allCoins, coin)
}
return true
})
if len(ct.State.allCoins) > 1 {
max := len(ct.State.allCoins)
if max > 100 {
max = 100
}
ct.sort(ct.State.sortBy, ct.State.sortDesc, ct.State.allCoins, false)
ct.State.coins = ct.State.allCoins[0:max]
}
// DEPRECATED: favorites by 'symbol' is deprecated because of collisions. Kept for backward compatibility.
// Here we're doing a lookup based on symbol and setting the favorite to the coin name instead of coin symbol.
ct.State.allCoinsSlugMap.Range(func(key, value interface{}) bool {
if coin, ok := value.(*Coin); ok {
for k := range ct.State.favoritesBySymbol {
if coin.Symbol == k {
ct.State.favorites[coin.Name] = true
delete(ct.State.favoritesBySymbol, k)
}
}
}
return true
})
var globaldata []float64
chartcachekey := ct.CacheKey(fmt.Sprintf("%s_%s", "globaldata", strings.Replace(ct.State.selectedChartRange, " ", "", -1)))
filecache.Get(chartcachekey, &globaldata)
ct.cache.Set(chartcachekey, globaldata, 10*time.Second)
var market types.GlobalMarketData
marketcachekey := ct.CacheKey("market")
filecache.Get(marketcachekey, &market)
ct.cache.Set(marketcachekey, market, 10*time.Second)
// TODO: notify offline status in status bar
/*
if err := ct.api.Ping(); err != nil {
return nil, err
}
*/
return ct, nil
}
// Run runs cointop
func (ct *Cointop) Run() error {
ct.debuglog("run()")
g, err := gocui.NewGui(gocui.Output256)
if err != nil {
return fmt.Errorf("new gocui: %v", err)
}
g.FgColor = ct.colorscheme.BaseFg()
g.BgColor = ct.colorscheme.BaseBg()
ct.g = g
defer g.Close()
g.InputEsc = true
g.Mouse = true
g.Highlight = true
g.SetManagerFunc(ct.layout)
if err := ct.keybindings(g); err != nil {
return fmt.Errorf("keybindings: %v", err)
}
if err := g.MainLoop(); err != nil && err != gocui.ErrQuit {
return fmt.Errorf("main loop: %v", err)
}
return nil
}
// PriceConfig is the config options for the price command
type PriceConfig struct {
Coin string
Currency string
APIChoice string
}
// PrintPrice outputs the current price of the coin
func PrintPrice(config *PriceConfig) error {
var priceAPI api.Interface
if config.APIChoice == CoinMarketCap {
priceAPI = api.NewCMC("")
} else if config.APIChoice == CoinGecko {
priceAPI = api.NewCG()
} else {
return ErrInvalidAPIChoice
}
price, err := priceAPI.Price(config.Coin, config.Currency)
if err != nil {
return err
}
symbol := currencySymbol(config.Currency)
fmt.Fprintf(os.Stdout, "%s%s", symbol, humanize.Commaf(price))
return nil
}
// Clean ...
func Clean() error {
tmpPath := "/tmp"
if _, err := os.Stat(tmpPath); !os.IsNotExist(err) {
files, err := ioutil.ReadDir(tmpPath)
if err != nil {
return err
}
for _, f := range files {
if strings.HasPrefix(f.Name(), "fcache.") {
file := fmt.Sprintf("%s/%s", tmpPath, f.Name())
fmt.Printf("removing %s\n", file)
if err := os.Remove(file); err != nil {
return err
}
}
}
}
fmt.Println("cointop cache has been cleaned")
return nil
}
// Reset ...
func Reset() error {
if err := Clean(); err != nil {
return err
}
// default config path
configPath := fmt.Sprintf("%s%s", UserPreferredHomeDir(), "/.cointop")
if _, err := os.Stat(configPath); !os.IsNotExist(err) {
fmt.Printf("removing %s\n", configPath)
if err := os.RemoveAll(configPath); err != nil {
return err
}
}
fmt.Println("cointop has been reset")
return nil
}
| ["\"DEBUG\"", "\"CMC_PRO_API_KEY\""] | [] | ["CMC_PRO_API_KEY", "DEBUG"] | [] | ["CMC_PRO_API_KEY", "DEBUG"] | go | 2 | 0 | |
tests/sparseml/pytorch/optim/test_modifier_quantization.py | # Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
import torch
from torch.nn import Conv2d, Linear
from sparseml.pytorch.optim import QuantizationModifier
from tests.sparseml.pytorch.helpers import LinearNet, create_optim_sgd
from tests.sparseml.pytorch.optim.test_modifier import ScheduledModifierTest
from tests.sparseml.pytorch.helpers import ( # noqa isort:skip
test_epoch,
test_loss,
test_steps_per_epoch,
)
try:
from torch import quantization as torch_quantization
except Exception:
torch_quantization = None
QUANTIZATION_MODIFIERS = [
lambda: QuantizationModifier(
start_epoch=0.0,
disable_quantization_observer_epoch=2,
freeze_bn_stats_epoch=3.0,
),
lambda: QuantizationModifier(start_epoch=2.0, submodules=["seq"]),
lambda: QuantizationModifier(start_epoch=2.0, submodules=["seq"]),
]
def _is_valid_submodule(module_name, submodule_names):
return module_name in submodule_names or any(
module_name.startswith(name) for name in submodule_names
)
def _is_quantiable_module(module):
if isinstance(module, torch.quantization.FakeQuantize):
return False
return (
len(list(module.children())) > 0
or isinstance(module, Conv2d)
or isinstance(module, Linear)
)
def _test_qat_applied(modifier, model):
# test quantization mods are applied
if not modifier.submodules or modifier.submodules == [""]:
assert hasattr(model, "qconfig") and model.qconfig is not None
submodules = [""]
for module in model.modules():
if _is_quantiable_module(module):
assert hasattr(module, "qconfig") and module.qconfig == model.qconfig
else:
assert not hasattr(model, "qconfig") or model.qconfig is None
submodules = modifier.submodules
# check qconfig propagation
for name, module in model.named_modules():
if _is_valid_submodule(name, submodules) and _is_quantiable_module(module):
assert hasattr(module, "qconfig") and module.qconfig is not None
@pytest.mark.skipif(
os.getenv("NM_ML_SKIP_PYTORCH_TESTS", False),
reason="Skipping pytorch tests",
)
@pytest.mark.skipif(
os.getenv("NM_ML_SKIP_PYTORCH_QUANT_TESTS", False),
reason="Skipping pytorch torch quantization tests",
)
@pytest.mark.skipif(
torch_quantization is None,
reason="torch quantization not available",
)
@pytest.mark.parametrize("modifier_lambda", QUANTIZATION_MODIFIERS, scope="function")
@pytest.mark.parametrize("model_lambda", [LinearNet], scope="function")
@pytest.mark.parametrize("optim_lambda", [create_optim_sgd], scope="function")
class TestQuantizationModifierImpl(ScheduledModifierTest):
def test_lifecycle(
self,
modifier_lambda,
model_lambda,
optim_lambda,
test_steps_per_epoch, # noqa: F811
):
modifier = modifier_lambda()
model = model_lambda()
optimizer = optim_lambda(model)
self.initialize_helper(modifier, model)
for epoch in range(int(modifier.start_epoch)):
assert not modifier.update_ready(epoch, test_steps_per_epoch)
update_epochs = [modifier.start_epoch]
if modifier.disable_quantization_observer_epoch is not None:
update_epochs.append(modifier.disable_quantization_observer_epoch)
if modifier.freeze_bn_stats_epoch is not None:
update_epochs.append(modifier.freeze_bn_stats_epoch)
for epoch in update_epochs:
assert modifier.update_ready(epoch, test_steps_per_epoch)
# test update ready is still true after start epoch
# even if quantization has not been applied yet
assert modifier.update_ready(modifier.start_epoch + 0.1, test_steps_per_epoch)
# test QAT setup
if modifier.start_epoch > 0:
for module in model.modules():
assert not hasattr(module, "qconfig") or module.qconfig is None
else:
# QAT should be applied
_test_qat_applied(modifier, model)
modifier.scheduled_update(
model, optimizer, modifier.start_epoch, test_steps_per_epoch
)
# test update ready is False after start epoch is applied, before disable epochs
if (
len(update_epochs) == 1
or min(update_epochs[1:]) <= modifier.start_epoch + 1
):
# test epochs in 0.1 intervals
for epoch_interval in range(10):
epoch_interval *= 0.1
epoch = modifier.start_epoch + 0.1 * epoch_interval
assert not modifier.update_ready(epoch, test_steps_per_epoch)
_test_qat_applied(modifier, model)
@pytest.mark.skipif(
os.getenv("NM_ML_SKIP_PYTORCH_TESTS", False),
reason="Skipping pytorch tests",
)
@pytest.mark.skipif(
os.getenv("NM_ML_SKIP_PYTORCH_QUANT_TESTS", False),
reason="Skipping pytorch torch quantization tests",
)
@pytest.mark.skipif(
torch_quantization is None,
reason="torch quantization not available",
)
def test_quantization_modifier_yaml():
start_epoch = 0.0
submodules = ["block.0", "block.2"]
model_fuse_fn_name = "fuse_module"
disable_quantization_observer_epoch = 2.0
freeze_bn_stats_epoch = 3.0
yaml_str = """
!QuantizationModifier
start_epoch: {start_epoch}
submodules: {submodules}
model_fuse_fn_name: {model_fuse_fn_name}
disable_quantization_observer_epoch: {disable_quantization_observer_epoch}
freeze_bn_stats_epoch: {freeze_bn_stats_epoch}
""".format(
start_epoch=start_epoch,
submodules=submodules,
model_fuse_fn_name=model_fuse_fn_name,
disable_quantization_observer_epoch=disable_quantization_observer_epoch,
freeze_bn_stats_epoch=freeze_bn_stats_epoch,
)
yaml_modifier = QuantizationModifier.load_obj(
yaml_str
) # type: QuantizationModifier
serialized_modifier = QuantizationModifier.load_obj(
str(yaml_modifier)
) # type: QuantizationModifier
obj_modifier = QuantizationModifier(
start_epoch=start_epoch,
submodules=submodules,
model_fuse_fn_name=model_fuse_fn_name,
disable_quantization_observer_epoch=disable_quantization_observer_epoch,
freeze_bn_stats_epoch=freeze_bn_stats_epoch,
)
assert isinstance(yaml_modifier, QuantizationModifier)
assert (
yaml_modifier.start_epoch
== serialized_modifier.start_epoch
== obj_modifier.start_epoch
)
assert (
sorted(yaml_modifier.submodules)
== sorted(serialized_modifier.submodules)
== sorted(obj_modifier.submodules)
)
assert (
yaml_modifier.model_fuse_fn_name
== serialized_modifier.model_fuse_fn_name
== obj_modifier.model_fuse_fn_name
)
assert (
yaml_modifier.disable_quantization_observer_epoch
== serialized_modifier.disable_quantization_observer_epoch
== obj_modifier.disable_quantization_observer_epoch
)
assert (
yaml_modifier.freeze_bn_stats_epoch
== serialized_modifier.freeze_bn_stats_epoch
== obj_modifier.freeze_bn_stats_epoch
)
| [] | [] | ["NM_ML_SKIP_PYTORCH_QUANT_TESTS", "NM_ML_SKIP_PYTORCH_TESTS"] | [] | ["NM_ML_SKIP_PYTORCH_QUANT_TESTS", "NM_ML_SKIP_PYTORCH_TESTS"] | python | 2 | 0 | |
lib/transport/transportRabbit.go | package transport
import (
"encoding/json"
"fmt"
"os"
"strconv"
"sync"
redis "github.com/go-redis/redis"
logol "github.com/osallou/logol-go-playground/lib/types"
"github.com/satori/go.uuid"
"github.com/streadway/amqp"
)
var doStats bool
// TransportRabbit is the transport manager using RabbitMQ
type TransportRabbit struct {
id string
conn *amqp.Connection
ch *amqp.Channel
queues map[int]string
redis *redis.Client
}
// GetQueueStatus returns number of pending messages and number of message consumer for the queue
func (t TransportRabbit) GetQueueStatus(queueID QueueType) (pending int, consumers int) {
queueName := "analyse"
switch queueID {
case QUEUE_MESSAGE:
queueName = "analyse"
case QUEUE_RESULT:
queueName = "result"
case QUEUE_CASSIE:
queueName = "cassie"
}
queue, _ := t.ch.QueueInspect("logol-" + queueName + "-" + t.id)
pending += queue.Messages
consumers += queue.Consumers
return pending, consumers
}
// GetID returns the unique transport id
func (t TransportRabbit) GetID() string {
return t.id
}
// GetProgress returns number of final matches, number of match under analysis and rejected matches
func (t *TransportRabbit) GetProgress(uid string) (count int, ban int, match int) {
rcount, _ := t.redis.Get("logol:" + uid + ":count").Result()
count, _ = strconv.Atoi(rcount)
rban, _ := t.redis.Get("logol:" + uid + ":ban").Result()
ban, _ = strconv.Atoi(rban)
rmatch, _ := t.redis.Get("logol:" + uid + ":match").Result()
match, _ = strconv.Atoi(rmatch)
return count, ban, match
}
// AddBan rejects a match
func (t *TransportRabbit) AddBan(uid string, nb int64) {
t.redis.IncrBy("logol:"+uid+":ban", nb)
}
// AddCount increments the number of solutions
func (t *TransportRabbit) AddCount(uid string, nb int64) {
t.redis.IncrBy("logol:"+uid+":count", nb)
}
// AddMatch increments the number of successful matches
func (t *TransportRabbit) AddMatch(uid string, nb int64) {
t.redis.IncrBy("logol:"+uid+":match", nb)
}
// SetBan sets the number of bans for the result
func (t *TransportRabbit) SetBan(uid string, nb int64) {
t.redis.Set("logol:"+uid+":ban", nb, 0)
}
// SetCount sets the number of results
func (t *TransportRabbit) SetCount(uid string, nb int64) {
t.redis.Set("logol:"+uid+":count", nb, 0)
}
// SetMatch sets the number of matches for the result
func (t *TransportRabbit) SetMatch(uid string, nb int64) {
t.redis.Set("logol:"+uid+":match", nb, 0)
}
// Clear redis info for the run id
func (t *TransportRabbit) Clear(uid string) {
t.redis.Del("logol:" + uid + ":count")
t.redis.Del("logol:" + uid + ":match")
t.redis.Del("logol:" + uid + ":ban")
t.redis.Del("logol:" + uid + ":grammar")
t.redis.Del("logol:" + uid + ":stat:duration")
t.redis.Del("logol:" + uid + ":stat:flow")
t.redis.Del("logol:" + uid + ":toban")
}
// SetGrammar saves grammar in redis
func (t *TransportRabbit) SetGrammar(grammar []byte, grammarID string) (err bool) {
logger.Debugf("Set grammar %s", grammarID)
t.redis.Set("logol:"+grammarID+":grammar", grammar, 0)
return true
}
// GetGrammar fetch grammar from redis
func (t *TransportRabbit) GetGrammar(grammarID string) (logol.Grammar, bool) {
logger.Debugf("Get grammar %s", grammarID)
grammar, err := t.redis.Get(grammarID).Result()
if grammar == "" {
logger.Errorf("Failed to get grammar %s", grammarID)
return logol.Grammar{}, true
}
err, g := logol.LoadGrammar([]byte(grammar))
if err != nil {
logger.Errorf("error: %v", err)
return logol.Grammar{}, true
}
return g, false
}
// Close cleanup queues
func (t *TransportRabbit) Close() {
logger.Debugf("Closing transport %s", t.id)
t.ch.ExchangeDelete("logol-event-exchange-"+t.id, false, false)
t.ch.QueueDelete("logol-analyse-"+t.id, false, false, false)
t.ch.QueueDelete("logol-result-"+t.id, false, false, false)
t.ch.QueueDelete("logol-cassie-"+t.id, false, false, false)
t.ch.QueueDelete("logol-log-"+t.id, false, false, false)
t.ch.Close()
t.conn.Close()
}
// Init setup queues
func (t *TransportRabbit) Init(uid string) {
doStatsEnv := os.Getenv("LOGOL_STATS")
if doStatsEnv != "" {
doStats = true
logger.Infof("Activating usage statistics, this can impact performance")
}
t.id = uid
queueName := t.id
rabbitConURL := "amqp://guest:guest@localhost:5672"
osRabbitConURL := os.Getenv("LOGOL_RABBITMQ_ADDR")
if osRabbitConURL != "" {
rabbitConURL = osRabbitConURL
}
//connUrl := fmt.Sprintf("amqp://%s:%s@%s:%d/",
// h.User, h.Password, h.Hostname, h.Port)
conn, err := amqp.Dial(rabbitConURL)
failOnError(err, "Failed to connect to RabbitMQ")
t.conn = conn
//defer conn.Close()
ch, err := conn.Channel()
failOnError(err, "Failed to open a channel")
t.ch = ch
//defer ch.Close()
t.queues = make(map[int]string)
qLog, lerr := ch.QueueDeclare(
"logol-log-"+queueName, // name
false, // durable
false, // delete when unused
false, // exclusive
false, // no-wait
nil, // arguments
)
failOnError(lerr, "Failed to declare a queue")
t.queues[QUEUE_LOG] = qLog.Name
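// Declare a fanout exchange for control events and bind a fresh exclusive queue to it,
// so that every listener receives broadcast events such as the STEP_END exit request.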
err = ch.ExchangeDeclare(
"logol-event-exchange-"+queueName, // name
"fanout", // kind
false, // durable
false, // delete when unused
false, // exclusive
false, // no-wait
nil, // arguments
)
failOnError(err, "Failed to declare an exchange")
eventQueue, err := ch.QueueDeclare(
"",
false, // durable
false, // delete when unused
true, // exclusive
false, // no-wait
nil, // arguments
)
failOnError(err, "Failed to declare a queue")
err = ch.QueueBind(
eventQueue.Name, // name,
"", // key
"logol-event-exchange-"+queueName, // exchange
false, // no-wait
nil, // arguments
)
failOnError(err, "Failed to bind queue")
t.queues[QUEUE_EVENT] = eventQueue.Name
t.queues[EXCHANGE_EVENT] = "logol-event-exchange-" + queueName
qAnalyse, err := ch.QueueDeclare(
"logol-analyse-"+queueName, // name
false, // durable
false, // delete when unused
false, // exclusive
false, // no-wait
nil, // arguments
)
failOnError(err, "Failed to declare a queue")
t.queues[QUEUE_MESSAGE] = qAnalyse.Name
qCassie, cerr := ch.QueueDeclare(
"logol-cassie-"+queueName, // name
false, // durable
false, // delete when unused
false, // exclusive
false, // no-wait
nil, // arguments
)
failOnError(cerr, "Failed to declare a queue")
t.queues[QUEUE_CASSIE] = qCassie.Name
qResult, rqerr := ch.QueueDeclare(
"logol-result-"+queueName, // name
false, // durable
false, // delete when unused
false, // exclusive
false, // no-wait
nil, // arguments
)
failOnError(rqerr, "Failed to declare a queue")
t.queues[QUEUE_RESULT] = qResult.Name
err = ch.Qos(
1, // prefetch count
0, // prefetch size
false, // global
)
failOnError(err, "Failed to set QoS")
}
// ListenLog starts a loop waiting for log messages. On each message, the callback function is called
func (t *TransportRabbit) ListenLog(fn CallbackLog) {
queueListenName, ok := t.queues[QUEUE_LOG]
if !ok {
panic(fmt.Sprintf("%s", "Failed to find log queue name"))
}
logger.Infof("Listen on queue %s", queueListenName)
eventQueueName, eok := t.queues[QUEUE_EVENT]
if !eok {
panic(fmt.Sprintf("%s", "Failed to find event queue name"))
}
msgs, err := t.ch.Consume(
queueListenName, // queue
"", // consumer
false, // auto-ack
false, // exclusive
false, // no-local
false, // no-wait
nil, // args
)
failOnError(err, "Failed to register a consumer")
events, err := t.ch.Consume(
eventQueueName, // queue
"", // consumer
false, // auto-ack
false, // exclusive
false, // no-local
false, // no-wait
nil, // args
)
failOnError(err, "Failed to register a consumer")
wg := sync.WaitGroup{}
wg.Add(2)
forever := make(chan bool)
go func() {
for d := range msgs {
result := string(d.Body[:])
fn(result)
d.Ack(false)
}
wg.Done()
}()
go func(ch chan bool) {
for d := range events {
logger.Debugf("New message on %s, %s", queueListenName, string(d.Body[:]))
msgEvent := MsgEvent{}
json.Unmarshal([]byte(d.Body), &msgEvent)
switch msgEvent.Step {
case STEP_END:
logger.Infof("Received exit request %s", queueListenName)
d.Ack(false)
wg.Done()
ch <- true
default:
d.Ack(false)
}
}
}(forever)
logger.Infof(" [*] Waiting for logs.")
<-forever
t.ch.Close()
t.conn.Close()
wg.Wait()
}
// Listen starts a loop waiting for messages on the selected queue. On each message, the callback function is called
func (t *TransportRabbit) Listen(queueListen QueueType, fn CallbackMessage) {
queueListenName, ok := t.queues[int(queueListen)]
if !ok {
panic(fmt.Sprintf("%s", "Failed to find message queue name"))
//Errorf("Failed to find message queue %d", int(queueListen))
}
logger.Debugf("Listen on queue %s", queueListenName)
eventQueueName, eok := t.queues[QUEUE_EVENT]
if !eok {
panic(fmt.Sprintf("%s", "Failed to find event queue name"))
}
msgs, err := t.ch.Consume(
queueListenName, // queue
"", // consumer
false, // auto-ack
false, // exclusive
false, // no-local
false, // no-wait
nil, // args
)
failOnError(err, "Failed to register a consumer")
events, err := t.ch.Consume(
eventQueueName, // queue
"", // consumer
false, // auto-ack
false, // exclusive
false, // no-local
false, // no-wait
nil, // args
)
failOnError(err, "Failed to register a consumer")
wg := sync.WaitGroup{}
wg.Add(2)
forever := make(chan bool)
go func() {
for d := range msgs {
logger.Debugf("New message on %s, %s", queueListenName, string(d.Body[:]))
result, _ := t.getMessage(string(d.Body[:]))
fn(result)
d.Ack(false)
}
wg.Done()
}()
go func(ch chan bool) {
for d := range events {
logger.Debugf("New message on %s, %s", queueListenName, string(d.Body[:]))
msgEvent := MsgEvent{}
json.Unmarshal([]byte(d.Body), &msgEvent)
switch msgEvent.Step {
case STEP_END:
logger.Infof("Received exit request %s", queueListenName)
d.Ack(false)
wg.Done()
ch <- true
default:
d.Ack(false)
}
}
}(forever)
logger.Infof(" [*] Waiting for messages. To exit press CTRL+C")
<-forever
t.ch.Close()
t.conn.Close()
wg.Wait()
}
func (t TransportRabbit) getMessage(uid string) (result logol.Result, err error) {
// fetch from redis the message based on provided uid
// Once fetched, delete it from db
val, err := t.redis.Get("logol:msg:" + uid).Result()
if err == redis.Nil {
return logol.Result{}, err
}
result = logol.Result{}
json.Unmarshal([]byte(val), &result)
t.redis.Del(uid)
return result, err
}
// SendLog sends a log message to log queue
func (t TransportRabbit) SendLog(msg string) bool {
queueName, ok := t.queues[QUEUE_LOG]
if !ok {
logger.Errorf("Could not find queue %d", QUEUE_LOG)
return false
}
publishMsg := amqp.Publishing{}
publishMsg.Body = []byte(msg)
t.ch.Publish(
"", // exchange
queueName, // key
false, // mandatory
false, // immediate
publishMsg,
)
return true
}
// PrepareMessage stores data in redis and returns its identifier
func (t TransportRabbit) PrepareMessage(data logol.Result) string {
u1 := uuid.Must(uuid.NewV4())
jsonMsg, _ := json.Marshal(data)
err := t.redis.Set("logol:msg:"+u1.String(), jsonMsg, 0).Err()
if err != nil {
logger.Errorf("Failed to store message")
}
return u1.String()
}
// PublishMessage posts a msg to a rabbitmq queue
func (t TransportRabbit) PublishMessage(queue string, msg string) {
publishMsg := amqp.Publishing{}
publishMsg.Body = []byte(msg)
t.ch.Publish(
"", // exchange
queue, // key
false, // mandatory
false, // immediate
publishMsg,
)
}
// PublishExchange posts a msg to a rabbitmq exchange
func (t TransportRabbit) PublishExchange(exchange string, msg string) {
publishMsg := amqp.Publishing{}
publishMsg.Body = []byte(msg)
t.ch.Publish(
exchange, // exchange
"", // key
false, // mandatory
false, // immediate
publishMsg,
)
}
// SendEvent sends an *MsgEvent* to exchange
func (t TransportRabbit) SendEvent(event MsgEvent) bool {
queueExchange, _ := t.queues[int(EXCHANGE_EVENT)]
//publish_msg := amqp.Publishing{}
jsonMsg, _ := json.Marshal(event)
//publish_msg.Body = []byte(json_msg)
t.PublishExchange(queueExchange, string(jsonMsg))
return true
}
// SendMessage sends a new message to defined queue
func (t TransportRabbit) SendMessage(queue QueueType, data logol.Result) bool {
queueName, ok := t.queues[int(queue)]
if !ok {
logger.Errorf("Could not find queue %d", int(queue))
return false
}
publishMsg := t.PrepareMessage(data)
if queue == QUEUE_EVENT {
queueExchange, _ := t.queues[int(EXCHANGE_EVENT)]
logger.Debugf("Send message to event exchange")
t.PublishExchange(queueExchange, publishMsg)
} else {
logger.Debugf("Send message to %s", queueName)
t.PublishMessage(queueName, publishMsg)
}
return true
}
// IncrFlowStat increments number of calls between 2 variables
func (t TransportRabbit) IncrFlowStat(uid string, from string, to string) {
if !doStats || from == "." || to == "over.over" {
return
}
err := t.redis.HIncrBy("logol:"+uid+":stat:flow", from+"."+to, 1).Err()
if err != nil {
logger.Errorf("Failed to update stats")
}
}
// IncrDurationStat increments duration for defined variable
func (t TransportRabbit) IncrDurationStat(uid string, variable string, duration int64) {
if !doStats {
return
}
err := t.redis.HIncrBy("logol:"+uid+":stat:duration", variable, duration).Err()
if err != nil {
logger.Errorf("Failed to update stats")
}
}
// Stats information
type Stats struct {
Duration map[string]string
Flow map[string]string
}
// GetStats get workflow informations (links and duration)
func (t TransportRabbit) GetStats(uid string) Stats {
statDuration, errD := t.redis.HGetAll("logol:" + uid + ":stat:duration").Result()
statFlow, errF := t.redis.HGetAll("logol:" + uid + ":stat:flow").Result()
stats := Stats{}
if errD == nil {
stats.Duration = statDuration
}
if errF == nil {
stats.Flow = statFlow
}
return stats
}
// AddToBan appends the variable uid to the list of matches to filter out from result
func (t TransportRabbit) AddToBan(uid string, varUID string) {
t.redis.LPush("logol:"+uid+":toban", varUID)
}
// GetToBan returns the list of variable uids to be filterd out from result
func (t TransportRabbit) GetToBan(uid string) (toBan []string) {
nbElts, err := t.redis.LLen("logol:" + uid + ":toban").Result()
if err != nil {
return toBan
}
for v := int64(0); v < nbElts; v++ {
matchToBan, err := t.redis.LPop("logol:" + uid + ":toban").Result()
if err == nil {
toBan = append(toBan, matchToBan)
}
}
return toBan
}
func newRedisClient() (client *redis.Client) {
redisAddr := "localhost:6379"
osRedisAddr := os.Getenv("LOGOL_REDIS_ADDR")
if osRedisAddr != "" {
redisAddr = osRedisAddr
}
redisClient := redis.NewClient(&redis.Options{
Addr: redisAddr,
Password: "", // no password set
DB: 0, // use default DB
})
_, err := redisClient.Ping().Result()
if err != nil {
logger.Errorf("Failed to contact Redis database")
}
return redisClient
}
// NewTransportRabbit create a new transport instance
func NewTransportRabbit() *TransportRabbit {
transport := TransportRabbit{}
transport.redis = newRedisClient()
return &transport
}
| ["\"LOGOL_STATS\"", "\"LOGOL_RABBITMQ_ADDR\"", "\"LOGOL_REDIS_ADDR\""] | [] | ["LOGOL_REDIS_ADDR", "LOGOL_STATS", "LOGOL_RABBITMQ_ADDR"] | [] | ["LOGOL_REDIS_ADDR", "LOGOL_STATS", "LOGOL_RABBITMQ_ADDR"] | go | 3 | 0 | |
tests/test017/__init__.py | # -*- coding: utf8 -*-
import os
import unittest
import das # pylint: disable=import-error
class TestCase(unittest.TestCase):
TestDir = None
HomerInput = None
HomerOutput = None
SimpsonsInput = None
SimpsonsOutput = None
MultipleOutput = None
@classmethod
def setUpClass(cls):
cls.TestDir = os.path.abspath(os.path.dirname(__file__))
cls.HomerInput = cls.TestDir + "/homer.person"
cls.HomerOutput = cls.TestDir + "/homer.csv"
cls.SimpsonsInput = cls.TestDir + "/simpsons.family"
cls.SimpsonsOutput = cls.TestDir + "/simpsons.csv"
cls.MultipleOutput = cls.TestDir + "/multiple.csv"
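# Point das at this test directory so the schemas used below (e.g. csv.Person) can be found.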
os.environ["DAS_SCHEMA_PATH"] = cls.TestDir
def setUp(self):
self.addCleanup(self.cleanUp)
def tearDown(self):
pass
def cleanUp(self):
if os.path.isfile(self.HomerOutput):
os.remove(self.HomerOutput)
if os.path.isfile(self.SimpsonsOutput):
os.remove(self.SimpsonsOutput)
if os.path.isfile(self.MultipleOutput):
os.remove(self.MultipleOutput)
@classmethod
def tearDownClass(cls):
del(os.environ["DAS_SCHEMA_PATH"])
# Test functions
def testSimpsons(self):
simpsons = das.read(self.SimpsonsInput)
das.write_csv(simpsons, self.SimpsonsOutput)
from_csv = das.read_csv(self.SimpsonsOutput)[0]
self.assertEqual(simpsons, from_csv)
simpsons.father.name.given = "ned"
self.assertNotEqual(simpsons, from_csv)
def testHomer(self):
homer = das.read(self.HomerInput)
das.write_csv(homer, self.HomerOutput)
from_csv = das.read_csv(self.HomerOutput)[0]
self.assertEqual(homer, from_csv)
from_csv.name.family = "flanders"
self.assertNotEqual(homer, from_csv)
def testMultiple(self):
simpsons = das.read(self.SimpsonsInput)
homer = das.read(self.HomerInput)
ned = das.make_default("csv.Person")
ned.name.given = "ned"
ned.name.family = "flanders"
rod = das.make_default("csv.Relationship")
todd = das.make_default("csv.Relationship")
rod.data.name.given = "rod"
rod.data.name.family = "flanders"
todd.data.name.given = "todd"
todd.data.name.family = "flanders"
ned.family = [rod, todd]
das.write_csv([simpsons, homer, ned], self.MultipleOutput, alias={"csv.Person": "p", "csv.Family": "f"})
read_data = das.read_csv(self.MultipleOutput)
self.assertEqual(read_data[0], simpsons)
self.assertEqual(read_data[1], homer)
self.assertEqual(read_data[2], ned)
self.assertNotEqual(read_data[2], homer)
self.assertNotEqual(read_data[1], ned)
| [] | [] | ["DAS_SCHEMA_PATH"] | [] | ["DAS_SCHEMA_PATH"] | python | 1 | 0 | |
apiserver/cmd/apiserver/server/options.go | // Copyright (c) 2021 Tigera, Inc. All rights reserved.
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package server
import (
"crypto/tls"
"fmt"
"net"
"os"
"strings"
"time"
"github.com/go-openapi/spec"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
k8sopenapi "k8s.io/apiserver/pkg/endpoints/openapi"
"k8s.io/apiserver/pkg/features"
genericapiserver "k8s.io/apiserver/pkg/server"
genericoptions "k8s.io/apiserver/pkg/server/options"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog"
"github.com/projectcalico/calico/libcalico-go/lib/logutils"
"github.com/projectcalico/api/pkg/openapi"
"github.com/projectcalico/calico/apiserver/pkg/apiserver"
)
// CalicoServerOptions contains the aggregation of configuration structs for
// the calico server. It contains everything needed to configure a basic API server.
// It is public so that integration tests can access it.
type CalicoServerOptions struct {
RecommendedOptions *genericoptions.RecommendedOptions
// DisableAuth disables delegating authentication and authorization for testing scenarios
DisableAuth bool
// Print a swagger file at desired path and exit.
PrintSwagger bool
SwaggerFilePath string
// Enable Admission Controller support.
EnableAdmissionController bool
StopCh <-chan struct{}
}
func (s *CalicoServerOptions) addFlags(flags *pflag.FlagSet) {
s.RecommendedOptions.AddFlags(flags)
flags.BoolVar(&s.EnableAdmissionController, "enable-admission-controller-support", s.EnableAdmissionController,
"If true, admission controller hooks will be enabled.")
flags.BoolVar(&s.PrintSwagger, "print-swagger", false,
"If true, prints swagger to stdout and exits.")
flags.StringVar(&s.SwaggerFilePath, "swagger-file-path", "./",
"If print-swagger is set true, then write swagger.json to location specified. Default is current directory.")
}
func (o CalicoServerOptions) Validate(args []string) error {
errors := []error{}
errors = append(errors, o.RecommendedOptions.Validate()...)
return utilerrors.NewAggregate(errors)
}
func (o *CalicoServerOptions) Complete() error {
return nil
}
func (o *CalicoServerOptions) Config() (*apiserver.Config, error) {
// TODO have a "real" external address
if err := o.RecommendedOptions.SecureServing.MaybeDefaultWithSelfSignedCerts("localhost", nil, []net.IP{net.ParseIP("127.0.0.1")}); err != nil {
return nil, fmt.Errorf("error creating self-signed certificates: %v", err)
}
serverConfig := genericapiserver.NewRecommendedConfig(apiserver.Codecs)
serverConfig.OpenAPIConfig = genericapiserver.DefaultOpenAPIConfig(openapi.GetOpenAPIDefinitions, k8sopenapi.NewDefinitionNamer(apiserver.Scheme))
if serverConfig.OpenAPIConfig.Info == nil {
serverConfig.OpenAPIConfig.Info = &spec.Info{}
}
if serverConfig.OpenAPIConfig.Info.Version == "" {
if serverConfig.Version != nil {
serverConfig.OpenAPIConfig.Info.Version = strings.Split(serverConfig.Version.String(), "-")[0]
} else {
serverConfig.OpenAPIConfig.Info.Version = "unversioned"
}
}
if err := o.RecommendedOptions.Etcd.ApplyTo(&serverConfig.Config); err != nil {
return nil, err
}
o.RecommendedOptions.Etcd.StorageConfig.Paging = utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking)
if err := o.RecommendedOptions.SecureServing.ApplyTo(&serverConfig.SecureServing, &serverConfig.LoopbackClientConfig); err != nil {
return nil, err
}
//Explicitly setting cipher suites in order to remove deprecated ones:
//- TLS_RSA --- lack perfect forward secrecy
//- 3DES --- widely considered to be too weak
//Order matters as it indicates preference for the cipher in the selection algorithm. Also, some suites (TLS_ECDHE_ECDSA_WITH_RC4_128_SHA
//for instance, for the full list refer to golang.org/x/net/http2) are blacklisted by the HTTP/2 spec and MUST be placed after the HTTP/2-approved
//cipher suites. Not doing that might cause the client to be given an unapproved suite and reject the connection.
cipherSuites := []uint16{
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA}
serverConfig.SecureServing.CipherSuites = cipherSuites
if o.PrintSwagger {
o.DisableAuth = true
}
if !o.DisableAuth {
if err := o.RecommendedOptions.Authentication.ApplyTo(&serverConfig.Authentication, serverConfig.SecureServing, serverConfig.OpenAPIConfig); err != nil {
return nil, err
}
if err := o.RecommendedOptions.Authorization.ApplyTo(&serverConfig.Authorization); err != nil {
return nil, err
}
} else {
// always warn when auth is disabled, since this should only be used for testing
klog.Infof("Authentication and authorization disabled for testing purposes")
}
if err := o.RecommendedOptions.Audit.ApplyTo(&serverConfig.Config); err != nil {
return nil, err
}
if err := o.RecommendedOptions.Features.ApplyTo(&serverConfig.Config); err != nil {
return nil, err
}
if err := o.RecommendedOptions.CoreAPI.ApplyTo(serverConfig); err != nil {
return nil, err
}
if initializers, err := o.RecommendedOptions.ExtraAdmissionInitializers(serverConfig); err != nil {
return nil, err
} else if err := o.RecommendedOptions.Admission.ApplyTo(&serverConfig.Config, serverConfig.SharedInformerFactory, serverConfig.ClientConfig, o.RecommendedOptions.FeatureGate, initializers...); err != nil {
return nil, err
}
// Extract extra config from environment variables.
//TODO(rlb): Need to unify our logging libraries
logrusLevel := logrus.InfoLevel
if env := os.Getenv("LOG_LEVEL"); env != "" {
logrusLevel = logutils.SafeParseLogLevel(env)
}
logrus.SetLevel(logrusLevel)
minResourceRefreshInterval := 5 * time.Second
if env := os.Getenv("MIN_RESOURCE_REFRESH_INTERVAL"); env != "" {
if dur, err := time.ParseDuration(env); err != nil {
return nil, err
} else {
minResourceRefreshInterval = dur
}
}
config := &apiserver.Config{
GenericConfig: serverConfig,
ExtraConfig: apiserver.ExtraConfig{
KubernetesAPIServerConfig: serverConfig.ClientConfig,
MinResourceRefreshInterval: minResourceRefreshInterval,
},
}
return config, nil
}
| ["\"LOG_LEVEL\"", "\"MIN_RESOURCE_REFRESH_INTERVAL\""] | [] | ["MIN_RESOURCE_REFRESH_INTERVAL", "LOG_LEVEL"] | [] | ["MIN_RESOURCE_REFRESH_INTERVAL", "LOG_LEVEL"] | go | 2 | 0 | |
src/com.mentor.nucleus.bp.ui.canvas/src/com/mentor/nucleus/bp/ui/canvas/Diagramelement_c.java |
package com.mentor.nucleus.bp.ui.canvas ;
//====================================================================
//
// File: com.mentor.nucleus.bp.ui.canvas.Diagramelement_c.java
//
// WARNING: Do not edit this generated file
// Generated by ../MC-Java/java.arc, $Revision: 1.111 $
//
// (c) Copyright 2005-2014 by Mentor Graphics Corp. All rights reserved.
//
//====================================================================
import com.mentor.nucleus.bp.core.*;
import org.eclipse.swt.graphics.GC;
import java.util.* ;
import java.lang.reflect.*;
import org.eclipse.core.resources.IFile;
import org.eclipse.core.resources.IResource;
import org.eclipse.core.runtime.IAdaptable;
import org.eclipse.core.runtime.IPath;
import org.eclipse.core.runtime.Path;
import com.mentor.nucleus.bp.core.util.PersistenceUtil;
import org.eclipse.core.runtime.NullProgressMonitor;
import com.mentor.nucleus.bp.core.ui.marker.UmlProblem;
import com.mentor.nucleus.bp.core.common.*;
abstract class EV_DIAGRAMELEMENT extends genericEvent_c
{
public abstract int getEvtcode() ;
}
public class Diagramelement_c extends NonRootModelElement implements IAdaptable, Cloneable
{
// Public Constructors
public Diagramelement_c(ModelRoot modelRoot,
java.util.UUID p_m_elementid,
boolean p_m_isvisible,
java.util.UUID p_m_container_elementid)
{
super(modelRoot);
m_isvisible = p_m_isvisible;
//pre-process the uuid so that we re-use null uuid instance rather then creating a new one.
m_elementid = IdAssigner.preprocessUUID(p_m_elementid);
//extract 28 bit value only
m_elementidLongBased = 0xfffffff & p_m_elementid.getLeastSignificantBits();
//pre-process the uuid so that we re-use null uuid instance rather then creating a new one.
m_container_elementid = IdAssigner.preprocessUUID(p_m_container_elementid);
setUniqueId(m_elementid);
Object [] key = {m_elementid};
addInstanceToMap(key);
}
static public Diagramelement_c createProxy(ModelRoot modelRoot,
java.util.UUID p_m_elementid,
boolean p_m_isvisible,
java.util.UUID p_m_container_elementid, String p_contentPath, IPath p_localPath)
{
ModelRoot resolvedModelRoot = ModelRoot.findModelRoot(modelRoot, p_contentPath, p_localPath);
// if a model root was not resolved it is most likely
// due to a missing file of the proxy, defualt back to
// the original model root
if(resolvedModelRoot != null)
modelRoot = resolvedModelRoot;
InstanceList instances = modelRoot.getInstanceList(Diagramelement_c.class);
Diagramelement_c new_inst = null;
synchronized(instances) {
Object[] key = {p_m_elementid};
new_inst = (Diagramelement_c) instances.get(key) ;
}
String contentPath = PersistenceUtil.resolveRelativePath(
p_localPath,
new Path(p_contentPath));
if(modelRoot.isNewCompareRoot()) {
// for comparisons we do not want to change
// the content path
contentPath = p_contentPath;
}
if ( new_inst != null && !modelRoot.isCompareRoot()) {
PersistableModelComponent pmc = new_inst.getPersistableComponent();
if (pmc == null) {
// dangling reference, redo this instance
new_inst.batchUnrelate();
new_inst.m_isvisible = p_m_isvisible;
//pre-process the uuid so that we re-use null uuid instance rather then creating a new one.
new_inst.m_elementid = IdAssigner.preprocessUUID(p_m_elementid);
//extract 28 bit value only
new_inst.m_elementidLongBased = 0xfffffff & p_m_elementid.getLeastSignificantBits();
//pre-process the uuid so that we re-use null uuid instance rather then creating a new one.
new_inst.m_container_elementid = IdAssigner.preprocessUUID(p_m_container_elementid);
}
}
if ( new_inst == null ) {
// there is no instance matching the id, create a proxy
// if the resource doesn't exist then this will be a dangling reference
new_inst = new Diagramelement_c(modelRoot,
p_m_elementid,
p_m_isvisible,
p_m_container_elementid
);
new_inst.m_contentPath = contentPath;
}
return new_inst;
}
static public Diagramelement_c resolveInstance(ModelRoot modelRoot,
java.util.UUID p_m_elementid,
boolean p_m_isvisible,
java.util.UUID p_m_container_elementid){
InstanceList instances = modelRoot.getInstanceList(Diagramelement_c.class);
Diagramelement_c source = null;
synchronized(instances) {
Object [] key = {
p_m_elementid
};
source = (Diagramelement_c) instances.get(key);
if (source != null && !modelRoot.isCompareRoot()) {
source.convertFromProxy();
source.batchUnrelate();
source.m_isvisible = p_m_isvisible;
//pre-process the uuid so that we re-use null uuid instance rather then creating a new one.
source.m_elementid = IdAssigner.preprocessUUID(p_m_elementid);
//extract 28 bit value only
source.m_elementidLongBased = 0xfffffff & p_m_elementid.getLeastSignificantBits();
//pre-process the uuid so that we re-use null uuid instance rather then creating a new one.
source.m_container_elementid = IdAssigner.preprocessUUID(p_m_container_elementid);
return source ;
}
}
// there is no instance matching the id
Diagramelement_c new_inst = new Diagramelement_c(modelRoot,
p_m_elementid,
p_m_isvisible,
p_m_container_elementid
);
return new_inst;
}
public Diagramelement_c(ModelRoot modelRoot)
{
super(modelRoot);
m_elementid = idAssigner.createUUID();
m_container_elementid = IdAssigner.NULL_UUID;
setUniqueId(m_elementid);
Object [] key = {m_elementid};
addInstanceToMap(key);
}
public Object getInstanceKey() {
Object [] key = {m_elementid};
return key;
}
public boolean setInstanceKey(UUID p_newKey){
boolean changed = false;
// round p1
// round p2
// round p3
// round p5
if (m_elementid != p_newKey ){
m_elementid = p_newKey;
changed = true;
}
return changed;
}
public boolean equals (Object elem) {
if (!(elem instanceof Diagramelement_c)) {
return false;
}
// check that the model-roots are the same
if (((NonRootModelElement)elem).getModelRoot() != getModelRoot()) {
return false;
}
return identityEquals(elem);
}
public boolean identityEquals(Object elem) {
if (!(elem instanceof Diagramelement_c)) {
return false;
}
Diagramelement_c me = (Diagramelement_c)elem;
// don't allow an empty id-value to produce a false positive result;
// in this case, use whether the two instances are actually the same
// one in memory, instead
if ((IdAssigner.NULL_UUID.equals(getElementid()) || IdAssigner.NULL_UUID.equals(((Diagramelement_c)elem).getElementid())) && this != elem) {
return false;
}
if (!getElementid().equals(((Diagramelement_c)elem).getElementid())) return false;
return true;
}
public boolean cachedIdentityEquals(Object elem) {
if (!(elem instanceof Diagramelement_c)) {
return false;
}
Diagramelement_c me = (Diagramelement_c)elem;
if (!getElementid().equals(((Diagramelement_c)elem).getElementid())) return false;
return true;
}
// Attributes
private boolean m_isvisible ;
private java.util.UUID m_elementid ;
private long m_elementidLongBased ;
private java.util.UUID m_container_elementid ;
// declare association references from this class
// referring navigation
Graphelement_c ContainerGraphelement ;
public void relateAcrossR307To(Graphelement_c target)
{
relateAcrossR307To(target, true);
}
public void relateAcrossR307To(Graphelement_c target, boolean notifyChanges)
{
if (target == null) return;
if (target == ContainerGraphelement) return; // already related
if ( ContainerGraphelement != target ) {
Object oldKey = getInstanceKey();
if (ContainerGraphelement != null) {
ContainerGraphelement.clearBackPointerR307To(this);
if (Boolean.valueOf(System.getenv("PTC_MCC_ENABLED")) == true) { //$NON-NLS-1$
Ooaofgraphics.log.println(ILogger.CONSISTENCY,
"Diagramelement_c.relateAcrossR307To(Graphelement_c target)",
"Relate performed across R307 from DiagramElement to GraphElement without unrelate of prior instance.");
}
}
ContainerGraphelement = target ;
if(IdAssigner.NULL_UUID.equals(target.getElementid()))
{
// do not update cached value
} else {
// update cached value
m_container_elementid = target.getElementidCachedValue();
}
updateInstanceKey(oldKey, getInstanceKey());
target.setBackPointerR307To(this);
target.addRef();
if(notifyChanges) {
RelationshipChangeModelDelta change = new RelationshipChangeModelDelta(Modeleventnotification_c.DELTA_ELEMENT_RELATED, this, target, "307", "");
Ooaofgraphics.getDefaultInstance().fireModelElementRelationChanged(change);
}
}
}
public void unrelateAcrossR307From(Graphelement_c target)
{
unrelateAcrossR307From(target, true);
}
public void unrelateAcrossR307From(Graphelement_c target, boolean notifyChanges)
{
if (target == null) return;
if (ContainerGraphelement == null) return; // already unrelated
if (target != ContainerGraphelement) {
Exception e = new Exception();
e.fillInStackTrace();
CanvasPlugin.logError("Tried to unrelate from non-related instance across R307", e);
return;
}
if (target != null) {
target.clearBackPointerR307To(this);
}
if(ContainerGraphelement != null) {
m_container_elementid = ContainerGraphelement.getElementid();
if(IdAssigner.NULL_UUID.equals(m_container_elementid))
{
m_container_elementid = ContainerGraphelement.getElementidCachedValue();
}
ContainerGraphelement = null ;
target.removeRef();
if(notifyChanges) {
RelationshipChangeModelDelta change = new RelationshipChangeModelDelta(Modeleventnotification_c.DELTA_ELEMENT_UNRELATED, this, target, "307", "");
Ooaofgraphics.getDefaultInstance().fireModelElementRelationChanged(change);
}
}
}
public static Diagramelement_c getOneDIM_ELEOnR307(Graphelement_c [] targets)
{
return getOneDIM_ELEOnR307(targets, null);
}
public static Diagramelement_c getOneDIM_ELEOnR307(Graphelement_c [] targets, ClassQueryInterface_c test)
{
Diagramelement_c ret_val = null;
if (targets != null)
{
for (int i = 0; i < targets.length && ret_val == null; ++i )
{
ret_val = getOneDIM_ELEOnR307(targets[i], test);
}
}
return ret_val;
}
public static Diagramelement_c getOneDIM_ELEOnR307(Graphelement_c target)
{
return getOneDIM_ELEOnR307(target, null);
}
public static Diagramelement_c getOneDIM_ELEOnR307(Graphelement_c target, boolean loadComponent)
{
return getOneDIM_ELEOnR307(target.getModelRoot(), target, null, loadComponent);
}
public static Diagramelement_c getOneDIM_ELEOnR307(Graphelement_c target, ClassQueryInterface_c test)
{
if (target != null) {
return getOneDIM_ELEOnR307(target.getModelRoot(), target, test);
}
return null;
}
public static Diagramelement_c getOneDIM_ELEOnR307(ModelRoot modelRoot, Graphelement_c target, ClassQueryInterface_c test)
{
return getOneDIM_ELEOnR307(modelRoot, target, test, true);
}
public static Diagramelement_c getOneDIM_ELEOnR307(ModelRoot modelRoot, Graphelement_c target, ClassQueryInterface_c test, boolean loadComponent)
{
return find_getOneDIM_ELEOnR307(modelRoot,target,test);
}
private static Diagramelement_c find_getOneDIM_ELEOnR307(ModelRoot modelRoot, Graphelement_c target, ClassQueryInterface_c test)
{
if (target != null) {
synchronized(target.backPointer_ContainedDiagramelementContained_R307) {
for ( int i = 0; i < target.backPointer_ContainedDiagramelementContained_R307.size(); ++i ) {
Diagramelement_c source = (Diagramelement_c)target.backPointer_ContainedDiagramelementContained_R307.get(i) ;
if (source != null && (test == null || test.evaluate(source))) {
return source ;
}
}
}
}
// not found
return null ;
}
public static Diagramelement_c [] getManyDIM_ELEsOnR307(Graphelement_c [] targets)
{
return getManyDIM_ELEsOnR307(targets, null);
}
public static Diagramelement_c [] getManyDIM_ELEsOnR307(Graphelement_c [] targets, boolean loadComponent)
{
return getManyDIM_ELEsOnR307(targets, null, loadComponent);
}
public static Diagramelement_c [] getManyDIM_ELEsOnR307(Graphelement_c [] targets, ClassQueryInterface_c test)
{
return getManyDIM_ELEsOnR307(targets, test, true);
}
public static Diagramelement_c [] getManyDIM_ELEsOnR307(Graphelement_c [] targets, ClassQueryInterface_c test, boolean loadComponent)
{
if(targets == null || targets.length == 0 || targets[0] == null)
return new Diagramelement_c[0];
ModelRoot modelRoot = targets[0].getModelRoot();
InstanceList instances = modelRoot.getInstanceList(Diagramelement_c.class);
Vector matches = new Vector();
for (int i = 0 ; i < targets.length ; i++) {
synchronized(targets[i].backPointer_ContainedDiagramelementContained_R307) {
for (int j = 0; j < targets[i].backPointer_ContainedDiagramelementContained_R307.size(); ++j) {
Diagramelement_c source = (Diagramelement_c)targets[i].backPointer_ContainedDiagramelementContained_R307.get(j) ;
if (source != null && (test == null || test.evaluate(source))) {
matches.add(source);
}
}
}
}
if (matches.size() > 0) {
Diagramelement_c[] ret_set = new Diagramelement_c[matches.size()];
matches.copyInto(ret_set);
return ret_set;
} else {
return new Diagramelement_c[0];
}
}
public static Diagramelement_c [] getManyDIM_ELEsOnR307(Graphelement_c target)
{
return getManyDIM_ELEsOnR307(target, null);
}
public static Diagramelement_c [] getManyDIM_ELEsOnR307(Graphelement_c target, boolean loadComponent)
{
return getManyDIM_ELEsOnR307(target, null, loadComponent);
}
public static Diagramelement_c [] getManyDIM_ELEsOnR307(Graphelement_c target, ClassQueryInterface_c test)
{
return getManyDIM_ELEsOnR307(target, test, true);
}
public static Diagramelement_c [] getManyDIM_ELEsOnR307(Graphelement_c target, ClassQueryInterface_c test, boolean loadComponent)
{
if(target == null)
return new Diagramelement_c[0];
ModelRoot modelRoot = target.getModelRoot();
Vector matches = new Vector();
synchronized(target.backPointer_ContainedDiagramelementContained_R307) {
for (int i = 0; i < target.backPointer_ContainedDiagramelementContained_R307.size(); ++i) {
Diagramelement_c source = (Diagramelement_c)target.backPointer_ContainedDiagramelementContained_R307.get(i);
if (source != null && (test == null || test.evaluate(source))) {
matches.add(source);
}
}
}
if (matches.size() > 0) {
Diagramelement_c[] ret_set = new Diagramelement_c[matches.size()];
matches.copyInto(ret_set);
return ret_set;
} else {
return new Diagramelement_c[0];
}
}
// declare associations referring to this class
// referred to navigation
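// Back-pointer storage for association R308: one-to-many links keep an ArrayList of the
// formalizing instances (one-to-one links such as R302 below use a single field); the
// relate/unrelate and getOne/getMany helpers that follow navigate the association through
// these pointers.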
ArrayList<Reference_c> backPointer_ReferenceReferenceReference_R308 = new ArrayList<Reference_c>();
public void setReferenceOrderInChildListR308(Reference_c target, int index) {
unrelateAcrossR308From(target);
Reference_c[] elements = Reference_c.getManyDIM_REFsOnR308(this);
int count = 0;
for(int i = 0; i < elements.length; i++) {
if(count >= index) {
unrelateAcrossR308From((Reference_c) elements[i]);
}
count++;
}
relateAcrossR308To(target);
count = 0;
for(int i = 0; i < elements.length; i++) {
if(count >= index) {
relateAcrossR308To((Reference_c) elements[i]);
}
count++;
}
}
public void relateAcrossR308To(Reference_c target)
{
if (target != null) {
target.relateAcrossR308To(this, true) ;
}
}
public void relateAcrossR308To(Reference_c target, boolean notifyChanges)
{
if (target != null) {
target.relateAcrossR308To(this, notifyChanges) ;
}
}
public void setBackPointerR308To(Reference_c target)
{
synchronized (backPointer_ReferenceReferenceReference_R308) {
backPointer_ReferenceReferenceReference_R308.add(target);
}
}
public void unrelateAcrossR308From(Reference_c target)
{
if (target != null) {
target.unrelateAcrossR308From(this, true) ;
}
}
public void unrelateAcrossR308From(Reference_c target, boolean notifyChanges)
{
if (target != null) {
target.unrelateAcrossR308From(this, notifyChanges) ;
}
}
public void clearBackPointerR308To(Reference_c target)
{
synchronized (backPointer_ReferenceReferenceReference_R308) {
backPointer_ReferenceReferenceReference_R308.remove(target);
}
}
public static Diagramelement_c getOneDIM_ELEOnR308(Reference_c [] targets)
{
return getOneDIM_ELEOnR308(targets, null);
}
public static Diagramelement_c getOneDIM_ELEOnR308(Reference_c [] targets, ClassQueryInterface_c test)
{
return getOneDIM_ELEOnR308(targets, test, true);
}
public static Diagramelement_c getOneDIM_ELEOnR308(Reference_c [] targets, ClassQueryInterface_c test, boolean loadComponent)
{
Diagramelement_c ret_val = null;
if (targets != null)
{
for (int i = 0; i < targets.length && ret_val == null; ++i )
{
if (test != null) {
Diagramelement_c candidate_val = getOneDIM_ELEOnR308(targets[i],true);
if ( candidate_val != null && test.evaluate(candidate_val) )
{
ret_val = candidate_val;
break;
}
}
else {
ret_val = getOneDIM_ELEOnR308(targets[i], loadComponent);
if (ret_val != null) {
break;
}
}
}
}
return ret_val;
}
public static Diagramelement_c getOneDIM_ELEOnR308(Reference_c target)
{
return getOneDIM_ELEOnR308(target, true);
}
public static Diagramelement_c getOneDIM_ELEOnR308(Reference_c target, boolean loadComponent)
{
if (target != null) {
if(loadComponent){
target.loadProxy();
}
return target.ReferencedDiagramelement ;
} else {
return null;
}
}
public static Diagramelement_c [] getManyDIM_ELEsOnR308(Reference_c [] targets,
ClassQueryInterface_c test)
{
return getManyDIM_ELEsOnR308(targets, test, true);
}
public static Diagramelement_c [] getManyDIM_ELEsOnR308(Reference_c [] targets,
ClassQueryInterface_c test, boolean loadComponent)
{
if(targets == null || targets.length == 0 || targets[0] == null)
return new Diagramelement_c[0];
LinkedHashSet<Diagramelement_c> elementsSet = new LinkedHashSet<Diagramelement_c>();
for (int i = 0; i < targets.length; i++) {
if(loadComponent && targets[i] != null && targets[i].ReferencedDiagramelement == null)
targets[i].loadProxy();
Diagramelement_c associate = targets[i].ReferencedDiagramelement;
if (targets[i] != null && associate != null
&& (test == null || test.evaluate(associate))) {
if (elementsSet.add(associate)){
}
}
}
Diagramelement_c[] result = new Diagramelement_c[elementsSet.size()];
elementsSet.toArray(result);
return result;
}
public static Diagramelement_c [] getManyDIM_ELEsOnR308(Reference_c [] targets)
{
return getManyDIM_ELEsOnR308(targets, null);
}
public static Diagramelement_c [] getManyDIM_ELEsOnR308(Reference_c target,
ClassQueryInterface_c test, boolean loadComponent)
{
if (target != null) {
Reference_c [] targetArray = new Reference_c[1];
targetArray[0] = target;
return getManyDIM_ELEsOnR308(targetArray, test, loadComponent);
} else {
Diagramelement_c [] result = new Diagramelement_c [0] ;
return result ;
}
}
public static Diagramelement_c [] getManyDIM_ELEsOnR308(Reference_c target,
ClassQueryInterface_c test)
{
return getManyDIM_ELEsOnR308(target, test, true);
}
public static Diagramelement_c [] getManyDIM_ELEsOnR308(Reference_c target)
{
return getManyDIM_ELEsOnR308(target, null, true);
}
public static Diagramelement_c [] getManyDIM_ELEsOnR308(Reference_c target, boolean loadComponent)
{
return getManyDIM_ELEsOnR308(target, null, loadComponent);
}
// referred to navigation
Reference_c backPointer_IsSubtypeReferenceIsSubtype_R302;
public void setReferenceOrderInChildListR302(Reference_c target, int index) {
unrelateAcrossR302From(target);
Reference_c[] elements = Reference_c.getManyDIM_REFsOnR302(this);
int count = 0;
for(int i = 0; i < elements.length; i++) {
if(count >= index) {
unrelateAcrossR302From((Reference_c) elements[i]);
}
count++;
}
relateAcrossR302To(target);
count = 0;
for(int i = 0; i < elements.length; i++) {
if(count >= index) {
relateAcrossR302To((Reference_c) elements[i]);
}
count++;
}
}
public void relateAcrossR302To(Reference_c target)
{
if (target != null) {
target.relateAcrossR302To(this, true) ;
}
}
public void relateAcrossR302To(Reference_c target, boolean notifyChanges)
{
if (target != null) {
target.relateAcrossR302To(this, notifyChanges) ;
}
}
public void setBackPointerR302To(Reference_c target)
{
backPointer_IsSubtypeReferenceIsSubtype_R302 = target;
}
public void unrelateAcrossR302From(Reference_c target)
{
if (target != null) {
target.unrelateAcrossR302From(this, true) ;
}
}
public void unrelateAcrossR302From(Reference_c target, boolean notifyChanges)
{
if (target != null) {
target.unrelateAcrossR302From(this, notifyChanges) ;
}
}
public void clearBackPointerR302To(Reference_c target)
{
if (target == backPointer_IsSubtypeReferenceIsSubtype_R302) {
backPointer_IsSubtypeReferenceIsSubtype_R302 = null;
}
}
public static Diagramelement_c getOneDIM_ELEOnR302(Reference_c [] targets)
{
return getOneDIM_ELEOnR302(targets, null);
}
public static Diagramelement_c getOneDIM_ELEOnR302(Reference_c [] targets, ClassQueryInterface_c test)
{
return getOneDIM_ELEOnR302(targets, test, true);
}
public static Diagramelement_c getOneDIM_ELEOnR302(Reference_c [] targets, ClassQueryInterface_c test, boolean loadComponent)
{
Diagramelement_c ret_val = null;
if (targets != null)
{
for (int i = 0; i < targets.length && ret_val == null; ++i )
{
if (test != null) {
Diagramelement_c candidate_val = getOneDIM_ELEOnR302(targets[i],true);
if ( candidate_val != null && test.evaluate(candidate_val) )
{
ret_val = candidate_val;
break;
}
}
else {
ret_val = getOneDIM_ELEOnR302(targets[i], loadComponent);
if (ret_val != null) {
break;
}
}
}
}
return ret_val;
}
public static Diagramelement_c getOneDIM_ELEOnR302(Reference_c target)
{
return getOneDIM_ELEOnR302(target, true);
}
public static Diagramelement_c getOneDIM_ELEOnR302(Reference_c target, boolean loadComponent)
{
if (target != null) {
if(loadComponent){
target.loadProxy();
}
return target.IsSupertypeDiagramelement ;
} else {
return null;
}
}
public static Diagramelement_c [] getManyDIM_ELEsOnR302(Reference_c [] targets,
ClassQueryInterface_c test)
{
return getManyDIM_ELEsOnR302(targets, test, true);
}
public static Diagramelement_c [] getManyDIM_ELEsOnR302(Reference_c [] targets,
ClassQueryInterface_c test, boolean loadComponent)
{
if(targets == null || targets.length == 0 || targets[0] == null)
return new Diagramelement_c[0];
LinkedHashSet<Diagramelement_c> elementsSet = new LinkedHashSet<Diagramelement_c>();
for (int i = 0; i < targets.length; i++) {
if(loadComponent && targets[i] != null && targets[i].IsSupertypeDiagramelement == null)
targets[i].loadProxy();
Diagramelement_c associate = targets[i].IsSupertypeDiagramelement;
if (targets[i] != null && associate != null
&& (test == null || test.evaluate(associate))) {
if (elementsSet.add(associate)){
}
}
}
Diagramelement_c[] result = new Diagramelement_c[elementsSet.size()];
elementsSet.toArray(result);
return result;
}
public static Diagramelement_c [] getManyDIM_ELEsOnR302(Reference_c [] targets)
{
return getManyDIM_ELEsOnR302(targets, null);
}
public static Diagramelement_c [] getManyDIM_ELEsOnR302(Reference_c target,
ClassQueryInterface_c test, boolean loadComponent)
{
if (target != null) {
Reference_c [] targetArray = new Reference_c[1];
targetArray[0] = target;
return getManyDIM_ELEsOnR302(targetArray, test, loadComponent);
} else {
Diagramelement_c [] result = new Diagramelement_c [0] ;
return result ;
}
}
public static Diagramelement_c [] getManyDIM_ELEsOnR302(Reference_c target,
ClassQueryInterface_c test)
{
return getManyDIM_ELEsOnR302(target, test, true);
}
public static Diagramelement_c [] getManyDIM_ELEsOnR302(Reference_c target)
{
return getManyDIM_ELEsOnR302(target, null, true);
}
public static Diagramelement_c [] getManyDIM_ELEsOnR302(Reference_c target, boolean loadComponent)
{
return getManyDIM_ELEsOnR302(target, null, loadComponent);
}
// referred to navigation
Graphelement_c backPointer_IsSubtypeGraphelementIsSubtype_R302;
public void setGraphelementOrderInChildListR302(Graphelement_c target, int index) {
unrelateAcrossR302From(target);
Graphelement_c[] elements = Graphelement_c.getManyDIM_GEsOnR302(this);
int count = 0;
for(int i = 0; i < elements.length; i++) {
if(count >= index) {
unrelateAcrossR302From((Graphelement_c) elements[i]);
}
count++;
}
relateAcrossR302To(target);
count = 0;
for(int i = 0; i < elements.length; i++) {
if(count >= index) {
relateAcrossR302To((Graphelement_c) elements[i]);
}
count++;
}
}
public void relateAcrossR302To(Graphelement_c target)
{
if (target != null) {
target.relateAcrossR302To(this, true) ;
}
}
public void relateAcrossR302To(Graphelement_c target, boolean notifyChanges)
{
if (target != null) {
target.relateAcrossR302To(this, notifyChanges) ;
}
}
public void setBackPointerR302To(Graphelement_c target)
{
backPointer_IsSubtypeGraphelementIsSubtype_R302 = target;
}
public void unrelateAcrossR302From(Graphelement_c target)
{
if (target != null) {
target.unrelateAcrossR302From(this, true) ;
}
}
public void unrelateAcrossR302From(Graphelement_c target, boolean notifyChanges)
{
if (target != null) {
target.unrelateAcrossR302From(this, notifyChanges) ;
}
}
public void clearBackPointerR302To(Graphelement_c target)
{
if (target == backPointer_IsSubtypeGraphelementIsSubtype_R302) {
backPointer_IsSubtypeGraphelementIsSubtype_R302 = null;
}
}
public static Diagramelement_c getOneDIM_ELEOnR302(Graphelement_c [] targets)
{
return getOneDIM_ELEOnR302(targets, null);
}
public static Diagramelement_c getOneDIM_ELEOnR302(Graphelement_c [] targets, ClassQueryInterface_c test)
{
return getOneDIM_ELEOnR302(targets, test, true);
}
public static Diagramelement_c getOneDIM_ELEOnR302(Graphelement_c [] targets, ClassQueryInterface_c test, boolean loadComponent)
{
Diagramelement_c ret_val = null;
if (targets != null)
{
for (int i = 0; i < targets.length && ret_val == null; ++i )
{
if (test != null) {
Diagramelement_c candidate_val = getOneDIM_ELEOnR302(targets[i],true);
if ( candidate_val != null && test.evaluate(candidate_val) )
{
ret_val = candidate_val;
break;
}
}
else {
ret_val = getOneDIM_ELEOnR302(targets[i], loadComponent);
if (ret_val != null) {
break;
}
}
}
}
return ret_val;
}
public static Diagramelement_c getOneDIM_ELEOnR302(Graphelement_c target)
{
return getOneDIM_ELEOnR302(target, true);
}
public static Diagramelement_c getOneDIM_ELEOnR302(Graphelement_c target, boolean loadComponent)
{
if (target != null) {
if(loadComponent){
target.loadProxy();
}
return target.IsSupertypeDiagramelement ;
} else {
return null;
}
}
public static Diagramelement_c [] getManyDIM_ELEsOnR302(Graphelement_c [] targets,
ClassQueryInterface_c test)
{
return getManyDIM_ELEsOnR302(targets, test, true);
}
public static Diagramelement_c [] getManyDIM_ELEsOnR302(Graphelement_c [] targets,
ClassQueryInterface_c test, boolean loadComponent)
{
if(targets == null || targets.length == 0 || targets[0] == null)
return new Diagramelement_c[0];
LinkedHashSet<Diagramelement_c> elementsSet = new LinkedHashSet<Diagramelement_c>();
for (int i = 0; i < targets.length; i++) {
if(loadComponent && targets[i] != null && targets[i].IsSupertypeDiagramelement == null)
targets[i].loadProxy();
Diagramelement_c associate = targets[i].IsSupertypeDiagramelement;
if (targets[i] != null && associate != null
&& (test == null || test.evaluate(associate))) {
if (elementsSet.add(associate)){
}
}
}
Diagramelement_c[] result = new Diagramelement_c[elementsSet.size()];
elementsSet.toArray(result);
return result;
}
public static Diagramelement_c [] getManyDIM_ELEsOnR302(Graphelement_c [] targets)
{
return getManyDIM_ELEsOnR302(targets, null);
}
public static Diagramelement_c [] getManyDIM_ELEsOnR302(Graphelement_c target,
ClassQueryInterface_c test, boolean loadComponent)
{
if (target != null) {
Graphelement_c [] targetArray = new Graphelement_c[1];
targetArray[0] = target;
return getManyDIM_ELEsOnR302(targetArray, test, loadComponent);
} else {
Diagramelement_c [] result = new Diagramelement_c [0] ;
return result ;
}
}
public static Diagramelement_c [] getManyDIM_ELEsOnR302(Graphelement_c target,
ClassQueryInterface_c test)
{
return getManyDIM_ELEsOnR302(target, test, true);
}
public static Diagramelement_c [] getManyDIM_ELEsOnR302(Graphelement_c target)
{
return getManyDIM_ELEsOnR302(target, null, true);
}
public static Diagramelement_c [] getManyDIM_ELEsOnR302(Graphelement_c target, boolean loadComponent)
{
return getManyDIM_ELEsOnR302(target, null, loadComponent);
}
// referred to navigation
Leafelement_c backPointer_IsSubtypeLeafelementIsSubtype_R302;
public void setLeafelementOrderInChildListR302(Leafelement_c target, int index) {
unrelateAcrossR302From(target);
Leafelement_c[] elements = Leafelement_c.getManyDIM_LELsOnR302(this);
int count = 0;
for(int i = 0; i < elements.length; i++) {
if(count >= index) {
unrelateAcrossR302From((Leafelement_c) elements[i]);
}
count++;
}
relateAcrossR302To(target);
count = 0;
for(int i = 0; i < elements.length; i++) {
if(count >= index) {
relateAcrossR302To((Leafelement_c) elements[i]);
}
count++;
}
}
public void relateAcrossR302To(Leafelement_c target)
{
if (target != null) {
target.relateAcrossR302To(this, true) ;
}
}
public void relateAcrossR302To(Leafelement_c target, boolean notifyChanges)
{
if (target != null) {
target.relateAcrossR302To(this, notifyChanges) ;
}
}
public void setBackPointerR302To(Leafelement_c target)
{
backPointer_IsSubtypeLeafelementIsSubtype_R302 = target;
}
public void unrelateAcrossR302From(Leafelement_c target)
{
if (target != null) {
target.unrelateAcrossR302From(this, true) ;
}
}
public void unrelateAcrossR302From(Leafelement_c target, boolean notifyChanges)
{
if (target != null) {
target.unrelateAcrossR302From(this, notifyChanges) ;
}
}
public void clearBackPointerR302To(Leafelement_c target)
{
if (target == backPointer_IsSubtypeLeafelementIsSubtype_R302) {
backPointer_IsSubtypeLeafelementIsSubtype_R302 = null;
}
}
public static Diagramelement_c getOneDIM_ELEOnR302(Leafelement_c [] targets)
{
return getOneDIM_ELEOnR302(targets, null);
}
public static Diagramelement_c getOneDIM_ELEOnR302(Leafelement_c [] targets, ClassQueryInterface_c test)
{
return getOneDIM_ELEOnR302(targets, test, true);
}
public static Diagramelement_c getOneDIM_ELEOnR302(Leafelement_c [] targets, ClassQueryInterface_c test, boolean loadComponent)
{
Diagramelement_c ret_val = null;
if (targets != null)
{
for (int i = 0; i < targets.length && ret_val == null; ++i )
{
if (test != null) {
Diagramelement_c candidate_val = getOneDIM_ELEOnR302(targets[i],true);
if ( candidate_val != null && test.evaluate(candidate_val) )
{
ret_val = candidate_val;
break;
}
}
else {
ret_val = getOneDIM_ELEOnR302(targets[i], loadComponent);
if (ret_val != null) {
break;
}
}
}
}
return ret_val;
}
public static Diagramelement_c getOneDIM_ELEOnR302(Leafelement_c target)
{
return getOneDIM_ELEOnR302(target, true);
}
public static Diagramelement_c getOneDIM_ELEOnR302(Leafelement_c target, boolean loadComponent)
{
if (target != null) {
if(loadComponent){
target.loadProxy();
}
return target.IsSupertypeDiagramelement ;
} else {
return null;
}
}
public static Diagramelement_c [] getManyDIM_ELEsOnR302(Leafelement_c [] targets,
ClassQueryInterface_c test)
{
return getManyDIM_ELEsOnR302(targets, test, true);
}
public static Diagramelement_c [] getManyDIM_ELEsOnR302(Leafelement_c [] targets,
ClassQueryInterface_c test, boolean loadComponent)
{
if(targets == null || targets.length == 0 || targets[0] == null)
return new Diagramelement_c[0];
LinkedHashSet<Diagramelement_c> elementsSet = new LinkedHashSet<Diagramelement_c>();
for (int i = 0; i < targets.length; i++) {
if(loadComponent && targets[i] != null && targets[i].IsSupertypeDiagramelement == null)
targets[i].loadProxy();
Diagramelement_c associate = targets[i].IsSupertypeDiagramelement;
if (targets[i] != null && associate != null
&& (test == null || test.evaluate(associate))) {
if (elementsSet.add(associate)){
}
}
}
Diagramelement_c[] result = new Diagramelement_c[elementsSet.size()];
elementsSet.toArray(result);
return result;
}
public static Diagramelement_c [] getManyDIM_ELEsOnR302(Leafelement_c [] targets)
{
return getManyDIM_ELEsOnR302(targets, null);
}
public static Diagramelement_c [] getManyDIM_ELEsOnR302(Leafelement_c target,
ClassQueryInterface_c test, boolean loadComponent)
{
if (target != null) {
Leafelement_c [] targetArray = new Leafelement_c[1];
targetArray[0] = target;
return getManyDIM_ELEsOnR302(targetArray, test, loadComponent);
} else {
Diagramelement_c [] result = new Diagramelement_c [0] ;
return result ;
}
}
public static Diagramelement_c [] getManyDIM_ELEsOnR302(Leafelement_c target,
ClassQueryInterface_c test)
{
return getManyDIM_ELEsOnR302(target, test, true);
}
public static Diagramelement_c [] getManyDIM_ELEsOnR302(Leafelement_c target)
{
return getManyDIM_ELEsOnR302(target, null, true);
}
public static Diagramelement_c [] getManyDIM_ELEsOnR302(Leafelement_c target, boolean loadComponent)
{
return getManyDIM_ELEsOnR302(target, null, loadComponent);
}
// referred to navigation
ArrayList<Property_c> backPointer_PropertyPropertyProperty_R310 = new ArrayList<Property_c>();
public void setPropertyOrderInChildListR310(Property_c target, int index) {
unrelateAcrossR310From(target);
Property_c[] elements = Property_c.getManyDIM_PRPsOnR310(this);
int count = 0;
for(int i = 0; i < elements.length; i++) {
if(count >= index) {
unrelateAcrossR310From((Property_c) elements[i]);
}
count++;
}
relateAcrossR310To(target);
count = 0;
for(int i = 0; i < elements.length; i++) {
if(count >= index) {
relateAcrossR310To((Property_c) elements[i]);
}
count++;
}
}
public void relateAcrossR310To(Property_c target)
{
if (target != null) {
target.relateAcrossR310To(this, true) ;
}
}
public void relateAcrossR310To(Property_c target, boolean notifyChanges)
{
if (target != null) {
target.relateAcrossR310To(this, notifyChanges) ;
}
}
public void setBackPointerR310To(Property_c target)
{
synchronized (backPointer_PropertyPropertyProperty_R310) {
backPointer_PropertyPropertyProperty_R310.add(target);
}
}
public void unrelateAcrossR310From(Property_c target)
{
if (target != null) {
target.unrelateAcrossR310From(this, true) ;
}
}
public void unrelateAcrossR310From(Property_c target, boolean notifyChanges)
{
if (target != null) {
target.unrelateAcrossR310From(this, notifyChanges) ;
}
}
public void clearBackPointerR310To(Property_c target)
{
synchronized (backPointer_PropertyPropertyProperty_R310) {
backPointer_PropertyPropertyProperty_R310.remove(target);
}
}
public static Diagramelement_c getOneDIM_ELEOnR310(Property_c [] targets)
{
return getOneDIM_ELEOnR310(targets, null);
}
public static Diagramelement_c getOneDIM_ELEOnR310(Property_c [] targets, ClassQueryInterface_c test)
{
return getOneDIM_ELEOnR310(targets, test, true);
}
public static Diagramelement_c getOneDIM_ELEOnR310(Property_c [] targets, ClassQueryInterface_c test, boolean loadComponent)
{
Diagramelement_c ret_val = null;
if (targets != null)
{
for (int i = 0; i < targets.length && ret_val == null; ++i )
{
if (test != null) {
Diagramelement_c candidate_val = getOneDIM_ELEOnR310(targets[i],true);
if ( candidate_val != null && test.evaluate(candidate_val) )
{
ret_val = candidate_val;
break;
}
}
else {
ret_val = getOneDIM_ELEOnR310(targets[i], loadComponent);
if (ret_val != null) {
break;
}
}
}
}
return ret_val;
}
public static Diagramelement_c getOneDIM_ELEOnR310(Property_c target)
{
return getOneDIM_ELEOnR310(target, true);
}
public static Diagramelement_c getOneDIM_ELEOnR310(Property_c target, boolean loadComponent)
{
if (target != null) {
if(loadComponent){
target.loadProxy();
}
return target.ContainerDiagramelement ;
} else {
return null;
}
}
public static Diagramelement_c [] getManyDIM_ELEsOnR310(Property_c [] targets,
ClassQueryInterface_c test)
{
return getManyDIM_ELEsOnR310(targets, test, true);
}
public static Diagramelement_c [] getManyDIM_ELEsOnR310(Property_c [] targets,
ClassQueryInterface_c test, boolean loadComponent)
{
if(targets == null || targets.length == 0 || targets[0] == null)
return new Diagramelement_c[0];
LinkedHashSet<Diagramelement_c> elementsSet = new LinkedHashSet<Diagramelement_c>();
for (int i = 0; i < targets.length; i++) {
if(loadComponent && targets[i] != null && targets[i].ContainerDiagramelement == null)
targets[i].loadProxy();
Diagramelement_c associate = targets[i].ContainerDiagramelement;
if (targets[i] != null && associate != null
&& (test == null || test.evaluate(associate))) {
if (elementsSet.add(associate)){
}
}
}
Diagramelement_c[] result = new Diagramelement_c[elementsSet.size()];
elementsSet.toArray(result);
return result;
}
public static Diagramelement_c [] getManyDIM_ELEsOnR310(Property_c [] targets)
{
return getManyDIM_ELEsOnR310(targets, null);
}
public static Diagramelement_c [] getManyDIM_ELEsOnR310(Property_c target,
ClassQueryInterface_c test, boolean loadComponent)
{
if (target != null) {
Property_c [] targetArray = new Property_c[1];
targetArray[0] = target;
return getManyDIM_ELEsOnR310(targetArray, test, loadComponent);
} else {
Diagramelement_c [] result = new Diagramelement_c [0] ;
return result ;
}
}
public static Diagramelement_c [] getManyDIM_ELEsOnR310(Property_c target,
ClassQueryInterface_c test)
{
return getManyDIM_ELEsOnR310(target, test, true);
}
public static Diagramelement_c [] getManyDIM_ELEsOnR310(Property_c target)
{
return getManyDIM_ELEsOnR310(target, null, true);
}
public static Diagramelement_c [] getManyDIM_ELEsOnR310(Property_c target, boolean loadComponent)
{
return getManyDIM_ELEsOnR310(target, null, loadComponent);
}
public void batchRelate(ModelRoot modelRoot, boolean notifyChanges, boolean searchAllRoots)
{
batchRelate(modelRoot, false, notifyChanges, searchAllRoots);
}
public void batchRelate(ModelRoot modelRoot, boolean relateProxies, boolean notifyChanges, boolean searchAllRoots)
{
InstanceList instances=null;
ModelRoot baseRoot = modelRoot;
if (ContainerGraphelement == null) {
// R307
Graphelement_c relInst21886 = (Graphelement_c) baseRoot.getInstanceList(Graphelement_c.class).get(new Object[] {m_container_elementid});
// if there was no local element, check for any global elements
// failing that proceed to check other model roots
if (relInst21886 == null) {
relInst21886 = (Graphelement_c) Ooaofooa.getDefaultInstance().getInstanceList(Graphelement_c.class).get(new Object[] {m_container_elementid});
}
//synchronized
if ( relInst21886 != null )
{
if (relateProxies || !isProxy() || (inSameComponent(this, relInst21886) && !isProxy())) {
relInst21886.relateAcrossR307To(this, notifyChanges);
}
}
}
}
public void batchUnrelate(boolean notifyChanges)
{
NonRootModelElement inst=null;
// R307
// DIM_GE
inst=ContainerGraphelement;
unrelateAcrossR307From(ContainerGraphelement, notifyChanges);
if ( inst != null ) {
inst.removeRef();
}
}
public static void batchRelateAll(ModelRoot modelRoot, boolean notifyChanges, boolean searchAllRoots) {
batchRelateAll(modelRoot, notifyChanges, searchAllRoots, false);
}
public static void batchRelateAll(ModelRoot modelRoot, boolean notifyChanges, boolean searchAllRoots, boolean relateProxies)
{
InstanceList instances = modelRoot.getInstanceList(Diagramelement_c.class);
synchronized(instances) {
Iterator<NonRootModelElement> cursor = instances.iterator() ;
while (cursor.hasNext())
{
final Diagramelement_c inst = (Diagramelement_c)cursor.next() ;
inst.batchRelate(modelRoot, relateProxies, notifyChanges, searchAllRoots );
}
}
}
public static void clearInstances(ModelRoot modelRoot)
{
InstanceList instances = modelRoot.getInstanceList(Diagramelement_c.class);
synchronized(instances) {
for(int i=instances.size()-1; i>=0; i--){
((NonRootModelElement)instances.get(i)).delete_unchecked();
}
}
}
public static Diagramelement_c DiagramelementInstance(ModelRoot modelRoot, ClassQueryInterface_c test, boolean loadComponent)
{
Diagramelement_c result=findDiagramelementInstance(modelRoot,test,loadComponent);
return result;
}
private static Diagramelement_c findDiagramelementInstance(ModelRoot modelRoot, ClassQueryInterface_c test, boolean loadComponent)
{
InstanceList instances = modelRoot.getInstanceList(Diagramelement_c.class);
synchronized (instances) {
for (int i = 0; i < instances.size(); ++i) {
Diagramelement_c x = (Diagramelement_c) instances.get(i);
if (test==null || test.evaluate(x)){
return x;
}
}
}
return null;
}
public static Diagramelement_c DiagramelementInstance(ModelRoot modelRoot, ClassQueryInterface_c test){
return DiagramelementInstance(modelRoot,test,true);
}
public static Diagramelement_c DiagramelementInstance(ModelRoot modelRoot)
{
return DiagramelementInstance(modelRoot,null,true);
}
public static Diagramelement_c [] DiagramelementInstances(ModelRoot modelRoot, ClassQueryInterface_c test, boolean loadComponent)
{
InstanceList instances = modelRoot.getInstanceList(Diagramelement_c.class);
Vector matches = new Vector();
synchronized (instances) {
for (int i = 0; i < instances.size(); ++i) {
Diagramelement_c x = (Diagramelement_c) instances.get(i);
if (test==null ||test.evaluate(x)){
matches.add(x);
}
}
if (matches.size() > 0) {
Diagramelement_c[] ret_set = new Diagramelement_c[matches.size()];
matches.copyInto(ret_set);
return ret_set;
} else {
return new Diagramelement_c[0];
}
}
}
public static Diagramelement_c [] DiagramelementInstances(ModelRoot modelRoot, ClassQueryInterface_c test){
return DiagramelementInstances(modelRoot,test,true);
}
public static Diagramelement_c [] DiagramelementInstances(ModelRoot modelRoot)
{
return DiagramelementInstances(modelRoot,null,true);
}
public boolean delete()
{
boolean result = super.delete();
boolean delete_error = false;
String errorMsg = "The following relationships were not torn down by the DiagramElement.dispose call: ";
Reference_c testR308Inst = Reference_c.getOneDIM_REFOnR308(this, false);
if ( testR308Inst != null )
{
delete_error = true;
errorMsg = errorMsg + "308 ";
}
Reference_c testR302Inst1 = Reference_c.getOneDIM_REFOnR302(this, false);
if ( testR302Inst1 != null )
{
delete_error = true;
errorMsg = errorMsg + "302 ";
}
Graphelement_c testR302Inst2 = Graphelement_c.getOneDIM_GEOnR302(this, false);
if ( testR302Inst2 != null )
{
delete_error = true;
errorMsg = errorMsg + "302 ";
}
Leafelement_c testR302Inst3 = Leafelement_c.getOneDIM_LELOnR302(this, false);
if ( testR302Inst3 != null )
{
delete_error = true;
errorMsg = errorMsg + "302 ";
}
Property_c testR310Inst = Property_c.getOneDIM_PRPOnR310(this, false);
if ( testR310Inst != null )
{
delete_error = true;
errorMsg = errorMsg + "310 ";
}
Graphelement_c testR307Inst = Graphelement_c.getOneDIM_GEOnR307(this, false);
if ( testR307Inst != null )
{
delete_error = true;
errorMsg = errorMsg + "307 ";
}
if(delete_error == true) {
if(CanvasPlugin.getDefault().isDebugging()) {
Ooaofgraphics.log.println(ILogger.DELETE, "DiagramElement", errorMsg);
}
else {
Exception e = new Exception();
e.fillInStackTrace();
CanvasPlugin.logError(errorMsg, e);
}
}
return result;
}
/**
* Assigns IDs to instances of this class.
*/
private static IdAssigner idAssigner = new IdAssigner();
/**
* See field.
*/
public IdAssigner getIdAssigner() {return idAssigner;}
/**
* See field.
*/
public static IdAssigner getIdAssigner_() {return idAssigner;}
// end declare instance pool
// declare attribute accessors
public boolean isUUID(String attributeName){
if(attributeName.equals("elementid")){
return true;
}
if(attributeName.equals("container_elementid")){
return true;
}
return false;
}
// declare attribute accessors
public boolean getIsvisible()
{
return m_isvisible ;
}
public void setIsvisible(boolean newValue)
{
if(m_isvisible == newValue){
return;
}
AttributeChangeModelDelta change = new AttributeChangeModelDelta(Modeleventnotification_c.DELTA_ATTRIBUTE_CHANGE, this, "Isvisible", new Boolean(m_isvisible), new Boolean(newValue),true);
m_isvisible = newValue ;
Ooaofgraphics.getDefaultInstance().fireModelElementAttributeChanged(change);
}
public long getElementidLongBased()
{
if(m_elementidLongBased == 0 && !IdAssigner.NULL_UUID.equals(m_elementid)){
return 0xfffffff & m_elementid.getLeastSignificantBits();
}
return m_elementidLongBased ;
}
public java.util.UUID getElementid()
{
return m_elementid ;
}
public void setElementid(java.util.UUID newValue)
{
m_elementid = IdAssigner.preprocessUUID(newValue);
}
public long getContainer_elementidLongBased()
{
if ( ContainerGraphelement != null )
{
return ContainerGraphelement.getElementidLongBased();
}
return 0;
}
public java.util.UUID getContainer_elementid()
{
if ( ContainerGraphelement != null )
{
return ContainerGraphelement.getElementid();
}
return IdAssigner.NULL_UUID;
}
public java.util.UUID getContainer_elementidCachedValue()
{
if ( !IdAssigner.NULL_UUID.equals(m_container_elementid) )
return m_container_elementid;
else
return getContainer_elementid();
}
public void setContainer_elementid(java.util.UUID newValue)
{
if(newValue != null){
if(newValue.equals(m_container_elementid)){
return;
}
}else if(m_container_elementid != null){
if(m_container_elementid.equals(newValue)){
return;
}
}else{
return;
}
AttributeChangeModelDelta change = new AttributeChangeModelDelta(Modeleventnotification_c.DELTA_ATTRIBUTE_CHANGE, this, "Container_elementid", m_container_elementid, newValue,true);
m_container_elementid = IdAssigner.preprocessUUID(newValue);
Ooaofgraphics.getDefaultInstance().fireModelElementAttributeChanged(change);
}
// end declare accessors
public static void checkClassConsistency (ModelRoot modelRoot) {
Ooaofooa.log.println(
ILogger.OPERATION,
"DiagramElement", //$NON-NLS-1$
" Operation entered: DiagramElement::checkClassConsistency"); //$NON-NLS-1$
if (Boolean.valueOf(System.getenv("PTC_MCC_ENABLED")) == false) { //$NON-NLS-1$
return;
}
Diagramelement_c [] objs = Diagramelement_c.DiagramelementInstances(modelRoot,null,false);
for ( int i = 0; i < objs.length; i++) {
objs[i].checkConsistency();
}
}
public boolean checkConsistency () {
Ooaofooa.log.println(
ILogger.OPERATION,
"DiagramElement", //$NON-NLS-1$
" Operation entered: DiagramElement::checkConsistency"); //$NON-NLS-1$
if (Boolean.valueOf(System.getenv("PTC_MCC_ENABLED")) == false) { //$NON-NLS-1$
return true;
}
ModelRoot modelRoot = getModelRoot();
boolean retval = true;
class Diagramelement_c_test21888_c implements ClassQueryInterface_c
{
Diagramelement_c_test21888_c( java.util.UUID p21889 ) {
m_p21889 = p21889;
}
private java.util.UUID m_p21889;
public boolean evaluate (Object candidate)
{
Diagramelement_c selected = (Diagramelement_c) candidate;
boolean retval = false;
retval = (selected.getElementid().equals(m_p21889));
return retval;
}
}
Diagramelement_c [] objs21887 =
Diagramelement_c.DiagramelementInstances(modelRoot, new Diagramelement_c_test21888_c(getElementid())) ;
if ( ( (objs21887.length) == 0) )
{
if (CanvasPlugin.getDefault().isDebugging()){
Ooaofgraphics.log.println(ILogger.CONSISTENCY, "DiagramElement", //$NON-NLS-1$
"Consistency: Object: DiagramElement: Cardinality of an identifier is zero. " //$NON-NLS-1$
+ "Actual Value: " + Integer.toString( objs21887.length ) ); //$NON-NLS-1$
}
else {
Exception e = new Exception();
CanvasPlugin.logError("Consistency: Object: DiagramElement: Cardinality of an identifier is zero. " //$NON-NLS-1$
+ "Actual Value: " //$NON-NLS-1$
+ Integer.toString( objs21887.length ) , e);
}
retval = false;
}
if ( ( (objs21887.length) > 1) )
{
if (CanvasPlugin.getDefault().isDebugging()){
Ooaofgraphics.log.println(ILogger.CONSISTENCY, "DiagramElement", //$NON-NLS-1$
"Consistency: Object: DiagramElement: Cardinality of an identifier is greater than 1. " //$NON-NLS-1$
+ "Actual Value: " //$NON-NLS-1$
+ Integer.toString( objs21887.length ) + " elementId: " + "Not Printable" ); //$NON-NLS-1$
}
else {
Exception e = new Exception();
CanvasPlugin.logError("Consistency: Object: DiagramElement: Cardinality of an identifier is greater than 1. " //$NON-NLS-1$
+ "Actual Value: " //$NON-NLS-1$
+ Integer.toString( objs21887.length ) + " elementId: " + "Not Printable" , e); //$NON-NLS-1$
}
retval = false;
}
// DiagramElement is a referring class in association: rel.Numb = 307
// The participating class is: GraphElement
class Graphelement_c_test21893_c implements ClassQueryInterface_c
{
Graphelement_c_test21893_c( java.util.UUID p21894 ) {
m_p21894 = p21894;
}
private java.util.UUID m_p21894;
public boolean evaluate (Object candidate)
{
Graphelement_c selected = (Graphelement_c) candidate;
boolean retval = false;
retval = (selected.getElementid().equals(m_p21894));
return retval;
}
}
Graphelement_c [] objs21892 =
Graphelement_c.GraphelementInstances(modelRoot, new Graphelement_c_test21893_c(getContainer_elementid())) ;
if ( ( (objs21892.length) > 1) )
{
if (CanvasPlugin.getDefault().isDebugging()){
Ooaofgraphics.log.println(ILogger.CONSISTENCY, "DiagramElement", //$NON-NLS-1$
"Consistency: Object: DiagramElement: Association: 307: Cardinality of a participant is greater than 1. " //$NON-NLS-1$
+ "Actual Value: " //$NON-NLS-1$
+ Integer.toString( objs21892.length ) + " container_elementId: " + "Not Printable" ); //$NON-NLS-1$
}
else {
Exception e = new Exception();
CanvasPlugin.logError("Consistency: Object: DiagramElement: Association: 307: Cardinality of a participant is greater than 1. " //$NON-NLS-1$
+ "Actual Value: " //$NON-NLS-1$
+ Integer.toString( objs21892.length ) + " container_elementId: " + "Not Printable" , e); //$NON-NLS-1$
}
retval = false;
}
// DiagramElement is a participating class in association: rel.Numb = 308
// Object: Reference
// Supertype: rel.Numb = 302
int objs21895 = 0;
// Subtype Object: Reference
class Reference_c_test21896_c implements ClassQueryInterface_c
{
Reference_c_test21896_c( java.util.UUID p21897 ) {
m_p21897 = p21897;
}
private java.util.UUID m_p21897;
public boolean evaluate (Object candidate)
{
Reference_c selected = (Reference_c) candidate;
boolean retval = false;
retval = (selected.getElementid().equals(m_p21897));
return retval;
}
}
Reference_c [] objs21898 =
Reference_c.ReferenceInstances(modelRoot, new Reference_c_test21896_c(getElementid())) ;
objs21895 = objs21895 + objs21898.length;
// Subtype Object: GraphElement
class Graphelement_c_test21899_c implements ClassQueryInterface_c
{
Graphelement_c_test21899_c( java.util.UUID p21900 ) {
m_p21900 = p21900;
}
private java.util.UUID m_p21900;
public boolean evaluate (Object candidate)
{
Graphelement_c selected = (Graphelement_c) candidate;
boolean retval = false;
retval = (selected.getElementid().equals(m_p21900));
return retval;
}
}
Graphelement_c [] objs21901 =
Graphelement_c.GraphelementInstances(modelRoot, new Graphelement_c_test21899_c(getElementid())) ;
objs21895 = objs21895 + objs21901.length;
// Subtype Object: LeafElement
class Leafelement_c_test21902_c implements ClassQueryInterface_c
{
Leafelement_c_test21902_c( java.util.UUID p21903 ) {
m_p21903 = p21903;
}
private java.util.UUID m_p21903;
public boolean evaluate (Object candidate)
{
Leafelement_c selected = (Leafelement_c) candidate;
boolean retval = false;
retval = (selected.getElementid().equals(m_p21903));
return retval;
}
}
Leafelement_c [] objs21904 =
Leafelement_c.LeafelementInstances(modelRoot, new Leafelement_c_test21902_c(getElementid())) ;
objs21895 = objs21895 + objs21904.length;
if ( objs21895 != 1 )
{
if (CanvasPlugin.getDefault().isDebugging()){
Ooaofgraphics.log.println(ILogger.CONSISTENCY, "DiagramElement", //$NON-NLS-1$
"Consistency: Object: DiagramElement: Association: 302: Cardinality of subtype is not equal to 1. " //$NON-NLS-1$
+ "Actual Value: " + Integer.toString( objs21895 ) ); //$NON-NLS-1$
}
else {
Exception e = new Exception();
CanvasPlugin.logError("Consistency: Object: DiagramElement: Association: 302: Cardinality of subtype is not equal to 1. " //$NON-NLS-1$
+ "Actual Value: " //$NON-NLS-1$
+ Integer.toString( objs21895 ) , e);
}
retval = false;
}
// DiagramElement is a participating class in association: rel.Numb = 310
// Object: Property
return retval;
}
// declare transform functions
public void Dispose()
{
Ooaofgraphics.log.println(ILogger.OPERATION, "DiagramElement", " Operation entered: Diagramelement::Dispose") ;
final ModelRoot modelRoot = getModelRoot();
Graphelement_c v_ge = Graphelement_c.getOneDIM_GEOnR302(this);
if ( ( (v_ge != null)) )
{
this.unrelateAcrossR302From(v_ge);
if (v_ge != null) {
v_ge.Dispose() ;
}
else {
Throwable t = new Throwable();
t.fillInStackTrace();
CorePlugin.logError("Attempted to call an operation on a null instance.", t);
}
}
else
{
Leafelement_c v_leaf = Leafelement_c.getOneDIM_LELOnR302(this);
if ( ( (v_leaf != null)) )
{
this.unrelateAcrossR302From(v_leaf);
if (v_leaf != null) {
v_leaf.Dispose() ;
}
else {
Throwable t = new Throwable();
t.fillInStackTrace();
CorePlugin.logError("Attempted to call an operation on a null instance.", t);
}
}
}
Property_c [] v_prop_set = Property_c.getManyDIM_PRPsOnR310(this);
Property_c v_prop = null;
for ( int i21306 = 0; i21306 < v_prop_set.length; i21306++)
{
v_prop = v_prop_set[i21306] ;
this.unrelateAcrossR310From(v_prop);
if (v_prop != null) {
v_prop.Dispose() ;
}
else {
Throwable t = new Throwable();
t.fillInStackTrace();
CorePlugin.logError("Attempted to call an operation on a null instance.", t);
}
}
v_ge = Graphelement_c.getOneDIM_GEOnR307(this);
if ( ( (v_ge != null)) )
{
this.unrelateAcrossR307From(v_ge);
}
if ( delete() ) {
Ooaofgraphics.getDefaultInstance().fireModelElementDeleted(new BaseModelDelta(Modeleventnotification_c.DELTA_DELETE, this));
}
} // End dispose
// end transform functions
public Object getAdapter(Class adapter) {
Object superAdapter = super.getAdapter(adapter);
if(superAdapter != null) {
return superAdapter;
}
return null;
}
} // end DiagramElement
| [
"\"PTC_MCC_ENABLED\"",
"\"PTC_MCC_ENABLED\"",
"\"PTC_MCC_ENABLED\""
]
| []
| [
"PTC_MCC_ENABLED"
]
| [] | ["PTC_MCC_ENABLED"] | java | 1 | 0 | |
python/python-psi-impl/src/com/jetbrains/python/psi/PyUtil.java | // Copyright 2000-2019 JetBrains s.r.o. Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.jetbrains.python.psi;
import com.google.common.collect.Maps;
import com.intellij.codeInsight.FileModificationService;
import com.intellij.codeInsight.completion.PrioritizedLookupElement;
import com.intellij.codeInsight.lookup.LookupElement;
import com.intellij.codeInsight.lookup.LookupElementBuilder;
import com.intellij.lang.ASTFactory;
import com.intellij.lang.ASTNode;
import com.intellij.model.ModelBranch;
import com.intellij.openapi.application.ApplicationManager;
import com.intellij.openapi.editor.Document;
import com.intellij.openapi.module.Module;
import com.intellij.openapi.module.ModuleUtilCore;
import com.intellij.openapi.progress.ProgressIndicator;
import com.intellij.openapi.progress.ProgressManager;
import com.intellij.openapi.progress.Task;
import com.intellij.openapi.project.Project;
import com.intellij.openapi.roots.*;
import com.intellij.openapi.util.Comparing;
import com.intellij.openapi.util.TextRange;
import com.intellij.openapi.util.io.FileUtil;
import com.intellij.openapi.util.io.FileUtilRt;
import com.intellij.openapi.vfs.VfsUtilCore;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.psi.*;
import com.intellij.psi.stubs.StubElement;
import com.intellij.psi.tree.IElementType;
import com.intellij.psi.util.*;
import com.intellij.util.*;
import com.intellij.util.concurrency.annotations.RequiresEdt;
import com.intellij.util.containers.ContainerUtil;
import com.jetbrains.python.PyElementTypes;
import com.jetbrains.python.PyNames;
import com.jetbrains.python.PyTokenTypes;
import com.jetbrains.python.PythonCodeStyleService;
import com.jetbrains.python.codeInsight.completion.OverwriteEqualsInsertHandler;
import com.jetbrains.python.codeInsight.controlflow.ScopeOwner;
import com.jetbrains.python.codeInsight.dataflow.scope.ScopeUtil;
import com.jetbrains.python.codeInsight.mlcompletion.PyCompletionMlElementInfo;
import com.jetbrains.python.codeInsight.mlcompletion.PyCompletionMlElementKind;
import com.jetbrains.python.psi.impl.PyBuiltinCache;
import com.jetbrains.python.psi.impl.PyExpressionCodeFragmentImpl;
import com.jetbrains.python.psi.impl.PyPsiUtils;
import com.jetbrains.python.psi.impl.PyTypeProvider;
import com.jetbrains.python.psi.resolve.PyResolveContext;
import com.jetbrains.python.psi.resolve.QualifiedNameFinder;
import com.jetbrains.python.psi.resolve.RatedResolveResult;
import com.jetbrains.python.psi.stubs.PySetuptoolsNamespaceIndex;
import com.jetbrains.python.psi.types.*;
import com.jetbrains.python.pyi.PyiStubSuppressor;
import one.util.streamex.StreamEx;
import org.jetbrains.annotations.*;
import javax.swing.*;
import java.io.File;
import java.util.*;
import static com.jetbrains.python.psi.PyFunction.Modifier.CLASSMETHOD;
import static com.jetbrains.python.psi.PyFunction.Modifier.STATICMETHOD;
/**
* Assorted utility methods for Python code insight.
*
* These methods don't depend on the Python runtime.
*
* @see PyPsiUtils for utilities used in Python PSI API
* @see PyUiUtil for UI-related utilities for Python (available in intellij.python.community.impl)
*/
public final class PyUtil {
private static final boolean VERBOSE_MODE = System.getenv().get("_PYCHARM_VERBOSE_MODE") != null;
private PyUtil() {
}
/**
* @see PyUtil#flattenedParensAndTuples
*/
private static List<PyExpression> unfoldParentheses(PyExpression[] targets, List<PyExpression> receiver,
boolean unfoldListLiterals, boolean unfoldStarExpressions) {
// NOTE: this proliferation of instanceofs is not very beautiful. Maybe rewrite using a visitor.
for (PyExpression exp : targets) {
if (exp instanceof PyParenthesizedExpression) {
final PyParenthesizedExpression parenExpr = (PyParenthesizedExpression)exp;
unfoldParentheses(new PyExpression[]{parenExpr.getContainedExpression()}, receiver, unfoldListLiterals, unfoldStarExpressions);
}
else if (exp instanceof PyTupleExpression) {
final PyTupleExpression tupleExpr = (PyTupleExpression)exp;
unfoldParentheses(tupleExpr.getElements(), receiver, unfoldListLiterals, unfoldStarExpressions);
}
else if (exp instanceof PyListLiteralExpression && unfoldListLiterals) {
final PyListLiteralExpression listLiteral = (PyListLiteralExpression)exp;
unfoldParentheses(listLiteral.getElements(), receiver, true, unfoldStarExpressions);
}
else if (exp instanceof PyStarExpression && unfoldStarExpressions) {
unfoldParentheses(new PyExpression[]{((PyStarExpression)exp).getExpression()}, receiver, unfoldListLiterals, true);
}
else if (exp != null) {
receiver.add(exp);
}
}
return receiver;
}
/**
* Flattens the representation of every element in targets, and puts all results together.
* Elements of every tuple nested in a target item are brought to the top level: (a, (b, (c, d))) -> (a, b, c, d)
* Typical usage: {@code flattenedParensAndTuples(some_tuple.getExpressions())}.
*
* @param targets target elements.
* @return the list of flattened expressions.
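* <p>A minimal usage sketch ({@code expr1} and {@code expr2} are hypothetical
* {@link PyExpression} instances obtained elsewhere):
* <pre>{@code
* List<PyExpression> flat = PyUtil.flattenedParensAndTuples(expr1, expr2);
* // a nested target such as (a, (b, (c, d))) contributes a, b, c, d to the result
* }</pre>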
*/
@NotNull
public static List<PyExpression> flattenedParensAndTuples(PyExpression... targets) {
return unfoldParentheses(targets, new ArrayList<>(targets.length), false, false);
}
@NotNull
public static List<PyExpression> flattenedParensAndLists(PyExpression... targets) {
return unfoldParentheses(targets, new ArrayList<>(targets.length), true, true);
}
@NotNull
public static List<PyExpression> flattenedParensAndStars(PyExpression... targets) {
return unfoldParentheses(targets, new ArrayList<>(targets.length), false, true);
}
/**
* Produce a reasonable representation of a PSI element, good for debugging.
*
* @param elt element to represent; nulls and invalid nodes are ok.
* @param cutAtEOL if true, representation stops at nearest EOL inside the element.
* @return the representation.
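* <p>Sketch: {@code getReadableRepr(statement, true)} returns the element's text cut at the
* first line break, and the string {@code "null!"} for a null element.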
*/
@NotNull
@NonNls
public static String getReadableRepr(PsiElement elt, final boolean cutAtEOL) {
if (elt == null) return "null!";
ASTNode node = elt.getNode();
if (node == null) {
return "null";
}
else {
String s = node.getText();
int cut_pos;
if (cutAtEOL) {
cut_pos = s.indexOf('\n');
}
else {
cut_pos = -1;
}
if (cut_pos < 0) cut_pos = s.length();
return s.substring(0, Math.min(cut_pos, s.length()));
}
}
@Nullable
public static PyClass getContainingClassOrSelf(final PsiElement element) {
PsiElement current = element;
while (current != null && !(current instanceof PyClass)) {
current = current.getParent();
}
return (PyClass)current;
}
/**
* @param element for which to obtain the file
* @return PyFile, or null, if there's no containing file, or it is not a PyFile.
*/
@Nullable
public static PyFile getContainingPyFile(PyElement element) {
final PsiFile containingFile = element.getContainingFile();
return containingFile instanceof PyFile ? (PyFile)containingFile : null;
}
/**
* Returns a quoted string representation, or "null".
*/
@NonNls
public static String nvl(Object s) {
if (s != null) {
return "'" + s.toString() + "'";
}
else {
return "null";
}
}
/**
* Adds an item into a comma-separated list in a PSI tree. E.g. can turn "foo, bar" into "foo, bar, baz", adding commas as needed.
*
* @param parent the element to represent the list; we're adding a child to it.
* @param newItem the element we're inserting (the "baz" in the example).
* @param beforeThis node to mark the insertion point inside the list; must belong to a child of {@code parent}. Set to null to add the first element.
* @param isFirst true if we don't need a comma before the element we're adding.
* @param isLast true if we don't need a comma after the element we're adding.
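* <p>An illustrative call (all variables are hypothetical): turning {@code "foo, bar"} into
* {@code "foo, bar, baz"} could look roughly like
* <pre>{@code
* PyUtil.addListNode(listElement, bazElement, null, false, true, true);
* }</pre>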
*/
public static void addListNode(PsiElement parent, PsiElement newItem, ASTNode beforeThis,
boolean isFirst, boolean isLast, boolean addWhitespace) {
if (!FileModificationService.getInstance().preparePsiElementForWrite(parent)) {
return;
}
ASTNode node = parent.getNode();
assert node != null;
ASTNode itemNode = newItem.getNode();
assert itemNode != null;
Project project = parent.getProject();
PyElementGenerator gen = PyElementGenerator.getInstance(project);
if (!isFirst) node.addChild(gen.createComma(), beforeThis);
node.addChild(itemNode, beforeThis);
if (!isLast) node.addChild(gen.createComma(), beforeThis);
if (addWhitespace) node.addChild(ASTFactory.whitespace(" "), beforeThis);
}
// TODO: move to a more proper place?
/**
* Determine the type of a special attribute. Currently supported: {@code __class__} and {@code __dict__}.
*
* @param ref reference to a possible attribute; only qualified references make sense.
* @return type, or null (if type cannot be determined, reference is not to a known attribute, etc.)
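* <p>Sketch ({@code classRef} is a hypothetical reference expression for {@code x.__class__}):
* <pre>{@code
* PyType type = PyUtil.getSpecialAttributeType(classRef, typeEvalContext);
* // a definition-level class type when the qualifier's type is a class type, otherwise null
* }</pre>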
*/
@Nullable
public static PyType getSpecialAttributeType(@Nullable PyReferenceExpression ref, TypeEvalContext context) {
if (ref != null) {
PyExpression qualifier = ref.getQualifier();
if (qualifier != null) {
String attr_name = ref.getReferencedName();
if (PyNames.__CLASS__.equals(attr_name)) {
PyType qualifierType = context.getType(qualifier);
if (qualifierType instanceof PyClassType) {
return new PyClassTypeImpl(((PyClassType)qualifierType).getPyClass(), true); // always as class, never instance
}
}
else if (PyNames.DUNDER_DICT.equals(attr_name)) {
PyType qualifierType = context.getType(qualifier);
if (qualifierType instanceof PyClassType && ((PyClassType)qualifierType).isDefinition()) {
return PyBuiltinCache.getInstance(ref).getDictType();
}
}
}
}
return null;
}
/**
* Makes sure that 'thing' is not null; else throws an {@link IncorrectOperationException}.
*
* @param thing what we check.
* @return thing, if not null.
*/
@NotNull
public static <T> T sure(T thing) {
if (thing == null) throw new IncorrectOperationException();
return thing;
}
/**
* Makes sure that the 'thing' is true; else throws an {@link IncorrectOperationException}.
*
* @param thing what we check.
*/
public static void sure(boolean thing) {
if (!thing) throw new IncorrectOperationException();
}
public static boolean isAttribute(PyTargetExpression ex) {
return isInstanceAttribute(ex) || isClassAttribute(ex);
}
public static boolean isInstanceAttribute(PyExpression target) {
if (!(target instanceof PyTargetExpression)) {
return false;
}
final ScopeOwner owner = ScopeUtil.getScopeOwner(target);
if (owner instanceof PyFunction) {
final PyFunction method = (PyFunction)owner;
if (method.getContainingClass() != null) {
if (method.getStub() != null) {
return true;
}
final PyParameter[] params = method.getParameterList().getParameters();
if (params.length > 0) {
final PyTargetExpression targetExpr = (PyTargetExpression)target;
final PyExpression qualifier = targetExpr.getQualifier();
return qualifier != null && qualifier.getText().equals(params[0].getName());
}
}
}
return false;
}
public static boolean isClassAttribute(PsiElement element) {
return element instanceof PyTargetExpression && ScopeUtil.getScopeOwner(element) instanceof PyClass;
}
public static boolean hasIfNameEqualsMain(@NotNull PyFile file) {
final PyIfStatement dunderMain = SyntaxTraverser.psiApi()
.children(file)
.filterMap(psi -> psi instanceof PyIfStatement ? ((PyIfStatement)psi) : null)
.find(ifStatement -> isIfNameEqualsMain(ifStatement));
return dunderMain != null;
}
public static boolean isIfNameEqualsMain(PyIfStatement ifStatement) {
final PyExpression condition = ifStatement.getIfPart().getCondition();
return isNameEqualsMain(condition);
}
private static boolean isNameEqualsMain(PyExpression condition) {
if (condition instanceof PyParenthesizedExpression) {
return isNameEqualsMain(((PyParenthesizedExpression)condition).getContainedExpression());
}
if (condition instanceof PyBinaryExpression) {
PyBinaryExpression binaryExpression = (PyBinaryExpression)condition;
if (binaryExpression.getOperator() == PyTokenTypes.OR_KEYWORD) {
return isNameEqualsMain(binaryExpression.getLeftExpression()) || isNameEqualsMain(binaryExpression.getRightExpression());
}
if (binaryExpression.getRightExpression() instanceof PyStringLiteralExpression) {
final PyStringLiteralExpression rhs = (PyStringLiteralExpression) binaryExpression.getRightExpression();
return binaryExpression.getOperator() == PyTokenTypes.EQEQ &&
binaryExpression.getLeftExpression().getText().equals(PyNames.NAME) &&
rhs.getStringValue().equals("__main__");
}
}
return false;
}
/**
* Searches for a method wrapping given element.
*
* @param start element presumably inside a method
* @param deep if true, allow 'start' to be inside functions nested in a method; else, 'start' must be directly inside a method.
* @return if not 'deep', [0] is the method and [1] is the class; if 'deep', first several elements may be the nested functions,
* the last but one is the method, and the last is the class.
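* <p>Sketch: with {@code deep == true} and {@code start} located inside a helper function nested
* in a method, the result would be roughly {@code [nestedFunction, method, containingClass]}
* (names are illustrative only).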
*/
@Nullable
public static List<PsiElement> searchForWrappingMethod(PsiElement start, boolean deep) {
PsiElement seeker = start;
List<PsiElement> ret = new ArrayList<>(2);
while (seeker != null) {
PyFunction func = PsiTreeUtil.getParentOfType(seeker, PyFunction.class, true, PyClass.class);
if (func != null) {
PyClass cls = func.getContainingClass();
if (cls != null) {
ret.add(func);
ret.add(cls);
return ret;
}
else if (deep) {
ret.add(func);
seeker = func;
}
else {
return null; // no immediate class
}
}
else {
return null; // no function
}
}
return null;
}
public static boolean inSameFile(@NotNull PsiElement e1, @NotNull PsiElement e2) {
final PsiFile f1 = e1.getContainingFile();
final PsiFile f2 = e2.getContainingFile();
if (f1 == null || f2 == null) {
return false;
}
return f1 == f2;
}
public static boolean onSameLine(@NotNull PsiElement e1, @NotNull PsiElement e2) {
final PsiDocumentManager documentManager = PsiDocumentManager.getInstance(e1.getProject());
final Document document = documentManager.getDocument(e1.getContainingFile());
if (document == null || document != documentManager.getDocument(e2.getContainingFile())) {
return false;
}
return document.getLineNumber(e1.getTextOffset()) == document.getLineNumber(e2.getTextOffset());
}
public static boolean isTopLevel(@NotNull PsiElement element) {
if (element instanceof StubBasedPsiElement) {
final StubElement stub = ((StubBasedPsiElement)element).getStub();
if (stub != null) {
final StubElement parentStub = stub.getParentStub();
if (parentStub != null) {
return parentStub.getPsi() instanceof PsiFile;
}
}
}
return ScopeUtil.getScopeOwner(element) instanceof PsiFile;
}
public static void deletePycFiles(String pyFilePath) {
if (pyFilePath.endsWith(PyNames.DOT_PY)) {
List<File> filesToDelete = new ArrayList<>();
File pyc = new File(pyFilePath + "c");
if (pyc.exists()) {
filesToDelete.add(pyc);
}
File pyo = new File(pyFilePath + "o");
if (pyo.exists()) {
filesToDelete.add(pyo);
}
final File file = new File(pyFilePath);
File pycache = new File(file.getParentFile(), PyNames.PYCACHE);
if (pycache.isDirectory()) {
final String shortName = FileUtilRt.getNameWithoutExtension(file.getName());
Collections.addAll(filesToDelete, pycache.listFiles(pathname -> {
if (!FileUtilRt.extensionEquals(pathname.getName(), "pyc")) return false;
String nameWithMagic = FileUtilRt.getNameWithoutExtension(pathname.getName());
return FileUtilRt.getNameWithoutExtension(nameWithMagic).equals(shortName);
}));
}
FileUtil.asyncDelete(filesToDelete);
}
}
public static String getElementNameWithoutExtension(PsiNamedElement psiNamedElement) {
return psiNamedElement instanceof PyFile
? FileUtilRt.getNameWithoutExtension(((PyFile)psiNamedElement).getName())
: psiNamedElement.getName();
}
public static boolean hasUnresolvedAncestors(@NotNull PyClass cls, @NotNull TypeEvalContext context) {
for (PyClassLikeType type : cls.getAncestorTypes(context)) {
if (type == null) {
return true;
}
}
return false;
}
@NotNull
public static AccessDirection getPropertyAccessDirection(@NotNull PyFunction function) {
final Property property = function.getProperty();
if (property != null) {
if (property.getGetter().valueOrNull() == function) {
return AccessDirection.READ;
}
if (property.getSetter().valueOrNull() == function) {
return AccessDirection.WRITE;
}
else if (property.getDeleter().valueOrNull() == function) {
return AccessDirection.DELETE;
}
}
return AccessDirection.READ;
}
public static void removeQualifier(@NotNull final PyReferenceExpression element) {
final PyExpression qualifier = element.getQualifier();
if (qualifier == null) return;
if (qualifier instanceof PyCallExpression) {
final PyExpression callee = ((PyCallExpression)qualifier).getCallee();
if (callee instanceof PyReferenceExpression) {
final PyExpression calleeQualifier = ((PyReferenceExpression)callee).getQualifier();
if (calleeQualifier != null) {
qualifier.replace(calleeQualifier);
return;
}
}
}
final PsiElement dot = PyPsiUtils.getNextNonWhitespaceSibling(qualifier);
if (dot != null) dot.delete();
qualifier.delete();
}
public static boolean isOwnScopeComprehension(@NotNull PyComprehensionElement comprehension) {
final boolean isAtLeast30 = !LanguageLevel.forElement(comprehension).isPython2();
final boolean isListComprehension = comprehension instanceof PyListCompExpression;
return !isListComprehension || isAtLeast30;
}
public static ASTNode createNewName(PyElement element, String name) {
return PyElementGenerator.getInstance(element.getProject()).createNameIdentifier(name, LanguageLevel.forElement(element));
}
/**
* Finds the element's declaration by resolving its references up to the top, but not further than the file (to prevent un-stubbing)
*
* @param elementToResolve element to resolve
* @return its declaration
*/
@NotNull
public static PsiElement resolveToTheTop(@NotNull final PsiElement elementToResolve) {
PsiElement currentElement = elementToResolve;
final Set<PsiElement> checkedElements = new HashSet<>(); // To prevent PY-20553
while (true) {
final PsiReference reference = currentElement.getReference();
if (reference == null) {
break;
}
final PsiElement resolve = reference.resolve();
if (resolve == null || checkedElements.contains(resolve) || resolve.equals(currentElement) || !inSameFile(resolve, currentElement)) {
break;
}
currentElement = resolve;
checkedElements.add(resolve);
}
return currentElement;
}
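// Hedged usage sketch (added comment, not in the original file; 'referenceExpr' is hypothetical):
//
//   PsiElement declaration = PyUtil.resolveToTheTop(referenceExpr);
//
// Resolution stops at the first element without a reference, at an already visited element
// (cycle protection, see PY-20553), or as soon as it would leave the containing file.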
/**
* Note that returned list may contain {@code null} items, e.g. for unresolved import elements, originally wrapped
* in {@link com.jetbrains.python.psi.resolve.ImportedResolveResult}.
*/
@NotNull
public static List<PsiElement> multiResolveTopPriority(@NotNull PsiElement element, @NotNull PyResolveContext resolveContext) {
if (element instanceof PyReferenceOwner) {
final PsiPolyVariantReference ref = ((PyReferenceOwner)element).getReference(resolveContext);
return filterTopPriorityResults(ref.multiResolve(false));
}
else {
final PsiReference reference = element.getReference();
return reference != null ? Collections.singletonList(reference.resolve()) : Collections.emptyList();
}
}
@NotNull
public static List<PsiElement> multiResolveTopPriority(@NotNull PsiPolyVariantReference reference) {
return filterTopPriorityResults(reference.multiResolve(false));
}
@NotNull
public static List<PsiElement> filterTopPriorityResults(ResolveResult @NotNull [] resolveResults) {
if (resolveResults.length == 0) return Collections.emptyList();
final int maxRate = getMaxRate(Arrays.asList(resolveResults));
return StreamEx
.of(resolveResults)
.filter(resolveResult -> getRate(resolveResult) >= maxRate)
.map(ResolveResult::getElement)
.nonNull()
.toList();
}
@NotNull
public static <E extends ResolveResult> List<E> filterTopPriorityResults(@NotNull List<? extends E> resolveResults) {
if (resolveResults.isEmpty()) return Collections.emptyList();
final int maxRate = getMaxRate(resolveResults);
return ContainerUtil.filter(resolveResults, resolveResult -> getRate(resolveResult) >= maxRate);
}
public static @NotNull <E extends ResolveResult> List<PsiElement> filterTopPriorityElements(@NotNull List<? extends E> resolveResults) {
return ContainerUtil.mapNotNull(filterTopPriorityResults(resolveResults), ResolveResult::getElement);
}
private static int getMaxRate(@NotNull List<? extends ResolveResult> resolveResults) {
return resolveResults
.stream()
.mapToInt(PyUtil::getRate)
.max()
.orElse(Integer.MIN_VALUE);
}
private static int getRate(@NotNull ResolveResult resolveResult) {
return resolveResult instanceof RatedResolveResult ? ((RatedResolveResult)resolveResult).getRate() : RatedResolveResult.RATE_NORMAL;
}
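// Worked example (comment only, not part of the original code): given results rated
// RATE_NORMAL, RATE_HIGH and RATE_HIGH, filterTopPriorityResults() keeps only the two
// RATE_HIGH entries, since an element survives only when its rate equals the maximum.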
/**
* Gets class init method
*
* @param pyClass class where to find init
* @return class init method if any
*/
@Nullable
public static PyFunction getInitMethod(@NotNull final PyClass pyClass) {
return pyClass.findMethodByName(PyNames.INIT, false, null);
}
/**
* Clone of C# "as" operator.
* Checks if the expression has the expected type and casts it if so. Returns null otherwise.
* It saves the coder from "instanceof / cast" chains.
*
* @param expression expression to check
* @param clazz class to cast
* @param <T> class to cast
* @return the expression cast to the appropriate type (if it could be cast). Null otherwise.
*/
@Nullable
public static <T> T as(@Nullable final Object expression, @NotNull final Class<T> clazz) {
return ObjectUtils.tryCast(expression, clazz);
}
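// Illustrative usage (added comment; 'expression' below is a hypothetical PsiElement):
//
//   PyStringLiteralExpression literal = PyUtil.as(expression, PyStringLiteralExpression.class);
//   if (literal != null) {
//     String value = literal.getStringValue();
//   }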
// TODO: Move to PsiElement?
/**
* Searches the references of the given element for one that resolves to an element of the expected type
*
* @param element element to search injected references for
* @param expectedClass expected type of element reference resolved to
* @param <T> expected type of element reference resolved to
* @return resolved element if found or null if not found
*/
@Nullable
public static <T extends PsiElement> T findReference(@NotNull final PsiElement element, @NotNull final Class<T> expectedClass) {
for (final PsiReference reference : element.getReferences()) {
final T result = as(reference.resolve(), expectedClass);
if (result != null) {
return result;
}
}
return null;
}
/**
* Converts a collection to a list of elements of a certain type, silently skipping elements of other types
*
* @param expression expression of collection type
* @param elementClass expected element type
* @param <T> expected element type
* @return list of elements of expected element type
*/
@NotNull
public static <T> List<T> asList(@Nullable final Collection<?> expression, @NotNull final Class<? extends T> elementClass) {
if ((expression == null) || expression.isEmpty()) {
return Collections.emptyList();
}
final List<T> result = new ArrayList<>();
for (final Object element : expression) {
final T toAdd = as(element, elementClass);
if (toAdd != null) {
result.add(toAdd);
}
}
return result;
}
/**
* Calculates and caches a value based on param. Think of it as a map with param as the key that is flushed on each PSI modification.
* <p>
* For nullable function see {@link #getNullableParameterizedCachedValue(PsiElement, Object, NullableFunction)}.
* <p>
* This function is used instead of {@link CachedValuesManager#createParameterizedCachedValue(ParameterizedCachedValueProvider, boolean)}
* because there the parameter is not used as a key but only for the first calculation. Hence the result should functionally depend on the element.
*
* @param element place to store cache
* @param param param to be used as key
* @param f function to produce value for key
* @param <T> value type
* @param <P> key type
*/
@NotNull
public static <T, P> T getParameterizedCachedValue(@NotNull PsiElement element, @Nullable P param, @NotNull NotNullFunction<P, T> f) {
final T result = getNullableParameterizedCachedValue(element, param, f);
assert result != null;
return result;
}
/**
* Same as {@link #getParameterizedCachedValue(PsiElement, Object, NotNullFunction)} but allows nulls.
*/
@Nullable
public static <T, P> T getNullableParameterizedCachedValue(@NotNull PsiElement element,
@Nullable P param,
@NotNull NullableFunction<P, T> f) {
final CachedValuesManager manager = CachedValuesManager.getManager(element.getProject());
final Map<Optional<P>, Optional<T>> cache = CachedValuesManager.getCachedValue(element, manager.getKeyForClass(f.getClass()), () -> {
// concurrent hash map is a null-hostile collection
return CachedValueProvider.Result.create(Maps.newConcurrentMap(), PsiModificationTracker.MODIFICATION_COUNT);
});
// Don't use ConcurrentHashMap#computeIfAbsent(), it blocks if the function tries to update the cache recursively for the same key
// during computation. We can accept here that some values will be computed several times due to non-atomic updates.
final Optional<P> wrappedParam = Optional.ofNullable(param);
Optional<T> value = cache.get(wrappedParam);
if (value == null) {
value = Optional.ofNullable(f.fun(param));
cache.put(wrappedParam, value);
}
return value.orElse(null);
}
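// Minimal usage sketch (comment added for illustration; 'computeMembers' is a hypothetical helper):
//
//   List<String> members = PyUtil.getParameterizedCachedValue(
//     classElement, memberName, param -> computeMembers(classElement, param));
//
// The map lives on 'classElement' and is dropped on every PSI modification, so the lambda
// is re-evaluated for each key after the next code change.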
/**
* This method is allowed to be called from any thread, but in general you should not set {@code modal=true} if you're calling it
* from the write action, because in this case {@code function} will be executed right in the current thread (presumably EDT)
* without any progress whatsoever to avoid possible deadlock.
*
* @see com.intellij.openapi.application.impl.ApplicationImpl#runProcessWithProgressSynchronously(Runnable, String, boolean, boolean, Project, JComponent, String)
*/
public static void runWithProgress(@Nullable Project project, @Nls(capitalization = Nls.Capitalization.Title) @NotNull String title,
boolean modal, boolean canBeCancelled, @NotNull final Consumer<? super ProgressIndicator> function) {
if (modal) {
ProgressManager.getInstance().run(new Task.Modal(project, title, canBeCancelled) {
@Override
public void run(@NotNull ProgressIndicator indicator) {
function.consume(indicator);
}
});
}
else {
ProgressManager.getInstance().run(new Task.Backgroundable(project, title, canBeCancelled) {
@Override
public void run(@NotNull ProgressIndicator indicator) {
function.consume(indicator);
}
});
}
}
/**
* Executes code only if <pre>_PYCHARM_VERBOSE_MODE</pre> is set in the environment (which should be done for debugging purposes only)
*
* @param runnable code to call
*/
public static void verboseOnly(@NotNull final Runnable runnable) {
if (VERBOSE_MODE) {
runnable.run();
}
}
/**
* Returns the line comment that immediately precedes the statement list of the given compound statement. The Python parser ensures
* that it follows the statement header, i.e. it's directly after the colon, not on its own line.
*/
@Nullable
public static PsiComment getCommentOnHeaderLine(@NotNull PyStatementListContainer container) {
return as(getHeaderEndAnchor(container), PsiComment.class);
}
@NotNull
public static PsiElement getHeaderEndAnchor(@NotNull PyStatementListContainer container) {
final PyStatementList statementList = container.getStatementList();
return Objects.requireNonNull(PyPsiUtils.getPrevNonWhitespaceSibling(statementList));
}
public static boolean isPy2ReservedWord(@NotNull PyReferenceExpression node) {
if (LanguageLevel.forElement(node).isPython2()) {
if (!node.isQualified()) {
final String name = node.getName();
if (PyNames.NONE.equals(name) || PyNames.FALSE.equals(name) || PyNames.TRUE.equals(name)) {
return true;
}
}
}
return false;
}
/**
* Retrieves the document from {@link PsiDocumentManager} using the anchor PSI element and, if it's not null,
* passes it to the consumer function.
* <p>
* The document is first released from pending PSI operations and then committed after the function has been applied
* in a {@code try/finally} block, so that subsequent operations on PSI could be performed.
*
* @see PsiDocumentManager#doPostponedOperationsAndUnblockDocument(Document)
* @see PsiDocumentManager#commitDocument(Document)
* @see #updateDocumentUnblockedAndCommitted(PsiElement, Function)
*/
public static void updateDocumentUnblockedAndCommitted(@NotNull PsiElement anchor, @NotNull Consumer<? super Document> consumer) {
updateDocumentUnblockedAndCommitted(anchor, document -> {
consumer.consume(document);
return null;
});
}
@Nullable
public static <T> T updateDocumentUnblockedAndCommitted(@NotNull PsiElement anchor, @NotNull Function<? super Document, ? extends T> func) {
final PsiDocumentManager manager = PsiDocumentManager.getInstance(anchor.getProject());
final Document document = manager.getDocument(anchor.getContainingFile());
if (document != null) {
manager.doPostponedOperationsAndUnblockDocument(document);
try {
return func.fun(document);
}
finally {
manager.commitDocument(document);
}
}
return null;
}
@Nullable
public static PyType getReturnTypeToAnalyzeAsCallType(@NotNull PyFunction function, @NotNull TypeEvalContext context) {
if (isInitMethod(function)) {
final PyClass cls = function.getContainingClass();
if (cls != null) {
for (PyTypeProvider provider : PyTypeProvider.EP_NAME.getExtensionList()) {
final PyType providedClassType = provider.getGenericType(cls, context);
if (providedClassType != null) {
return providedClassType;
}
}
final PyInstantiableType classType = as(context.getType(cls), PyInstantiableType.class);
if (classType != null) {
return classType.toInstance();
}
}
}
return context.getReturnType(function);
}
/**
* Create a new expression fragment from the given text, setting the specified element as its context,
* and return the contained expression of the first expression statement in it.
*
* @param expressionText text of the expression
* @param context context element used to resolve symbols in the expression
* @return instance of {@link PyExpression} as described
* @see PyExpressionCodeFragment
*/
@Nullable
public static PyExpression createExpressionFromFragment(@NotNull String expressionText, @NotNull PsiElement context) {
final PyExpressionCodeFragmentImpl codeFragment =
new PyExpressionCodeFragmentImpl(context.getProject(), "dummy.py", expressionText, false);
codeFragment.setContext(context);
final PyExpressionStatement statement = as(codeFragment.getFirstChild(), PyExpressionStatement.class);
return statement != null ? statement.getExpression() : null;
}
public static boolean isRoot(PsiFileSystemItem directory) {
if (directory == null) return true;
VirtualFile vFile = directory.getVirtualFile();
if (vFile == null) return true;
Project project = directory.getProject();
return isRoot(vFile, project);
}
public static boolean isRoot(@NotNull VirtualFile directory, @NotNull Project project) {
ProjectFileIndex fileIndex = ProjectFileIndex.SERVICE.getInstance(project);
return Comparing.equal(fileIndex.getClassRootForFile(directory), directory) ||
Comparing.equal(fileIndex.getContentRootForFile(directory), directory) ||
Comparing.equal(fileIndex.getSourceRootForFile(directory), directory);
}
/**
* Checks whether {@code file}, representing a Python module or package, can be imported into {@code targetFile}.
*/
public static boolean isImportable(PsiFile targetFile, @NotNull PsiFileSystemItem file) {
PsiDirectory parent = (PsiDirectory)file.getParent();
return parent != null && file != targetFile &&
(isRoot(parent) ||
parent == targetFile.getParent() ||
isPackage(parent, false, null));
}
@NotNull
public static Collection<String> collectUsedNames(@Nullable final PsiElement scope) {
if (!(scope instanceof PyClass) && !(scope instanceof PyFile) && !(scope instanceof PyFunction)) {
return Collections.emptyList();
}
final Set<String> variables = new HashSet<>() {
@Override
public boolean add(String s) {
return s != null && super.add(s);
}
};
scope.acceptChildren(new PyRecursiveElementVisitor() {
@Override
public void visitPyTargetExpression(@NotNull final PyTargetExpression node) {
variables.add(node.getName());
}
@Override
public void visitPyNamedParameter(@NotNull final PyNamedParameter node) {
variables.add(node.getName());
}
@Override
public void visitPyReferenceExpression(@NotNull PyReferenceExpression node) {
if (!node.isQualified()) {
variables.add(node.getReferencedName());
}
else {
super.visitPyReferenceExpression(node);
}
}
@Override
public void visitPyFunction(@NotNull final PyFunction node) {
variables.add(node.getName());
}
@Override
public void visitPyClass(@NotNull final PyClass node) {
variables.add(node.getName());
}
});
return variables;
}
/**
* If argument is a PsiDirectory, turn it into a PsiFile that points to __init__.py in that directory.
* If there's no __init__.py there, null is returned; there's no point in resolving to a dir which is not a package.
* Alas, resolve() and multiResolve() can't return anything but a PyFile or PsiFileImpl.isPsiUpToDate() would fail.
* This is because isPsiUpToDate() relies on identity of objects returned by FileViewProvider.getPsi().
* If we ever need to exactly tell a dir from __init__.py, that logic has to change.
*
* @param target a resolve candidate.
* @return a PsiFile if target was a PsiDirectory, or null, or target unchanged.
*/
@Nullable
public static PsiElement turnDirIntoInit(@Nullable PsiElement target) {
if (target instanceof PsiDirectory) {
final PsiDirectory dir = (PsiDirectory)target;
final PsiFile initStub = dir.findFile(PyNames.INIT_DOT_PYI);
if (initStub != null && !PyiStubSuppressor.isIgnoredStub(initStub)) {
return initStub;
}
final PsiFile initFile = dir.findFile(PyNames.INIT_DOT_PY);
if (initFile != null) {
return initFile; // ResolveImportUtil will extract directory part as needed, everyone else is better off with a file.
}
else {
return null;
} // dir without __init__.py does not resolve
}
else {
return target;
} // don't touch non-dirs
}
/**
* If the directory is a PsiDirectory that is also a valid Python package, return the PsiFile that points to __init__.py,
* if such a file exists, or the directory itself (i.e. a namespace package). Otherwise, return {@code null}.
* Unlike {@link #turnDirIntoInit(PsiElement)} this function handles namespace packages and
* accepts only PsiDirectories as target.
*
* @param directory directory to check
* @param anchor optional PSI element to determine language level as for {@link #isPackage(PsiDirectory, PsiElement)}
* @return PsiFile or PsiDirectory if the target is a Python package, and {@code null} otherwise
*/
@Nullable
public static PsiElement getPackageElement(@NotNull PsiDirectory directory, @Nullable PsiElement anchor) {
if (isPackage(directory, anchor)) {
final PsiElement init = turnDirIntoInit(directory);
if (init != null) {
return init;
}
return directory;
}
return null;
}
/**
* If target is a Python module named __init__.py file, return its directory. Otherwise return target unchanged.
*
* @param target PSI element to check
* @return PsiDirectory or target unchanged
*/
@Contract("null -> null")
@Nullable
public static PsiElement turnInitIntoDir(@Nullable PsiElement target) {
if (target instanceof PyFile && isPackage((PsiFile)target)) {
return ((PsiFile)target).getContainingDirectory();
}
return target;
}
/**
* @see #isPackage(PsiDirectory, boolean, PsiElement)
*/
public static boolean isPackage(@NotNull PsiDirectory directory, @Nullable PsiElement anchor) {
return isPackage(directory, true, anchor);
}
/**
* Checks that the given PsiDirectory can be treated as a Python package, i.e. it either contains __init__.py or is a namespace package
* (effectively any directory in Python 3.3 and above). Setuptools namespace packages can be checked as well, but it requires access to
* {@link PySetuptoolsNamespaceIndex} and may slow things down during update of project indexes.
* Also note that this method does not check that directory itself and its parents have valid importable names,
* use {@link PyNames#isIdentifier(String)} for this purpose.
*
* @param directory PSI directory to check
* @param checkSetupToolsPackages whether setuptools namespace packages should be considered as well
* @param anchor optional anchor element to determine language level
* @return whether given directory is Python package
* @see PyNames#isIdentifier(String)
*/
public static boolean isPackage(@NotNull PsiDirectory directory, boolean checkSetupToolsPackages, @Nullable PsiElement anchor) {
if (isExplicitPackage(directory)) return true;
final LanguageLevel level = anchor != null ? LanguageLevel.forElement(anchor) : LanguageLevel.forElement(directory);
if (!level.isPython2()) {
return true;
}
return checkSetupToolsPackages && isSetuptoolsNamespacePackage(directory);
}
public static boolean isPackage(@NotNull PsiFile file) {
for (PyCustomPackageIdentifier customPackageIdentifier : PyCustomPackageIdentifier.EP_NAME.getExtensions()) {
if (customPackageIdentifier.isPackageFile(file)) {
return true;
}
}
return PyNames.INIT_DOT_PY.equals(file.getName());
}
public static boolean isPackage(@NotNull PsiFileSystemItem anchor, @Nullable PsiElement location) {
return anchor instanceof PsiFile ? isPackage((PsiFile)anchor) :
anchor instanceof PsiDirectory && isPackage((PsiDirectory)anchor, location);
}
public static boolean isCustomPackage(@NotNull PsiDirectory directory) {
for (PyCustomPackageIdentifier customPackageIdentifier : PyCustomPackageIdentifier.EP_NAME.getExtensions()) {
if (customPackageIdentifier.isPackage(directory)) {
return true;
}
}
return false;
}
public static boolean isExplicitPackage(@NotNull PsiDirectory directory) {
return isOrdinaryPackage(directory) || isCustomPackage(directory);
}
private static boolean isSetuptoolsNamespacePackage(@NotNull PsiDirectory directory) {
final String packagePath = getPackagePath(directory);
return packagePath != null && !PySetuptoolsNamespaceIndex.find(packagePath, directory.getProject()).isEmpty();
}
@Nullable
private static String getPackagePath(@NotNull PsiDirectory directory) {
final QualifiedName name = QualifiedNameFinder.findShortestImportableQName(directory);
return name != null ? name.toString() : null;
}
/**
* Counts initial underscores of an identifier.
*
* @param name identifier
* @return 0 if null or no initial underscores found, 1 if there's only one underscore, 2 if there are two or more initial underscores.
*/
public static int getInitialUnderscores(@Nullable String name) {
return name == null ? 0 : name.startsWith("__") ? 2 : name.startsWith(PyNames.UNDERSCORE) ? 1 : 0;
}
/**
* @param name identifier to check
* @return true iff the name looks like a class-private one, starting with two underscores but not ending with two underscores.
*/
public static boolean isClassPrivateName(@NotNull String name) {
return name.startsWith("__") && !name.endsWith("__");
}
public static boolean isSpecialName(@NotNull String name) {
return name.length() > 4 && name.startsWith("__") && name.endsWith("__");
}
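// Commented examples of the naming helpers above (added for illustration only):
//   getInitialUnderscores("_foo")   -> 1
//   getInitialUnderscores("__foo")  -> 2
//   isClassPrivateName("__foo")     -> true   (subject to name mangling)
//   isClassPrivateName("__init__")  -> false  (ends with two underscores)
//   isSpecialName("__init__")       -> true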
/**
* Constructs new lookup element for completion of keyword argument with equals sign appended.
*
* @param name name of the parameter
* @param settingsAnchor file used to check code style settings in order to surround the equals sign with spaces if necessary
* @return lookup element
*/
@NotNull
public static LookupElement createNamedParameterLookup(@NotNull String name, @NotNull PsiFile settingsAnchor, boolean addEquals) {
final String suffix;
if (addEquals) {
if (PythonCodeStyleService.getInstance().isSpaceAroundEqInKeywordArgument(settingsAnchor)) {
suffix = " = ";
}
else {
suffix = "=";
}
} else {
suffix = "";
}
LookupElementBuilder lookupElementBuilder = LookupElementBuilder.create(name + suffix).withIcon(PlatformIcons.PARAMETER_ICON);
lookupElementBuilder = lookupElementBuilder.withInsertHandler(OverwriteEqualsInsertHandler.INSTANCE);
lookupElementBuilder.putUserData(PyCompletionMlElementInfo.Companion.getKey(), PyCompletionMlElementKind.NAMED_ARG.asInfo());
return PrioritizedLookupElement.withGrouping(lookupElementBuilder, 1);
}
@NotNull
public static LookupElement createNamedParameterLookup(@NotNull String name, @NotNull PsiFile settingsAnchor) {
return createNamedParameterLookup(name, settingsAnchor, true);
}
/**
* Peels argument expression of parentheses and of keyword argument wrapper
*
* @param expr an item of getArguments() array
* @return expression actually passed as argument
*/
@Nullable
public static PyExpression peelArgument(PyExpression expr) {
while (expr instanceof PyParenthesizedExpression) expr = ((PyParenthesizedExpression)expr).getContainedExpression();
if (expr instanceof PyKeywordArgument) expr = ((PyKeywordArgument)expr).getValueExpression();
return expr;
}
public static String getFirstParameterName(PyFunction container) {
String selfName = PyNames.CANONICAL_SELF;
if (container != null) {
final PyParameter[] params = container.getParameterList().getParameters();
if (params.length > 0) {
final PyNamedParameter named = params[0].getAsNamed();
if (named != null) {
selfName = named.getName();
}
}
}
return selfName;
}
@RequiresEdt
public static void addSourceRoots(@NotNull Module module, @NotNull Collection<VirtualFile> roots) {
if (roots.isEmpty()) {
return;
}
ApplicationManager.getApplication().runWriteAction(
() -> {
final ModifiableRootModel model = ModuleRootManager.getInstance(module).getModifiableModel();
for (VirtualFile root : roots) {
boolean added = false;
for (ContentEntry entry : model.getContentEntries()) {
final VirtualFile file = entry.getFile();
if (file != null && VfsUtilCore.isAncestor(file, root, true)) {
entry.addSourceFolder(root, false);
added = true;
}
}
if (!added) {
model.addContentEntry(root).addSourceFolder(root, false);
}
}
model.commit();
}
);
}
@RequiresEdt
public static void removeSourceRoots(@NotNull Module module, @NotNull Collection<VirtualFile> roots) {
if (roots.isEmpty()) {
return;
}
ApplicationManager.getApplication().runWriteAction(
() -> {
final ModifiableRootModel model = ModuleRootManager.getInstance(module).getModifiableModel();
for (ContentEntry entry : model.getContentEntries()) {
for (SourceFolder folder : entry.getSourceFolders()) {
if (roots.contains(folder.getFile())) {
entry.removeSourceFolder(folder);
}
}
if (roots.contains(entry.getFile()) && entry.getSourceFolders().length == 0) {
model.removeContentEntry(entry);
}
}
model.commit();
}
);
}
/**
* @return Source roots <strong>and</strong> content roots for the module containing the element
*/
@NotNull
public static Collection<VirtualFile> getSourceRoots(@NotNull PsiElement foothold) {
final Module module = ModuleUtilCore.findModuleForPsiElement(foothold);
if (module != null) {
Collection<VirtualFile> roots = getSourceRoots(module);
ModelBranch branch = ModelBranch.getPsiBranch(foothold);
if (branch != null) {
return ContainerUtil.map(roots, branch::findFileCopy);
}
return roots;
}
return Collections.emptyList();
}
/**
* @return Source roots <strong>and</strong> content roots for module
*/
@NotNull
public static Collection<VirtualFile> getSourceRoots(@NotNull Module module) {
final Set<VirtualFile> result = new LinkedHashSet<>();
final ModuleRootManager manager = ModuleRootManager.getInstance(module);
Collections.addAll(result, manager.getSourceRoots());
Collections.addAll(result, manager.getContentRoots());
return result;
}
@Nullable
public static VirtualFile findInRoots(Module module, String path) {
if (module != null) {
for (VirtualFile root : getSourceRoots(module)) {
VirtualFile file = root.findFileByRelativePath(path);
if (file != null) {
return file;
}
}
}
return null;
}
@Nullable
public static List<String> strListValue(PyExpression value) {
while (value instanceof PyParenthesizedExpression) {
value = ((PyParenthesizedExpression)value).getContainedExpression();
}
if (value instanceof PySequenceExpression) {
final PyExpression[] elements = ((PySequenceExpression)value).getElements();
List<String> result = new ArrayList<>(elements.length);
for (PyExpression element : elements) {
if (!(element instanceof PyStringLiteralExpression)) {
return null;
}
result.add(((PyStringLiteralExpression)element).getStringValue());
}
return result;
}
return null;
}
@NotNull
public static Map<String, PyExpression> dictValue(@NotNull PyDictLiteralExpression dict) {
Map<String, PyExpression> result = Maps.newLinkedHashMap();
for (PyKeyValueExpression keyValue : dict.getElements()) {
PyExpression key = keyValue.getKey();
PyExpression value = keyValue.getValue();
if (key instanceof PyStringLiteralExpression) {
result.put(((PyStringLiteralExpression)key).getStringValue(), value);
}
}
return result;
}
/**
* @param what thing to search for
* @param variants things to search among
* @return true iff what.equals() one of the variants.
*/
public static <T> boolean among(@NotNull T what, T... variants) {
for (T s : variants) {
if (what.equals(s)) return true;
}
return false;
}
@Nullable
public static String getKeywordArgumentString(PyCallExpression expr, String keyword) {
return PyPsiUtils.strValue(expr.getKeywordArgument(keyword));
}
public static boolean isExceptionClass(PyClass pyClass) {
if (isBaseException(pyClass.getQualifiedName())) {
return true;
}
for (PyClassLikeType type : pyClass.getAncestorTypes(TypeEvalContext.codeInsightFallback(pyClass.getProject()))) {
if (type != null && isBaseException(type.getClassQName())) {
return true;
}
}
return false;
}
private static boolean isBaseException(String name) {
return name != null && (name.contains("BaseException") || name.startsWith("exceptions."));
}
public static final class MethodFlags {
private final boolean myIsStaticMethod;
private final boolean myIsMetaclassMethod;
private final boolean myIsSpecialMetaclassMethod;
private final boolean myIsClassMethod;
/**
* @return true iff the method belongs to a metaclass (a class having 'type' among its ancestors).
*/
public boolean isMetaclassMethod() {
return myIsMetaclassMethod;
}
/**
* @return true iff isMetaclassMethod and the method is either __init__ or __call__.
*/
public boolean isSpecialMetaclassMethod() {
return myIsSpecialMetaclassMethod;
}
public boolean isStaticMethod() {
return myIsStaticMethod;
}
public boolean isClassMethod() {
return myIsClassMethod;
}
private MethodFlags(boolean isClassMethod, boolean isStaticMethod, boolean isMetaclassMethod, boolean isSpecialMetaclassMethod) {
myIsClassMethod = isClassMethod;
myIsStaticMethod = isStaticMethod;
myIsMetaclassMethod = isMetaclassMethod;
myIsSpecialMetaclassMethod = isSpecialMetaclassMethod;
}
/**
* @param node a function
* @return a new flags object, or null if the function is not a method
*/
@Nullable
public static MethodFlags of(@NotNull PyFunction node) {
PyClass cls = node.getContainingClass();
if (cls != null) {
PyFunction.Modifier modifier = node.getModifier();
boolean isMetaclassMethod = false;
PyClass type_cls = PyBuiltinCache.getInstance(node).getClass("type");
for (PyClass ancestor_cls : cls.getAncestorClasses(null)) {
if (ancestor_cls == type_cls) {
isMetaclassMethod = true;
break;
}
}
final String method_name = node.getName();
boolean isSpecialMetaclassMethod = isMetaclassMethod && method_name != null && among(method_name, PyNames.INIT, "__call__");
return new MethodFlags(modifier == CLASSMETHOD, modifier == STATICMETHOD, isMetaclassMethod, isSpecialMetaclassMethod);
}
return null;
}
//TODO: Doc
public boolean isInstanceMethod() {
return !(myIsClassMethod || myIsStaticMethod);
}
}
public static boolean isSuperCall(@NotNull PyCallExpression node) {
PyClass klass = PsiTreeUtil.getParentOfType(node, PyClass.class);
if (klass == null) return false;
PyExpression callee = node.getCallee();
if (callee == null) return false;
String name = callee.getName();
if (PyNames.SUPER.equals(name)) {
PsiReference reference = callee.getReference();
if (reference == null) return false;
PsiElement resolved = reference.resolve();
PyBuiltinCache cache = PyBuiltinCache.getInstance(node);
if (resolved != null && cache.isBuiltin(resolved)) {
PyExpression[] args = node.getArguments();
if (args.length > 0) {
String firstArg = args[0].getText();
if (firstArg.equals(klass.getName()) || firstArg.equals(PyNames.CANONICAL_SELF + "." + PyNames.__CLASS__)) {
return true;
}
for (PyClass s : klass.getAncestorClasses(null)) {
if (firstArg.equals(s.getName())) {
return true;
}
}
}
else {
return true;
}
}
}
return false;
}
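// Hedged illustration (comment only, not from the original source): for call nodes written
// inside a class Foo, and assuming 'super' resolves to the builtin, isSuperCall() is true
// both for "super()" (no arguments) and for "super(Foo, self)" (first argument matching the
// containing class or one of its ancestor classes).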
@Nullable
public static PsiElement findPrevAtOffset(PsiFile psiFile, int caretOffset, @NotNull Class<? extends PsiElement> @NotNull ... toSkip) {
PsiElement element;
if (caretOffset < 0) {
return null;
}
int lineStartOffset = 0;
final Document document = PsiDocumentManager.getInstance(psiFile.getProject()).getDocument(psiFile);
if (document != null) {
int lineNumber = document.getLineNumber(caretOffset);
lineStartOffset = document.getLineStartOffset(lineNumber);
}
do {
caretOffset--;
element = psiFile.findElementAt(caretOffset);
}
while (caretOffset >= lineStartOffset && PsiTreeUtil.instanceOf(element, toSkip));
return PsiTreeUtil.instanceOf(element, toSkip) ? null : element;
}
@Nullable
public static PsiElement findNonWhitespaceAtOffset(PsiFile psiFile, int caretOffset) {
PsiElement element = findNextAtOffset(psiFile, caretOffset, PsiWhiteSpace.class);
if (element == null) {
element = findPrevAtOffset(psiFile, caretOffset - 1, PsiWhiteSpace.class);
}
return element;
}
@Nullable
public static PsiElement findElementAtOffset(PsiFile psiFile, int caretOffset) {
PsiElement element = findPrevAtOffset(psiFile, caretOffset);
if (element == null) {
element = findNextAtOffset(psiFile, caretOffset);
}
return element;
}
@Nullable
public static PsiElement findNextAtOffset(@NotNull final PsiFile psiFile, int caretOffset, @NotNull Class<? extends PsiElement> @NotNull ... toSkip) {
PsiElement element = psiFile.findElementAt(caretOffset);
if (element == null) {
return null;
}
final Document document = PsiDocumentManager.getInstance(psiFile.getProject()).getDocument(psiFile);
int lineEndOffset = 0;
if (document != null) {
int lineNumber = document.getLineNumber(caretOffset);
lineEndOffset = document.getLineEndOffset(lineNumber);
}
while (caretOffset < lineEndOffset && PsiTreeUtil.instanceOf(element, toSkip)) {
caretOffset++;
element = psiFile.findElementAt(caretOffset);
}
return PsiTreeUtil.instanceOf(element, toSkip) ? null : element;
}
public static boolean isSignatureCompatibleTo(@NotNull PyCallable callable, @NotNull PyCallable otherCallable,
@NotNull TypeEvalContext context) {
final List<PyCallableParameter> parameters = callable.getParameters(context);
final List<PyCallableParameter> otherParameters = otherCallable.getParameters(context);
final int optionalCount = optionalParametersCount(parameters);
final int otherOptionalCount = optionalParametersCount(otherParameters);
final int requiredCount = requiredParametersCount(callable, parameters);
final int otherRequiredCount = requiredParametersCount(otherCallable, otherParameters);
if (hasPositionalContainer(otherParameters) || hasKeywordContainer(otherParameters)) {
if (otherParameters.size() == specialParametersCount(otherCallable, otherParameters)) {
return true;
}
}
if (hasPositionalContainer(parameters) || hasKeywordContainer(parameters)) {
return requiredCount <= otherRequiredCount;
}
return requiredCount <= otherRequiredCount &&
optionalCount >= otherOptionalCount &&
namedParametersCount(parameters) >= namedParametersCount(otherParameters);
}
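// Worked example (comment added for clarity, not part of the original code):
//   def f(self, a, b=None)  is signature-compatible to  def f(self, a)
// because it requires no more arguments (1 required each, 'self' being special), offers at
// least as many optional parameters (1 >= 0) and at least as many named parameters (3 >= 2).
// The reverse direction fails the optional-parameter check (0 >= 1 is false).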
private static int optionalParametersCount(@NotNull List<PyCallableParameter> parameters) {
int n = 0;
for (PyCallableParameter parameter : parameters) {
if (parameter.hasDefaultValue()) {
n++;
}
}
return n;
}
private static int requiredParametersCount(@NotNull PyCallable callable, @NotNull List<PyCallableParameter> parameters) {
return namedParametersCount(parameters) - optionalParametersCount(parameters) - specialParametersCount(callable, parameters);
}
private static int specialParametersCount(@NotNull PyCallable callable, @NotNull List<PyCallableParameter> parameters) {
int n = 0;
if (hasPositionalContainer(parameters)) {
n++;
}
if (hasKeywordContainer(parameters)) {
n++;
}
if (isFirstParameterSpecial(callable, parameters)) {
n++;
}
return n;
}
private static boolean hasPositionalContainer(@NotNull List<PyCallableParameter> parameters) {
for (PyCallableParameter parameter : parameters) {
if (parameter.isPositionalContainer()) {
return true;
}
}
return false;
}
private static boolean hasKeywordContainer(@NotNull List<PyCallableParameter> parameters) {
for (PyCallableParameter parameter : parameters) {
if (parameter.isKeywordContainer()) {
return true;
}
}
return false;
}
private static int namedParametersCount(@NotNull List<PyCallableParameter> parameters) {
return ContainerUtil.count(parameters, p -> p.getParameter() instanceof PyNamedParameter);
}
private static boolean isFirstParameterSpecial(@NotNull PyCallable callable, @NotNull List<PyCallableParameter> parameters) {
final PyFunction method = callable.asMethod();
if (method != null) {
return isNewMethod(method) || method.getModifier() != STATICMETHOD;
}
else {
final PyCallableParameter first = ContainerUtil.getFirstItem(parameters);
return first != null && PyNames.CANONICAL_SELF.equals(first.getName());
}
}
/**
* @return true if passed {@code element} is a method (this means a function inside a class) named {@code __init__}.
* @see PyUtil#isNewMethod(PsiElement)
* @see PyUtil#isInitOrNewMethod(PsiElement)
* @see PyUtil#turnConstructorIntoClass(PyFunction)
*/
@Contract("null -> false")
public static boolean isInitMethod(@Nullable PsiElement element) {
final PyFunction function = as(element, PyFunction.class);
return function != null && PyNames.INIT.equals(function.getName()) && function.getContainingClass() != null;
}
/**
* @return true if passed {@code element} is a method (this means a function inside a class) named {@code __new__}.
* @see PyUtil#isInitMethod(PsiElement)
* @see PyUtil#isInitOrNewMethod(PsiElement)
* @see PyUtil#turnConstructorIntoClass(PyFunction)
*/
@Contract("null -> false")
public static boolean isNewMethod(@Nullable PsiElement element) {
final PyFunction function = as(element, PyFunction.class);
return function != null && PyNames.NEW.equals(function.getName()) && function.getContainingClass() != null;
}
/**
* @return true if passed {@code element} is a method (this means a function inside a class) named {@code __init__} or {@code __new__}.
* @see PyUtil#isInitMethod(PsiElement)
* @see PyUtil#isNewMethod(PsiElement)
* @see PyUtil#turnConstructorIntoClass(PyFunction)
*/
@Contract("null -> false")
public static boolean isInitOrNewMethod(@Nullable PsiElement element) {
final PyFunction function = as(element, PyFunction.class);
if (function == null) return false;
final String name = function.getName();
return (PyNames.INIT.equals(name) || PyNames.NEW.equals(name)) && function.getContainingClass() != null;
}
/**
* @return containing class for a method named {@code __init__} or {@code __new__}.
* @see PyUtil#isInitMethod(PsiElement)
* @see PyUtil#isNewMethod(PsiElement)
* @see PyUtil#isInitOrNewMethod(PsiElement)
*/
@Nullable
@Contract("null -> null")
public static PyClass turnConstructorIntoClass(@Nullable PyFunction function) {
return isInitOrNewMethod(function) ? function.getContainingClass() : null;
}
public static boolean isStarImportableFrom(@NotNull String name, @NotNull PyFile file) {
final List<String> dunderAll = file.getDunderAll();
return dunderAll != null ? dunderAll.contains(name) : !name.startsWith("_");
}
public static boolean isObjectClass(@NotNull PyClass cls) {
return PyNames.OBJECT.equals(cls.getQualifiedName());
}
@Nullable
public static PyType getReturnTypeOfMember(@NotNull PyType type,
@NotNull String memberName,
@Nullable PyExpression location,
@NotNull TypeEvalContext context) {
final PyResolveContext resolveContext = PyResolveContext.defaultContext(context);
final List<? extends RatedResolveResult> resolveResults = type.resolveMember(memberName, location, AccessDirection.READ,
resolveContext);
if (resolveResults != null) {
final List<PyType> types = new ArrayList<>();
for (RatedResolveResult resolveResult : resolveResults) {
final PyType returnType = getReturnType(resolveResult.getElement(), context);
if (returnType != null) {
types.add(returnType);
}
}
return PyUnionType.union(types);
}
return null;
}
@Nullable
private static PyType getReturnType(@Nullable PsiElement element, @NotNull TypeEvalContext context) {
if (element instanceof PyTypedElement) {
final PyType type = context.getType((PyTypedElement)element);
return getReturnType(type, context);
}
return null;
}
@Nullable
private static PyType getReturnType(@Nullable PyType type, @NotNull TypeEvalContext context) {
if (type instanceof PyCallableType) {
return ((PyCallableType)type).getReturnType(context);
}
if (type instanceof PyUnionType) {
return PyUnionType.toNonWeakType(((PyUnionType)type).map(member -> getReturnType(member, context)));
}
return null;
}
public static boolean isEmptyFunction(@NotNull PyFunction function) {
final PyStatementList statementList = function.getStatementList();
final PyStatement[] statements = statementList.getStatements();
if (statements.length == 0) {
return true;
}
else if (statements.length == 1) {
if (isStringLiteral(statements[0]) || isPassOrRaiseOrEmptyReturnOrEllipsis(statements[0])) {
return true;
}
}
else if (statements.length == 2) {
if (isStringLiteral(statements[0]) && (isPassOrRaiseOrEmptyReturnOrEllipsis(statements[1]))) {
return true;
}
}
return false;
}
private static boolean isPassOrRaiseOrEmptyReturnOrEllipsis(PyStatement stmt) {
if (stmt instanceof PyPassStatement || stmt instanceof PyRaiseStatement) {
return true;
}
if (stmt instanceof PyReturnStatement && ((PyReturnStatement)stmt).getExpression() == null) {
return true;
}
if (stmt instanceof PyExpressionStatement) {
final PyExpression expression = ((PyExpressionStatement)stmt).getExpression();
if (expression instanceof PyNoneLiteralExpression && ((PyNoneLiteralExpression)expression).isEllipsis()) {
return true;
}
}
return false;
}
public static boolean isStringLiteral(@Nullable PyStatement stmt) {
if (stmt instanceof PyExpressionStatement) {
final PyExpression expr = ((PyExpressionStatement)stmt).getExpression();
if (expr instanceof PyStringLiteralExpression) {
return true;
}
}
return false;
}
@Nullable
public static PyLoopStatement getCorrespondingLoop(@NotNull PsiElement breakOrContinue) {
return breakOrContinue instanceof PyContinueStatement || breakOrContinue instanceof PyBreakStatement
? getCorrespondingLoopImpl(breakOrContinue)
: null;
}
@Nullable
private static PyLoopStatement getCorrespondingLoopImpl(@NotNull PsiElement element) {
final PyLoopStatement loop = PsiTreeUtil.getParentOfType(element, PyLoopStatement.class, true, ScopeOwner.class);
if (loop instanceof PyStatementWithElse && PsiTreeUtil.isAncestor(((PyStatementWithElse)loop).getElsePart(), element, true)) {
return getCorrespondingLoopImpl(loop);
}
return loop;
}
public static boolean isForbiddenMutableDefault(@Nullable PyTypedElement value, @NotNull TypeEvalContext context) {
if (value == null) return false;
final PyClassType type = as(context.getType(value), PyClassType.class);
if (type != null && !type.isDefinition()) {
final PyBuiltinCache builtinCache = PyBuiltinCache.getInstance(value);
final Set<PyClass> forbiddenClasses = StreamEx
.of(builtinCache.getListType(), builtinCache.getSetType(), builtinCache.getDictType())
.nonNull()
.map(PyClassType::getPyClass)
.toSet();
final PyClass cls = type.getPyClass();
return forbiddenClasses.contains(cls) || ContainerUtil.exists(cls.getAncestorClasses(context), forbiddenClasses::contains);
}
return false;
}
public static void addDecorator(@NotNull PyFunction function, @NotNull String decorator) {
final PyDecoratorList currentDecorators = function.getDecoratorList();
final List<String> decoTexts = new ArrayList<>();
decoTexts.add(decorator);
if (currentDecorators != null) {
for (PyDecorator deco : currentDecorators.getDecorators()) {
decoTexts.add(deco.getText());
}
}
final PyElementGenerator generator = PyElementGenerator.getInstance(function.getProject());
final PyDecoratorList newDecorators = generator.createDecoratorList(ArrayUtilRt.toStringArray(decoTexts));
if (currentDecorators != null) {
currentDecorators.replace(newDecorators);
}
else {
function.addBefore(newDecorators, function.getFirstChild());
}
}
public static boolean isOrdinaryPackage(@NotNull PsiDirectory directory) {
return directory.findFile(PyNames.INIT_DOT_PY) != null;
}
/**
* This helper class allows collecting various information about the AST nodes composing a {@link PyStringLiteralExpression}.
*/
public static final class StringNodeInfo {
private final ASTNode myNode;
private final String myPrefix;
private final String myQuote;
private final TextRange myContentRange;
public StringNodeInfo(@NotNull ASTNode node) {
final IElementType nodeType = node.getElementType();
// TODO Migrate to newer PyStringElement API
if (!PyTokenTypes.STRING_NODES.contains(nodeType) && nodeType != PyElementTypes.FSTRING_NODE) {
throw new IllegalArgumentException("Node must be valid Python string literal token, but " + nodeType + " was given");
}
myNode = node;
final String nodeText = node.getText();
final int prefixLength = PyStringLiteralUtil.getPrefixLength(nodeText);
myPrefix = nodeText.substring(0, prefixLength);
myContentRange = PyStringLiteralUtil.getContentRange(nodeText);
myQuote = nodeText.substring(prefixLength, myContentRange.getStartOffset());
}
public StringNodeInfo(@NotNull PsiElement element) {
this(element.getNode());
}
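// Illustrative decomposition (comment added for clarity; the literal below is hypothetical):
// for a string node with text  ur'''spam'''
//   getPrefix()  -> "ur"
//   getQuote()   -> "'''"   (isTripleQuoted() == true)
//   getContent() -> "spam"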
@NotNull
public ASTNode getNode() {
return myNode;
}
/**
* @return string prefix, e.g. "UR", "b" etc.
*/
@NotNull
public String getPrefix() {
return myPrefix;
}
/**
* @return content of the string node between quotes
*/
@NotNull
public String getContent() {
return myContentRange.substring(myNode.getText());
}
/**
* @return <em>relative</em> range of the content (excluding prefix and quotes)
* @see #getAbsoluteContentRange()
*/
@NotNull
public TextRange getContentRange() {
return myContentRange;
}
/**
* @return <em>absolute</em> content range that accounts offset of the {@link #getNode() node} in the document
*/
@NotNull
public TextRange getAbsoluteContentRange() {
return getContentRange().shiftRight(myNode.getStartOffset());
}
/**
* @return the first character of {@link #getQuote()}
*/
public char getSingleQuote() {
return myQuote.charAt(0);
}
@NotNull
public String getQuote() {
return myQuote;
}
public boolean isTripleQuoted() {
return myQuote.length() == 3;
}
/**
* @return true if the string literal ends with its starting quote
*/
public boolean isTerminated() {
final String text = myNode.getText();
return text.length() - myPrefix.length() >= myQuote.length() * 2 && text.endsWith(myQuote);
}
/**
* @return true if given string node contains "u" or "U" prefix
*/
public boolean isUnicode() {
return PyStringLiteralUtil.isUnicodePrefix(myPrefix);
}
/**
* @return true if given string node contains "r" or "R" prefix
*/
public boolean isRaw() {
return PyStringLiteralUtil.isRawPrefix(myPrefix);
}
/**
* @return true if given string node contains "b" or "B" prefix
*/
public boolean isBytes() {
return PyStringLiteralUtil.isBytesPrefix(myPrefix);
}
/**
* @return true if given string node contains "f" or "F" prefix
*/
public boolean isFormatted() {
return PyStringLiteralUtil.isFormattedPrefix(myPrefix);
}
/**
* @return true if other string node has the same decorations, i.e. quotes and prefix
*/
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
StringNodeInfo info = (StringNodeInfo)o;
return getQuote().equals(info.getQuote()) &&
isRaw() == info.isRaw() &&
isUnicode() == info.isUnicode() &&
isBytes() == info.isBytes();
}
}
public static final class IterHelper { // TODO: rename sanely
private IterHelper() {}
@Nullable
public static PsiNamedElement findName(Iterable<? extends PsiNamedElement> it, String name) {
PsiNamedElement ret = null;
for (PsiNamedElement elt : it) {
if (elt != null) {
// qualified refs don't match by last name, and we're not checking FQNs here
if (elt instanceof PyQualifiedExpression && ((PyQualifiedExpression)elt).isQualified()) continue;
if (name.equals(elt.getName())) { // plain name matches
ret = elt;
break;
}
}
}
return ret;
}
}
}
| []
| []
| []
| [] | [] | java | 0 | 0 | |
web/manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.dev")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
rush00/wsgi.py | """
WSGI config for rush00 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rush00.settings")
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
PILasOPENCV.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
import cv2
try:
import gif2numpy
gif2numpy_installed = True
except:
gif2numpy_installed = False
try:
import numpy2gif
numpy2gif_installed = True
except:
numpy2gif_installed = False
import re, os, sys, tempfile
import numbers
try:
import mss
import mss.tools
mss_installed = True
except:
mss_installed = False
from io import StringIO
try:
import ctypes
from ctypes.wintypes import WORD, DWORD, LONG
bitmap_classes_ok = True
except:
bitmap_classes_ok = False
try:
import freetype
freetype_installed = True
except:
freetype_installed = False
__author__ = 'imressed, bunkus'
VERSION = "2.9"
"""
Version history:
2.9: New ImageEnhance functions Brightness and Contrast implemented
2.8: Opening an image file that does not exist now raises an exception
2.7: Bugfix: when drawing text together with lines or other draw objects, the lines were not drawn; fixed
2.6: Bugfix for method show: old windows were not deleted, which led to display errors; fixed
2.5: Bugfixes for coordinates given as floats instead of integers when drawing polygons, texts, lines, points and rectangles;
bugfix for composite when the alpha mask and the images did not have the same number of channels;
bugfix in floodfill when the value was given as a single integer
2.4: Caught several exceptions so that the basic functions still work when dependency modules are not installed;
ImageDraw method bitmap implemented; ImageChops method screen implemented; single or multiple frames can now be saved to gif files
2.3: Updated the module for gif2numpy Version 1.2
2.2: Bugfix for Python3 on file objects, multiple frames from gifs can be loaded now and can be retrieved with seek(frame)
2.1: although OpenCV does not support gif images, PILasOPENCV can now load gif images courtesy of the gif2numpy library
2.0: disabled ImageGrab.grabclipboard() in case it throws exceptions which happens e.g. on Ubuntu/Linux
1.9: disabled ImageGrab.grabclipboard() which throws exceptions on some platforms
1.8: ImageGrab.grab() and ImageGrab.grabclipboard() implemented with dependency on mss
1.7: fixed fromarray
1.6: fixed frombytes, getdata, putdata and caught exception in case freetype-py is not installed or dll is missing
"""
if sys.version[0] == "2":
py3 = False
basstring = basestring
fil_object = file
import cStringIO
from operator import isNumberType as isNumberTyp
from operator import isSequenceType as isSequenceTyp
else:
py3 = True
basstring = str
from io import IOBase
import collections.abc
fil_object = IOBase
def isNumberTyp(obj):
return isinstance(obj, numbers.Number)
def isSequenceTyp(obj):
return isinstance(obj, collections.abc.Sequence)
NONE = 0
MAX_IMAGE_PIXELS = int(1024 * 1024 * 1024 // 4 // 3)
# transpose
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
ROTATE_90 = 2
ROTATE_180 = 3
ROTATE_270 = 4
TRANSPOSE = 5
TRANSVERSE = 6
# transforms
AFFINE = 0
EXTENT = 1
PERSPECTIVE = 2
QUAD = 3
MESH = 4
# resampling filters
NEAREST = NONE = 0
BOX = 4
BILINEAR = LINEAR = 2
HAMMING = 5
BICUBIC = CUBIC = 3
LANCZOS = ANTIALIAS = 1
# dithers
NEAREST = NONE = 0
ORDERED = 1 # Not yet implemented
RASTERIZE = 2 # Not yet implemented
FLOYDSTEINBERG = 3 # default
# palettes/quantizers
WEB = 0
ADAPTIVE = 1
MEDIANCUT = 0
MAXCOVERAGE = 1
FASTOCTREE = 2
LIBIMAGEQUANT = 3
# categories
NORMAL = 0
SEQUENCE = 1
CONTAINER = 2
NEAREST = cv2.INTER_NEAREST # 0
BILINEAR = INTER_LINEAR = cv2.INTER_LINEAR # 1
BICUBIC = cv2.INTER_CUBIC # 2
LANCZOS = cv2.INTER_LANCZOS4 # 4
INTERAREA = cv2.INTER_AREA # 3
# --------------------------------------------------------------------
# Registries
ID = []
OPEN = {}
MIME = {}
SAVE = {}
SAVE_ALL = {}
EXTENSION = {".bmp": "BMP", ".dib": "DIB", ".jpeg": "JPEG", ".jpg": "JPEG", ".jpe": "JPEG", ".jp2": "JPEG2000", ".png": "PNG",
".webp": "WEBP", ".pbm": "PBM", ".pgm": "PGM", ".ppm": "PPM", ".sr": "SR", ".ras": "RAS", ".tif": "TIFF", ".tiff": "TIFF", ".gif": "GIF"}
CV2_FONTS = [cv2.FONT_HERSHEY_SIMPLEX, cv2.FONT_HERSHEY_PLAIN, cv2.FONT_HERSHEY_DUPLEX,
cv2.FONT_HERSHEY_COMPLEX, cv2.FONT_HERSHEY_TRIPLEX, cv2.FONT_HERSHEY_COMPLEX_SMALL,
cv2.FONT_HERSHEY_SCRIPT_SIMPLEX, cv2.FONT_HERSHEY_SCRIPT_COMPLEX]
DECODERS = {}
ENCODERS = {}
# --------------------------------------------------------------------
# Modes supported by this version
_MODEINFO = {
# NOTE: this table will be removed in future versions. use
# getmode* functions or ImageMode descriptors instead.
# official modes
"1": ("L", "L", ("1",)),
"L": ("L", "L", ("L",)),
"I": ("L", "I", ("I",)),
"F": ("L", "F", ("F",)),
"P": ("RGB", "L", ("P",)),
"RGB": ("RGB", "L", ("R", "G", "B")),
"RGBX": ("RGB", "L", ("R", "G", "B", "X")),
"RGBA": ("RGB", "L", ("R", "G", "B", "A")),
"CMYK": ("RGB", "L", ("C", "M", "Y", "K")),
"YCbCr": ("RGB", "L", ("Y", "Cb", "Cr")),
"LAB": ("RGB", "L", ("L", "A", "B")),
"HSV": ("RGB", "L", ("H", "S", "V")),
# Experimental modes include I;16, I;16L, I;16B, RGBa, BGR;15, and
# BGR;24. Use these modes only if you know exactly what you're
# doing...
}
if sys.byteorder == 'little':
_ENDIAN = '<'
else:
_ENDIAN = '>'
_MODE_CONV = {
# official modes
"1": ('|b1', None), # Bits need to be extended to bytes
"L": ('|u1', None),
"LA": ('|u1', 2),
"I": (_ENDIAN + 'i4', None),
"F": (_ENDIAN + 'f4', None),
"P": ('|u1', None),
"RGB": ('|u1', 3),
"RGBX": ('|u1', 4),
"RGBA": ('|u1', 4),
"CMYK": ('|u1', 4),
"YCbCr": ('|u1', 3),
"LAB": ('|u1', 3), # UNDONE - unsigned |u1i1i1
"HSV": ('|u1', 3),
# I;16 == I;16L, and I;32 == I;32L
"I;16": ('<u2', None),
"I;16B": ('>u2', None),
"I;16L": ('<u2', None),
"I;16S": ('<i2', None),
"I;16BS": ('>i2', None),
"I;16LS": ('<i2', None),
"I;32": ('<u4', None),
"I;32B": ('>u4', None),
"I;32L": ('<u4', None),
"I;32S": ('<i4', None),
"I;32BS": ('>i4', None),
"I;32LS": ('<i4', None),
}
def _conv_type_shape(im):
typ, extra = _MODE_CONV[im.mode]
if extra is None:
return (im.size[1], im.size[0]), typ
else:
return (im.size[1], im.size[0], extra), typ
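# Illustrative sketch (comment added for clarity, not part of the original module):
# for a 100x50 image in mode "RGB", _MODE_CONV["RGB"] is ('|u1', 3), so _conv_type_shape()
# yields the numpy shape (50, 100, 3) with dtype '|u1'; for mode "F" it yields (50, 100)
# with dtype '<f4' on a little-endian machine.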
MODES = sorted(_MODEINFO)
# raw modes that may be memory mapped. NOTE: if you change this, you
# may have to modify the stride calculation in map.c too!
_MAPMODES = ("L", "P", "RGBX", "RGBA", "CMYK", "I;16", "I;16L", "I;16B")
if bitmap_classes_ok:
try:
class BITMAPFILEHEADER(ctypes.Structure):
_pack_ = 1 # structure field byte alignment
_fields_ = [
('bfType', WORD), # file type ("BM")
('bfSize', DWORD), # file size in bytes
('bfReserved1', WORD), # must be zero
('bfReserved2', WORD), # must be zero
('bfOffBits', DWORD), # byte offset to the pixel array
]
SIZEOF_BITMAPFILEHEADER = ctypes.sizeof(BITMAPFILEHEADER)
class BITMAPINFOHEADER(ctypes.Structure):
_pack_ = 1 # structure field byte alignment
_fields_ = [
('biSize', DWORD),
('biWidth', LONG),
('biHeight', LONG),
('biPLanes', WORD),
('biBitCount', WORD),
('biCompression', DWORD),
('biSizeImage', DWORD),
('biXPelsPerMeter', LONG),
('biYPelsPerMeter', LONG),
('biClrUsed', DWORD),
('biClrImportant', DWORD)
]
SIZEOF_BITMAPINFOHEADER = ctypes.sizeof(BITMAPINFOHEADER)
bitmap_classes_ok = True
except:
bitmap_classes_ok = False
def getmodebase(mode):
"""
Gets the "base" mode for given mode. This function returns "L" for
images that contain grayscale data, and "RGB" for images that
contain color data.
:param mode: Input mode.
:returns: "L" or "RGB".
:exception KeyError: If the input mode was not a standard mode.
"""
return ImageMode().getmode(mode).basemode
def getmodetype(mode):
"""
Gets the storage type mode. Given a mode, this function returns a
single-layer mode suitable for storing individual bands.
:param mode: Input mode.
:returns: "L", "I", or "F".
:exception KeyError: If the input mode was not a standard mode.
"""
return ImageMode().getmode(mode).basetype
def getmodebandnames(mode):
"""
Gets a list of individual band names. Given a mode, this function returns
a tuple containing the names of individual bands (use
:py:method:`~PIL.Image.getmodetype` to get the mode used to store each
individual band).
:param mode: Input mode.
:returns: A tuple containing band names. The length of the tuple
gives the number of bands in an image of the given mode.
:exception KeyError: If the input mode was not a standard mode.
"""
return ImageMode().getmode(mode).bands
def getmodebands(mode):
"""
Gets the number of individual bands for this mode.
:param mode: Input mode.
:returns: The number of bands in this mode.
:exception KeyError: If the input mode was not a standard mode.
"""
return len(ImageMode().getmode(mode).bands)
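# Hedged usage examples (added comment, not in the original source), assuming the ImageMode
# helper resolves modes according to _MODEINFO above:
#   getmodebase("RGBA") -> "RGB"
#   getmodetype("I") -> "I"
#   getmodebandnames("YCbCr") -> ("Y", "Cb", "Cr")
#   getmodebands("CMYK") -> 4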
colormap = {
# X11 colour table from https://drafts.csswg.org/css-color-4/, with
# gray/grey spelling issues fixed. This is a superset of HTML 4.0
# colour names used in CSS 1.
"aliceblue": "#f0f8ff",
"antiquewhite": "#faebd7",
"aqua": "#00ffff",
"aquamarine": "#7fffd4",
"azure": "#f0ffff",
"beige": "#f5f5dc",
"bisque": "#ffe4c4",
"black": "#000000",
"blanchedalmond": "#ffebcd",
"blue": "#0000ff",
"blueviolet": "#8a2be2",
"brown": "#a52a2a",
"burlywood": "#deb887",
"cadetblue": "#5f9ea0",
"chartreuse": "#7fff00",
"chocolate": "#d2691e",
"coral": "#ff7f50",
"cornflowerblue": "#6495ed",
"cornsilk": "#fff8dc",
"crimson": "#dc143c",
"cyan": "#00ffff",
"darkblue": "#00008b",
"darkcyan": "#008b8b",
"darkgoldenrod": "#b8860b",
"darkgray": "#a9a9a9",
"darkgrey": "#a9a9a9",
"darkgreen": "#006400",
"darkkhaki": "#bdb76b",
"darkmagenta": "#8b008b",
"darkolivegreen": "#556b2f",
"darkorange": "#ff8c00",
"darkorchid": "#9932cc",
"darkred": "#8b0000",
"darksalmon": "#e9967a",
"darkseagreen": "#8fbc8f",
"darkslateblue": "#483d8b",
"darkslategray": "#2f4f4f",
"darkslategrey": "#2f4f4f",
"darkturquoise": "#00ced1",
"darkviolet": "#9400d3",
"deeppink": "#ff1493",
"deepskyblue": "#00bfff",
"dimgray": "#696969",
"dimgrey": "#696969",
"dodgerblue": "#1e90ff",
"firebrick": "#b22222",
"floralwhite": "#fffaf0",
"forestgreen": "#228b22",
"fuchsia": "#ff00ff",
"gainsboro": "#dcdcdc",
"ghostwhite": "#f8f8ff",
"gold": "#ffd700",
"goldenrod": "#daa520",
"gray": "#808080",
"grey": "#808080",
"green": "#008000",
"greenyellow": "#adff2f",
"honeydew": "#f0fff0",
"hotpink": "#ff69b4",
"indianred": "#cd5c5c",
"indigo": "#4b0082",
"ivory": "#fffff0",
"khaki": "#f0e68c",
"lavender": "#e6e6fa",
"lavenderblush": "#fff0f5",
"lawngreen": "#7cfc00",
"lemonchiffon": "#fffacd",
"lightblue": "#add8e6",
"lightcoral": "#f08080",
"lightcyan": "#e0ffff",
"lightgoldenrodyellow": "#fafad2",
"lightgreen": "#90ee90",
"lightgray": "#d3d3d3",
"lightgrey": "#d3d3d3",
"lightpink": "#ffb6c1",
"lightsalmon": "#ffa07a",
"lightseagreen": "#20b2aa",
"lightskyblue": "#87cefa",
"lightslategray": "#778899",
"lightslategrey": "#778899",
"lightsteelblue": "#b0c4de",
"lightyellow": "#ffffe0",
"lime": "#00ff00",
"limegreen": "#32cd32",
"linen": "#faf0e6",
"magenta": "#ff00ff",
"maroon": "#800000",
"mediumaquamarine": "#66cdaa",
"mediumblue": "#0000cd",
"mediumorchid": "#ba55d3",
"mediumpurple": "#9370db",
"mediumseagreen": "#3cb371",
"mediumslateblue": "#7b68ee",
"mediumspringgreen": "#00fa9a",
"mediumturquoise": "#48d1cc",
"mediumvioletred": "#c71585",
"midnightblue": "#191970",
"mintcream": "#f5fffa",
"mistyrose": "#ffe4e1",
"moccasin": "#ffe4b5",
"navajowhite": "#ffdead",
"navy": "#000080",
"oldlace": "#fdf5e6",
"olive": "#808000",
"olivedrab": "#6b8e23",
"orange": "#ffa500",
"orangered": "#ff4500",
"orchid": "#da70d6",
"palegoldenrod": "#eee8aa",
"palegreen": "#98fb98",
"paleturquoise": "#afeeee",
"palevioletred": "#db7093",
"papayawhip": "#ffefd5",
"peachpuff": "#ffdab9",
"peru": "#cd853f",
"pink": "#ffc0cb",
"plum": "#dda0dd",
"powderblue": "#b0e0e6",
"purple": "#800080",
"rebeccapurple": "#663399",
"red": "#ff0000",
"rosybrown": "#bc8f8f",
"royalblue": "#4169e1",
"saddlebrown": "#8b4513",
"salmon": "#fa8072",
"sandybrown": "#f4a460",
"seagreen": "#2e8b57",
"seashell": "#fff5ee",
"sienna": "#a0522d",
"silver": "#c0c0c0",
"skyblue": "#87ceeb",
"slateblue": "#6a5acd",
"slategray": "#708090",
"slategrey": "#708090",
"snow": "#fffafa",
"springgreen": "#00ff7f",
"steelblue": "#4682b4",
"tan": "#d2b48c",
"teal": "#008080",
"thistle": "#d8bfd8",
"tomato": "#ff6347",
"turquoise": "#40e0d0",
"violet": "#ee82ee",
"wheat": "#f5deb3",
"white": "#ffffff",
"whitesmoke": "#f5f5f5",
"yellow": "#ffff00",
"yellowgreen": "#9acd32",
}
class ImagePointHandler:
# used as a mixin by point transforms (for use with im.point)
pass
class ImageTransformHandler:
# used as a mixin by geometry transforms (for use with im.transform)
pass
class Image(object):
def __init__(self, image=None, filename=None, format=None, instances=None, exts=None, image_specs=None):
# avoid mutable default arguments, which would be shared between instances
self._instance = image
self.filename = filename
self.format = format
self.frames = instances if instances is not None else []
self.n_frames = len(self.frames)
if self.n_frames>1:
self.is_animated = True
else:
self.is_animated = False
self._frame_nr = 0
self.exts = exts if exts is not None else []
self.image_specs = image_specs if image_specs is not None else {}
self._mode = None
if image is not None or filename is not None:
if self.filename is not None:
ext = os.path.splitext(self.filename)[1]
self.format = EXTENSION[ext]
if self._instance is not None:
self.size = (self._instance.shape[1], self._instance.shape[0])
if len(self._instance.shape)>2:
self.layers = self.bands = self._instance.shape[2]
else:
self.layers = self.bands = 1
self.dtype = self._instance.dtype
if self.dtype == np.uint8:
self.bits = 8
self._mode = self._get_mode(self._instance.shape, self.dtype)
else:
self._mode = None
self.size = (0, 0)
self.dtype = None
self.mode = self._mode
# @property
# def size(self):
# return self._instance.shape[:2][::-1]
# @property
# def width(self):
# return self._instance.shape[1]
# @property
# def height(self):
# return self._instance.size[0]
# @property
# def mode(self):
# if self._mode:
# return self._mode
# else:
# raise ValueError('No mode specified.')
# @property
# def shape(self):
# return self._instance.shape
# @property
# def get_instance(self):
# return self._instance
def _get_channels_and_depth(self, mode):
mode = str(mode).upper()
if mode == '1':
return 1, np.bool_  # np.bool was removed from NumPy; use the np.bool_ scalar type
if mode == 'L':
return 1, np.uint8
if mode == 'LA':
return 2, np.uint8
if mode == 'P':
return 1, np.uint8
if mode == 'RGB':
return 3, np.uint8
if mode == 'RGBA':
return 4, np.uint8
if mode == 'CMYK':
return 4, np.uint8
if mode == 'YCBCR':
return 3, np.uint8
if mode == 'LAB':
return 3, np.uint8
if mode == 'HSV':
return 3, np.uint8
if mode == 'I':
return 1, np.int32
if mode == 'F':
return 1, np.float32
raise ValueError('Your mode name is incorrect.')
def _get_converting_flag(self, mode, inst=None):
"returns the cv2 flag for color conversion from inst to mode, uses the mode of the image object by default"
mode = mode.upper()
if inst is None:
inst = self._mode.upper()
if mode == inst:
return "EQUAL"
converting_table = {
'L':{
'RGB':cv2.COLOR_GRAY2BGR,
'RGBA':cv2.COLOR_GRAY2BGRA
},
'RGB':{
'1':cv2.COLOR_BGR2GRAY,
'L':cv2.COLOR_BGR2GRAY,
'LAB':cv2.COLOR_BGR2LAB,
'HSV':cv2.COLOR_BGR2HSV,
'YCBCR':cv2.COLOR_BGR2YCR_CB,
'RGBA':cv2.COLOR_BGR2BGRA
},
'RGBA':{
'1':cv2.COLOR_BGRA2GRAY,
'L':cv2.COLOR_BGRA2GRAY,
'RGB':cv2.COLOR_BGRA2BGR
},
'LAB':{
'RGB':cv2.COLOR_LAB2BGR
},
'HSV':{
'RGB':cv2.COLOR_HSV2BGR
},
'YCBCR':{
'RGB':cv2.COLOR_YCR_CB2BGR
}
}
if inst in converting_table:
if mode in converting_table[inst]:
return converting_table[inst][mode]
else:
raise ValueError('You can not convert image to this type')
else:
raise ValueError('This image type can not be converted')
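# Sketch of the flag lookup above, assuming images are stored in cv2's BGR order
# and "rgb_array" is a (h, w, 3) uint8 numpy array used only for illustration:
#   Image(rgb_array)._get_converting_flag("L")    -> cv2.COLOR_BGR2GRAY
#   Image(rgb_array)._get_converting_flag("RGBA") -> cv2.COLOR_BGR2BGRA
#   Image(rgb_array)._get_converting_flag("RGB")  -> "EQUAL" (no conversion needed)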
def _get_mode(self, shape, depth):
if len(shape) == 2:
channels = 1
else:
channels = shape[2]
if channels == 1 and depth == np.bool_:
return '1'
if channels == 1 and depth == np.uint8:
return 'L'
if channels == 1 and depth == np.uint8:
return 'P'
if channels == 2 and depth == np.uint8:
return 'LA'
if channels == 3 and depth == np.uint8:
return 'RGB'
if channels == 4 and depth == np.uint8:
return 'RGBA'
if channels == 4 and depth == np.uint8:
return 'CMYK'
if channels == 3 and depth == np.uint8:
return 'YCBCR'
if channels == 3 and depth == np.uint8:
return 'LAB'
if channels == 3 and depth == np.uint8:
return 'HSV'
if channels == 1 and depth == np.int32:
return 'I'
if channels == 1 and depth == np.float32 :
return 'F'
def _new(self, mode, size, color=None):
self._mode = mode
channels, depth = self._get_channels_and_depth(mode)
size = size[::-1]
self._instance = np.zeros(size + (channels,), depth)
if color is not None:
self._instance[:, 0:] = color
return self._instance
def alpha_composite(self, im, dest=(0, 0), source=(0, 0)):
""" 'In-place' analog of Image.alpha_composite. Composites an image
onto this image.
:param im: image to composite over this one
:param dest: Optional 2 tuple (left, top) specifying the upper
left corner in this (destination) image.
:param source: Optional 2 (left, top) tuple for the upper left
corner in the overlay source image, or 4 tuple (left, top, right,
bottom) for the bounds of the source rectangle
Performance Note: Not currently implemented in-place in the core layer.
"""
if not isinstance(source, (list, tuple)):
raise ValueError("Source must be a tuple")
if not isinstance(dest, (list, tuple)):
raise ValueError("Destination must be a tuple")
if not len(source) in (2, 4):
raise ValueError("Source must be a 2 or 4-tuple")
if not len(dest) == 2:
raise ValueError("Destination must be a 2-tuple")
if min(source) < 0:
raise ValueError("Source must be non-negative")
if min(dest) < 0:
raise ValueError("Destination must be non-negative")
channels, depth = self._get_channels_and_depth(im)
_mode = self._get_mode(im.shape, im.dtype)
_im = self._new(_mode, (im.shape[1], im.shape[0]))
if len(source) == 2:
source = source + _im.size
# over image, crop if it's not the whole thing.
if source == (0, 0) + _im.size:
overlay = _im
else:
overlay = _im.crop(source)
# target for the paste
box = dest + (dest[0] + overlay.width, dest[1] + overlay.height)
# destination image. don't copy if we're using the whole image.
if box == (0, 0) + self.size:
background = self._instance
else:
background = self.crop(box)
result = alpha_composite(background, overlay)
self.paste(result, box)
def crop(self, box, image=None):
"crops the image to the box which is a tuple = left, upper, right, lower"
if image is None:
part = self._instance[box[1]:box[3], box[0]:box[2]]
return Image(part)
else:
image = image[box[1]:box[3], box[0]:box[2]]
return image
def copy(self):
"returns a deep copy of the original"
return Image(self._instance.copy(), format=self.format)
def close(self):
"closes all opened windows"
cv2.destroyAllWindows()
return None
def draft(self, mode, size):
"""
Configures the image file loader so it returns a version of the
image that as closely as possible matches the given mode and
size. For example, you can use this method to convert a color
JPEG to greyscale while loading it, or to extract a 128x192
version from a PCD file.
Note that this method modifies the :py:class:`~PIL.Image.Image` object
in place. If the image has already been loaded, this method has no
effect.
Note: This method is not implemented for most images. It is
currently implemented only for JPEG and PCD images.
:param mode: The requested mode.
:param size: The requested size.
"""
pass
def frombytes(self, mode, size, data, decoder_name="raw", *args):
"""
Loads this image with pixel data from a bytes object.
This method is similar to the :py:func:`~PIL.Image.frombytes` function,
but loads data into this image instead of creating a new image object.
"""
# may pass tuple instead of argument list
if len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
# default format
if decoder_name == "raw" and args == ():
args = self.mode
# unpack data
channels, depth = self._get_channels_and_depth(mode)
self._instance = np.frombuffer(data, dtype=depth)  # np.fromstring is deprecated for binary data
try:
self._instance = self._instance.reshape((size[1], size[0], channels))
except:
raise ValueError("not enough image data")
try:
self._instance = self._instance.astype(depth)
if channels == 3:
self._instance = cv2.cvtColor(self._instance, cv2.COLOR_BGR2RGB)
elif channels == 4:
self._instance = cv2.cvtColor(self._instance, cv2.COLOR_BGRA2RGBA)
except:
raise ValueError("cannot decode image data")
def fromstring(self, mode, size, data, decoder_name="raw", *args):
# raise NotImplementedError("fromstring() has been removed. "
# "Please call frombytes() instead.")
self.frombytes(mode, size, data, decoder_name, *args)
def convert(self, mode):
"converts an image to the given mode"
if self._mode.upper() == mode.upper():
return Image(self._instance.copy())
if not mode and self.mode == "P":
# determine default mode
if self.palette:
mode = self.palette.mode
else:
mode = "RGB"
if not mode or (mode == self.mode):
return Image(self._instance.copy())
return Image(self._convert(mode))
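# Minimal convert() usage sketch; constructing an Image straight from a numpy
# array is only an assumption for the example:
#   im = Image(np.zeros((4, 4, 3), np.uint8))   # "RGB" image backed by cv2/numpy
#   gray = im.convert("L")                      # cv2.cvtColor(..., COLOR_BGR2GRAY)
#   bw = im.convert("1")                        # Otsu-thresholded bilevel image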
def _convert(self, mode, obj=None):
if obj is None:
obj = self._instance
flag = self._get_converting_flag(mode)
else:
orig_mode = self._get_mode(obj.shape, obj.dtype)
flag = self._get_converting_flag(mode, inst=orig_mode)
if flag == "EQUAL":
return obj.copy()
if mode == "1":
im_gray = cv2.cvtColor(obj, cv2.COLOR_BGR2GRAY)
thresh, converted = cv2.threshold(im_gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
else:
converted = cv2.cvtColor(obj, flag)
return converted
def paste(self, img_color, box=None, mask=None):
"pastes either an image or a color to a region of interest defined in box with a mask"
if isinstance(img_color, Image): # pasting an image
_img_color = img_color._instance
if box is None:
box = (0, 0)
else:
if len(box) == 4:
if not(box[2]-box[0]==_img_color.shape[1] and box[3]-box[1]==_img_color.shape[0]):
raise ValueError("images do not match")
# convert modes
if len(img_color._instance.shape) == 3:
if img_color._instance.shape[2] != self._instance.shape[2] or img_color._instance.dtype != self._instance.dtype:
dest_mode = self._mode
_img_color = self._convert(dest_mode, obj=_img_color)
elif len(img_color._instance.shape) != len(self._instance.shape):
dest_mode = self._mode
_img_color = self._convert(dest_mode, obj=_img_color)
else: # pasting a colorbox
if box is None:
raise ValueError("cannot determine region size; use 4-item box")
img_dim = (box[3]-box[1]+1, box[2]-box[0]+1)
channels, depth = self._get_channels_and_depth(self._mode)
colorbox = np.zeros((img_dim[0], img_dim[1], channels), dtype=depth)
colorbox[:] = img_color
_img_color = colorbox.copy()
if mask is None:
self._instance = self._paste(self._instance, _img_color, box[0], box[1])
else:
# enlarge the image _img_color without resizing to the new_canvas
new_canvas = np.zeros(self._instance.shape, dtype=self._instance.dtype)
new_canvas = self._paste(new_canvas, _img_color, box[0], box[1])
if len(mask._instance.shape) == 3:
if mask._instance.shape[2] == 4: # RGBA
r, g, b, _mask = self.split(mask._instance)
elif mask._instance.shape[2] == 1:
_mask = mask._instance.copy()
else:
_mask = mask._instance.copy()
if _mask.shape[:2] != new_canvas.shape[:2]:
_new_mask = np.zeros(self._instance.shape[:2], dtype=self._instance.dtype)
_new_mask = ~(self._paste(_new_mask, _mask, box[0], box[1]))
else:
_new_mask = ~_mask
self._instance = composite(self._instance, new_canvas, _new_mask, np_image=True)
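# Hedged paste() sketch: a solid colour into a 4-item box, and a masked image
# paste; "patch" and "alpha_mask" are assumed Image objects of matching size.
#   canvas = Image(np.zeros((100, 100, 3), np.uint8))
#   canvas.paste((0, 0, 255), box=(10, 10, 50, 50))   # colour fill of a region
#   canvas.paste(patch, box=(0, 0), mask=alpha_mask)   # masked image paste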
def _paste(self, mother, child, x, y):
"Pastes the numpy image child into the numpy image mother at position (x, y)"
size = mother.shape
csize = child.shape
if y+csize[0]<0 or x+csize[1]<0 or y>size[0] or x>size[1]: return mother
sel = [int(y), int(x), csize[0], csize[1]]
csel = [0, 0, csize[0], csize[1]]
if y<0:
sel[0] = 0
sel[2] = csel[2] + y
csel[0] = -y
elif y+sel[2]>=size[0]:
sel[2] = int(size[0])
csel[2] = size[0]-y
else:
sel[2] = sel[0] + sel[2]
if x<0:
sel[1] = 0
sel[3] = csel[3] + x
csel[1] = -x
elif x+sel[3]>=size[1]:
sel[3] = int(size[1])
csel[3] = size[1]-x
else:
sel[3] = sel[1] + sel[3]
childpart = child[csel[0]:csel[2], csel[1]:csel[3]]
mother[sel[0]:sel[2], sel[1]:sel[3]] = childpart
return mother
def _scaleTo8Bit(self, image, div, displayMin=None, displayMax=None):
if displayMin is None:
displayMin = np.min(image)
if displayMax is None:
displayMax = np.max(image)
np.clip(image, displayMin, displayMax, out=image)
image = image - displayMin
cf = 255. / (displayMax - displayMin)
imageOut = (cf*image).astype(np.uint8)
return imageOut
def _filter_kernel(self, fa):
kernel = np.array(fa[3], dtype=np.float32)/fa[1]
kernel = kernel.reshape(fa[0])
# print(kernel)
return kernel
def filter(self, filtermethod):
"Filters this image using the given filter."
if filtermethod.name == "GaussianBlur":
return GaussianBlur().filter(self)
fa = filtermethod.filterargs
if filtermethod == EMBOSS:
_im = self._instance.astype(np.float32)
_im = cv2.filter2D(_im, -1, self._filter_kernel(fa))
_im = self._scaleTo8Bit(_im, fa[2])
elif filtermethod == CONTOUR:
_im = cv2.filter2D(self._instance, -1, self._filter_kernel(fa))
_im = ~_im
else:
_im = cv2.filter2D(self._instance, -1, self._filter_kernel(fa))
return Image(_im)
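# filter() usage sketch; EMBOSS, CONTOUR and GaussianBlur are assumed to be the
# filter objects defined elsewhere in this module:
#   embossed = im.filter(EMBOSS)          # filter2D kernel, rescaled to 8 bit
#   outlined = im.filter(CONTOUR)         # filter2D kernel, then inverted
#   blurred = im.filter(GaussianBlur())   # dispatched to GaussianBlur().filter(im)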
def getband(self, channel):
channels, depth = self._get_channels_and_depth(self._mode)
if channels == 1:
return self._instance.copy()
else:
chs = self.split()
return chs[channel]
def getbands(self):
return tuple([i for i in self._mode])
def getbbox(self):
"""
Calculates the bounding box of the non-zero regions in the
image.
:returns: The bounding box is returned as a 4-tuple defining the
left, upper, right, and lower pixel coordinate. See
:ref:`coordinate-system`. If the image is completely empty, this
method returns None.
"""
img_ = (self._instance > 0)
if not img_.any():
return None
rows = np.any(img_, axis=1)
cols = np.any(img_, axis=0)
rmin, rmax = np.argmax(rows), img_.shape[0] - 1 - np.argmax(np.flipud(rows))
cmin, cmax = np.argmax(cols), img_.shape[1] - 1 - np.argmax(np.flipud(cols))
return (cmin, rmin, cmax, rmax)  # left, upper, right, lower, as documented above
def _getcolors(self):
channels, depth = self._get_channels_and_depth(self._mode)
if channels == 1:
img = self._instance.copy()
y = img.shape[0]
x = img.shape[1]
flattened = img.reshape((x*y, 1))
uni, counts = np.unique(flattened, return_counts=True)
else:
if channels == 4:
r ,g, b, a = self.split()
colorband = (r, g, b)
img = merge("RGB", colorband, image=True)
else: # channels == 3
img = self._instance.copy()
y = img.shape[0]
x = img.shape[1]
flattened = img.reshape((x*y, 3))
uni, counts = np.unique(flattened, axis=0, return_counts=True)
return uni, counts
def getcolors(self, maxcolors=256):
"""
Returns a list of colors used in this image.
:param maxcolors: Maximum number of colors. If this number is
exceeded, this method returns None. The default limit is
256 colors.
:returns: An unsorted list of (count, pixel) values.
"""
if self._mode in ("1", "L", "P"):
# numpy arrays have no histogram() method; count pixel values directly
h = np.bincount(self._instance.ravel().astype(np.uint8), minlength=256)
out = []
for i in range(256):
if h[i]:
out.append((int(h[i]), i))
if len(out) > maxcolors:
return None
return out
uni, counts = self._getcolors()
if len(counts) > maxcolors: return None
colors = []
for l in range(len(counts)):
pixel = tuple(uni[l]) if uni.ndim > 1 else uni[l]
colors.append((counts[l], pixel))
return colors
def getdata(self, band=None):
channels, depth = self._get_channels_and_depth(self._mode)
flattened = self._instance.reshape((self.size[0]*self.size[1], channels))
return flattened
def getextrema(self):
return (self._instance.min(), self._instance.max())  # np.minimum/np.maximum are elementwise binary ufuncs
def getim(self):
return self._instance
def getpalette(self):
uni, counts = self._getcolors()
colors = list(np.ravel(uni))
return colors
def getpixel(self, xytup):
x, y = xytup
return self._instance[y, x]
def histogram(self, mask=None, extrema=None):
"""
Returns a histogram for the image. The histogram is returned as
a list of pixel counts, one for each pixel value in the source
image. If the image has more than one band, the histograms for
all bands are concatenated (for example, the histogram for an
"RGB" image contains 768 values).
A bilevel image (mode "1") is treated as a greyscale ("L") image
by this method.
If a mask is provided, the method returns a histogram for those
parts of the image where the mask image is non-zero. The mask
image must have the same size as the image, and be either a
bi-level image (mode "1") or a greyscale image ("L").
:param mask: An optional mask.
:returns: A list containing pixel counts.
"""
uni, counts = self._getcolors()
return [l for l in counts]
def offset(self, xoffset, yoffset=None):
raise NotImplementedError("offset() has been removed. "
"Please call ImageChops.offset() instead.")
def point(self, lut, mode=None):
"Map image through lookup table"
raise NotImplementedError("point() has been not implemented in this library. ")
def putpixel(self, xytup, color):
self._instance[xytup[1], xytup[0]] = color
def putalpha(self, alpha):
"""
Adds or replaces the alpha layer in this image. If the image
does not have an alpha layer, it's converted to "LA" or "RGBA".
The new layer must be either "L" or "1".
:param alpha: The new alpha layer. This can either be an "L" or "1"
image having the same size as this image, or an integer or
other color value.
"""
channels, depth = self._get_channels_and_depth(self._mode)
if isinstance(alpha, np.ndarray):
paste_image = True
else:
paste_image = False
if channels==4:
r, g, b, a = self.split()
if not paste_image:
a[:] = alpha
else:
a = alpha.copy()
colorband = (r, g, b, a)
self._instance = merge("RGBA", colorband, image=True)
elif channels == 3:
if not paste_image:
sh = self._instance.shape
sh = (sh[0], sh[1], 1)
a = np.zeros(sh, dtype=depth)
a[:] = alpha
else:
a = alpha.copy()
r, g, b = self.split()
colorband = (r, g, b, a)
self._instance = merge("RGBA", colorband, image=True)
elif channels < 2: # "L" or "LA"
if not paste_image:
sh = self._instance.shape
sh = (sh[0], sh[1], 1)
a = np.zeros(sh, dtype=depth)
a[:] = alpha
else:
a = alpha.copy()
if channels == 2:
l, a_old = self.split()
colorband = (l, a)
else:
colorband = (self._instance, a)
self._instance = merge("LA", colorband, image=True)
def putdata(self, dat, scale=1.0, offset=0.0):
"""
Copies pixel data to this image. This method copies data from a
sequence object into the image, starting at the upper left
corner (0, 0), and continuing until either the image or the
sequence ends. The scale and offset values are used to adjust
the sequence values: **pixel = value*scale + offset**.
:param data: A sequence object.
:param scale: An optional scale value. The default is 1.0.
:param offset: An optional offset value. The default is 0.0.
"""
data = np.array(dat)
data = data * scale + offset
channels, depth = self._get_channels_and_depth(self._mode)
siz = self.size
_im = np.ravel(self._instance)
data = data[:len(_im)]
_im[:len(data)] = data
self._instance = _im.reshape((siz[1], siz[0], channels))
self._instance = self._instance.astype(depth)
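# putdata() sketch: each value is mapped as pixel = value*scale + offset and
# written row by row from the upper-left corner.
#   im = Image(np.zeros((2, 2), np.uint8))          # "L" image (assumption)
#   im.putdata([0, 1, 2, 3], scale=10.0, offset=5)  # pixels become 5, 15, 25, 35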
def putpalette(self, data, rawmode="RGB"):
raise NotImplementedError("putpalette() has been not implemented in this library. ")
def quantize(self, colors=256, method=None, kmeans=0, palette=None):
raise NotImplementedError("quantize() has been not implemented in this library. ")
def remap_palette(self, dest_map, source_palette=None):
raise NotImplementedError("remap_palette() has been not implemented in this library. ")
def resize(self, size, filtermethod = cv2.INTER_LINEAR, image=None):
"resizes an image according to the given filter/interpolation method NEAREST, BILINEAR/INTER_LINEAR, BICUBIC, LANCZOS, INTERAREA"
if image is None:
_im = cv2.resize(self._instance, size, interpolation = filtermethod)
return Image(_im)
else:
return cv2.resize(image, size, interpolation = filtermethod)
def rotate_bound(self, angle, fillcolor=None):
# grab the dimensions of the image and then determine the
# center
h, w = self._instance.shape[:2]
(cX, cY) = (w // 2, h // 2)
# grab the rotation matrix (applying the negative of the
# angle to rotate clockwise), then grab the sine and cosine
# (i.e., the rotation components of the matrix)
M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
# compute the new bounding dimensions of the image
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
# adjust the rotation matrix to take into account translation
M[0, 2] += (nW / 2) - cX
M[1, 2] += (nH / 2) - cY
# perform the actual rotation and return the image
return cv2.warpAffine(self._instance, M, (nW, nH), borderValue=fillcolor)
def translated(self, image, x, y):
# define the translation matrix and perform the translation
M = np.float32([[1, 0, x], [0, 1, y]])
shifted = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))
# return the translated image
return shifted
def rotate(self, angle, resample=NEAREST, expand=0, center=None,
translate=None, fillcolor=None):
"""
Returns a rotated copy of this image. This method returns a
copy of this image, rotated the given number of degrees counter
clockwise around its centre.
:param angle: In degrees counter clockwise.
:param resample: An optional resampling filter. This can be
one of :py:attr:`PIL.Image.NEAREST` (use nearest neighbour),
:py:attr:`PIL.Image.BILINEAR` (linear interpolation in a 2x2
environment), or :py:attr:`PIL.Image.BICUBIC`
(cubic spline interpolation in a 4x4 environment).
If omitted, or if the image has mode "1" or "P", it is
set :py:attr:`PIL.Image.NEAREST`. See :ref:`concept-filters`.
:param expand: Optional expansion flag. If true, expands the output
image to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the
input image. Note that the expand flag assumes rotation around
the center and no translation.
:param center: Optional center of rotation (a 2-tuple). Origin is
the upper left corner. Default is the center of the image.
:param translate: An optional post-rotate translation (a 2-tuple).
:param fillcolor: An optional color for area outside the rotated image.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
angle = angle % 360.0
if fillcolor is None:
fillcolor = (0, 0, 0)
if expand == 0:
# grab the dimensions of the image
h, w = self.size[1], self.size[0]
# if the center is None, initialize it as the center of
# the image
if center is None:
center = (w // 2, h // 2)
scale = 1.0
# perform the rotation
M = cv2.getRotationMatrix2D(center, angle, scale)
_im = cv2.warpAffine(self._instance, M, (w, h), borderValue=fillcolor)
else:
_im = self.rotate_bound(angle)
if translate is not None:
_im = self.translated(_im, translate[0], translate[1])
return Image(_im)
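# rotate() sketch: expand=0 keeps the original canvas (corners may be clipped),
# expand=1 grows the canvas via rotate_bound so the whole image fits.
#   same_size = im.rotate(45)                  # warpAffine around the center
#   grown = im.rotate(45, expand=1)            # bounding-box sized output
#   shifted = im.rotate(0, translate=(10, 5))  # post-rotate translation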
def save(self, fp, format=None, **params):
"""
Saves this image under the given filename. If no format is
specified, the format to use is determined from the filename
extension, if possible.
Keyword options can be used to provide additional instructions
to the writer. If a writer doesn't recognise an option, it is
silently ignored. The available options are described in the
:doc:`image format documentation
<../handbook/image-file-formats>` for each writer.
You can use a file object instead of a filename. In this case,
you must always specify the format. The file object must
implement the ``seek``, ``tell``, and ``write``
methods, and be opened in binary mode.
:param fp: A filename (string), pathlib.Path object or file object.
:param format: Optional format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
:param params: Extra parameters to the image writer.
:returns: None
:exception ValueError: If the output format could not be determined
from the file name. Use the format option to solve this.
:exception IOError: If the file could not be written. The file
may have been created, and may contain partial data.
"""
if isinstance(fp, basstring):
if fp.lower().endswith(".gif"):
if numpy2gif_installed:
if self.is_animated:
numpy2gif.write_gif(self.frames, fp, fps=100//self.exts[0]['delay_time'])
else:
numpy2gif.write_gif(self._instance, fp)
else:
NotImplementedError("numpy2gif is not installed so cannot save gif images, install it with: pip install numpy2gif")
else:
cv2.imwrite(fp, self._instance)
return None
if isinstance(fp, fil_object):
fl = open(format, 'w')
fl.write(fp.read())
fl.close()
return None
return None
def seek(self, frame):
"""
Seeks to the given frame in this sequence file. If you seek
beyond the end of the sequence, the method raises an
**EOFError** exception. When a sequence file is opened, the
library automatically seeks to frame 0.
Note that in the current version of the library, most sequence
formats only allow you to seek to the next frame.
See :py:meth:`~PIL.Image.Image.tell`.
:param frame: Frame number, starting at 0.
:exception EOFError: If the call attempts to seek beyond the end
of the sequence.
"""
if frame>=self.n_frames:
raise EOFError("Frame number is beyond the number of frames")
else:
self._frame_nr = frame
self._instance = self.frames[frame]
def setim(self, numpy_image):
mode = Image()._get_mode(numpy_image.shape, numpy_image.dtype)
if mode != self._mode:
raise ValueError("Modes of mother image and child image do not match", self._mode, mode)
self._instance = numpy_image
def show(self, title=None, command=None, wait=0, destroyWindow=True):
"shows the image in a window"
if title is None:
title = ""
if command is None:
cv2.imshow(title, self._instance)
cv2.waitKey(wait)
if destroyWindow:
cv2.destroyWindow(title)
else:
flag, fname = tempfile.mkstemp()
cv2.imwrite(fname, self._instance)
os.system(command+" "+fname)
def split(self, image=None):
"splits the image into its color bands"
if image is None:
if len(self._instance.shape) == 3:
if self._instance.shape[2] == 1:
return self._instance.copy()
elif self._instance.shape[2] == 2:
l, a = cv2.split(self._instance)
return l, a
elif self._instance.shape[2] == 3:
b, g, r = cv2.split(self._instance)
return b, g, r
else:
b, g, r, a = cv2.split(self._instance)
return b, g, r, a
else:
return self._instance
else:
if len(image.shape) == 3:
if image.shape[2] == 1:
return image.copy()
elif image.shape[2] == 2:
l, a = cv2.split(image)
return l, a
elif image.shape[2] == 3:
b, g, r = cv2.split(image)
return b, g, r
else:
b, g, r, a = cv2.split(image)
return b, g, r, a
else:
return image
def getchannel(self, channel):
"""
Returns an image containing a single channel of the source image.
:param channel: What channel to return. Could be index
(0 for "R" channel of "RGB") or channel name
("A" for alpha channel of "RGBA").
:returns: An image in "L" mode.
.. versionadded:: 4.3.0
"""
if isinstance(channel, basstring):
try:
channel = self.getbands().index(channel)
except ValueError:
raise ValueError(
'The image has no channel "{}"'.format(channel))
return self.getband(channel)
def tell(self):
"""
Returns the current frame number. See :py:meth:`~PIL.Image.Image.seek`.
:returns: Frame number, starting with 0.
"""
return self._frame_nr
def thumbnail(self, size, resample=BICUBIC):
"""
Make this image into a thumbnail. This method modifies the
image to contain a thumbnail version of itself, no larger than
the given size. This method calculates an appropriate thumbnail
size to preserve the aspect of the image, calls the
:py:meth:`~PIL.Image.Image.draft` method to configure the file reader
(where applicable), and finally resizes the image.
Note that this function modifies the :py:class:`~PIL.Image.Image`
object in place. If you need to use the full resolution image as well,
apply this method to a :py:meth:`~PIL.Image.Image.copy` of the original
image.
:param size: Requested size.
:param resample: Optional resampling filter. This can be one
of :py:attr:`PIL.Image.NEAREST`, :py:attr:`PIL.Image.BILINEAR`,
:py:attr:`PIL.Image.BICUBIC`, or :py:attr:`PIL.Image.LANCZOS`.
If omitted, it defaults to :py:attr:`PIL.Image.BICUBIC`.
(was :py:attr:`PIL.Image.NEAREST` prior to version 2.5.0)
:returns: None
"""
# preserve aspect ratio
x, y = self.size
if x > size[0]:
y = int(max(y * size[0] / x, 1))
x = int(size[0])
if y > size[1]:
x = int(max(x * size[1] / y, 1))
y = int(size[1])
size = x, y
if size == self.size:
return
self.draft(None, size)
self._instance = self.resize(size, resample, image=self._instance)
self.readonly = 0
self.pyaccess = None
def transform(self, size, method, data=None, resample=NEAREST,
fill=1, fillcolor=None):
"""
Transforms this image. This method creates a new image with the
given size, and the same mode as the original, and copies data
to the new image using the given transform.
:param size: The output size.
:param method: The transformation method. This is one of
:py:attr:`PIL.Image.EXTENT` (cut out a rectangular subregion),
:py:attr:`PIL.Image.AFFINE` (affine transform),
:py:attr:`PIL.Image.PERSPECTIVE` (perspective transform),
:py:attr:`PIL.Image.QUAD` (map a quadrilateral to a rectangle), or
:py:attr:`PIL.Image.MESH` (map a number of source quadrilaterals
in one operation).
It may also be an :py:class:`~PIL.Image.ImageTransformHandler`
object::
class Example(Image.ImageTransformHandler):
def transform(size, method, data, resample, fill=1):
# Return result
It may also be an object with a :py:meth:`~method.getdata` method
that returns a tuple supplying new **method** and **data** values::
class Example(object):
def getdata(self):
method = Image.EXTENT
data = (0, 0, 100, 100)
return method, data
:param data: Extra data to the transformation method.
:param resample: Optional resampling filter. It can be one of
:py:attr:`PIL.Image.NEAREST` (use nearest neighbour),
:py:attr:`PIL.Image.BILINEAR` (linear interpolation in a 2x2
environment), or :py:attr:`PIL.Image.BICUBIC` (cubic spline
interpolation in a 4x4 environment). If omitted, or if the image
has mode "1" or "P", it is set to :py:attr:`PIL.Image.NEAREST`.
:param fill: If **method** is an
:py:class:`~PIL.Image.ImageTransformHandler` object, this is one of
the arguments passed to it. Otherwise, it is unused.
:param fillcolor: Optional fill color for the area outside the
transform in the output image.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
if method == EXTENT:
x0, y0, x1, y1 = data
part = self._instance[y0:y1, x0:x1]
_im = cv2.resize(part, size)
elif method == AFFINE:
x0, y0, x1, y1, x2, y2, x3, y3, x4, y4, x5, y5 = data
pts1 = np.float32([[x0, y0], [x1, y1], [x2, y2]])
pts2 = np.float32([[x3, y3], [x4, y4], [x5, y5]])
M = cv2.getAffineTransform(pts1,pts2)
_im = cv2.warpAffine(self._instance, M, size)
elif method == PERSPECTIVE or method == QUAD:
x0, y0, x1, y1, x2, y2, x3, y3 = data
pts1 = np.float32([[x0, y0], [x1, y1], [x2, y2], [x3, y3]])
pts2 = np.float32([[0,0],[size[0], 0], [0, size[1]], [size[0], size[1]]])
M = cv2.getPerspectiveTransform(pts1, pts2)
_im = cv2.warpPerspective(self._instance, M, size)
elif method == MESH:
_im = self._instance.copy()
for elem in data:
box, quad = elem
x0, y0, x1, y1, x2, y2, x3, y3 = quad
pts1 = np.float32([[x0, y0], [x1, y1], [x2, y2], [x3, y3]])
pts2 = np.float32([[box[0], box[1]],[box[2], box[1]], [box[0], box[3]], [box[2], box[3]]])
M = cv2.getPerspectiveTransform(pts1, pts2)
_im = cv2.warpPerspective(_im, M, size)
return Image(_im)
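# transform() data layouts as consumed above (a hedged summary of this
# implementation, not necessarily PIL's exact semantics for every method):
#   EXTENT:           (x0, y0, x1, y1)                  source rectangle to resize
#   AFFINE:           3 source points then 3 destination points (12 numbers)
#   PERSPECTIVE/QUAD: 4 source corners (8 numbers) mapped onto the output size
#   MESH:             sequence of (target_box, source_quad) pairs
# Example: im.transform((100, 100), EXTENT, data=(0, 0, 50, 50))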
def transpose(self, method):
"""
Transpose image (flip or rotate in 90 degree steps)
:param method: One of :py:attr:`PIL.Image.FLIP_LEFT_RIGHT`,
:py:attr:`PIL.Image.FLIP_TOP_BOTTOM`, :py:attr:`PIL.Image.ROTATE_90`,
:py:attr:`PIL.Image.ROTATE_180`, :py:attr:`PIL.Image.ROTATE_270`,
:returns: Returns a flipped or rotated copy of this image.
"""
w, h = self.size
if method == FLIP_LEFT_RIGHT:
_im = cv2.flip(self._instance, 1)
elif method == FLIP_TOP_BOTTOM:
_im = cv2.flip(self._instance, 0)
elif method == ROTATE_90:
_im = self.rotate_bound(270)
x = self.size[0]//2-self.size[1]//2
box = (0, x, self.size[0], x+self.size[1])
_im = self.crop(box, _im)
elif method == ROTATE_180:
_im = self.rotate(180)
elif method == ROTATE_270:
_im = self.rotate_bound(90)
x = self.size[0]//2-self.size[1]//2
box = (0, x, self.size[0], x+self.size[1])
_im = self.crop(box, _im)
if isinstance(_im, Image):
return _im
elif isinstance(_im, np.ndarray):
return Image(_im)
def verify(self):
"""
Verifies the contents of a file. For data read from a file, this
method attempts to determine if the file is broken, without
actually decoding the image data. If this method finds any
problems, it raises suitable exceptions. If you need to load
the image after using this method, you must reopen the image
file.
"""
pass
class FreeTypeFont(object):
"FreeType font wrapper (requires python library freetype-py)"
def __init__(self, font=None, size=10, index=0, encoding="",
layout_engine=None):
self.path = font
self.size = size
self.index = index
self.encoding = encoding
self.layout_engine = layout_engine
if os.path.isfile(self.path):
self.font = load(self.path, self.size+16)
else:
self.font = None
def getsize(text, ttf_font, scale=1.0, thickness=1):
if isinstance(ttf_font, freetype.Face):
slot = ttf_font.glyph
width, height, baseline = 0, 0, 0
previous = 0
for i,c in enumerate(text):
ttf_font.load_char(c)
bitmap = slot.bitmap
height = max(height, bitmap.rows + max(0,-(slot.bitmap_top-bitmap.rows)))
baseline = max(baseline, max(0,-(slot.bitmap_top-bitmap.rows)))
kerning = ttf_font.get_kerning(previous, c)
width += (slot.advance.x >> 6) + (kerning.x >> 6)
previous = c
else:
size = cv2.getTextSize(text, ttf_font, scale, thickness)
width = size[0][0]
height = size[0][1]
baseline = size[1]
return width, height, baseline
def getmask(text, ttf_font):
slot = ttf_font.glyph
width, height, baseline = getsize(text, ttf_font)
Z = np.zeros((height, width), dtype=np.ubyte)
x, y = 0, 0
previous = 0
for c in text:
ttf_font.load_char(c)
bitmap = slot.bitmap
top = slot.bitmap_top
left = slot.bitmap_left
w,h = bitmap.width, bitmap.rows
y = height-baseline-top
if y<=0: y=0
kerning = ttf_font.get_kerning(previous, c)
x += (kerning.x >> 6)
character = np.array(bitmap.buffer, dtype='uint8').reshape(h,w)
try:
Z[y:y+h,x:x+w] += character
except ValueError:
while x+w>Z.shape[1]:
x = x - 1
# print("new", x, y, w, h, character.shape, type(bitmap))
if x>0:
Z[:character.shape[0],x:x+w] += character
x += (slot.advance.x >> 6)
previous = c
return Z
def grab(bbox=None):
if mss_installed:
fh, filepath = tempfile.mkstemp('.png')
with mss.mss() as sct:
# The screen part to capture
if bbox is None:
filepath = sct.shot(mon=-1, output=filepath)
else:
monitor = {"top": bbox[1], "left": bbox[0], "width": bbox[2]-bbox[0], "height": bbox[3]-bbox[1]}
# Grab the data
sct_img = sct.grab(monitor)
# Save to the picture file
mss.tools.to_png(sct_img.rgb, sct_img.size, output=filepath)
return open(filepath)
else:
NotImplementedError("mss is not installed so there is no grab method available, install it with: pip install mss")
def grabclipboard():
if mss_installed:
if bitmap_classes_ok:
if sys.platform == "darwin":
fh, filepath = tempfile.mkstemp('.jpg')
os.close(fh)
commands = [
"set theFile to (open for access POSIX file \""
+ filepath + "\" with write permission)",
"try",
" write (the clipboard as JPEG picture) to theFile",
"end try",
"close access theFile"
]
script = ["osascript"]
for command in commands:
script += ["-e", command]
subprocess.call(script)
im = None
if os.stat(filepath).st_size != 0:
im = open(filepath)
os.unlink(filepath)
return im
else:
fh, filepath = tempfile.mkstemp('.bmp')
import win32clipboard, builtins
win32clipboard.OpenClipboard()
try:
if win32clipboard.IsClipboardFormatAvailable(win32clipboard.CF_DIB):
data = win32clipboard.GetClipboardData(win32clipboard.CF_DIB)
else:
data = None
finally:
win32clipboard.CloseClipboard()
if data is None: return None
bmih = BITMAPINFOHEADER()
ctypes.memmove(ctypes.pointer(bmih), data, SIZEOF_BITMAPINFOHEADER)
bmfh = BITMAPFILEHEADER()
ctypes.memset(ctypes.pointer(bmfh), 0, SIZEOF_BITMAPFILEHEADER) # zero structure
bmfh.bfType = ord('B') | (ord('M') << 8)
bmfh.bfSize = SIZEOF_BITMAPFILEHEADER + len(data) # file size
SIZEOF_COLORTABLE = 0
bmfh.bfOffBits = SIZEOF_BITMAPFILEHEADER + SIZEOF_BITMAPINFOHEADER + SIZEOF_COLORTABLE
with builtins.open(filepath, 'wb') as bmp_file:
bmp_file.write(bmfh)
bmp_file.write(data)
return open(filepath)
else:
raise NotImplementedError("grabclipboard is not available on your platform")
else:
NotImplementedError("mss is not installed so there is no grabclipboard method available, install it with: pip install mss")
def load(filename, size=12):
"""
Load a font file. This function loads a font face from the given
font file via freetype and returns the corresponding face object.
:param filename: Name of font file.
:return: A font object.
:exception IOError: If the file could not be read.
"""
# face = Face('./VeraMono.ttf')
face = freetype.Face(filename)
face.set_char_size(size*size)
return face
def truetype(font=None, size=10, index=0, encoding="",
layout_engine=None):
"""
Load a TrueType or OpenType font from a file or file-like object,
and create a font object.
This function loads a font object from the given file or file-like
object, and creates a font object for a font of the given size.
This function requires the _imagingft service.
:param font: A filename or file-like object containing a TrueType font.
Under Windows, if the file is not found in this filename,
the loader also looks in Windows :file:`fonts/` directory.
:param size: The requested size, in points.
:param index: Which font face to load (default is first available face).
:param encoding: Which font encoding to use (default is Unicode). Common
encodings are "unic" (Unicode), "symb" (Microsoft
Symbol), "ADOB" (Adobe Standard), "ADBE" (Adobe Expert),
and "armn" (Apple Roman). See the FreeType documentation
for more information.
:param layout_engine: Which layout engine to use, if available:
`ImageFont.LAYOUT_BASIC` or `ImageFont.LAYOUT_RAQM`.
:return: A font object.
:exception IOError: If the file could not be read.
"""
if not freetype_installed:
raise NotImplementedError("freetype-py is not installed or the libfreetype.dll/dylib/so is missing, if freetype-py is not installed, install it with pip install freetype-py")
fontpath = font
font = FreeTypeFont(font, size)
if font.font is not None:
return font.font
else:
ttf_filename = os.path.basename(fontpath)
dirs = []
if sys.platform == "win32":
# check the windows font repository
# NOTE: must use uppercase WINDIR, to work around bugs in
# 1.5.2's os.environ.get()
windir = os.environ.get("WINDIR")
if windir:
dirs.append(os.path.join(windir, "Fonts"))
elif sys.platform in ('linux', 'linux2'):
lindirs = os.environ.get("XDG_DATA_DIRS", "")
if not lindirs:
# According to the freedesktop spec, XDG_DATA_DIRS should
# default to /usr/share
lindirs = '/usr/share'
dirs += [os.path.join(lindir, "fonts")
for lindir in lindirs.split(":")]
elif sys.platform == 'darwin':
dirs += ['/Library/Fonts', '/System/Library/Fonts',
os.path.expanduser('~/Library/Fonts')]
ext = os.path.splitext(ttf_filename)[1]
first_font_with_a_different_extension = None
for directory in dirs:
for walkroot, walkdir, walkfilenames in os.walk(directory):
for walkfilename in walkfilenames:
if ext and walkfilename == ttf_filename:
fontpath = os.path.join(walkroot, walkfilename)
font = FreeTypeFont(fontpath, size)
return font.font
elif (not ext and
os.path.splitext(walkfilename)[0] == ttf_filename):
fontpath = os.path.join(walkroot, walkfilename)
if os.path.splitext(fontpath)[1] == '.ttf':
font = FreeTypeFont(fontpath, size)
return font.font
raise IOError("cannot find font file")
def load_path(filename, size=12):
"""
Load font file. Same as :py:func:`~PIL.ImageFont.load`, but searches for a
bitmap font along the Python path.
:param filename: Name of font file.
:return: A font object.
:exception IOError: If the file could not be read.
"""
for directory in sys.path:
if isDirectory(directory):
if not isinstance(filename, str):
if py3:
filename = filename.decode("utf-8")
else:
filename = filename.encode("utf-8")
try:
return load(os.path.join(directory, filename), size)
except IOError:
pass
raise IOError("cannot find font file")
class ImageDraw(object):
def __init__(self, img, mode=None):
try:
self.img = img
self._img_instance = self.img._instance
self.mode = Image()._get_mode(self._img_instance.shape, self._img_instance.dtype)
self.setink()
except AttributeError:
self._img_instance = None
self.mode = None
self.ink = None
self.fill = None
self.palette = None
self.font = None
def _convert_bgr2rgb(self, color):
if isinstance(color, tuple):
if len(color) == 3:
color = color[::-1]
elif len(color) == 4:
color = color[:3][::-1] + (color[3],)
return color
def _get_coordinates(self, xy):
"Transform two tuples in a 4 array or pass the 4 array through"
if isinstance(xy[0], tuple):
coord = []
for i in range(len(xy)):
coord.append(int(xy[i][0]))
coord.append(int(xy[i][1]))
else:
coord = [int(i) for i in xy]
return coord
def _get_ellipse_bb(x, y, major, minor, angle_deg=0):
"Compute tight ellipse bounding box."
t = np.arctan(-minor / 2 * np.tan(np.radians(angle_deg)) / (major / 2))
[max_x, min_x] = [x + major / 2 * np.cos(t) * np.cos(np.radians(angle_deg)) -
minor / 2 * np.sin(t) * np.sin(np.radians(angle_deg)) for t in (t, t + np.pi)]
t = np.arctan(minor / 2 * 1. / np.tan(np.radians(angle_deg)) / (major / 2))
[max_y, min_y] = [y + minor / 2 * np.sin(t) * np.cos(np.radians(angle_deg)) +
major / 2 * np.cos(t) * np.sin(np.radians(angle_deg)) for t in (t, t + np.pi)]
return min_x, min_y, max_x, max_y
def _getink(self, ink, fill=None):
if ink is None and fill is None:
if self.fill:
fill = self.ink
else:
ink = self.ink
else:
if ink is not None:
if isinstance(ink, basstring):
ink = ImageColor().getcolor(ink, self.mode)
if self.palette and not isinstance(ink, numbers.Number):
ink = self.palette.getcolor(ink)
if not self.mode[0] in ("1", "L", "I", "F") and isinstance(ink, numbers.Number):
ink = (0, 0, ink)
# ink = self.draw.draw_ink(ink, self.mode)
# convert BGR -> RGB
ink = self._convert_bgr2rgb(ink)
if fill is not None:
if isinstance(fill, basstring):
fill = ImageColor().getcolor(fill, self.mode)
if self.palette and not isinstance(fill, numbers.Number):
fill = self.palette.getcolor(fill)
if not self.mode[0] in ("1", "L", "I", "F") and isinstance(fill, numbers.Number):
fill = (0, 0, fill)
# fill = self.draw.draw_ink(fill, self.mode)
# convert BGR -> RGB
fill = self._convert_bgr2rgb(fill)
return ink, fill
def _get_ell_elements(self, box):
x1, y1, x2, y2 = box
axis1 = x2-x1
axis2 = y2-y1
center = (x1+axis1//2, y1+axis2//2)
return center, axis1, axis2
def _get_pointFromEllipseAngle(self, centerx, centery, radiush, radiusv, ang):
"""calculate point (x,y) for a given angle ang on an ellipse with its center at centerx, centery and
its horizontal radiush and its vertical radiusv"""
th = np.radians(ang)
ratio = (radiush/2.0)/float(radiusv/2.0)
x = centerx + radiush/2.0 * np.cos(th)
y = centery + radiusv/2.0 * np.sin(th)
return int(x), int(y)
def _multiline_check(self, text):
"Return True if the text contains a line break."
split_character = "\n" if isinstance(text, str) else b"\n"
return split_character in text
def _multiline_split(self, text):
split_character = "\n" if isinstance(text, str) else b"\n"
return text.split(split_character)
def arc(self, box, start, end, fill=None, width=1, line=False, linecenter=False, fillcolor=None):
"Draw an arc."
while end<start:
end = end + 360
if fillcolor is not None:
fill = fillcolor
ink, fill = self._getink(fill)
if ink is not None or fill is not None:
center, axis1, axis2 = self._get_ell_elements(box)
axes = (axis1//2, axis2//2)
if linecenter:
if fillcolor:
cv2.ellipse(self._img_instance, center, axes, 0, start, end, fillcolor, -1)
else:
cv2.ellipse(self._img_instance, center, axes, 0, start, end, ink, width)  # outline only, so draw with ink rather than the (possibly None) fillcolor
startx, starty = self._get_pointFromEllipseAngle(center[0], center[1], axis1, axis2, start)
endx, endy = self._get_pointFromEllipseAngle(center[0], center[1], axis1, axis2, end)
st = (startx, starty)
e = (endx, endy)
cv2.line(self._img_instance, center, st, ink, width)
cv2.line(self._img_instance, center, e, ink, width)
self.img._instance = self._img_instance
else:
cv2.ellipse(self._img_instance, center, axes, 0, start, end, ink, width)
self.img._instance = self._img_instance
if line:
startx, starty = self._get_pointFromEllipseAngle(center[0], center[1], axis1, axis2, start)
endx, endy = self._get_pointFromEllipseAngle(center[0], center[1], axis1, axis2, end)
st = (startx, starty)
e = (endx, endy)
cv2.line(self._img_instance, st, e, ink, width)
if fillcolor is not None:
mid_line = ((startx+endx)//2, (starty+endy)//2)
mid_ang = (start+end)//2
midx, midy = self._get_pointFromEllipseAngle(center[0], center[1], axis1, axis2, mid_ang)
mid_chord = ((mid_line[0]+midx)//2, (mid_line[1]+midy)//2)
h, w = self._img_instance.shape[:2]
mask = np.zeros((h + 2, w + 2), np.uint8)
cv2.floodFill(self._img_instance, mask, mid_chord, fillcolor)
self.img._instance = self._img_instance
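# Sketch of the arc-family primitives built on cv2.ellipse above ("draw" is an
# ImageDraw bound to some Image; colour strings go through ImageColor):
#   draw.arc((10, 10, 90, 60), 0, 180, fill="red", width=2)    # open arc
#   draw.chord((10, 10, 90, 60), 0, 180, outline="red")        # arc + closing line
#   draw.pieslice((10, 10, 90, 60), 30, 120, fill="yellow")    # arc + two radii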
def bitmap(self, xy, bitmap, fill=None):
"Draw a bitmap."
ink, fill = self._getink(fill)
if ink is None:
ink = fill
if ink is not None:
box = (xy[0], xy[1], bitmap._instance.shape[1]+xy[0], bitmap._instance.shape[0]+xy[1])
self.img.paste(ink, box, mask=bitmap)
def chord(self, box, start, end, fill=None, outline=None, width=1):
"Draw a chord."
ink, fill = self._getink(outline, fill)
if fill is not None:
self.arc(box, start, end, ink, width, line=True, fillcolor=fill)
# self.draw.draw_chord(xy, start, end, fill, 1)
if ink is not None and ink != fill:
self.arc(box, start, end, ink, width, line=True)
# self.draw.draw_chord(xy, start, end, ink, 0, width)
def ellipse(self, box, fill=None, outline=None, width=1):
"Draw an ellipse inside the bounding box like cv2.ellipse(img, box, color[, thickness)]"
ink, fill = self._getink(outline, fill)
center, axis1, axis2 = self._get_ell_elements(box)
ebox = (center, (axis1, axis2), 0)
if fill is not None:
cv2.ellipse(self._img_instance, ebox, fill, -1)
self.img._instance = self._img_instance
if ink is not None and ink != fill:
cv2.ellipse(self._img_instance, ebox, ink, width)
self.img._instance = self._img_instance
def getfont(self):
"""Get the current default font.
:returns: An image font."""
if self.font is None:
self.font = cv2.FONT_HERSHEY_SIMPLEX
return self.font
def line(self, xy, fill=None, width=1, joint=None):
"Draw a line."
ink = self._getink(fill)[0]
coord = self._get_coordinates(xy)
for co in range(0, len(coord), 4):
start = (coord[co], coord[co+1])
end = (coord[co+2], coord[co+3])
cv2.line(self._img_instance, start, end, ink, width)
self.img._instance = self._img_instance
if joint == "curve" and width > 4:
for i in range(1, len(xy)-1):
point = xy[i]
angles = [
np.degrees(np.arctan2(
end[0] - start[0], start[1] - end[1]
)) % 360
for start, end in ((xy[i-1], point), (point, xy[i+1]))
]
if angles[0] == angles[1]:
# This is a straight line, so no joint is required
continue
def coord_at_angle(coord, angle):
x, y = coord
angle -= 90
distance = width/2 - 1
return tuple([
p +
(np.floor(p_d) if p_d > 0 else np.ceil(p_d))
for p, p_d in
((x, distance * np.cos(np.radians(angle))),
(y, distance * np.sin(np.radians(angle))))
])
flipped = ((angles[1] > angles[0] and
angles[1] - 180 > angles[0]) or
(angles[1] < angles[0] and
angles[1] + 180 > angles[0]))
coords = [
(point[0] - width/2 + 1, point[1] - width/2 + 1),
(point[0] + width/2 - 1, point[1] + width/2 - 1)
]
if flipped:
start, end = (angles[1] + 90, angles[0] + 90)
else:
start, end = (angles[0] - 90, angles[1] - 90)
self.pieslice(coords, start - 90, end - 90, fill)
if width > 8:
# Cover potential gaps between the line and the joint
if flipped:
gapCoords = [
coord_at_angle(point, angles[0]+90),
point,
coord_at_angle(point, angles[1]+90)
]
else:
gapCoords = [
coord_at_angle(point, angles[0]-90),
point,
coord_at_angle(point, angles[1]-90)
]
self.line(gapCoords, fill, width=3)
def multiline_text(self, xy, text, fill=None, font=cv2.FONT_HERSHEY_SIMPLEX, anchor=None,
spacing=4, align="left", direction=None, features=None, scale=0.4, thickness=1):
widths = []
max_width = 0
lines = self._multiline_split(text)
line_spacing = self.textsize('A', font=font, scale=scale, thickness=thickness)[1] + spacing
for line in lines:
line_width, line_height = self.textsize(line, font, scale=scale, thickness=thickness)
widths.append(line_width)
max_width = max(max_width, line_width)
left, top = xy
for idx, line in enumerate(lines):
if align == "left":
pass # left = x
elif align == "center":
left += (max_width - widths[idx]) / 2.0
elif align == "right":
left += (max_width - widths[idx])
else:
raise ValueError('align must be "left", "center" or "right"')
self.text((left, top), line, fill=fill, font=font, anchor=anchor, scale=scale, thickness=thickness,
calledfrommultilines=True, direction=direction, features=features)
top += line_spacing
left = xy[0]
def multiline_textsize(self, text, font=cv2.FONT_HERSHEY_SIMPLEX, spacing=4, direction=None, features=None, scale=0.4, thickness=1):
max_width = 0
lines = self._multiline_split(text)
line_spacing = self.textsize('A', font=font, scale=scale, thickness=thickness)[1] + spacing
for line in lines:
line_width, line_height = self.textsize(line, font, spacing, direction, features, scale=scale, thickness=thickness)
max_width = max(max_width, line_width)
return max_width, len(lines)*line_spacing - spacing
def pieslice(self, box, start, end, fill=None, outline=None, width=1):
"Draw a pieslice."
ink, fill = self._getink(outline, fill)
if fill is not None:
self.arc(box, start, end, fill, width, linecenter=True, fillcolor=fill)
# self.draw.draw_pieslice(xy, start, end, fill, 1)
if ink is not None and ink != fill:
self.arc(box, start, end, ink, width, linecenter=True)
# self.draw.draw_pieslice(xy, start, end, ink, 0, width)
def _point(self, x, y, fill=None):
"Draw a point without transformations"
elem = (x, y)
cv2.circle(self._img_instance, elem, 1, fill, thickness=-1)
self.img._instance = self._img_instance
def point(self, xy, fill=None, width=1):
"Draw a point."
ink, fill = self._getink(fill)
coord = self._get_coordinates(xy)
for co in range(0, len(coord), 2):
elem = (coord[co], coord[co+1])
# cv2.line(self._img_instance, elem, elem, ink, width)
cv2.circle(self._img_instance, elem, width, ink, thickness=-1)
self.img._instance = self._img_instance
def polygon(self, xy, fill=None, outline=None):
"Draw a polygon."
ink, fill = self._getink(outline, fill)
coord = self._get_coordinates(xy)
coord = np.array(coord, np.int32)
coord = np.reshape(coord, (len(coord)//2, 2))
if fill is not None:
# self.draw.draw_polygon(xy, fill, 1)
try:
cv2.fillPoly(self._img_instance, [coord], fill)
except:
coord = coord.reshape((-1, 1, 2))
cv2.fillPoly(self._img_instance, [coord], fill)
self.img._instance = self._img_instance
if ink is not None and ink != fill:
# self.draw.draw_polygon(xy, ink, 0)
try:
cv2.polylines(self._img_instance, [coord], True, ink)
except:
coord = coord.reshape((-1, 1, 2))
cv2.polylines(self._img_instance, [coord], True, ink)
self.img._instance = self._img_instance
def rectangle(self, xy, fill=None, outline=None, width=1):
"Draw a rectangle."
ink, fill = self._getink(outline, fill)
coord = self._get_coordinates(xy)
if fill is not None:
cv2.rectangle(self._img_instance, tuple(coord[:2]), tuple(coord[2:4]), fill, -width)
self.img._instance = self._img_instance
if ink is not None and ink != fill:
cv2.rectangle(self._img_instance, tuple(coord[:2]), tuple(coord[2:4]), ink, width)
self.img._instance = self._img_instance
def setink(self):
"Set ink to standard black by default"
if len(self._img_instance.shape) == 2:
channels = 1
else:
channels = self._img_instance.shape[2]
depth = self._img_instance.dtype
if channels == 1 and depth == np.bool_:
self.ink = False
if channels == 1 and depth == np.uint8:
self.ink = 0
if channels == 2 and depth == np.uint8:
self.ink = (0, 255)
if channels == 3 and depth == np.uint8:
self.ink = (0, 0, 0)
if channels == 4 and depth == np.uint8:
self.ink = (0, 0, 0, 255)
if channels == 1 and depth == np.int32:
self.ink = 0
if channels == 1 and depth == np.float32:
self.ink = 0.0
if channels == 1 and depth == np.float64:
self.ink = 0.0
def text(self, xy, text, fill=None, font=cv2.FONT_HERSHEY_SIMPLEX, anchor=None, scale=0.4, thickness=1, calledfrommultilines=False, *args, **kwargs):
fontFace = font
fontScale = scale
if not calledfrommultilines and not isinstance(fontFace, freetype.Face):
if self._multiline_check(text):
return self.multiline_text(xy, text, fill, font, anchor, scale=scale, thickness=thickness, *args, **kwargs)
ink, fill = self._getink(fill)
if fontFace is None:
fontFace = self.getfont()
if ink is None:
ink = fill
if ink is not None:
if not isinstance(fontFace, freetype.Face):
w, h = self.textsize(text, font=fontFace, scale=scale, thickness=thickness)
xy = (xy[0], xy[1]+h)
cv2.putText(self._img_instance, text, xy, fontFace, fontScale, ink, thickness)
self.img._instance = self._img_instance
else:
if self._multiline_check(text):
lines = text.split("\n")
else:
lines =[text]
old_height = 0
for line in lines:
# First pass to compute bbox
width, height, baseline = getsize(line, font)
# Second pass for actual rendering
Z = getmask(line, font)
# cv2.imshow("Z", Z)
# cv2.waitKey()
MaskImg = Image(Z)
img = np.zeros(self.img._instance.shape, dtype=self.img._instance.dtype)
if len(self.img._instance.shape)>2:
if self.img._instance.shape[2] >= 2:
img[:,:,0] = ink[0]
img[:,:,1] = ink[1]
if self.img._instance.shape[2] >= 3:
img[:,:,2] = ink[2]
if self.img._instance.shape[2] == 4:
img[:,:,3] = 255
else:
img[:] = ink
TextImg = Image(img)
box = [int(xy[0]), int(xy[1]+old_height)]
self.img.paste(TextImg, box=box, mask=MaskImg)
self._img_instance = self.img._instance
old_height = old_height + height
def textsize(self, text, font=cv2.FONT_HERSHEY_SIMPLEX, spacing=4, direction=None, features=None, scale=0.4, thickness=1):
"Get the size of a given string, in pixels."
fontFace = font
fontScale = scale
if self._multiline_check(text):
return self.multiline_textsize(text, font, spacing, direction, features, scale=scale, thickness=thickness)
if not isinstance(fontFace, freetype.Face):
if font is None:
fontFace = self.getfont()
size = cv2.getTextSize(text, fontFace, fontScale, thickness)
text_width = size[0][0]
text_height = size[0][1]
return (text_width, text_height)
else:
width, height, baseline = getsize(text, fontFace)
return (width, height)
def Draw(im, mode=None):
"""
A simple 2D drawing interface for PIL images.
:param im: The image to draw in.
:param mode: Optional mode to use for color values. For RGB
images, this argument can be RGB or RGBA (to blend the
drawing into the image). For all other modes, this argument
must be the same as the image mode. If omitted, the mode
defaults to the mode of the image.
"""
# try:
# return im.getdraw(mode)
# except AttributeError:
# return ImageDraw(im, mode)
return ImageDraw(im)
def floodfill(image, xy, value, border=None, thresh=0, flags=130820):  # 130820 = 4-connectivity | (255 << 8) | cv2.FLOODFILL_FIXED_RANGE
"""
(experimental) Fills a bounded region with a given color.
:param image: Target image.
:param xy: Seed position (a 2-item coordinate tuple). See
:ref:`coordinate-system`.
:param value: Fill color.
:param border: Optional border value. If given, the region consists of
pixels with a color different from the border color. If not given,
the region consists of pixels having the same color as the seed
pixel.
:param thresh: Optional threshold value which specifies a maximum
tolerable difference of a pixel value from the 'background' in
order for it to be replaced. Useful for filling regions of
non-homogeneous, but similar, colors.
"""
_img_instance = image.getim()
if isinstance(value, tuple) or isinstance(value, list):
value = value[::-1]
h, w = _img_instance.shape[:2]
mask = np.zeros((h+2, w+2), np.uint8)
mask[:] = 0
lo = hi = thresh
xy = tuple([int(i) for i in xy])
cv2.floodFill(_img_instance, mask, xy, value, (lo,)*3, (hi,)*3, flags)
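# Usage sketch (not part of the original module): flood-filling a region of an
# opened image. The file name is an illustrative assumption.
#
#   im = open("photo.jpg")
#   floodfill(im, (10, 10), (255, 0, 0))   # fill the region around (10, 10) with red (RGB)
#   im.show()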
class ImageColor(object):
def getcolor(self, color, mode):
"""
Same as :py:func:`~PIL.ImageColor.getrgb`, but converts the RGB value to a
greyscale value if the mode is not color or a palette image. If the string
cannot be parsed, this function raises a :py:exc:`ValueError` exception.
.. versionadded:: 1.1.4
:param color: A color string
:return: ``(graylevel [, alpha]) or (red, green, blue[, alpha])``
"""
# same as getrgb, but converts the result to the given mode
color, alpha = self.getrgb(color), 255
if len(color) == 4:
color, alpha = color[0:3], color[3]
if getmodebase(mode) == "L":
r, g, b = color
color = (r*299 + g*587 + b*114)//1000
if mode[-1] == 'A':
return (color, alpha)
else:
if mode[-1] == 'A':
return color + (alpha,)
return color
def getrgb(self, color):
"""
Convert a color string to an RGB tuple. If the string cannot be parsed,
this function raises a :py:exc:`ValueError` exception.
.. versionadded:: 1.1.4
:param color: A color string
:return: ``(red, green, blue[, alpha])``
"""
color = color.lower()
rgb = colormap.get(color, None)
if rgb:
if isinstance(rgb, tuple):
return rgb
colormap[color] = rgb = self.getrgb(rgb)
return rgb
# check for known string formats
if re.match('#[a-f0-9]{3}$', color):
return (
int(color[1]*2, 16),
int(color[2]*2, 16),
int(color[3]*2, 16),
)
if re.match('#[a-f0-9]{4}$', color):
return (
int(color[1]*2, 16),
int(color[2]*2, 16),
int(color[3]*2, 16),
int(color[4]*2, 16),
)
if re.match('#[a-f0-9]{6}$', color):
return (
int(color[1:3], 16),
int(color[3:5], 16),
int(color[5:7], 16),
)
if re.match('#[a-f0-9]{8}$', color):
return (
int(color[1:3], 16),
int(color[3:5], 16),
int(color[5:7], 16),
int(color[7:9], 16),
)
m = re.match(r"rgb\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$", color)
if m:
return (
int(m.group(1)),
int(m.group(2)),
int(m.group(3))
)
m = re.match(r"rgb\(\s*(\d+)%\s*,\s*(\d+)%\s*,\s*(\d+)%\s*\)$", color)
if m:
return (
int((int(m.group(1)) * 255) / 100.0 + 0.5),
int((int(m.group(2)) * 255) / 100.0 + 0.5),
int((int(m.group(3)) * 255) / 100.0 + 0.5)
)
m = re.match(
r"hsl\(\s*(\d+\.?\d*)\s*,\s*(\d+\.?\d*)%\s*,\s*(\d+\.?\d*)%\s*\)$",
color,
)
if m:
from colorsys import hls_to_rgb
rgb = hls_to_rgb(
float(m.group(1)) / 360.0,
float(m.group(3)) / 100.0,
float(m.group(2)) / 100.0,
)
return (
int(rgb[0] * 255 + 0.5),
int(rgb[1] * 255 + 0.5),
int(rgb[2] * 255 + 0.5)
)
m = re.match(
r"hs[bv]\(\s*(\d+\.?\d*)\s*,\s*(\d+\.?\d*)%\s*,\s*(\d+\.?\d*)%\s*\)$",
color,
)
if m:
from colorsys import hsv_to_rgb
rgb = hsv_to_rgb(
float(m.group(1)) / 360.0,
float(m.group(2)) / 100.0,
float(m.group(3)) / 100.0,
)
return (
int(rgb[0] * 255 + 0.5),
int(rgb[1] * 255 + 0.5),
int(rgb[2] * 255 + 0.5)
)
m = re.match(r"rgba\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$",
color)
if m:
return (
int(m.group(1)),
int(m.group(2)),
int(m.group(3)),
int(m.group(4))
)
raise ValueError("unknown color specifier: %r" % color)
class ModeDescriptor(object):
"""Wrapper for mode strings."""
def __init__(self, mode, bands, basemode, basetype):
self.mode = mode
self.bands = bands
self.basemode = basemode
self.basetype = basetype
def __str__(self):
return self.mode
class ImageMode(object):
def getmode(self, mode):
"""Gets a mode descriptor for the given mode."""
modes = {}
# core modes
for m, (basemode, basetype, bands) in _MODEINFO.items():
modes[m] = ModeDescriptor(m, bands, basemode, basetype)
# extra experimental modes
modes["RGBa"] = ModeDescriptor("RGBa",
("R", "G", "B", "a"), "RGB", "L")
modes["LA"] = ModeDescriptor("LA", ("L", "A"), "L", "L")
modes["La"] = ModeDescriptor("La", ("L", "a"), "L", "L")
modes["PA"] = ModeDescriptor("PA", ("P", "A"), "RGB", "L")
# mapping modes
modes["I;16"] = ModeDescriptor("I;16", "I", "L", "L")
modes["I;16L"] = ModeDescriptor("I;16L", "I", "L", "L")
modes["I;16B"] = ModeDescriptor("I;16B", "I", "L", "L")
# look up the requested mode in the table built above
# (note: unlike the original PIL, the table is rebuilt on every call rather than cached globally)
_modes = modes
return _modes[mode]
def _check_size(size):
"""
Common check to enforce type and sanity check on size tuples
:param size: Should be a 2 tuple of (width, height)
:returns: True, or raises a ValueError
"""
if not isinstance(size, (list, tuple)):
raise ValueError("Size must be a tuple")
if len(size) != 2:
raise ValueError("Size must be a tuple of length 2")
if size[0] < 0 or size[1] < 0:
raise ValueError("Width and height must be >= 0")
return True
def new(mode, size, color=0):
"""
Creates a new image with the given mode and size.
:param mode: The mode to use for the new image. See:
:ref:`concept-modes`.
:param size: A 2-tuple, containing (width, height) in pixels.
:param color: What color to use for the image. Default is black.
If given, this should be a single integer or floating point value
for single-band modes, and a tuple for multi-band modes (one value
per band). When creating RGB images, you can also use color
strings as supported by the ImageColor module. If the color is
None, the image is not initialised.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
_check_size(size)
if color is None:
# don't initialize
_im = Image()._new(mode, size)
return Image(_im)
if type(color).__name__ == "str":
# css3-style specifier
color = ImageColor().getcolor(color, mode)
color = ImageDraw(None)._convert_bgr2rgb(color)
_im = Image()._new(mode, size, color)
return Image(_im)
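# Usage sketch (not part of the original module): creating a blank canvas and
# drawing on it with the Draw/ImageDraw wrapper defined above. The output file
# name is an illustrative assumption.
#
#   img = new("RGB", (200, 100), "white")
#   d = Draw(img)
#   d.rectangle((10, 10, 90, 90), fill=(255, 0, 0), outline=(0, 0, 0))
#   d.text((100, 40), "hello", fill=(0, 0, 0))
#   img.save("canvas.png")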
def frombytes(mode, size, data, decoder_name="raw", *args):
"""
Creates a copy of an image memory from pixel data in a buffer.
In its simplest form, this function takes three arguments
(mode, size, and unpacked pixel data).
You can also use any pixel decoder supported by PIL. For more
information on available decoders, see the section
:ref:`Writing Your Own File Decoder <file-decoders>`.
Note that this function decodes pixel data only, not entire images.
If you have an entire image in a string, wrap it in a
:py:class:`~io.BytesIO` object, and use :py:func:`~PIL.Image.open` to load
it.
:param mode: The image mode. See: :ref:`concept-modes`.
:param size: The image size.
:param data: A byte buffer containing raw data for the given mode.
:param decoder_name: What decoder to use.
:param args: Additional parameters for the given decoder.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
_check_size(size)
# may pass tuple instead of argument list
if len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
if decoder_name == "raw" and args == ():
args = mode
im = new(mode, size)
im.frombytes(mode, size, data, decoder_name, args)
return im
def fromstring(mode, size, data, decoder_name="raw", *args):
# raise NotImplementedError("fromstring() has been removed. " +
# "Please call frombytes() instead.")
return frombytes(mode, size, data, decoder_name, *args)
def frombuffer(mode, size, data, decoder_name="raw", *args):
"""
Creates an image memory referencing pixel data in a byte buffer.
This function is similar to :py:func:`~PIL.Image.frombytes`, but uses data
in the byte buffer, where possible. This means that changes to the
original buffer object are reflected in this image. Not all modes can
share memory; supported modes include "L", "RGBX", "RGBA", and "CMYK".
Note that this function decodes pixel data only, not entire images.
If you have an entire image file in a string, wrap it in a
**BytesIO** object, and use :py:func:`~PIL.Image.open` to load it.
In the current version, the default parameters used for the "raw" decoder
differ from those used for :py:func:`~PIL.Image.frombytes`. This is a
bug, and will probably be fixed in a future release. The current release
issues a warning if you do this; to disable the warning, you should provide
the full set of parameters. See below for details.
:param mode: The image mode. See: :ref:`concept-modes`.
:param size: The image size.
:param data: A bytes or other buffer object containing raw
data for the given mode.
:param decoder_name: What decoder to use.
:param args: Additional parameters for the given decoder. For the
default encoder ("raw"), it's recommended that you provide the
full set of parameters::
frombuffer(mode, size, data, "raw", mode, 0, 1)
:returns: An :py:class:`~PIL.Image.Image` object.
.. versionadded:: 1.1.4
"""
_check_size(size)
# may pass tuple instead of argument list
if len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
if decoder_name == "raw":
if args == ():
args = mode, 0, -1 # may change to (mode, 0, 1) post-1.1.6
if args[0] in _MAPMODES:
channels, depth = Image()._get_channels_and_depth(mode)
im = np.frombuffer(data)
im = im.reshape((size[1], size[0], channels))
im = im.astype(depth)
im_ = new(mode, (1, 1))
im_._instance = im
im_.readonly = 1
return im_
return frombytes(mode, size, data, decoder_name, args)
def fromarray(obj, mode=None):
"""
Creates an image memory from an object exporting the array interface
(using the buffer protocol).
If **obj** is not contiguous, then the tobytes method is called
and :py:func:`~PIL.Image.frombuffer` is used.
If you have an image in NumPy::
from PIL import Image
import numpy as np
im = Image.open('hopper.jpg')
a = np.asarray(im)
Then this can be used to convert it to a Pillow image::
im = Image.fromarray(a)
:param obj: Object with array interface
:param mode: Mode to use (will be determined from type if None)
See: :ref:`concept-modes`.
:returns: An image object.
.. versionadded:: 1.1.6
"""
if isinstance(obj, np.ndarray):
_mode = Image()._get_mode(obj.shape, obj.dtype)
if _mode == 'RGB':
obj = cv2.cvtColor(obj, cv2.COLOR_RGB2BGR)
elif mode == "RGBA":
obj = cv2.cvtColor(obj, cv2.COLOR_RGBA2BGRA)
return Image(obj)
else:
raise TypeError("Cannot handle this data type")
_fromarray_typemap = {
# (shape, typestr) => mode, rawmode
# first two members of shape are set to one
((1, 1), "|b1"): ("1", "1;8"),
((1, 1), "|u1"): ("L", "L"),
((1, 1), "|i1"): ("I", "I;8"),
((1, 1), "<u2"): ("I", "I;16"),
((1, 1), ">u2"): ("I", "I;16B"),
((1, 1), "<i2"): ("I", "I;16S"),
((1, 1), ">i2"): ("I", "I;16BS"),
((1, 1), "<u4"): ("I", "I;32"),
((1, 1), ">u4"): ("I", "I;32B"),
((1, 1), "<i4"): ("I", "I;32S"),
((1, 1), ">i4"): ("I", "I;32BS"),
((1, 1), "<f4"): ("F", "F;32F"),
((1, 1), ">f4"): ("F", "F;32BF"),
((1, 1), "<f8"): ("F", "F;64F"),
((1, 1), ">f8"): ("F", "F;64BF"),
((1, 1, 2), "|u1"): ("LA", "LA"),
((1, 1, 3), "|u1"): ("RGB", "RGB"),
((1, 1, 4), "|u1"): ("RGBA", "RGBA"),
}
# shortcuts
_fromarray_typemap[((1, 1), _ENDIAN + "i4")] = ("I", "I")
_fromarray_typemap[((1, 1), _ENDIAN + "f4")] = ("F", "F")
def open(fl, mode='r'):
_mode = None
_format = None
if isinstance(fl, basstring):
if not os.path.isfile(fl):
raise IOError("cannot find image file", fl)
if os.path.splitext(fl)[1].lower() == ".gif":
if gif2numpy_installed:
_instances, _exts, _image_specs = gif2numpy.convert(fl)
_instance = _instances[0]
img = Image(_instance, fl, instances = _instances, exts = _exts, image_specs = _image_specs)
else:
raise NotImplementedError("gif2numpy has not been installed. Unable to read gif images, install it with: pip install gif2numpy")
else:
_instance = cv2.imread(fl, cv2.IMREAD_UNCHANGED)
# _mode = Image()._get_mode(_instance.shape, _instance.dtype)
img = Image(_instance, fl)
return img
if isinstance(fl, fil_object):
file_bytes = np.asarray(bytearray(fl.read()), dtype=np.uint8)
_instance = cv2.imdecode(file_bytes, cv2.IMREAD_UNCHANGED)
# _mode = Image()._get_mode(_instance.shape, _instance.dtype)
img = Image(_instance)
return img
if not py3:
if isinstance(fl, cStringIO.InputType):
fl.seek(0)
img_array = np.asarray(bytearray(fl.read()), dtype=np.uint8)
return Image(cv2.imdecode(img_array, 1))
if hasattr(fl, 'mode'):
image = np.array(fl)
_mode = fl.mode
if _mode == 'RGB':
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
_instance = image
img = Image(_instance)
return img
def blend(img1, img2, alpha):
"blends 2 images using an alpha value>=0.0 and <=1.0"
dst = cv2.addWeighted(img1, 1.0-alpha, img2, alpha, 0)
return Image(dst)
def composite(background, foreground, mask, np_image=False, neg_mask=False):
"pastes the foreground image into the background image using the mask"
# Convert uint8 to float
if isinstance(background, np.ndarray):
foreground = foreground.astype(float)
old_type = background.dtype
background = background.astype(float)
# Normalize the alpha mask to keep intensity between 0 and 1
if neg_mask:
alphamask = mask.astype(float)/255
else:
alphamask = (~mask).astype(float)/255
else:
foreground = foreground._instance.astype(float)
old_type = background.dtype
background = background._instance.astype(float)
# Normalize the alpha mask to keep intensity between 0 and 1
if neg_mask:
alphamask = mask._instance.astype(float)/255
else:
alphamask = (~(mask._instance)).astype(float)/255
fslen = len(foreground.shape)
if len(alphamask.shape) != fslen:
img = np.zeros(foreground.shape, dtype=foreground.dtype)
if fslen>2:
if foreground.shape[2] >= 2:
img[:,:,0] = alphamask
img[:,:,1] = alphamask
if foreground.shape[2] >= 3:
img[:,:,2] = alphamask
if foreground.shape[2] == 4:
img[:,:,3] = alphamask
alphamask = img.copy()
# Multiply the foreground with the alpha mask
try:
foreground = cv2.multiply(alphamask, foreground)
except:
if alphamask.shape[2] == 1 and foreground.shape[2] == 3:
triplemask = cv2.merge((alphamask, alphamask, alphamask))
foreground = cv2.multiply(triplemask, foreground)
else:
raise ValueError("OpenCV Error: Sizes of input arguments do not match (The operation is neither 'array op array' (where arrays have the same size and the same number of channels), nor 'array op scalar', nor 'scalar op array') in cv::arithm_op, file ..\..\..\..\opencv\modules\core\src\arithm.cpp")
# Multiply the background with ( 1 - alpha )
bslen = len(background.shape)
if len(alphamask.shape) != bslen:
img = np.zeros(background.shape, dtype=background.dtype)
if bslen>2:
if background.shape[2] >= 2:
img[:,:,0] = alphamask
img[:,:,1] = alphamask
if background.shape[2] >= 3:
img[:,:,2] = alphamask
if background.shape[2] == 4:
img[:,:,3] = alphamask
alphamask = img.copy()
try:
background = cv2.multiply(1.0 - alphamask, background)
except:
if alphamask.shape[2] == 1 and foreground.shape[2] == 3:
background = cv2.multiply(1.0 - triplemask, background)
else:
raise ValueError("OpenCV Error: Sizes of input arguments do not match (The operation is neither 'array op array' (where arrays have the same size and the same number of channels), nor 'array op scalar', nor 'scalar op array') in cv::arithm_op, file ..\..\..\..\opencv\modules\core\src\arithm.cpp")
# Add the masked foreground and background
outImage = cv2.add(foreground, background)
outImage = outImage/255
outImage = outImage*255
outImage = outImage.astype(old_type)
if np_image:
return outImage
else:
return Image(outImage)
def alpha_composite(im1, im2):
"""
Alpha composite im2 over im1.
:param im1: The first image. Must have mode RGBA.
:param im2: The second image. Must have mode RGBA, and the same size as
the first image.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
r1, g1, b1, a1 = Image().split(im1)
r2, g2, b2, a2 = Image().split(im2)
alphacomp = np.zeros(im1.shape, dtype=im1.dtype)
im3 = composite(alphacomp, im1, a1)
alphacomp = np.zeros(im2.shape, dtype=im2.dtype)
im4 = composite(alphacomp, im2, a2)
return blend(im3, im4, 0.5)
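# Usage sketch (not part of the original module): combining the compositing helpers
# above. File names are illustrative assumptions; both images are assumed RGBA and
# the same size, and the raw arrays are passed via getim() since these helpers work
# on numpy arrays.
#
#   im1 = open("background.png")
#   im2 = open("overlay.png")
#   mixed = blend(im1.getim(), im2.getim(), 0.3)          # 30% of im2 over im1
#   over = alpha_composite(im1.getim(), im2.getim())      # requires 4-channel inputs
#   mixed.show()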
def merge(mode, colorbandtuple, image=False):
"merges three channels to one band"
if len(colorbandtuple) == 2:
red, green = colorbandtuple
blue = None
alpha = None
elif len(colorbandtuple) == 3:
red, green, blue = colorbandtuple
alpha = None
elif len(colorbandtuple) == 4:
red, green, blue, alpha = colorbandtuple
channels, depth = Image()._get_channels_and_depth(mode)
img_dim = red.shape
img = np.zeros((img_dim[0], img_dim[1], channels), dtype=depth)
img[:,:,0] = red
img[:,:,1] = green
if blue is not None:
img[:,:,2] = blue
if alpha is not None:
img[:,:,3] = alpha
if image:
return img
else:
return Image(img)
def linear_gradient(mode, size=256):
"Generate 256x256 linear gradient from black to white, top to bottom."
channels, depth = Image()._get_channels_and_depth(mode)
if channels == 1:
y = np.linspace(0, size-1, size)
gradient = np.tile(y, (size, 1)).T
gradient = gradient.astype(depth)
return gradient
elif channels > 3:
y = np.linspace(0, size-1, size)
gradient = np.tile(y, (channels, size, 1)).T
gradient = gradient.astype(depth)
return gradient
def radial_gradient(mode, size=256, innerColor=(0, 0, 0), outerColor=(255, 255, 255)):
"Generate 256x256 radial gradient from black to white, centre to edge."
channels, depth = Image()._get_channels_and_depth(mode)
gradient = np.zeros((size, size, channels), dtype=depth)
if channels == 1:
_max_value = 1
x_axis = np.linspace(-_max_value, _max_value, size)[:, None]
y_axis = np.linspace(-_max_value, _max_value, size)[None, :]
gradient = np.sqrt(x_axis ** 2 + y_axis ** 2)
if innerColor == 255 or innerColor == (255, 255, 255):
gradient = _max_value-gradient
return gradient
elif channels ==3:
inner = np.array([0, 0, 0])[None, None, :]
outer = np.array([1, 1, 1])[None, None, :]
if gradient.max() != 0:
gradient /= gradient.max()
gradient = gradient[:, :, None]
gradient = gradient * outer + (1 - gradient) * inner
# gradient = gradient/255.0*255
return gradient
else:
imgsize = gradient.shape[:2]
for y in range(imgsize[1]):
for x in range(imgsize[0]):
#Find the distance to the center
distanceToCenter = np.sqrt((x - imgsize[0]//2) ** 2 + (y - imgsize[1]//2) ** 2)
#Make it on a scale from 0 to 1
distanceToCenter = distanceToCenter / (np.sqrt(2) * imgsize[0]/2)
#Calculate r, g, and b values
r = outerColor[0] * distanceToCenter + innerColor[0] * (1 - distanceToCenter)
g = outerColor[1] * distanceToCenter + innerColor[1] * (1 - distanceToCenter)
b = outerColor[2] * distanceToCenter + innerColor[2] * (1 - distanceToCenter)
a = outerColor[2] * distanceToCenter + innerColor[2] * (1 - distanceToCenter)  # alpha reuses the blue-channel weights, since the colour tuples may only have three entries
#Place the pixel
gradient[y, x] = (int(r), int(g), int(b), int(a))
return gradient
def constant(image, value):
"Fill a channel with a given grey level"
return Image.new("L", image.size, value)
def duplicate(image):
"Create a copy of a channel"
return image.copy()
def invert(image, im=None):
"Invert a channel"
if im is None:
return ~image.getim()
else:
return ~im
def _reduce_images(image1, image2):
"bring two images to an identical size using the minimum side of each image"
s0 = min(image1._instance.shape[0], image2._instance.shape[0])
s1 = min(image1._instance.shape[1], image2._instance.shape[1])
image1_copy = image1._instance[:s0,:s1]
image2_copy = image2._instance[:s0,:s1]
return image1_copy, image2_copy
def lighter(image1, image2):
"Select the lighter pixels from each image"
image1_copy, image2_copy = _reduce_images(image1, image2)
return np.maximum(image1_copy, image2_copy)
def darker(image1, image2):
"Select the darker pixels from each image"
image1_copy, image2_copy = _reduce_images(image1, image2)
return np.minimum(image1_copy, image2_copy)
def difference(image1, image2):
"Subtract one image from another"
# does not work as in PIL, needs to be fixed
# Calculate absolute difference
# (abs(image1 - image2)).
image1_copy, image2_copy = _reduce_images(image1, image2)
return np.absolute(np.subtract(image1_copy, image2_copy))
def multiply(image1, image2):
"Superimpose two positive images"
# broken, needs to be fixed
# Superimpose positive images
# (image1 * image2 / MAX).
# <p>
# Superimposes two images on top of each other. If you multiply an
# image with a solid black image, the result is black. If you multiply
# with a solid white image, the image is unaffected.
image1_copy, image2_copy = _reduce_images(image1, image2)
div = np.divide(image2_copy, 255)
return np.multiply(image1_copy, div)
def screen(image1, image2):
"Superimpose two negative images"
# Superimpose negative images
# (MAX - ((MAX - image1) * (MAX - image2) / MAX)).
# <p>
# Superimposes two inverted images on top of each other.
image1_copy, image2_copy = _reduce_images(image1, image2)
max_image = np.maximum(image1_copy, image2_copy)
return (max_image - ((max_image - image1_copy) * (max_image - image2_copy) / max_image))
def add(image1, image2, scale=1.0, offset=0):
"Add two images"
# ((image1 + image2) / scale + offset).
# Adds two images, dividing the result by scale and adding the
# offset. If omitted, scale defaults to 1.0, and offset to 0.0.
image1_copy, image2_copy = _reduce_images(image1, image2)
return np.add(image1_copy, image2_copy)/scale+offset
def subtract(image1, image2, scale=1.0, offset=0):
"Subtract two images"
# Subtract images
# ((image1 - image2) / scale + offset).
# Subtracts two images, dividing the result by scale and adding the
# offset. If omitted, scale defaults to 1.0, and offset to 0.0.
image1_copy, image2_copy = _reduce_images(image1, image2)
return np.subtract(image1_copy, image2_copy)/scale+offset
def add_modulo(image1, image2):
"Add two images without clipping"
# Add images without clipping
# ((image1 + image2) % MAX).
# Adds two images, without clipping the result.
image1_copy, image2_copy = _reduce_images(image1, image2)
return np.mod(np.add(image1_copy, image2_copy), np.maximum(image1_copy, image2_copy))
def subtract_modulo(image1, image2):
"Subtract two images without clipping"
# Subtract images without clipping
# ((image1 - image2) % MAX).
# Subtracts two images, without clipping the result.
image1_copy, image2_copy = _reduce_images(image1, image2)
return np.mod(np.subtract(image1_copy, image2_copy), np.maximum(image1_copy, image2_copy))
def logical_and(image1, image2):
"Logical and between two images"
# Logical AND
# (image1 and image2).
image1_copy, image2_copy = _reduce_images(image1, image2)
return np.logical_and(image1_copy, image2_copy)
def logical_or(image1, image2):
"Logical or between two images"
# Logical OR
# (image1 or image2).
image1_copy, image2_copy = _reduce_images(image1, image2)
return np.logical_or(image1_copy, image2_copy)
def logical_xor(image1, image2):
"Logical xor between two images"
# Logical XOR
# (image1 xor image2).
image1_copy, image2_copy = _reduce_images(image1, image2)
return np.logical_xor(image1_copy, image2_copy)
class Brightness(object):
def __init__(self, image):
self.image = image
def enhance(self, factor):
# factor 1.0 leaves the image unchanged; values below 1.0 darken and values above 1.0
# brighten (the shift applied below is (factor-1)*255, mirroring PIL's ImageEnhance.Brightness)
img = self.image.getim()
brightness = (1-factor)*-255
adjusted = cv2.addWeighted(img, 1.0, np.zeros(img.shape, img.dtype), 0, brightness)
return Image(adjusted)
class Contrast(object):
def __init__(self, image):
self.image = image
def enhance(self, factor):
img = self.image.getim()
# factor 1.0 leaves the image unchanged; pixel values are scaled by factor directly,
# which approximates (but does not exactly match) PIL's mean-pivoting ImageEnhance.Contrast
adjusted = cv2.convertScaleAbs(img, alpha=factor, beta=0)
return Image(adjusted)
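# Usage sketch (not part of the original module), mirroring PIL.ImageEnhance:
# a factor of 1.0 leaves the image unchanged. The file name is an assumption.
#
#   im = open("photo.jpg")
#   brighter = Brightness(im).enhance(1.3)
#   punchier = Contrast(im).enhance(1.2)
#   brighter.show()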
class Filter(object):
pass
class MultibandFilter(Filter):
pass
class BuiltinFilter(MultibandFilter):
def filter(self, image):
if image.mode == "P":
raise ValueError("cannot filter palette images")
return image.filter(*self.filterargs)
class GaussianBlur(MultibandFilter):
"""Gaussian blur filter.
:param radius: Blur radius.
"""
name = "GaussianBlur"
def __init__(self, radius=2):
self.radius = radius
self.name = "GaussianBlur"
def filter(self, image):
kernel_size = self.radius*2+1
sigmaX = 0.3*((kernel_size-1)*0.5 - 1) + 0.8
dst = cv2.GaussianBlur(image._instance, (kernel_size, kernel_size), sigmaX, borderType=cv2.BORDER_DEFAULT)
return Image(dst)
class BLUR(BuiltinFilter):
name = "Blur"
filterargs = (5, 5), 16, 0, (
1, 1, 1, 1, 1,
1, 0, 0, 0, 1,
1, 0, 0, 0, 1,
1, 0, 0, 0, 1,
1, 1, 1, 1, 1)
class CONTOUR(BuiltinFilter):
name = "Contour"
filterargs = (3, 3), 1, 255, (
-1, -1, -1,
-1, 8, -1,
-1, -1, -1)
class DETAIL(BuiltinFilter):
name = "Detail"
filterargs = (3, 3), 6, 0, (
0, -1, 0,
-1, 10, -1,
0, -1, 0)
class EDGE_ENHANCE(BuiltinFilter):
name = "Edge-enhance"
filterargs = (3, 3), 2, 0, (
-1, -1, -1,
-1, 10, -1,
-1, -1, -1)
class EDGE_ENHANCE_MORE(BuiltinFilter):
name = "Edge-enhance More"
filterargs = (3, 3), 1, 0, (
-1, -1, -1,
-1, 9, -1,
-1, -1, -1)
class EMBOSS(BuiltinFilter):
name = "Emboss"
filterargs = (3, 3), 1, 128, (
-1, 0, 0,
0, 1, 0,
0, 0, 0)
class FIND_EDGES(BuiltinFilter):
name = "Find Edges"
filterargs = (3, 3), 1, 0, (
-1, -1, -1,
-1, 8, -1,
-1, -1, -1)
class SHARPEN(BuiltinFilter):
name = "Sharpen"
filterargs = (3, 3), 16, 0, (
-2, -2, -2,
-2, 32, -2,
-2, -2, -2)
class SMOOTH(BuiltinFilter):
name = "Smooth"
filterargs = (3, 3), 13, 0, (
1, 1, 1,
1, 5, 1,
1, 1, 1)
class SMOOTH_MORE(BuiltinFilter):
name = "Smooth More"
filterargs = (5, 5), 100, 0, (
1, 1, 1, 1, 1,
1, 5, 5, 5, 1,
1, 5, 44, 5, 1,
1, 5, 5, 5, 1,
1, 1, 1, 1, 1)
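# Usage sketch (not part of the original module): applying the filter classes above.
# GaussianBlur implements its own filter(); the kernel-based filters rely on
# Image.filter() accepting their filterargs, which is assumed here. The file name
# is an illustrative assumption.
#
#   im = open("photo.jpg")
#   blurred = GaussianBlur(radius=3).filter(im)
#   sharpened = im.filter(SHARPEN)   # may need an instance, im.filter(SHARPEN()), depending on Image.filter
#   blurred.show()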
if __name__ == '__main__':
# var init
testfile = "lena1.jpg"
if os.path.isfile("lena.jpg"):
testfile = "lena.jpg"
elif os.path.isfile("Images/lena.jpg"):
testfile = "Images/lena.jpg"
else:
url_loc = "https://raw.githubusercontent.com/bunkahle/PILasOPENCV/master/tests/lena.jpg"
if py3:
import requests, builtins
f = builtins.open(testfile, "wb")
r = requests.get(url_loc)
f.write(r.content)
else:
import urllib2, cStringIO
imgdata = urllib2.urlopen(url_loc).read()
img = open(cStringIO.StringIO(imgdata))
img.save(testfile)
outfile1 = "lena1.bmp"
outfile2 = "lena2.bmp"
thsize = (128, 128)
box = (100, 100, 400, 400)
# the old style:
# from PIL import Image as PILImage
# pil_image = PILImage.open(testfile)
# print(pil_image.format, pil_image.size, pil_image.mode)
# pil_image.save(outfile1)
# pil_image.show()
# small_pil = pil_image.copy()
# small_pil.thumbnail(thsize)
# small_pil.show()
# region_pil = pil_image.crop(box)
# region_pil = region_pil.transpose(PILImage.ROTATE_180)
# pil_image.paste(region_pil, box)
# pil_image.show()
# the new style:
# if you import the library from site-packages import like this:
# import PILasOPENCV as Image
# im = Image.new("RGB", (512, 512), "white")
im = new("RGB", (512, 512), "red")
im.show()
print (type(im))
print(im.format, im.size, im.mode)
# None (512, 512) RGB
# <class 'Image'>
# im = Image.open(testfile)
im = open(testfile)
print(im.format, im.size, im.mode)
font_success = True
try:
font = truetype("arial.ttf", 28)
except:
font_success = False
draw = Draw(im)
if font_success:
text = "Lena's\nimage"
draw.text((249,435), text, font=font, fill=(0, 0, 0))
# JPEG (512, 512) RGB
# im.save(outfile2)
im.show()
small = im.copy()
small.thumbnail(thsize)
small.show()
region = im.crop(box)
print("region",region.format, region.size, region.mode)
# region = region.transpose(Image.ROTATE_180)
region = region.transpose(ROTATE_180)
region.show()
im.paste(region, box)
im.show()
| [] | [] | ["XDG_DATA_DIRS", "WINDIR"] | [] | ["XDG_DATA_DIRS", "WINDIR"] | python | 2 | 0 | |
vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/config.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package server
import (
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
goruntime "runtime"
"runtime/debug"
"sort"
"strconv"
"strings"
"sync/atomic"
"time"
jsonpatch "github.com/evanphx/json-patch"
"github.com/go-openapi/spec"
"github.com/google/uuid"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
utilwaitgroup "k8s.io/apimachinery/pkg/util/waitgroup"
"k8s.io/apimachinery/pkg/version"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/audit"
auditpolicy "k8s.io/apiserver/pkg/audit/policy"
"k8s.io/apiserver/pkg/authentication/authenticator"
"k8s.io/apiserver/pkg/authentication/authenticatorfactory"
authenticatorunion "k8s.io/apiserver/pkg/authentication/request/union"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/apiserver/pkg/authorization/authorizerfactory"
authorizerunion "k8s.io/apiserver/pkg/authorization/union"
"k8s.io/apiserver/pkg/endpoints/discovery"
genericapifilters "k8s.io/apiserver/pkg/endpoints/filters"
apiopenapi "k8s.io/apiserver/pkg/endpoints/openapi"
apirequest "k8s.io/apiserver/pkg/endpoints/request"
genericregistry "k8s.io/apiserver/pkg/registry/generic"
"k8s.io/apiserver/pkg/server/dynamiccertificates"
"k8s.io/apiserver/pkg/server/egressselector"
genericfilters "k8s.io/apiserver/pkg/server/filters"
"k8s.io/apiserver/pkg/server/healthz"
"k8s.io/apiserver/pkg/server/routes"
serverstore "k8s.io/apiserver/pkg/server/storage"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
restclient "k8s.io/client-go/rest"
"k8s.io/component-base/logs"
"k8s.io/klog"
openapicommon "k8s.io/kube-openapi/pkg/common"
// install apis
_ "k8s.io/apiserver/pkg/apis/apiserver/install"
)
const (
// DefaultLegacyAPIPrefix is where the legacy APIs will be located.
DefaultLegacyAPIPrefix = "/api"
// APIGroupPrefix is where non-legacy API group will be located.
APIGroupPrefix = "/apis"
)
// Config is a structure used to configure a GenericAPIServer.
// Its members are sorted roughly in order of importance for composers.
type Config struct {
// SecureServing is required to serve https
SecureServing *SecureServingInfo
// Authentication is the configuration for authentication
Authentication AuthenticationInfo
// Authorization is the configuration for authorization
Authorization AuthorizationInfo
// LoopbackClientConfig is a config for a privileged loopback connection to the API server
// This is required for proper functioning of the PostStartHooks on a GenericAPIServer
// TODO: move into SecureServing(WithLoopback) as soon as insecure serving is gone
LoopbackClientConfig *restclient.Config
// EgressSelector provides a lookup mechanism for dialing outbound connections.
// It does so based on a EgressSelectorConfiguration which was read at startup.
EgressSelector *egressselector.EgressSelector
// RuleResolver is required to get the list of rules that apply to a given user
// in a given namespace
RuleResolver authorizer.RuleResolver
// AdmissionControl performs deep inspection of a given request (including content)
// to set values and determine whether its allowed
AdmissionControl admission.Interface
CorsAllowedOriginList []string
EnableIndex bool
EnableProfiling bool
EnableDiscovery bool
// Requires generic profiling enabled
EnableContentionProfiling bool
EnableMetrics bool
DisabledPostStartHooks sets.String
// The done values in this map are ignored.
PostStartHooks map[string]PostStartHookConfigEntry
// Version will enable the /version endpoint if non-nil
Version *version.Info
// AuditBackend is where audit events are sent to.
AuditBackend audit.Backend
// AuditPolicyChecker makes the decision of whether and how to audit log a request.
AuditPolicyChecker auditpolicy.Checker
// ExternalAddress is the host name to use for external (public internet) facing URLs (e.g. Swagger)
// Will default to a value based on secure serving info and available ipv4 IPs.
ExternalAddress string
//===========================================================================
// Fields you probably don't care about changing
//===========================================================================
// BuildHandlerChainFunc allows you to build custom handler chains by decorating the apiHandler.
BuildHandlerChainFunc func(apiHandler http.Handler, c *Config) (secure http.Handler)
// HandlerChainWaitGroup allows you to wait for all chain handlers exit after the server shutdown.
HandlerChainWaitGroup *utilwaitgroup.SafeWaitGroup
// DiscoveryAddresses is used to build the IPs passed to discovery. If nil, the ExternalAddress is
// always reported
DiscoveryAddresses discovery.Addresses
// The default set of healthz checks. There might be more added via AddHealthChecks dynamically.
HealthzChecks []healthz.HealthChecker
// The default set of livez checks. There might be more added via AddHealthChecks dynamically.
LivezChecks []healthz.HealthChecker
// The default set of readyz-only checks. There might be more added via AddReadyzChecks dynamically.
ReadyzChecks []healthz.HealthChecker
// LegacyAPIGroupPrefixes is used to set up URL parsing for authorization and for validating requests
// to InstallLegacyAPIGroup. New API servers don't generally have legacy groups at all.
LegacyAPIGroupPrefixes sets.String
// RequestInfoResolver is used to assign attributes (used by admission and authorization) based on a request URL.
// Use-cases that are like kubelets may need to customize this.
RequestInfoResolver apirequest.RequestInfoResolver
// Serializer is required and provides the interface for serializing and converting objects to and from the wire
// The default (api.Codecs) usually works fine.
Serializer runtime.NegotiatedSerializer
// OpenAPIConfig will be used in generating OpenAPI spec. This is nil by default. Use DefaultOpenAPIConfig for "working" defaults.
OpenAPIConfig *openapicommon.Config
// RESTOptionsGetter is used to construct RESTStorage types via the generic registry.
RESTOptionsGetter genericregistry.RESTOptionsGetter
// If specified, all requests except those which match the LongRunningFunc predicate will timeout
// after this duration.
RequestTimeout time.Duration
// If specified, long running requests such as watch will be allocated a random timeout between this value, and
// twice this value. Note that it is up to the request handlers to ignore or honor this timeout. In seconds.
MinRequestTimeout int
// This represents the maximum amount of time it should take for apiserver to complete its startup
// sequence and become healthy. From apiserver's start time to when this amount of time has
// elapsed, /livez will assume that unfinished post-start hooks will complete successfully and
// therefore return true.
LivezGracePeriod time.Duration
// ShutdownDelayDuration allows blocking shutdown for some time, e.g. until endpoints pointing to this API server
// have converged on all nodes. During this time, the API server keeps serving; /healthz will return 200,
// but /readyz will return failure.
ShutdownDelayDuration time.Duration
// The limit on the total size increase all "copy" operations in a json
// patch may cause.
// This affects all places that applies json patch in the binary.
JSONPatchMaxCopyBytes int64
// The limit on the request size that would be accepted and decoded in a write request
// 0 means no limit.
MaxRequestBodyBytes int64
// MaxRequestsInFlight is the maximum number of parallel non-long-running requests. Every further
// request has to wait. Applies only to non-mutating requests.
MaxRequestsInFlight int
// MaxMutatingRequestsInFlight is the maximum number of parallel mutating requests. Every further
// request has to wait.
MaxMutatingRequestsInFlight int
// Predicate which is true for paths of long-running http requests
LongRunningFunc apirequest.LongRunningRequestCheck
// MergedResourceConfig indicates which groupVersion enabled and its resources enabled/disabled.
// This is composed of genericapiserver defaultAPIResourceConfig and those parsed from flags.
// If none are specified in flags, then genericapiserver will only enable defaultAPIResourceConfig.
MergedResourceConfig *serverstore.ResourceConfig
// EventSink receives events about the life cycle of the API server, e.g. readiness, serving, signals and termination.
EventSink EventSink
//===========================================================================
// values below here are targets for removal
//===========================================================================
// PublicAddress is the IP address where members of the cluster (kubelet,
// kube-proxy, services, etc.) can reach the GenericAPIServer.
// If nil or 0.0.0.0, the host's default interface will be used.
PublicAddress net.IP
// EquivalentResourceRegistry provides information about resources equivalent to a given resource,
// and the kind associated with a given resource. As resources are installed, they are registered here.
EquivalentResourceRegistry runtime.EquivalentResourceRegistry
}
// EventSink allows to create events.
type EventSink interface {
Create(event *corev1.Event) (*corev1.Event, error)
}
type RecommendedConfig struct {
Config
// SharedInformerFactory provides shared informers for Kubernetes resources. This value is set by
// RecommendedOptions.CoreAPI.ApplyTo called by RecommendedOptions.ApplyTo. It uses an in-cluster client config
// by default, or the kubeconfig given with kubeconfig command line flag.
SharedInformerFactory informers.SharedInformerFactory
// ClientConfig holds the kubernetes client configuration.
// This value is set by RecommendedOptions.CoreAPI.ApplyTo called by RecommendedOptions.ApplyTo.
// By default in-cluster client config is used.
ClientConfig *restclient.Config
}
type SecureServingInfo struct {
// Listener is the secure server network listener.
Listener net.Listener
// Cert is the main server cert which is used if SNI does not match. Cert must be non-nil and is
// allowed to be in SNICerts.
Cert dynamiccertificates.CertKeyContentProvider
// SNICerts are the TLS certificates used for SNI.
SNICerts []dynamiccertificates.SNICertKeyContentProvider
// ClientCA is the certificate bundle for all the signers that you'll recognize for incoming client certificates
ClientCA dynamiccertificates.CAContentProvider
// MinTLSVersion optionally overrides the minimum TLS version supported.
// Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants).
MinTLSVersion uint16
// CipherSuites optionally overrides the list of allowed cipher suites for the server.
// Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants).
CipherSuites []uint16
// HTTP2MaxStreamsPerConnection is the limit that the api server imposes on each client.
// A value of zero means to use the default provided by golang's HTTP/2 support.
HTTP2MaxStreamsPerConnection int
// DisableHTTP2 indicates that http2 should not be enabled.
DisableHTTP2 bool
}
type AuthenticationInfo struct {
// APIAudiences is a list of identifiers that the API identifies as. This is
// used by some authenticators to validate audience bound credentials.
APIAudiences authenticator.Audiences
// Authenticator determines which subject is making the request
Authenticator authenticator.Request
// SupportsBasicAuth indicates that at least one Authenticator supports basic auth
// If this is true, a basic auth challenge is returned on authentication failure
// TODO(roberthbailey): Remove once the server no longer supports http basic auth.
SupportsBasicAuth bool
}
type AuthorizationInfo struct {
// Authorizer determines whether the subject is allowed to make the request based only
// on the RequestURI
Authorizer authorizer.Authorizer
}
// NewConfig returns a Config struct with the default values
func NewConfig(codecs serializer.CodecFactory) *Config {
defaultHealthChecks := []healthz.HealthChecker{healthz.PingHealthz, healthz.LogHealthz}
return &Config{
Serializer: codecs,
BuildHandlerChainFunc: DefaultBuildHandlerChain,
HandlerChainWaitGroup: new(utilwaitgroup.SafeWaitGroup),
LegacyAPIGroupPrefixes: sets.NewString(DefaultLegacyAPIPrefix),
DisabledPostStartHooks: sets.NewString(),
PostStartHooks: map[string]PostStartHookConfigEntry{},
HealthzChecks: append([]healthz.HealthChecker{}, defaultHealthChecks...),
ReadyzChecks: append([]healthz.HealthChecker{}, defaultHealthChecks...),
LivezChecks: append([]healthz.HealthChecker{}, defaultHealthChecks...),
EnableIndex: true,
EnableDiscovery: true,
EnableProfiling: true,
EnableMetrics: true,
MaxRequestsInFlight: 400,
MaxMutatingRequestsInFlight: 200,
RequestTimeout: time.Duration(60) * time.Second,
MinRequestTimeout: 1800,
LivezGracePeriod: time.Duration(0),
ShutdownDelayDuration: time.Duration(0),
// 1.5MB is the default client request size in bytes
// the etcd server should accept. See
// https://github.com/etcd-io/etcd/blob/release-3.4/embed/config.go#L56.
// A request body might be encoded in json, and is converted to
// proto when persisted in etcd, so we allow 2x as the largest size
// increase the "copy" operations in a json patch may cause.
JSONPatchMaxCopyBytes: int64(3 * 1024 * 1024),
// 1.5MB is the recommended client request size in bytes
// the etcd server should accept. See
// https://github.com/etcd-io/etcd/blob/release-3.4/embed/config.go#L56.
// A request body might be encoded in json, and is converted to
// proto when persisted in etcd, so we allow 2x as the largest request
// body size to be accepted and decoded in a write request.
MaxRequestBodyBytes: int64(3 * 1024 * 1024),
// Default to treating watch as a long-running operation
// Generic API servers have no inherent long-running subresources
LongRunningFunc: genericfilters.BasicLongRunningRequestCheck(sets.NewString("watch"), sets.NewString()),
}
}
// NewRecommendedConfig returns a RecommendedConfig struct with the default values
func NewRecommendedConfig(codecs serializer.CodecFactory) *RecommendedConfig {
return &RecommendedConfig{
Config: *NewConfig(codecs),
}
}
func DefaultOpenAPIConfig(getDefinitions openapicommon.GetOpenAPIDefinitions, defNamer *apiopenapi.DefinitionNamer) *openapicommon.Config {
return &openapicommon.Config{
ProtocolList: []string{"https"},
IgnorePrefixes: []string{},
Info: &spec.Info{
InfoProps: spec.InfoProps{
Title: "Generic API Server",
},
},
DefaultResponse: &spec.Response{
ResponseProps: spec.ResponseProps{
Description: "Default Response.",
},
},
GetOperationIDAndTags: apiopenapi.GetOperationIDAndTags,
GetDefinitionName: defNamer.GetDefinitionName,
GetDefinitions: getDefinitions,
}
}
func (c *AuthenticationInfo) ApplyClientCert(clientCA dynamiccertificates.CAContentProvider, servingInfo *SecureServingInfo) error {
if servingInfo == nil {
return nil
}
if clientCA == nil {
return nil
}
if servingInfo.ClientCA == nil {
servingInfo.ClientCA = clientCA
return nil
}
servingInfo.ClientCA = dynamiccertificates.NewUnionCAContentProvider(servingInfo.ClientCA, clientCA)
return nil
}
type completedConfig struct {
*Config
//===========================================================================
// values below here are filled in during completion
//===========================================================================
// SharedInformerFactory provides shared informers for resources
SharedInformerFactory informers.SharedInformerFactory
}
type CompletedConfig struct {
// Embed a private pointer that cannot be instantiated outside of this package.
*completedConfig
}
// AddHealthChecks adds a health check to our config to be exposed by the health endpoints
// of our configured apiserver. We should prefer this to adding healthChecks directly to
// the config unless we explicitly want to add a healthcheck only to a specific health endpoint.
func (c *Config) AddHealthChecks(healthChecks ...healthz.HealthChecker) {
for _, check := range healthChecks {
c.HealthzChecks = append(c.HealthzChecks, check)
c.LivezChecks = append(c.LivezChecks, check)
c.ReadyzChecks = append(c.ReadyzChecks, check)
}
}
// AddPostStartHook allows you to add a PostStartHook that will later be added to the server itself in a New call.
// Name conflicts will cause an error.
func (c *Config) AddPostStartHook(name string, hook PostStartHookFunc) error {
if len(name) == 0 {
return fmt.Errorf("missing name")
}
if hook == nil {
return fmt.Errorf("hook func may not be nil: %q", name)
}
if c.DisabledPostStartHooks.Has(name) {
klog.V(1).Infof("skipping %q because it was explicitly disabled", name)
return nil
}
if postStartHook, exists := c.PostStartHooks[name]; exists {
// this is programmer error, but it can be hard to debug
return fmt.Errorf("unable to add %q because it was already registered by: %s", name, postStartHook.originatingStack)
}
c.PostStartHooks[name] = PostStartHookConfigEntry{hook: hook, originatingStack: string(debug.Stack())}
return nil
}
// AddPostStartHookOrDie allows you to add a PostStartHook, but dies on failure.
func (c *Config) AddPostStartHookOrDie(name string, hook PostStartHookFunc) {
if err := c.AddPostStartHook(name, hook); err != nil {
klog.Fatalf("Error registering PostStartHook %q: %v", name, err)
}
}
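// Usage sketch (not part of the original file): registering a post-start hook on a
// Config before building the server; the hook name and body are illustrative.
//
//	cfg := NewConfig(codecs)
//	cfg.AddPostStartHookOrDie("example-start-informers", func(ctx PostStartHookContext) error {
//		// start informers, warm caches, etc.; return an error to mark the hook as failed
//		return nil
//	})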
// Complete fills in any fields not set that are required to have valid data and can be derived
// from other fields. If you're going to `ApplyOptions`, do that first. It's mutating the receiver.
func (c *Config) Complete(informers informers.SharedInformerFactory) CompletedConfig {
if len(c.ExternalAddress) == 0 && c.PublicAddress != nil {
c.ExternalAddress = c.PublicAddress.String()
}
// if there is no port, and we listen on one securely, use that one
if _, _, err := net.SplitHostPort(c.ExternalAddress); err != nil {
if c.SecureServing == nil {
klog.Fatalf("cannot derive external address port without listening on a secure port.")
}
_, port, err := c.SecureServing.HostPort()
if err != nil {
klog.Fatalf("cannot derive external address from the secure port: %v", err)
}
c.ExternalAddress = net.JoinHostPort(c.ExternalAddress, strconv.Itoa(port))
}
if c.OpenAPIConfig != nil {
if c.OpenAPIConfig.SecurityDefinitions != nil {
// Setup OpenAPI security: all APIs will have the same authentication for now.
c.OpenAPIConfig.DefaultSecurity = []map[string][]string{}
keys := []string{}
for k := range *c.OpenAPIConfig.SecurityDefinitions {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
c.OpenAPIConfig.DefaultSecurity = append(c.OpenAPIConfig.DefaultSecurity, map[string][]string{k: {}})
}
if c.OpenAPIConfig.CommonResponses == nil {
c.OpenAPIConfig.CommonResponses = map[int]spec.Response{}
}
if _, exists := c.OpenAPIConfig.CommonResponses[http.StatusUnauthorized]; !exists {
c.OpenAPIConfig.CommonResponses[http.StatusUnauthorized] = spec.Response{
ResponseProps: spec.ResponseProps{
Description: "Unauthorized",
},
}
}
}
// make sure we populate info, and info.version, if not manually set
if c.OpenAPIConfig.Info == nil {
c.OpenAPIConfig.Info = &spec.Info{}
}
if c.OpenAPIConfig.Info.Version == "" {
if c.Version != nil {
c.OpenAPIConfig.Info.Version = strings.Split(c.Version.String(), "-")[0]
} else {
c.OpenAPIConfig.Info.Version = "unversioned"
}
}
}
if c.DiscoveryAddresses == nil {
c.DiscoveryAddresses = discovery.DefaultAddresses{DefaultAddress: c.ExternalAddress}
}
if c.EventSink == nil {
c.EventSink = nullEventSink{}
}
AuthorizeClientBearerToken(c.LoopbackClientConfig, &c.Authentication, &c.Authorization)
if c.RequestInfoResolver == nil {
c.RequestInfoResolver = NewRequestInfoResolver(c)
}
if c.EquivalentResourceRegistry == nil {
if c.RESTOptionsGetter == nil {
c.EquivalentResourceRegistry = runtime.NewEquivalentResourceRegistry()
} else {
c.EquivalentResourceRegistry = runtime.NewEquivalentResourceRegistryWithIdentity(func(groupResource schema.GroupResource) string {
// use the storage prefix as the key if possible
if opts, err := c.RESTOptionsGetter.GetRESTOptions(groupResource); err == nil {
return opts.ResourcePrefix
}
// otherwise return "" to use the default key (parent GV name)
return ""
})
}
}
return CompletedConfig{&completedConfig{c, informers}}
}
// Complete fills in any fields not set that are required to have valid data and can be derived
// from other fields. If you're going to `ApplyOptions`, do that first. It's mutating the receiver.
func (c *RecommendedConfig) Complete() CompletedConfig {
if c.ClientConfig != nil {
ref, err := eventReference()
if err != nil {
klog.Warningf("Failed to derive event reference, won't create events: %v", err)
c.EventSink = nullEventSink{}
} else {
ns := ref.Namespace
if len(ns) == 0 {
ns = "default"
}
c.EventSink = &v1.EventSinkImpl{
Interface: kubernetes.NewForConfigOrDie(c.ClientConfig).CoreV1().Events(ns),
}
}
}
return c.Config.Complete(c.SharedInformerFactory)
}
func eventReference() (*corev1.ObjectReference, error) {
ns := os.Getenv("POD_NAMESPACE")
pod := os.Getenv("POD_NAME")
if len(ns) == 0 && len(pod) > 0 {
serviceAccountNamespaceFile := "/var/run/secrets/kubernetes.io/serviceaccount/namespace"
if _, err := os.Stat(serviceAccountNamespaceFile); err == nil {
bs, err := ioutil.ReadFile(serviceAccountNamespaceFile)
if err != nil {
return nil, err
}
ns = string(bs)
}
}
if len(ns) == 0 {
pod = ""
ns = "kube-system"
}
if len(pod) == 0 {
return &corev1.ObjectReference{
Kind: "Namespace",
Name: ns,
APIVersion: "v1",
}, nil
}
return &corev1.ObjectReference{
Kind: "Pod",
Namespace: ns,
Name: pod,
APIVersion: "v1",
}, nil
}
// New creates a new server which logically combines the handling chain with the passed server.
// name is used to differentiate for logging. The handler chain in particular can be difficult as it starts delegating.
// delegationTarget may not be nil.
func (c completedConfig) New(name string, delegationTarget DelegationTarget) (*GenericAPIServer, error) {
if c.Serializer == nil {
return nil, fmt.Errorf("Genericapiserver.New() called with config.Serializer == nil")
}
if c.LoopbackClientConfig == nil {
return nil, fmt.Errorf("Genericapiserver.New() called with config.LoopbackClientConfig == nil")
}
if c.EquivalentResourceRegistry == nil {
return nil, fmt.Errorf("Genericapiserver.New() called with config.EquivalentResourceRegistry == nil")
}
handlerChainBuilder := func(handler http.Handler) http.Handler {
return c.BuildHandlerChainFunc(handler, c.Config)
}
apiServerHandler := NewAPIServerHandler(name, c.Serializer, handlerChainBuilder, delegationTarget.UnprotectedHandler())
s := &GenericAPIServer{
discoveryAddresses: c.DiscoveryAddresses,
LoopbackClientConfig: c.LoopbackClientConfig,
legacyAPIGroupPrefixes: c.LegacyAPIGroupPrefixes,
admissionControl: c.AdmissionControl,
Serializer: c.Serializer,
AuditBackend: c.AuditBackend,
Authorizer: c.Authorization.Authorizer,
delegationTarget: delegationTarget,
EquivalentResourceRegistry: c.EquivalentResourceRegistry,
HandlerChainWaitGroup: c.HandlerChainWaitGroup,
minRequestTimeout: time.Duration(c.MinRequestTimeout) * time.Second,
ShutdownTimeout: c.RequestTimeout,
ShutdownDelayDuration: c.ShutdownDelayDuration,
SecureServingInfo: c.SecureServing,
ExternalAddress: c.ExternalAddress,
Handler: apiServerHandler,
listedPathProvider: apiServerHandler,
openAPIConfig: c.OpenAPIConfig,
postStartHooks: map[string]postStartHookEntry{},
preShutdownHooks: map[string]preShutdownHookEntry{},
disabledPostStartHooks: c.DisabledPostStartHooks,
healthzChecks: c.HealthzChecks,
livezChecks: c.LivezChecks,
readyzChecks: c.ReadyzChecks,
readinessStopCh: make(chan struct{}),
livezGracePeriod: c.LivezGracePeriod,
DiscoveryGroupManager: discovery.NewRootAPIsHandler(c.DiscoveryAddresses, c.Serializer),
maxRequestBodyBytes: c.MaxRequestBodyBytes,
livezClock: clock.RealClock{},
eventSink: c.EventSink,
}
ref, err := eventReference()
if err != nil {
klog.Warningf("Failed to derive event reference, won't create events: %v", err)
c.EventSink = nullEventSink{}
}
s.eventRef = ref
for {
if c.JSONPatchMaxCopyBytes <= 0 {
break
}
existing := atomic.LoadInt64(&jsonpatch.AccumulatedCopySizeLimit)
if existing > 0 && existing < c.JSONPatchMaxCopyBytes {
break
}
if atomic.CompareAndSwapInt64(&jsonpatch.AccumulatedCopySizeLimit, existing, c.JSONPatchMaxCopyBytes) {
break
}
}
// first add poststarthooks from delegated targets
for k, v := range delegationTarget.PostStartHooks() {
s.postStartHooks[k] = v
}
for k, v := range delegationTarget.PreShutdownHooks() {
s.preShutdownHooks[k] = v
}
// add poststarthooks that were preconfigured. Using the add method will give us an error if the same name has already been registered.
for name, preconfiguredPostStartHook := range c.PostStartHooks {
if err := s.AddPostStartHook(name, preconfiguredPostStartHook.hook); err != nil {
return nil, err
}
}
genericApiServerHookName := "generic-apiserver-start-informers"
if c.SharedInformerFactory != nil && !s.isPostStartHookRegistered(genericApiServerHookName) {
err := s.AddPostStartHook(genericApiServerHookName, func(context PostStartHookContext) error {
c.SharedInformerFactory.Start(context.StopCh)
return nil
})
if err != nil {
return nil, err
}
}
for _, delegateCheck := range delegationTarget.HealthzChecks() {
skip := false
for _, existingCheck := range c.HealthzChecks {
if existingCheck.Name() == delegateCheck.Name() {
skip = true
break
}
}
if skip {
continue
}
s.AddHealthChecks(delegateCheck)
}
s.listedPathProvider = routes.ListedPathProviders{s.listedPathProvider, delegationTarget}
installAPI(s, c.Config)
// use the UnprotectedHandler from the delegation target to ensure that we don't attempt to double authenticate, authorize,
// or some other part of the filter chain in delegation cases.
if delegationTarget.UnprotectedHandler() == nil && c.EnableIndex {
s.Handler.NonGoRestfulMux.NotFoundHandler(routes.IndexLister{
StatusCode: http.StatusNotFound,
PathProvider: s.listedPathProvider,
})
}
return s, nil
}
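// Usage sketch (not part of the original file): the typical construction flow for a
// generic API server built from this package; the names and the empty delegate are
// illustrative assumptions.
//
//	cfg := NewConfig(codecs)
//	cfg.SecureServing = secureServingInfo            // required to serve https
//	cfg.LoopbackClientConfig = loopbackClientConfig  // required by New()
//	completed := cfg.Complete(sharedInformerFactory)
//	server, err := completed.New("example-apiserver", NewEmptyDelegate())
//	if err != nil {
//		// handle configuration errors
//	}
//	prepared := server.PrepareRun()
//	_ = prepared.Run(stopCh)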
func DefaultBuildHandlerChain(apiHandler http.Handler, c *Config) http.Handler {
handler := genericapifilters.WithAuthorization(apiHandler, c.Authorization.Authorizer, c.Serializer)
handler = genericfilters.WithMaxInFlightLimit(handler, c.MaxRequestsInFlight, c.MaxMutatingRequestsInFlight, c.LongRunningFunc)
handler = genericapifilters.WithImpersonation(handler, c.Authorization.Authorizer, c.Serializer)
handler = genericapifilters.WithAudit(handler, c.AuditBackend, c.AuditPolicyChecker, c.LongRunningFunc)
failedHandler := genericapifilters.Unauthorized(c.Serializer, c.Authentication.SupportsBasicAuth)
failedHandler = genericapifilters.WithFailedAuthenticationAudit(failedHandler, c.AuditBackend, c.AuditPolicyChecker)
handler = genericapifilters.WithAuthentication(handler, c.Authentication.Authenticator, failedHandler, c.Authentication.APIAudiences)
handler = genericfilters.WithCORS(handler, c.CorsAllowedOriginList, nil, nil, nil, "true")
handler = genericfilters.WithTimeoutForNonLongRunningRequests(handler, c.LongRunningFunc, c.RequestTimeout)
handler = genericfilters.WithWaitGroup(handler, c.LongRunningFunc, c.HandlerChainWaitGroup)
handler = genericapifilters.WithRequestInfo(handler, c.RequestInfoResolver)
handler = genericfilters.WithPanicRecovery(handler)
return handler
}
func installAPI(s *GenericAPIServer, c *Config) {
if c.EnableIndex {
routes.Index{}.Install(s.listedPathProvider, s.Handler.NonGoRestfulMux)
}
if c.EnableProfiling {
routes.Profiling{}.Install(s.Handler.NonGoRestfulMux)
if c.EnableContentionProfiling {
goruntime.SetBlockProfileRate(1)
}
// so far, only logging related endpoints are considered valid to add for these debug flags.
routes.DebugFlags{}.Install(s.Handler.NonGoRestfulMux, "v", routes.StringFlagPutHandler(logs.GlogSetter))
}
if c.EnableMetrics {
if c.EnableProfiling {
routes.MetricsWithReset{}.Install(s.Handler.NonGoRestfulMux)
} else {
routes.DefaultMetrics{}.Install(s.Handler.NonGoRestfulMux)
}
}
routes.Version{Version: c.Version}.Install(s.Handler.GoRestfulContainer)
if c.EnableDiscovery {
s.Handler.GoRestfulContainer.Add(s.DiscoveryGroupManager.WebService())
}
}
func NewRequestInfoResolver(c *Config) *apirequest.RequestInfoFactory {
apiPrefixes := sets.NewString(strings.Trim(APIGroupPrefix, "/")) // all possible API prefixes
legacyAPIPrefixes := sets.String{} // APIPrefixes that won't have groups (legacy)
for legacyAPIPrefix := range c.LegacyAPIGroupPrefixes {
apiPrefixes.Insert(strings.Trim(legacyAPIPrefix, "/"))
legacyAPIPrefixes.Insert(strings.Trim(legacyAPIPrefix, "/"))
}
return &apirequest.RequestInfoFactory{
APIPrefixes: apiPrefixes,
GrouplessAPIPrefixes: legacyAPIPrefixes,
}
}
func (s *SecureServingInfo) HostPort() (string, int, error) {
if s == nil || s.Listener == nil {
return "", 0, fmt.Errorf("no listener found")
}
addr := s.Listener.Addr().String()
host, portStr, err := net.SplitHostPort(addr)
if err != nil {
return "", 0, fmt.Errorf("failed to get port from listener address %q: %v", addr, err)
}
port, err := strconv.Atoi(portStr)
if err != nil {
return "", 0, fmt.Errorf("invalid non-numeric port %q", portStr)
}
return host, port, nil
}
// AuthorizeClientBearerToken wraps the authenticator and authorizer in loopback authentication logic
// if the loopback client config is specified AND it has a bearer token. Note that if either authn or
// authz is nil, this function won't add a token authenticator or authorizer.
func AuthorizeClientBearerToken(loopback *restclient.Config, authn *AuthenticationInfo, authz *AuthorizationInfo) {
if loopback == nil || len(loopback.BearerToken) == 0 {
return
}
if authn == nil || authz == nil {
// prevent nil pointer panic
return
}
if authn.Authenticator == nil || authz.Authorizer == nil {
// authenticator or authorizer might be nil if we want to bypass authz/authn
// and we also do nothing in this case.
return
}
privilegedLoopbackToken := loopback.BearerToken
var uid = uuid.New().String()
tokens := make(map[string]*user.DefaultInfo)
tokens[privilegedLoopbackToken] = &user.DefaultInfo{
Name: user.APIServerUser,
UID: uid,
Groups: []string{user.SystemPrivilegedGroup},
}
tokenAuthenticator := authenticatorfactory.NewFromTokens(tokens)
authn.Authenticator = authenticatorunion.New(tokenAuthenticator, authn.Authenticator)
tokenAuthorizer := authorizerfactory.NewPrivilegedGroups(user.SystemPrivilegedGroup)
authz.Authorizer = authorizerunion.New(tokenAuthorizer, authz.Authorizer)
}
type nullEventSink struct{}
func (nullEventSink) Create(event *corev1.Event) (*corev1.Event, error) {
return nil, nil
}
| ["\"POD_NAMESPACE\"", "\"POD_NAME\""] | [] | ["POD_NAMESPACE", "POD_NAME"] | [] | ["POD_NAMESPACE", "POD_NAME"] | go | 2 | 0 |
vendor/github.com/coreos/rkt/tests/rkt_image_gc_test.go | // Copyright 2015 The rkt Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build host coreos src kvm
package main
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/coreos/gexpect"
"github.com/coreos/rkt/common"
"github.com/coreos/rkt/tests/testutils"
)
func TestImageGCTreeStore(t *testing.T) {
ctx := testutils.NewRktRunCtx()
defer ctx.Cleanup()
expectedTreeStores := 2
// If overlayfs is not supported only the stage1 image is rendered in the treeStore
if !common.SupportsOverlay() {
expectedTreeStores = 1
}
// at this point we know that RKT_INSPECT_IMAGE env var is not empty
referencedACI := os.Getenv("RKT_INSPECT_IMAGE")
cmd := fmt.Sprintf("%s --insecure-options=image run --mds-register=false %s", ctx.Cmd(), referencedACI)
t.Logf("Running %s: %v", referencedACI, cmd)
child, err := gexpect.Spawn(cmd)
if err != nil {
t.Fatalf("Cannot exec: %v", err)
}
if err := child.Wait(); err != nil {
t.Fatalf("rkt didn't terminate correctly: %v", err)
}
treeStoreIDs, err := getTreeStoreIDs(ctx)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
// We expect 2 treeStoreIDs for stage1 and app (only 1 if overlay is not supported/enabled)
if len(treeStoreIDs) != expectedTreeStores {
t.Fatalf("expected %d entries in the treestore but found %d entries", expectedTreeStores, len(treeStoreIDs))
}
runImageGC(t, ctx)
treeStoreIDs, err = getTreeStoreIDs(ctx)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
// We expect 1 or 2 treeStoreIDs again (as above), since no pod gc has been executed yet
if len(treeStoreIDs) != expectedTreeStores {
t.Fatalf("expected %d entries in the treestore but found %d entries", expectedTreeStores, len(treeStoreIDs))
}
runGC(t, ctx)
runImageGC(t, ctx)
treeStoreIDs, err = getTreeStoreIDs(ctx)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
if len(treeStoreIDs) != 0 {
t.Fatalf("expected empty treestore but found %d entries", len(treeStoreIDs))
}
}
func getTreeStoreIDs(ctx *testutils.RktRunCtx) (map[string]struct{}, error) {
treeStoreIDs := map[string]struct{}{}
ls, err := ioutil.ReadDir(filepath.Join(ctx.DataDir(), "cas", "tree"))
if err != nil {
if os.IsNotExist(err) {
return treeStoreIDs, nil
}
return nil, fmt.Errorf("cannot read treestore directory: %v", err)
}
for _, p := range ls {
if p.IsDir() {
id := filepath.Base(p.Name())
treeStoreIDs[id] = struct{}{}
}
}
return treeStoreIDs, nil
}
| ["\"RKT_INSPECT_IMAGE\""] | [] | ["RKT_INSPECT_IMAGE"] | [] | ["RKT_INSPECT_IMAGE"] | go | 1 | 0 |
preprocess_data.py | import json
import numpy as np
import scipy.sparse as sp
import scipy.io
import os
home = os.getenv("HOME")
data_path = home + "/gpu_memory_reduction/alzheimer/data"
def print_array_prop(a):
print("Shape: {}".format(a.shape))
print("Dtype: {}".format(a.dtype))
def preprocess_flickr():
flickr_path = data_path + "/flickr"
path = flickr_path + "/adj_full.npz"
f = np.load(path)
adj = sp.csr_matrix((f["data"], f["indices"], f["indptr"]), f["shape"])
adj = adj.astype(np.float32)
path = flickr_path + "/adjacency.mtx"
scipy.io.mmwrite(path, adj)
path = flickr_path + "/feats.npy"
features = np.load(path)
features = features.astype(np.float32)
path = flickr_path + "/features.npy"
np.save(path, features)
[print(x[0:10]) for x in features[0:10]]
classes = np.zeros((features.shape[0],), dtype=np.int32)
path = flickr_path + "/class_map.json"
with open(path) as f:
class_map = json.load(f)
for key, item in class_map.items():
classes[int(key)] = item
path = flickr_path + "/classes.npy"
np.save(path, classes)
[print(x) for x in classes[0:10]]
path = flickr_path + "/role.json"
with open(path) as f:
role = json.load(f)
train_mask = np.zeros((features.shape[0],), dtype=bool)
train_mask[np.array(role["tr"])] = True
path = flickr_path + "/train_mask.npy"
np.save(path, train_mask)
[print(x) for x in train_mask[0:10]]
val_mask = np.zeros((features.shape[0],), dtype=bool)
val_mask[np.array(role["va"])] = True
path = flickr_path + "/val_mask.npy"
np.save(path, val_mask)
[print(x) for x in val_mask[0:10]]
test_mask = np.zeros((features.shape[0],), dtype=bool)
test_mask[np.array(role["te"])] = True
path = flickr_path + "/test_mask.npy"
np.save(path, test_mask)
[print(x) for x in test_mask[0:10]]
def preprocess_reddit():
reddit_path = data_path + "/reddit"
path = reddit_path + "/reddit_data.npz"
data = np.load(path)
print(data.files)
features = data["feature"]
features = features.astype(np.float32)
path = reddit_path + "/features.npy"
np.save(path, features)
classes = data["label"]
classes = classes.astype(np.int32)
path = reddit_path + "/classes.npy"
np.save(path, classes)
path = reddit_path + "/reddit_graph.npz"
graph = np.load(path)
print(graph.files)
shape = graph["shape"]
row = graph["row"]
column = graph["col"]
values = graph["data"]
adjacency = sp.coo_matrix((values, (row, column)), shape=shape)
adjacency = sp.csr_matrix(adjacency)
adjacency = adjacency.astype(np.float32)
path = reddit_path + "/adjacency.mtx"
scipy.io.mmwrite(path, adjacency)
def preprocess_products():
products_path = data_path + "/products"
path = products_path + "/raw/num-edge-list.csv"
num_edges_np = np.genfromtxt(path, dtype=np.int64)
num_edges = num_edges_np.item()
path = products_path + "/raw/num-node-list.csv"
num_nodes_np = np.genfromtxt(path, dtype=np.int64)
num_nodes = num_nodes_np.item()
print("Number of edges: {}".format(num_edges))
print("Number of nodes: {}".format(num_nodes))
path = products_path + "/raw/edge.csv"
edges = np.genfromtxt(path, dtype=np.int64, delimiter=",")
data = np.ones(num_edges)
adjacency = sp.coo_matrix((data, (edges[:, 0], edges[:, 1])), shape=(num_nodes, num_nodes))
adjacency = adjacency.tocsr()
adjacency = adjacency.astype(np.float32)
path = products_path + "/adjacency.mtx"
scipy.io.mmwrite(path, adjacency)
path = products_path + "/raw/node-feat.csv"
features = np.genfromtxt(path, dtype=np.float64, delimiter=",")
features = features.astype(np.float32)
path = products_path + "/features.npy"
np.save(path, features)
path = products_path + "/raw/node-label.csv"
classes = np.genfromtxt(path, dtype=np.int64, delimiter=",")
classes = classes.astype(np.int32)
path = products_path + "/classes.npy"
np.save(path, classes)
if __name__ == "__main__":
# preprocess_flickr()
# preprocess_reddit()
preprocess_products()
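    # Illustrative sanity check -- an added sketch, not part of the original script.
    # It assumes preprocess_products() above has completed and written its outputs
    # under data_path + "/products"; it reloads the saved artifacts and checks that
    # their leading dimensions agree.
    products_path = data_path + "/products"
    adjacency = scipy.io.mmread(products_path + "/adjacency.mtx").tocsr()
    features = np.load(products_path + "/features.npy")
    classes = np.load(products_path + "/classes.npy")
    assert adjacency.shape[0] == features.shape[0] == classes.shape[0]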
| [] | [] | ["HOME"] | [] | ["HOME"] | python | 1 | 0 |
gibberish/gibberish.py | #!/usr/bin/python
#
# 12Jun2017 Petr Janata - added srcfile and outfile
# 17Jun2017 Petr Janata - expanded set of accepted characters to include digits and hyphen
import math
import pickle
import os
accepted_chars = 'abcdefghijklmnopqrstuvwxyz0123456789- '
pos = dict([(char, idx) for idx, char in enumerate(accepted_chars)])
if 'GIBBERISH_DATA_DIR' in os.environ:
data_dir = os.environ['GIBBERISH_DATA_DIR'] + '/'
else:
data_dir = os.path.dirname(os.path.abspath(__file__)) + '/data/'
model_path = data_dir + '/gib_model.pki'
class Gibberish(object):
def __init__(self):
self.train_if_necessary()
def train_if_necessary(self):
if not os.path.isfile(model_path):
self.train()
else:
self.load_persisted_model()
def persist_model(self):
with open(model_path, 'wb') as f:
pickle.dump(vars(self), f)
def load_persisted_model(self):
with open(model_path, 'rb') as f:
persisted_model = pickle.load(f)
for key, value in persisted_model.items():
setattr(self, key, value)
def normalize(self, line):
""" Return only the subset of chars from accepted_chars.
This helps keep the model relatively small by ignoring punctuation,
infrequent symbols, etc. """
return [c.lower() for c in line if c.lower() in accepted_chars]
def ngram(self, n, l):
""" Return all n grams from l after normalizing """
filtered = self.normalize(l)
for start in range(0, len(filtered) - n + 1):
yield ''.join(filtered[start:start + n])
def avg_transition_prob(self, l, log_prob_mat):
""" Return the average transition prob from l through log_prob_mat. """
log_prob = 0.0
transition_ct = 0
for a, b in self.ngram(2, l):
log_prob += log_prob_mat[pos[a]][pos[b]]
transition_ct += 1
# The exponentiation translates from log probs to probs.
return math.exp(log_prob / (transition_ct or 1))
def train(self, bigfile=data_dir + 'big.txt', goodfile=data_dir + 'good.txt',
badfile=data_dir + 'bad.txt'):
""" Write a simple model as a pickle file """
k = len(accepted_chars)
# Assume we have seen 10 of each character pair. This acts as a kind of
# prior or smoothing factor. This way, if we see a character transition
# live that we've never observed in the past, we won't assume the entire
# string has 0 probability.
counts = [[10 for i in range(k)] for i in range(k)]
# Count transitions from big text file, taken
# from http://norvig.com/spell-correct.html
for line in open(bigfile):
for a, b in self.ngram(2, line):
counts[pos[a]][pos[b]] += 1
# Normalize the counts so that they become log probabilities.
# We use log probabilities rather than straight probabilities to avoid
# numeric underflow issues with long texts.
# This contains a justification:
# http://squarecog.wordpress.com/2009/01/10/dealing-with-underflow-in-joint-probability-calculations/
for i, row in enumerate(counts):
s = float(sum(row))
for j in range(len(row)):
row[j] = math.log(row[j] / s)
# Find the probability of generating a few arbitrarily chosen good and
# bad phrases.
good_probs = [self.avg_transition_prob(l, counts) for l in open(goodfile)]
bad_probs = [self.avg_transition_prob(l, counts) for l in open(badfile)]
# Assert that we actually are capable of detecting the junk.
assert min(good_probs) > max(bad_probs)
# And pick a threshold halfway between the worst good and best bad inputs.
thresh = (min(good_probs) + max(bad_probs)) / 2
self.mat = counts
self.thresh = thresh
self.persist_model()
def detect_gibberish(self, text):
text = ''.join(self.normalize(text))
return self.avg_transition_prob(text, self.mat) < self.thresh
def percent_gibberish(self, text):
text = ''.join(self.normalize(text))
text = text.strip()
words = text.split(' ')
if len(words) == 0:
return 0
gibberish_count = 0
for word in words:
if self.detect_gibberish(word):
gibberish_count += 1
return float(gibberish_count) / float(len(words))
def gibberish_pct(self, text):
text = ''.join(self.normalize(text))
return self.avg_transition_prob(text, self.mat)
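# Illustrative usage sketch -- an addition, not part of the original module. It
# assumes the bundled data files (big.txt, good.txt, bad.txt) are present so that
# training or loading the persisted model succeeds.
if __name__ == '__main__':
    gib = Gibberish()
    print(gib.detect_gibberish('zxcvwerkjhqq'))         # likely True (low bigram probability)
    print(gib.detect_gibberish('the quick brown fox'))  # likely False (plausible English)
    print(gib.percent_gibberish('hello world qzxjvkwp pftlbn'))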
| [] | [] | ["GIBBERISH_DATA_DIR"] | [] | ["GIBBERISH_DATA_DIR"] | python | 1 | 0 |
prudence/commands/run.go | package commands
import (
"io/fs"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/spf13/cobra"
kutiljs "github.com/tliron/kutil/js"
urlpkg "github.com/tliron/kutil/url"
"github.com/tliron/kutil/util"
"github.com/tliron/kutil/version"
"github.com/tliron/prudence/js"
"github.com/tliron/prudence/platform"
)
var paths []string
var typescript string
var arguments map[string]string
var watch bool
func init() {
rootCommand.AddCommand(runCommand)
runCommand.Flags().StringArrayVarP(&paths, "path", "p", nil, "library path (appended after PRUDENCE_PATH environment variable)")
runCommand.Flags().StringVarP(&typescript, "typescript", "t", "", "TypeScript project path (must have a tsconfig.json file)")
runCommand.Flags().StringToStringVarP(&arguments, "argument", "a", make(map[string]string), "arguments (format is name=value)")
runCommand.Flags().BoolVarP(&watch, "watch", "w", true, "whether to watch dependent files and restart if they are changed")
runCommand.Flags().StringVarP(&platform.NCSAFilename, "ncsa", "n", "", "NCSA log filename (or special values \"stdout\" and \"stderr\")")
}
var runCommand = &cobra.Command{
Use: "run [Script PATH or URL]",
Short: "Run",
Args: cobra.ExactArgs(1),
Run: func(cmd *cobra.Command, args []string) {
startId := args[0]
util.OnExit(platform.Stop)
urlContext := urlpkg.NewContext()
util.OnExitError(urlContext.Release)
var path_ []urlpkg.URL
parsePaths := func(paths []string) {
for _, path := range paths {
if !strings.HasSuffix(path, "/") {
path += "/"
}
pathUrl, err := urlpkg.NewValidURL(path, nil, urlContext)
log.Infof("library path: %s", pathUrl.String())
util.FailOnError(err)
path_ = append(path_, pathUrl)
}
}
parsePaths(filepath.SplitList(os.Getenv("PRUDENCE_PATH")))
parsePaths(paths)
environment := js.NewEnvironment(urlContext, path_, arguments)
util.OnExitError(environment.Release)
log.Noticef("Prudence version: %s", version.GitVersion)
if typescript != "" {
transpileTypeScript()
}
environment.OnChanged = func(id string, module *kutiljs.Module) {
if module != nil {
log.Infof("module changed: %s", module.Id)
} else if id != "" {
log.Infof("file changed: %s", id)
}
environment.Lock.Lock()
if watch {
if err := environment.RestartWatcher(); err != nil {
log.Warningf("watch feature not supported on this platform")
}
if typescript != "" {
if filepath.Ext(id) == ".ts" {
transpileTypeScript()
}
// Watch all TypeScript files
filepath.WalkDir(typescript, func(path string, dirEntry fs.DirEntry, err error) error {
if (filepath.Ext(path) == ".ts") && !dirEntry.IsDir() {
environment.Watch(path)
}
return nil
})
}
}
environment.ClearCache()
_, err := environment.RequireID(startId)
environment.Lock.Unlock()
util.FailOnError(err)
}
environment.OnChanged("", nil)
// Block forever
<-make(chan bool, 0)
},
}
func transpileTypeScript() {
log.Infof("transpiling TypeScript: %s", typescript)
cmd := exec.Command("tsc", "--project", typescript)
err := cmd.Run()
util.FailOnError(err)
}
| ["\"PRUDENCE_PATH\""] | [] | ["PRUDENCE_PATH"] | [] | ["PRUDENCE_PATH"] | go | 1 | 0 |
fmriprep/utils/sentry.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Stripped out routines for Sentry"""
import os
from pathlib import Path
import re
from niworkflows.utils.misc import read_crashfile
import sentry_sdk
CHUNK_SIZE = 16384
# Group common events with pre specified fingerprints
KNOWN_ERRORS = {
'permission-denied': [
"PermissionError: [Errno 13] Permission denied"
],
'memory-error': [
"MemoryError",
"Cannot allocate memory",
"Return code: 134",
],
'reconall-already-running': [
"ERROR: it appears that recon-all is already running"
],
'no-disk-space': [
"[Errno 28] No space left on device",
"[Errno 122] Disk quota exceeded"
],
'segfault': [
"Segmentation Fault",
"Segfault",
"Return code: 139",
],
'potential-race-condition': [
"[Errno 39] Directory not empty",
"_unfinished.json",
],
'keyboard-interrupt': [
"KeyboardInterrupt",
],
}
def start_ping(run_uuid, npart):
with sentry_sdk.configure_scope() as scope:
if run_uuid:
scope.set_tag('run_uuid', run_uuid)
scope.set_tag('npart', npart)
sentry_sdk.add_breadcrumb(message='fMRIPrep started', level='info')
sentry_sdk.capture_message('fMRIPrep started', level='info')
def sentry_setup(opts, exec_env):
from os import cpu_count
import psutil
import hashlib
from ..__about__ import __version__
environment = "prod"
release = __version__
if not __version__:
environment = "dev"
release = "dev"
elif int(os.getenv('FMRIPREP_DEV', '0')) or ('+' in __version__):
environment = "dev"
sentry_sdk.init("https://[email protected]/1137693",
release=release,
environment=environment,
before_send=before_send)
with sentry_sdk.configure_scope() as scope:
scope.set_tag('exec_env', exec_env)
if exec_env == 'fmriprep-docker':
scope.set_tag('docker_version', os.getenv('DOCKER_VERSION_8395080871'))
dset_desc_path = opts.bids_dir / 'dataset_description.json'
if dset_desc_path.exists():
desc_content = dset_desc_path.read_bytes()
scope.set_tag('dset_desc_sha256', hashlib.sha256(desc_content).hexdigest())
free_mem_at_start = round(psutil.virtual_memory().free / 1024**3, 1)
scope.set_tag('free_mem_at_start', free_mem_at_start)
scope.set_tag('cpu_count', cpu_count())
# Memory policy may have a large effect on types of errors experienced
overcommit_memory = Path('/proc/sys/vm/overcommit_memory')
if overcommit_memory.exists():
policy = {'0': 'heuristic',
'1': 'always',
'2': 'never'}.get(overcommit_memory.read_text().strip(), 'unknown')
scope.set_tag('overcommit_memory', policy)
if policy == 'never':
overcommit_kbytes = Path('/proc/sys/vm/overcommit_kbytes')
kb = overcommit_kbytes.read_text().strip()
if kb != '0':
limit = '{}kB'.format(kb)
else:
overcommit_ratio = Path('/proc/sys/vm/overcommit_ratio')
limit = '{}%'.format(overcommit_ratio.read_text().strip())
scope.set_tag('overcommit_limit', limit)
else:
scope.set_tag('overcommit_limit', 'n/a')
else:
scope.set_tag('overcommit_memory', 'n/a')
scope.set_tag('overcommit_limit', 'n/a')
for k, v in vars(opts).items():
scope.set_tag(k, v)
def process_crashfile(crashfile):
"""Parse the contents of a crashfile and submit sentry messages"""
crash_info = read_crashfile(str(crashfile))
with sentry_sdk.push_scope() as scope:
scope.level = 'fatal'
# Extract node name
node_name = crash_info.pop('node').split('.')[-1]
scope.set_tag("node_name", node_name)
# Massage the traceback, extract the gist
traceback = crash_info.pop('traceback')
# last line is probably most informative summary
gist = traceback.splitlines()[-1]
exception_text_start = 1
for line in traceback.splitlines()[1:]:
if not line[0].isspace():
break
exception_text_start += 1
exception_text = '\n'.join(
traceback.splitlines()[exception_text_start:])
# Extract inputs, if present
inputs = crash_info.pop('inputs', None)
if inputs:
scope.set_extra('inputs', dict(inputs))
# Extract any other possible metadata in the crash file
for k, v in crash_info.items():
strv = list(_chunks(str(v)))
if len(strv) == 1:
scope.set_extra(k, strv[0])
else:
for i, chunk in enumerate(strv):
scope.set_extra('%s_%02d' % (k, i), chunk)
fingerprint = ''
issue_title = '{}: {}'.format(node_name, gist)
for new_fingerprint, error_snippets in KNOWN_ERRORS.items():
for error_snippet in error_snippets:
if error_snippet in traceback:
fingerprint = new_fingerprint
issue_title = new_fingerprint
break
if fingerprint:
break
message = issue_title + '\n\n'
message += exception_text[-(8192 - len(message)):]
if fingerprint:
sentry_sdk.add_breadcrumb(message=fingerprint, level='fatal')
else:
# remove file paths
fingerprint = re.sub(r"(/[^/ ]*)+/?", '', message)
# remove words containing numbers
fingerprint = re.sub(r"([a-zA-Z]*[0-9]+[a-zA-Z]*)+", '', fingerprint)
# adding the return code if it exists
for line in message.splitlines():
if line.startswith("Return code"):
fingerprint += line
break
scope.fingerprint = [fingerprint]
sentry_sdk.capture_message(message, 'fatal')
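# Added illustrative note (not part of the original flow): the fallback fingerprint
# scrubbing above exists so that crashes differing only in file paths or numeric IDs
# collapse into a single Sentry issue. For a hypothetical message such as
# "OSError at /tmp/work123/file.nii.gz (node42)", the first regex strips the path and
# the second strips number-bearing tokens, leaving roughly "OSError at  ()" as the key.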
def before_send(event, hints):
# Filtering log messages about crashed nodes
if 'logentry' in event and 'message' in event['logentry']:
msg = event['logentry']['message']
if msg.startswith("could not run node:"):
return None
if msg.startswith("Saving crash info to "):
return None
if re.match("Node .+ failed to run on host .+", msg):
return None
if 'breadcrumbs' in event and isinstance(event['breadcrumbs'], list):
fingerprints_to_propagate = ['no-disk-space', 'memory-error', 'permission-denied',
'keyboard-interrupt']
for bc in event['breadcrumbs']:
msg = bc.get('message', 'empty-msg')
if msg in fingerprints_to_propagate:
event['fingerprint'] = [msg]
break
return event
def _chunks(string, length=CHUNK_SIZE):
"""
Splits a string into smaller chunks
>>> list(_chunks('some longer string.', length=3))
['som', 'e l', 'ong', 'er ', 'str', 'ing', '.']
"""
return (string[i:i + length]
for i in range(0, len(string), length))
| [] | [] | ["DOCKER_VERSION_8395080871", "FMRIPREP_DEV"] | [] | ["DOCKER_VERSION_8395080871", "FMRIPREP_DEV"] | python | 2 | 0 |
cmd/zt_generic_traverser_test.go | // Copyright © 2017 Microsoft <[email protected]>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package cmd
import (
"context"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
gcpUtils "cloud.google.com/go/storage"
"github.com/Azure/azure-storage-blob-go/azblob"
"github.com/Azure/azure-storage-file-go/azfile"
"github.com/minio/minio-go"
chk "gopkg.in/check.v1"
"github.com/Azure/azure-storage-azcopy/v10/azbfs"
"github.com/Azure/azure-storage-azcopy/v10/common"
"github.com/Azure/azure-storage-azcopy/v10/ste"
)
type genericTraverserSuite struct{}
var _ = chk.Suite(&genericTraverserSuite{})
// On Windows, if you don't hold adequate permissions to create a symlink, tests regarding symlinks will fail.
// This is arguably annoying to dig through, therefore, we cleanly skip the test.
func trySymlink(src, dst string, c *chk.C) {
if err := os.Symlink(src, dst); err != nil {
if strings.Contains(err.Error(), "A required privilege is not held by the client") {
c.Skip("client lacks required privilege to create symlinks; symlinks will not be tested")
}
c.Error(err)
}
}
// GetProperties tests.
// GetProperties does not exist on Blob, as the properties come in the list call.
// While BlobFS could get properties in the future, it's currently disabled as BFS source S2S isn't set up right now, and likely won't be.
func (s *genericTraverserSuite) TestFilesGetProperties(c *chk.C) {
fsu := getFSU()
share, shareName := createNewAzureShare(c, fsu)
fileName := generateAzureFileName()
headers := azfile.FileHTTPHeaders{
ContentType: "text/random",
ContentEncoding: "testEncoding",
ContentLanguage: "en-US",
ContentDisposition: "testDisposition",
CacheControl: "testCacheControl",
}
scenarioHelper{}.generateAzureFilesFromList(c, share, []string{fileName})
_, err := share.NewRootDirectoryURL().NewFileURL(fileName).SetHTTPHeaders(ctx, headers)
c.Assert(err, chk.IsNil)
shareURL := scenarioHelper{}.getRawShareURLWithSAS(c, shareName)
pipeline := azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{})
// first test reading from the share itself
traverser := newFileTraverser(&shareURL, pipeline, ctx, false, true, func(common.EntityType) {})
// embed the check into the processor for ease of use
seenContentType := false
processor := func(object StoredObject) error {
if object.entityType == common.EEntityType.File() {
// test all attributes (but only for files, since folders don't have them)
c.Assert(object.contentType, chk.Equals, headers.ContentType)
c.Assert(object.contentEncoding, chk.Equals, headers.ContentEncoding)
c.Assert(object.contentLanguage, chk.Equals, headers.ContentLanguage)
c.Assert(object.contentDisposition, chk.Equals, headers.ContentDisposition)
c.Assert(object.cacheControl, chk.Equals, headers.CacheControl)
seenContentType = true
}
return nil
}
err = traverser.Traverse(noPreProccessor, processor, nil)
c.Assert(err, chk.IsNil)
c.Assert(seenContentType, chk.Equals, true)
// then test reading from the filename exactly, because that's a different codepath.
seenContentType = false
fileURL := scenarioHelper{}.getRawFileURLWithSAS(c, shareName, fileName)
traverser = newFileTraverser(&fileURL, pipeline, ctx, false, true, func(common.EntityType) {})
err = traverser.Traverse(noPreProccessor, processor, nil)
c.Assert(err, chk.IsNil)
c.Assert(seenContentType, chk.Equals, true)
}
func (s *genericTraverserSuite) TestS3GetProperties(c *chk.C) {
skipIfS3Disabled(c)
client, err := createS3ClientWithMinio(createS3ResOptions{})
if err != nil {
// TODO: Alter all tests that use S3 credentials to just skip instead of failing
// This is useful for local testing, when we don't want to have to sift through errors related to S3 clients not being created
// Just so that we can test locally without interrupting CI.
c.Skip("S3-based tests will not be ran as no credentials were supplied.")
return // make syntax highlighting happy
}
headers := minio.PutObjectOptions{
ContentType: "text/random",
ContentEncoding: "testEncoding",
ContentLanguage: "en-US",
ContentDisposition: "testDisposition",
CacheControl: "testCacheControl",
}
bucketName := generateBucketName()
objectName := generateObjectName()
err = client.MakeBucket(bucketName, "")
defer deleteBucket(c, client, bucketName, false)
c.Assert(err, chk.IsNil)
_, err = client.PutObjectWithContext(ctx, bucketName, objectName, strings.NewReader(objectDefaultData), int64(len(objectDefaultData)), headers)
c.Assert(err, chk.IsNil)
// First test against the bucket
s3BucketURL := scenarioHelper{}.getRawS3BucketURL(c, "", bucketName)
credentialInfo := common.CredentialInfo{CredentialType: common.ECredentialType.S3AccessKey()}
traverser, err := newS3Traverser(credentialInfo.CredentialType, &s3BucketURL, ctx, false, true, func(common.EntityType) {})
c.Assert(err, chk.IsNil)
// Embed the check into the processor for ease of use
seenContentType := false
processor := func(object StoredObject) error {
// test all attributes
c.Assert(object.contentType, chk.Equals, headers.ContentType)
c.Assert(object.contentEncoding, chk.Equals, headers.ContentEncoding)
c.Assert(object.contentLanguage, chk.Equals, headers.ContentLanguage)
c.Assert(object.contentDisposition, chk.Equals, headers.ContentDisposition)
c.Assert(object.cacheControl, chk.Equals, headers.CacheControl)
seenContentType = true
return nil
}
err = traverser.Traverse(noPreProccessor, processor, nil)
c.Assert(err, chk.IsNil)
c.Assert(seenContentType, chk.Equals, true)
// Then, test against the object itself because that's a different codepath.
seenContentType = false
s3ObjectURL := scenarioHelper{}.getRawS3ObjectURL(c, "", bucketName, objectName)
credentialInfo = common.CredentialInfo{CredentialType: common.ECredentialType.S3AccessKey()}
traverser, err = newS3Traverser(credentialInfo.CredentialType, &s3ObjectURL, ctx, false, true, func(common.EntityType) {})
c.Assert(err, chk.IsNil)
err = traverser.Traverse(noPreProccessor, processor, nil)
c.Assert(err, chk.IsNil)
c.Assert(seenContentType, chk.Equals, true)
}
func (s *genericTraverserSuite) TestGCPGetProperties(c *chk.C) {
skipIfGCPDisabled(c)
client, err := createGCPClientWithGCSSDK()
if err != nil {
c.Skip("GCP-based tests will not be run as no credentials were supplied.")
return
}
headers := gcpUtils.ObjectAttrsToUpdate{
ContentType: "text/html",
ContentEncoding: "gzip",
ContentLanguage: "en",
ContentDisposition: "inline",
CacheControl: "no-cache",
}
bucketName := generateBucketName()
objectName := generateObjectName()
bkt := client.Bucket(bucketName)
err = bkt.Create(context.Background(), os.Getenv("GOOGLE_CLOUD_PROJECT"), &gcpUtils.BucketAttrs{})
defer deleteGCPBucket(c, client, bucketName, false)
c.Assert(err, chk.IsNil)
reader := strings.NewReader(objectDefaultData)
obj := bkt.Object(objectName)
wc := obj.NewWriter(ctx)
n, err := io.Copy(wc, reader)
c.Assert(err, chk.IsNil)
c.Assert(n, chk.Equals, int64(len(objectDefaultData)))
err = wc.Close()
c.Assert(err, chk.IsNil)
_, err = obj.Update(ctx, headers)
c.Assert(err, chk.IsNil)
// First test against the bucket
gcpBucketURL := scenarioHelper{}.getRawGCPBucketURL(c, bucketName)
traverser, err := newGCPTraverser(&gcpBucketURL, ctx, false, true, func(common.EntityType) {})
c.Assert(err, chk.IsNil)
// Embed the check into the processor for ease of use
seenContentType := false
processor := func(object StoredObject) error {
// test all attributes
c.Assert(object.contentType, chk.Equals, headers.ContentType)
c.Assert(object.contentEncoding, chk.Equals, headers.ContentEncoding)
c.Assert(object.contentLanguage, chk.Equals, headers.ContentLanguage)
c.Assert(object.contentDisposition, chk.Equals, headers.ContentDisposition)
c.Assert(object.cacheControl, chk.Equals, headers.CacheControl)
seenContentType = true
return nil
}
err = traverser.Traverse(noPreProccessor, processor, nil)
c.Assert(err, chk.IsNil)
c.Assert(seenContentType, chk.Equals, true)
// Then, test against the object itself because that's a different codepath.
seenContentType = false
gcpObjectURL := scenarioHelper{}.getRawGCPObjectURL(c, bucketName, objectName)
traverser, err = newGCPTraverser(&gcpObjectURL, ctx, false, true, func(common.EntityType) {})
c.Assert(err, chk.IsNil)
err = traverser.Traverse(noPreProccessor, processor, nil)
c.Assert(err, chk.IsNil)
c.Assert(seenContentType, chk.Equals, true)
}
// Test follow symlink functionality
func (s *genericTraverserSuite) TestWalkWithSymlinks_ToFolder(c *chk.C) {
fileNames := []string{"March 20th is international happiness day.txt", "wonderwall but it goes on and on and on.mp3", "bonzi buddy.exe"}
tmpDir := scenarioHelper{}.generateLocalDirectory(c)
defer os.RemoveAll(tmpDir)
symlinkTmpDir := scenarioHelper{}.generateLocalDirectory(c)
defer os.RemoveAll(symlinkTmpDir)
c.Assert(tmpDir, chk.Not(chk.Equals), symlinkTmpDir)
scenarioHelper{}.generateLocalFilesFromList(c, tmpDir, fileNames)
scenarioHelper{}.generateLocalFilesFromList(c, symlinkTmpDir, fileNames)
dirLinkName := "so long and thanks for all the fish"
time.Sleep(2 * time.Second) // to be sure to get different LMT for link, compared to root, so we can make assertions later about whose fileInfo we get
trySymlink(symlinkTmpDir, filepath.Join(tmpDir, dirLinkName), c)
fileCount := 0
sawLinkTargetDir := false
c.Assert(WalkWithSymlinks(tmpDir, func(path string, fi os.FileInfo, err error) error {
c.Assert(err, chk.IsNil)
if fi.IsDir() {
if fi.Name() == dirLinkName {
sawLinkTargetDir = true
s, _ := os.Stat(symlinkTmpDir)
c.Assert(fi.ModTime().UTC(), chk.Equals, s.ModTime().UTC())
}
return nil
}
fileCount++
return nil
},
true), chk.IsNil)
// 3 files live in base, 3 files live in symlink
c.Assert(fileCount, chk.Equals, 6)
c.Assert(sawLinkTargetDir, chk.Equals, true)
}
// Next test is temporarily disabled, to avoid changing functionality near 10.4 release date
/*
// symlinks are not just to folders. They may be to individual files
func (s *genericTraverserSuite) TestWalkWithSymlinks_ToFile(c *chk.C) {
mainDirFilenames := []string{"iAmANormalFile.txt"}
symlinkTargetFilenames := []string{"iAmASymlinkTargetFile.txt"}
tmpDir := scenarioHelper{}.generateLocalDirectory(c)
defer os.RemoveAll(tmpDir)
symlinkTmpDir := scenarioHelper{}.generateLocalDirectory(c)
defer os.RemoveAll(symlinkTmpDir)
c.Assert(tmpDir, chk.Not(chk.Equals), symlinkTmpDir)
scenarioHelper{}.generateLocalFilesFromList(c, tmpDir, mainDirFilenames)
scenarioHelper{}.generateLocalFilesFromList(c, symlinkTmpDir, symlinkTargetFilenames)
trySymlink(filepath.Join(symlinkTmpDir, symlinkTargetFilenames[0]), filepath.Join(tmpDir, "iPointToTheSymlink"), c)
trySymlink(filepath.Join(symlinkTmpDir, symlinkTargetFilenames[0]), filepath.Join(tmpDir, "iPointToTheSameSymlink"), c)
fileCount := 0
c.Assert(WalkWithSymlinks(tmpDir, func(path string, fi os.FileInfo, err error) error {
c.Assert(err, chk.IsNil)
if fi.IsDir() {
return nil
}
fileCount++
if fi.Name() != "iAmANormalFile.txt" {
c.Assert(strings.HasPrefix(path, tmpDir), chk.Equals, true) // the file appears to have the location of the symlink source (not the dest)
c.Assert(strings.HasPrefix(filepath.Base(path), "iPoint"), chk.Equals, true) // the file appears to have the name of the symlink source (not the dest)
c.Assert(strings.HasPrefix(fi.Name(), "iPoint"), chk.Equals, true) // and it still appears to have that name when we look it the fileInfo
}
return nil
},
true), chk.IsNil)
// 1 file is in base, 2 are pointed to by a symlink (the fact that both point to the same file is does NOT prevent us
// processing them both. For efficiency of dedupe algorithm, we only dedupe directories, not files).
c.Assert(fileCount, chk.Equals, 3)
}
*/
// Test cancel symlink loop functionality
func (s *genericTraverserSuite) TestWalkWithSymlinksBreakLoop(c *chk.C) {
fileNames := []string{"stonks.txt", "jaws but its a baby shark.mp3", "my crow soft.txt"}
tmpDir := scenarioHelper{}.generateLocalDirectory(c)
defer os.RemoveAll(tmpDir)
scenarioHelper{}.generateLocalFilesFromList(c, tmpDir, fileNames)
trySymlink(tmpDir, filepath.Join(tmpDir, "spinloop"), c)
// Only 3 files should ever be found.
// This is because the symlink links back to the root dir
fileCount := 0
c.Assert(WalkWithSymlinks(tmpDir, func(path string, fi os.FileInfo, err error) error {
c.Assert(err, chk.IsNil)
if fi.IsDir() {
return nil
}
fileCount++
return nil
},
true), chk.IsNil)
c.Assert(fileCount, chk.Equals, 3)
}
// Test ability to dedupe within the same directory
func (s *genericTraverserSuite) TestWalkWithSymlinksDedupe(c *chk.C) {
fileNames := []string{"stonks.txt", "jaws but its a baby shark.mp3", "my crow soft.txt"}
tmpDir := scenarioHelper{}.generateLocalDirectory(c)
defer os.RemoveAll(tmpDir)
symlinkTmpDir, err := ioutil.TempDir(tmpDir, "subdir")
c.Assert(err, chk.IsNil)
scenarioHelper{}.generateLocalFilesFromList(c, tmpDir, fileNames)
scenarioHelper{}.generateLocalFilesFromList(c, symlinkTmpDir, fileNames)
trySymlink(symlinkTmpDir, filepath.Join(tmpDir, "symlinkdir"), c)
// Only 6 files should ever be found.
// 3 in the root dir, 3 in subdir, then symlinkdir should be ignored because it's been seen.
fileCount := 0
c.Assert(WalkWithSymlinks(tmpDir, func(path string, fi os.FileInfo, err error) error {
c.Assert(err, chk.IsNil)
if fi.IsDir() {
return nil
}
fileCount++
return nil
},
true), chk.IsNil)
c.Assert(fileCount, chk.Equals, 6)
}
// Test ability to only get the output of one symlink when two point to the same place
func (s *genericTraverserSuite) TestWalkWithSymlinksMultitarget(c *chk.C) {
fileNames := []string{"March 20th is international happiness day.txt", "wonderwall but it goes on and on and on.mp3", "bonzi buddy.exe"}
tmpDir := scenarioHelper{}.generateLocalDirectory(c)
defer os.RemoveAll(tmpDir)
symlinkTmpDir := scenarioHelper{}.generateLocalDirectory(c)
defer os.RemoveAll(symlinkTmpDir)
c.Assert(tmpDir, chk.Not(chk.Equals), symlinkTmpDir)
scenarioHelper{}.generateLocalFilesFromList(c, tmpDir, fileNames)
scenarioHelper{}.generateLocalFilesFromList(c, symlinkTmpDir, fileNames)
trySymlink(symlinkTmpDir, filepath.Join(tmpDir, "so long and thanks for all the fish"), c)
trySymlink(symlinkTmpDir, filepath.Join(tmpDir, "extradir"), c)
trySymlink(filepath.Join(tmpDir, "extradir"), filepath.Join(tmpDir, "linktolink"), c)
fileCount := 0
c.Assert(WalkWithSymlinks(tmpDir, func(path string, fi os.FileInfo, err error) error {
c.Assert(err, chk.IsNil)
if fi.IsDir() {
return nil
}
fileCount++
return nil
},
true), chk.IsNil)
// 3 files live in base, 3 files live in first symlink, second & third symlink is ignored.
c.Assert(fileCount, chk.Equals, 6)
}
func (s *genericTraverserSuite) TestWalkWithSymlinksToParentAndChild(c *chk.C) {
fileNames := []string{"file1.txt", "file2.txt", "file3.txt"}
root1 := scenarioHelper{}.generateLocalDirectory(c)
defer os.RemoveAll(root1)
root2 := scenarioHelper{}.generateLocalDirectory(c)
defer os.RemoveAll(root2)
child, err := ioutil.TempDir(root2, "childdir")
c.Assert(err, chk.IsNil)
scenarioHelper{}.generateLocalFilesFromList(c, root2, fileNames)
scenarioHelper{}.generateLocalFilesFromList(c, child, fileNames)
trySymlink(root2, filepath.Join(root1, "toroot"), c)
trySymlink(child, filepath.Join(root1, "tochild"), c)
fileCount := 0
c.Assert(WalkWithSymlinks(root1, func(path string, fi os.FileInfo, err error) error {
c.Assert(err, chk.IsNil)
if fi.IsDir() {
return nil
}
fileCount++
return nil
},
true), chk.IsNil)
// 6 files total live under toroot. tochild should be ignored (or if tochild was traversed first, child will be ignored on toroot).
c.Assert(fileCount, chk.Equals, 6)
}
// validate traversing a single Blob, a single Azure File, and a single local file
// compare that the traversers get consistent results
func (s *genericTraverserSuite) TestTraverserWithSingleObject(c *chk.C) {
bsu := getBSU()
containerURL, containerName := createNewContainer(c, bsu)
defer deleteContainer(c, containerURL)
fsu := getFSU()
shareURL, shareName := createNewAzureShare(c, fsu)
defer deleteShare(c, shareURL)
bfsu := GetBFSSU()
filesystemURL, _ := createNewFilesystem(c, bfsu)
defer deleteFilesystem(c, filesystemURL)
s3Client, err := createS3ClientWithMinio(createS3ResOptions{})
s3Enabled := err == nil && !isS3Disabled()
gcpClient, err := createGCPClientWithGCSSDK()
gcpEnabled := err == nil && !gcpTestsDisabled()
var bucketName string
var bucketNameGCP string
if s3Enabled {
bucketName = createNewBucket(c, s3Client, createS3ResOptions{})
defer deleteBucket(c, s3Client, bucketName, true)
}
if gcpEnabled {
bucketNameGCP = createNewGCPBucket(c, gcpClient)
defer deleteGCPBucket(c, gcpClient, bucketNameGCP, true)
}
// test two scenarios, either blob is at the root virtual dir, or inside sub virtual dirs
for _, storedObjectName := range []string{"sub1/sub2/singleblobisbest", "nosubsingleblob", "满汉全席.txt"} {
// set up the container with a single blob
blobList := []string{storedObjectName}
scenarioHelper{}.generateBlobsFromList(c, containerURL, blobList, blockBlobDefaultData)
// set up the directory as a single file
dstDirName := scenarioHelper{}.generateLocalDirectory(c)
defer os.RemoveAll(dstDirName)
dstFileName := storedObjectName
scenarioHelper{}.generateLocalFilesFromList(c, dstDirName, blobList)
// construct a local traverser
localTraverser := newLocalTraverser(filepath.Join(dstDirName, dstFileName), false, false, func(common.EntityType) {})
// invoke the local traversal with a dummy processor
localDummyProcessor := dummyProcessor{}
err := localTraverser.Traverse(noPreProccessor, localDummyProcessor.process, nil)
c.Assert(err, chk.IsNil)
c.Assert(len(localDummyProcessor.record), chk.Equals, 1)
// construct a blob traverser
ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion)
p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})
rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, blobList[0])
blobTraverser := newBlobTraverser(&rawBlobURLWithSAS, p, ctx, false, false,
func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false)
// invoke the blob traversal with a dummy processor
blobDummyProcessor := dummyProcessor{}
err = blobTraverser.Traverse(noPreProccessor, blobDummyProcessor.process, nil)
c.Assert(err, chk.IsNil)
c.Assert(len(blobDummyProcessor.record), chk.Equals, 1)
// assert the important info are correct
c.Assert(localDummyProcessor.record[0].name, chk.Equals, blobDummyProcessor.record[0].name)
c.Assert(localDummyProcessor.record[0].relativePath, chk.Equals, blobDummyProcessor.record[0].relativePath)
// Azure File cannot handle names with '/' in them
// TODO: Construct a directory URL and then build a file URL atop it in order to solve this portion of the test.
// We shouldn't be excluding things the traverser is actually capable of doing.
// Fix within scenarioHelper.generateAzureFilesFromList, since that's what causes the fail.
if !strings.Contains(storedObjectName, "/") {
// set up the Azure Share with a single file
fileList := []string{storedObjectName}
scenarioHelper{}.generateAzureFilesFromList(c, shareURL, fileList)
// construct an Azure file traverser
filePipeline := azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{})
rawFileURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(c, shareName, fileList[0])
azureFileTraverser := newFileTraverser(&rawFileURLWithSAS, filePipeline, ctx, false, false, func(common.EntityType) {})
// invoke the file traversal with a dummy processor
fileDummyProcessor := dummyProcessor{}
err = azureFileTraverser.Traverse(noPreProccessor, fileDummyProcessor.process, nil)
c.Assert(err, chk.IsNil)
c.Assert(len(fileDummyProcessor.record), chk.Equals, 1)
c.Assert(localDummyProcessor.record[0].relativePath, chk.Equals, fileDummyProcessor.record[0].relativePath)
c.Assert(localDummyProcessor.record[0].name, chk.Equals, fileDummyProcessor.record[0].name)
}
// set up the filesystem with a single file
bfsList := []string{storedObjectName}
scenarioHelper{}.generateBFSPathsFromList(c, filesystemURL, bfsList)
// construct a BlobFS traverser
accountName, accountKey := getAccountAndKey()
bfsPipeline := azbfs.NewPipeline(azbfs.NewSharedKeyCredential(accountName, accountKey), azbfs.PipelineOptions{})
rawFileURL := filesystemURL.NewRootDirectoryURL().NewFileURL(bfsList[0]).URL()
bfsTraverser := newBlobFSTraverser(&rawFileURL, bfsPipeline, ctx, false, func(common.EntityType) {})
// Construct and run a dummy processor for bfs
bfsDummyProcessor := dummyProcessor{}
err = bfsTraverser.Traverse(noPreProccessor, bfsDummyProcessor.process, nil)
c.Assert(err, chk.IsNil)
c.Assert(len(bfsDummyProcessor.record), chk.Equals, 1)
c.Assert(localDummyProcessor.record[0].relativePath, chk.Equals, bfsDummyProcessor.record[0].relativePath)
c.Assert(localDummyProcessor.record[0].name, chk.Equals, bfsDummyProcessor.record[0].name)
if s3Enabled {
// set up the bucket with a single file
s3List := []string{storedObjectName}
scenarioHelper{}.generateObjects(c, s3Client, bucketName, s3List)
// construct a s3 traverser
s3DummyProcessor := dummyProcessor{}
url := scenarioHelper{}.getRawS3ObjectURL(c, "", bucketName, storedObjectName)
credentialInfo := common.CredentialInfo{CredentialType: common.ECredentialType.S3AccessKey()}
S3Traverser, err := newS3Traverser(credentialInfo.CredentialType, &url, ctx, false, false, func(common.EntityType) {})
c.Assert(err, chk.IsNil)
err = S3Traverser.Traverse(noPreProccessor, s3DummyProcessor.process, nil)
c.Assert(err, chk.IsNil)
c.Assert(len(s3DummyProcessor.record), chk.Equals, 1)
c.Assert(localDummyProcessor.record[0].relativePath, chk.Equals, s3DummyProcessor.record[0].relativePath)
c.Assert(localDummyProcessor.record[0].name, chk.Equals, s3DummyProcessor.record[0].name)
}
if gcpEnabled {
gcpList := []string{storedObjectName}
scenarioHelper{}.generateGCPObjects(c, gcpClient, bucketNameGCP, gcpList)
gcpDummyProcessor := dummyProcessor{}
gcpURL := scenarioHelper{}.getRawGCPObjectURL(c, bucketNameGCP, storedObjectName)
GCPTraverser, err := newGCPTraverser(&gcpURL, ctx, false, false, func(entityType common.EntityType) {})
c.Assert(err, chk.IsNil)
err = GCPTraverser.Traverse(noPreProccessor, gcpDummyProcessor.process, nil)
c.Assert(err, chk.IsNil)
c.Assert(len(gcpDummyProcessor.record), chk.Equals, 1)
c.Assert(localDummyProcessor.record[0].relativePath, chk.Equals, gcpDummyProcessor.record[0].relativePath)
c.Assert(localDummyProcessor.record[0].name, chk.Equals, gcpDummyProcessor.record[0].name)
}
}
}
// validate traversing a container, a share, and a local directory containing the same objects
// compare that traversers get consistent results
func (s *genericTraverserSuite) TestTraverserContainerAndLocalDirectory(c *chk.C) {
bsu := getBSU()
containerURL, containerName := createNewContainer(c, bsu)
defer deleteContainer(c, containerURL)
fsu := getFSU()
shareURL, shareName := createNewAzureShare(c, fsu)
defer deleteShare(c, shareURL)
bfsu := GetBFSSU()
filesystemURL, _ := createNewFilesystem(c, bfsu)
defer deleteFilesystem(c, filesystemURL)
s3Client, err := createS3ClientWithMinio(createS3ResOptions{})
s3Enabled := err == nil && !isS3Disabled() // are creds supplied, and is S3 enabled
gcpClient, err := createGCPClientWithGCSSDK()
gcpEnabled := err == nil && !gcpTestsDisabled()
var bucketName string
var bucketNameGCP string
if s3Enabled {
bucketName = createNewBucket(c, s3Client, createS3ResOptions{})
defer deleteBucket(c, s3Client, bucketName, true)
}
if gcpEnabled {
bucketNameGCP = createNewGCPBucket(c, gcpClient)
defer deleteGCPBucket(c, gcpClient, bucketNameGCP, true)
}
// set up the container with numerous blobs
fileList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, "")
c.Assert(containerURL, chk.NotNil)
// set up an Azure File Share with the same files
scenarioHelper{}.generateAzureFilesFromList(c, shareURL, fileList)
// set up a filesystem with the same files
scenarioHelper{}.generateBFSPathsFromList(c, filesystemURL, fileList)
if s3Enabled {
// set up a bucket with the same files
scenarioHelper{}.generateObjects(c, s3Client, bucketName, fileList)
}
if gcpEnabled {
scenarioHelper{}.generateGCPObjects(c, gcpClient, bucketNameGCP, fileList)
}
dstDirName := scenarioHelper{}.generateLocalDirectory(c)
defer os.RemoveAll(dstDirName)
scenarioHelper{}.generateLocalFilesFromList(c, dstDirName, fileList)
// test two scenarios, either recursive or not
for _, isRecursiveOn := range []bool{true, false} {
// construct a local traverser
localTraverser := newLocalTraverser(dstDirName, isRecursiveOn, false, func(common.EntityType) {})
// invoke the local traversal with an indexer
// so that the results are indexed for easy validation
localIndexer := newObjectIndexer()
err := localTraverser.Traverse(noPreProccessor, localIndexer.store, nil)
c.Assert(err, chk.IsNil)
// construct a blob traverser
ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion)
p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})
rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName)
blobTraverser := newBlobTraverser(&rawContainerURLWithSAS, p, ctx, isRecursiveOn, false,
func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false)
// invoke the local traversal with a dummy processor
blobDummyProcessor := dummyProcessor{}
err = blobTraverser.Traverse(noPreProccessor, blobDummyProcessor.process, nil)
c.Assert(err, chk.IsNil)
// construct an Azure File traverser
filePipeline := azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{})
rawFileURLWithSAS := scenarioHelper{}.getRawShareURLWithSAS(c, shareName)
azureFileTraverser := newFileTraverser(&rawFileURLWithSAS, filePipeline, ctx, isRecursiveOn, false, func(common.EntityType) {})
// invoke the file traversal with a dummy processor
fileDummyProcessor := dummyProcessor{}
err = azureFileTraverser.Traverse(noPreProccessor, fileDummyProcessor.process, nil)
c.Assert(err, chk.IsNil)
// construct a directory URL and pipeline
accountName, accountKey := getAccountAndKey()
bfsPipeline := azbfs.NewPipeline(azbfs.NewSharedKeyCredential(accountName, accountKey), azbfs.PipelineOptions{})
rawFilesystemURL := filesystemURL.NewRootDirectoryURL().URL()
// construct and run a FS traverser
bfsTraverser := newBlobFSTraverser(&rawFilesystemURL, bfsPipeline, ctx, isRecursiveOn, func(common.EntityType) {})
bfsDummyProcessor := dummyProcessor{}
err = bfsTraverser.Traverse(noPreProccessor, bfsDummyProcessor.process, nil)
c.Assert(err, chk.IsNil)
s3DummyProcessor := dummyProcessor{}
gcpDummyProcessor := dummyProcessor{}
if s3Enabled {
// construct and run a S3 traverser
rawS3URL := scenarioHelper{}.getRawS3BucketURL(c, "", bucketName)
credentialInfo := common.CredentialInfo{CredentialType: common.ECredentialType.S3AccessKey()}
S3Traverser, err := newS3Traverser(credentialInfo.CredentialType, &rawS3URL, ctx, isRecursiveOn, false, func(common.EntityType) {})
c.Assert(err, chk.IsNil)
err = S3Traverser.Traverse(noPreProccessor, s3DummyProcessor.process, nil)
c.Assert(err, chk.IsNil)
}
if gcpEnabled {
rawGCPURL := scenarioHelper{}.getRawGCPBucketURL(c, bucketNameGCP)
GCPTraverser, err := newGCPTraverser(&rawGCPURL, ctx, isRecursiveOn, false, func(entityType common.EntityType) {})
c.Assert(err, chk.IsNil)
err = GCPTraverser.Traverse(noPreProccessor, gcpDummyProcessor.process, nil)
c.Assert(err, chk.IsNil)
}
// make sure the results are as expected
localTotalCount := len(localIndexer.indexMap)
localFileOnlyCount := 0
for _, x := range localIndexer.indexMap {
if x.entityType == common.EEntityType.File() {
localFileOnlyCount++
}
}
c.Assert(len(blobDummyProcessor.record), chk.Equals, localFileOnlyCount)
if isRecursiveOn {
c.Assert(len(fileDummyProcessor.record), chk.Equals, localTotalCount)
c.Assert(len(bfsDummyProcessor.record), chk.Equals, localTotalCount)
} else {
// in real usage, folders get stripped out in ToNewCopyTransfer when non-recursive,
// but that doesn't run here in this test,
// so we have to count files only on the processor
c.Assert(fileDummyProcessor.countFilesOnly(), chk.Equals, localTotalCount)
c.Assert(bfsDummyProcessor.countFilesOnly(), chk.Equals, localTotalCount)
}
if s3Enabled {
c.Assert(len(s3DummyProcessor.record), chk.Equals, localFileOnlyCount)
}
if gcpEnabled {
c.Assert(len(gcpDummyProcessor.record), chk.Equals, localFileOnlyCount)
}
// if s3dummyprocessor is empty, it's A-OK because no records will be tested
for _, storedObject := range append(append(append(append(blobDummyProcessor.record, fileDummyProcessor.record...), bfsDummyProcessor.record...), s3DummyProcessor.record...), gcpDummyProcessor.record...) {
if isRecursiveOn || storedObject.entityType == common.EEntityType.File() { // folder enumeration knowingly NOT consistent when non-recursive (since the folders get stripped out by ToNewCopyTransfer when non-recursive anyway)
correspondingLocalFile, present := localIndexer.indexMap[storedObject.relativePath]
c.Assert(present, chk.Equals, true)
c.Assert(correspondingLocalFile.name, chk.Equals, storedObject.name)
if !isRecursiveOn {
c.Assert(strings.Contains(storedObject.relativePath, common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false)
}
}
}
}
}
// validate traversing a virtual and a local directory containing the same objects
// compare that blob and local traversers get consistent results
func (s *genericTraverserSuite) TestTraverserWithVirtualAndLocalDirectory(c *chk.C) {
bsu := getBSU()
containerURL, containerName := createNewContainer(c, bsu)
defer deleteContainer(c, containerURL)
fsu := getFSU()
shareURL, shareName := createNewAzureShare(c, fsu)
defer deleteShare(c, shareURL)
bfsu := GetBFSSU()
filesystemURL, _ := createNewFilesystem(c, bfsu)
defer deleteFilesystem(c, filesystemURL)
s3Client, err := createS3ClientWithMinio(createS3ResOptions{})
s3Enabled := err == nil && !isS3Disabled()
gcpClient, err := createGCPClientWithGCSSDK()
gcpEnabled := err == nil && !gcpTestsDisabled()
var bucketName, bucketNameGCP string
if s3Enabled {
bucketName = createNewBucket(c, s3Client, createS3ResOptions{})
defer deleteBucket(c, s3Client, bucketName, true)
}
if gcpEnabled {
bucketNameGCP = createNewGCPBucket(c, gcpClient)
defer deleteGCPBucket(c, gcpClient, bucketNameGCP, true)
}
// set up the container with numerous blobs
virDirName := "virdir"
fileList := scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, virDirName+"/")
c.Assert(containerURL, chk.NotNil)
// set up an Azure File Share with the same files
scenarioHelper{}.generateAzureFilesFromList(c, shareURL, fileList)
// set up the filesystem with the same files
scenarioHelper{}.generateBFSPathsFromList(c, filesystemURL, fileList)
if s3Enabled {
// Set up the bucket with the same files
scenarioHelper{}.generateObjects(c, s3Client, bucketName, fileList)
}
if gcpEnabled {
scenarioHelper{}.generateGCPObjects(c, gcpClient, bucketNameGCP, fileList)
}
time.Sleep(time.Second * 2) // Ensure the objects' LMTs are in the past
// set up the destination with a folder that have the exact same files
dstDirName := scenarioHelper{}.generateLocalDirectory(c)
defer os.RemoveAll(dstDirName)
scenarioHelper{}.generateLocalFilesFromList(c, dstDirName, fileList)
// test two scenarios, either recursive or not
for _, isRecursiveOn := range []bool{true, false} {
// construct a local traverser
localTraverser := newLocalTraverser(filepath.Join(dstDirName, virDirName), isRecursiveOn, false, func(common.EntityType) {})
// invoke the local traversal with an indexer
// so that the results are indexed for easy validation
localIndexer := newObjectIndexer()
err := localTraverser.Traverse(noPreProccessor, localIndexer.store, nil)
c.Assert(err, chk.IsNil)
// construct a blob traverser
ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion)
p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})
rawVirDirURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, virDirName)
blobTraverser := newBlobTraverser(&rawVirDirURLWithSAS, p, ctx, isRecursiveOn, false,
func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false)
// invoke the local traversal with a dummy processor
blobDummyProcessor := dummyProcessor{}
err = blobTraverser.Traverse(noPreProccessor, blobDummyProcessor.process, nil)
c.Assert(err, chk.IsNil)
// construct an Azure File traverser
filePipeline := azfile.NewPipeline(azfile.NewAnonymousCredential(), azfile.PipelineOptions{})
rawFileURLWithSAS := scenarioHelper{}.getRawFileURLWithSAS(c, shareName, virDirName)
azureFileTraverser := newFileTraverser(&rawFileURLWithSAS, filePipeline, ctx, isRecursiveOn, false, func(common.EntityType) {})
// invoke the file traversal with a dummy processor
fileDummyProcessor := dummyProcessor{}
err = azureFileTraverser.Traverse(noPreProccessor, fileDummyProcessor.process, nil)
c.Assert(err, chk.IsNil)
// construct a filesystem URL & pipeline
accountName, accountKey := getAccountAndKey()
bfsPipeline := azbfs.NewPipeline(azbfs.NewSharedKeyCredential(accountName, accountKey), azbfs.PipelineOptions{})
rawFilesystemURL := filesystemURL.NewRootDirectoryURL().NewDirectoryURL(virDirName).URL()
// construct and run a FS traverser
bfsTraverser := newBlobFSTraverser(&rawFilesystemURL, bfsPipeline, ctx, isRecursiveOn, func(common.EntityType) {})
bfsDummyProcessor := dummyProcessor{}
err = bfsTraverser.Traverse(noPreProccessor, bfsDummyProcessor.process, nil)
localTotalCount := len(localIndexer.indexMap)
localFileOnlyCount := 0
for _, x := range localIndexer.indexMap {
if x.entityType == common.EEntityType.File() {
localFileOnlyCount++
}
}
s3DummyProcessor := dummyProcessor{}
gcpDummyProcessor := dummyProcessor{}
if s3Enabled {
// construct and run a S3 traverser
// directory object keys always end with / in S3
rawS3URL := scenarioHelper{}.getRawS3ObjectURL(c, "", bucketName, virDirName+"/")
credentialInfo := common.CredentialInfo{CredentialType: common.ECredentialType.S3AccessKey()}
S3Traverser, err := newS3Traverser(credentialInfo.CredentialType, &rawS3URL, ctx, isRecursiveOn, false, func(common.EntityType) {})
c.Assert(err, chk.IsNil)
err = S3Traverser.Traverse(noPreProccessor, s3DummyProcessor.process, nil)
c.Assert(err, chk.IsNil)
// check that the results are the same length
c.Assert(len(s3DummyProcessor.record), chk.Equals, localFileOnlyCount)
}
if gcpEnabled {
rawGCPURL := scenarioHelper{}.getRawGCPObjectURL(c, bucketNameGCP, virDirName+"/")
GCPTraverser, err := newGCPTraverser(&rawGCPURL, ctx, isRecursiveOn, false, func(common.EntityType) {})
c.Assert(err, chk.IsNil)
err = GCPTraverser.Traverse(noPreProccessor, gcpDummyProcessor.process, nil)
c.Assert(err, chk.IsNil)
c.Assert(len(gcpDummyProcessor.record), chk.Equals, localFileOnlyCount)
}
// make sure the results are as expected
c.Assert(len(blobDummyProcessor.record), chk.Equals, localFileOnlyCount)
if isRecursiveOn {
c.Assert(len(fileDummyProcessor.record), chk.Equals, localTotalCount)
c.Assert(len(bfsDummyProcessor.record), chk.Equals, localTotalCount)
} else {
// only files matter when not recursive (since ToNewCopyTransfer strips out everything else when non-recursive)
c.Assert(fileDummyProcessor.countFilesOnly(), chk.Equals, localTotalCount)
c.Assert(bfsDummyProcessor.countFilesOnly(), chk.Equals, localTotalCount)
}
// if S3/GCP testing is disabled, the corresponding dummy processors' records are empty, so appending them below is a harmless no-op
for _, storedObject := range append(append(append(append(blobDummyProcessor.record, fileDummyProcessor.record...), bfsDummyProcessor.record...), s3DummyProcessor.record...), gcpDummyProcessor.record...) {
if isRecursiveOn || storedObject.entityType == common.EEntityType.File() { // folder enumeration knowingly NOT consistent when non-recursive (since the folders get stripped out by ToNewCopyTransfer when non-recursive anyway)
correspondingLocalFile, present := localIndexer.indexMap[storedObject.relativePath]
c.Assert(present, chk.Equals, true)
c.Assert(correspondingLocalFile.name, chk.Equals, storedObject.name)
// The local files were generated after the remote objects (see the Sleep above),
// so each local file should be more recent than its remote counterpart.
c.Assert(correspondingLocalFile.isMoreRecentThan(storedObject), chk.Equals, true)
if !isRecursiveOn {
c.Assert(strings.Contains(storedObject.relativePath, common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false)
}
}
}
}
}
// validate traversing a virtual directory containing the same objects
// compare that the serial and parallel blob traversers get consistent results
func (s *genericTraverserSuite) TestSerialAndParallelBlobTraverser(c *chk.C) {
bsu := getBSU()
containerURL, containerName := createNewContainer(c, bsu)
defer deleteContainer(c, containerURL)
// set up the container with numerous blobs
virDirName := "virdir"
scenarioHelper{}.generateCommonRemoteScenarioForBlob(c, containerURL, virDirName+"/")
c.Assert(containerURL, chk.NotNil)
// test two scenarios, either recursive or not
for _, isRecursiveOn := range []bool{true, false} {
// construct a parallel blob traverser
ctx := context.WithValue(context.TODO(), ste.ServiceAPIVersionOverride, ste.DefaultServiceApiVersion)
p := azblob.NewPipeline(azblob.NewAnonymousCredential(), azblob.PipelineOptions{})
rawVirDirURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, virDirName)
parallelBlobTraverser := newBlobTraverser(&rawVirDirURLWithSAS, p, ctx, isRecursiveOn, false,
func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false)
// construct a serial blob traverser
serialBlobTraverser := newBlobTraverser(&rawVirDirURLWithSAS, p, ctx, isRecursiveOn, false,
func(common.EntityType) {}, false, common.CpkOptions{}, false, false, false)
serialBlobTraverser.parallelListing = false
// invoke the parallel traversal with a dummy processor
parallelDummyProcessor := dummyProcessor{}
err := parallelBlobTraverser.Traverse(noPreProccessor, parallelDummyProcessor.process, nil)
c.Assert(err, chk.IsNil)
// invoke the serial traversal with a dummy processor
serialDummyProcessor := dummyProcessor{}
err = serialBlobTraverser.Traverse(noPreProccessor, serialDummyProcessor.process, nil)
c.Assert(err, chk.IsNil)
// make sure the results are as expected
c.Assert(len(parallelDummyProcessor.record), chk.Equals, len(serialDummyProcessor.record))
// compare the entries one by one
lookupMap := make(map[string]StoredObject)
for _, entry := range parallelDummyProcessor.record {
lookupMap[entry.relativePath] = entry
}
for _, storedObject := range serialDummyProcessor.record {
correspondingFile, present := lookupMap[storedObject.relativePath]
c.Assert(present, chk.Equals, true)
c.Assert(storedObject.lastModifiedTime, chk.DeepEquals, correspondingFile.lastModifiedTime)
c.Assert(storedObject.md5, chk.DeepEquals, correspondingFile.md5)
}
}
}
| [
"\"GOOGLE_CLOUD_PROJECT\""
]
| []
| [
"GOOGLE_CLOUD_PROJECT"
]
| [] | ["GOOGLE_CLOUD_PROJECT"] | go | 1 | 0 | |
gqt/runner/runner.go | package runner
import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"os/exec"
"path/filepath"
"reflect"
"runtime"
"strings"
"syscall"
"time"
"code.cloudfoundry.org/garden"
"code.cloudfoundry.org/garden/client"
"code.cloudfoundry.org/garden/client/connection"
"code.cloudfoundry.org/guardian/gqt/cgrouper"
"code.cloudfoundry.org/lager"
"code.cloudfoundry.org/lager/lagertest"
multierror "github.com/hashicorp/go-multierror"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
"github.com/tedsuo/ifrit"
"github.com/tedsuo/ifrit/ginkgomon"
)
type GdnRunnerConfig struct {
TmpDir string
User UserCredential
ConfigFilePath string
Socket2meBin string
Socket2meSocketPath string
RuncRoot string
// Garden config
GdnBin string
GrootBin string
TarBin string `flag:"tar-bin"`
InitBin string `flag:"init-bin"`
RuntimePluginBin string `flag:"runtime-plugin"`
ImagePluginBin string `flag:"image-plugin"`
PrivilegedImagePluginBin string `flag:"privileged-image-plugin"`
NetworkPluginBin string `flag:"network-plugin"`
ExecRunnerBin string `flag:"dadoo-bin"`
NSTarBin string `flag:"nstar-bin"`
DefaultRootFS string `flag:"default-rootfs"`
DepotDir string `flag:"depot"`
ConsoleSocketsPath string `flag:"console-sockets-path"`
BindIP string `flag:"bind-ip"`
BindPort *int `flag:"bind-port"`
BindSocket string `flag:"bind-socket"`
DenyNetworks []string `flag:"deny-network"`
DefaultBlkioWeight *uint64 `flag:"default-container-blockio-weight"`
NetworkPluginExtraArgs []string `flag:"network-plugin-extra-arg"`
ImagePluginExtraArgs []string `flag:"image-plugin-extra-arg"`
RuntimePluginExtraArgs []string `flag:"runtime-plugin-extra-arg"`
PrivilegedImagePluginExtraArgs []string `flag:"privileged-image-plugin-extra-arg"`
MaxContainers *uint64 `flag:"max-containers"`
DebugIP string `flag:"debug-bind-ip"`
DebugPort *int `flag:"debug-bind-port"`
PropertiesPath string `flag:"properties-path"`
LogLevel string `flag:"log-level"`
TCPMemoryLimit *uint64 `flag:"tcp-memory-limit"`
CPUQuotaPerShare *uint64 `flag:"cpu-quota-per-share"`
IPTablesBin string `flag:"iptables-bin"`
IPTablesRestoreBin string `flag:"iptables-restore-bin"`
DNSServers []string `flag:"dns-server"`
AdditionalDNSServers []string `flag:"additional-dns-server"`
AdditionalHostEntries []string `flag:"additional-host-entry"`
MTU *int `flag:"mtu"`
PortPoolSize *int `flag:"port-pool-size"`
PortPoolStart *int `flag:"port-pool-start"`
PortPoolPropertiesPath string `flag:"port-pool-properties-path"`
DestroyContainersOnStartup *bool `flag:"destroy-containers-on-startup"`
DockerRegistry string `flag:"docker-registry"`
InsecureDockerRegistry string `flag:"insecure-docker-registry"`
AllowHostAccess *bool `flag:"allow-host-access"`
SkipSetup *bool `flag:"skip-setup"`
UIDMapStart *uint32 `flag:"uid-map-start"`
UIDMapLength *uint32 `flag:"uid-map-length"`
GIDMapStart *uint32 `flag:"gid-map-start"`
GIDMapLength *uint32 `flag:"gid-map-length"`
CleanupProcessDirsOnWait *bool `flag:"cleanup-process-dirs-on-wait"`
DisablePrivilegedContainers *bool `flag:"disable-privileged-containers"`
AppArmor string `flag:"apparmor"`
Tag string `flag:"tag"`
NetworkPool string `flag:"network-pool"`
ContainerdSocket string `flag:"containerd-socket"`
UseContainerdForProcesses *bool `flag:"use-containerd-for-processes"`
CPUEntitlementPerShare *float64 `flag:"cpu-entitlement-per-share"`
StartupExpectedToFail bool
StorePath string
PrivilegedStorePath string
}
func (c GdnRunnerConfig) connectionInfo() (string, string) {
if c.Socket2meSocketPath != "" {
return "unix", c.Socket2meSocketPath
}
if c.BindSocket != "" {
return "unix", c.BindSocket
}
return "tcp", fmt.Sprintf("%s:%d", c.BindIP, *c.BindPort)
}
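// toServerFlags renders the runner config as "gdn server" command-line arguments.
// It reflects over the struct fields tagged with `flag`, skipping nil pointer
// fields so that only explicitly configured options are passed to the server.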
func (c GdnRunnerConfig) toServerFlags() []string {
gardenArgs := []string{}
if c.ConfigFilePath != "" {
gardenArgs = append(gardenArgs, "--config", c.ConfigFilePath)
}
gardenArgs = append(gardenArgs, "server")
vConf := reflect.ValueOf(c)
tConf := vConf.Type()
for i := 0; i < tConf.NumField(); i++ {
tField := tConf.Field(i)
flagName, ok := tField.Tag.Lookup("flag")
if !ok {
continue
}
vField := vConf.Field(i)
if vField.Kind() != reflect.String && vField.IsNil() {
continue
}
fieldVal := reflect.Indirect(vField).Interface()
switch v := fieldVal.(type) {
case string:
if v != "" {
gardenArgs = append(gardenArgs, "--"+flagName, v)
}
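// a non-nil *string explicitly set to "" is still passed, as an empty flag value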
if v == "" && vField.Kind() != reflect.String && !vField.IsNil() {
gardenArgs = append(gardenArgs, "--"+flagName, "")
}
case int, uint64, uint32:
gardenArgs = append(gardenArgs, "--"+flagName, fmt.Sprintf("%d", v))
case bool:
if v {
gardenArgs = append(gardenArgs, "--"+flagName)
}
case []string:
for _, val := range v {
gardenArgs = append(gardenArgs, "--"+flagName, val)
}
case float64:
gardenArgs = append(gardenArgs, "--"+flagName, fmt.Sprintf("%f", v))
default:
Fail(fmt.Sprintf("unrecognised field type for field %s", flagName))
}
}
return gardenArgs
}
type Binaries struct {
Tar string `json:"tar,omitempty"`
Gdn string `json:"gdn,omitempty"`
Groot string `json:"groot,omitempty"`
Tardis string `json:"tardis,omitempty"`
Init string `json:"init,omitempty"`
RuntimePlugin string `json:"runtime_plugin,omitempty"`
ImagePlugin string `json:"image_plugin,omitempty"`
PrivilegedImagePlugin string `json:"privileged_image_plugin,omitempty"`
NetworkPlugin string `json:"network_plugin,omitempty"`
NoopPlugin string `json:"noop_plugin,omitempty"`
ExecRunner string `json:"execrunner,omitempty"`
NSTar string `json:"nstar,omitempty"`
Socket2me string `json:"socket2me,omitempty"`
}
type GardenRunner struct {
*GdnRunnerConfig
*ginkgomon.Runner
}
func (r *GardenRunner) Setup() {
r.setupDirsForUser()
}
type RunningGarden struct {
*GardenRunner
client.Client
process ifrit.Process
Pid int
logger lager.Logger
}
func init() {
}
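// DefaultGdnRunnerConfig builds a per-Ginkgo-node test configuration: a private
// temp workspace, depot and console-socket directories, a bind socket (or port on
// Windows), a dedicated network/port pool, ID mappings and groot store paths.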
func DefaultGdnRunnerConfig(binaries Binaries) GdnRunnerConfig {
var config GdnRunnerConfig
config.Tag = fmt.Sprintf("%d", GinkgoParallelNode())
var err error
config.TmpDir, err = ioutil.TempDir("", fmt.Sprintf("test-garden-%s-", config.Tag))
Expect(err).NotTo(HaveOccurred())
Expect(os.Chmod(config.TmpDir, 0777)).To(Succeed())
config.ConsoleSocketsPath = filepath.Join(config.TmpDir, "console-sockets")
config.DepotDir = filepath.Join(config.TmpDir, "containers")
Expect(os.MkdirAll(config.DepotDir, 0755)).To(Succeed())
if runtime.GOOS == "windows" {
config.BindIP = "127.0.0.1"
config.BindPort = intptr(10000 + os.Getpid())
} else {
config.BindSocket = fmt.Sprintf("/tmp/garden_%s.sock", config.Tag)
}
config.NetworkPool = fmt.Sprintf("10.254.%d.0/22", 4*GinkgoParallelNode())
config.PortPoolStart = intptr(GinkgoParallelNode() * 7000)
config.UIDMapStart = uint32ptr(1)
config.UIDMapLength = uint32ptr(100000)
config.GIDMapStart = uint32ptr(1)
config.GIDMapLength = uint32ptr(100000)
config.StorePath = filepath.Join(config.TmpDir, "groot_store")
config.PrivilegedStorePath = filepath.Join(config.TmpDir, "groot_privileged_store")
config.ImagePluginExtraArgs = []string{`"--store"`, config.StorePath, `"--tardis-bin"`, binaries.Tardis, `"--log-level"`, "debug"}
config.PrivilegedImagePluginExtraArgs = []string{`"--store"`, config.PrivilegedStorePath, `"--tardis-bin"`, binaries.Tardis, `"--log-level"`, "debug"}
config.LogLevel = "debug"
return config
}
func NewGardenRunner(config GdnRunnerConfig) *GardenRunner {
runner := &GardenRunner{
GdnRunnerConfig: &config,
Runner: ginkgomon.New(ginkgomon.Config{
Name: "guardian",
AnsiColorCode: "31m",
StartCheck: "",
StartCheckTimeout: 30 * time.Second,
}),
}
if config.Socket2meSocketPath == "" {
runner.Command = exec.Command(config.GdnBin, config.toServerFlags()...)
} else {
runner.Command = socket2meCommand(config)
}
runner.Command.Env = append(
os.Environ(),
[]string{
fmt.Sprintf("TMPDIR=%s", runner.TmpDir),
fmt.Sprintf("TEMP=%s", runner.TmpDir),
fmt.Sprintf("TMP=%s", runner.TmpDir),
}...,
)
if config.RuncRoot != "" {
// append to the existing command env so the TMPDIR/TEMP/TMP settings above are preserved
runner.Command.Env = append(
runner.Command.Env,
fmt.Sprintf("XDG_RUNTIME_DIR=%s", config.RuncRoot),
)
}
setUserCredential(runner)
runner.Setup()
return runner
}
func Start(config GdnRunnerConfig) *RunningGarden {
if runtime.GOOS == "linux" {
initGrootStore(config.ImagePluginBin, config.StorePath, []string{"0:4294967294:1", "1:65536:4294901758"})
initGrootStore(config.PrivilegedImagePluginBin, config.PrivilegedStorePath, nil)
}
runner := NewGardenRunner(config)
gdn := &RunningGarden{
GardenRunner: runner,
logger: lagertest.NewTestLogger("garden-runner"),
}
gdn.process = ifrit.Invoke(runner)
gdn.Pid = runner.Command.Process.Pid
gdn.Client = client.New(connection.New(runner.connectionInfo()))
if !config.StartupExpectedToFail {
Eventually(gdn.Ping, time.Second*10).Should(Succeed())
}
return gdn
}
func (r *RunningGarden) Create(spec garden.ContainerSpec) (garden.Container, error) {
container, err := r.Client.Create(spec)
if err != nil {
return nil, err
}
containerPid, err := r.getContainerPid(container.Handle())
if err != nil {
return nil, err
}
fmt.Fprintf(GinkgoWriter, "GQT runner created container with id %s and pid %s", container.Handle(), containerPid)
return container, nil
}
func (r *RunningGarden) getContainerPid(handle string) (string, error) {
if isContainerd() {
return r.listProcesses(handle), nil
}
pidBytes, err := ioutil.ReadFile(filepath.Join(r.DepotDir, handle, "pidfile"))
if err != nil {
return "", err
}
return string(pidBytes), nil
}
func (r *RunningGarden) Kill() error {
r.process.Signal(syscall.SIGKILL)
select {
case err := <-r.process.Wait():
return err
case <-time.After(time.Second * 10):
r.process.Signal(syscall.SIGKILL)
return errors.New("timed out waiting for garden to shutdown after 10 seconds")
}
}
type ErrGardenStop struct {
error
}
func (r *RunningGarden) DestroyAndStop() error {
return multierror.Append(
r.DestroyContainers(),
r.forceStop(),
r.Cleanup(),
).ErrorOrNil()
}
func (r *RunningGarden) forceStop() error {
if runtime.GOOS == "windows" {
// Windows doesn't support SIGTERM
r.Kill()
} else {
if err := r.Stop(); err != nil {
fmt.Printf("error on r.Stop() during forceStop: %s\n", err.Error())
return ErrGardenStop{error: err}
}
}
if err := r.removeTempDirContentsPreservingGrootFSStores(); err != nil {
fmt.Printf("error on r.removeTempDirContentsPreservingGrootFSStore() during forceStop: %s\n", err.Error())
return err
}
return nil
}
func (r *RunningGarden) CgroupsRootPath() string {
return CgroupsRootPath(r.Tag)
}
func CgroupsRootPath(tag string) string {
return filepath.Join("/tmp", fmt.Sprintf("cgroups-%s", tag))
}
func (r *RunningGarden) CgroupSubsystemPath(subsystem, handle string) string {
gardenCgroupRelativePath, err := cgrouper.GetCGroup(subsystem)
Expect(err).NotTo(HaveOccurred())
return filepath.Join(CgroupsRootPath(r.Tag), subsystem, gardenCgroupRelativePath, "garden-"+r.Tag, handle)
}
func (r *RunningGarden) removeTempDirContentsPreservingGrootFSStores() error {
tmpDir, err := os.Open(r.TmpDir)
if err != nil {
return err
}
defer tmpDir.Close()
tmpDirContents, err := tmpDir.Readdir(0)
if err != nil {
return err
}
for _, tmpDirChild := range tmpDirContents {
if !strings.Contains(tmpDirChild.Name(), "store") {
if err := os.RemoveAll(filepath.Join(r.TmpDir, tmpDirChild.Name())); err != nil {
return err
}
}
}
return nil
}
func (r *RunningGarden) Stop() error {
r.process.Signal(syscall.SIGTERM)
var err error
for i := 0; i < 5; i++ {
select {
case err := <-r.process.Wait():
return err
case <-time.After(time.Second * 5):
r.process.Signal(syscall.SIGTERM)
err = errors.New("timed out waiting for garden to shutdown after 5 seconds")
}
}
r.process.Signal(syscall.SIGKILL)
return err
}
func (r *RunningGarden) DestroyContainers() error {
containers, err := r.Containers(nil)
if err != nil {
return err
}
for _, container := range containers {
if destroyErr := r.Destroy(container.Handle()); destroyErr != nil {
err = multierror.Append(err, destroyErr)
}
}
return err
}
type debugVars struct {
NumGoRoutines int `json:"numGoRoutines"`
}
func (r *RunningGarden) NumGoroutines() (int, error) {
debugURL := fmt.Sprintf("http://%s:%d/debug/vars", r.DebugIP, *r.DebugPort)
res, err := http.Get(debugURL)
if err != nil {
return 0, err
}
defer res.Body.Close()
decoder := json.NewDecoder(res.Body)
var debugVarsData debugVars
err = decoder.Decode(&debugVarsData)
if err != nil {
return 0, err
}
return debugVarsData.NumGoRoutines, nil
}
func (r *RunningGarden) StackDump() (string, error) {
debugURL := fmt.Sprintf("http://%s:%d/debug/pprof/goroutine?debug=2", r.DebugIP, *r.DebugPort)
res, err := http.Get(debugURL)
if err != nil {
return "", err
}
defer res.Body.Close()
stack, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", err
}
return string(stack), nil
}
func intptr(i int) *int {
return &i
}
func uint32ptr(i uint32) *uint32 {
return &i
}
func initGrootStore(grootBin, storePath string, idMappings []string) {
if filepath.Base(grootBin) != "grootfs" {
// Don't initialise the grootfs store for fake image plugins
// This is important to prevent loop device leakage!
return
}
initStoreArgs := []string{"--store", storePath, "init-store", "--store-size-bytes", fmt.Sprintf("%d", 2*1024*1024*1024)}
for _, idMapping := range idMappings {
initStoreArgs = append(initStoreArgs, "--uid-mapping", idMapping, "--gid-mapping", idMapping)
}
initStore := exec.Command(grootBin, initStoreArgs...)
initStore.Stdout = GinkgoWriter
initStore.Stderr = GinkgoWriter
Expect(initStore.Run()).To(Succeed())
}
func isContainerd() bool {
return os.Getenv("CONTAINERD_ENABLED") == "true"
}
func (r *RunningGarden) listProcesses(containerID string) string {
return r.runCtr([]string{"tasks", "ps", containerID})
}
func (r *RunningGarden) runCtr(args []string) string {
socket := r.GdnRunnerConfig.ContainerdSocket
defaultArgs := []string{"--address", socket, "--namespace", "garden"}
cmd := exec.Command("ctr", append(defaultArgs, args...)...)
session, err := gexec.Start(cmd, GinkgoWriter, GinkgoWriter)
Expect(err).NotTo(HaveOccurred())
Eventually(session).Should(gexec.Exit(0), string(session.Err.Contents()))
return string(session.Out.Contents())
}
| [
"\"CONTAINERD_ENABLED\""
]
| []
| [
"CONTAINERD_ENABLED"
]
| [] | ["CONTAINERD_ENABLED"] | go | 1 | 0 | |
api4/apitestlib.go | // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See LICENSE.txt for license information.
package api4
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"net/http"
"os"
"path/filepath"
"strings"
"sync"
"testing"
"time"
"github.com/gorilla/websocket"
graphql "github.com/graph-gophers/graphql-go"
s3 "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/stretchr/testify/require"
"github.com/mattermost/mattermost-server/v6/app"
"github.com/mattermost/mattermost-server/v6/app/request"
"github.com/mattermost/mattermost-server/v6/config"
"github.com/mattermost/mattermost-server/v6/model"
"github.com/mattermost/mattermost-server/v6/plugin/plugintest/mock"
"github.com/mattermost/mattermost-server/v6/services/searchengine"
"github.com/mattermost/mattermost-server/v6/shared/mlog"
"github.com/mattermost/mattermost-server/v6/store"
"github.com/mattermost/mattermost-server/v6/store/localcachelayer"
"github.com/mattermost/mattermost-server/v6/store/storetest/mocks"
"github.com/mattermost/mattermost-server/v6/testlib"
"github.com/mattermost/mattermost-server/v6/web"
"github.com/mattermost/mattermost-server/v6/wsapi"
)
type TestHelper struct {
App *app.App
Server *app.Server
ConfigStore *config.Store
Context *request.Context
Client *model.Client4
GraphQLClient *graphQLClient
BasicUser *model.User
BasicUser2 *model.User
TeamAdminUser *model.User
BasicTeam *model.Team
BasicChannel *model.Channel
BasicPrivateChannel *model.Channel
BasicPrivateChannel2 *model.Channel
BasicDeletedChannel *model.Channel
BasicChannel2 *model.Channel
BasicPost *model.Post
Group *model.Group
SystemAdminClient *model.Client4
SystemAdminUser *model.User
tempWorkspace string
SystemManagerClient *model.Client4
SystemManagerUser *model.User
LocalClient *model.Client4
IncludeCacheLayer bool
TestLogger *mlog.Logger
}
var mainHelper *testlib.MainHelper
func SetMainHelper(mh *testlib.MainHelper) {
mainHelper = mh
}
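// setupTestHelper wires up a test server around the given store: it creates a
// temporary workspace, builds an in-memory config store (optionally wrapping the
// DB store with the local cache layer), starts the app server with the API, web
// and websocket handlers, and returns clients pointing at the running instance.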
func setupTestHelper(dbStore store.Store, searchEngine *searchengine.Broker, enterprise bool, includeCache bool,
updateConfig func(*model.Config), options []app.Option) *TestHelper {
tempWorkspace, err := ioutil.TempDir("", "apptest")
if err != nil {
panic(err)
}
memoryStore, err := config.NewMemoryStoreWithOptions(&config.MemoryStoreOptions{IgnoreEnvironmentOverrides: true})
if err != nil {
panic("failed to initialize memory store: " + err.Error())
}
memoryConfig := &model.Config{}
memoryConfig.SetDefaults()
*memoryConfig.PluginSettings.Directory = filepath.Join(tempWorkspace, "plugins")
*memoryConfig.PluginSettings.ClientDirectory = filepath.Join(tempWorkspace, "webapp")
memoryConfig.ServiceSettings.EnableLocalMode = model.NewBool(true)
*memoryConfig.ServiceSettings.LocalModeSocketLocation = filepath.Join(tempWorkspace, "mattermost_local.sock")
*memoryConfig.AnnouncementSettings.AdminNoticesEnabled = false
*memoryConfig.AnnouncementSettings.UserNoticesEnabled = false
*memoryConfig.PluginSettings.AutomaticPrepackagedPlugins = false
if updateConfig != nil {
updateConfig(memoryConfig)
}
memoryStore.Set(memoryConfig)
configStore, err := config.NewStoreFromBacking(memoryStore, nil, false)
if err != nil {
panic(err)
}
options = append(options, app.ConfigStore(configStore))
if includeCache {
// Adds the cache layer to the test store
options = append(options, app.StoreOverride(func(s *app.Server) store.Store {
lcl, err2 := localcachelayer.NewLocalCacheLayer(dbStore, s.Metrics, s.Cluster, s.CacheProvider)
if err2 != nil {
panic(err2)
}
return lcl
}))
} else {
options = append(options, app.StoreOverride(dbStore))
}
testLogger, _ := mlog.NewLogger()
logCfg, _ := config.MloggerConfigFromLoggerConfig(&memoryConfig.LogSettings, nil, config.GetLogFileLocation)
if errCfg := testLogger.ConfigureTargets(logCfg, nil); errCfg != nil {
panic("failed to configure test logger: " + errCfg.Error())
}
// lock logger config so server init cannot override it during testing.
testLogger.LockConfiguration()
options = append(options, app.SetLogger(testLogger))
s, err := app.NewServer(options...)
if err != nil {
panic(err)
}
th := &TestHelper{
App: app.New(app.ServerConnector(s.Channels())),
Server: s,
ConfigStore: configStore,
IncludeCacheLayer: includeCache,
Context: &request.Context{},
TestLogger: testLogger,
}
if s.SearchEngine != nil && s.SearchEngine.BleveEngine != nil && searchEngine != nil {
searchEngine.BleveEngine = s.SearchEngine.BleveEngine
}
if searchEngine != nil {
th.App.SetSearchEngine(searchEngine)
}
th.App.UpdateConfig(func(cfg *model.Config) {
*cfg.TeamSettings.MaxUsersPerTeam = 50
*cfg.RateLimitSettings.Enable = false
*cfg.EmailSettings.SendEmailNotifications = true
*cfg.ServiceSettings.SiteURL = ""
// Disable sniffing, otherwise elastic client fails to connect to docker node
// More details: https://github.com/olivere/elastic/wiki/Sniffing
*cfg.ElasticsearchSettings.Sniff = false
*cfg.TeamSettings.EnableOpenServer = true
// Disable strict password requirements for test
*cfg.PasswordSettings.MinimumLength = 5
*cfg.PasswordSettings.Lowercase = false
*cfg.PasswordSettings.Uppercase = false
*cfg.PasswordSettings.Symbol = false
*cfg.PasswordSettings.Number = false
*cfg.ServiceSettings.ListenAddress = ":0"
})
if err := th.Server.Start(); err != nil {
panic(err)
}
Init(th.App.Srv())
web.New(th.App.Srv())
wsapi.Init(th.App.Srv())
if enterprise {
th.App.Srv().SetLicense(model.NewTestLicense())
} else {
th.App.Srv().SetLicense(nil)
}
th.Client = th.CreateClient()
th.GraphQLClient = newGraphQLClient(fmt.Sprintf("http://localhost:%v", th.App.Srv().ListenAddr.Port))
th.SystemAdminClient = th.CreateClient()
th.SystemManagerClient = th.CreateClient()
// Verify handling of the supported true/false values by randomizing on each run.
rand.Seed(time.Now().UTC().UnixNano())
trueValues := []string{"1", "t", "T", "TRUE", "true", "True"}
falseValues := []string{"0", "f", "F", "FALSE", "false", "False"}
trueString := trueValues[rand.Intn(len(trueValues))]
falseString := falseValues[rand.Intn(len(falseValues))]
mlog.Debug("Configured Client4 bool string values", mlog.String("true", trueString), mlog.String("false", falseString))
th.Client.SetBoolString(true, trueString)
th.Client.SetBoolString(false, falseString)
th.LocalClient = th.CreateLocalClient(*memoryConfig.ServiceSettings.LocalModeSocketLocation)
if th.tempWorkspace == "" {
th.tempWorkspace = tempWorkspace
}
return th
}
func SetupEnterprise(tb testing.TB) *TestHelper {
if testing.Short() {
tb.SkipNow()
}
if mainHelper == nil {
tb.SkipNow()
}
dbStore := mainHelper.GetStore()
dbStore.DropAllTables()
dbStore.MarkSystemRanUnitTests()
mainHelper.PreloadMigrations()
searchEngine := mainHelper.GetSearchEngine()
th := setupTestHelper(dbStore, searchEngine, true, true, nil, nil)
th.InitLogin()
return th
}
func Setup(tb testing.TB) *TestHelper {
if testing.Short() {
tb.SkipNow()
}
if mainHelper == nil {
tb.SkipNow()
}
dbStore := mainHelper.GetStore()
dbStore.DropAllTables()
dbStore.MarkSystemRanUnitTests()
mainHelper.PreloadMigrations()
searchEngine := mainHelper.GetSearchEngine()
th := setupTestHelper(dbStore, searchEngine, false, true, nil, nil)
th.InitLogin()
return th
}
func SetupAndApplyConfigBeforeLogin(tb testing.TB, updateConfig func(cfg *model.Config)) *TestHelper {
if testing.Short() {
tb.SkipNow()
}
if mainHelper == nil {
tb.SkipNow()
}
dbStore := mainHelper.GetStore()
dbStore.DropAllTables()
dbStore.MarkSystemRanUnitTests()
mainHelper.PreloadMigrations()
searchEngine := mainHelper.GetSearchEngine()
th := setupTestHelper(dbStore, searchEngine, false, true, nil, nil)
th.App.UpdateConfig(updateConfig)
th.InitLogin()
return th
}
func SetupConfig(tb testing.TB, updateConfig func(cfg *model.Config)) *TestHelper {
if testing.Short() {
tb.SkipNow()
}
if mainHelper == nil {
tb.SkipNow()
}
dbStore := mainHelper.GetStore()
dbStore.DropAllTables()
dbStore.MarkSystemRanUnitTests()
searchEngine := mainHelper.GetSearchEngine()
th := setupTestHelper(dbStore, searchEngine, false, true, updateConfig, nil)
th.InitLogin()
return th
}
func SetupConfigWithStoreMock(tb testing.TB, updateConfig func(cfg *model.Config)) *TestHelper {
th := setupTestHelper(testlib.GetMockStoreForSetupFunctions(), nil, false, false, updateConfig, nil)
statusMock := mocks.StatusStore{}
statusMock.On("UpdateExpiredDNDStatuses").Return([]*model.Status{}, nil)
statusMock.On("Get", "user1").Return(&model.Status{UserId: "user1", Status: model.StatusOnline}, nil)
statusMock.On("UpdateLastActivityAt", "user1", mock.Anything).Return(nil)
statusMock.On("SaveOrUpdate", mock.AnythingOfType("*model.Status")).Return(nil)
emptyMockStore := mocks.Store{}
emptyMockStore.On("Close").Return(nil)
emptyMockStore.On("Status").Return(&statusMock)
th.App.Srv().Store = &emptyMockStore
return th
}
func SetupWithStoreMock(tb testing.TB) *TestHelper {
th := setupTestHelper(testlib.GetMockStoreForSetupFunctions(), nil, false, false, nil, nil)
statusMock := mocks.StatusStore{}
statusMock.On("UpdateExpiredDNDStatuses").Return([]*model.Status{}, nil)
statusMock.On("Get", "user1").Return(&model.Status{UserId: "user1", Status: model.StatusOnline}, nil)
statusMock.On("UpdateLastActivityAt", "user1", mock.Anything).Return(nil)
statusMock.On("SaveOrUpdate", mock.AnythingOfType("*model.Status")).Return(nil)
emptyMockStore := mocks.Store{}
emptyMockStore.On("Close").Return(nil)
emptyMockStore.On("Status").Return(&statusMock)
th.App.Srv().Store = &emptyMockStore
return th
}
func SetupEnterpriseWithStoreMock(tb testing.TB) *TestHelper {
th := setupTestHelper(testlib.GetMockStoreForSetupFunctions(), nil, true, false, nil, nil)
statusMock := mocks.StatusStore{}
statusMock.On("UpdateExpiredDNDStatuses").Return([]*model.Status{}, nil)
statusMock.On("Get", "user1").Return(&model.Status{UserId: "user1", Status: model.StatusOnline}, nil)
statusMock.On("UpdateLastActivityAt", "user1", mock.Anything).Return(nil)
statusMock.On("SaveOrUpdate", mock.AnythingOfType("*model.Status")).Return(nil)
emptyMockStore := mocks.Store{}
emptyMockStore.On("Close").Return(nil)
emptyMockStore.On("Status").Return(&statusMock)
th.App.Srv().Store = &emptyMockStore
return th
}
func SetupWithServerOptions(tb testing.TB, options []app.Option) *TestHelper {
if testing.Short() {
tb.SkipNow()
}
if mainHelper == nil {
tb.SkipNow()
}
dbStore := mainHelper.GetStore()
dbStore.DropAllTables()
dbStore.MarkSystemRanUnitTests()
mainHelper.PreloadMigrations()
searchEngine := mainHelper.GetSearchEngine()
th := setupTestHelper(dbStore, searchEngine, false, true, nil, options)
th.InitLogin()
return th
}
func (th *TestHelper) ShutdownApp() {
done := make(chan bool)
go func() {
th.Server.Shutdown()
close(done)
}()
select {
case <-done:
case <-time.After(30 * time.Second):
// panic instead of fatal to terminate all tests in this package, otherwise the
// still running App could spuriously fail subsequent tests.
panic("failed to shutdown App within 30 seconds")
}
}
func (th *TestHelper) TearDown() {
if th.IncludeCacheLayer {
// Clean all the caches
th.App.Srv().InvalidateAllCaches()
}
th.ShutdownApp()
}
func closeBody(r *http.Response) {
if r.Body != nil {
_, _ = io.Copy(ioutil.Discard, r.Body)
_ = r.Body.Close()
}
}
var initBasicOnce sync.Once
var userCache struct {
SystemAdminUser *model.User
SystemManagerUser *model.User
TeamAdminUser *model.User
BasicUser *model.User
BasicUser2 *model.User
}
func (th *TestHelper) InitLogin() *TestHelper {
th.waitForConnectivity()
// create users once and cache them because password hashing is slow
initBasicOnce.Do(func() {
th.SystemAdminUser = th.CreateUser()
th.App.UpdateUserRoles(th.SystemAdminUser.Id, model.SystemUserRoleId+" "+model.SystemAdminRoleId, false)
th.SystemAdminUser, _ = th.App.GetUser(th.SystemAdminUser.Id)
userCache.SystemAdminUser = th.SystemAdminUser.DeepCopy()
th.SystemManagerUser = th.CreateUser()
th.App.UpdateUserRoles(th.SystemManagerUser.Id, model.SystemUserRoleId+" "+model.SystemManagerRoleId, false)
th.SystemManagerUser, _ = th.App.GetUser(th.SystemManagerUser.Id)
userCache.SystemManagerUser = th.SystemManagerUser.DeepCopy()
th.TeamAdminUser = th.CreateUser()
th.App.UpdateUserRoles(th.TeamAdminUser.Id, model.SystemUserRoleId, false)
th.TeamAdminUser, _ = th.App.GetUser(th.TeamAdminUser.Id)
userCache.TeamAdminUser = th.TeamAdminUser.DeepCopy()
th.BasicUser = th.CreateUser()
th.BasicUser, _ = th.App.GetUser(th.BasicUser.Id)
userCache.BasicUser = th.BasicUser.DeepCopy()
th.BasicUser2 = th.CreateUser()
th.BasicUser2, _ = th.App.GetUser(th.BasicUser2.Id)
userCache.BasicUser2 = th.BasicUser2.DeepCopy()
})
// restore cached users
th.SystemAdminUser = userCache.SystemAdminUser.DeepCopy()
th.SystemManagerUser = userCache.SystemManagerUser.DeepCopy()
th.TeamAdminUser = userCache.TeamAdminUser.DeepCopy()
th.BasicUser = userCache.BasicUser.DeepCopy()
th.BasicUser2 = userCache.BasicUser2.DeepCopy()
users := []*model.User{th.SystemAdminUser, th.TeamAdminUser, th.BasicUser, th.BasicUser2, th.SystemManagerUser}
mainHelper.GetSQLStore().User().InsertUsers(users)
// restore non hashed password for login
th.SystemAdminUser.Password = "Pa$$word11"
th.TeamAdminUser.Password = "Pa$$word11"
th.BasicUser.Password = "Pa$$word11"
th.BasicUser2.Password = "Pa$$word11"
th.SystemManagerUser.Password = "Pa$$word11"
var wg sync.WaitGroup
wg.Add(3)
go func() {
th.LoginSystemAdmin()
wg.Done()
}()
go func() {
th.LoginSystemManager()
wg.Done()
}()
go func() {
th.LoginTeamAdmin()
wg.Done()
}()
wg.Wait()
return th
}
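// InitBasic populates the helper with a default team, a set of public, private
// and deleted channels, a post, channel memberships for both basic users, and an
// LDAP-backed group, then logs in as the basic user.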
func (th *TestHelper) InitBasic() *TestHelper {
th.BasicTeam = th.CreateTeam()
th.BasicChannel = th.CreatePublicChannel()
th.BasicPrivateChannel = th.CreatePrivateChannel()
th.BasicPrivateChannel2 = th.CreatePrivateChannel()
th.BasicDeletedChannel = th.CreatePublicChannel()
th.BasicChannel2 = th.CreatePublicChannel()
th.BasicPost = th.CreatePost()
th.LinkUserToTeam(th.BasicUser, th.BasicTeam)
th.LinkUserToTeam(th.BasicUser2, th.BasicTeam)
th.App.AddUserToChannel(th.BasicUser, th.BasicChannel, false)
th.App.AddUserToChannel(th.BasicUser2, th.BasicChannel, false)
th.App.AddUserToChannel(th.BasicUser, th.BasicChannel2, false)
th.App.AddUserToChannel(th.BasicUser2, th.BasicChannel2, false)
th.App.AddUserToChannel(th.BasicUser, th.BasicPrivateChannel, false)
th.App.AddUserToChannel(th.BasicUser2, th.BasicPrivateChannel, false)
th.App.AddUserToChannel(th.BasicUser, th.BasicDeletedChannel, false)
th.App.AddUserToChannel(th.BasicUser2, th.BasicDeletedChannel, false)
th.App.UpdateUserRoles(th.BasicUser.Id, model.SystemUserRoleId, false)
th.Client.DeleteChannel(th.BasicDeletedChannel.Id)
th.LoginBasic()
th.Group = th.CreateGroup()
return th
}
func (th *TestHelper) waitForConnectivity() {
for i := 0; i < 1000; i++ {
conn, err := net.Dial("tcp", fmt.Sprintf("localhost:%v", th.App.Srv().ListenAddr.Port))
if err == nil {
conn.Close()
return
}
time.Sleep(time.Millisecond * 20)
}
panic("unable to connect")
}
func (th *TestHelper) CreateClient() *model.Client4 {
return model.NewAPIv4Client(fmt.Sprintf("http://localhost:%v", th.App.Srv().ListenAddr.Port))
}
// ToDo: maybe move this to NewAPIv4SocketClient and reuse it in mmctl
func (th *TestHelper) CreateLocalClient(socketPath string) *model.Client4 {
httpClient := &http.Client{
Transport: &http.Transport{
Dial: func(network, addr string) (net.Conn, error) {
return net.Dial("unix", socketPath)
},
},
}
return &model.Client4{
APIURL: "http://_" + model.APIURLSuffix,
HTTPClient: httpClient,
}
}
func (th *TestHelper) CreateWebSocketClient() (*model.WebSocketClient, error) {
return model.NewWebSocketClient4(fmt.Sprintf("ws://localhost:%v", th.App.Srv().ListenAddr.Port), th.Client.AuthToken)
}
func (th *TestHelper) CreateReliableWebSocketClient(connID string, seqNo int) (*model.WebSocketClient, error) {
return model.NewReliableWebSocketClientWithDialer(websocket.DefaultDialer, fmt.Sprintf("ws://localhost:%v", th.App.Srv().ListenAddr.Port), th.Client.AuthToken, connID, seqNo, true)
}
func (th *TestHelper) CreateWebSocketSystemAdminClient() (*model.WebSocketClient, error) {
return model.NewWebSocketClient4(fmt.Sprintf("ws://localhost:%v", th.App.Srv().ListenAddr.Port), th.SystemAdminClient.AuthToken)
}
func (th *TestHelper) CreateWebSocketSystemManagerClient() (*model.WebSocketClient, error) {
return model.NewWebSocketClient4(fmt.Sprintf("ws://localhost:%v", th.App.Srv().ListenAddr.Port), th.SystemManagerClient.AuthToken)
}
func (th *TestHelper) CreateWebSocketClientWithClient(client *model.Client4) (*model.WebSocketClient, error) {
return model.NewWebSocketClient4(fmt.Sprintf("ws://localhost:%v", th.App.Srv().ListenAddr.Port), client.AuthToken)
}
func (th *TestHelper) CreateBotWithSystemAdminClient() *model.Bot {
return th.CreateBotWithClient(th.SystemAdminClient)
}
func (th *TestHelper) CreateBotWithClient(client *model.Client4) *model.Bot {
bot := &model.Bot{
Username: GenerateTestUsername(),
DisplayName: "a bot",
Description: "bot",
}
rbot, _, err := client.CreateBot(bot)
if err != nil {
panic(err)
}
return rbot
}
func (th *TestHelper) CreateUser() *model.User {
return th.CreateUserWithClient(th.Client)
}
func (th *TestHelper) CreateTeam() *model.Team {
return th.CreateTeamWithClient(th.Client)
}
func (th *TestHelper) CreateTeamWithClient(client *model.Client4) *model.Team {
id := model.NewId()
team := &model.Team{
DisplayName: "dn_" + id,
Name: GenerateTestTeamName(),
Email: th.GenerateTestEmail(),
Type: model.TeamOpen,
}
rteam, _, err := client.CreateTeam(team)
if err != nil {
panic(err)
}
return rteam
}
func (th *TestHelper) CreateUserWithClient(client *model.Client4) *model.User {
id := model.NewId()
user := &model.User{
Email: th.GenerateTestEmail(),
Username: GenerateTestUsername(),
Nickname: "nn_" + id,
FirstName: "f_" + id,
LastName: "l_" + id,
Password: "Pa$$word11",
}
ruser, _, err := client.CreateUser(user)
if err != nil {
panic(err)
}
ruser.Password = "Pa$$word11"
_, err = th.App.Srv().Store.User().VerifyEmail(ruser.Id, ruser.Email)
if err != nil {
return nil
}
return ruser
}
func (th *TestHelper) CreateUserWithAuth(authService string) *model.User {
id := model.NewId()
user := &model.User{
Email: "success+" + id + "@simulator.amazonses.com",
Username: "un_" + id,
Nickname: "nn_" + id,
EmailVerified: true,
AuthService: authService,
}
user, err := th.App.CreateUser(th.Context, user)
if err != nil {
panic(err)
}
return user
}
func (th *TestHelper) SetupLdapConfig() {
th.App.UpdateConfig(func(cfg *model.Config) {
*cfg.ServiceSettings.EnableMultifactorAuthentication = true
*cfg.LdapSettings.Enable = true
*cfg.LdapSettings.EnableSync = true
*cfg.LdapSettings.LdapServer = "dockerhost"
*cfg.LdapSettings.BaseDN = "dc=mm,dc=test,dc=com"
*cfg.LdapSettings.BindUsername = "cn=admin,dc=mm,dc=test,dc=com"
*cfg.LdapSettings.BindPassword = "mostest"
*cfg.LdapSettings.FirstNameAttribute = "cn"
*cfg.LdapSettings.LastNameAttribute = "sn"
*cfg.LdapSettings.NicknameAttribute = "cn"
*cfg.LdapSettings.EmailAttribute = "mail"
*cfg.LdapSettings.UsernameAttribute = "uid"
*cfg.LdapSettings.IdAttribute = "cn"
*cfg.LdapSettings.LoginIdAttribute = "uid"
*cfg.LdapSettings.SkipCertificateVerification = true
*cfg.LdapSettings.GroupFilter = ""
*cfg.LdapSettings.GroupDisplayNameAttribute = "cN"
*cfg.LdapSettings.GroupIdAttribute = "entRyUuId"
*cfg.LdapSettings.MaxPageSize = 0
})
th.App.Srv().SetLicense(model.NewTestLicense("ldap"))
}
func (th *TestHelper) SetupSamlConfig() {
th.App.UpdateConfig(func(cfg *model.Config) {
*cfg.SamlSettings.Enable = true
*cfg.SamlSettings.Verify = false
*cfg.SamlSettings.Encrypt = false
*cfg.SamlSettings.IdpURL = "https://does.notmatter.example"
*cfg.SamlSettings.IdpDescriptorURL = "https://localhost/adfs/services/trust"
*cfg.SamlSettings.AssertionConsumerServiceURL = "https://localhost/login/sso/saml"
*cfg.SamlSettings.ServiceProviderIdentifier = "https://localhost/login/sso/saml"
*cfg.SamlSettings.IdpCertificateFile = app.SamlIdpCertificateName
*cfg.SamlSettings.PrivateKeyFile = app.SamlPrivateKeyName
*cfg.SamlSettings.PublicCertificateFile = app.SamlPublicCertificateName
*cfg.SamlSettings.EmailAttribute = "Email"
*cfg.SamlSettings.UsernameAttribute = "Username"
*cfg.SamlSettings.FirstNameAttribute = "FirstName"
*cfg.SamlSettings.LastNameAttribute = "LastName"
*cfg.SamlSettings.NicknameAttribute = ""
*cfg.SamlSettings.PositionAttribute = ""
*cfg.SamlSettings.LocaleAttribute = ""
*cfg.SamlSettings.SignatureAlgorithm = model.SamlSettingsSignatureAlgorithmSha256
*cfg.SamlSettings.CanonicalAlgorithm = model.SamlSettingsCanonicalAlgorithmC14n11
})
th.App.Srv().SetLicense(model.NewTestLicense("saml"))
}
func (th *TestHelper) CreatePublicChannel() *model.Channel {
return th.CreateChannelWithClient(th.Client, model.ChannelTypeOpen)
}
func (th *TestHelper) CreatePrivateChannel() *model.Channel {
return th.CreateChannelWithClient(th.Client, model.ChannelTypePrivate)
}
func (th *TestHelper) CreateChannelWithClient(client *model.Client4, channelType model.ChannelType) *model.Channel {
return th.CreateChannelWithClientAndTeam(client, channelType, th.BasicTeam.Id)
}
func (th *TestHelper) CreateChannelWithClientAndTeam(client *model.Client4, channelType model.ChannelType, teamId string) *model.Channel {
id := model.NewId()
channel := &model.Channel{
DisplayName: "dn_" + id,
Name: GenerateTestChannelName(),
Type: channelType,
TeamId: teamId,
}
rchannel, _, err := client.CreateChannel(channel)
if err != nil {
panic(err)
}
return rchannel
}
func (th *TestHelper) CreatePost() *model.Post {
return th.CreatePostWithClient(th.Client, th.BasicChannel)
}
func (th *TestHelper) CreatePinnedPost() *model.Post {
return th.CreatePinnedPostWithClient(th.Client, th.BasicChannel)
}
func (th *TestHelper) CreateMessagePost(message string) *model.Post {
return th.CreateMessagePostWithClient(th.Client, th.BasicChannel, message)
}
func (th *TestHelper) CreatePostWithFiles(files ...*model.FileInfo) *model.Post {
return th.CreatePostWithFilesWithClient(th.Client, th.BasicChannel, files...)
}
func (th *TestHelper) CreatePostInChannelWithFiles(channel *model.Channel, files ...*model.FileInfo) *model.Post {
return th.CreatePostWithFilesWithClient(th.Client, channel, files...)
}
func (th *TestHelper) CreatePostWithFilesWithClient(client *model.Client4, channel *model.Channel, files ...*model.FileInfo) *model.Post {
var fileIds model.StringArray
for i := range files {
fileIds = append(fileIds, files[i].Id)
}
post := &model.Post{
ChannelId: channel.Id,
Message: "message_" + model.NewId(),
FileIds: fileIds,
}
rpost, _, err := client.CreatePost(post)
if err != nil {
panic(err)
}
return rpost
}
func (th *TestHelper) CreatePostWithClient(client *model.Client4, channel *model.Channel) *model.Post {
id := model.NewId()
post := &model.Post{
ChannelId: channel.Id,
Message: "message_" + id,
}
rpost, _, err := client.CreatePost(post)
if err != nil {
panic(err)
}
return rpost
}
func (th *TestHelper) CreatePinnedPostWithClient(client *model.Client4, channel *model.Channel) *model.Post {
id := model.NewId()
post := &model.Post{
ChannelId: channel.Id,
Message: "message_" + id,
IsPinned: true,
}
rpost, _, err := client.CreatePost(post)
if err != nil {
panic(err)
}
return rpost
}
func (th *TestHelper) CreateMessagePostWithClient(client *model.Client4, channel *model.Channel, message string) *model.Post {
post := &model.Post{
ChannelId: channel.Id,
Message: message,
}
rpost, _, err := client.CreatePost(post)
if err != nil {
panic(err)
}
return rpost
}
func (th *TestHelper) CreateMessagePostNoClient(channel *model.Channel, message string, createAtTime int64) *model.Post {
post, err := th.App.Srv().Store.Post().Save(&model.Post{
UserId: th.BasicUser.Id,
ChannelId: channel.Id,
Message: message,
CreateAt: createAtTime,
})
if err != nil {
panic(err)
}
return post
}
func (th *TestHelper) CreateDmChannel(user *model.User) *model.Channel {
var err *model.AppError
var channel *model.Channel
if channel, err = th.App.GetOrCreateDirectChannel(th.Context, th.BasicUser.Id, user.Id); err != nil {
panic(err)
}
return channel
}
func (th *TestHelper) LoginBasic() {
th.LoginBasicWithClient(th.Client)
if os.Getenv("MM_FEATUREFLAGS_GRAPHQL") == "true" {
th.LoginBasicWithGraphQL()
}
}
func (th *TestHelper) LoginBasic2() {
th.LoginBasic2WithClient(th.Client)
if os.Getenv("MM_FEATUREFLAGS_GRAPHQL") == "true" {
th.LoginBasicWithGraphQL()
}
}
func (th *TestHelper) LoginTeamAdmin() {
th.LoginTeamAdminWithClient(th.Client)
}
func (th *TestHelper) LoginSystemAdmin() {
th.LoginSystemAdminWithClient(th.SystemAdminClient)
}
func (th *TestHelper) LoginSystemManager() {
th.LoginSystemManagerWithClient(th.SystemManagerClient)
}
func (th *TestHelper) LoginBasicWithClient(client *model.Client4) {
_, _, err := client.Login(th.BasicUser.Email, th.BasicUser.Password)
if err != nil {
panic(err)
}
}
func (th *TestHelper) LoginBasicWithGraphQL() {
_, _, err := th.GraphQLClient.login(th.BasicUser.Email, th.BasicUser.Password)
if err != nil {
panic(err)
}
}
func (th *TestHelper) LoginBasic2WithClient(client *model.Client4) {
_, _, err := client.Login(th.BasicUser2.Email, th.BasicUser2.Password)
if err != nil {
panic(err)
}
}
func (th *TestHelper) LoginTeamAdminWithClient(client *model.Client4) {
_, _, err := client.Login(th.TeamAdminUser.Email, th.TeamAdminUser.Password)
if err != nil {
panic(err)
}
}
func (th *TestHelper) LoginSystemManagerWithClient(client *model.Client4) {
_, _, err := client.Login(th.SystemManagerUser.Email, th.SystemManagerUser.Password)
if err != nil {
panic(err)
}
}
func (th *TestHelper) LoginSystemAdminWithClient(client *model.Client4) {
_, _, err := client.Login(th.SystemAdminUser.Email, th.SystemAdminUser.Password)
if err != nil {
panic(err)
}
}
func (th *TestHelper) UpdateActiveUser(user *model.User, active bool) {
_, err := th.App.UpdateActive(th.Context, user, active)
if err != nil {
panic(err)
}
}
func (th *TestHelper) LinkUserToTeam(user *model.User, team *model.Team) {
_, err := th.App.JoinUserToTeam(th.Context, team, user, "")
if err != nil {
panic(err)
}
}
func (th *TestHelper) UnlinkUserFromTeam(user *model.User, team *model.Team) {
err := th.App.RemoveUserFromTeam(th.Context, team.Id, user.Id, "")
if err != nil {
panic(err)
}
}
func (th *TestHelper) AddUserToChannel(user *model.User, channel *model.Channel) *model.ChannelMember {
member, err := th.App.AddUserToChannel(user, channel, false)
if err != nil {
panic(err)
}
return member
}
func (th *TestHelper) RemoveUserFromChannel(user *model.User, channel *model.Channel) {
err := th.App.RemoveUserFromChannel(th.Context, user.Id, "", channel)
if err != nil {
panic(err)
}
}
func (th *TestHelper) GenerateTestEmail() string {
if *th.App.Config().EmailSettings.SMTPServer != "localhost" && os.Getenv("CI_INBUCKET_PORT") == "" {
return strings.ToLower("success+" + model.NewId() + "@simulator.amazonses.com")
}
return strings.ToLower(model.NewId() + "@localhost")
}
func (th *TestHelper) CreateGroup() *model.Group {
id := model.NewId()
group := &model.Group{
Name: model.NewString("n-" + id),
DisplayName: "dn_" + id,
Source: model.GroupSourceLdap,
RemoteId: model.NewString("ri_" + model.NewId()),
}
group, err := th.App.CreateGroup(group)
if err != nil {
panic(err)
}
return group
}
// TestForSystemAdminAndLocal runs a test function for both
// SystemAdmin and Local clients. Several endpoints work in the same
// way when used by a fully privileged user and through the local
// mode, so this helper facilitates checking both
func (th *TestHelper) TestForSystemAdminAndLocal(t *testing.T, f func(*testing.T, *model.Client4), name ...string) {
var testName string
if len(name) > 0 {
testName = name[0] + "/"
}
t.Run(testName+"SystemAdminClient", func(t *testing.T) {
f(t, th.SystemAdminClient)
})
t.Run(testName+"LocalClient", func(t *testing.T) {
f(t, th.LocalClient)
})
}
// TestForAllClients runs a test function for all the clients
// registered in the TestHelper
func (th *TestHelper) TestForAllClients(t *testing.T, f func(*testing.T, *model.Client4), name ...string) {
var testName string
if len(name) > 0 {
testName = name[0] + "/"
}
t.Run(testName+"Client", func(t *testing.T) {
f(t, th.Client)
})
t.Run(testName+"SystemAdminClient", func(t *testing.T) {
f(t, th.SystemAdminClient)
})
t.Run(testName+"LocalClient", func(t *testing.T) {
f(t, th.LocalClient)
})
}
func GenerateTestUsername() string {
return "fakeuser" + model.NewRandomString(10)
}
func GenerateTestTeamName() string {
return "faketeam" + model.NewRandomString(6)
}
func GenerateTestChannelName() string {
return "fakechannel" + model.NewRandomString(10)
}
func GenerateTestAppName() string {
return "fakeoauthapp" + model.NewRandomString(10)
}
func GenerateTestId() string {
return model.NewId()
}
func CheckUserSanitization(tb testing.TB, user *model.User) {
tb.Helper()
require.Equal(tb, "", user.Password, "password wasn't blank")
require.Empty(tb, user.AuthData, "auth data wasn't blank")
require.Equal(tb, "", user.MfaSecret, "mfa secret wasn't blank")
}
func CheckEtag(tb testing.TB, data interface{}, resp *model.Response) {
tb.Helper()
require.Empty(tb, data)
require.Equal(tb, http.StatusNotModified, resp.StatusCode, "wrong status code for etag")
}
func checkHTTPStatus(tb testing.TB, resp *model.Response, expectedStatus int) {
tb.Helper()
require.NotNilf(tb, resp, "Unexpected nil response, expected http status:%v", expectedStatus)
require.Equalf(tb, expectedStatus, resp.StatusCode, "Expected http status:%v, got %v", expectedStatus, resp.StatusCode)
}
func CheckOKStatus(tb testing.TB, resp *model.Response) {
tb.Helper()
checkHTTPStatus(tb, resp, http.StatusOK)
}
func CheckCreatedStatus(tb testing.TB, resp *model.Response) {
tb.Helper()
checkHTTPStatus(tb, resp, http.StatusCreated)
}
func CheckForbiddenStatus(tb testing.TB, resp *model.Response) {
tb.Helper()
checkHTTPStatus(tb, resp, http.StatusForbidden)
}
func CheckUnauthorizedStatus(tb testing.TB, resp *model.Response) {
tb.Helper()
checkHTTPStatus(tb, resp, http.StatusUnauthorized)
}
func CheckNotFoundStatus(tb testing.TB, resp *model.Response) {
tb.Helper()
checkHTTPStatus(tb, resp, http.StatusNotFound)
}
func CheckBadRequestStatus(tb testing.TB, resp *model.Response) {
tb.Helper()
checkHTTPStatus(tb, resp, http.StatusBadRequest)
}
func CheckNotImplementedStatus(tb testing.TB, resp *model.Response) {
tb.Helper()
checkHTTPStatus(tb, resp, http.StatusNotImplemented)
}
func CheckRequestEntityTooLargeStatus(tb testing.TB, resp *model.Response) {
tb.Helper()
checkHTTPStatus(tb, resp, http.StatusRequestEntityTooLarge)
}
func CheckInternalErrorStatus(tb testing.TB, resp *model.Response) {
tb.Helper()
checkHTTPStatus(tb, resp, http.StatusInternalServerError)
}
func CheckServiceUnavailableStatus(tb testing.TB, resp *model.Response) {
tb.Helper()
checkHTTPStatus(tb, resp, http.StatusServiceUnavailable)
}
func CheckErrorID(tb testing.TB, err error, errorId string) {
tb.Helper()
require.Error(tb, err, "should have errored with id: %s", errorId)
var appError *model.AppError
ok := errors.As(err, &appError)
require.True(tb, ok, "should have been a model.AppError")
require.Equalf(tb, errorId, appError.Id, "incorrect error id, actual: %s, expected: %s", appError.Id, errorId)
}
func CheckErrorMessage(tb testing.TB, err error, message string) {
tb.Helper()
require.Error(tb, err, "should have errored with message: %s", message)
var appError *model.AppError
ok := errors.As(err, &appError)
require.True(tb, ok, "should have been a model.AppError")
require.Equalf(tb, message, appError.Message, "incorrect error message, actual: %s, expected: %s", appError.Message, message)
}
func CheckStartsWith(tb testing.TB, value, prefix, message string) {
tb.Helper()
require.True(tb, strings.HasPrefix(value, prefix), message, value)
}
// Similar to s3.New() but allows initialization of signature v2 or signature v4 client.
// If signV2 input is false, function always returns signature v4.
//
// Additionally, this function takes a user-defined region which, if set,
// disables automatic region lookup.
func s3New(endpoint, accessKey, secretKey string, secure bool, signV2 bool, region string) (*s3.Client, error) {
var creds *credentials.Credentials
if signV2 {
creds = credentials.NewStatic(accessKey, secretKey, "", credentials.SignatureV2)
} else {
creds = credentials.NewStatic(accessKey, secretKey, "", credentials.SignatureV4)
}
opts := s3.Options{
Creds: creds,
Secure: secure,
Region: region,
}
return s3.New(endpoint, &opts)
}
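// cleanupTestFile removes a test upload together with its thumbnail and preview,
// deleting from the configured S3 bucket or from the local filesystem depending
// on the active file storage driver.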
func (th *TestHelper) cleanupTestFile(info *model.FileInfo) error {
cfg := th.App.Config()
if *cfg.FileSettings.DriverName == model.ImageDriverS3 {
endpoint := *cfg.FileSettings.AmazonS3Endpoint
accessKey := *cfg.FileSettings.AmazonS3AccessKeyId
secretKey := *cfg.FileSettings.AmazonS3SecretAccessKey
secure := *cfg.FileSettings.AmazonS3SSL
signV2 := *cfg.FileSettings.AmazonS3SignV2
region := *cfg.FileSettings.AmazonS3Region
s3Clnt, err := s3New(endpoint, accessKey, secretKey, secure, signV2, region)
if err != nil {
return err
}
bucket := *cfg.FileSettings.AmazonS3Bucket
if err := s3Clnt.RemoveObject(context.Background(), bucket, info.Path, s3.RemoveObjectOptions{}); err != nil {
return err
}
if info.ThumbnailPath != "" {
if err := s3Clnt.RemoveObject(context.Background(), bucket, info.ThumbnailPath, s3.RemoveObjectOptions{}); err != nil {
return err
}
}
if info.PreviewPath != "" {
if err := s3Clnt.RemoveObject(context.Background(), bucket, info.PreviewPath, s3.RemoveObjectOptions{}); err != nil {
return err
}
}
} else if *cfg.FileSettings.DriverName == model.ImageDriverLocal {
if err := os.Remove(*cfg.FileSettings.Directory + info.Path); err != nil {
return err
}
if info.ThumbnailPath != "" {
if err := os.Remove(*cfg.FileSettings.Directory + info.ThumbnailPath); err != nil {
return err
}
}
if info.PreviewPath != "" {
if err := os.Remove(*cfg.FileSettings.Directory + info.PreviewPath); err != nil {
return err
}
}
}
return nil
}
func (th *TestHelper) MakeUserChannelAdmin(user *model.User, channel *model.Channel) {
if cm, err := th.App.Srv().Store.Channel().GetMember(context.Background(), channel.Id, user.Id); err == nil {
cm.SchemeAdmin = true
if _, err = th.App.Srv().Store.Channel().UpdateMember(cm); err != nil {
panic(err)
}
} else {
panic(err)
}
}
func (th *TestHelper) UpdateUserToTeamAdmin(user *model.User, team *model.Team) {
if tm, err := th.App.Srv().Store.Team().GetMember(context.Background(), team.Id, user.Id); err == nil {
tm.SchemeAdmin = true
if _, err = th.App.Srv().Store.Team().UpdateMember(tm); err != nil {
panic(err)
}
} else {
panic(err)
}
}
func (th *TestHelper) UpdateUserToNonTeamAdmin(user *model.User, team *model.Team) {
if tm, err := th.App.Srv().Store.Team().GetMember(context.Background(), team.Id, user.Id); err == nil {
tm.SchemeAdmin = false
if _, err = th.App.Srv().Store.Team().UpdateMember(tm); err != nil {
panic(err)
}
} else {
panic(err)
}
}
func (th *TestHelper) SaveDefaultRolePermissions() map[string][]string {
results := make(map[string][]string)
for _, roleName := range []string{
"system_user",
"system_admin",
"team_user",
"team_admin",
"channel_user",
"channel_admin",
} {
role, err1 := th.App.GetRoleByName(context.Background(), roleName)
if err1 != nil {
panic(err1)
}
results[roleName] = role.Permissions
}
return results
}
func (th *TestHelper) RestoreDefaultRolePermissions(data map[string][]string) {
for roleName, permissions := range data {
role, err1 := th.App.GetRoleByName(context.Background(), roleName)
if err1 != nil {
panic(err1)
}
if strings.Join(role.Permissions, " ") == strings.Join(permissions, " ") {
continue
}
role.Permissions = permissions
_, err2 := th.App.UpdateRole(role)
if err2 != nil {
panic(err2)
}
}
}
func (th *TestHelper) RemovePermissionFromRole(permission string, roleName string) {
role, err1 := th.App.GetRoleByName(context.Background(), roleName)
if err1 != nil {
panic(err1)
}
var newPermissions []string
for _, p := range role.Permissions {
if p != permission {
newPermissions = append(newPermissions, p)
}
}
if strings.Join(role.Permissions, " ") == strings.Join(newPermissions, " ") {
return
}
role.Permissions = newPermissions
_, err2 := th.App.UpdateRole(role)
if err2 != nil {
panic(err2)
}
}
func (th *TestHelper) AddPermissionToRole(permission string, roleName string) {
role, err1 := th.App.GetRoleByName(context.Background(), roleName)
if err1 != nil {
panic(err1)
}
for _, existingPermission := range role.Permissions {
if existingPermission == permission {
return
}
}
role.Permissions = append(role.Permissions, permission)
_, err2 := th.App.UpdateRole(role)
if err2 != nil {
panic(err2)
}
}
func (th *TestHelper) SetupTeamScheme() *model.Scheme {
return th.SetupScheme(model.SchemeScopeTeam)
}
func (th *TestHelper) SetupChannelScheme() *model.Scheme {
return th.SetupScheme(model.SchemeScopeChannel)
}
func (th *TestHelper) SetupScheme(scope string) *model.Scheme {
scheme, err := th.App.CreateScheme(&model.Scheme{
Name: model.NewId(),
DisplayName: model.NewId(),
Scope: scope,
})
if err != nil {
panic(err)
}
return scheme
}
func (th *TestHelper) MakeGraphQLRequest(input *graphQLInput) (*graphql.Response, error) {
url := fmt.Sprintf("http://localhost:%v", th.App.Srv().ListenAddr.Port) + model.APIURLSuffixV5 + "/graphql"
buf, err := json.Marshal(input)
if err != nil {
panic(err)
}
resp, err := th.GraphQLClient.doAPIRequest("POST", url, bytes.NewReader(buf), map[string]string{})
if err != nil {
panic(err)
}
defer closeBody(resp)
var gqlResp *graphql.Response
err = json.NewDecoder(resp.Body).Decode(&gqlResp)
return gqlResp, err
}
| [
"\"MM_FEATUREFLAGS_GRAPHQL\"",
"\"MM_FEATUREFLAGS_GRAPHQL\"",
"\"CI_INBUCKET_PORT\""
]
| []
| [
"CI_INBUCKET_PORT",
"MM_FEATUREFLAGS_GRAPHQL"
]
| [] | ["CI_INBUCKET_PORT", "MM_FEATUREFLAGS_GRAPHQL"] | go | 2 | 0 | |
v1-0-2/BladderTracker.py | ##################################################
## BladderTracker image and pressure data acquisition software
##
## This software helps Nathan understand the urge to purge
## Designed to work with a large number of cameras courtesy of uManager.
## Based on the VasoTracker pressure myography software
## For additional info see www.vasotracker.com and https://github.com/VasoTracker/VasoTracker
##
##################################################
##
## BSD 3-Clause License
##
## Copyright (c) 2019, VasoTracker
## All rights reserved.
##
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## * Redistributions of source code must retain the above copyright notice, this
## list of conditions and the following disclaimer.
##
## * Redistributions in binary form must reproduce the above copyright notice,
## this list of conditions and the following disclaimer in the documentation
## and/or other materials provided with the distribution.
##
## * Neither the name of the copyright holder nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
## IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
## DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
## FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
## DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
## SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
## CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
## OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
##################################################
##
## Author: Calum Wilson
## Copyright: Copyright 2019, VasoTracker
## Credits: Calum Wilson
## License: BSD 3-Clause License
## Version: 1.0.2
## Maintainer: Calum Wilson
## Email: [email protected]
## Status: Production
## Last updated: 20190910
##
##################################################
## The following were very useful:
## https://www.safaribooksonline.com/library/view/python-cookbook/0596001673/ch09s07.html
## http://code.activestate.com/recipes/82965-threads-tkinter-and-asynchronous-io/
## https://www.physics.utoronto.ca/~phy326/python/Live_Plot.py
## http://forum.arduino.cc/index.php?topic=225329.msg1810764#msg1810764
## https://stackoverflow.com/questions/9917280/using-draw-in-pil-tkinter
## https://stackoverflow.com/questions/37334106/opening-image-on-canvas-cropping-the-image-and-update-the-canvas
from __future__ import division
import numpy as np
# Tkinter imports
import Tkinter as tk
from Tkinter import *
import tkSimpleDialog
import tkMessageBox as tmb
import tkFileDialog
import ttk
from PIL import Image, ImageTk #convert cv2 image to tkinter
E = tk.E
W = tk.W
N = tk.N
S = tk.S
ypadding = 1.5 #ypadding just to save time - used for both x and y
# Other imports
import os
import sys
import time
import datetime
import threading
import random
import Queue
import cv2
import csv
from skimage import io
import skimage
from skimage import measure
import serial
import win32com.client
import webbrowser
from skimage.transform import rescale, resize, downscale_local_mean
from scipy import misc
# Import Vasotracker functions
from VT_Arduino import Arduino
import snake
# Add MicroManager to path
import sys
MM_PATH = os.path.join('C:', os.path.sep, 'Program Files','Micro-Manager-1.4')
sys.path.append(MM_PATH)
os.environ['PATH'] = MM_PATH + ';' + os.environ['PATH']
import MMCorePy
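# The block above assumes the default Micro-Manager 1.4 install path; the directory is
# added to os.environ['PATH'] as well as sys.path so that MMCorePy and the native
# device-adapter libraries it loads can be located.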
'''
import sys
sys.path.append('C:\Program Files\Micro-Manager-1.4')
import MMCorePy
'''
#import PyQt5
# matplotlib imports
import matplotlib
#matplotlib.use('Qt5Agg')
#matplotlib.use('Qt4Agg', warn=True)
import matplotlib.backends.tkagg as tkagg
from matplotlib.figure import Figure
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
import matplotlib.pyplot as plt
from matplotlib.backends import backend_qt4agg
from matplotlib import pyplot
from collections import deque
##################################################
## GUI main application
##################################################
class GuiPart(tk.Frame):
#Initialisation function
def __init__(self, master, queue,queue2, endCommand, *args, **kwargs):
tk.Frame.__init__(self, *args, **kwargs)
self.queue = queue
self.queue2 = queue2
self.endApplication = endCommand
global root
self.root = root
self.VTD = os.getcwd()
global VTD
VTD = self.VTD
#Set up the GUI
self.grid(sticky=N+S+E+W)
top = self.winfo_toplevel()
top.rowconfigure(0, weight=1)
top.columnconfigure(0, weight=1)
self.rowconfigure(1, weight=1)
self.columnconfigure(0, weight=1)
self.filename = self.get_file_name()
print self.filename
# Arduino
#self.Arduino = Arduino(self)
#self.ports = self.Arduino.getports()
# Timing functions
self.timeit = TimeIt()
self.timeit2 = TimeIt2()
#self.timeit3 = TimeIt3()
# Initial Values
# Scale setting
self.multiplication_factor = 1 # Scale setting
# Exposure setting
global exposure
exposure = 500
self.exposure = exposure
# npts setting
global npts
npts = 1200
self.npts = npts
# Acquisition rate setting
global acq_rate
acq_rate = 0
self.acq_rate = acq_rate
# Record interval setting
global rec_interval
rec_interval = 1
self.rec_interval = rec_interval
self.initUI(endCommand)
# Open the csv file and then clear it
f = open(self.filename.name, "w+")
f.close()
# Add the headers
with open((self.filename.name), 'ab') as f:
w=csv.writer(f, quoting=csv.QUOTE_ALL)
w.writerow(("Time",'Temperature (oC)', 'Pressure 1 (mmHg)', 'Pressure 2 (mmHg)', 'Avg Pressure (mmHg)'))
# Add file for table
self.txt_file = os.path.splitext(self.filename.name)[0]
print "tail = ", self.txt_file
self.txt_file = self.txt_file + ' - Table' + '.csv'
g = open(self.txt_file, "w+")
g.close()
with open((self.txt_file), 'ab') as g:
v=csv.writer(g, quoting=csv.QUOTE_ALL)
column_headings = 'Time (s)', 'Note', 'Temp (oC)', 'P1 (mmHg)', 'P2 (mmHg)', 'PAVG (mmHg)'
v.writerow(column_headings)
# Function for getting the save file.
def get_file_name(self):
tmb.showinfo("", "Create a file to save output...")
now = datetime.datetime.now()
savename = now.strftime("%Y%m%d")
f = tkFileDialog.asksaveasfile(mode='w', defaultextension=".csv", initialdir= os.path.join(VTD, 'Results'), initialfile=savename)
if f:
print "f = ", f
return(f)
else: # asksaveasfile return `None` if dialog closed with "cancel".
if tmb.askquestion("No save file selected", "Do you want to quit VasoTracker?", icon='warning') == "yes":
self.endApplication()
else:
f = self.get_file_name()
return (f)
# Function for writing to the save file
def writeToFile(self,data):
with open((self.filename.name), 'ab') as f:
w=csv.writer(f, quoting=csv.QUOTE_ALL)
w.writerow(data)
# Function for closing down
def close_app(self):
if tmb.askokcancel("Close", "Are you sure...?"):
self.endApplication()
def gotouserguide(self):
tmb.showinfo("Woops", "We've been too busy to write a user guide. Some details (like using certain cameras) are detailed in our VasoTracker software manual. Have a quick look at that...")
webbrowser.open_new(r"http://www.vasotracker.com/wp-content/uploads/2019/04/VasoTracker-Acquistion-Software-Manual.pdf")
def gotocontact(self):
tmb.showinfo("We would hate to hear from you", "Because it probably means there is a problem. Despite our feelings, we will do our best to help. Our contact details should pop up in your web browser...")
webbrowser.open_new(r"http://www.vasotracker.com/about/contact-us/")
def launchabout(self):
webbrowser.open_new(r"http://www.vasotracker.com/about/")
def launchsnake(self):
tmb.showinfo("We did warn you.", "Any hope of this being a productive day have just went out the window...")
window = tk.Toplevel(root)
window.iconbitmap('bladder_ICON.ICO')
snake.Application(window)
# Function for defining an average checkbox ## Shouldbe in toolbar!
def average_checkbox(self, window, text):
avg_checkbox = ttk.Checkbutton(window, text=text)
avg_checkbox.grid(row=0, columnspan=4, padx=3, pady=3)
# Second Function for initialising the GUI
def initUI(self,endCommand):
# make Esc exit the program
root.bind('<Escape>', lambda e: endCommand)
# make the top right close button minimize (iconify) the main window
root.protocol("WM_DELETE_WINDOW", self.close_app)
# create a menu bar with an Exit command
menubar = tk.Menu(root)
filemenu = tk.Menu(menubar, tearoff=0)
filemenu.add_command(label="Exit", command=self.close_app)
filemenu = tk.Menu(menubar, tearoff=0)
# Create a help menu
helpmenu = tk.Menu(menubar, tearoff=0)
helpmenu.add_command(label='User Guide', command = self.gotouserguide)
helpmenu.add_command(label='Contact', command = self.gotocontact)
helpmenu.add_command(label='About', command = self.launchabout)
helpmenu.add_separator()
helpmenu.add_command(label='Do not click here...', command = self.launchsnake)
menubar.add_cascade(label="File", menu=filemenu)
menubar.add_cascade(label="Help", menu=helpmenu)
root.config(menu=menubar)
self.pack(fill=BOTH, expand=1)
# Make the toolbar along the top
self.toolbar = ToolBar(self)#ttk.Frame(root, height=150)
self.toolbar.grid(row=0, column=0,rowspan=1,columnspan=4, padx=ypadding, pady=ypadding, sticky=E+S+W+N)
self.toolbar.grid(sticky='nswe')
self.toolbar.rowconfigure(0, weight=1)
self.toolbar.columnconfigure(0, weight=1)
#self.toolbar.grid_propagate(0)
# Make the status bar along the bottom
def callback(event):
webbrowser.open_new(r"https://doi.org/10.3389/fphys.2019.00099")
self.status_bar = ttk.Label(text = 'BladderTracker is based on VasoTracker software. Clicking here will take you to our wonderful paper.', relief=SUNKEN, anchor='w')
self.status_bar.pack(side=BOTTOM, fill=X)
self.status_bar.bind("<Button-1>", callback)
# Make the graph frame
self.graphframe = GraphFrame(self)
self.graphframe.grid(row=1, column=0, rowspan=2,columnspan=1, padx=ypadding, pady=ypadding, sticky=E+S+W+N)
self.graphframe.grid(sticky='nswe')
print "this is the height: ", self.graphframe.winfo_height()
#self.graphframe.rowconfigure(0, weight=1)
#self.graphframe.columnconfigure(0, weight=1)
#self.graphframe.grid_propagate(0)
# Make the table frame
self.tableframe = TableFrame(self)
self.tableframe.grid(row=1, column=1,rowspan=1,columnspan=1, padx=ypadding, pady=ypadding, sticky=E+S+W+N)
self.tableframe.grid(sticky='nwe')
#self.tableframe.rowconfigure(0, weight=1)
#self.tableframe.columnconfigure(0, weight=1)
#self.tableframe.grid_propagate(0)
#Update everything so that the frames are all the correct size. We need to do this so we can size the graph/image before we place them.
self.toolbar.update()
self.status_bar.update()
#self.graphframe.update()
self.tableframe.update()
self.toolbar.update()
# Make the Camera Frame bottom right
self.cameraframe = CameraFrame(self)
self.cameraframe.grid(row=2, column=1,rowspan=1,columnspan=1, padx=ypadding, pady=ypadding, sticky=E+S+W+N)
self.cameraframe.grid(sticky='nswe')
#self.cameraframe.rowconfigure(0, weight=3)
#self.cameraframe.columnconfigure(0, weight=2)
#self.cameraframe.grid_propagate(0)
print "this is the height: ", self.graphframe.winfo_height()
print "this is the width: ", self.graphframe.winfo_width()
self.graphframe.mainWidgets() # Now set up the graph
#if self.toolbar.start_flag:
# mmc.startContinuousSequenceAcquisition(500)
# Count function for reading in with FakeCamera
self.count = 0
# Count function for resizing on first image acquisition
self.count2 = 0
# Lists for storing the data
self.timelist = []
self.temp = []
#self.pressure1 = []
self.pressure2 = []
self.pressure_avg = []
self.timelist = [np.nan] * (self.npts-1)
self.templist = [np.nan] * (self.npts-1)
#self.pressure1 = []
#self.pressure2 = []
self.pressureavglist = [np.nan] * (self.npts-1)
        self.T = np.nan  # no temperature yet; updated from queue2 in processIncoming
self.PAVG = np.nan
def sortdata(self,temppres):
#print temppres
#print "Length of the data = ", len(temppres)
T = np.nan
P1 = np.nan
P2 = np.nan
for i,data in enumerate(temppres):
#print "length of data = ", len(data)#val = ser.readline().strip('\n\r').split(';')
#print "this is what we are looking at",data
if len(data) > 0:
val = data[0].strip('\n\r').split(';')[:-1]
val = [el.split(':') for el in val]
if val[0][0] == "T":
temp = float(val[0][1])
#print "this is a temp = ", temp
T = temp
elif val[0][0] == "P1":
pres1 = float(val[0][1])
pres2 = float(val[1][1])
#print "this is a pressure = ", pres1
P1,P2 = pres1,pres2
return P1,P2,T
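    # Illustrative example of the serial strings this parser expects (an assumption
    # inferred from the splitting logic above, not taken from the Arduino sketch):
    #   "T:36.9;\r\n"            -> temperature reading
    #   "P1:79.8;P2:80.2;\r\n"   -> the two pressure transducer readings
    # e.g. sortdata([["P1:79.8;P2:80.2;\r\n"], ["T:36.9;\r\n"]]) returns (79.8, 80.2, 36.9).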
# This function will process all of the incoming images
def processIncoming(self):
"""Handle all messages currently in the queue, if any."""
if self.toolbar.record_flag:
if self.count == 0:
global start_time
global T, P1, P2
global acqrate
start_time=time.time()
# This is for loading in a video as an example!
try:
mmc.setProperty('Focus', "Position", self.count%500)
#print "the count is this:", self.count
#print "the image is this:", self.count%500
#print mmc.getProperty('Camera', 'Resolved path')
except:
pass
#Get the image
if self.queue.qsize( )>0:
msg = self.queue.get(0)
#msg = downscale_local_mean(msg, (2, 2),np.mean)
#msg = np.array(msg, dtype=np.uint8)
#print "binned shape: ", msg.shape
#Get the time
timenow = time.time() - start_time
self.toolbar.update_time(timenow)
# Get the arduino data
                # Need to set this to remember the previous contents.
if self.queue2.qsize( )>0:
(P1,P2,self.T) = self.queue2.get(0)
self.PAVG = (P1+P2)/2
else:
(P1,P2,T) = (np.nan,np.nan, np.nan)
#PAVG = 100.99
'''
#temppres = self.Arduino.getData()
#P1,P2,T = self.sortdata(temppres)
#print "Arduino data = ", P1,P2,T
PAVG = (P1+P2)/2
PAVG = 100.99
if self.queue2.qsize( )>0:
print "Queue2 size is this", self.queue2.qsize( )
temppres = self.queue2.get(0)
P1,P2,T = temppres
PAVG = (P1+P2)/2
else:
P1,P2,T = np.nan,np.nan, np.nan
PAVG = (P1+P2)/2
'''
#with self.timeit2("update data display"):
#self.toolbar.update_temp(T) #### CHANGE to T
#self.toolbar.update_pressure(P1,P2, self.PAVG)
self.toolbar.update_temp(self.T) #### CHANGE to T
self.toolbar.update_pressure(P1,P2, self.PAVG)
# Get the acquisition rate
self.toolbar.update_acq_rate(acqrate)
self.cameraframe.process_queue(msg,self.count2)
#with self.timeit2("some other stuff"):
#if self.count%10 == 0:
self.timelist.append(timenow)
self.templist.append(self.T)
#pressure1list.append(P1)
#pressure2list.append(P1)
self.pressureavglist.append(self.PAVG)
if len(self.timelist)>self.npts:
print "we are in here"
# Should use deque to do this.
d = deque(self.timelist)
d.popleft()
self.timelist = list(d)
d = deque(self.templist)
d.popleft()
self.templist = list(d)
d = deque(self.pressureavglist)
d.popleft()
self.pressureavglist = list(d)
'''
self.timelist.pop(0)
self.templist.pop(0)
#pressure1list.pop(0)
#pressure2list.pop(0)
self.pressureavglist.pop(0)
'''
print "Length of pressurelist = ", len(self.pressureavglist)
# Save the required data
                savedata = timenow, self.T, P1, P2, self.PAVG  # include P1 and P2 so the row matches the 5-column CSV header
self.writeToFile(savedata)
#Get points within the axis limits
xdata = filter(lambda x: x >= self.timelist[-1]-abs(self.toolbar.xlims[0]), self.timelist)
# Subtract off the latest time point, so that the current time is t = 0
xdata = map(lambda x: x - xdata[-1], xdata)
# Get the corresponding ydata points
ydata1 = self.pressureavglist[len(self.pressureavglist)-len(xdata)::]#[0::10]
#with self.timeit2("graph"):
if self.count%1 == 0:
self.graphframe.plot(xdata,ydata1,self.toolbar.xlims, self.toolbar.ylims, self.toolbar.xlims2, self.toolbar.ylims2)
else:
print "no image"
else:
msg = self.queue.get(0)
self.cameraframe.process_queue(msg,self.count2)
try:
msg2 = self.queue2.get(0)
except:
pass
self.count += 1
self.count2 += 1
#print self.count
#return
# Class for the main toolbar
class ToolBar(tk.Frame):
def __init__(self,parent):
tk.Frame.__init__(self, parent, height = 150)#, width=250, highlightthickness=2, highlightbackground="#111")
self.parent = parent
self.mainWidgets()
self.set_camera = setCamera(self)
self.ref_OD = None
#Functions that do things in the toolbar
def update_temp(self, temp):
# Updates the temperature widget
'''
self.temp_entry.config(state='normal')
self.temp_entry.delete(0, 'end')
self.temp_entry.insert(0, '%.2f' % temp)
self.temp_entry.config(state='DISABLED')
'''
temp_string = str(round(temp,2))
self.temp_contents.set(temp_string)
def update_pressure(self, P1,P2,PAvg):
# Update average pressure
'''
self.pressureavg_entry.config(state='normal')
self.pressureavg_entry.delete(0, 'end')
self.pressureavg_entry.insert(0, '%.2f' % PAvg)
self.pressureavg_entry.config(state='DISABLED')
'''
pres_string = str(round(PAvg,2))
self.pressure_contents.set(pres_string)
def update_time(self, time):
#Update the temperature widget
self.time_contents.set(str(datetime.timedelta(seconds=time))[:-4])
print "the time is: ", str(datetime.timedelta(seconds=time))[:-4]
'''
self.time_entry.config(state='normal')
self.time_entry.delete(0, END)
timestring = str(datetime.timedelta(seconds=time))[:-4]
self.time_entry.insert(0, timestring)
self.time_entry.config(state='DISABLED')
'''
def update_acq_rate(self, acqrate):
#Update the temperature widget
self.acq_rate_contents.set(str(round(acqrate,2)))
'''
self.acq_rate__entry.config(state='normal')
self.acq_rate__entry.delete(0, END)
acqratestring = str(round(acqrate,2))
self.acq_rate__entry.insert(0, acqratestring)
self.acq_rate__entry.config(state='DISABLED')
'''
# Function that changes the exposure on enter key
def update_exposure(self,event):
global prevcontents,exposure
try:
# Check if the exposure is within a suitable range
exp = self.contents.get()
if exp < 10:
exp = 10
elif exp > 500:
exp = 500
self.exposure_entry.delete(0, 'end')
self.exposure_entry.insert('0', exp)
if exp < 100:
tmb.showinfo("Warning", "Except for ThorCam, we recommend an exposure between 100 ms and 500ms")
print "Setting exposure to:", exp
self.parent.exposure = int(exp)
print mmc.getExposure()
prevcontents = exp
exposure = exp
except:
print "Exposure remaining at:", prevcontents
self.exposure_entry.delete(0, 'end')
self.exposure_entry.insert('0', prevcontents)
exposure = prevcontents
self.set_camera.set_exp(exposure)
print mmc.getExposure()
self.exposure_entry.delete(0, 'end')
self.exposure_entry.insert('0', mmc.getExposure())
    # Function that changes the number of graph points on enter key
def update_npts(self,event):
global npts_prevcontents,npts
try:
# Check if the exposure is within a suitable range
npts = self.npts_contents.get()
if npts < 10:
npts = 10
elif npts > 2400:
npts = 2400
self.npts_entry.delete(0, 'end')
self.npts_entry.insert('0', npts)
print "Setting npts to:", npts
self.parent.npts = int(npts)
npts_prevcontents = npts
npts = npts
except:
print "Exposure remaining at:", npts_prevcontents
self.npts_entry.delete(0, 'end')
self.npts_entry.insert('0', npts_prevcontents)
npts = npts_prevcontents
def update_rec_interval(self,event):
global rec_interval, rec_prevcontents
try: # Should check contents for int rather than try and catch exception
rec = self.rec_contents.get()
self.rec_interval_entry.delete(0, 'end')
self.rec_interval_entry.insert('0', rec)
self.parent.rec_interval = int(rec)
rec_prevcontents = rec
rec_interval = rec
except:
print "Record interval remaining at:", rec_prevcontents
self.rec_interval_entry.delete(0, 'end')
self.rec_interval_entry.insert('0', rec_prevcontents)
rec_interval = rec_prevcontents
# TO DO MAKE SURE THIS WORKS
    # Function that changes the um/pixel scale on enter key
def update_scale(self,event):
print "updating the scale..."
try:
# Check if the exposure is within a suitable range
scale = self.scale_contents.get()
print "the scale is:", scale
self.scale_entry.delete(0, 'end')
self.scale_entry.insert('0', scale)
print "Setting scale to:", scale
self.parent.multiplication_factor = scale
self.scale_prevcontents = scale
except:
print "Scale remaining at:", self.scale_prevcontents
self.scale_entry.delete(0, 'end')
self.scale_entry.insert('0', self.scale_prevcontents)
self.parent.multiplication_factor = self.scale_prevcontents
def mainWidgets(self):
self.toolbarview = ttk.Frame(root, relief=RIDGE)
self.toolbarview.grid(row=2,column=3,rowspan=2,sticky=N+S+E+W, pady=0)
# Tool bar groups
source_group = ttk.LabelFrame(self, text='Source', height=150, width=150)
source_group.pack(side=LEFT, anchor=N, padx=3, fill=Y)
settings_group = ttk.LabelFrame(self, text='Acquisition Settings', height=150, width=150)
settings_group.pack(side=LEFT, anchor=N, padx=3, fill=Y)
outer_diameter_group = ttk.LabelFrame(self, text='Graph Settings', height=150, width=150)
outer_diameter_group.pack(side=LEFT, anchor=N, padx=3, fill=Y)
acquisition_group = ttk.LabelFrame(self, text='Data acquisition', height=150, width=150)
acquisition_group.pack(side=LEFT, anchor=N, padx=3, fill=Y)
start_group = ttk.LabelFrame(self, text='Start/Stop', height=150, width=150)
start_group.pack(side=LEFT, anchor=N, padx=3, fill=Y)
# Source group (e.g. camera and files)
camera_label = ttk.Label(source_group, text = 'Camera:')
camera_label.grid(row=0, column=0, sticky=E)
path_label = ttk.Label(source_group, text = 'Path:')
path_label.grid(row=1, column=0, sticky=E)
save_label = ttk.Label(source_group, text = 'File:')
save_label.grid(row=2, column=0, sticky=E)
# Flag Start/stop group
self.start_flag = False
def set_cam(self):
if self.start_flag == False:
camera_label = self.variable.get()
self.set_camera.set(camera_label)
#self.BIN_entry.configure(state="enabled")
#self.FOV_entry.configure(state="enabled")
return
else:
print "You can't change the camera whilst acquiring images!"
return
self.camoptions = ["...","TIS_DCAM","Thorlabs","OpenCV", "FakeCamera", "uManagerCam"]
self.variable = StringVar()
self.variable.set(self.camoptions[0])
self.camera_entry = ttk.OptionMenu(source_group, self.variable,self.camoptions[0], *self.camoptions, command= lambda _: set_cam(self))
self.camera_entry.grid(row=0, column=1, pady=5)
global head, tail
head,tail = os.path.split(self.parent.filename.name)
path_entry = ttk.Entry(source_group, width=20)
path_entry.insert(0, head)
path_entry.config(state=DISABLED)
path_entry.grid(row=1, column=1, pady=5)
save_entry = ttk.Entry(source_group, width=20)
save_entry.insert(0, tail)
save_entry.config(state=DISABLED)
save_entry.grid(row=2, column=1, pady=5)
# Create list buttons for the field of view
self.test = 0
def ShowChoice():
if self.test == 0:
self.cam_x_dim = mmc.getImageWidth()
self.cam_y_dim = mmc.getImageHeight()
print "Cam dimensions: ", self.cam_x_dim, self.cam_y_dim
self.test = self.test + 1
mmc.stopSequenceAcquisition()
print(self.FOV_variable.get())
# Need to get the dimensions of the image.
if self.FOV_variable.get() == "w x h":
try:
mmc.setROI(0, 0, self.cam_x_dim, self.cam_y_dim)
mmc.startContinuousSequenceAcquisition(0)
except:
tmb.showinfo("Warning", "Not available for this camera")
self.FOV_variable.set(self.FOV_modes[0])
elif self.FOV_variable.get() == "w/2 x h/2":
try:
mmc.setROI(int(self.cam_x_dim/4), int(self.cam_y_dim/4), int(self.cam_x_dim/2), int(self.cam_y_dim/2))
mmc.startContinuousSequenceAcquisition(0)
except:
tmb.showinfo("Warning", "Not available for this camera")
self.FOV_variable.set(self.FOV_modes[0])
#self.FOV_selection = IntVar(value=1) # initializing the choice, i.e. Python
self.FOV_modes = [("w x h"), ("w/2 x h/2")]
self.FOV_modes_label = ttk.Label(source_group, text = 'FOV:')
self.FOV_modes_label.grid(row=3, column=0, sticky=E)
self.FOV_variable = StringVar()
self.FOV_variable.set(self.FOV_modes[0])
self.FOV_entry = ttk.OptionMenu(source_group, self.FOV_variable,self.FOV_modes[0], *self.FOV_modes, command= lambda _: ShowChoice())
self.FOV_entry.grid(row=3, column=1, pady=5)
self.FOV_entry.configure(state="disabled")
def SetBin():
self.test = 0
try:
cameraName = mmc.getCameraDevice()
print cameraName
self.test = 1
except:
pass
#mmc.stopSequenceAcquisition()
'''
print(self.BIN_variable.get())
# Need to get the dimensions of the image.
if self.BIN_variable.get() == "1x":
try:
mmc.setProperty(cameraName, "Binning", "1")
print "Binning set to 1X"
self.BIN_prevcontents = self.BIN_variable.get()
except:
self.BIN_variable.set(self.BIN_prevcontents)
mmc.startContinuousSequenceAcquisition(0)
elif self.BIN_variable.get() == "2x":
try:
mmc.setProperty(cameraName, "Binning", "2")
print "Binning set to 2X"
self.BIN_prevcontents = self.BIN_variable.get()
except:
self.BIN_variable.set(self.BIN_prevcontents)
elif self.BIN_variable.get() == "4x":
try:
mmc.setProperty(cameraName, "Binning", "4")
print "Binning set to 4X"
self.BIN_prevcontents = self.BIN_variable.get()
except:
self.BIN_variable.set(self.BIN_prevcontents)
'''
self.BIN_modes = [("1x"), ("2x"), ("4x")]
self.BIN_modes_label = ttk.Label(source_group, text = 'Binning:')
self.BIN_modes_label.grid(row=4, column=0, sticky=E)
self.BIN_variable = StringVar()
self.BIN_variable.set(self.BIN_modes[0])
self.BIN_prevcontents = self.BIN_variable.get()
self.BIN_entry = ttk.OptionMenu(source_group, self.BIN_variable,self.BIN_modes[0], *self.BIN_modes, command= lambda _: SetBin())
self.BIN_entry.grid(row=4, column=1, pady=5)
self.BIN_entry.configure(state="disabled")
#cameraName = mmc.getCameraDevice()
#mmc.setProperty(cameraName, "Binning", "1")
# Settings group (e.g. camera and files)
scale_label = ttk.Label(settings_group, text = 'um/pixel:')
scale_label.grid(row=0, column=0, sticky=E)
exposure_label = ttk.Label(settings_group, text = 'Exp (ms):')
exposure_label.grid(row=1, column=0, sticky=E)
acqrate_label = ttk.Label(settings_group, text = 'Acq rate (Hz):')
acqrate_label.grid(row=2, column=0, sticky=E)
rec_interval_label = ttk.Label(settings_group, text = 'Rec intvl (frames):')
rec_interval_label.grid(row=3, column=0, sticky=E)
# Scale settings
scale = self.parent.multiplication_factor
scalefloat = "%3.0f" % scale
self.scale_contents = DoubleVar()
self.scale_contents.set(scalefloat)
global scale_contents
self.scale_prevcontents = self.scale_contents.get()
self.scale_entry = ttk.Entry(settings_group, textvariable = self.scale_contents,width=20)
self.scale_entry.grid(row=0, column=1, pady=5)
self.scale_entry.bind('<Return>', self.update_scale)
# Exposure settings
exp = self.parent.exposure
expfloat = "%3.0f" % exp
self.contents = IntVar()
self.contents.set(int(exp))
global prevcontents
prevcontents = self.contents.get()
self.exposure_entry = ttk.Entry(settings_group, textvariable = self.contents,width=20)
self.exposure_entry.grid(row=1, column=1, pady=5)
self.exposure_entry.bind('<Return>', self.update_exposure)
# Acquisition rate settings
acq_rate = self.parent.acq_rate
acq_rate = "%3.0f" % acq_rate
self.acq_rate_contents = IntVar()
self.acq_rate_contents.set(int(acq_rate))
global acq_rate_prevcontents
acq_rate_prevcontents = self.acq_rate_contents.get()
self.acq_rate__entry = ttk.Entry(settings_group, textvariable = self.acq_rate_contents,width=20)
self.acq_rate__entry.grid(row=2, column=1, pady=5)
self.acq_rate__entry.configure(state="disabled")
#self.acq_rate_entry.bind('<Return>', self.acq_rate_exposure)
# Record interval settings
rec_interval = self.parent.rec_interval
self.rec_contents = IntVar()
self.rec_contents.set(int(rec_interval))
global rec_prevcontents
rec_prevcontents = self.rec_contents.get()
self.rec_interval_entry = ttk.Entry(settings_group, textvariable = self.rec_contents,width=20)
self.rec_interval_entry.grid(row=3, column=1, pady=5)
self.rec_interval_entry.bind('<Return>', self.update_rec_interval)
# Outer diameter group
# Function for the labels
def coord_label(window, text, row, column):
label=ttk.Label(window, text=text)
label.grid(row=row, column=column, padx = 5, pady=5, sticky=E)
# Function for the labels 2
def coord_entry(window, row, column, coord_label):
entry = ttk.Entry(window, width=8, textvariable=coord_label)
entry.config(state=NORMAL)
entry.grid(row=row, column=column, padx=5, pady=5, sticky=E)
root.focus_set()
entry.focus_set()
root.focus_force()
return entry
def set_button(window):
set_button = ttk.Button(window, text='Set', command= lambda: coord_limits(get_coords=True, default = False))
set_button.grid(row=6, column=1,columnspan=2, pady=5)
def coord_limits(get_coords, default):
if get_coords == True:
self.xlims = (self.x_min_label.get(),self.x_max_label.get())
self.ylims = (self.y_min_label.get(),self.y_max_label.get())
self.xlims2 = self.xlims
self.ylims2 = (self.y_min_label2.get(),self.y_max_label2.get())
self.parent.graphframe.update_scale()
return self.xlims, self.ylims, self.xlims2, self.ylims2
get_coords = False
else:
pass
#Pressure Values
# Set the initial xlimit values
self.x_min_label, self.x_max_label = IntVar(value=-120), IntVar(value=0)
self.x_min_default, self.x_max_default = self.x_min_label.get(),self.x_max_label.get()
# Set the initial xlimit values
self.y_min_label, self.y_max_label = IntVar(value=0), IntVar(value=200)
self.y_min_default, self.y_max_default = self.y_min_label.get(),self.y_max_label.get()
# Get the x and y limits
self.xlims = (self.x_min_label.get(),self.x_max_label.get())
self.ylims = (self.y_min_label.get(),self.y_max_label.get())
#Temp Values
# Set the initial xlimit values
self.x_min_label2, self.x_max_label2 = IntVar(value=-120), IntVar(value=0)
self.x_min_default2, self.x_max_default2 = self.x_min_label2.get(),self.x_max_label2.get()
# Set the initial xlimit values
self.y_min_label2, self.y_max_label2 = IntVar(value=0), IntVar(value=50)
self.y_min_default2, self.y_max_default2 = self.y_min_label2.get(),self.y_max_label2.get()
# Get the x and y limits
self.xlims2 = self.xlims
self.ylims2 = (self.y_min_label2.get(),self.y_max_label2.get())
coord_label(outer_diameter_group, 'Min', 1, 1)
coord_label(outer_diameter_group, 'Max', 1, 2)
coord_label(outer_diameter_group, 'Time:', 2, 0)
coord_label(outer_diameter_group, 'Pressure:', 3, 0)
P_xmin_entry = coord_entry(outer_diameter_group, 2, 1, self.x_min_label)
P_xmax_entry = coord_entry(outer_diameter_group, 2, 2, self.x_max_label)
P_ymin_entry = coord_entry(outer_diameter_group, 3, 1, self.y_min_label)
P_ymax_entry = coord_entry(outer_diameter_group, 3, 2, self.y_max_label)
coord_label(outer_diameter_group, 'Temp:', 4, 0)
T_ymin_entry = coord_entry(outer_diameter_group, 4, 1, self.y_min_label2)
T_ymax_entry = coord_entry(outer_diameter_group, 4, 2, self.y_max_label2)
npts_label = ttk.Label(outer_diameter_group, text = 'Graph pts:')
npts_label.grid(row=5, column=1, sticky=E)
# Exposure settings
npts = self.parent.npts
nptsfloat = "%3.0f" % exp
self.npts_contents = IntVar()
self.npts_contents.set(int(npts))
global npts_prevcontents
npts_prevcontents = self.npts_contents.get()
self.npts_entry = ttk.Entry(outer_diameter_group, textvariable = self.npts_contents,width=8)
self.npts_entry.grid(row=5, column=2, pady=5)
self.npts_entry.bind('<Return>', self.update_npts)
set_button(outer_diameter_group)
# acquisition_group
temp_label = ttk.Label(acquisition_group, text = 'Temp (oC):')
temp_label.grid(row=0, column=0, sticky=E)
pressureavg_label = ttk.Label(acquisition_group, text = 'Avg Pressure (mmHg):')
pressureavg_label.grid(row=1, column=0, sticky=E)
time_label = ttk.Label(acquisition_group, text = 'Time (hr:min:sec:msec):')
time_label.grid(row=4, column=0, sticky=E)
self.temp_contents = IntVar()
self.temp_contents.set("N/A")
self.temp_entry = ttk.Entry(acquisition_group, textvariable = self.temp_contents, width=10)
self.temp_entry.config(state=DISABLED)
self.temp_entry.grid(row=0, column=1, pady=0)
self.pressure_contents = IntVar()
self.pressure_contents.set("N/A")
self.pressureavg_entry = ttk.Entry(acquisition_group, textvariable = self.pressure_contents, width=10)
self.pressureavg_entry.config(state=DISABLED)
self.pressureavg_entry.grid(row=1, column=1, pady=0)
self.time_contents = IntVar()
self.time_contents.set(str(datetime.timedelta(seconds=time.time()-time.time()))[:-4])
self.time_entry = ttk.Entry(acquisition_group,textvariable = self.time_contents, width=10)
self.time_entry.config(state=DISABLED)
self.time_entry.grid(row=4, column=1, pady=0)
# Function that will start the image acquisition
def start_acq():
if self.variable.get() == "...":
tmb.showwarning(title="Warning", message = "You need to select a camera source!")
self.start_flag = False
else:
self.camera_entry.configure(state="disabled")
self.exposure_entry.configure(state="disabled")
self.scale_entry.configure(state="disabled")
self.rec_interval_entry.configure(state="disabled")
self.start_flag = True
self.record_video_checkBox.configure(state="disabled")
self.npts_entry.configure(state="disabled")
mmc.startContinuousSequenceAcquisition(exposure) #50
print "everything should be running"
return self.start_flag
# Function that will stop the image acquisition
def stop_acq():
self.camera_entry.configure(state="enabled")
self.exposure_entry.configure(state="enabled")
self.scale_entry.configure(state="enabled")
self.rec_interval_entry.configure(state="enabled")
self.start_flag = False
self.record_video_checkBox.configure(state="enabled")
mmc.stopSequenceAcquisition()
self.record_flag = False
#self.FOV_entry.configure(state="enabled")
return self.start_flag,self.record_flag
# Function that will start the data acquisition
self.record_flag = False
self.first_go = True
def record_data():
# On first press, set the global start time
if self.first_go == True:
global start_time
start_time=time.time()
self.first_go = False
if self.start_flag == True:
self.record_flag = True
mmc.clearCircularBuffer()
self.FOV_entry.configure(state="disabled")
print "Just set the record flag to: ", self.record_flag
return self.record_flag
'''
def stop_record_data():
self.record_flag = False
print "Just set the record flag to: ", self.record_flag
return self.record_flag
'''
def snapshot():
self.snapshot_flag = True
return self.snapshot_flag
start_button = ttk.Button(start_group, text='Start', command= lambda: start_acq())
start_button.grid(row=0, column=0, pady=0, sticky=N+S+E+W)
#console = tk.Button(master, text='Exit', command=self.close_app)
#console.pack( )
live_button = ttk.Button(start_group, text='Stop', command= lambda: stop_acq())
live_button.grid(row=1, column=0, pady=0, sticky=N+S+E+W)
record_button = ttk.Button(start_group, text='Track', command= lambda: record_data())
record_button.grid(row=3, column=0, pady=0, sticky=N+S+E+W)
self.snapshot_flag = False
snapshot_button = ttk.Button(start_group, text='Snapshot', command= lambda: snapshot())
snapshot_button.grid(row=4, column=0, pady=0, sticky=N+S+E+W)
#stop_record_button = ttk.Button(start_group, text='Stop tracking', command= lambda: stop_record_data())
#stop_record_button.grid(row=4, column=0, pady=5, sticky=N+S+E+W)
self.record_is_checked = IntVar()
self.record_video_label = ttk.Label(settings_group, text = 'Record video?')
self.record_video_label.grid(row=4, column=0, sticky=E)
self.record_video_checkBox = ttk.Checkbutton(settings_group, onvalue=1, offvalue=0, variable=self.record_is_checked)
self.record_video_checkBox.grid(row=4, column=1, columnspan=1, padx=5, pady=3, sticky=W)
class GraphFrame(tk.Frame):
min_x = 0
max_x = 10
def __init__(self,parent):
tk.Frame.__init__(self, parent)#, bg = "yellow")#, highlightthickness=2, highlightbackground="#111")
self.parent = parent
self.top = Frame()
self.top.update_idletasks()
self.n_points = 100
self.xlim1 = self.parent.toolbar.x_min_default # Outer
self.xlim2 = self.parent.toolbar.x_max_default # Outer
self.ylim1 = self.parent.toolbar.y_min_default # Outer
self.ylim2 = self.parent.toolbar.y_max_default # Outer
self.xlim3 = self.parent.toolbar.x_min_default2 # Inner
self.xlim4 = self.parent.toolbar.x_max_default2 # Inner
self.ylim3 = self.parent.toolbar.y_min_default2 # Inner
self.ylim4 = self.parent.toolbar.y_max_default2 # Inner
self.delta_i = 1
self.n_data = 100000000
self.update = 1
#self.mainWidgets()
def update_scale(self, blit=True): #### NEE
print "attempting to update a blitted axis"
self.graphview.ax1.set_xlim(self.parent.toolbar.xlims[0],self.parent.toolbar.xlims[1]) # Outer diameter
self.graphview.ax1.set_ylim(self.parent.toolbar.ylims[0],self.parent.toolbar.ylims[1]) # Outer diameter
#self.graphview.ax2.set_xlim(self.parent.toolbar.xlims2[0],self.parent.toolbar.xlims2[1]) # Outer diameter
#self.graphview.ax2.set_ylim(self.parent.toolbar.ylims2[0],self.parent.toolbar.ylims2[1])
self.graphview.figure.canvas.draw()
def mainWidgets(self,blit=True):
#
# We want to explicitly set the size of the graph so that we can blit
print "this is the height: ", self.parent.graphframe.winfo_height()
print "this is the width: ", self.parent.graphframe.winfo_width()
self.graphview = tk.Label(self)
#print "Graph width: ", self.graphview.winfo_width()
#print "Graph height: ", self.parent.graphframe.winfo_height()
default_figsize = (plt.rcParams.get('figure.figsize'))
print "default fig size = ", default_figsize
other_figsize = [self.parent.graphframe.winfo_width()/100,self.parent.graphframe.winfo_height()/100]
print other_figsize
self.graphview.figure,self.graphview.ax1 = plt.subplots(1,1, figsize=other_figsize)
#self.graphview.figure = pyplot.figure()
#self.graphview.ax1 = self.graphview.figure.add_subplot(211)
#self.graphview.ax2 = self.graphview.figure.add_subplot(212)
self.graphview.line, = self.graphview.ax1.plot([],[]) # initialize line to be drawn
#self.graphview.line2, = self.graphview.ax2.plot([],[])
self.graphview.ax1.set_xlim(self.xlim1,self.xlim2) # Outer
#self.graphview.ax2.set_xlim(self.xlim3,self.xlim4) # Inner
self.graphview.ax1.set_ylim(self.ylim1,self.ylim2) # Outer
#self.graphview.ax2.set_ylim(self.ylim3,self.ylim4) # Inner
self.graphview.ax1.set_xlabel('Time (s)', fontsize=14) # Outer diameter labels
self.graphview.ax1.set_ylabel('Pressure (mmHg)', fontsize=14) # Outer diameter labels
#self.graphview.ax2.set_xlabel('Time (s)', fontsize=14) # Inner diameter labels
#self.graphview.ax2.set_ylabel('Temp (oC)', fontsize=14) # Inner diameter labels
self.graphview.figure.canvas = FigureCanvasTkAgg(self.graphview.figure, self)
self.graphview.figure.canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=None, expand=False) ##### THIS IS THE PROBLEM WITH BLITTING HERE. WE NEED TO EXPLICITLY STATE THE FIGURE SIZE ABOVE!!
print "Graph width: ", self.graphview.figure.canvas.get_tk_widget().winfo_width()
self.graphview.figure.canvas.draw()
print "Graph width: ", self.graphview.figure.canvas.get_tk_widget().winfo_width()
if blit:
# Get the background
self.ax1background = self.graphview.figure.canvas.copy_from_bbox(self.graphview.ax1.bbox)
#self.ax2background = self.graphview.figure.canvas.copy_from_bbox(self.graphview.ax2.bbox)
print "bounding box = ", self.graphview.ax1.bbox.get_points()
bbarrray = self.graphview.ax1.bbox.get_points()
from matplotlib.transforms import Bbox
my_blit_box = Bbox(bbarrray)
#my_blit_box = Bbox(np.array([[x0,y0],[x1,y1]]))
my_blit_box = Bbox.from_bounds(bbarrray[0][0], bbarrray[0][1], (bbarrray[1][0]-bbarrray[0][0])*1.5, bbarrray[1][1]-bbarrray[0][1])
print "bounding box = ", my_blit_box.get_points()
self.ax1background = self.graphview.figure.canvas.copy_from_bbox(my_blit_box)
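            # Summary of the blitting set-up above: the empty axes are rendered once and
            # cached as a background bitmap; plot() below restores that bitmap, redraws only
            # the data line artist and blits the axes bbox, avoiding a full canvas redraw
            # on every frame.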
def plot(self, timelist1, ydata1, xlims,ylims, xlims2, ylims2, blit=True):
# Get the data
if len(timelist1)>1:
# Set the axis values
self.graphview.ax1.set_xlim(xlims[0],xlims[1]) # Outer diameter
self.graphview.ax1.set_ylim(ylims[0],ylims[1]) # Outer diameter
#self.graphview.ax2.set_xlim(xlims2[0],xlims2[1]) # Inner diameter
#self.graphview.ax2.set_ylim(ylims2[0],ylims2[1]) # Inner diameter
# If there are many data points, it is a waste of time to plot all
# of them once the screen resolution is reached,
# so when the maximum number of points is reached,
# halve the number of points plotted. This is repeated
# every time the number of data points has doubled.
'''
self.i = int(len(timelist1))
self.i2 = int(len(timelist2))
print "length =", self.i
if self.i > self.n_points :
self.n_points *= 2
# frequency of plotted points
self.delta_i *= self.n_points/self.i
self.update = max(self.delta_i, self.update)
print("updating n_rescale = ",\
self.n_points, self.update, self.delta_i)
'''
# drawing the canvas takes most of the CPU time, so only update plot
# every so often
if blit == False:
try:
self.graphview.ax1.lines.remove(self.graphview.line)
except:
pass
self.graphview.line, = self.graphview.ax1.plot(
timelist1,ydata1,
color="blue", linewidth = 3)
self.graphview.figure.canvas.draw()
self.graphview.figure.canvas.get_tk_widget().update_idletasks()
#self.after(2,self.plotter)
#self.graphview.figure.canvas.flush_events()
if blit == True:
self.graphview.figure.canvas.restore_region(self.ax1background)
#self.graphview.figure.canvas.restore_region(self.ax2background)
try:
self.graphview.ax1.lines.remove(self.graphview.line)
#self.graphview.ax2.lines.remove(self.graphview.line2)
except:
pass
self.graphview.line.set_xdata(timelist1)#[::-1][0::int(self.delta_i)][::-1])
self.graphview.line.set_ydata(ydata1)#[::-1][0::int(self.delta_i)][::-1])
self.graphview.line.set_color('blue')
#self.graphview.line2.set_xdata(timelist2[::-1][0::int(self.delta_i)][::-1])
#self.graphview.line2.set_ydata(ydata2[::-1][0::int(self.delta_i)][::-1])
#self.graphview.line2.set_color('red')
# redraw just the points
self.graphview.ax1.draw_artist(self.graphview.line)
#self.graphview.ax2.draw_artist(self.graphview.line2)
# fill in the axes rectangle
self.graphview.figure.canvas.blit(self.graphview.ax1.bbox)
#self.graphview.figure.canvas.blit(self.graphview.ax2.bbox)
#Example
return
class TableFrame(tk.Frame):
def __init__(self,parent):
tk.Frame.__init__(self, parent)#,highlightthickness=2,highlightbackground="#111")#, width=250, height = 300)#, highlightthickness=2, highlightbackground="#111")
self.parent = parent
self.mainWidgets()
def mainWidgets(self):
self.tableview = ttk.Frame(self)
self.tableview.grid(row=0, column=0, sticky=N+S+E+W)
def add_row():
Label = table_text_entry.get()
Time = (time.time() - start_time)
Time = float(Time)
Time = round(Time, 1)
table_1.insert('', 'end', values=(Time, Label, T, P1,P2, (P1+P2)/2)) #P1, P2
hello = ((Time, Label, T, P1,P2, (P1+P2)/2))
table_1.yview_moveto(1)
save_table(hello)
table_text_entry = StringVar()
max_diameter_text = StringVar()
def save_table(hello):
with open((self.parent.txt_file), 'ab') as g:
w=csv.writer(g, quoting=csv.QUOTE_ALL)
w.writerow(hello)
table_text_entry = StringVar()
max_diameter_text = StringVar()
table_2 = tk.Frame(self.tableview)
table_2.grid(row=0, column=0, columnspan=5, sticky=N+S+E+W)
table_label = ttk.Label(table_2, text = 'Note:')
table_label.grid(row=0, column=0)
table_entry = ttk.Entry(table_2, width=60, textvariable=table_text_entry )
table_entry.grid(row=0, column=1)
add_button = ttk.Button(table_2, text='Add', command=add_row)
add_button.grid(row=0, column=2)
table_1 = ttk.Treeview(self.tableview, show= 'headings')
table_1["columns"] = ('Time', 'Note', 'Temp', 'P1', 'P2', 'PAVG')
table_1.column('#0', width=50)
table_1.column('Time', width=50, stretch=True)
table_1.column('Note', width=300)
table_1.column('Temp', width=50)
table_1.column('P1', width=50)
table_1.column('P2', width=50)
table_1.column('PAVG', width=50)
table_1.heading('#1', text = 'Time')
table_1.heading('#2', text = 'Note')
table_1.heading('#3', text = 'Temp')
table_1.heading('#4', text = 'P1')
table_1.heading('#5', text = 'P2')
table_1.heading('#6', text = 'PAVG')
scrollbar = Scrollbar(self.tableview)
scrollbar.grid(row=1,column=2, sticky=NS)
scrollbar.config( command = table_1.yview )
table_1.grid(row=1, column=1, sticky=N+S+E+W)
class CameraFrame(tk.Frame):
def __init__(self,parent):
tk.Frame.__init__(self, parent)#, width=1000, height = 600)#, highlightthickness=2, highlightbackground="#111")
self.parent = parent
self.mainWidgets()
def mainWidgets(self):
# Get the max dimensions that the Canvas can be
self.maxheight = self.parent.graphframe.winfo_height() - self.parent.tableframe.winfo_height() - self.parent.status_bar.winfo_height()
self.maxwidth = self.parent.status_bar.winfo_width() - self.parent.graphframe.winfo_width()
# Set up the Canvas that we will show the image on
self.cameraview = tk.Canvas(self, width=self.maxwidth, height=self.maxheight, background='white')
self.cameraview.grid(row=2,column=2,sticky=N+S+E+W, pady=ypadding)
# ROI rectangle initialisation
self.rect = None
self.start_x = None
self.start_y = None
self.end_x = None
self.end_y = None
# Factors for scaling ROI to original image (which is scaled to fit canvas)
#self.delta_width = None
#self.delta_height = None
#self.scale_factor = None
# Bind events to mouse
#self.cameraview.bind("<ButtonPress-1>",self.on_button_press)
#self.cameraview.bind("<B1-Motion>",self.on_move_press)
#self.cameraview.bind("<ButtonRelease-1>",self.on_button_release)
# Define functions for mouse actions
def on_button_press(self, event):
if self.parent.toolbar.set_roi == True: # Only enable if we have just pressed the button
# Delete any old ROIs
found = event.widget.find_all()
for iid in found:
if event.widget.type(iid) == 'rectangle':
event.widget.delete(iid)
# Create the rectangle ROI
self.start_x = event.x
self.start_y = event.y
self.rect = self.cameraview.create_rectangle(self.start_x, self.start_y, self.start_x, self.start_y)
def on_move_press(self, event):
#Update the ROI when the mouse is dragged
if self.parent.toolbar.set_roi == True:
curX, curY = (event.x, event.y)
self.cameraview.coords(self.rect, self.start_x, self.start_y, curX, curY)
def on_button_release(self, event):
if self.parent.toolbar.set_roi == True: # Only enable if we have just pressed the button
self.end_x = event.x
self.end_y = event.y
self.parent.toolbar.set_roi = False
self.parent.toolbar.ROI_checkBox.state(['selected'])
self.parent.toolbar.ROI_is_checked.set(1)
pass
def rescale_frame(self,frame):
        # Scale a rectangle to fit inside another rectangle: work out
        # destinationwidth/sourcewidth and destinationheight/sourceheight,
        # then scale by the smaller of the two ratios.
width = frame.shape[1]
height = frame.shape[0]
#print "INFO"
#print width, height
#print self.maxwidth, self.maxheight
widthfactor = self.maxwidth / width
heightfactor = self.maxheight / height
if widthfactor < heightfactor:
self.scale_factor = widthfactor
else:
self.scale_factor = heightfactor
global scale_factor
scale_factor = self.scale_factor
#print scale_factor
width = int(frame.shape[1] * self.scale_factor)
height = int(frame.shape[0] * self.scale_factor)
#print "NEWDIMS"
#print width, height
self.delta_width = int((self.maxwidth - width)/2)
self.delta_height = int((self.maxheight - height)/2)
a = misc.imresize(frame.astype('uint8'), [height,width])
return a#cv2.resize(frame, (width, height), interpolation = cv2.INTER_NEAREST)
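    # Worked example (illustrative numbers only): fitting a 1280x960 frame into a
    # 640x600 canvas gives widthfactor = 0.5 and heightfactor = 0.625, so
    # scale_factor = 0.5, the frame is resized to 640x480, and delta_width/delta_height
    # become 0 and 60 (the margins used to centre the image on the canvas).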
def process_queue(self,img,count):
try:
img = img
imgc = img#cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
if self.parent.toolbar.record_flag:
if self.parent.toolbar.snapshot_flag == True:
print "Snapshot pressed"
timenow2 = int(timenow)
directory = os.path.join(head, 'Snaps\\')
if not os.path.exists(directory):
os.makedirs(directory)
gfxPath = os.path.join(directory, '%s_t=%ss_Result SNAPSHOT.tiff' % (os.path.splitext(tail)[0],timenow2))
cv2.imwrite(gfxPath,imgc)
self.parent.toolbar.snapshot_flag = False
else:
pass
                # Rescale the image so it doesn't take over the screen
imgc = self.rescale_frame(imgc)
#imgc = cv2.cvtColor(imgc, cv2.COLOR_BGR2RGBA)
prevImg = Image.fromarray(imgc)
imgtk = ImageTk.PhotoImage(image=prevImg)
#Show the image
self.imgtk = imgtk
self.image_on_canvas_ = self.cameraview.create_image(self.maxwidth/2, self.maxheight/2, anchor=CENTER,image=self.imgtk)
except:
pass
# Class for timing processes
class TimeIt():
from datetime import datetime
def __init__(self):
self.name = None
def __call__(self, name):
self.name = name
return self
def __enter__(self):
self.tic = self.datetime.now()
return self
def __exit__(self,name, *args, **kwargs):
print('process ' + self.name + ' runtime: {}'.format(self.datetime.now() - self.tic))##]]
class TimeIt2():
from datetime import datetime
def __init__(self):
self.name = None
def __call__(self, name):
self.name = name
return self
def __enter__(self):
self.tic = self.datetime.now()
return self
def __exit__(self,name, *args, **kwargs):
print('process ' + self.name + ' runtime: {}'.format(self.datetime.now() - self.tic))##]]
class TimeIt3():
from datetime import datetime
def millis_interval(self,start, end):
diff = end - start
millis = diff.days * 24 * 60 * 60 * 1000
millis += diff.seconds * 1000
millis += diff.microseconds / 1000
return millis
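    # Worked example (illustrative): a timedelta of 0 days, 1 s and 250000 us gives
    # 0*86400000 + 1*1000 + 250000/1000 = 1250 ms.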
def __init__(self):
self.name = None
self.delta_time = 0
def __call__(self, name):
self.name = name
return self
def __enter__(self):
self.tic = self.datetime.now()
return self
def __exit__(self,name, *args, **kwargs):
self.delta_time = self.millis_interval(self.tic,self.datetime.now())
print('process ' + self.name + ' runtime: {}'.format(self.datetime.now() - self.tic))##]]
return self
class setCamera(object):
def __init__(self,camera_label):
camera_label = camera_label
self.DEVICE = None
# Factors for scaling ROI to original image (which is scaled to fit canvas)
self.delta_width = 0
self.delta_height = 0
self.scale_factor = 1
def set_exp(self,exposure):
mmc.setExposure(exposure)
return
def set(self, camera_label):
# Set up the camera
mmc.reset()
mmc.enableStderrLog(False)
mmc.enableDebugLog(False)
mmc.setCircularBufferMemoryFootprint(10)# (in case of memory problems)
if camera_label == "TIS_DCAM":
print "Camera Selected: ", camera_label
try:
DEVICE = ["TIS_DCAM","TIS_DCAM","TIS_DCAM"] #camera properties - micromanager creates these in a file
mmc.loadDevice(*DEVICE)
mmc.initializeDevice(DEVICE[0])
mmc.setCameraDevice(DEVICE[0])
mmc.setProperty(DEVICE[0], 'Binning', 1)
mmc.setProperty(DEVICE[0], 'DeNoise', 2)
mmc.setExposure(exposure)
except:
tmb.showinfo("Warning", "Cannot connect to camera!")
if camera_label == "Thorlabs":
print "Camera Selected: ", camera_label
try:
DEVICE = ["ThorCam","ThorlabsUSBCamera","ThorCam"] #camera properties - micromanager creates these in a file
mmc.loadDevice(*DEVICE)
mmc.initializeDevice(DEVICE[0])
mmc.setCameraDevice(DEVICE[0])
try:
mmc.setProperty(DEVICE[0], 'Binning', "1")
except:
tmb.showinfo("Warning", "Cannot set binning on camera!")
try:
mmc.setProperty(DEVICE[0], 'HardwareGain', 1)
mmc.setProperty(DEVICE[0], 'PixelClockMHz', 25)
mmc.setProperty(DEVICE[0], 'PixelType', '8bit')
except:
tmb.showinfo("Warning", "Cannot set something on camera!")
mmc.setExposure(exposure)
except:
tmb.showinfo("Warning", "Cannot connect to camera!")
if camera_label == "OpenCV":
print "Camera Selected: ", camera_label
print os.path.join(VTD, 'OpenCV.cfg')
mmc.loadSystemConfiguration(os.path.join(VTD, 'OpenCV.cfg'))
print "loaded the config file."
print "exposure is: ", exposure
mmc.setProperty('OpenCVgrabber', 'PixelType', '8bit')
mmc.setExposure(exposure)
#except:
# tmb.showinfo("Warning", "Cannot connect to camera!")
if camera_label == "uManagerCam":
print "Camera Selected: ", camera_label
config_loaded = False
try:
mmc.loadSystemConfiguration('MMConfig.cfg')
print "loaded the config file."
config_loaded = True
except:
tmb.showinfo("Warning", "MMConfig.cfg not found in home directory!")
if config_loaded:
camera = mmc.getLoadedDevicesOfType(2)[0]
camera_properties = mmc.getDevicePropertyNames(camera)
print "exposure is: ", exposure
try:
mmc.setProperty(mmc.getLoadedDevicesOfType(2)[0], 'PixelType', '8bit')
except:
pass
try:
mmc.setProperty(mmc.getLoadedDevicesOfType(2)[0], 'Binning', 1)
mmc.setProperty(mmc.getLoadedDevicesOfType(2)[0], 'DeNoise', 2)
except:
pass
mmc.setExposure(exposure)
elif camera_label == "FakeCamera":
print "Camera Selected: ", camera_label
try:
DEVICE = ['Camera', 'FakeCamera', 'FakeCamera'] #camera properties - micromanager creates these in a file
mmc.loadDevice(*DEVICE)
mmc.initializeDevice(DEVICE[0])
mmc.setCameraDevice(DEVICE[0])
print "exposure is: ", exposure
mmc.setExposure(exposure)
mmc.setProperty(DEVICE[0], 'PixelType', '8bit')
mmc.setProperty(DEVICE[0], 'Path mask', 'SampleData\\TEST?{4.0}?.tif') #C:\\00-Code\\00 - VasoTracker\\
# To load in a sequence
DEVICE2 = ['Focus', 'DemoCamera', 'DStage']
mmc.loadDevice(*DEVICE2)
mmc.initializeDevice(DEVICE2[0])
mmc.setFocusDevice(DEVICE2[0])
mmc.setProperty(DEVICE2[0], "Position", 0)
except:
tmb.showinfo("Warning", "Cannot connect to camera!")
elif camera_label == "":
tmb.showinfo("Warning", "You need to select a camera source!")
return
# TODO SET BINNING PARAMETER
'''
try:
mmc.setProperty(DEVICE[0], "Binning", "2")
except:
pass
'''
##################################################
## Threaded client, check if there are images and process the images in seperate threads
##################################################
class ThreadedClient:
"""
Launch the main part of the GUI and the worker thread. periodicCall and
endApplication could reside in the GUI part, but putting them here
means that you have all the thread controls in a single place.
"""
def __init__(self, master):
"""
Start the GUI and the asynchronous threads. We are in the main
(original) thread of the application, which will later be used by
the GUI as well. We spawn a new thread for the worker (I/O).
"""
self.timeit3 = TimeIt3()
#threading.Thread.daemon = True # Make sure the thread terminates on exit
self.master = master
# Create the queue
self.queue = Queue.Queue( )
self.queue2 = Queue.Queue( )
# Set up the GUI part
self.gui = GuiPart(master, self.queue,self.queue2, self.endApplication)
self.Arduino = Arduino(self)
self.ports = self.Arduino.getports()
# Set up the thread to do asynchronous I/O
# More threads can also be created and used, if necessary
self.running = 1
self.thread1 = threading.Thread(target=self.workerThread1)
#self.thread1.deamon = True
self.thread1.start( )
self.thread2 = threading.Thread(target=self.workerThread2)
self.thread2.start( )
# Start the periodic call in the GUI to check if the queue contains
# anything
self.periodicCall( )
self.acqrate = None
def sortdata(self,temppres):
#print temppres
#print "Length of the data = ", len(temppres)
T = np.nan
P1 = np.nan
P2 = np.nan
for i,data in enumerate(temppres):
#print "length of data = ", len(data)#val = ser.readline().strip('\n\r').split(';')
#print "this is what we are looking at",data
if len(data) > 0:
val = data[0].strip('\n\r').split(';')[:-1]
val = [el.split(':') for el in val]
if val[0][0] == "T1":
temp = float(val[0][1])
#print "this is a temp = ", temp
T = temp
elif val[0][0] == "P1":
pres1 = float(val[0][1])
pres2 = float(val[1][1])
#print "this is a pressure = ", pres1
P1,P2 = pres1,pres2
return P1,P2,T
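    # Note: unlike GuiPart.sortdata above, this copy keys the temperature message on
    # "T1" rather than "T".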
def periodicCall(self):
"""
Check every 10 ms if there is something new in the queue.
"""
if self.running:
if self.queue.qsize( ) > 0:
#print "Queue size = ", self.queue.qsize( )
#print "Queue2 size = ", self.queue2.qsize( )
#with self.timeit3("Total"): # time for optimisation
self.gui.processIncoming()
#print "delta = ", self.timeit3.delta_time
#
#else:
# print "nothing in the queue"
# self.gui.processIncoming( self.timelist, self.temp, self.pressure1, self.pressure2, self.pressure_avg )
self.master.after(10, self.periodicCall)
'''
elif not self.running:
# This is the brutal stop of the system. You may want to do
# some cleanup before actually shutting it down.
print "brutal exit"
sys.exit(1)
#self.master.after(100-int(self.timeit3.delta_time)-1, self.periodicCall)
'''
def workerThread2(self):
while self.running:
temppres = self.Arduino.getData()
P1,P2,T = self.sortdata(temppres)
try:
binthis = self.queue2.get(0)
except:
pass
self.queue2.put((P1,P2,T))
#print "Queue size = ", self.queue2.qsize( )
time.sleep(0.5)
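        # The loop above polls the Arduino roughly every 0.5 s; queue2 is drained with
        # get(0) before put(), so it only ever holds the most recent (P1, P2, T) sample
        # for the GUI thread to pick up.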
def workerThread1(self):
"""
This is where we handle the asynchronous I/O. For example, it may be
a 'select( )'. One important thing to remember is that the thread has
to yield control pretty regularly, by select or otherwise.
"""
'''
self.timenow = 0
global acqrate
global timenow
'''
self.timenow = 0
while self.running:
#print "self.running = ",self.running
if(self.queue.empty()):
try: # Catch exception on closing the window!
                # Check if there is an image in the buffer, or an image acquisition in progress
#print "image remaining count = ", mmc.getRemainingImageCount()
if (mmc.getRemainingImageCount() > 0 or mmc.isSequenceRunning()):
#Check if there is an image in the buffer
#if mmc.getRemainingImageCount > 1:
# mmc.clearCircularBuffer()
if mmc.getRemainingImageCount() > 0:
#print "remaining images: ",mmc.getRemainingImageCount()
global timenow
timenow = time.time() - start_time #Get the time
global acqrate
acqrate = 1/(timenow - self.timenow)
self.timenow = timenow
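                            # acqrate is the instantaneous acquisition rate, 1 / (seconds
                            # between this frame and the previous one); e.g. frames 0.25 s
                            # apart give 4 Hz. It is displayed via update_acq_rate().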
img = mmc.getLastImage()# mmc.popNextImage() #mmc.getLastImage()## Get the next image. mmc.popNextImage() #
#print "original shape: ", img.shape
self.queue.put(img) # Put the image in the queue
# Save raw image:
if self.gui.toolbar.record_is_checked.get() == 1 and self.gui.count%self.gui.rec_interval == 0:
timenow2 = int(timenow)
directory = os.path.join(head, 'RawTiff\\')
if not os.path.exists(directory):
os.makedirs(directory)
gfxPath = os.path.join(directory, '%s_f=%s.tiff' % (os.path.splitext(tail)[0],str(self.gui.count).zfill(6)))
skimage.io.imsave(gfxPath, img)
else:
pass
#time.sleep(0.090)
else:
pass
else:
pass
except:
pass
"""
This is a function that cleans up on
exit. It should kill all processes properly.
"""
def endApplication(self):
print "we are exiting"
try:
mmc.stopSequenceAcquisition() # stop uManager acquisition
mmc.reset() # reset uManager
except:
pass
self.running = 0
#sys.exit()
root.quit()
root.destroy()
##################################################
## Splash screen
##################################################
rootsplash = tk.Tk()
rootsplash.overrideredirect(True)
width, height = rootsplash.winfo_screenwidth(), rootsplash.winfo_screenheight()
#print "Screen height is = ", height
#print "Screen width is = ", width
#Load in the splash screen image
image_file = "Bladder_SPLASH.gif"
image = Image.open(image_file)
image2 = PhotoImage(file=image_file)
# Scale to half screen, centered
imagewidth, imageheight = image2.width(), image2.height()
newimagewidth, newimageheight = int(np.floor(width*0.5)), int(np.floor(height*0.5))
image = image.resize((newimagewidth,newimageheight), Image.ANTIALIAS)
image = ImageTk.PhotoImage(image)
# Create and show for 3 seconds
rootsplash.geometry('%dx%d+%d+%d' % (newimagewidth, newimageheight, width/2 - newimagewidth/2, height/2 - newimageheight/2))
canvas = tk.Canvas(rootsplash, height=height, width=width, bg="darkgrey")
canvas.create_image(width/2 - newimagewidth/2, height/2 - newimageheight/2, image=image)
canvas.pack()
rootsplash.after(3000, rootsplash.destroy)
rootsplash.mainloop()
##################################################
## Main application loop
##################################################
if __name__ == "__main__":
# Initiate uManager
mmc = MMCorePy.CMMCore()
'''
# Trying to populate list of possible cameras
# Creates a weird error when failing to load a library.
# Loading in mmConfig.cfg file instead....
# Get list of available libraries:
for libname in mmc.getDeviceAdapterNames():
print libname
# Get list of available cameras:
available_cams = []
for lib in mmc.getDeviceAdapterNames():
try:
if mmc.getAvailableDeviceTypes(lib)[0] == 2:
available_cams.append(lib)
except:
pass
#print("'%s':\tThis library Won't work") % libname
print available_cams
'''
# Create the main window
rand = random.Random( )
root = tk.Tk( )
root.iconbitmap('bladder_ICON.ICO')
root.attributes('-topmost',True)
root.after_idle(root.attributes,'-topmost',False)
root.wm_title("BladderTracker") #Makes the title that will appear in the top left
root.state("zoomed")
root.resizable(0,0) # Remove ability to resize
#w, h = root.winfo_screenwidth(), root.winfo_screenheight() # Can set the window size using the screenwidth if we wish
#root.geometry("%dx%d+0+0" % (w, h))
#root.overrideredirect(1) #hides max min buttons and the big x
#root.wm_attributes('-fullscreen', 1)
# Go go go!
client = ThreadedClient(root)
root.mainloop( )
| []
| []
| [
"PATH"
]
| [] | ["PATH"] | python | 1 | 0 | |
pkg/controller/cloud/node_controller_test.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloud
import (
"context"
"errors"
"reflect"
"testing"
"time"
"k8s.io/api/core/v1"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/kubernetes/scheme"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/informers"
"k8s.io/client-go/tools/record"
"k8s.io/cloud-provider"
cloudproviderapi "k8s.io/cloud-provider/api"
fakecloud "k8s.io/cloud-provider/fake"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/assert"
"k8s.io/klog"
)
func TestEnsureNodeExistsByProviderID(t *testing.T) {
testCases := []struct {
testName string
node *v1.Node
expectedCalls []string
expectedNodeExists bool
hasInstanceID bool
existsByProviderID bool
nodeNameErr error
providerIDErr error
}{
{
testName: "node exists by provider id",
existsByProviderID: true,
providerIDErr: nil,
hasInstanceID: true,
nodeNameErr: errors.New("unimplemented"),
expectedCalls: []string{"instance-exists-by-provider-id"},
expectedNodeExists: true,
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
},
Spec: v1.NodeSpec{
ProviderID: "node0",
},
},
},
{
testName: "does not exist by provider id",
existsByProviderID: false,
providerIDErr: nil,
hasInstanceID: true,
nodeNameErr: errors.New("unimplemented"),
expectedCalls: []string{"instance-exists-by-provider-id"},
expectedNodeExists: false,
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
},
Spec: v1.NodeSpec{
ProviderID: "node0",
},
},
},
{
testName: "exists by instance id",
existsByProviderID: true,
providerIDErr: nil,
hasInstanceID: true,
nodeNameErr: nil,
expectedCalls: []string{"instance-id", "instance-exists-by-provider-id"},
expectedNodeExists: true,
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
},
},
},
{
testName: "does not exist by no instance id",
existsByProviderID: true,
providerIDErr: nil,
hasInstanceID: false,
nodeNameErr: cloudprovider.InstanceNotFound,
expectedCalls: []string{"instance-id"},
expectedNodeExists: false,
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
},
},
},
{
testName: "provider id returns error",
existsByProviderID: false,
providerIDErr: errors.New("unimplemented"),
hasInstanceID: true,
nodeNameErr: cloudprovider.InstanceNotFound,
expectedCalls: []string{"instance-exists-by-provider-id"},
expectedNodeExists: false,
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
},
Spec: v1.NodeSpec{
ProviderID: "node0",
},
},
},
}
for _, tc := range testCases {
t.Run(tc.testName, func(t *testing.T) {
fc := &fakecloud.Cloud{
ExistsByProviderID: tc.existsByProviderID,
Err: tc.nodeNameErr,
ErrByProviderID: tc.providerIDErr,
}
if tc.hasInstanceID {
fc.ExtID = map[types.NodeName]string{
types.NodeName(tc.node.Name): "provider-id://a",
}
}
instances, _ := fc.Instances()
exists, err := ensureNodeExistsByProviderID(context.TODO(), instances, tc.node)
assert.Equal(t, err, tc.providerIDErr)
assert.EqualValues(t, tc.expectedCalls, fc.Calls,
"expected cloud provider methods `%v` to be called but `%v` was called ",
tc.expectedCalls, fc.Calls)
assert.Equal(t, tc.expectedNodeExists, exists,
"expected exists to be `%t` but got `%t`",
tc.existsByProviderID, exists)
})
}
}
func Test_AddCloudNode(t *testing.T) {
tests := []struct {
name string
fakeCloud *fakecloud.Cloud
existingNode *v1.Node
updatedNode *v1.Node
}{
{
name: "node initialized with provider ID",
fakeCloud: &fakecloud.Cloud{
InstanceTypes: map[types.NodeName]string{
types.NodeName("node0"): "t1.micro",
},
ExtID: map[types.NodeName]string{
types.NodeName("node0"): "12345",
},
Addresses: []v1.NodeAddress{
{
Type: v1.NodeHostName,
Address: "node0.cloud.internal",
},
{
Type: v1.NodeInternalIP,
Address: "10.0.0.1",
},
{
Type: v1.NodeExternalIP,
Address: "132.143.154.163",
},
},
ErrByProviderID: nil,
Err: nil,
},
existingNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
Spec: v1.NodeSpec{
Taints: []v1.Taint{
{
Key: cloudproviderapi.TaintExternalCloudProvider,
Value: "true",
Effect: v1.TaintEffectNoSchedule,
},
},
},
},
updatedNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
Spec: v1.NodeSpec{
ProviderID: "fake://12345",
Taints: []v1.Taint{},
},
},
},
{
name: "node ignored",
fakeCloud: &fakecloud.Cloud{
InstanceTypes: map[types.NodeName]string{
types.NodeName("node0"): "t1.micro",
types.NodeName("fake://12345"): "t1.micro",
},
ExtID: map[types.NodeName]string{
types.NodeName("node0"): "12345",
},
Addresses: []v1.NodeAddress{
{
Type: v1.NodeHostName,
Address: "node0.cloud.internal",
},
{
Type: v1.NodeInternalIP,
Address: "10.0.0.1",
},
{
Type: v1.NodeExternalIP,
Address: "132.143.154.163",
},
},
Err: nil,
},
existingNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
},
updatedNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
},
},
{
name: "zone/region topology labels added",
fakeCloud: &fakecloud.Cloud{
InstanceTypes: map[types.NodeName]string{
types.NodeName("node0"): "t1.micro",
},
ExtID: map[types.NodeName]string{
types.NodeName("node0"): "12345",
},
Addresses: []v1.NodeAddress{
{
Type: v1.NodeHostName,
Address: "node0.cloud.internal",
},
{
Type: v1.NodeInternalIP,
Address: "10.0.0.1",
},
{
Type: v1.NodeExternalIP,
Address: "132.143.154.163",
},
},
Provider: "aws",
Zone: cloudprovider.Zone{
FailureDomain: "us-west-1a",
Region: "us-west",
},
Err: nil,
},
existingNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
Spec: v1.NodeSpec{
Taints: []v1.Taint{
{
Key: cloudproviderapi.TaintExternalCloudProvider,
Value: "true",
Effect: v1.TaintEffectNoSchedule,
},
},
},
},
updatedNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
"failure-domain.beta.kubernetes.io/region": "us-west",
"failure-domain.beta.kubernetes.io/zone": "us-west-1a",
"topology.kubernetes.io/region": "us-west",
"topology.kubernetes.io/zone": "us-west-1a",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
Spec: v1.NodeSpec{
ProviderID: "aws://12345",
Taints: []v1.Taint{},
},
},
},
{
name: "node addresses",
fakeCloud: &fakecloud.Cloud{
InstanceTypes: map[types.NodeName]string{},
ExtID: map[types.NodeName]string{
types.NodeName("node0"): "12345",
},
Addresses: []v1.NodeAddress{
{
Type: v1.NodeHostName,
Address: "node0.cloud.internal",
},
{
Type: v1.NodeInternalIP,
Address: "10.0.0.1",
},
{
Type: v1.NodeExternalIP,
Address: "132.143.154.163",
},
},
ExistsByProviderID: true,
Err: nil,
},
existingNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{},
},
Spec: v1.NodeSpec{
Taints: []v1.Taint{
{
Key: "ImproveCoverageTaint",
Value: "true",
Effect: v1.TaintEffectNoSchedule,
},
{
Key: cloudproviderapi.TaintExternalCloudProvider,
Value: "true",
Effect: v1.TaintEffectNoSchedule,
},
},
},
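    // Worked examples (hypothetical inputs): a = {3, 1, 2} is a single 3-cycle
    // (odd length), so the parity stays even and the answer is "YES"; a = {1, 3, 2}
    // contains a 2-cycle (even length), flipping the parity to odd, so the answer
    // is "NO". The array is sortable by Larry's rotations exactly when the
    // permutation is even.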
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
updatedNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
Spec: v1.NodeSpec{
ProviderID: "fake://12345",
Taints: []v1.Taint{
{
Key: "ImproveCoverageTaint",
Value: "true",
Effect: v1.TaintEffectNoSchedule,
},
},
},
Status: v1.NodeStatus{
Addresses: []v1.NodeAddress{
{
Type: v1.NodeHostName,
Address: "node0.cloud.internal",
},
{
Type: v1.NodeInternalIP,
Address: "10.0.0.1",
},
{
Type: v1.NodeExternalIP,
Address: "132.143.154.163",
},
},
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
},
{
name: "provided node IP address",
fakeCloud: &fakecloud.Cloud{
Addresses: []v1.NodeAddress{
{
Type: v1.NodeInternalIP,
Address: "10.0.0.1",
},
{
Type: v1.NodeExternalIP,
Address: "132.143.154.163",
},
},
ExistsByProviderID: true,
Err: nil,
},
existingNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Annotations: map[string]string{
kubeletapis.AnnotationProvidedIPAddr: "10.0.0.1",
},
},
Spec: v1.NodeSpec{
Taints: []v1.Taint{
{
Key: "ImproveCoverageTaint",
Value: "true",
Effect: v1.TaintEffectNoSchedule,
},
{
Key: cloudproviderapi.TaintExternalCloudProvider,
Value: "true",
Effect: v1.TaintEffectNoSchedule,
},
},
ProviderID: "node0.aws.12345",
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
Addresses: []v1.NodeAddress{
{
Type: v1.NodeHostName,
Address: "node0.cloud.internal",
},
},
},
},
updatedNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Annotations: map[string]string{
kubeletapis.AnnotationProvidedIPAddr: "10.0.0.1",
},
},
Spec: v1.NodeSpec{
Taints: []v1.Taint{
{
Key: "ImproveCoverageTaint",
Value: "true",
Effect: v1.TaintEffectNoSchedule,
},
},
ProviderID: "node0.aws.12345",
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
Addresses: []v1.NodeAddress{
{
Type: v1.NodeInternalIP,
Address: "10.0.0.1",
},
{
Type: v1.NodeExternalIP,
Address: "132.143.154.163",
},
{
Type: v1.NodeHostName,
Address: "node0.cloud.internal",
},
},
},
},
},
{
name: "provider ID already set",
fakeCloud: &fakecloud.Cloud{
InstanceTypes: map[types.NodeName]string{},
Addresses: []v1.NodeAddress{
{
Type: v1.NodeHostName,
Address: "node0.cloud.internal",
},
{
Type: v1.NodeInternalIP,
Address: "10.0.0.1",
},
{
Type: v1.NodeExternalIP,
Address: "132.143.154.163",
},
},
ExistsByProviderID: false,
Err: nil,
},
existingNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
Spec: v1.NodeSpec{
ProviderID: "test-provider-id",
Taints: []v1.Taint{
{
Key: "ImproveCoverageTaint",
Value: "true",
Effect: v1.TaintEffectNoSchedule,
},
{
Key: cloudproviderapi.TaintExternalCloudProvider,
Value: "true",
Effect: v1.TaintEffectNoSchedule,
},
},
},
},
updatedNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
Spec: v1.NodeSpec{
ProviderID: "test-provider-id",
Taints: []v1.Taint{
{
Key: "ImproveCoverageTaint",
Value: "true",
Effect: v1.TaintEffectNoSchedule,
},
},
},
},
},
{
name: "provider ID not implemented",
fakeCloud: &fakecloud.Cloud{
InstanceTypes: map[types.NodeName]string{},
Provider: "test",
ExtID: map[types.NodeName]string{},
ExtIDErr: map[types.NodeName]error{
types.NodeName("node0"): cloudprovider.NotImplemented,
},
Err: nil,
},
existingNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
Spec: v1.NodeSpec{
Taints: []v1.Taint{
{
Key: cloudproviderapi.TaintExternalCloudProvider,
Value: "true",
Effect: v1.TaintEffectNoSchedule,
},
},
},
},
updatedNode: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
Spec: v1.NodeSpec{
Taints: []v1.Taint{},
},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
clientset := fake.NewSimpleClientset(test.existingNode)
factory := informers.NewSharedInformerFactory(clientset, 0)
eventBroadcaster := record.NewBroadcaster()
cloudNodeController := &CloudNodeController{
kubeClient: clientset,
nodeInformer: factory.Core().V1().Nodes(),
cloud: test.fakeCloud,
recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}),
nodeStatusUpdateFrequency: 1 * time.Second,
}
eventBroadcaster.StartLogging(klog.Infof)
cloudNodeController.AddCloudNode(context.TODO(), test.existingNode)
updatedNode, err := clientset.CoreV1().Nodes().Get(context.TODO(), test.existingNode.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("error getting updated nodes: %v", err)
}
if !cmp.Equal(updatedNode, test.updatedNode) {
t.Errorf("unexpected node %s", cmp.Diff(updatedNode, test.updatedNode))
}
})
}
}
// This test checks that a node with the external cloud provider taint is cloudprovider initialized and
// the GCE route condition is added if cloudprovider is GCE
func TestGCECondition(t *testing.T) {
existingNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
Spec: v1.NodeSpec{
Taints: []v1.Taint{
{
Key: cloudproviderapi.TaintExternalCloudProvider,
Value: "true",
Effect: v1.TaintEffectNoSchedule,
},
},
},
}
fakeCloud := &fakecloud.Cloud{
InstanceTypes: map[types.NodeName]string{
types.NodeName("node0"): "t1.micro",
},
Addresses: []v1.NodeAddress{
{
Type: v1.NodeHostName,
Address: "node0.cloud.internal",
},
{
Type: v1.NodeInternalIP,
Address: "10.0.0.1",
},
{
Type: v1.NodeExternalIP,
Address: "132.143.154.163",
},
},
Provider: "gce",
Err: nil,
}
clientset := fake.NewSimpleClientset(existingNode)
factory := informers.NewSharedInformerFactory(clientset, 0)
eventBroadcaster := record.NewBroadcaster()
cloudNodeController := &CloudNodeController{
kubeClient: clientset,
nodeInformer: factory.Core().V1().Nodes(),
cloud: fakeCloud,
recorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}),
nodeStatusUpdateFrequency: 1 * time.Second,
}
eventBroadcaster.StartLogging(klog.Infof)
cloudNodeController.AddCloudNode(context.TODO(), existingNode)
updatedNode, err := clientset.CoreV1().Nodes().Get(context.TODO(), existingNode.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("error getting updated nodes: %v", err)
}
conditionAdded := false
for _, cond := range updatedNode.Status.Conditions {
if cond.Status == "True" && cond.Type == "NetworkUnavailable" && cond.Reason == "NoRouteCreated" {
conditionAdded = true
}
}
assert.True(t, conditionAdded, "Network Route Condition for GCE not added by external cloud initializer")
}
func Test_reconcileNodeLabels(t *testing.T) {
testcases := []struct {
name string
labels map[string]string
expectedLabels map[string]string
expectedErr error
}{
{
name: "requires reconcile",
labels: map[string]string{
v1.LabelZoneFailureDomain: "foo",
v1.LabelZoneRegion: "bar",
v1.LabelInstanceType: "the-best-type",
},
expectedLabels: map[string]string{
v1.LabelZoneFailureDomain: "foo",
v1.LabelZoneRegion: "bar",
v1.LabelZoneFailureDomainStable: "foo",
v1.LabelZoneRegionStable: "bar",
v1.LabelInstanceType: "the-best-type",
v1.LabelInstanceTypeStable: "the-best-type",
},
expectedErr: nil,
},
{
name: "doesn't require reconcile",
labels: map[string]string{
v1.LabelZoneFailureDomain: "foo",
v1.LabelZoneRegion: "bar",
v1.LabelZoneFailureDomainStable: "foo",
v1.LabelZoneRegionStable: "bar",
v1.LabelInstanceType: "the-best-type",
v1.LabelInstanceTypeStable: "the-best-type",
},
expectedLabels: map[string]string{
v1.LabelZoneFailureDomain: "foo",
v1.LabelZoneRegion: "bar",
v1.LabelZoneFailureDomainStable: "foo",
v1.LabelZoneRegionStable: "bar",
v1.LabelInstanceType: "the-best-type",
v1.LabelInstanceTypeStable: "the-best-type",
},
expectedErr: nil,
},
{
name: "require reconcile -- secondary labels are different from primary",
labels: map[string]string{
v1.LabelZoneFailureDomain: "foo",
v1.LabelZoneRegion: "bar",
v1.LabelZoneFailureDomainStable: "wrongfoo",
v1.LabelZoneRegionStable: "wrongbar",
v1.LabelInstanceType: "the-best-type",
v1.LabelInstanceTypeStable: "the-wrong-type",
},
expectedLabels: map[string]string{
v1.LabelZoneFailureDomain: "foo",
v1.LabelZoneRegion: "bar",
v1.LabelZoneFailureDomainStable: "foo",
v1.LabelZoneRegionStable: "bar",
v1.LabelInstanceType: "the-best-type",
v1.LabelInstanceTypeStable: "the-best-type",
},
expectedErr: nil,
},
}
for _, test := range testcases {
t.Run(test.name, func(t *testing.T) {
testNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node01",
Labels: test.labels,
},
}
clientset := fake.NewSimpleClientset(testNode)
factory := informers.NewSharedInformerFactory(clientset, 0)
cnc := &CloudNodeController{
kubeClient: clientset,
nodeInformer: factory.Core().V1().Nodes(),
}
// activate node informer
factory.Core().V1().Nodes().Informer()
factory.Start(nil)
factory.WaitForCacheSync(nil)
err := cnc.reconcileNodeLabels("node01")
if err != test.expectedErr {
t.Logf("actual err: %v", err)
t.Logf("expected err: %v", test.expectedErr)
t.Errorf("unexpected error")
}
actualNode, err := clientset.CoreV1().Nodes().Get(context.TODO(), "node01", metav1.GetOptions{})
if err != nil {
t.Fatalf("error getting updated node: %v", err)
}
if !reflect.DeepEqual(actualNode.Labels, test.expectedLabels) {
t.Logf("actual node labels: %v", actualNode.Labels)
t.Logf("expected node labels: %v", test.expectedLabels)
t.Errorf("updated node did not match expected node")
}
})
}
}
// Tests that node address changes are detected correctly
func TestNodeAddressesChangeDetected(t *testing.T) {
addressSet1 := []v1.NodeAddress{
{
Type: v1.NodeInternalIP,
Address: "10.0.0.1",
},
{
Type: v1.NodeExternalIP,
Address: "132.143.154.163",
},
}
addressSet2 := []v1.NodeAddress{
{
Type: v1.NodeInternalIP,
Address: "10.0.0.1",
},
{
Type: v1.NodeExternalIP,
Address: "132.143.154.163",
},
}
assert.False(t, nodeAddressesChangeDetected(addressSet1, addressSet2),
"Node address changes are not detected correctly")
addressSet1 = []v1.NodeAddress{
{
Type: v1.NodeInternalIP,
Address: "10.0.0.1",
},
{
Type: v1.NodeExternalIP,
Address: "132.143.154.164",
},
}
addressSet2 = []v1.NodeAddress{
{
Type: v1.NodeInternalIP,
Address: "10.0.0.1",
},
{
Type: v1.NodeExternalIP,
Address: "132.143.154.163",
},
}
assert.True(t, nodeAddressesChangeDetected(addressSet1, addressSet2),
"Node address changes are not detected correctly")
addressSet1 = []v1.NodeAddress{
{
Type: v1.NodeInternalIP,
Address: "10.0.0.1",
},
{
Type: v1.NodeExternalIP,
Address: "132.143.154.164",
},
{
Type: v1.NodeHostName,
Address: "hostname.zone.region.aws.test",
},
}
addressSet2 = []v1.NodeAddress{
{
Type: v1.NodeInternalIP,
Address: "10.0.0.1",
},
{
Type: v1.NodeExternalIP,
Address: "132.143.154.164",
},
}
assert.True(t, nodeAddressesChangeDetected(addressSet1, addressSet2),
"Node address changes are not detected correctly")
addressSet1 = []v1.NodeAddress{
{
Type: v1.NodeInternalIP,
Address: "10.0.0.1",
},
{
Type: v1.NodeExternalIP,
Address: "132.143.154.164",
},
}
addressSet2 = []v1.NodeAddress{
{
Type: v1.NodeInternalIP,
Address: "10.0.0.1",
},
{
Type: v1.NodeExternalIP,
Address: "132.143.154.164",
},
{
Type: v1.NodeHostName,
Address: "hostname.zone.region.aws.test",
},
}
assert.True(t, nodeAddressesChangeDetected(addressSet1, addressSet2),
"Node address changes are not detected correctly")
addressSet1 = []v1.NodeAddress{
{
Type: v1.NodeExternalIP,
Address: "10.0.0.1",
},
{
Type: v1.NodeInternalIP,
Address: "132.143.154.163",
},
}
addressSet2 = []v1.NodeAddress{
{
Type: v1.NodeInternalIP,
Address: "10.0.0.1",
},
{
Type: v1.NodeExternalIP,
Address: "132.143.154.163",
},
}
assert.True(t, nodeAddressesChangeDetected(addressSet1, addressSet2),
"Node address changes are not detected correctly")
}
// This test checks that a node with the external cloud provider taint is cloudprovider initialized and
// node addresses will not be updated when the node isn't present according to the cloudprovider
func TestNodeAddressesNotUpdate(t *testing.T) {
existingNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
Spec: v1.NodeSpec{
Taints: []v1.Taint{
{
Key: cloudproviderapi.TaintExternalCloudProvider,
Value: "true",
Effect: v1.TaintEffectNoSchedule,
},
},
},
}
clientset := fake.NewSimpleClientset(existingNode)
factory := informers.NewSharedInformerFactory(clientset, 0)
fakeCloud := &fakecloud.Cloud{
InstanceTypes: map[types.NodeName]string{},
Addresses: []v1.NodeAddress{
{
Type: v1.NodeHostName,
Address: "node0.cloud.internal",
},
{
Type: v1.NodeInternalIP,
Address: "10.0.0.1",
},
{
Type: v1.NodeExternalIP,
Address: "132.143.154.163",
},
},
ExistsByProviderID: false,
Err: nil,
}
cloudNodeController := &CloudNodeController{
kubeClient: clientset,
nodeInformer: factory.Core().V1().Nodes(),
cloud: fakeCloud,
}
cloudNodeController.updateNodeAddress(context.TODO(), existingNode, fakeCloud)
updatedNode, err := clientset.CoreV1().Nodes().Get(context.TODO(), existingNode.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("error getting updated nodes: %v", err)
}
if len(updatedNode.Status.Addresses) > 0 {
t.Errorf("Node addresses should not be updated")
}
}
| []
| []
| []
| [] | [] | go | null | null | null |
first_django/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'first_django.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
rat/main.go | package main
import (
"flag"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"os/user"
"strings"
)
func main() {
flag.Parse()
var message string
var err error
stat, err := os.Stdin.Stat()
if err != nil {
log.Fatal(err)
}
if stat.Mode()&os.ModeCharDevice == 0 {
stdin, err := ioutil.ReadAll(os.Stdin)
if err != nil {
log.Fatal(err)
}
message = string(stdin)
} else {
message = strings.Join(flag.Args(), " ")
}
params := url.Values{}
params.Set("message", message)
host, err := os.Hostname()
if err != nil {
log.Println(err)
}
user, err := user.Current()
var username string
if err != nil {
log.Println(err)
username = ""
} else {
username = user.Username
}
server := os.Getenv("RATSERVER")
if server == "" {
server = "localhost"
}
port := os.Getenv("RATPORT")
if port == "" {
port = "8000"
}
params.Set("host", host)
params.Set("user", username)
resp, err := http.Post("http://"+server+":"+port, "application/x-www-form-urlencoded", strings.NewReader(params.Encode()))
if err != nil {
log.Fatal(err)
}
if resp.StatusCode != 200 {
log.Println("status:", resp.Status)
}
}
| [
"\"RATSERVER\"",
"\"RATPORT\""
]
| []
| [
"RATPORT",
"RATSERVER"
]
| [] | ["RATPORT", "RATSERVER"] | go | 2 | 0 | |
src/saltext/vmware/utils/connect.py | # SPDX-License-Identifier: Apache-2.0
import json
import logging
import os
import ssl
import requests
from requests.auth import HTTPBasicAuth
from requests.exceptions import HTTPError
from requests.exceptions import RequestException
from requests.exceptions import SSLError
# pylint: disable=no-name-in-module
try:
from pyVim import connect
from pyVim.connect import GetSi, SmartConnect, Disconnect, GetStub, SoapStubAdapter
from pyVmomi import vim, vmodl, VmomiSupport
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
log = logging.getLogger(__name__)
def get_service_instance(opts=None, pillar=None):
"""
Connect to VMware service instance
opts
(optional) Any additional options.
pillar
(optional) If specified, allows for a dictionary of pillar data to be made
available to pillar and ext_pillar rendering. These pillar variables
will also override any variables of the same name in pillar or
ext_pillar.
Pillar Example:
.. code-block::
vmware_config:
host: 198.51.100.100
password: ****
user: @example.com
"""
ctx = ssl._create_unverified_context()
host = (
os.environ.get("VMWARE_CONFIG_HOST")
or opts.get("vmware_config", {}).get("host")
or pillar.get("vmware_config", {}).get("host")
)
password = (
os.environ.get("VMWARE_CONFIG_PASSWORD")
or opts.get("vmware_config", {}).get("password")
or pillar.get("vmware_config", {}).get("password")
)
user = (
os.environ.get("VMWARE_CONFIG_USER")
or opts.get("vmware_config", {}).get("user")
or pillar.get("vmware_config", {}).get("user")
)
config = {
"host": host,
"password": password,
"user": user,
}
service_instance = connect.SmartConnect(
host=config["host"],
user=config["user"],
pwd=config["password"],
sslContext=ctx,
)
return service_instance
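# Illustrative usage only -- the connection values below are hypothetical:
#
#     si = get_service_instance(
#         opts={"vmware_config": {"host": "vcenter.example.com",
#                                 "user": "[email protected]",
#                                 "password": "****"}},
#         pillar={},
#     )
#     content = si.RetrieveContent()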
def request(url, method, body=None, token=None, opts=None, pillar=None):
"""
Make a request to VMware rest api
url
url address for request.
method
Method for api request.
body
Body of the api request.
token
(optional) Api session token for api access, will create new token if not passed.
opts
(optional) Any additional options.
pillar
(optional) If specified, allows for a dictionary of pillar data to be made
available to pillar and ext_pillar rendering. These pillar variables
will also override any variables of the same name in pillar or
ext_pillar.
"""
host = (
os.environ.get("VMWARE_CONFIG_REST_API_HOST")
or opts.get("vmware_config", {}).get("rest_api_host")
or pillar.get("vmware_config", {}).get("rest_api_host")
or os.environ.get("VMWARE_CONFIG_HOST")
or opts.get("vmware_config", {}).get("host")
or pillar.get("vmware_config", {}).get("host")
)
cert = (
os.environ.get("VMWARE_CONFIG_REST_API_CERT")
or opts.get("vmware_config", {}).get("rest_api_cert")
or pillar.get("vmware_config", {}).get("rest_api_cert")
)
if not cert:
cert = False
if token is None:
user = (
os.environ.get("VMWARE_CONFIG_REST_API_USER")
or opts.get("vmware_config", {}).get("rest_api_user")
or pillar.get("vmware_config", {}).get("rest_api_user")
or os.environ.get("VMWARE_CONFIG_USER")
or opts.get("vmware_config", {}).get("user")
or pillar.get("vmware_config", {}).get("user")
)
password = (
os.environ.get("VMWARE_CONFIG_REST_API_PASSWORD")
or opts.get("vmware_config", {}).get("rest_api_password")
or pillar.get("vmware_config", {}).get("rest_api_password")
or os.environ.get("VMWARE_CONFIG_PASSWORD")
or opts.get("vmware_config", {}).get("password")
or pillar.get("vmware_config", {}).get("password")
)
token = _get_session(host, user, password, cert)
headers = {
"Accept": "application/json",
"content-Type": "application/json",
"vmware-api-session-id": token,
}
session = requests.Session()
response = session.request(
method=method,
url=f"https://{host}{url}",
headers=headers,
verify=cert,
params=None,
data=json.dumps(body),
)
return {"response": response, "token": token}
def _get_session(host, user, password, cert):
"""
Create REST API session
host
Host for api request.
user
User to create session token for subsequent requests.
password
Password to create session token for subsequent requests.
cert
certificate for ssl verification.
"""
headers = {"Accept": "application/json", "content-Type": "application/json"}
session = requests.Session()
if not cert:
cert = False
try:
response = session.request(
method="POST",
url=f"https://{host}/rest/com/vmware/cis/session",
headers=headers,
auth=HTTPBasicAuth(user, password),
verify=cert,
params=None,
data=json.dumps(None),
)
response.raise_for_status()
json_response = response.json()
return json_response["value"]
except HTTPError as e:
log.error(e)
result = {"error": "Error occurred while calling vCenter API."}
# if response contains json, extract error message from it
if e.response.text:
log.error(f"Response from vCenter {e.response.text}")
try:
error_json = e.response.json()
if error_json["error_message"]:
result["error"] = e.response.json()["error_message"]
except ValueError:
log.error(
"Couldn't parse the response as json. Returning response text as error message"
)
result["error"] = e.response.text
return result
except SSLError as se:
log.error(se)
result = {"error": "SSL Error occurred while calling vCenter API."}
return result
except RequestException as re:
log.error(re)
result = {"error": "Error occurred while calling vCenter API."}
return result
| []
| []
| [
"VMWARE_CONFIG_HOST",
"VMWARE_CONFIG_PASSWORD",
"VMWARE_CONFIG_REST_API_HOST",
"VMWARE_CONFIG_REST_API_CERT",
"VMWARE_CONFIG_REST_API_USER",
"VMWARE_CONFIG_USER",
"VMWARE_CONFIG_REST_API_PASSWORD"
]
| [] | ["VMWARE_CONFIG_HOST", "VMWARE_CONFIG_PASSWORD", "VMWARE_CONFIG_REST_API_HOST", "VMWARE_CONFIG_REST_API_CERT", "VMWARE_CONFIG_REST_API_USER", "VMWARE_CONFIG_USER", "VMWARE_CONFIG_REST_API_PASSWORD"] | python | 7 | 0 | |
Algorithms/Implementation/ParityPermutation.java | package Algorithms.Implementation;
import java.io.*;
import java.math.*;
import java.security.*;
import java.text.*;
import java.util.*;
import java.util.concurrent.*;
import java.util.regex.*;
/**
* HackerRank Algorithms Implementation 63
* https://www.hackerrank.com/challenges/larrys-array/problem
* @author Hasol
*/
public class ParityPermutation {
// Complete the larrysArray function below.
static String larrysArray(int[] a) {
boolean[] u = new boolean[a.length];
ArrayList<ArrayList<Integer>> l = new ArrayList<>();
for (int i=0; i<a.length; i++)
if (!u[i]) {
ArrayList<Integer> n = new ArrayList<>();
int j = i;
do {
j = a[j]-1;
u[j] = true;
n.add(j);
} while (i != j);
l.add(n);
}
boolean p = true;
for (ArrayList<Integer> m: l)
if (m.size()%2 == 0)
p = !p;
return p ? "YES" : "NO";
}
static final Scanner scanner = new Scanner(System.in);
public static void main(String[] args) throws IOException {
BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH")));
int t = scanner.nextInt();
scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");
for (int tItr=0; tItr<t; tItr++) {
int n = scanner.nextInt();
scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");
int[] a = new int[n];
String[] aItems = scanner.nextLine().split(" ");
scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");
for (int i=0; i<n; i++)
a[i] = Integer.parseInt(aItems[i]);
String result = larrysArray(a);
bufferedWriter.write(result);
bufferedWriter.newLine();
}
bufferedWriter.close();
scanner.close();
}
}
| [
"\"OUTPUT_PATH\""
]
| []
| [
"OUTPUT_PATH"
]
| [] | ["OUTPUT_PATH"] | java | 1 | 0 | |
src/main/java/com/bandwidth/iris/sdk/examples/Example.java | package com.bandwidth.iris.sdk.examples;
import com.bandwidth.iris.sdk.IrisClient;
import com.bandwidth.iris.sdk.model.*;
import java.io.File;
import java.util.*;
public class Example {
private static Site SITE = null;
public static void main(String[] args) {
try {
printCreateSite();
printCreateAndGetSipPeer();
printCreateLnpOrder();
printAvailableNpaNxx();
printAvailableNumbers();
printCoveredRateCenters();
printCities();
printLnpChecker();
printCreateAndGetOrder();
printRateCenters();
printGetTnDetails();
printGetTns();
} catch (Exception e) {
System.out.println("Got error: " + e.getMessage());
}
}
private static void printAvailableNpaNxx() throws Exception {
printMessage("Starting print search available Npa Nxx");
Map<String, Object> query = new HashMap<String, Object>();
query.put("areaCode", "805");
query.put("quantity", 2);
List<AvailableNpaNxx> availableNpaNxxList = AvailableNpaNxx.list(getClient(), query);
for (AvailableNpaNxx npaNxx : availableNpaNxxList) {
System.out.println(
String.format("City: %s | Npa: %s | Nxx: %s | State: %s ", npaNxx.getCity(), npaNxx.getNpa(),
npaNxx.getNxx(), npaNxx.getState())
);
}
printMessage("Ending print search available Npa Nxx");
}
private static void printAvailableNumbers() throws Exception {
printMessage("Starting print search results");
Map<String, Object> query = new HashMap<String, Object>();
query.put("areaCode", "205");
query.put("enableTNDetail", true);
query.put("quantity", 2);
List<TelephoneNumberDetail> numbers = (List<TelephoneNumberDetail>) AvailableNumbers.search(
getClient(), query);
for (TelephoneNumberDetail number : numbers) {
System.out.println(
String.format("Full number: %s : Rate Center: %s", number.getFullNumber(), number.getRateCenter()));
}
printMessage("Ending print search results");
}
private static void printCities() throws Exception {
printMessage("Starting print cities");
Map<String, Object> query = new HashMap<String, Object>();
query.put("state", "NC");
List<City> cities = City.list(getClient(), query);
for (City c : cities) {
System.out.println(String.format("Name: %s | RcAbbreviation: %s", c.getName(), c.getRcAbbreviation()));
}
printMessage("Ending print cities");
}
private static void printCoveredRateCenters() throws Exception {
printMessage("Starting print covered Rate Centers");
Map<String, Object> query = new HashMap<String, Object>();
query.put("state", "NC");
List<CoveredRateCenter> rateCenters = CoveredRateCenter.list(getClient(), query);
for (CoveredRateCenter rc : rateCenters) {
System.out.println(String.format("Name: %s | Abbreviation: %s | State: %s | Lata: %s", rc.getName(),
rc.getAbbreviation(), rc.getState(), rc.getLata()));
}
printMessage("Ending print covered Rate Centers");
}
private static void printCreateSite() throws Exception {
printMessage("Starting print sites");
Address a = new Address();
a.setHouseNumber("123");
a.setStreetName("Anywhere St");
a.setCity("Raleigh");
a.setState("NC");
a.setZip("27609");
a.setAddressType("Service");
Site s = new Site();
s.setName("A new site " + new Random().nextInt());
s.setDescription("Test site from Java Lib");
s.setAddress(a);
s = Site.create(getClient(), s);
SITE = s;
printMessage("Ending print sites");
}
private static void printLnpChecker() throws Exception {
printMessage("Starting print LNP Check");
NumberPortabilityRequest request = new NumberPortabilityRequest();
request.getTnList().add("9195551212");
NumberPortabilityResponse response = LnpChecker.checkLnp(getClient(), request, "true");
if (response.getPortableNumbers().size() > 0) {
System.out.println(response.getPortableNumbers().get(0));
} else {
System.out.println("This number is not portable. Try another one");
}
printMessage("Ending print LNP Check");
}
private static void printCreateAndGetOrder() throws Exception {
printMessage("Starting create Order");
Order o = new Order();
o.setSiteId(SITE.getId());
o.setName("A New Order");
ExistingTelephoneNumberOrderType existingTelephoneNumberOrderType = new ExistingTelephoneNumberOrderType();
existingTelephoneNumberOrderType.getTelephoneNumberList().add("2052865046");
o.setExistingTelephoneNumberOrderType(existingTelephoneNumberOrderType);
OrderResponse createdOrder = Order.create(getClient(), o);
System.out.println(String.format("Created order ID: %s", createdOrder.getOrder().getid()));
OrderResponse theSameOrder = Order.get(getClient(), createdOrder.getOrder().getid());
System.out.println("Successfully retrieved order, order status: " + theSameOrder.getOrderStatus());
printMessage("Ending create Order");
}
private static void printCreateLnpOrder() throws Exception {
LnpOrder order = new LnpOrder();
order.setSiteId(getFirstSite().getId());
order.setPeerId(getFirstSipPeer().getPeerId());
order.setBillingTelephoneNumber("9195551212");
Subscriber s = new Subscriber();
s.setSubscriberType("BUSINESS");
s.setBusinessName("Company");
ServiceAddress serviceAddress = new ServiceAddress();
serviceAddress.setHouseNumber("123");
serviceAddress.setStreetName("Anywhere St");
serviceAddress.setCity("Raleigh");
serviceAddress.setStateCode("NC");
serviceAddress.setZip("27609");
s.setServiceAddress(serviceAddress);
order.setSubscriber(s);
order.setLoaAuthorizingPerson("Joe Blow");
order.getListOfPhoneNumbers().add("9195551212");
order = LnpOrder.create(getClient(), order);
File file = File.createTempFile("file", "pdf");
order.uploadLoa(file, LoaFileType.PDF);
System.out.println("Created new Lnp Order: " + order.getOrderId());
}
private static void printRateCenters() throws Exception {
printMessage("Starting printRateCenters");
Map<String, Object> query = new HashMap<String, Object>();
query.put("state", "NC");
query.put("available", true);
List<RateCenter> rateCenters = RateCenter.list(getClient(), query);
for (RateCenter rc : rateCenters) {
System.out.println(String.format("Name: %s | Abbreviation: %s", rc.getName(), rc.getAbbreviation()));
}
printMessage("Ending printRateCenters");
}
private static void printCreateAndGetSipPeer() throws Exception {
printMessage("Starting create Sip Peer");
Host host = new Host();
host.setHostName("1.1.1.1");
TerminationHost termHost = new TerminationHost();
termHost.setHostName("1.1.2.3");
termHost.setPort("5060");
SipPeer peer = new SipPeer();
peer.setPeerName("A new Sip Peer " + new Random().nextInt());
peer.setDefaultPeer(true);
peer.setShortMessagingProtocol("SMPP");
peer.setSiteId(getFirstSite().getId());
peer.getVoiceHosts().add(host);
peer.getSmsHosts().add(host);
peer.getTerminationHosts().add(termHost);
peer = SipPeer.create(getClient(), SITE.getId(), peer);
System.out.println("Created SipPeer: " + peer.getPeerId());
printMessage("Ending create and get sip peer");
}
private static void printGetTnDetails() throws Exception {
printMessage("Starting get Tn Detail");
TelephoneNumberDetail detail = Tns.getTnDetails(getClient(), "9195551212");
if (detail.getFullNumber() != null) {
System.out.println("Got TN Detail for : " + detail.getFullNumber());
}
printMessage("Ending get TN detail");
}
private static void printGetTns() throws Exception {
printMessage("Starting get TNs");
Map<String, Object> query = new HashMap<String, Object>();
query.put("state", "NC");
TelephoneNumbersResponse response = Tns.list(getClient(), query);
System.out.println(String.format("Got %s numbers for this list", response.getTelephoneNumberCount()));
printMessage("Ending get TNs");
}
private static Site getFirstSite() throws Exception {
return Site.list(getClient()).get(0);
}
private static SipPeer getFirstSipPeer() throws Exception {
return SipPeer.list(getClient(), getFirstSite().getId()).get(0);
}
private static IrisClient getClient() {
Map<String, String> env = System.getenv();
String accountId = env.get("BW_ACCOUNT_ID");
String username = env.get("BW_USERNAME");
String password = env.get("BW_PASSWORD");
String url = env.get("BANDWIDTH_IRIS_URL");
return new IrisClient(url, accountId, username, password, "v1.0");
}
private static void printMessage(String message) {
System.out.println("********** " + message + "**********");
}
}
| []
| []
| []
| [] | [] | java | 0 | 0 | |
sdk/cognitivelanguage/azure-ai-language-questionanswering/samples/sample_chat.py | # coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""
FILE: sample_chat.py
DESCRIPTION:
This sample demonstrates how to ask a follow-up question (chit-chat) from a knowledge base.
USAGE:
python sample_chat.py
Set the environment variables with your own values before running the sample:
1) AZURE_QUESTIONANSWERING_ENDPOINT - the endpoint to your QuestionAnswering resource.
2) AZURE_QUESTIONANSWERING_KEY - your QuestionAnswering API key.
3) AZURE_QUESTIONANSWERING_PROJECT - the name of a knowledge base project.
"""
def sample_chit_chat():
# [START chit_chat]
import os
from azure.core.credentials import AzureKeyCredential
from azure.ai.language.questionanswering import QuestionAnsweringClient
from azure.ai.language.questionanswering import models as qna
endpoint = os.environ["AZURE_QUESTIONANSWERING_ENDPOINT"]
key = os.environ["AZURE_QUESTIONANSWERING_KEY"]
knowledge_base_project = os.environ["AZURE_QUESTIONANSWERING_PROJECT"]
client = QuestionAnsweringClient(endpoint, AzureKeyCredential(key))
with client:
first_question = qna.QueryKnowledgeBaseOptions(
question="How long should my Surface battery last?",
top=3,
confidence_score_threshold=0.2,
include_unstructured_sources=True,
answer_span_request=qna.AnswerSpanRequest(
enable=True,
confidence_score_threshold=0.2,
top_answers_with_span=1
),
)
output = client.query_knowledge_base(
first_question,
project_name=knowledge_base_project,
deployment_name="test"
)
best_candidate = [a for a in output.answers if a.confidence_score > 0.9][0]
print(u"Q: {}".format(first_question.question))
print(u"A: {}".format(best_candidate.answer))
followup_question = qna.QueryKnowledgeBaseOptions(
question="How long it takes to charge Surface?",
top=3,
confidence_score_threshold=0.2,
context=qna.KnowledgeBaseAnswerRequestContext(
previous_user_query="How long should my Surface battery last?",
previous_qna_id=best_candidate.id
),
answer_span_request=qna.AnswerSpanRequest(
enable=True,
confidence_score_threshold=0.2,
top_answers_with_span=1
),
include_unstructured_sources=True
)
output = client.query_knowledge_base(
followup_question,
project_name=knowledge_base_project,
deployment_name="test"
)
print(u"Q: {}".format(followup_question.question))
print(u"A: {}".format(output.answers[0].answer))
# [END chit_chat]
if __name__ == '__main__':
    sample_chit_chat()
| []
| []
| [
"AZURE_QUESTIONANSWERING_KEY",
"AZURE_QUESTIONANSWERING_ENDPOINT",
"AZURE_QUESTIONANSWERING_PROJECT"
]
| [] | ["AZURE_QUESTIONANSWERING_KEY", "AZURE_QUESTIONANSWERING_ENDPOINT", "AZURE_QUESTIONANSWERING_PROJECT"] | python | 3 | 0 | |
mqtrigger/messageQueue/asq.go | /*
Copyright 2017 The Fission Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package messageQueue
import (
"bytes"
"encoding/base64"
"fmt"
"io/ioutil"
"net/http"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/Azure/azure-sdk-for-go/storage"
"github.com/pkg/errors"
"go.uber.org/zap"
"github.com/fission/fission"
"github.com/fission/fission/crd"
)
// TODO: some of these constants should probably be environment variables
const (
// AzureQueuePollingInterval is the polling interval (default is 1 minute).
AzureQueuePollingInterval = time.Minute
// AzureQueueRetryLimit is the limit for attempts to retry invoking a function.
AzureQueueRetryLimit = 3
// AzureMessageFetchCount is the number of messages to fetch at a time.
AzureMessageFetchCount = 10
// AzureMessageVisibilityTimeout is the visibility timeout for dequeued messages.
AzureMessageVisibilityTimeout = time.Minute
// AzurePoisonQueueSuffix is the suffix used for poison queues.
AzurePoisonQueueSuffix = "-poison"
// AzureFunctionInvocationTimeout is the amount of time to wait for a triggered function to execute.
AzureFunctionInvocationTimeout = 10 * time.Minute
)
// AzureStorageConnection represents an Azure storage connection.
type AzureStorageConnection struct {
logger *zap.Logger
routerURL string
service AzureQueueService
httpClient AzureHTTPClient
}
// AzureQueueSubscription represents an Azure storage message queue subscription.
type AzureQueueSubscription struct {
queue AzureQueue
queueName string
outputQueueName string
functionURL string
contentType string
unsubscribe chan bool
done chan bool
}
// AzureQueueService is the interface that abstracts the Azure storage service.
// This exists to enable unit testing.
type AzureQueueService interface {
GetQueue(name string) AzureQueue
}
// AzureQueue is the interface that abstracts Azure storage queues.
// This exists to enable unit testing.
type AzureQueue interface {
Create(options *storage.QueueServiceOptions) error
NewMessage(text string) AzureMessage
GetMessages(options *storage.GetMessagesOptions) ([]AzureMessage, error)
}
// AzureMessage is the interface that abstracts Azure storage messages.
// This exists to enable unit testing.
type AzureMessage interface {
Bytes() []byte
Put(options *storage.PutMessageOptions) error
Delete(options *storage.QueueServiceOptions) error
}
// AzureHTTPClient is the interface that abstracts HTTP requests made by the trigger.
// This exists to enable unit testing.
type AzureHTTPClient interface {
Do(req *http.Request) (*http.Response, error)
}
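// A minimal test double satisfying AzureHTTPClient (illustrative only; the name
// fakeHTTPClient is hypothetical and not part of this package):
//
//	type fakeHTTPClient struct{ resp *http.Response }
//
//	func (f fakeHTTPClient) Do(req *http.Request) (*http.Response, error) {
//		return f.resp, nil
//	}
//
// A unit test can construct an AzureStorageConnection whose httpClient field is a
// fakeHTTPClient and assert on how the trigger handles canned responses.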
type azureQueueService struct {
service storage.QueueServiceClient
}
func (qs azureQueueService) GetQueue(name string) AzureQueue {
return azureQueue{
ref: qs.service.GetQueueReference(name),
}
}
type azureQueue struct {
ref *storage.Queue
}
func (qr azureQueue) Create(options *storage.QueueServiceOptions) error {
exists, err := qr.ref.Exists()
if err != nil {
return err
}
if exists {
return nil
}
return qr.ref.Create(options)
}
func (qr azureQueue) NewMessage(text string) AzureMessage {
return azureMessage{
ref: qr.ref.GetMessageReference(text),
bytes: []byte(text),
}
}
func (qr azureQueue) GetMessages(options *storage.GetMessagesOptions) ([]AzureMessage, error) {
msgs, err := qr.ref.GetMessages(options)
if err != nil {
return nil, err
}
messages := make([]AzureMessage, len(msgs))
for i := range msgs {
bytes, err := base64.StdEncoding.DecodeString(msgs[i].Text)
if err != nil {
return nil, err
}
messages[i] = azureMessage{
ref: &msgs[i],
bytes: bytes,
}
}
return messages, nil
}
type azureMessage struct {
ref *storage.Message
bytes []byte
}
func (m azureMessage) Bytes() []byte {
return m.bytes
}
func (m azureMessage) Put(options *storage.PutMessageOptions) error {
return m.ref.Put(options)
}
func (m azureMessage) Delete(options *storage.QueueServiceOptions) error {
return m.ref.Delete(options)
}
func newAzureQueueService(client storage.Client) AzureQueueService {
return azureQueueService{
service: client.GetQueueService(),
}
}
func newAzureStorageConnection(logger *zap.Logger, routerURL string, config MessageQueueConfig) (MessageQueue, error) {
account := os.Getenv("AZURE_STORAGE_ACCOUNT_NAME")
if len(account) == 0 {
return nil, errors.New("Required environment variable 'AZURE_STORAGE_ACCOUNT_NAME' is not set")
}
key := os.Getenv("AZURE_STORAGE_ACCOUNT_KEY")
if len(key) == 0 {
return nil, errors.New("Required environment variable 'AZURE_STORAGE_ACCOUNT_KEY' is not set")
}
logger.Info("creating Azure storage connection to storage account", zap.String("account", account))
client, err := storage.NewBasicClient(account, key)
if err != nil {
return nil, errors.Wrap(err, "failed to create Azure storage client")
}
return &AzureStorageConnection{
		logger:    logger.Named("azure_storage"),
routerURL: routerURL,
service: newAzureQueueService(client),
httpClient: &http.Client{
Timeout: AzureFunctionInvocationTimeout,
},
}, nil
}
func (asc AzureStorageConnection) subscribe(trigger *crd.MessageQueueTrigger) (messageQueueSubscription, error) {
asc.logger.Info("subscribing to Azure storage queue", zap.String("queue", trigger.Spec.Topic))
if trigger.Spec.FunctionReference.Type != fission.FunctionReferenceTypeFunctionName {
return nil, fmt.Errorf("unsupported function reference type (%v) for trigger %q", trigger.Spec.FunctionReference.Type, trigger.Metadata.Name)
}
subscription := &AzureQueueSubscription{
queue: asc.service.GetQueue(trigger.Spec.Topic),
queueName: trigger.Spec.Topic,
outputQueueName: trigger.Spec.ResponseTopic,
// with the addition of multi-tenancy, the users can create functions in any namespace. however,
// the triggers can only be created in the same namespace as the function.
// so essentially, function namespace = trigger namespace.
functionURL: asc.routerURL + "/" + strings.TrimPrefix(fission.UrlForFunction(trigger.Spec.FunctionReference.Name, trigger.Metadata.Namespace), "/"),
contentType: trigger.Spec.ContentType,
unsubscribe: make(chan bool),
done: make(chan bool),
}
go runAzureQueueSubscription(asc, subscription)
return subscription, nil
}
func (asc AzureStorageConnection) unsubscribe(subscription messageQueueSubscription) error {
sub := subscription.(*AzureQueueSubscription)
asc.logger.Info("unsubscribing from Azure storage queue", zap.String("queue", sub.queueName))
// Let the worker know we've unsubscribed
sub.unsubscribe <- true
// Wait until the subscription is done
<-sub.done
return nil
}
func runAzureQueueSubscription(conn AzureStorageConnection, sub *AzureQueueSubscription) {
var wg sync.WaitGroup
// Process the queue before waiting
pollAzureQueueSubscription(conn, sub, &wg)
timer := time.NewTimer(AzureQueuePollingInterval)
for {
conn.logger.Info("waiting before polling Azure storage queue", zap.Duration("interval_length", AzureQueuePollingInterval), zap.String("queue", sub.queueName))
select {
case <-sub.unsubscribe:
timer.Stop()
wg.Wait()
sub.done <- true
return
case <-timer.C:
pollAzureQueueSubscription(conn, sub, &wg)
timer.Reset(AzureQueuePollingInterval)
continue
}
}
}
func pollAzureQueueSubscription(conn AzureStorageConnection, sub *AzureQueueSubscription, wg *sync.WaitGroup) {
conn.logger.Info("polling for messages from Azure storage queue", zap.String("queue", sub.queueName))
err := sub.queue.Create(nil)
if err != nil {
conn.logger.Error("failed to create message queue", zap.Error(err), zap.String("queue", sub.queueName))
return
}
for {
err := sub.queue.Create(nil)
if err != nil {
conn.logger.Error("failed to create message queue", zap.Error(err), zap.String("queue", sub.queueName))
return
}
messages, err := sub.queue.GetMessages(&storage.GetMessagesOptions{
NumOfMessages: AzureMessageFetchCount,
VisibilityTimeout: int(AzureMessageVisibilityTimeout / time.Second),
})
if err != nil {
conn.logger.Error("failed to retrieve messages from Azure storage queue", zap.Error(err), zap.String("queue", sub.queueName))
break
}
if len(messages) == 0 {
break
}
wg.Add(len(messages))
for _, msg := range messages {
go func(conn AzureStorageConnection, sub *AzureQueueSubscription, msg AzureMessage) {
defer wg.Done()
invokeTriggeredFunction(conn, sub, msg)
}(conn, sub, msg)
}
}
}
func invokeTriggeredFunction(conn AzureStorageConnection, sub *AzureQueueSubscription, message AzureMessage) {
defer message.Delete(nil)
conn.logger.Info("making HTTP request to invoke function", zap.String("function_url", sub.functionURL))
for i := 0; i <= AzureQueueRetryLimit; i++ {
if i > 0 {
conn.logger.Info("retrying function invocation", zap.Int("retry", i), zap.String("function_url", sub.functionURL))
}
request, err := http.NewRequest("POST", sub.functionURL, bytes.NewReader(message.Bytes()))
if err != nil {
conn.logger.Error("failed to create HTTP request to invoke function", zap.Error(err), zap.String("function_url", sub.functionURL))
continue
}
request.Header.Set("X-Fission-MQTrigger-Topic", sub.queueName)
if len(sub.outputQueueName) > 0 {
request.Header.Set("X-Fission-MQTrigger-RespTopic", sub.outputQueueName)
}
if i > 0 {
request.Header.Set("X-Fission-MQTrigger-RetryCount", strconv.Itoa(i))
}
request.Header.Set("Content-Type", sub.contentType)
response, err := conn.httpClient.Do(request)
if err != nil {
conn.logger.Error("sending function invocation request failed", zap.Error(err), zap.String("function_url", sub.functionURL))
continue
}
defer response.Body.Close()
body, err := ioutil.ReadAll(response.Body)
if err != nil {
conn.logger.Error("failed to read response body from function invocation", zap.Error(err), zap.String("function_url", sub.functionURL))
continue
}
if response.StatusCode < 200 || response.StatusCode >= 300 {
conn.logger.Error("function invocation request returned a failure status code",
zap.String("function_url", sub.functionURL),
zap.String("body", string(body)),
zap.Int("status_code", response.StatusCode))
continue
}
if len(sub.outputQueueName) > 0 {
outputQueue := conn.service.GetQueue(sub.outputQueueName)
err = outputQueue.Create(nil)
if err != nil {
conn.logger.Error("failed to create output queue",
zap.Error(err),
zap.String("output_queue", sub.outputQueueName),
zap.String("function_url", sub.functionURL))
return
}
outputMessage := outputQueue.NewMessage(string(body))
err = outputMessage.Put(nil)
if err != nil {
conn.logger.Error("failed to post response body from function invocation to output queue",
zap.String("output_queue", sub.outputQueueName),
zap.String("function_url", sub.functionURL))
return
}
}
// Function invocation was successful
return
}
conn.logger.Error("function invocation retired too many times - moving message to poison queue",
zap.Int("retry_limit", AzureQueueRetryLimit),
zap.String("function_url", sub.functionURL))
poisonQueueName := sub.queueName + AzurePoisonQueueSuffix
poisonQueue := conn.service.GetQueue(poisonQueueName)
err := poisonQueue.Create(nil)
if err != nil {
conn.logger.Error("failed to create poison queue",
zap.Error(err),
zap.String("poison_queue_name", poisonQueueName),
zap.String("function_url", sub.functionURL))
return
}
poisonMessage := poisonQueue.NewMessage(string(message.Bytes()))
err = poisonMessage.Put(nil)
if err != nil {
conn.logger.Error("failed to post response body from function invocation failure poison queue",
zap.Error(err),
zap.String("poison_queue_name", poisonQueueName),
zap.String("function_url", sub.functionURL))
return
}
}
| [
"\"AZURE_STORAGE_ACCOUNT_NAME\"",
"\"AZURE_STORAGE_ACCOUNT_KEY\""
]
| []
| [
"AZURE_STORAGE_ACCOUNT_KEY",
"AZURE_STORAGE_ACCOUNT_NAME"
]
| [] | ["AZURE_STORAGE_ACCOUNT_KEY", "AZURE_STORAGE_ACCOUNT_NAME"] | go | 2 | 0 | |
Hackerrank/travel-around-the-world.java | import java.io.*;
import java.math.*;
import java.text.*;
import java.util.*;
import java.util.regex.*;
/**
* @author [email protected], (c)2019
*/
public class Solution {
/*
* Complete the travelAroundTheWorld function below.
*/
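// Greedy approach: compute the fuel delta for each city, then sweep backwards accumulating
// "debt"; any city passed while debt is still outstanding cannot be a valid starting point.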
private static int travelAroundTheWorld(final int[] a, final int[] b, final long c) {
final int N = a.length;
// precomputed deltas
int[] d = new int[N];
// feasible starting points stay 0, infeasible ones get marked 1
int[] imp = new int[N];
// fill in the deltas and compute the total balance in the same pass
boolean chocolate = true;
long sigma = 0;
for (int i = 0; i < N; i++) {
int di = d[i] = ((int) (a[i] < c ? a[i] : c) - b[i]); // delta(a[i], b[i], c);
sigma += di;
if(chocolate && di<0)
chocolate = false;
}
// every delta is non-negative, so every city is a valid starting point
if (chocolate)
return N;
// the total balance is negative, so no starting city can work
if(sigma<0)
return 0;
// debt <= 0 by design
int borrow = 0;
// sweep the deltas backwards looking for deficits;
// p additionally keeps advancing past the lowest deficit position reached so far;
// with outstanding debt we may go around a second lap, but never a third
for (int p = N-1; (borrow < 0 || p >=0) && p > -N; --p) {
// cyclic index
int i = p >= 0 ? p : p + N;
// current delta
int di = d[i];
if (borrow >= 0 && di >= 0)
continue;
// the debt cannot be carried across this point, since passing it consumes the entire tank
if(borrow < 0 && b[i] == c)
return 0;
// if there was no debt it may appear, be repaid, or stay unchanged, but borrow <= 0 always holds
borrow += di;
// by design borrow <=0
if(borrow > 0)
borrow = 0;
// mark this starting city as infeasible
if(borrow < 0)
imp[i] = 1;
}
int qu = 0;
for (int i = 0; i < N; i++) {
if(imp[i] == 0)
qu++;
}
return qu;
}
private static final Scanner scanner = new Scanner(System.in);
public static void main(String[] args) throws IOException {
BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH")));
String[] nc = scanner.nextLine().split(" ");
int n = Integer.parseInt(nc[0].trim());
long c = Long.parseLong(nc[1].trim());
int[] a = new int[n];
String[] aItems = scanner.nextLine().split(" ");
for (int aItr = 0; aItr < n; aItr++) {
int aItem = Integer.parseInt(aItems[aItr].trim());
a[aItr] = aItem;
}
int[] b = new int[n];
String[] bItems = scanner.nextLine().split(" ");
for (int bItr = 0; bItr < n; bItr++) {
int bItem = Integer.parseInt(bItems[bItr].trim());
b[bItr] = bItem;
}
int result = travelAroundTheWorld(a, b, c);
bufferedWriter.write(String.valueOf(result));
bufferedWriter.newLine();
bufferedWriter.close();
}
}
| [
"\"OUTPUT_PATH\""
]
| []
| [
"OUTPUT_PATH"
]
| [] | ["OUTPUT_PATH"] | java | 1 | 0 | |
docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
import django
if os.getenv("READTHEDOCS", default=False) == "True":
sys.path.insert(0, os.path.abspath(".."))
os.environ["DJANGO_READ_DOT_ENV_FILE"] = "True"
os.environ["USE_DOCKER"] = "no"
else:
sys.path.insert(0, os.path.abspath("/app"))
os.environ["DATABASE_URL"] = "sqlite:///readthedocs.db"
os.environ["CELERY_BROKER_URL"] = os.getenv("REDIS_URL", "redis://redis:6379")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
django.setup()
# -- Project information -----------------------------------------------------
project = "SEMsquare"
copyright = """2022, Johannes Jacob"""
author = "Johannes Jacob"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
]
# Add any paths that contain templates here, relative to this directory.
# templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
| []
| []
| [
"USE_DOCKER",
"DATABASE_URL",
"CELERY_BROKER_URL",
"DJANGO_READ_DOT_ENV_FILE",
"REDIS_URL",
"READTHEDOCS"
]
| [] | ["USE_DOCKER", "DATABASE_URL", "CELERY_BROKER_URL", "DJANGO_READ_DOT_ENV_FILE", "REDIS_URL", "READTHEDOCS"] | python | 6 | 0 | |
handler.py | ''''
Copyright 2018 Ashok Sathyanarayan
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
acknowledgements
https://github.com/serverless/examples/blob/master/aws-node-github-webhook-listener/handler.js
https://github.com/carlos-jenkins/python-github-webhooks/blob/master/webhooks.py
https://github.com/nytlabs/github-s3-deploy/blob/master/index.js
https://aws.amazon.com/blogs/compute/sharing-secrets-with-aws-lambda-using-aws-systems-manager-parameter-store/
'''
import json
import logging
import boto3
from botocore.exceptions import ClientError
import base64
import datetime
import hmac, hashlib
import os
import sys
here = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(here, "library"))
from github import Github, GithubException
log = logging.getLogger()
log.setLevel(logging.DEBUG)
# all required configurations
g_sns_arn = "arn:aws:sns:us-east-1:670533574044:github-file-to-copy"
g_github_secret_name = "/prod/githubCopy/appConfig"
g_s3_access_name = "/prod/s3/appKeys"
g_endpoint_url = "https://secretsmanager.us-east-1.amazonaws.com"
g_region_name = "us-east-1"
g_myGithubConfig = None
g_mys3AccessKeys = None
def load_github_config():
global g_myGithubConfig
if g_myGithubConfig is None:
config = get_secret(g_github_secret_name)
g_myGithubConfig = ConfigWrapper(config)
def load_s3_access_config():
global g_mys3AccessKeys
if g_mys3AccessKeys is None:
config = get_secret(g_s3_access_name)
g_mys3AccessKeys = ConfigWrapper(config)
class BreakoutException(Exception):
"""Base class for other exceptions"""
pass
class ConfigWrapper:
def __init__(self, config):
"""
Construct a new ConfigWrapper with the application configuration
:param config: application configuration
"""
self.config = config
def get_config(self):
return self.config
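# get_secret retrieves a secret string from AWS Secrets Manager and returns it parsed
# as a JSON object; errors are logged and an empty object is returned instead.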
def get_secret(secret_name):
endpoint_url = g_endpoint_url
region_name = g_region_name
secret = "{}"
session = boto3.session.Session()
client = session.client(
service_name='secretsmanager',
region_name=region_name,
endpoint_url=endpoint_url
)
try:
get_secret_value_response = client.get_secret_value(
SecretId=secret_name
)
except ClientError as e:
log.error ("got error while reading secret")
if e.response['Error']['Code'] == 'ResourceNotFoundException':
log.error("The requested secret " + secret_name + " was not found")
elif e.response['Error']['Code'] == 'InvalidRequestException':
log.error("The request was invalid due to:", e)
elif e.response['Error']['Code'] == 'InvalidParameterException':
log.error("The request had invalid params:", e)
else:
log.error (e)
else:
log.debug ("Getting secret")
# Decrypted secret using the associated KMS CMK
# Depending on whether the secret was a string or binary, one of these fields will be populated
if 'SecretString' in get_secret_value_response:
secret = get_secret_value_response['SecretString']
else:
binary_secret_data = get_secret_value_response['SecretBinary']
log.debug("binary secret is not being handled here")
# Your code goes here.
log.debug("SECRECT : {}".format(secret))
return json.loads(secret)
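# send_message publishes a JSON payload describing a single GitHub file to copy
# (repository, file path, target S3 bucket/path, commit sha) to the SNS topic.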
def send_message(client,repname, githubFile, s3bucket, s3basedir, s3path, sha):
message = {"repositoryName": repname,
"githubFile" : githubFile,
"s3bucket" : s3bucket,
"s3basedir": s3basedir,
"s3path" : s3path,
"sha" : sha
}
sns_response = client.publish(
TargetArn=g_sns_arn,
Message=json.dumps({'default': json.dumps(message)}),
MessageStructure='json'
)
log.debug("send message response :".format( sns_response))
def queue_files_to_download(repository, sha, server_path, bucket, basedir, repname, sns_client):
contents = repository.get_dir_contents(server_path, ref=sha)
for content in contents:
if content.type == 'dir':
queue_files_to_download(repository, sha, content.path, bucket, basedir+"/"+content.path,repname, sns_client)
else :
try:
path = content.path
#file_content = repository.get_contents(path, ref=sha)
#file_data = base64.b64decode(file_content.content)
#s3.Object(bucket, basedir + "/" +content.name).put(Body=file_data)
send_message(sns_client, repname, path, bucket,basedir,content.name, sha)
log.debug( "queing file = {}".format( path) + " to s3 path = {}".format( basedir) + "/".format( content.name))
except (GithubException, IOError) as exc:
log.error('Error processing %s: %s', content.path, exc)
def download_file(repository, githubFile,sha,s3bucket,s3path, s3basedir, access_keys):
session = boto3.Session(
aws_access_key_id= access_keys["aws_access_key_id"],
aws_secret_access_key=access_keys["aws_secret_access_key"],)
s3 = session.resource('s3')
file_content = repository.get_contents(githubFile, ref=sha)
file_data = base64.b64decode(file_content.content)
s3.Object(s3bucket, s3basedir + "/" + s3path).put(Body=file_data)
def githubWebhook(event, context):
global g_sns_arn
global g_github_secret_name
log.debug("event : ", event)
g_sns_arn = os.environ['snsarn']
g_github_secret_name = os.environ['githubconfig']
log.info(" sns queue to use ".format(g_sns_arn))
headers = event["headers"]
sig = headers['X-Hub-Signature']
githubEvent = headers['X-GitHub-Event']
id = headers['X-GitHub-Delivery']
responseHeaders = {
'content-type': 'application/json',
'Access-Control-Allow-Origin': '*',
'Access-Control-Allow-Credentials': 'true',
"isBase64Encoded": 'false'
};
plain_ret = {
'statusCode': 401,
'headers': responseHeaders,
'body': {"msg": "",
"timestamp": datetime.datetime.utcnow().isoformat()
}
}
try:
if g_myGithubConfig is None:
load_github_config()
secret = g_myGithubConfig.get_config()
log.debug ("secret = {}".format(secret))
if secret is None:
plain_ret['body']['msg'] = 'Internal Configuration Problems'
plain_ret['statusCode'] = 500
raise BreakoutException
if sig is None:
plain_ret['body']['msg'] = 'No X-Hub-Signature found on request'
raise BreakoutException
if githubEvent is None:
plain_ret['body']['msg'] = 'No X-Github-Event found on request'
plain_ret['statusCode'] = 422
raise BreakoutException
if id is None:
plain_ret['body']['msg'] = 'No X-Github-Delivery found on request'
raise BreakoutException
if secret:
# Only SHA1 is supported
header_signature = headers['X-Hub-Signature']
if header_signature is None:
plain_ret['body']['msg'] = 'No X-Hub-Signature found on request'
plain_ret['statusCode'] = 403
raise BreakoutException
sha_name, signature = header_signature.split('=')
log.info ("header_signature = {}".format(header_signature))
log.debug ("sha_name = {}".format( sha_name))
log.debug ("signature = {} ".format( signature))
sha_name = sha_name.strip()
if sha_name != 'sha1':
plain_ret['body']['msg'] = 'Only sha1 is supported'
plain_ret['statusCode'] = 501
raise BreakoutException
#validate signature
log.debug("event body = {}".format(event['body']))
body = json.loads(event['body'])
repository = body['repository']['name']
log.debug("event detected for repository=" + repository)
node = secret[repository]
secretAsbytearray = bytearray()
secretAsbytearray.extend(map(ord, node['githubWebhookSecret']))
bodyAsbytearray = bytearray()
bodyAsbytearray.extend(map(ord, str(event["body"])))
mac = hmac.new(secretAsbytearray, msg=bodyAsbytearray, digestmod=hashlib.sha1)
log.info("calculated mac={}".format( mac.hexdigest()))
if not hmac.compare_digest(str(mac.hexdigest()), str(signature)):
log.error ("signature mismatch ")
plain_ret['body']['msg'] = 'Invalid signature'
plain_ret['statusCode'] = 403
raise BreakoutException
# implement ping
githubEvent = githubEvent.strip()
if githubEvent == 'ping':
plain_ret['body']['msg'] = 'pong'
plain_ret['statusCode'] = 200
raise BreakoutException
plain_ret['body']['msg'] = 'No processing done as event was not relevant'
if githubEvent == 'push':
try:
g = Github(node['githubAPIKey'])
r = g.get_user().get_repo(repository)
f_c = r.get_branches()
matched_branches = [match for match in f_c if match.name == "master"]
sns_client = boto3.client('sns')
queue_files_to_download(r, matched_branches[0].commit.sha, "/", node['bucket'], node['bucketDir'],repository, sns_client )
log.debug("Queued files for Download")
plain_ret['body']['msg'] = "Push event processed"
plain_ret['statusCode'] = 200
except KeyError as e:
plain_ret['body']['msg'] = 'push event not processed for this repository'
plain_ret['statusCode'] = 200
except BreakoutException:
pass
plain_ret['body'] = json.dumps(plain_ret['body'])
return plain_ret
def githubFileCopy(event, context):
global g_github_secret_name
global g_s3_access_name
log.debug("Received event {}".format(json.dumps(event)))
try:
g_github_secret_name = os.environ['githubconfig']
g_s3_access_name = os.environ['s3accessKeys']
message = json.loads(event["Records"][0]["Sns"]["Message"])
if g_myGithubConfig is None:
load_github_config()
secret = g_myGithubConfig.get_config()
if g_mys3AccessKeys is None:
load_s3_access_config()
s3_access_keys = g_mys3AccessKeys.get_config()
node = secret[message["repositoryName"]]
g = Github(node['githubAPIKey'])
r = g.get_user().get_repo(message["repositoryName"])
download_file(r, message["githubFile"], message["sha"], message["s3bucket"],message["s3path"],message["s3basedir"], s3_access_keys)
except BreakoutException:
pass
return {
"message": message["githubFile"] + " downloaded",
}
| []
| []
| [
"githubconfig",
"s3accessKeys",
"snsarn"
]
| [] | ["githubconfig", "s3accessKeys", "snsarn"] | python | 3 | 0 | |
internal/dbtest/db_test.go | package dbtest_test
import (
"context"
"database/sql"
"encoding/json"
"errors"
"os"
"path/filepath"
"reflect"
"runtime"
"strings"
"testing"
"github.com/uptrace/bun"
"github.com/uptrace/bun/dialect"
"github.com/uptrace/bun/dialect/feature"
"github.com/uptrace/bun/dialect/mysqldialect"
"github.com/uptrace/bun/dialect/pgdialect"
"github.com/uptrace/bun/dialect/sqlitedialect"
"github.com/uptrace/bun/driver/pgdriver"
"github.com/uptrace/bun/driver/sqliteshim"
"github.com/uptrace/bun/extra/bundebug"
_ "github.com/go-sql-driver/mysql"
_ "github.com/jackc/pgx/v4/stdlib"
"github.com/stretchr/testify/require"
)
var ctx = context.TODO()
const (
pgName = "pg"
pgxName = "pgx"
mysql5Name = "mysql5"
mysql8Name = "mysql8"
mariadbName = "mariadb"
sqliteName = "sqlite"
)
var allDBs = map[string]func(tb testing.TB) *bun.DB{
pgName: pg,
pgxName: pgx,
mysql5Name: mysql5,
mysql8Name: mysql8,
mariadbName: mariadb,
sqliteName: sqlite,
}
func pg(tb testing.TB) *bun.DB {
dsn := os.Getenv("PG")
if dsn == "" {
dsn = "postgres://postgres:postgres@localhost:5432/test?sslmode=disable"
}
sqldb := sql.OpenDB(pgdriver.NewConnector(pgdriver.WithDSN(dsn)))
tb.Cleanup(func() {
require.NoError(tb, sqldb.Close())
})
db := bun.NewDB(sqldb, pgdialect.New())
db.AddQueryHook(bundebug.NewQueryHook(
bundebug.WithEnabled(false),
bundebug.FromEnv(""),
))
require.Equal(tb, "DB<dialect=pg>", db.String())
return db
}
func pgx(tb testing.TB) *bun.DB {
dsn := os.Getenv("PG")
if dsn == "" {
dsn = "postgres://postgres:postgres@localhost:5432/test?sslmode=disable"
}
sqldb, err := sql.Open("pgx", dsn)
require.NoError(tb, err)
tb.Cleanup(func() {
require.NoError(tb, sqldb.Close())
})
db := bun.NewDB(sqldb, pgdialect.New())
db.AddQueryHook(bundebug.NewQueryHook(
bundebug.WithEnabled(false),
bundebug.FromEnv(""),
))
require.Equal(tb, "DB<dialect=pg>", db.String())
return db
}
func mysql8(tb testing.TB) *bun.DB {
dsn := os.Getenv("MYSQL")
if dsn == "" {
dsn = "user:pass@/test"
}
sqldb, err := sql.Open("mysql", dsn)
require.NoError(tb, err)
tb.Cleanup(func() {
require.NoError(tb, sqldb.Close())
})
db := bun.NewDB(sqldb, mysqldialect.New())
db.AddQueryHook(bundebug.NewQueryHook(
bundebug.WithEnabled(false),
bundebug.FromEnv(""),
))
require.Equal(tb, "DB<dialect=mysql>", db.String())
return db
}
func mysql5(tb testing.TB) *bun.DB {
dsn := os.Getenv("MYSQL5")
if dsn == "" {
dsn = "user:pass@tcp(localhost:53306)/test"
}
sqldb, err := sql.Open("mysql", dsn)
require.NoError(tb, err)
tb.Cleanup(func() {
require.NoError(tb, sqldb.Close())
})
db := bun.NewDB(sqldb, mysqldialect.New())
db.AddQueryHook(bundebug.NewQueryHook(
bundebug.WithEnabled(false),
bundebug.FromEnv(""),
))
require.Equal(tb, "DB<dialect=mysql>", db.String())
return db
}
func mariadb(tb testing.TB) *bun.DB {
dsn := os.Getenv("MYSQL5")
if dsn == "" {
dsn = "user:pass@tcp(localhost:13306)/test"
}
sqldb, err := sql.Open("mysql", dsn)
require.NoError(tb, err)
tb.Cleanup(func() {
require.NoError(tb, sqldb.Close())
})
db := bun.NewDB(sqldb, mysqldialect.New())
db.AddQueryHook(bundebug.NewQueryHook(
bundebug.WithEnabled(false),
bundebug.FromEnv(""),
))
require.Equal(tb, "DB<dialect=mysql>", db.String())
return db
}
func sqlite(tb testing.TB) *bun.DB {
sqldb, err := sql.Open(sqliteshim.DriverName(), filepath.Join(tb.TempDir(), "sqlite.db"))
require.NoError(tb, err)
tb.Cleanup(func() {
require.NoError(tb, sqldb.Close())
})
db := bun.NewDB(sqldb, sqlitedialect.New())
db.AddQueryHook(bundebug.NewQueryHook(
bundebug.WithEnabled(false),
bundebug.FromEnv(""),
))
require.Equal(tb, "DB<dialect=sqlite>", db.String())
return db
}
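// testEachDB runs the given test body against every configured database dialect as a named subtest.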
func testEachDB(t *testing.T, f func(t *testing.T, dbName string, db *bun.DB)) {
for dbName, newDB := range allDBs {
t.Run(dbName, func(t *testing.T) {
f(t, dbName, newDB(t))
})
}
}
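// funcName returns the short name of a function value, used to label subtests.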
func funcName(x interface{}) string {
s := runtime.FuncForPC(reflect.ValueOf(x).Pointer()).Name()
if i := strings.LastIndexByte(s, '.'); i >= 0 {
return s[i+1:]
}
return s
}
func TestDB(t *testing.T) {
type Test struct {
run func(t *testing.T, db *bun.DB)
}
tests := []Test{
{testPing},
{testNilModel},
{testSelectScan},
{testSelectCount},
{testSelectMap},
{testSelectMapSlice},
{testSelectStruct},
{testSelectNestedStructValue},
{testSelectNestedStructPtr},
{testSelectStructSlice},
{testSelectSingleSlice},
{testSelectMultiSlice},
{testSelectJSONMap},
{testSelectJSONStruct},
{testJSONSpecialChars},
{testSelectRawMessage},
{testScanNullVar},
{testScanSingleRow},
{testScanSingleRowByRow},
{testScanRows},
{testRunInTx},
{testInsertIface},
{testSelectBool},
{testFKViolation},
{testInterfaceAny},
{testInterfaceJSON},
{testScanRawMessage},
{testPointers},
{testExists},
{testScanTimeIntoString},
{testModelNonPointer},
{testBinaryData},
{testUpsert},
{testMultiUpdate},
{testTxScanAndCount},
}
testEachDB(t, func(t *testing.T, dbName string, db *bun.DB) {
for _, test := range tests {
t.Run(funcName(test.run), func(t *testing.T) {
test.run(t, db)
})
}
})
}
func testPing(t *testing.T, db *bun.DB) {
err := db.PingContext(ctx)
require.NoError(t, err)
}
func testNilModel(t *testing.T, db *bun.DB) {
err := db.NewSelect().ColumnExpr("1").Scan(ctx)
require.Error(t, err)
require.Equal(t, "bun: Model(nil)", err.Error())
}
func testSelectScan(t *testing.T, db *bun.DB) {
var num int
err := db.NewSelect().ColumnExpr("10").Scan(ctx, &num)
require.NoError(t, err)
require.Equal(t, 10, num)
err = db.NewSelect().TableExpr("(SELECT 10) AS t").Where("FALSE").Scan(ctx, &num)
require.Equal(t, sql.ErrNoRows, err)
var str string
err = db.NewSelect().ColumnExpr("?", "\\\"'hello\n%_").Scan(ctx, &str)
require.NoError(t, err)
require.Equal(t, "\\\"'hello\n%_", str)
var flag bool
err = db.NewSelect().
ColumnExpr("EXISTS (?)", db.NewSelect().ColumnExpr("1")).
Scan(ctx, &flag)
require.NoError(t, err)
require.Equal(t, true, flag)
}
func testSelectCount(t *testing.T, db *bun.DB) {
if !db.Dialect().Features().Has(feature.CTE) {
t.Skip()
return
}
values := db.NewValues(&[]map[string]interface{}{
{"num": 1},
{"num": 2},
{"num": 3},
})
q := db.NewSelect().
With("t", values).
Column("t.num").
TableExpr("t").
OrderExpr("t.num DESC").
Limit(1)
var num int
err := q.Scan(ctx, &num)
require.NoError(t, err)
require.Equal(t, 3, num)
count, err := q.Count(ctx)
require.NoError(t, err)
require.Equal(t, 3, count)
}
func testSelectMap(t *testing.T, db *bun.DB) {
var m map[string]interface{}
err := db.NewSelect().
ColumnExpr("10 AS num").
Scan(ctx, &m)
require.NoError(t, err)
switch v := m["num"]; v.(type) {
case int32:
require.Equal(t, int32(10), v)
case int64:
require.Equal(t, int64(10), v)
default:
t.Fail()
}
}
func testSelectMapSlice(t *testing.T, db *bun.DB) {
if !db.Dialect().Features().Has(feature.CTE) {
t.Skip()
}
values := db.NewValues(&[]map[string]interface{}{
{"column1": 1},
{"column1": 2},
{"column1": 3},
})
var ms []map[string]interface{}
err := db.NewSelect().
With("t", values).
TableExpr("t").
Scan(ctx, &ms)
require.NoError(t, err)
require.Len(t, ms, 3)
for i, m := range ms {
require.Equal(t, map[string]interface{}{
"column1": int64(i + 1),
}, m)
}
}
func testSelectStruct(t *testing.T, db *bun.DB) {
type Model struct {
Num int
Str string
}
model := new(Model)
err := db.NewSelect().
ColumnExpr("10 AS num, 'hello' as str").
Scan(ctx, model)
require.NoError(t, err)
require.Equal(t, 10, model.Num)
require.Equal(t, "hello", model.Str)
err = db.NewSelect().TableExpr("(SELECT 42) AS t").Where("FALSE").Scan(ctx, model)
require.Equal(t, sql.ErrNoRows, err)
err = db.NewSelect().ColumnExpr("1 as unknown_column").Scan(ctx, model)
require.Error(t, err)
require.Contains(t, err.Error(), "Model does not have column")
}
func testSelectNestedStructValue(t *testing.T, db *bun.DB) {
type Model struct {
Num int
Sub struct {
Str string
}
}
model := new(Model)
err := db.NewSelect().
ColumnExpr("10 AS num, 'hello' as sub__str").
Scan(ctx, model)
require.NoError(t, err)
require.Equal(t, 10, model.Num)
require.Equal(t, "hello", model.Sub.Str)
}
func testSelectNestedStructPtr(t *testing.T, db *bun.DB) {
type Sub struct {
Str string
}
type Model struct {
Num int
Sub *Sub
}
model := new(Model)
err := db.NewSelect().
ColumnExpr("10 AS num").
Scan(ctx, model)
require.NoError(t, err)
require.Equal(t, 10, model.Num)
require.Nil(t, model.Sub)
model = new(Model)
err = db.NewSelect().
ColumnExpr("10 AS num, 'hello' AS sub__str").
Scan(ctx, model)
require.NoError(t, err)
require.Equal(t, 10, model.Num)
require.Equal(t, "hello", model.Sub.Str)
model = new(Model)
err = db.NewSelect().
ColumnExpr("10 AS num, NULL AS sub__str").
Scan(ctx, model)
require.NoError(t, err)
require.Equal(t, 10, model.Num)
require.Nil(t, model.Sub)
}
func testSelectStructSlice(t *testing.T, db *bun.DB) {
if !db.Dialect().Features().Has(feature.CTE) {
t.Skip()
}
type Model struct {
Num int `bun:"column1"`
}
values := db.NewValues(&[]map[string]interface{}{
{"column1": 1},
{"column1": 2},
{"column1": 3},
})
models := make([]Model, 0)
err := db.NewSelect().
With("t", values).
TableExpr("t").
Scan(ctx, &models)
require.NoError(t, err)
require.Len(t, models, 3)
for i, model := range models {
require.Equal(t, i+1, model.Num)
}
}
func testSelectSingleSlice(t *testing.T, db *bun.DB) {
if !db.Dialect().Features().Has(feature.CTE) {
t.Skip()
}
values := db.NewValues(&[]map[string]interface{}{
{"column1": 1},
{"column1": 2},
{"column1": 3},
})
var ns []int
err := db.NewSelect().
With("t", values).
TableExpr("t").
Scan(ctx, &ns)
require.NoError(t, err)
require.Equal(t, []int{1, 2, 3}, ns)
}
func testSelectMultiSlice(t *testing.T, db *bun.DB) {
if !db.Dialect().Features().Has(feature.CTE) {
t.Skip()
}
values := db.NewValues(&[]map[string]interface{}{
{"a": 1, "b": "foo"},
{"a": 2, "b": "bar"},
{"a": 3, "b": ""},
})
var ns []int
var ss []string
err := db.NewSelect().
With("t", values).
TableExpr("t").
Scan(ctx, &ns, &ss)
require.NoError(t, err)
require.Equal(t, []int{1, 2, 3}, ns)
require.Equal(t, []string{"foo", "bar", ""}, ss)
}
func testSelectJSONMap(t *testing.T, db *bun.DB) {
type Model struct {
Map map[string]string
}
model := new(Model)
err := db.NewSelect().
ColumnExpr("? AS map", map[string]string{"hello": "world"}).
Scan(ctx, model)
require.NoError(t, err)
require.Equal(t, map[string]string{"hello": "world"}, model.Map)
err = db.NewSelect().
ColumnExpr("NULL AS map").
Scan(ctx, model)
require.NoError(t, err)
require.Equal(t, map[string]string(nil), model.Map)
}
func testSelectJSONStruct(t *testing.T, db *bun.DB) {
type Struct struct {
Hello string
}
type Model struct {
Struct Struct
}
model := new(Model)
err := db.NewSelect().
ColumnExpr("? AS struct", Struct{Hello: "world"}).
Scan(ctx, model)
require.NoError(t, err)
require.Equal(t, Struct{Hello: "world"}, model.Struct)
err = db.NewSelect().
ColumnExpr("NULL AS struct").
Scan(ctx, model)
require.NoError(t, err)
require.Equal(t, Struct{}, model.Struct)
}
func testSelectRawMessage(t *testing.T, db *bun.DB) {
type Model struct {
Raw json.RawMessage
}
model := new(Model)
err := db.NewSelect().
ColumnExpr("? AS raw", map[string]string{"hello": "world"}).
Scan(ctx, model)
require.NoError(t, err)
require.Equal(t, `{"hello":"world"}`, string(model.Raw))
err = db.NewSelect().
ColumnExpr("NULL AS raw").
Scan(ctx, model)
require.NoError(t, err)
require.Nil(t, model.Raw)
}
func testScanNullVar(t *testing.T, db *bun.DB) {
num := int(42)
err := db.NewSelect().ColumnExpr("NULL").Scan(ctx, &num)
require.NoError(t, err)
require.Zero(t, num)
}
func testScanSingleRow(t *testing.T, db *bun.DB) {
rows, err := db.QueryContext(ctx, "SELECT 42")
require.NoError(t, err)
defer rows.Close()
if !rows.Next() {
t.Fail()
}
var num int
err = db.ScanRow(ctx, rows, &num)
require.NoError(t, err)
require.Equal(t, 42, num)
}
func testScanSingleRowByRow(t *testing.T, db *bun.DB) {
if !db.Dialect().Features().Has(feature.CTE) {
t.Skip()
}
values := db.NewValues(&[]map[string]interface{}{
{"num": 1},
{"num": 2},
{"num": 3},
})
rows, err := db.NewSelect().
With("t", values).
TableExpr("t").
OrderExpr("t.num DESC").
Rows(ctx)
require.NoError(t, err)
defer rows.Close()
var nums []int
for rows.Next() {
var num int
err := db.ScanRow(ctx, rows, &num)
require.NoError(t, err)
nums = append(nums, num)
}
require.NoError(t, rows.Err())
require.Equal(t, []int{3, 2, 1}, nums)
}
func testScanRows(t *testing.T, db *bun.DB) {
if !db.Dialect().Features().Has(feature.CTE) {
t.Skip()
}
values := db.NewValues(&[]map[string]interface{}{
{"num": 1},
{"num": 2},
{"num": 3},
})
rows, err := db.NewSelect().
With("t", values).
TableExpr("t").
OrderExpr("t.num DESC").
Rows(ctx)
require.NoError(t, err)
defer rows.Close()
var nums []int
err = db.ScanRows(ctx, rows, &nums)
require.NoError(t, err)
require.Equal(t, []int{3, 2, 1}, nums)
}
func testRunInTx(t *testing.T, db *bun.DB) {
type Counter struct {
Count int64
}
err := db.ResetModel(ctx, (*Counter)(nil))
require.NoError(t, err)
_, err = db.NewInsert().Model(&Counter{Count: 0}).Exec(ctx)
require.NoError(t, err)
err = db.RunInTx(ctx, nil, func(ctx context.Context, tx bun.Tx) error {
_, err := tx.NewUpdate().Model((*Counter)(nil)).
Set("count = count + 1").
Where("TRUE").
Exec(ctx)
return err
})
require.NoError(t, err)
var count int
err = db.NewSelect().Model((*Counter)(nil)).Scan(ctx, &count)
require.NoError(t, err)
require.Equal(t, 1, count)
err = db.RunInTx(ctx, nil, func(ctx context.Context, tx bun.Tx) error {
if _, err := tx.NewUpdate().Model((*Counter)(nil)).
Set("count = count + 1").
Where("TRUE").
Exec(ctx); err != nil {
return err
}
return errors.New("rollback")
})
require.Error(t, err)
err = db.NewSelect().Model((*Counter)(nil)).Scan(ctx, &count)
require.NoError(t, err)
require.Equal(t, 1, count)
}
func testJSONSpecialChars(t *testing.T, db *bun.DB) {
type Model struct {
ID int
Attrs map[string]interface{} `bun:"type:json"`
}
ctx := context.Background()
err := db.ResetModel(ctx, (*Model)(nil))
require.NoError(t, err)
model := &Model{
Attrs: map[string]interface{}{
"hello": "\000world\nworld\u0000",
},
}
_, err = db.NewInsert().Model(model).Exec(ctx)
require.NoError(t, err)
model = new(Model)
err = db.NewSelect().Model(model).Scan(ctx)
require.NoError(t, err)
switch db.Dialect().Name() {
case dialect.MySQL:
require.Equal(t, map[string]interface{}{
"hello": "\x00world\nworld\x00",
}, model.Attrs)
default:
require.Equal(t, map[string]interface{}{
"hello": "\\u0000world\nworld\\u0000",
}, model.Attrs)
}
}
func testInsertIface(t *testing.T, db *bun.DB) {
type Model struct {
ID int
Value interface{} `bun:"type:json"`
}
ctx := context.Background()
err := db.ResetModel(ctx, (*Model)(nil))
require.NoError(t, err)
model := new(Model)
_, err = db.NewInsert().Model(model).Exec(ctx)
require.NoError(t, err)
model = &Model{
Value: "hello",
}
_, err = db.NewInsert().Model(model).Exec(ctx)
require.NoError(t, err)
}
func testSelectBool(t *testing.T, db *bun.DB) {
var flag bool
err := db.NewSelect().ColumnExpr("1").Scan(ctx, &flag)
require.NoError(t, err)
require.True(t, flag)
err = db.NewSelect().ColumnExpr("0").Scan(ctx, &flag)
require.NoError(t, err)
require.False(t, flag)
}
func testFKViolation(t *testing.T, db *bun.DB) {
type Deck struct {
ID int
UserID int
}
type User struct {
ID int
}
if db.Dialect().Name() == dialect.SQLite {
_, err := db.Exec("PRAGMA foreign_keys = ON;")
require.NoError(t, err)
}
for _, model := range []interface{}{(*Deck)(nil), (*User)(nil)} {
_, err := db.NewDropTable().Model(model).IfExists().Exec(ctx)
require.NoError(t, err)
}
_, err := db.NewCreateTable().
Model((*User)(nil)).
IfNotExists().
Exec(ctx)
require.NoError(t, err)
_, err = db.NewCreateTable().
Model((*Deck)(nil)).
IfNotExists().
ForeignKey("(user_id) REFERENCES users (id) ON DELETE CASCADE").
Exec(ctx)
require.NoError(t, err)
// Empty deck should violate FK constraint.
_, err = db.NewInsert().Model(new(Deck)).Exec(ctx)
require.Error(t, err)
// Create a deck that violates the user_id FK constraint
deck := &Deck{UserID: 42}
_, err = db.NewInsert().Model(deck).Exec(ctx)
require.Error(t, err)
decks := []*Deck{deck}
_, err = db.NewInsert().Model(&decks).Exec(ctx)
require.Error(t, err)
n, err := db.NewSelect().Model((*Deck)(nil)).Count(ctx)
require.NoError(t, err)
require.Equal(t, 0, n)
}
func testInterfaceAny(t *testing.T, db *bun.DB) {
switch db.Dialect().Name() {
case dialect.MySQL:
t.Skip()
}
type Model struct {
Value interface{}
}
model := new(Model)
err := db.NewSelect().ColumnExpr("NULL AS value").Scan(ctx, model)
require.NoError(t, err)
require.Nil(t, model.Value)
model = new(Model)
err = db.NewSelect().ColumnExpr(`'hello' AS value`).Scan(ctx, model)
require.NoError(t, err)
require.Equal(t, "hello", model.Value)
model = new(Model)
err = db.NewSelect().ColumnExpr(`42 AS value`).Scan(ctx, model)
require.NoError(t, err)
require.Equal(t, int64(42), model.Value)
}
func testInterfaceJSON(t *testing.T, db *bun.DB) {
type Model struct {
Value interface{} `bun:"type:json"`
}
model := new(Model)
err := db.NewSelect().ColumnExpr("NULL AS value").Scan(ctx, model)
require.NoError(t, err)
require.Nil(t, model.Value)
model = new(Model)
err = db.NewSelect().ColumnExpr(`'"hello"' AS value`).Scan(ctx, model)
require.NoError(t, err)
require.Equal(t, "hello", model.Value)
}
func testScanRawMessage(t *testing.T, db *bun.DB) {
type Model struct {
ID int64
Value json.RawMessage
}
ctx := context.Background()
err := db.ResetModel(ctx, (*Model)(nil))
require.NoError(t, err)
models := []Model{
{Value: json.RawMessage(`"hello"`)},
{Value: json.RawMessage(`"world"`)},
}
_, err = db.NewInsert().Model(&models).Exec(ctx)
require.NoError(t, err)
var models1 []Model
err = db.NewSelect().Model(&models1).Order("id ASC").Scan(ctx)
require.NoError(t, err)
var models2 []Model
err = db.NewSelect().Model(&models2).Order("id DESC").Scan(ctx)
require.NoError(t, err)
require.Equal(t, models, models1)
}
func testPointers(t *testing.T, db *bun.DB) {
type Model struct {
ID *int64 `bun:",default:0"`
Str *string
}
ctx := context.Background()
for _, id := range []int64{-1, 0, 1} {
err := db.ResetModel(ctx, (*Model)(nil))
require.NoError(t, err)
var model Model
if id >= 0 {
str := "hello"
model.ID = &id
model.Str = &str
}
_, err = db.NewInsert().Model(&model).Exec(ctx)
require.NoError(t, err)
var model2 Model
err = db.NewSelect().Model(&model2).Order("id ASC").Scan(ctx)
require.NoError(t, err)
}
}
func testExists(t *testing.T, db *bun.DB) {
ctx := context.Background()
exists, err := db.NewSelect().ColumnExpr("1").Exists(ctx)
require.NoError(t, err)
require.True(t, exists)
exists, err = db.NewSelect().ColumnExpr("1").Where("1 = 0").Exists(ctx)
require.NoError(t, err)
require.False(t, exists)
}
func testScanTimeIntoString(t *testing.T, db *bun.DB) {
ctx := context.Background()
var str string
err := db.NewSelect().ColumnExpr("CURRENT_TIMESTAMP").Scan(ctx, &str)
require.NoError(t, err)
require.NotZero(t, str)
}
func testModelNonPointer(t *testing.T, db *bun.DB) {
type Model struct{}
_, err := db.NewInsert().Model(Model{}).ExcludeColumn("id").Returning("id").Exec(ctx)
require.Error(t, err)
require.Equal(t, "bun: Model(non-pointer dbtest_test.Model)", err.Error())
}
func testBinaryData(t *testing.T, db *bun.DB) {
type Model struct {
ID int64
Data []byte
}
ctx := context.Background()
err := db.ResetModel(ctx, (*Model)(nil))
require.NoError(t, err)
_, err = db.NewInsert().Model(&Model{Data: []byte("hello")}).Exec(ctx)
require.NoError(t, err)
var model Model
err = db.NewSelect().Model(&model).Scan(ctx)
require.NoError(t, err)
require.Equal(t, []byte("hello"), model.Data)
}
func testUpsert(t *testing.T, db *bun.DB) {
type Model struct {
ID int64
Str string
}
ctx := context.Background()
err := db.ResetModel(ctx, (*Model)(nil))
require.NoError(t, err)
model := &Model{ID: 1, Str: "hello"}
_, err = db.NewInsert().Model(model).Exec(ctx)
require.NoError(t, err)
model.Str = "world"
switch db.Dialect().Name() {
case dialect.MySQL:
_, err := db.NewInsert().Model(model).On("DUPLICATE KEY UPDATE").Exec(ctx)
require.NoError(t, err)
default:
_, err := db.NewInsert().Model(model).On("CONFLICT (id) DO UPDATE").Exec(ctx)
require.NoError(t, err)
}
err = db.NewSelect().Model(model).WherePK().Scan(ctx)
require.NoError(t, err)
require.Equal(t, "world", model.Str)
}
func testMultiUpdate(t *testing.T, db *bun.DB) {
if !db.Dialect().Features().Has(feature.CTE) {
t.Skip()
return
}
type Model struct {
ID int64
Str string
}
ctx := context.Background()
err := db.ResetModel(ctx, (*Model)(nil))
require.NoError(t, err)
model := &Model{ID: 1, Str: "hello"}
_, err = db.NewInsert().Model(model).Exec(ctx)
require.NoError(t, err)
selq := db.NewSelect().Model(new(Model))
_, err = db.NewUpdate().
With("src", selq).
TableExpr("models AS dest").
Table("src").
Set("? = src.str", db.UpdateFQN("dest", "str")).
Where("dest.id = src.id").
Exec(ctx)
require.NoError(t, err)
}
func testTxScanAndCount(t *testing.T, db *bun.DB) {
type Model struct {
ID int64
Str string
}
ctx := context.Background()
err := db.ResetModel(ctx, (*Model)(nil))
require.NoError(t, err)
for i := 0; i < 100; i++ {
err := db.RunInTx(ctx, nil, func(ctx context.Context, tx bun.Tx) error {
var models []Model
_, err := tx.NewSelect().Model(&models).ScanAndCount(ctx)
return err
})
require.NoError(t, err)
}
}
| [
"\"PG\"",
"\"PG\"",
"\"MYSQL\"",
"\"MYSQL5\"",
"\"MYSQL5\""
]
| []
| [
"MYSQL",
"PG",
"MYSQL5"
]
| [] | ["MYSQL", "PG", "MYSQL5"] | go | 3 | 0 | |
pkg/env/prod.go | package env
// Copyright (c) Microsoft Corporation.
// Licensed under the Apache License 2.0.
import (
"context"
"crypto/rsa"
"crypto/x509"
"fmt"
"net"
"net/url"
"os"
"strings"
"time"
mgmtcompute "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2020-06-01/compute"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/sirupsen/logrus"
"github.com/Azure/ARO-RP/pkg/proxy"
"github.com/Azure/ARO-RP/pkg/util/azureclient/mgmt/compute"
"github.com/Azure/ARO-RP/pkg/util/clientauthorizer"
"github.com/Azure/ARO-RP/pkg/util/computeskus"
"github.com/Azure/ARO-RP/pkg/util/keyvault"
"github.com/Azure/ARO-RP/pkg/util/refreshable"
"github.com/Azure/ARO-RP/pkg/util/version"
)
type prod struct {
Core
proxy.Dialer
ARMHelper
armClientAuthorizer clientauthorizer.ClientAuthorizer
adminClientAuthorizer clientauthorizer.ClientAuthorizer
acrDomain string
vmskus map[string]*mgmtcompute.ResourceSku
fpCertificateRefresher CertificateRefresher
fpClientID string
clusterKeyvault keyvault.Manager
serviceKeyvault keyvault.Manager
clusterGenevaLoggingCertificate *x509.Certificate
clusterGenevaLoggingPrivateKey *rsa.PrivateKey
clusterGenevaLoggingAccount string
clusterGenevaLoggingConfigVersion string
clusterGenevaLoggingEnvironment string
clusterGenevaLoggingNamespace string
gatewayDomains []string
log *logrus.Entry
features map[Feature]bool
}
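// newProd builds the production environment: it validates required environment variables,
// wires up MSI and first-party authorizers and key vaults, loads the cluster Geneva logging
// certificate, and populates the VM SKU cache for the current location.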
func newProd(ctx context.Context, log *logrus.Entry) (*prod, error) {
for _, key := range []string{
"AZURE_FP_CLIENT_ID",
"DOMAIN_NAME",
} {
if _, found := os.LookupEnv(key); !found {
return nil, fmt.Errorf("environment variable %q unset", key)
}
}
if !IsLocalDevelopmentMode() {
for _, key := range []string{
"CLUSTER_MDSD_CONFIG_VERSION",
"CLUSTER_MDSD_ACCOUNT",
"GATEWAY_DOMAINS",
"GATEWAY_RESOURCEGROUP",
"MDSD_ENVIRONMENT",
"CLUSTER_MDSD_NAMESPACE",
} {
if _, found := os.LookupEnv(key); !found {
return nil, fmt.Errorf("environment variable %q unset", key)
}
}
}
core, err := NewCore(ctx, log)
if err != nil {
return nil, err
}
dialer, err := proxy.NewDialer(core.IsLocalDevelopmentMode())
if err != nil {
return nil, err
}
p := &prod{
Core: core,
Dialer: dialer,
fpClientID: os.Getenv("AZURE_FP_CLIENT_ID"),
clusterGenevaLoggingAccount: os.Getenv("CLUSTER_MDSD_ACCOUNT"),
clusterGenevaLoggingConfigVersion: os.Getenv("CLUSTER_MDSD_CONFIG_VERSION"),
clusterGenevaLoggingEnvironment: os.Getenv("MDSD_ENVIRONMENT"),
clusterGenevaLoggingNamespace: os.Getenv("CLUSTER_MDSD_NAMESPACE"),
log: log,
features: map[Feature]bool{},
}
features := os.Getenv("RP_FEATURES")
if features != "" {
for _, feature := range strings.Split(features, ",") {
f, err := FeatureString("Feature" + feature)
if err != nil {
return nil, err
}
p.features[f] = true
}
}
msiAuthorizer, err := p.NewMSIAuthorizer(MSIContextRP, p.Environment().ResourceManagerEndpoint)
if err != nil {
return nil, err
}
msiKVAuthorizer, err := p.NewMSIAuthorizer(MSIContextRP, p.Environment().ResourceIdentifiers.KeyVault)
if err != nil {
return nil, err
}
serviceKeyvaultURI, err := keyvault.URI(p, ServiceKeyvaultSuffix)
if err != nil {
return nil, err
}
p.serviceKeyvault = keyvault.NewManager(msiKVAuthorizer, serviceKeyvaultURI)
resourceSkusClient := compute.NewResourceSkusClient(p.Environment(), p.SubscriptionID(), msiAuthorizer)
err = p.populateVMSkus(ctx, resourceSkusClient)
if err != nil {
return nil, err
}
p.fpCertificateRefresher = newCertificateRefresher(log, 1*time.Hour, p.serviceKeyvault, RPFirstPartySecretName)
err = p.fpCertificateRefresher.Start(ctx)
if err != nil {
return nil, err
}
localFPKVAuthorizer, err := p.FPAuthorizer(p.TenantID(), p.Environment().ResourceIdentifiers.KeyVault)
if err != nil {
return nil, err
}
clusterKeyvaultURI, err := keyvault.URI(p, ClusterKeyvaultSuffix)
if err != nil {
return nil, err
}
p.clusterKeyvault = keyvault.NewManager(localFPKVAuthorizer, clusterKeyvaultURI)
clusterGenevaLoggingPrivateKey, clusterGenevaLoggingCertificates, err := p.serviceKeyvault.GetCertificateSecret(ctx, ClusterLoggingSecretName)
if err != nil {
return nil, err
}
p.clusterGenevaLoggingPrivateKey = clusterGenevaLoggingPrivateKey
p.clusterGenevaLoggingCertificate = clusterGenevaLoggingCertificates[0]
var acrDataDomain string
if p.ACRResourceID() != "" { // TODO: ugh!
acrResource, err := azure.ParseResourceID(p.ACRResourceID())
if err != nil {
return nil, err
}
p.acrDomain = acrResource.ResourceName + "." + p.Environment().ContainerRegistryDNSSuffix
acrDataDomain = acrResource.ResourceName + "." + p.Location() + ".data." + p.Environment().ContainerRegistryDNSSuffix
} else {
p.acrDomain = "arointsvc." + azure.PublicCloud.ContainerRegistryDNSSuffix // TODO: make cloud aware once this is set up for US Gov Cloud
acrDataDomain = "arointsvc." + p.Location() + ".data." + azure.PublicCloud.ContainerRegistryDNSSuffix // TODO: make cloud aware once this is set up for US Gov Cloud
}
if !p.IsLocalDevelopmentMode() {
gatewayDomains := os.Getenv("GATEWAY_DOMAINS")
if gatewayDomains != "" {
p.gatewayDomains = strings.Split(gatewayDomains, ",")
}
for _, rawurl := range []string{
p.Environment().ActiveDirectoryEndpoint,
p.Environment().ResourceManagerEndpoint,
} {
u, err := url.Parse(rawurl)
if err != nil {
return nil, err
}
p.gatewayDomains = append(p.gatewayDomains, u.Hostname())
}
p.gatewayDomains = append(p.gatewayDomains, p.acrDomain, acrDataDomain)
}
p.ARMHelper, err = newARMHelper(ctx, log, p)
if err != nil {
return nil, err
}
return p, nil
}
func (p *prod) InitializeAuthorizers() error {
if !p.FeatureIsSet(FeatureEnableDevelopmentAuthorizer) {
p.armClientAuthorizer = clientauthorizer.NewARM(p.log, p.Core)
} else {
armClientAuthorizer, err := clientauthorizer.NewSubjectNameAndIssuer(
p.log,
"/etc/aro-rp/arm-ca-bundle.pem",
os.Getenv("ARM_API_CLIENT_CERT_COMMON_NAME"),
)
if err != nil {
return err
}
p.armClientAuthorizer = armClientAuthorizer
}
adminClientAuthorizer, err := clientauthorizer.NewSubjectNameAndIssuer(
p.log,
"/etc/aro-rp/admin-ca-bundle.pem",
os.Getenv("ADMIN_API_CLIENT_CERT_COMMON_NAME"),
)
if err != nil {
return err
}
p.adminClientAuthorizer = adminClientAuthorizer
return nil
}
func (p *prod) ArmClientAuthorizer() clientauthorizer.ClientAuthorizer {
return p.armClientAuthorizer
}
func (p *prod) AdminClientAuthorizer() clientauthorizer.ClientAuthorizer {
return p.adminClientAuthorizer
}
func (p *prod) ACRResourceID() string {
return os.Getenv("ACR_RESOURCE_ID")
}
func (p *prod) ACRDomain() string {
return p.acrDomain
}
func (p *prod) AROOperatorImage() string {
return fmt.Sprintf("%s/aro:%s", p.acrDomain, version.GitCommit)
}
func (p *prod) populateVMSkus(ctx context.Context, resourceSkusClient compute.ResourceSkusClient) error {
// Filtering is poorly documented, but currently (API version 2019-04-01)
// it seems that the API returns all SKUs without a filter and with invalid
// value in the filter.
// Filtering gives significant optimisation: at the moment of writing,
// we get ~1.2M response in eastus vs ~37M unfiltered (467 items vs 16618).
filter := fmt.Sprintf("location eq '%s'", p.Location())
skus, err := resourceSkusClient.List(ctx, filter)
if err != nil {
return err
}
p.vmskus = computeskus.FilterVMSizes(skus, p.Location())
return nil
}
func (p *prod) ClusterGenevaLoggingAccount() string {
return p.clusterGenevaLoggingAccount
}
func (p *prod) ClusterGenevaLoggingConfigVersion() string {
return p.clusterGenevaLoggingConfigVersion
}
func (p *prod) ClusterGenevaLoggingEnvironment() string {
return p.clusterGenevaLoggingEnvironment
}
func (p *prod) ClusterGenevaLoggingNamespace() string {
return p.clusterGenevaLoggingNamespace
}
func (p *prod) ClusterGenevaLoggingSecret() (*rsa.PrivateKey, *x509.Certificate) {
return p.clusterGenevaLoggingPrivateKey, p.clusterGenevaLoggingCertificate
}
func (p *prod) ClusterKeyvault() keyvault.Manager {
return p.clusterKeyvault
}
func (p *prod) Domain() string {
return os.Getenv("DOMAIN_NAME")
}
func (p *prod) FeatureIsSet(f Feature) bool {
return p.features[f]
}
func (p *prod) FPAuthorizer(tenantID, resource string) (refreshable.Authorizer, error) {
oauthConfig, err := adal.NewOAuthConfig(p.Environment().ActiveDirectoryEndpoint, tenantID)
if err != nil {
return nil, err
}
fpPrivateKey, fpCertificates := p.fpCertificateRefresher.GetCertificates()
sp, err := adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, p.fpClientID, fpCertificates[0], fpPrivateKey, resource)
if err != nil {
return nil, err
}
return refreshable.NewAuthorizer(sp), nil
}
func (p *prod) FPClientID() string {
return p.fpClientID
}
func (p *prod) Listen() (net.Listener, error) {
return net.Listen("tcp", ":8443")
}
func (p *prod) GatewayDomains() []string {
gatewayDomains := make([]string, len(p.gatewayDomains))
copy(gatewayDomains, p.gatewayDomains)
return gatewayDomains
}
func (p *prod) GatewayResourceGroup() string {
return os.Getenv("GATEWAY_RESOURCEGROUP")
}
func (p *prod) ServiceKeyvault() keyvault.Manager {
return p.serviceKeyvault
}
func (p *prod) VMSku(vmSize string) (*mgmtcompute.ResourceSku, error) {
vmsku, found := p.vmskus[vmSize]
if !found {
return nil, fmt.Errorf("sku information not found for vm size %q", vmSize)
}
return vmsku, nil
}
| [
"\"AZURE_FP_CLIENT_ID\"",
"\"CLUSTER_MDSD_ACCOUNT\"",
"\"CLUSTER_MDSD_CONFIG_VERSION\"",
"\"MDSD_ENVIRONMENT\"",
"\"CLUSTER_MDSD_NAMESPACE\"",
"\"RP_FEATURES\"",
"\"GATEWAY_DOMAINS\"",
"\"ARM_API_CLIENT_CERT_COMMON_NAME\"",
"\"ADMIN_API_CLIENT_CERT_COMMON_NAME\"",
"\"ACR_RESOURCE_ID\"",
"\"DOMAIN_NAME\"",
"\"GATEWAY_RESOURCEGROUP\""
]
| []
| [
"CLUSTER_MDSD_ACCOUNT",
"ACR_RESOURCE_ID",
"GATEWAY_DOMAINS",
"GATEWAY_RESOURCEGROUP",
"CLUSTER_MDSD_CONFIG_VERSION",
"CLUSTER_MDSD_NAMESPACE",
"ARM_API_CLIENT_CERT_COMMON_NAME",
"DOMAIN_NAME",
"RP_FEATURES",
"ADMIN_API_CLIENT_CERT_COMMON_NAME",
"AZURE_FP_CLIENT_ID",
"MDSD_ENVIRONMENT"
]
| [] | ["CLUSTER_MDSD_ACCOUNT", "ACR_RESOURCE_ID", "GATEWAY_DOMAINS", "GATEWAY_RESOURCEGROUP", "CLUSTER_MDSD_CONFIG_VERSION", "CLUSTER_MDSD_NAMESPACE", "ARM_API_CLIENT_CERT_COMMON_NAME", "DOMAIN_NAME", "RP_FEATURES", "ADMIN_API_CLIENT_CERT_COMMON_NAME", "AZURE_FP_CLIENT_ID", "MDSD_ENVIRONMENT"] | go | 12 | 0 | |
pkg/analytics/opt_test.go | package analytics_test
import (
"io/ioutil"
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/tilt-dev/wmclient/pkg/analytics"
)
func TestString(t *testing.T) {
if analytics.OptIn.String() != "opt-in" {
t.Errorf("Expected opt-in, actual: %s", analytics.OptIn)
}
}
func TestSetOptStr(t *testing.T) {
f := setup(t)
defer f.tearDown()
f.assertOptStatus(analytics.OptDefault)
for _, v := range []struct {
s string
opt analytics.Opt
}{
{"opt-in", analytics.OptIn},
{"opt-out", analytics.OptOut},
{"in", analytics.OptIn},
{"out", analytics.OptOut},
} {
opt, err := analytics.SetOptStr(v.s)
if assert.NoError(t, err) {
assert.Equal(t, v.opt, opt)
f.assertOptStatus(v.opt)
}
}
_, err := analytics.SetOptStr("foo")
if assert.Error(t, err) {
assert.Contains(t, err.Error(), "unknown analytics opt: \"foo\"")
}
}
func TestSetOpt(t *testing.T) {
f := setup(t)
defer f.tearDown()
f.assertOptStatus(analytics.OptDefault)
analytics.SetOpt(analytics.OptIn)
f.assertOptStatus(analytics.OptIn)
analytics.SetOpt(analytics.OptOut)
f.assertOptStatus(analytics.OptOut)
analytics.SetOpt(99999)
f.assertOptStatus(analytics.OptDefault)
}
type fixture struct {
t *testing.T
dir string
oldWindmillDir string
}
func setup(t *testing.T) *fixture {
oldWindmillDir := os.Getenv("WINDMILL_DIR")
dir, err := ioutil.TempDir("", t.Name())
if err != nil {
t.Fatalf("Error making temp dir: %v", err)
}
err = os.Setenv("WINDMILL_DIR", dir)
if !assert.NoError(t, err) {
t.FailNow()
}
return &fixture{t: t, dir: dir, oldWindmillDir: oldWindmillDir}
}
func (f *fixture) tearDown() {
os.RemoveAll(f.dir)
os.Setenv("WINDMILL_DIR", f.oldWindmillDir)
}
func (f *fixture) assertOptStatus(expected analytics.Opt) {
actual, err := analytics.OptStatus()
if err != nil {
f.t.Fatal(err)
}
if actual != expected {
f.t.Errorf("got opt status %v, expected %v", actual, expected)
}
}
| [
"\"WINDMILL_DIR\""
]
| []
| [
"WINDMILL_DIR"
]
| [] | ["WINDMILL_DIR"] | go | 1 | 0 | |
cmd/load.go | // Copyright © 2016 NAME HERE <EMAIL ADDRESS>
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"fmt"
"github.com/getwe/figlet4go"
"github.com/spf13/cobra"
"gopkg.in/yaml.v2"
"io/ioutil"
"os"
)
// loadCmd represents the load command
var loadCmd = &cobra.Command{
Use: "load",
Short: "A brief description of your command",
Long: `A longer description that spans multiple lines and likely contains examples
and usage of using your command. For example:
Cobra is a CLI library for Go that empowers applications.
This application is a tool to generate the needed files
to quickly create a Cobra application.`,
Run: func(cmd *cobra.Command, args []string) {
config, err := loadConfig()
if err != nil {
fmt.Printf("Failed load $HOME/.chenv.yaml, %s", err)
return
}
key := args[0]
nextEnv := getNextEnv(config, key)
printBanner(key)
for k, v := range nextEnv {
os.Setenv(k.(string), v.(string))
}
shell := os.Getenv("SHELL")
procAttr := new(os.ProcAttr)
newEnv := []string{"CHENV=" + key}
procAttr.Env = newEnv
procAttr.Files = []*os.File{os.Stdin, os.Stdout, os.Stderr}
shellArgs := []string{
//"--xtrace",
}
if p, err := os.StartProcess(shell, shellArgs, procAttr); err != nil {
fmt.Println("[ERROR] Failed to StartProcess")
fmt.Println("[ERROR] Failed to StartProcess, %s", err)
return
} else {
_, err := p.Wait()
if err != nil {
fmt.Println("[ERROR]: failed wait: %s", err)
return
}
}
},
}
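// loadConfig reads $HOME/.chenv.yaml and unmarshals it into a map keyed by environment name.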
func loadConfig() (map[interface{}]interface{}, error) {
buf, err := ioutil.ReadFile(os.Getenv("HOME") + "/.chenv.yaml")
if err != nil {
return nil, err
}
m := make(map[interface{}]interface{})
err = yaml.Unmarshal(buf, &m)
if err != nil {
panic(err)
}
return m, nil
}
func getNextEnv(config map[interface{}]interface{}, key string) map[interface{}]interface{} {
return config[key].(map[interface{}]interface{})
}
func printBanner(s string) {
ascii := figlet4go.NewAsciiRender()
renderStr, _ := ascii.Render(s)
fmt.Println(renderStr)
}
func init() {
RootCmd.AddCommand(loadCmd)
// Here you will define your flags and configuration settings.
// Cobra supports Persistent Flags which will work for this command
// and all subcommands, e.g.:
// loadCmd.PersistentFlags().String("foo", "", "A help for foo")
// Cobra supports local flags which will only run when this command
// is called directly, e.g.:
// loadCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle")
}
| [
"\"SHELL\"",
"\"HOME\""
]
| []
| [
"SHELL",
"HOME"
]
| [] | ["SHELL", "HOME"] | go | 2 | 0 | |
adminLogin/manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'adminLogin.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
config/config.go | package config
import (
"log"
"os"
)
//AppConfig contains all configurations necessary for the application
type AppConfig struct {
DBHost string
DBPort string
DBUser string
DBPassword string
DBName string
TableName string
DBProvider string
ReinitializeTable string
}
var (
//Configurations contain all runtime configs for use in other packages
Configurations *AppConfig
)
//Configure reads environment variables and sets them to Configurations variable
func Configure() {
log.Println("fetching configurations from environment")
Configurations = new(AppConfig)
Configurations.DBHost = os.Getenv("DBHOST")
Configurations.DBPort = os.Getenv("DBPORT")
Configurations.DBUser = os.Getenv("DBUSER")
Configurations.DBPassword = os.Getenv("DBPASSWORD")
Configurations.DBName = os.Getenv("DBNAME")
Configurations.TableName = os.Getenv("TABLENAME")
Configurations.DBProvider = os.Getenv("DBPROVIDER")
Configurations.ReinitializeTable = os.Getenv("REINITIALIZE_TABLE")
log.Println("DBHOST " + Configurations.DBHost)
log.Println("DBPORT " + Configurations.DBPort)
log.Println("DBUSER " + Configurations.DBUser)
if len(Configurations.DBPassword) != 0 {
log.Println("DBPASSWORD ********")
} else {
log.Println("DBPASSWORD is empty")
}
log.Println("DBNAME " + Configurations.DBName)
log.Println("TABLENAME " + Configurations.TableName)
log.Println("DBPROVIDER " + Configurations.DBProvider)
log.Println("REINITIALIZE_TABLE " + Configurations.ReinitializeTable)
}
| [
"\"DBHOST\"",
"\"DBPORT\"",
"\"DBUSER\"",
"\"DBPASSWORD\"",
"\"DBNAME\"",
"\"TABLENAME\"",
"\"DBPROVIDER\"",
"\"REINITIALIZE_TABLE\""
]
| []
| [
"TABLENAME",
"DBHOST",
"DBUSER",
"DBNAME",
"DBPASSWORD",
"DBPROVIDER",
"REINITIALIZE_TABLE",
"DBPORT"
]
| [] | ["TABLENAME", "DBHOST", "DBUSER", "DBNAME", "DBPASSWORD", "DBPROVIDER", "REINITIALIZE_TABLE", "DBPORT"] | go | 8 | 0 | |
kubernetes-client/src/main/java/io/fabric8/kubernetes/client/Config.java | /**
* Copyright (C) 2015 Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.fabric8.kubernetes.client;
import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import io.fabric8.kubernetes.api.model.ConfigBuilder;
import okhttp3.TlsVersion;
import io.fabric8.kubernetes.api.model.AuthInfo;
import io.fabric8.kubernetes.api.model.Cluster;
import io.fabric8.kubernetes.api.model.Context;
import io.fabric8.kubernetes.client.internal.KubeConfigUtils;
import io.fabric8.kubernetes.client.internal.SSLUtils;
import io.fabric8.kubernetes.client.utils.Utils;
import io.sundr.builder.annotations.Buildable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;
import static okhttp3.TlsVersion.TLS_1_2;
@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonIgnoreProperties(ignoreUnknown = true, allowGetters = true, allowSetters = true)
public class Config {
private static final Logger LOGGER = LoggerFactory.getLogger(Config.class);
public static final String KUBERNETES_MASTER_SYSTEM_PROPERTY = "kubernetes.master";
public static final String KUBERNETES_API_VERSION_SYSTEM_PROPERTY = "kubernetes.api.version";
public static final String KUBERNETES_TRUST_CERT_SYSTEM_PROPERTY = "kubernetes.trust.certificates";
public static final String KUBERNETES_DISABLE_HOSTNAME_VERIFICATION_SYSTEM_PROPERTY = "kubernetes.disable.hostname.verification";
public static final String KUBERNETES_CA_CERTIFICATE_FILE_SYSTEM_PROPERTY = "kubernetes.certs.ca.file";
public static final String KUBERNETES_CA_CERTIFICATE_DATA_SYSTEM_PROPERTY = "kubernetes.certs.ca.data";
public static final String KUBERNETES_CLIENT_CERTIFICATE_FILE_SYSTEM_PROPERTY = "kubernetes.certs.client.file";
public static final String KUBERNETES_CLIENT_CERTIFICATE_DATA_SYSTEM_PROPERTY = "kubernetes.certs.client.data";
public static final String KUBERNETES_CLIENT_KEY_FILE_SYSTEM_PROPERTY = "kubernetes.certs.client.key.file";
public static final String KUBERNETES_CLIENT_KEY_DATA_SYSTEM_PROPERTY = "kubernetes.certs.client.key.data";
public static final String KUBERNETES_CLIENT_KEY_ALGO_SYSTEM_PROPERTY = "kubernetes.certs.client.key.algo";
public static final String KUBERNETES_CLIENT_KEY_PASSPHRASE_SYSTEM_PROPERTY = "kubernetes.certs.client.key.passphrase";
public static final String KUBERNETES_AUTH_BASIC_USERNAME_SYSTEM_PROPERTY = "kubernetes.auth.basic.username";
public static final String KUBERNETES_AUTH_BASIC_PASSWORD_SYSTEM_PROPERTY = "kubernetes.auth.basic.password";
public static final String KUBERNETES_AUTH_TRYKUBECONFIG_SYSTEM_PROPERTY = "kubernetes.auth.tryKubeConfig";
public static final String KUBERNETES_AUTH_TRYSERVICEACCOUNT_SYSTEM_PROPERTY = "kubernetes.auth.tryServiceAccount";
public static final String KUBERNETES_OAUTH_TOKEN_SYSTEM_PROPERTY = "kubernetes.auth.token";
public static final String KUBERNETES_WATCH_RECONNECT_INTERVAL_SYSTEM_PROPERTY = "kubernetes.watch.reconnectInterval";
public static final String KUBERNETES_WATCH_RECONNECT_LIMIT_SYSTEM_PROPERTY = "kubernetes.watch.reconnectLimit";
public static final String KUBERNETES_CONNECTION_TIMEOUT_SYSTEM_PROPERTY = "kubernetes.connection.timeout";
public static final String KUBERNETES_REQUEST_TIMEOUT_SYSTEM_PROPERTY = "kubernetes.request.timeout";
public static final String KUBERNETES_ROLLING_TIMEOUT_SYSTEM_PROPERTY = "kubernetes.rolling.timeout";
public static final String KUBERNETES_LOGGING_INTERVAL_SYSTEM_PROPERTY = "kubernetes.logging.interval";
public static final String KUBERNETES_SCALE_TIMEOUT_SYSTEM_PROPERTY = "kubernetes.scale.timeout";
public static final String KUBERNETES_WEBSOCKET_TIMEOUT_SYSTEM_PROPERTY = "kubernetes.websocket.timeout";
public static final String KUBERNETES_WEBSOCKET_PING_INTERVAL_SYSTEM_PROPERTY = "kubernetes.websocket.ping.interval";
public static final String KUBERNETES_MAX_CONCURRENT_REQUESTS = "kubernetes.max.concurrent.requests";
public static final String KUBERNETES_MAX_CONCURRENT_REQUESTS_PER_HOST = "kubernetes.max.concurrent.requests.per.host";
public static final String KUBERNETES_IMPERSONATE_USERNAME = "kubernetes.impersonate.username";
public static final String KUBERNETES_IMPERSONATE_GROUP = "kubernetes.impersonate.group";
public static final String KUBERNETES_TRUSTSTORE_PASSPHRASE_PROPERTY = "kubernetes.truststore.passphrase";
public static final String KUBERNETES_TRUSTSTORE_FILE_PROPERTY = "kubernetes.truststore.file";
public static final String KUBERNETES_KEYSTORE_PASSPHRASE_PROPERTY = "kubernetes.keystore.passphrase";
public static final String KUBERNETES_KEYSTORE_FILE_PROPERTY = "kubernetes.keystore.file";
public static final String KUBERNETES_TLS_VERSIONS = "kubernetes.tls.versions";
public static final String KUBERNETES_TRYNAMESPACE_PATH_SYSTEM_PROPERTY = "kubernetes.tryNamespacePath";
public static final String KUBERNETES_NAMESPACE_PATH = "/var/run/secrets/kubernetes.io/serviceaccount/namespace";
public static final String KUBERNETES_NAMESPACE_FILE = "kubenamespace";
public static final String KUBERNETES_NAMESPACE_SYSTEM_PROPERTY = "kubernetes.namespace";
public static final String KUBERNETES_KUBECONFIG_FILE = "kubeconfig";
public static final String KUBERNETES_SERVICE_HOST_PROPERTY = "KUBERNETES_SERVICE_HOST";
public static final String KUBERNETES_SERVICE_PORT_PROPERTY = "KUBERNETES_SERVICE_PORT";
public static final String KUBERNETES_SERVICE_ACCOUNT_TOKEN_PATH = "/var/run/secrets/kubernetes.io/serviceaccount/token";
public static final String KUBERNETES_SERVICE_ACCOUNT_CA_CRT_PATH = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt";
public static final String KUBERNETES_HTTP_PROXY = "http.proxy";
public static final String KUBERNETES_HTTPS_PROXY = "https.proxy";
public static final String KUBERNETES_ALL_PROXY = "all.proxy";
public static final String KUBERNETES_NO_PROXY = "no.proxy";
public static final String KUBERNETES_PROXY_USERNAME = "proxy.username";
public static final String KUBERNETES_PROXY_PASSWORD = "proxy.password";
public static final String KUBERNETES_USER_AGENT = "fabric8-kubernetes-client/" + Version.clientVersion();
public static final Long DEFAULT_ROLLING_TIMEOUT = 15 * 60 * 1000L;
public static final Long DEFAULT_SCALE_TIMEOUT = 10 * 60 * 1000L;
public static final int DEFAULT_LOGGING_INTERVAL = 20 * 1000;
public static final Long DEFAULT_WEBSOCKET_TIMEOUT = 5 * 1000L;
public static final Long DEFAULT_WEBSOCKET_PING_INTERVAL = 1 * 1000L;
public static final Integer DEFAULT_MAX_CONCURRENT_REQUESTS = 64;
public static final Integer DEFAULT_MAX_CONCURRENT_REQUESTS_PER_HOST = 5;
public static final String HTTP_PROTOCOL_PREFIX = "http://";
public static final String HTTPS_PROTOCOL_PREFIX = "https://";
private static final String ACCESS_TOKEN = "access-token";
private boolean trustCerts;
private boolean disableHostnameVerification;
private String masterUrl = "https://kubernetes.default.svc";
private String apiVersion = "v1";
private String namespace;
private String caCertFile;
private String caCertData;
private String clientCertFile;
private String clientCertData;
private String clientKeyFile;
private String clientKeyData;
private String clientKeyAlgo = "RSA";
private String clientKeyPassphrase = "changeit";
private String trustStoreFile;
private String trustStorePassphrase;
private String keyStoreFile;
private String keyStorePassphrase;
private RequestConfig requestConfig = new RequestConfig();
/**
* fields not used but needed for builder generation.
*/
private String username;
private String password;
private String oauthToken;
private int watchReconnectInterval = 1000;
private int watchReconnectLimit = -1;
private int connectionTimeout = 10 * 1000;
private int requestTimeout = 10 * 1000;
private long rollingTimeout = DEFAULT_ROLLING_TIMEOUT;
private long scaleTimeout = DEFAULT_SCALE_TIMEOUT;
private int loggingInterval = DEFAULT_LOGGING_INTERVAL;
private long websocketTimeout = DEFAULT_WEBSOCKET_TIMEOUT;
private long websocketPingInterval = DEFAULT_WEBSOCKET_PING_INTERVAL;
private int maxConcurrentRequests = DEFAULT_MAX_CONCURRENT_REQUESTS;
private int maxConcurrentRequestsPerHost = DEFAULT_MAX_CONCURRENT_REQUESTS_PER_HOST;
private String impersonateUsername;
private String impersonateGroup;
private Map<String, String> impersonateExtras;
/**
* end of fields not used but needed for builder generation.
*/
private String httpProxy;
private String httpsProxy;
private String proxyUsername;
private String proxyPassword;
private String[] noProxy;
private String userAgent;
private TlsVersion[] tlsVersions = new TlsVersion[]{TLS_1_2};
private Map<Integer, String> errorMessages = new HashMap<>();
//In future releases (2.4.x) the public constructor will be empty.
//The current functionality will be provided by autoConfigure().
//This is a necessary change to allow us distinguish between auto configured values and builder values.
@Deprecated
public Config() {
autoConfigure(this, null);
}
/**
* @param context if null will use current-context
*/
public static Config autoConfigure(String context) {
Config config = new Config();
return autoConfigure(config, context);
}
private static Config autoConfigure(Config config, String context) {
if (!tryKubeConfig(config, context)) {
tryServiceAccount(config);
tryNamespaceFromPath(config);
}
configFromSysPropsOrEnvVars(config);
if (!config.masterUrl.toLowerCase().startsWith(HTTP_PROTOCOL_PREFIX) && !config.masterUrl.toLowerCase().startsWith(HTTPS_PROTOCOL_PREFIX)) {
config.masterUrl = (SSLUtils.isHttpsAvailable(config) ? HTTPS_PROTOCOL_PREFIX : HTTP_PROTOCOL_PREFIX) + config.masterUrl;
}
if (!config.masterUrl.endsWith("/")) {
config.masterUrl = config.masterUrl + "/";
}
return config;
}
@Buildable(builderPackage = "io.fabric8.kubernetes.api.builder", editableEnabled = false)
public Config(String masterUrl, String apiVersion, String namespace, boolean trustCerts, boolean disableHostnameVerification, String caCertFile, String caCertData, String clientCertFile, String clientCertData, String clientKeyFile, String clientKeyData, String clientKeyAlgo, String clientKeyPassphrase, String username, String password, String oauthToken, int watchReconnectInterval, int watchReconnectLimit, int connectionTimeout, int requestTimeout, long rollingTimeout, long scaleTimeout, int loggingInterval, int maxConcurrentRequestsPerHost, String httpProxy, String httpsProxy, String[] noProxy, Map<Integer, String> errorMessages, String userAgent, TlsVersion[] tlsVersions, long websocketTimeout, long websocketPingInterval, String proxyUsername, String proxyPassword, String trustStoreFile, String trustStorePassphrase, String keyStoreFile, String keyStorePassphrase, String impersonateUsername, String impersonateGroup, Map<String, String> impersonateExtras) {
this.masterUrl = masterUrl;
this.apiVersion = apiVersion;
this.namespace = namespace;
this.trustCerts = trustCerts;
this.disableHostnameVerification = disableHostnameVerification;
this.caCertFile = caCertFile;
this.caCertData = caCertData;
this.clientCertFile = clientCertFile;
this.clientCertData = clientCertData;
this.clientKeyFile = clientKeyFile;
this.clientKeyData = clientKeyData;
this.clientKeyAlgo = clientKeyAlgo;
this.clientKeyPassphrase = clientKeyPassphrase;
this.requestConfig = new RequestConfig(username, password, oauthToken, watchReconnectLimit, watchReconnectInterval, connectionTimeout, rollingTimeout, requestTimeout, scaleTimeout, loggingInterval, websocketTimeout, websocketPingInterval, maxConcurrentRequests, maxConcurrentRequestsPerHost);
this.requestConfig.setImpersonateUsername(impersonateUsername);
this.requestConfig.setImpersonateGroup(impersonateGroup);
this.requestConfig.setImpersonateExtras(impersonateExtras);
this.httpProxy = httpProxy;
this.httpsProxy = httpsProxy;
this.noProxy = noProxy;
this.proxyUsername = proxyUsername;
this.proxyPassword = proxyPassword;
this.errorMessages = errorMessages;
this.userAgent = userAgent;
this.tlsVersions = tlsVersions;
if (!this.masterUrl.toLowerCase().startsWith(HTTP_PROTOCOL_PREFIX) && !this.masterUrl.toLowerCase().startsWith(HTTPS_PROTOCOL_PREFIX)) {
this.masterUrl = (SSLUtils.isHttpsAvailable(this) ? HTTPS_PROTOCOL_PREFIX : HTTP_PROTOCOL_PREFIX) + this.masterUrl;
}
if (!this.masterUrl.endsWith("/")) {
this.masterUrl = this.masterUrl + "/";
}
this.trustStoreFile = trustStoreFile;
this.trustStorePassphrase = trustStorePassphrase;
this.keyStoreFile = keyStoreFile;
this.keyStorePassphrase = keyStorePassphrase;
}
public static void configFromSysPropsOrEnvVars(Config config) {
config.setTrustCerts(Utils.getSystemPropertyOrEnvVar(KUBERNETES_TRUST_CERT_SYSTEM_PROPERTY, config.isTrustCerts()));
config.setDisableHostnameVerification(Utils.getSystemPropertyOrEnvVar(KUBERNETES_DISABLE_HOSTNAME_VERIFICATION_SYSTEM_PROPERTY, config.isDisableHostnameVerification()));
config.setMasterUrl(Utils.getSystemPropertyOrEnvVar(KUBERNETES_MASTER_SYSTEM_PROPERTY, config.getMasterUrl()));
config.setApiVersion(Utils.getSystemPropertyOrEnvVar(KUBERNETES_API_VERSION_SYSTEM_PROPERTY, config.getApiVersion()));
config.setNamespace(Utils.getSystemPropertyOrEnvVar(KUBERNETES_NAMESPACE_SYSTEM_PROPERTY, config.getNamespace()));
config.setCaCertFile(Utils.getSystemPropertyOrEnvVar(KUBERNETES_CA_CERTIFICATE_FILE_SYSTEM_PROPERTY, config.getCaCertFile()));
config.setCaCertData(Utils.getSystemPropertyOrEnvVar(KUBERNETES_CA_CERTIFICATE_DATA_SYSTEM_PROPERTY, config.getCaCertData()));
config.setClientCertFile(Utils.getSystemPropertyOrEnvVar(KUBERNETES_CLIENT_CERTIFICATE_FILE_SYSTEM_PROPERTY, config.getClientCertFile()));
config.setClientCertData(Utils.getSystemPropertyOrEnvVar(KUBERNETES_CLIENT_CERTIFICATE_DATA_SYSTEM_PROPERTY, config.getClientCertData()));
config.setClientKeyFile(Utils.getSystemPropertyOrEnvVar(KUBERNETES_CLIENT_KEY_FILE_SYSTEM_PROPERTY, config.getClientKeyFile()));
config.setClientKeyData(Utils.getSystemPropertyOrEnvVar(KUBERNETES_CLIENT_KEY_DATA_SYSTEM_PROPERTY, config.getClientKeyData()));
config.setClientKeyAlgo(Utils.getSystemPropertyOrEnvVar(KUBERNETES_CLIENT_KEY_ALGO_SYSTEM_PROPERTY, config.getClientKeyAlgo()));
config.setClientKeyPassphrase(Utils.getSystemPropertyOrEnvVar(KUBERNETES_CLIENT_KEY_PASSPHRASE_SYSTEM_PROPERTY, new String(config.getClientKeyPassphrase())));
config.setUserAgent(Utils.getSystemPropertyOrEnvVar(KUBERNETES_USER_AGENT, config.getUserAgent()));
config.setTrustStorePassphrase(Utils.getSystemPropertyOrEnvVar(KUBERNETES_TRUSTSTORE_PASSPHRASE_PROPERTY, config.getTrustStorePassphrase()));
config.setTrustStoreFile(Utils.getSystemPropertyOrEnvVar(KUBERNETES_TRUSTSTORE_FILE_PROPERTY, config.getTrustStoreFile()));
config.setKeyStorePassphrase(Utils.getSystemPropertyOrEnvVar(KUBERNETES_KEYSTORE_PASSPHRASE_PROPERTY, config.getKeyStorePassphrase()));
config.setKeyStoreFile(Utils.getSystemPropertyOrEnvVar(KUBERNETES_KEYSTORE_FILE_PROPERTY, config.getKeyStoreFile()));
config.setOauthToken(Utils.getSystemPropertyOrEnvVar(KUBERNETES_OAUTH_TOKEN_SYSTEM_PROPERTY, config.getOauthToken()));
config.setUsername(Utils.getSystemPropertyOrEnvVar(KUBERNETES_AUTH_BASIC_USERNAME_SYSTEM_PROPERTY, config.getUsername()));
config.setPassword(Utils.getSystemPropertyOrEnvVar(KUBERNETES_AUTH_BASIC_PASSWORD_SYSTEM_PROPERTY, config.getPassword()));
config.setImpersonateUsername(Utils.getSystemPropertyOrEnvVar(KUBERNETES_IMPERSONATE_USERNAME, config.getImpersonateUsername()));
config.setImpersonateGroup(Utils.getSystemPropertyOrEnvVar(KUBERNETES_IMPERSONATE_GROUP, config.getImpersonateGroup()));
String configuredWatchReconnectInterval = Utils.getSystemPropertyOrEnvVar(KUBERNETES_WATCH_RECONNECT_INTERVAL_SYSTEM_PROPERTY);
if (configuredWatchReconnectInterval != null) {
config.setWatchReconnectInterval(Integer.parseInt(configuredWatchReconnectInterval));
}
String configuredWatchReconnectLimit = Utils.getSystemPropertyOrEnvVar(KUBERNETES_WATCH_RECONNECT_LIMIT_SYSTEM_PROPERTY);
if (configuredWatchReconnectLimit != null) {
config.setWatchReconnectLimit(Integer.parseInt(configuredWatchReconnectLimit));
}
String configuredRollingTimeout = Utils.getSystemPropertyOrEnvVar(KUBERNETES_ROLLING_TIMEOUT_SYSTEM_PROPERTY, String.valueOf(DEFAULT_ROLLING_TIMEOUT));
if (configuredRollingTimeout != null) {
config.setRollingTimeout(Long.parseLong(configuredRollingTimeout));
}
String configuredScaleTimeout = Utils.getSystemPropertyOrEnvVar(KUBERNETES_SCALE_TIMEOUT_SYSTEM_PROPERTY, String.valueOf(DEFAULT_SCALE_TIMEOUT));
if (configuredScaleTimeout != null) {
config.setScaleTimeout(Long.parseLong(configuredScaleTimeout));
}
String configuredLoggingInterval = Utils.getSystemPropertyOrEnvVar(KUBERNETES_LOGGING_INTERVAL_SYSTEM_PROPERTY, String.valueOf(DEFAULT_LOGGING_INTERVAL));
if (configuredLoggingInterval != null) {
config.setLoggingInterval(Integer.parseInt(configuredLoggingInterval));
}
config.setConnectionTimeout(Utils.getSystemPropertyOrEnvVar(KUBERNETES_CONNECTION_TIMEOUT_SYSTEM_PROPERTY, config.getConnectionTimeout()));
config.setRequestTimeout(Utils.getSystemPropertyOrEnvVar(KUBERNETES_REQUEST_TIMEOUT_SYSTEM_PROPERTY, config.getRequestTimeout()));
String configuredWebsocketTimeout = Utils.getSystemPropertyOrEnvVar(KUBERNETES_WEBSOCKET_TIMEOUT_SYSTEM_PROPERTY, String.valueOf(config.getWebsocketTimeout()));
if (configuredWebsocketTimeout != null) {
config.setWebsocketTimeout(Long.parseLong(configuredWebsocketTimeout));
}
String configuredWebsocketPingInterval = Utils.getSystemPropertyOrEnvVar(KUBERNETES_WEBSOCKET_PING_INTERVAL_SYSTEM_PROPERTY, String.valueOf(config.getWebsocketPingInterval()));
if (configuredWebsocketPingInterval != null) {
config.setWebsocketPingInterval(Long.parseLong(configuredWebsocketPingInterval));
}
String configuredMaxConcurrentRequestsPerHost = Utils.getSystemPropertyOrEnvVar(KUBERNETES_MAX_CONCURRENT_REQUESTS_PER_HOST, String.valueOf(config.getMaxConcurrentRequestsPerHost()));
if (configuredMaxConcurrentRequestsPerHost != null) {
config.setMaxConcurrentRequestsPerHost(Integer.parseInt(configuredMaxConcurrentRequestsPerHost));
}
config.setHttpProxy(Utils.getSystemPropertyOrEnvVar(KUBERNETES_ALL_PROXY, config.getHttpProxy()));
config.setHttpsProxy(Utils.getSystemPropertyOrEnvVar(KUBERNETES_ALL_PROXY, config.getHttpsProxy()));
config.setHttpsProxy(Utils.getSystemPropertyOrEnvVar(KUBERNETES_HTTPS_PROXY, config.getHttpsProxy()));
config.setHttpProxy(Utils.getSystemPropertyOrEnvVar(KUBERNETES_HTTP_PROXY, config.getHttpProxy()));
config.setProxyUsername(Utils.getSystemPropertyOrEnvVar(KUBERNETES_PROXY_USERNAME, config.getProxyUsername()));
config.setProxyPassword(Utils.getSystemPropertyOrEnvVar(KUBERNETES_PROXY_PASSWORD, config.getProxyPassword()));
String noProxyVar = Utils.getSystemPropertyOrEnvVar(KUBERNETES_NO_PROXY);
if (noProxyVar != null) {
config.setNoProxy(noProxyVar.split(","));
}
String tlsVersionsVar = Utils.getSystemPropertyOrEnvVar(KUBERNETES_TLS_VERSIONS);
if (tlsVersionsVar != null && !tlsVersionsVar.isEmpty()) {
String[] tlsVersionsSplit = tlsVersionsVar.split(",");
TlsVersion[] tlsVersions = new TlsVersion[tlsVersionsSplit.length];
for (int i = 0; i < tlsVersionsSplit.length; i++) {
tlsVersions[i] = TlsVersion.forJavaName(tlsVersionsSplit[i]);
}
config.setTlsVersions(tlsVersions);
}
}
private static boolean tryServiceAccount(Config config) {
LOGGER.debug("Trying to configure client from service account...");
String masterHost = Utils.getSystemPropertyOrEnvVar(KUBERNETES_SERVICE_HOST_PROPERTY, (String) null);
String masterPort = Utils.getSystemPropertyOrEnvVar(KUBERNETES_SERVICE_PORT_PROPERTY, (String) null);
if (masterHost != null && masterPort != null) {
String hostPort = joinHostPort(masterHost, masterPort);
LOGGER.debug("Found service account host and port: " + hostPort);
config.setMasterUrl("https://" + hostPort);
}
if (Utils.getSystemPropertyOrEnvVar(KUBERNETES_AUTH_TRYSERVICEACCOUNT_SYSTEM_PROPERTY, true)) {
boolean serviceAccountCaCertExists = Files.isRegularFile(new File(KUBERNETES_SERVICE_ACCOUNT_CA_CRT_PATH).toPath());
if (serviceAccountCaCertExists) {
LOGGER.debug("Found service account ca cert at: ["+KUBERNETES_SERVICE_ACCOUNT_CA_CRT_PATH+"].");
config.setCaCertFile(KUBERNETES_SERVICE_ACCOUNT_CA_CRT_PATH);
} else {
LOGGER.debug("Did not find service account ca cert at: ["+KUBERNETES_SERVICE_ACCOUNT_CA_CRT_PATH+"].");
}
try {
String serviceTokenCandidate = new String(Files.readAllBytes(new File(KUBERNETES_SERVICE_ACCOUNT_TOKEN_PATH).toPath()));
if (serviceTokenCandidate != null) {
LOGGER.debug("Found service account token at: ["+KUBERNETES_SERVICE_ACCOUNT_TOKEN_PATH+"].");
config.setOauthToken(serviceTokenCandidate);
String txt = "Configured service account doesn't have access. Service account may have been revoked.";
config.getErrorMessages().put(401, "Unauthorized! " + txt);
config.getErrorMessages().put(403, "Forbidden! " + txt);
return true;
} else {
LOGGER.debug("Did not find service account token at: ["+KUBERNETES_SERVICE_ACCOUNT_TOKEN_PATH+"].");
}
} catch (IOException e) {
// No service account token available...
LOGGER.warn("Error reading service account token from: ["+KUBERNETES_SERVICE_ACCOUNT_TOKEN_PATH+"]. Ignoring.");
}
}
return false;
}
private static String joinHostPort(String host, String port) {
if (host.indexOf(':') >= 0) {
// Host is an IPv6
return "[" + host + "]:" + port;
}
return host + ":" + port;
}
private static String absolutify(File relativeTo, String filename) {
if (filename == null) {
return null;
}
File file = new File(filename);
if (file.isAbsolute()) {
return file.getAbsolutePath();
}
return new File(relativeTo.getParentFile(), filename).getAbsolutePath();
}
public static Config fromKubeconfig(String kubeconfigContents) {
return fromKubeconfig(null, kubeconfigContents, null);
}
// Note: kubeconfigPath is optional (see note on loadFromKubeConfig)
public static Config fromKubeconfig(String context, String kubeconfigContents, String kubeconfigPath) {
// we allow passing context along here, since downstream accepts it
Config config = new Config();
Config.loadFromKubeconfig(config, context, kubeconfigContents, kubeconfigPath);
return config;
}
private static boolean tryKubeConfig(Config config, String context) {
LOGGER.debug("Trying to configure client from Kubernetes config...");
if (Utils.getSystemPropertyOrEnvVar(KUBERNETES_AUTH_TRYKUBECONFIG_SYSTEM_PROPERTY, true)) {
File kubeConfigFile = new File(
Utils.getSystemPropertyOrEnvVar(KUBERNETES_KUBECONFIG_FILE, new File(getHomeDir(), ".kube" + File.separator + "config").toString()));
boolean kubeConfigFileExists = Files.isRegularFile(kubeConfigFile.toPath());
if (kubeConfigFileExists) {
LOGGER.debug("Found for Kubernetes config at: ["+kubeConfigFile.getPath()+"].");
String kubeconfigContents;
try {
kubeconfigContents = new String(Files.readAllBytes(kubeConfigFile.toPath()), StandardCharsets.UTF_8);
} catch(IOException e) {
LOGGER.error("Could not load Kubernetes config file from {}", kubeConfigFile.getPath(), e);
return false;
}
Config.loadFromKubeconfig(config, context, kubeconfigContents, kubeConfigFile.getPath());
return true;
} else {
LOGGER.debug("Did not find Kubernetes config at: ["+kubeConfigFile.getPath()+"]. Ignoring.");
}
}
return false;
}
// Note: kubeconfigPath is optional
// It is only used to rewrite relative tls asset paths inside kubeconfig when a file is passed, and in the case that
// the kubeconfig references some assets via relative paths.
private static boolean loadFromKubeconfig(Config config, String context, String kubeconfigContents, String kubeconfigPath) {
try {
io.fabric8.kubernetes.api.model.Config kubeConfig = KubeConfigUtils.parseConfigFromString(kubeconfigContents);
if (context != null) {
kubeConfig.setCurrentContext(context);
}
Context currentContext = KubeConfigUtils.getCurrentContext(kubeConfig);
Cluster currentCluster = KubeConfigUtils.getCluster(kubeConfig, currentContext);
if (currentCluster != null) {
config.setMasterUrl(currentCluster.getServer());
config.setNamespace(currentContext.getNamespace());
config.setTrustCerts(currentCluster.getInsecureSkipTlsVerify() != null && currentCluster.getInsecureSkipTlsVerify());
config.setDisableHostnameVerification(currentCluster.getInsecureSkipTlsVerify() != null && currentCluster.getInsecureSkipTlsVerify());
config.setCaCertData(currentCluster.getCertificateAuthorityData());
AuthInfo currentAuthInfo = KubeConfigUtils.getUserAuthInfo(kubeConfig, currentContext);
if (currentAuthInfo != null) {
// rewrite tls asset paths if needed
String caCertFile = currentCluster.getCertificateAuthority();
String clientCertFile = currentAuthInfo.getClientCertificate();
String clientKeyFile = currentAuthInfo.getClientKey();
if (kubeconfigPath != null && !kubeconfigPath.isEmpty()) {
caCertFile = absolutify(new File(kubeconfigPath), currentCluster.getCertificateAuthority());
clientCertFile = absolutify(new File(kubeconfigPath), currentAuthInfo.getClientCertificate());
clientKeyFile = absolutify(new File(kubeconfigPath), currentAuthInfo.getClientKey());
}
config.setCaCertFile(caCertFile);
config.setClientCertFile(clientCertFile);
config.setClientCertData(currentAuthInfo.getClientCertificateData());
config.setClientKeyFile(clientKeyFile);
config.setClientKeyData(currentAuthInfo.getClientKeyData());
config.setOauthToken(currentAuthInfo.getToken());
config.setUsername(currentAuthInfo.getUsername());
config.setPassword(currentAuthInfo.getPassword());
if (Utils.isNullOrEmpty(config.getOauthToken()) && currentAuthInfo.getAuthProvider() != null && !Utils.isNullOrEmpty(currentAuthInfo.getAuthProvider().getConfig().get(ACCESS_TOKEN))) {
config.setOauthToken(currentAuthInfo.getAuthProvider().getConfig().get(ACCESS_TOKEN));
}
config.getErrorMessages().put(401, "Unauthorized! Token may have expired! Please log-in again.");
config.getErrorMessages().put(403, "Forbidden! User "+currentContext.getUser()+ " doesn't have permission.");
}
return true;
}
} catch (IOException e) {
LOGGER.error("Failed to parse the kubeconfig.", e);
}
return false;
}
private static boolean tryNamespaceFromPath(Config config) {
LOGGER.debug("Trying to configure client namespace from Kubernetes service account namespace path...");
if (Utils.getSystemPropertyOrEnvVar(KUBERNETES_TRYNAMESPACE_PATH_SYSTEM_PROPERTY, true)) {
String serviceAccountNamespace = Utils.getSystemPropertyOrEnvVar(KUBERNETES_NAMESPACE_FILE, KUBERNETES_NAMESPACE_PATH);
boolean serviceAccountNamespaceExists = Files.isRegularFile(new File(serviceAccountNamespace).toPath());
if (serviceAccountNamespaceExists) {
LOGGER.debug("Found service account namespace at: [" + serviceAccountNamespace + "].");
try {
String namespace = new String(Files.readAllBytes(new File(serviceAccountNamespace).toPath()));
config.setNamespace(namespace.replace(System.lineSeparator(), ""));
return true;
} catch (IOException e) {
LOGGER.error("Error reading service account namespace from: [" + serviceAccountNamespace + "].", e);
}
} else {
LOGGER.debug("Did not find service account namespace at: [" + serviceAccountNamespace + "]. Ignoring.");
}
}
return false;
}
private static String getHomeDir() {
String osName = System.getProperty("os.name").toLowerCase();
if (osName.startsWith("win")) {
String homeDrive = System.getenv("HOMEDRIVE");
String homePath = System.getenv("HOMEPATH");
if (homeDrive != null && !homeDrive.isEmpty() && homePath != null && !homePath.isEmpty()) {
String homeDir = homeDrive + homePath;
File f = new File(homeDir);
if (f.exists() && f.isDirectory()) {
return homeDir;
}
}
String userProfile = System.getenv("USERPROFILE");
if (userProfile != null && !userProfile.isEmpty()) {
File f = new File(userProfile);
if (f.exists() && f.isDirectory()) {
return userProfile;
}
}
}
String home = System.getenv("HOME");
if (home != null && !home.isEmpty()) {
File f = new File(home);
if (f.exists() && f.isDirectory()) {
return home;
}
}
//Fall back to user.home; we should never really get here
return System.getProperty("user.home", ".");
}
@JsonProperty("oauthToken")
public String getOauthToken() {
return getRequestConfig().getOauthToken();
}
public void setOauthToken(String oauthToken) {
this.requestConfig.setOauthToken(oauthToken);
}
@JsonProperty("password")
public String getPassword() {
return getRequestConfig().getPassword();
}
public void setPassword(String password) {
this.requestConfig.setPassword(password);
}
@JsonProperty("username")
public String getUsername() {
return getRequestConfig().getUsername();
}
public void setUsername(String username) {
this.requestConfig.setUsername(username);
}
@JsonProperty("impersonateUsername")
public String getImpersonateUsername() {
return getRequestConfig().getImpersonateUsername();
}
public void setImpersonateUsername(String impersonateUsername) {
this.requestConfig.setImpersonateUsername(impersonateUsername);
}
@JsonProperty("impersonateGroup")
public String getImpersonateGroup() {
return getRequestConfig().getImpersonateGroup();
}
public void setImpersonateGroup(String impersonateGroup) {
this.requestConfig.setImpersonateGroup(impersonateGroup);
}
@JsonProperty("impersonateExtras")
public Map<String, String> getImpersonateExtras() {
return getRequestConfig().getImpersonateExtras();
}
public void setImpersonateExtras(Map<String, String> impersonateExtras) {
this.requestConfig.setImpersonateExtras(impersonateExtras);
}
@JsonProperty("clientKeyPassphrase")
public String getClientKeyPassphrase() {
return clientKeyPassphrase;
}
public void setClientKeyPassphrase(String clientKeyPassphrase) {
this.clientKeyPassphrase = clientKeyPassphrase;
}
@JsonProperty("clientKeyAlgo")
public String getClientKeyAlgo() {
return clientKeyAlgo;
}
public void setClientKeyAlgo(String clientKeyAlgo) {
this.clientKeyAlgo = clientKeyAlgo;
}
@JsonProperty("clientKeyData")
public String getClientKeyData() {
return clientKeyData;
}
public void setClientKeyData(String clientKeyData) {
this.clientKeyData = clientKeyData;
}
@JsonProperty("clientKeyFile")
public String getClientKeyFile() {
return clientKeyFile;
}
public void setClientKeyFile(String clientKeyFile) {
this.clientKeyFile = clientKeyFile;
}
@JsonProperty("clientCertData")
public String getClientCertData() {
return clientCertData;
}
public void setClientCertData(String clientCertData) {
this.clientCertData = clientCertData;
}
@JsonProperty("clientCertFile")
public String getClientCertFile() {
return clientCertFile;
}
public void setClientCertFile(String clientCertFile) {
this.clientCertFile = clientCertFile;
}
@JsonProperty("caCertData")
public String getCaCertData() {
return caCertData;
}
public void setCaCertData(String caCertData) {
this.caCertData = caCertData;
}
@JsonProperty("caCertFile")
public String getCaCertFile() {
return caCertFile;
}
public void setCaCertFile(String caCertFile) {
this.caCertFile = caCertFile;
}
@JsonProperty("apiVersion")
public String getApiVersion() {
return apiVersion;
}
public void setApiVersion(String apiVersion) {
this.apiVersion = apiVersion;
}
@JsonProperty("masterUrl")
public String getMasterUrl() {
return masterUrl;
}
public void setMasterUrl(String masterUrl) {
this.masterUrl = masterUrl;
}
@JsonProperty("trustCerts")
public boolean isTrustCerts() {
return trustCerts;
}
public void setTrustCerts(boolean trustCerts) {
this.trustCerts = trustCerts;
}
@JsonProperty("disableHostnameVerification")
public boolean isDisableHostnameVerification() {
return disableHostnameVerification;
}
public void setDisableHostnameVerification(boolean disableHostnameVerification) {
this.disableHostnameVerification = disableHostnameVerification;
}
@JsonProperty("watchReconnectInterval")
public int getWatchReconnectInterval() {
return requestConfig.getWatchReconnectInterval();
}
public void setWatchReconnectInterval(int watchReconnectInterval) {
this.requestConfig.setWatchReconnectInterval(watchReconnectInterval);
}
@JsonProperty("watchReconnectLimit")
public int getWatchReconnectLimit() {
return getRequestConfig().getWatchReconnectLimit();
}
public void setWatchReconnectLimit(int watchReconnectLimit) {
this.requestConfig.setWatchReconnectLimit(watchReconnectLimit);
}
@JsonProperty("errorMessages")
public Map<Integer, String> getErrorMessages() {
return errorMessages;
}
public void setErrorMessages(Map<Integer, String> errorMessages) {
this.errorMessages = errorMessages;
}
public static ConfigBuilder builder() {
return new ConfigBuilder();
}
@JsonProperty("connectionTimeout")
public int getConnectionTimeout() {
return getRequestConfig().getConnectionTimeout();
}
public void setConnectionTimeout(int connectionTimeout) {
this.requestConfig.setConnectionTimeout(connectionTimeout);
}
@JsonProperty("requestTimeout")
public int getRequestTimeout() {
return getRequestConfig().getRequestTimeout();
}
public void setRequestTimeout(int requestTimeout) {
this.requestConfig.setRequestTimeout(requestTimeout);
}
@JsonProperty("rollingTimeout")
public long getRollingTimeout() {
return getRequestConfig().getRollingTimeout();
}
public void setRollingTimeout(long rollingTimeout) {
this.requestConfig.setRollingTimeout(rollingTimeout);
}
@JsonProperty("scaleTimeout")
public long getScaleTimeout() {
return getRequestConfig().getScaleTimeout();
}
public void setScaleTimeout(long scaleTimeout) {
this.requestConfig.setScaleTimeout(scaleTimeout);
}
@JsonProperty("loggingInterval")
public int getLoggingInterval() {
return getRequestConfig().getLoggingInterval();
}
public void setLoggingInterval(int loggingInterval) {
this.requestConfig.setLoggingInterval(loggingInterval);
}
public void setHttpProxy(String httpProxy) {
this.httpProxy = httpProxy;
}
@JsonProperty("httpProxy")
public String getHttpProxy() {
return httpProxy;
}
public void setHttpsProxy(String httpsProxy) {
this.httpsProxy = httpsProxy;
}
@JsonProperty("httpsProxy")
public String getHttpsProxy() {
return httpsProxy;
}
public void setNoProxy(String[] noProxy) {
this.noProxy = noProxy;
}
@JsonProperty("noProxy")
public String[] getNoProxy() {
return noProxy;
}
@JsonProperty("namespace")
public String getNamespace() {
return namespace;
}
public void setNamespace(String namespace) {
this.namespace = namespace;
}
@JsonProperty("userAgent")
public String getUserAgent() {
return userAgent;
}
public void setUserAgent(String userAgent) {
this.userAgent = userAgent;
}
@JsonProperty("tlsVersions")
public TlsVersion[] getTlsVersions() {
return tlsVersions;
}
public void setTlsVersions(TlsVersion[] tlsVersions) {
this.tlsVersions = tlsVersions;
}
@JsonProperty("websocketTimeout")
public long getWebsocketTimeout() {
return getRequestConfig().getWebsocketTimeout();
}
public void setWebsocketTimeout(long websocketTimeout) {
this.requestConfig.setWebsocketTimeout(websocketTimeout);
}
@JsonProperty("websocketPingInterval")
public long getWebsocketPingInterval() {
return getRequestConfig().getWebsocketPingInterval();
}
public void setWebsocketPingInterval(long websocketPingInterval) {
this.requestConfig.setWebsocketPingInterval(websocketPingInterval);
}
public int getMaxConcurrentRequests() {
return getRequestConfig().getMaxConcurrentRequests();
}
public void setMaxConcurrentRequests(int maxConcurrentRequests) {
this.requestConfig.setMaxConcurrentRequests(maxConcurrentRequests);
}
public int getMaxConcurrentRequestsPerHost() {
return getRequestConfig().getMaxConcurrentRequestsPerHost();
}
public void setMaxConcurrentRequestsPerHost(int maxConcurrentRequestsPerHost) {
this.requestConfig.setMaxConcurrentRequestsPerHost(maxConcurrentRequestsPerHost);
}
@JsonProperty("proxyUsername")
public String getProxyUsername() {
return proxyUsername;
}
public void setProxyUsername(String proxyUsername) {
this.proxyUsername = proxyUsername;
}
@JsonProperty("proxyPassword")
public String getProxyPassword() {
return proxyPassword;
}
public void setProxyPassword(String proxyPassword) {
this.proxyPassword = proxyPassword;
}
public RequestConfig getRequestConfig() {
RequestConfig rc = RequestConfigHolder.get();
return rc != null ? rc : this.requestConfig;
}
public void setTrustStorePassphrase(String trustStorePassphrase) {
this.trustStorePassphrase = trustStorePassphrase;
}
@JsonProperty("trustStorePassphrase")
public String getTrustStorePassphrase() {
return trustStorePassphrase;
}
public void setKeyStorePassphrase(String keyStorePassphrase) {
this.keyStorePassphrase = keyStorePassphrase;
}
@JsonProperty("keyStorePassphrase")
public String getKeyStorePassphrase() {
return keyStorePassphrase;
}
public void setTrustStoreFile(String trustStoreFile) {
this.trustStoreFile = trustStoreFile;
}
@JsonProperty("trustStoreFile")
public String getTrustStoreFile() {
return trustStoreFile;
}
public void setKeyStoreFile(String keyStoreFile) {
this.keyStoreFile = keyStoreFile;
}
@JsonProperty("keyStoreFile")
public String getKeyStoreFile() {
return keyStoreFile;
}
}
| [
"\"HOMEDRIVE\"",
"\"HOMEPATH\"",
"\"USERPROFILE\"",
"\"HOME\""
]
| []
| [
"USERPROFILE",
"HOME",
"HOMEPATH",
"HOMEDRIVE"
]
| [] | ["USERPROFILE", "HOME", "HOMEPATH", "HOMEDRIVE"] | java | 4 | 0 | |
tests/core/test_lock_store.py | import asyncio
import os
import numpy as np
import pytest
import time
from _pytest.tmpdir import TempdirFactory
from unittest.mock import patch
from rasa.core.agent import Agent
from rasa.core.channels import UserMessage
from rasa.core.constants import INTENT_MESSAGE_PREFIX, DEFAULT_LOCK_LIFETIME
from rasa.core.lock import TicketLock
from rasa.core.lock_store import (
InMemoryLockStore,
LockError,
LockStore,
RedisLockStore,
)
class FakeRedisLockStore(RedisLockStore):
"""Fake `RedisLockStore` using `fakeredis` library."""
def __init__(self):
import fakeredis
self.red = fakeredis.FakeStrictRedis()
# added in redis==3.3.0, but not yet in fakeredis
self.red.connection_pool.connection_class.health_check_interval = 0
super(RedisLockStore, self).__init__()
def test_issue_ticket():
lock = TicketLock("random id 0")
# no lock issued
assert lock.last_issued == -1
assert lock.now_serving == 0
# no one is waiting
assert not lock.is_someone_waiting()
# issue ticket
ticket = lock.issue_ticket(1)
assert ticket == 0
assert lock.last_issued == 0
assert lock.now_serving == 0
# someone is waiting
assert lock.is_someone_waiting()
def test_remove_expired_tickets():
lock = TicketLock("random id 1")
# issue one long- and one short-lived ticket
_ = list(map(lock.issue_ticket, [0.01, 10]))
# both tickets are there
assert len(lock.tickets) == 2
# sleep and only one ticket should be left
time.sleep(0.02)
lock.remove_expired_tickets()
assert len(lock.tickets) == 1
@pytest.mark.parametrize("lock_store", [InMemoryLockStore(), FakeRedisLockStore()])
def test_create_lock_store(lock_store: LockStore):
conversation_id = "my id 0"
# create and lock
lock = lock_store.create_lock(conversation_id)
lock_store.save_lock(lock)
lock = lock_store.get_lock(conversation_id)
assert lock
assert lock.conversation_id == conversation_id
@pytest.mark.parametrize("lock_store", [InMemoryLockStore(), FakeRedisLockStore()])
def test_serve_ticket(lock_store: LockStore):
conversation_id = "my id 1"
lock = lock_store.create_lock(conversation_id)
lock_store.save_lock(lock)
# issue ticket with long lifetime
ticket_0 = lock_store.issue_ticket(conversation_id, 10)
assert ticket_0 == 0
lock = lock_store.get_lock(conversation_id)
assert lock.last_issued == ticket_0
assert lock.now_serving == ticket_0
assert lock.is_someone_waiting()
# issue another ticket
ticket_1 = lock_store.issue_ticket(conversation_id, 10)
# finish serving ticket_0
lock_store.finish_serving(conversation_id, ticket_0)
lock = lock_store.get_lock(conversation_id)
assert lock.last_issued == ticket_1
assert lock.now_serving == ticket_1
assert lock.is_someone_waiting()
# serve second ticket and no one should be waiting
lock_store.finish_serving(conversation_id, ticket_1)
lock = lock_store.get_lock(conversation_id)
assert not lock.is_someone_waiting()
# noinspection PyProtectedMember
@pytest.mark.parametrize("lock_store", [InMemoryLockStore(), FakeRedisLockStore()])
def test_lock_expiration(lock_store: LockStore):
conversation_id = "my id 2"
lock = lock_store.create_lock(conversation_id)
lock_store.save_lock(lock)
# issue ticket with long lifetime
ticket = lock.issue_ticket(10)
assert ticket == 0
assert not lock._ticket_for_ticket_number(ticket).has_expired()
# issue ticket with short lifetime
ticket = lock.issue_ticket(0.00001)
time.sleep(0.00002)
assert ticket == 1
assert lock._ticket_for_ticket_number(ticket) is None
# newly assigned ticket should get number 1 again
assert lock.issue_ticket(10) == 1
async def test_multiple_conversation_ids(default_agent: Agent):
text = INTENT_MESSAGE_PREFIX + 'greet{"name":"Rasa"}'
conversation_ids = [f"conversation {i}" for i in range(2)]
# ensure conversations are processed in order
tasks = [default_agent.handle_text(text, sender_id=_id) for _id in conversation_ids]
results = await asyncio.gather(*tasks)
assert results
processed_ids = [result[0]["recipient_id"] for result in results]
assert processed_ids == conversation_ids
async def test_message_order(tmpdir_factory: TempdirFactory, default_agent: Agent):
start_time = time.time()
n_messages = 10
lock_wait = 0.1
# let's write the incoming order of messages and the order of results to temp files
temp_path = tmpdir_factory.mktemp("message_order")
results_file = temp_path / "results_file"
incoming_order_file = temp_path / "incoming_order_file"
# We need to mock `Agent.handle_message()` so we can introduce an
# artificial holdup (the mock's wait argument). In the mocked method, we'll
# record messages as they come in and as they're processed in files so we
# can check the order later on. We don't need the return value of this method so
# we'll just return None.
async def mocked_handle_message(self, message: UserMessage, wait: float) -> None:
# write incoming message to file
with open(str(incoming_order_file), "a+") as f_0:
f_0.write(message.text + "\n")
async with self.lock_store.lock(
message.sender_id, wait_time_in_seconds=lock_wait
):
# hold up the message processing after the lock has been acquired
await asyncio.sleep(wait)
# write message to file as it's processed
with open(str(results_file), "a+") as f_1:
f_1.write(message.text + "\n")
return None
# We'll send n_messages from the same sender_id with different blocking times
# after the lock has been acquired.
# We have to ensure that the messages are processed in the right order.
with patch.object(Agent, "handle_message", mocked_handle_message):
# use decreasing wait times so that every message after the first one
# does not acquire its lock immediately
wait_times = np.linspace(0.1, 0.05, n_messages)
tasks = [
default_agent.handle_message(
UserMessage(f"sender {i}", sender_id="some id"), wait=k
)
for i, k in enumerate(wait_times)
]
# execute futures
await asyncio.gather(*(asyncio.ensure_future(t) for t in tasks))
expected_order = [f"sender {i}" for i in range(len(wait_times))]
# ensure order of incoming messages is as expected
with open(str(incoming_order_file)) as f:
incoming_order = [l for l in f.read().split("\n") if l]
assert incoming_order == expected_order
# ensure results are processed in expected order
with open(str(results_file)) as f:
results_order = [l for l in f.read().split("\n") if l]
assert results_order == expected_order
# Every message after the first one will wait `lock_wait` seconds to acquire its
# lock (`wait_time_in_seconds` kwarg in `lock_store.lock()`).
# Let's make sure that this is not blocking and test that total test
# execution time is less than the sum of all wait times plus
# (n_messages - 1) * lock_wait
time_limit = np.sum(wait_times[1:])
time_limit += (n_messages - 1) * lock_wait
assert time.time() - start_time < time_limit
async def test_lock_error(default_agent: Agent):
lock_lifetime = 0.01
wait_time_in_seconds = 0.01
holdup = 0.1
# Mock message handler again to add a wait time holding up the lock
# after it's been acquired
async def mocked_handle_message(self, message: UserMessage) -> None:
async with self.lock_store.lock(
message.sender_id,
wait_time_in_seconds=wait_time_in_seconds,
lock_lifetime=lock_lifetime,
):
# hold up the message processing after the lock has been acquired
await asyncio.sleep(holdup)
return None
with patch.object(Agent, "handle_message", mocked_handle_message):
# first message blocks the lock for `holdup`,
# meaning the second message will not be able to acquire a lock
tasks = [
default_agent.handle_message(
UserMessage(f"sender {i}", sender_id="some id")
)
for i in range(2)
]
with pytest.raises(LockError):
await asyncio.gather(*(asyncio.ensure_future(t) for t in tasks))
async def test_lock_lifetime_environment_variable():
import rasa.core.lock_store
import importlib
# by default lock lifetime is `DEFAULT_LOCK_LIFETIME`
assert rasa.core.lock_store.LOCK_LIFETIME == DEFAULT_LOCK_LIFETIME
# set new lock lifetime as environment variable
new_lock_lifetime = 123
os.environ["TICKET_LOCK_LIFETIME"] = str(new_lock_lifetime)
# reload module and check value is updated
importlib.reload(rasa.core.lock_store)
assert rasa.core.lock_store.LOCK_LIFETIME == new_lock_lifetime
| []
| []
| [
"TICKET_LOCK_LIFETIME"
]
| [] | ["TICKET_LOCK_LIFETIME"] | python | 1 | 0 | |
ch-3/metasploit-minimal/client/main.go | package main
import (
"fmt"
"log"
"os"
"github.com/blackhat-go/bhg/ch-3/metasploit-minimal/rpc"
)
func main() {
host := os.Getenv("MSFHOST")
pass := os.Getenv("MSFPASS")
user := "msf"
if host == "" || pass == "" {
log.Fatalln("Missing required environment variable MSFHOST or MSFPASS")
}
msf, err := rpc.New(host, user, pass)
if err != nil {
log.Panicln(err)
}
defer msf.Logout()
sessions, err := msf.SessionList()
if err != nil {
log.Panicln(err)
}
fmt.Println("Sessions:")
for _, session := range sessions {
fmt.Printf("%5d %s\n", session.ID, session.Info)
}
}
| [
"\"MSFHOST\"",
"\"MSFPASS\""
]
| []
| [
"MSFHOST",
"MSFPASS"
]
| [] | ["MSFHOST", "MSFPASS"] | go | 2 | 0 | |
ws-signaling-server.py | #!/usr/bin/env python
#
# Simple websocket server to perform signaling.
#
import asyncio
import binascii
import os
import websockets
clients = {}
async def echo(websocket, path):
client_id = binascii.hexlify(os.urandom(8))
clients[client_id] = websocket
try:
async for message in websocket:
for c in clients.values():
if c != websocket:
await c.send(message)
finally:
clients.pop(client_id)
asyncio.get_event_loop().run_until_complete(
websockets.serve(echo, '0.0.0.0', int(os.getenv("PORT"))))
# websockets.serve(echo, '0.0.0.0', 10001))
asyncio.get_event_loop().run_forever()
| []
| []
| [
"PORT"
]
| [] | ["PORT"] | python | 1 | 0 | |
clients/google-api-services-tpu/v1alpha1/1.31.0/com/google/api/services/tpu/v1alpha1/TPU.java | /*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
/*
* This code was generated by https://github.com/googleapis/google-api-java-client-services/
* Modify at your own risk.
*/
package com.google.api.services.tpu.v1alpha1;
/**
* Service definition for TPU (v1alpha1).
*
* <p>
* TPU API provides customers with access to Google TPU technology.
* </p>
*
* <p>
* For more information about this service, see the
* <a href="https://cloud.google.com/tpu/" target="_blank">API Documentation</a>
* </p>
*
* <p>
* This service uses {@link TPURequestInitializer} to initialize global parameters via its
* {@link Builder}.
* </p>
*
* @since 1.3
* @author Google, Inc.
*/
@SuppressWarnings("javadoc")
public class TPU extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient {
// Note: Leave this static initializer at the top of the file.
static {
com.google.api.client.util.Preconditions.checkState(
com.google.api.client.googleapis.GoogleUtils.MAJOR_VERSION == 1 &&
(com.google.api.client.googleapis.GoogleUtils.MINOR_VERSION >= 32 ||
(com.google.api.client.googleapis.GoogleUtils.MINOR_VERSION == 31 &&
com.google.api.client.googleapis.GoogleUtils.BUGFIX_VERSION >= 1)),
"You are currently running with version %s of google-api-client. " +
"You need at least version 1.31.1 of google-api-client to run version " +
"1.32.1 of the Cloud TPU API library.", com.google.api.client.googleapis.GoogleUtils.VERSION);
}
/**
* The default encoded root URL of the service. This is determined when the library is generated
* and normally should not be changed.
*
* @since 1.7
*/
public static final String DEFAULT_ROOT_URL = "https://tpu.googleapis.com/";
/**
* The default encoded mTLS root URL of the service. This is determined when the library is generated
* and normally should not be changed.
*
* @since 1.31
*/
public static final String DEFAULT_MTLS_ROOT_URL = "https://tpu.mtls.googleapis.com/";
/**
* The default encoded service path of the service. This is determined when the library is
* generated and normally should not be changed.
*
* @since 1.7
*/
public static final String DEFAULT_SERVICE_PATH = "";
/**
* The default encoded batch path of the service. This is determined when the library is
* generated and normally should not be changed.
*
* @since 1.23
*/
public static final String DEFAULT_BATCH_PATH = "batch";
/**
* The default encoded base URL of the service. This is determined when the library is generated
* and normally should not be changed.
*/
public static final String DEFAULT_BASE_URL = DEFAULT_ROOT_URL + DEFAULT_SERVICE_PATH;
/**
* Constructor.
*
* <p>
* Use {@link Builder} if you need to specify any of the optional parameters.
* </p>
*
* @param transport HTTP transport, which should normally be:
* <ul>
* <li>Google App Engine:
* {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li>
* <li>Android: {@code newCompatibleTransport} from
* {@code com.google.api.client.extensions.android.http.AndroidHttp}</li>
* <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()}
* </li>
* </ul>
* @param jsonFactory JSON factory, which may be:
* <ul>
* <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li>
* <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li>
* <li>Android Honeycomb or higher:
* {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li>
* </ul>
* @param httpRequestInitializer HTTP request initializer or {@code null} for none
* @since 1.7
*/
public TPU(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory,
com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
this(new Builder(transport, jsonFactory, httpRequestInitializer));
}
/**
* @param builder builder
*/
TPU(Builder builder) {
super(builder);
}
@Override
protected void initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest<?> httpClientRequest) throws java.io.IOException {
super.initialize(httpClientRequest);
}
/**
* An accessor for creating requests from the Projects collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code TPU tpu = new TPU(...);}
* {@code TPU.Projects.List request = tpu.projects().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Projects projects() {
return new Projects();
}
/**
* The "projects" collection of methods.
*/
public class Projects {
/**
* An accessor for creating requests from the Locations collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code TPU tpu = new TPU(...);}
* {@code TPU.Locations.List request = tpu.locations().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Locations locations() {
return new Locations();
}
/**
* The "locations" collection of methods.
*/
public class Locations {
/**
* Gets information about a location.
*
* Create a request for the method "locations.get".
*
* This request holds the parameters needed by the tpu server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param name Resource name for the location.
* @return the request
*/
public Get get(java.lang.String name) throws java.io.IOException {
Get result = new Get(name);
initialize(result);
return result;
}
public class Get extends TPURequest<com.google.api.services.tpu.v1alpha1.model.Location> {
private static final String REST_PATH = "v1alpha1/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+$");
/**
* Gets information about a location.
*
* Create a request for the method "locations.get".
*
* This request holds the parameters needed by the tpu server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation. <p> {@link
* Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must be
* called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Resource name for the location.
* @since 1.13
*/
protected Get(java.lang.String name) {
super(TPU.this, "GET", REST_PATH, null, com.google.api.services.tpu.v1alpha1.model.Location.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/** Resource name for the location. */
@com.google.api.client.util.Key
private java.lang.String name;
/** Resource name for the location.
*/
public java.lang.String getName() {
return name;
}
/** Resource name for the location. */
public Get setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
* Lists information about the supported locations for this service.
*
* Create a request for the method "locations.list".
*
* This request holds the parameters needed by the tpu server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param name The resource that owns the locations collection, if applicable.
* @return the request
*/
public List list(java.lang.String name) throws java.io.IOException {
List result = new List(name);
initialize(result);
return result;
}
public class List extends TPURequest<com.google.api.services.tpu.v1alpha1.model.ListLocationsResponse> {
private static final String REST_PATH = "v1alpha1/{+name}/locations";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+$");
/**
* Lists information about the supported locations for this service.
*
* Create a request for the method "locations.list".
*
* This request holds the parameters needed by the tpu server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation. <p> {@link
* List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must be
* called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name The resource that owns the locations collection, if applicable.
* @since 1.13
*/
protected List(java.lang.String name) {
super(TPU.this, "GET", REST_PATH, null, com.google.api.services.tpu.v1alpha1.model.ListLocationsResponse.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/** The resource that owns the locations collection, if applicable. */
@com.google.api.client.util.Key
private java.lang.String name;
/** The resource that owns the locations collection, if applicable.
*/
public java.lang.String getName() {
return name;
}
/** The resource that owns the locations collection, if applicable. */
public List setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+$");
}
this.name = name;
return this;
}
/**
* A filter to narrow down results to a preferred subset. The filtering language accepts
* strings like "displayName=tokyo", and is documented in more detail in
* [AIP-160](https://google.aip.dev/160).
*/
@com.google.api.client.util.Key
private java.lang.String filter;
/** A filter to narrow down results to a preferred subset. The filtering language accepts strings like
"displayName=tokyo", and is documented in more detail in [AIP-160](https://google.aip.dev/160).
*/
public java.lang.String getFilter() {
return filter;
}
/**
* A filter to narrow down results to a preferred subset. The filtering language accepts
* strings like "displayName=tokyo", and is documented in more detail in
* [AIP-160](https://google.aip.dev/160).
*/
public List setFilter(java.lang.String filter) {
this.filter = filter;
return this;
}
/** The maximum number of results to return. If not set, the service selects a default. */
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** The maximum number of results to return. If not set, the service selects a default.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/** The maximum number of results to return. If not set, the service selects a default. */
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/**
* A page token received from the `next_page_token` field in the response. Send that page
* token to receive the subsequent page.
*/
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** A page token received from the `next_page_token` field in the response. Send that page token to
receive the subsequent page.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/**
* A page token received from the `next_page_token` field in the response. Send that page
* token to receive the subsequent page.
*/
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
/**
* An accessor for creating requests from the AcceleratorTypes collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code TPU tpu = new TPU(...);}
* {@code TPU.AcceleratorTypes.List request = tpu.acceleratorTypes().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public AcceleratorTypes acceleratorTypes() {
return new AcceleratorTypes();
}
/**
* The "acceleratorTypes" collection of methods.
*/
public class AcceleratorTypes {
/**
* Gets AcceleratorType.
*
* Create a request for the method "acceleratorTypes.get".
*
* This request holds the parameters needed by the tpu server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param name Required. The resource name.
* @return the request
*/
public Get get(java.lang.String name) throws java.io.IOException {
Get result = new Get(name);
initialize(result);
return result;
}
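// Usage sketch (illustrative only): the client variable "tpu", the accessor chain and the fully
// qualified accelerator type name below are assumptions for illustration.
//
//   AcceleratorType acceleratorType = tpu.projects().locations().acceleratorTypes()
//       .get("projects/my-project/locations/us-central1-b/acceleratorTypes/v2-8")
//       .execute();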
public class Get extends TPURequest<com.google.api.services.tpu.v1alpha1.model.AcceleratorType> {
private static final String REST_PATH = "v1alpha1/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+/acceleratorTypes/[^/]+$");
/**
* Gets AcceleratorType.
*
* Create a request for the method "acceleratorTypes.get".
*
       * This request holds the parameters needed by the tpu server.  After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation. <p> {@link
* Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must be
* called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Required. The resource name.
* @since 1.13
*/
protected Get(java.lang.String name) {
super(TPU.this, "GET", REST_PATH, null, com.google.api.services.tpu.v1alpha1.model.AcceleratorType.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/acceleratorTypes/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/** Required. The resource name. */
@com.google.api.client.util.Key
private java.lang.String name;
/** Required. The resource name.
*/
public java.lang.String getName() {
return name;
}
/** Required. The resource name. */
public Get setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/acceleratorTypes/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
* Lists accelerator types supported by this API.
*
* Create a request for the method "acceleratorTypes.list".
*
* This request holds the parameters needed by the tpu server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param parent Required. The parent resource name.
* @return the request
*/
public List list(java.lang.String parent) throws java.io.IOException {
List result = new List(parent);
initialize(result);
return result;
}
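// Usage sketch (illustrative only): assumes a configured client "tpu" and a hypothetical parent
// location; setPageSize is the optional parameter defined on this request class.
//
//   ListAcceleratorTypesResponse response = tpu.projects().locations().acceleratorTypes()
//       .list("projects/my-project/locations/us-central1-b")
//       .setPageSize(50)
//       .execute();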
public class List extends TPURequest<com.google.api.services.tpu.v1alpha1.model.ListAcceleratorTypesResponse> {
private static final String REST_PATH = "v1alpha1/{+parent}/acceleratorTypes";
private final java.util.regex.Pattern PARENT_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+$");
/**
* Lists accelerator types supported by this API.
*
* Create a request for the method "acceleratorTypes.list".
*
       * This request holds the parameters needed by the tpu server.  After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation. <p> {@link
* List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must be
* called to initialize this instance immediately after invoking the constructor. </p>
*
* @param parent Required. The parent resource name.
* @since 1.13
*/
protected List(java.lang.String parent) {
super(TPU.this, "GET", REST_PATH, null, com.google.api.services.tpu.v1alpha1.model.ListAcceleratorTypesResponse.class);
this.parent = com.google.api.client.util.Preconditions.checkNotNull(parent, "Required parameter parent must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/** Required. The parent resource name. */
@com.google.api.client.util.Key
private java.lang.String parent;
/** Required. The parent resource name.
*/
public java.lang.String getParent() {
return parent;
}
/** Required. The parent resource name. */
public List setParent(java.lang.String parent) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+$");
}
this.parent = parent;
return this;
}
/** List filter. */
@com.google.api.client.util.Key
private java.lang.String filter;
/** List filter.
*/
public java.lang.String getFilter() {
return filter;
}
/** List filter. */
public List setFilter(java.lang.String filter) {
this.filter = filter;
return this;
}
/** Sort results. */
@com.google.api.client.util.Key
private java.lang.String orderBy;
/** Sort results.
*/
public java.lang.String getOrderBy() {
return orderBy;
}
/** Sort results. */
public List setOrderBy(java.lang.String orderBy) {
this.orderBy = orderBy;
return this;
}
/** The maximum number of items to return. */
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** The maximum number of items to return.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/** The maximum number of items to return. */
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/** The next_page_token value returned from a previous List request, if any. */
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** The next_page_token value returned from a previous List request, if any.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/** The next_page_token value returned from a previous List request, if any. */
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
}
/**
* An accessor for creating requests from the Nodes collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code TPU tpu = new TPU(...);}
* {@code TPU.Nodes.List request = tpu.nodes().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Nodes nodes() {
return new Nodes();
}
/**
* The "nodes" collection of methods.
*/
public class Nodes {
/**
* Creates a node.
*
* Create a request for the method "nodes.create".
*
* This request holds the parameters needed by the tpu server. After setting any optional
* parameters, call the {@link Create#execute()} method to invoke the remote operation.
*
* @param parent Required. The parent resource name.
* @param content the {@link com.google.api.services.tpu.v1alpha1.model.Node}
* @return the request
*/
public Create create(java.lang.String parent, com.google.api.services.tpu.v1alpha1.model.Node content) throws java.io.IOException {
Create result = new Create(parent, content);
initialize(result);
return result;
}
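// Usage sketch (illustrative only): "tpu" is an assumed client instance and the parent and node
// id are hypothetical; the Node body must be populated with the desired configuration first.
//
//   Node node = new Node(); // fill in the node configuration before sending
//   Operation operation = tpu.projects().locations().nodes()
//       .create("projects/my-project/locations/us-central1-b", node)
//       .setNodeId("my-tpu")
//       .execute();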
public class Create extends TPURequest<com.google.api.services.tpu.v1alpha1.model.Operation> {
private static final String REST_PATH = "v1alpha1/{+parent}/nodes";
private final java.util.regex.Pattern PARENT_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+$");
/**
* Creates a node.
*
* Create a request for the method "nodes.create".
*
       * This request holds the parameters needed by the tpu server.  After setting any optional
* parameters, call the {@link Create#execute()} method to invoke the remote operation. <p> {@link
* Create#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param parent Required. The parent resource name.
* @param content the {@link com.google.api.services.tpu.v1alpha1.model.Node}
* @since 1.13
*/
protected Create(java.lang.String parent, com.google.api.services.tpu.v1alpha1.model.Node content) {
super(TPU.this, "POST", REST_PATH, content, com.google.api.services.tpu.v1alpha1.model.Operation.class);
this.parent = com.google.api.client.util.Preconditions.checkNotNull(parent, "Required parameter parent must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+$");
}
}
@Override
public Create set$Xgafv(java.lang.String $Xgafv) {
return (Create) super.set$Xgafv($Xgafv);
}
@Override
public Create setAccessToken(java.lang.String accessToken) {
return (Create) super.setAccessToken(accessToken);
}
@Override
public Create setAlt(java.lang.String alt) {
return (Create) super.setAlt(alt);
}
@Override
public Create setCallback(java.lang.String callback) {
return (Create) super.setCallback(callback);
}
@Override
public Create setFields(java.lang.String fields) {
return (Create) super.setFields(fields);
}
@Override
public Create setKey(java.lang.String key) {
return (Create) super.setKey(key);
}
@Override
public Create setOauthToken(java.lang.String oauthToken) {
return (Create) super.setOauthToken(oauthToken);
}
@Override
public Create setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Create) super.setPrettyPrint(prettyPrint);
}
@Override
public Create setQuotaUser(java.lang.String quotaUser) {
return (Create) super.setQuotaUser(quotaUser);
}
@Override
public Create setUploadType(java.lang.String uploadType) {
return (Create) super.setUploadType(uploadType);
}
@Override
public Create setUploadProtocol(java.lang.String uploadProtocol) {
return (Create) super.setUploadProtocol(uploadProtocol);
}
/** Required. The parent resource name. */
@com.google.api.client.util.Key
private java.lang.String parent;
/** Required. The parent resource name.
*/
public java.lang.String getParent() {
return parent;
}
/** Required. The parent resource name. */
public Create setParent(java.lang.String parent) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+$");
}
this.parent = parent;
return this;
}
/** The unqualified resource name. */
@com.google.api.client.util.Key
private java.lang.String nodeId;
/** The unqualified resource name.
*/
public java.lang.String getNodeId() {
return nodeId;
}
/** The unqualified resource name. */
public Create setNodeId(java.lang.String nodeId) {
this.nodeId = nodeId;
return this;
}
/** Idempotent request UUID. */
@com.google.api.client.util.Key
private java.lang.String requestId;
/** Idempotent request UUID.
*/
public java.lang.String getRequestId() {
return requestId;
}
/** Idempotent request UUID. */
public Create setRequestId(java.lang.String requestId) {
this.requestId = requestId;
return this;
}
@Override
public Create set(String parameterName, Object value) {
return (Create) super.set(parameterName, value);
}
}
/**
* Deletes a node.
*
* Create a request for the method "nodes.delete".
*
* This request holds the parameters needed by the tpu server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation.
*
* @param name Required. The resource name.
* @return the request
*/
public Delete delete(java.lang.String name) throws java.io.IOException {
Delete result = new Delete(name);
initialize(result);
return result;
}
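// Usage sketch (illustrative only): the client "tpu" and the node resource name are assumptions.
//
//   Operation operation = tpu.projects().locations().nodes()
//       .delete("projects/my-project/locations/us-central1-b/nodes/my-tpu")
//       .execute();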
public class Delete extends TPURequest<com.google.api.services.tpu.v1alpha1.model.Operation> {
private static final String REST_PATH = "v1alpha1/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+/nodes/[^/]+$");
/**
* Deletes a node.
*
* Create a request for the method "nodes.delete".
*
       * This request holds the parameters needed by the tpu server.  After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation. <p> {@link
* Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Required. The resource name.
* @since 1.13
*/
protected Delete(java.lang.String name) {
super(TPU.this, "DELETE", REST_PATH, null, com.google.api.services.tpu.v1alpha1.model.Operation.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/nodes/[^/]+$");
}
}
@Override
public Delete set$Xgafv(java.lang.String $Xgafv) {
return (Delete) super.set$Xgafv($Xgafv);
}
@Override
public Delete setAccessToken(java.lang.String accessToken) {
return (Delete) super.setAccessToken(accessToken);
}
@Override
public Delete setAlt(java.lang.String alt) {
return (Delete) super.setAlt(alt);
}
@Override
public Delete setCallback(java.lang.String callback) {
return (Delete) super.setCallback(callback);
}
@Override
public Delete setFields(java.lang.String fields) {
return (Delete) super.setFields(fields);
}
@Override
public Delete setKey(java.lang.String key) {
return (Delete) super.setKey(key);
}
@Override
public Delete setOauthToken(java.lang.String oauthToken) {
return (Delete) super.setOauthToken(oauthToken);
}
@Override
public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Delete) super.setPrettyPrint(prettyPrint);
}
@Override
public Delete setQuotaUser(java.lang.String quotaUser) {
return (Delete) super.setQuotaUser(quotaUser);
}
@Override
public Delete setUploadType(java.lang.String uploadType) {
return (Delete) super.setUploadType(uploadType);
}
@Override
public Delete setUploadProtocol(java.lang.String uploadProtocol) {
return (Delete) super.setUploadProtocol(uploadProtocol);
}
/** Required. The resource name. */
@com.google.api.client.util.Key
private java.lang.String name;
/** Required. The resource name.
*/
public java.lang.String getName() {
return name;
}
/** Required. The resource name. */
public Delete setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/nodes/[^/]+$");
}
this.name = name;
return this;
}
/** Idempotent request UUID. */
@com.google.api.client.util.Key
private java.lang.String requestId;
/** Idempotent request UUID.
*/
public java.lang.String getRequestId() {
return requestId;
}
/** Idempotent request UUID. */
public Delete setRequestId(java.lang.String requestId) {
this.requestId = requestId;
return this;
}
@Override
public Delete set(String parameterName, Object value) {
return (Delete) super.set(parameterName, value);
}
}
/**
* Gets the details of a node.
*
* Create a request for the method "nodes.get".
*
* This request holds the parameters needed by the tpu server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param name Required. The resource name.
* @return the request
*/
public Get get(java.lang.String name) throws java.io.IOException {
Get result = new Get(name);
initialize(result);
return result;
}
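// Usage sketch (illustrative only): the client "tpu" and the node resource name are assumptions.
//
//   Node node = tpu.projects().locations().nodes()
//       .get("projects/my-project/locations/us-central1-b/nodes/my-tpu")
//       .execute();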
public class Get extends TPURequest<com.google.api.services.tpu.v1alpha1.model.Node> {
private static final String REST_PATH = "v1alpha1/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+/nodes/[^/]+$");
/**
* Gets the details of a node.
*
* Create a request for the method "nodes.get".
*
       * This request holds the parameters needed by the tpu server.  After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation. <p> {@link
* Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must be
* called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Required. The resource name.
* @since 1.13
*/
protected Get(java.lang.String name) {
super(TPU.this, "GET", REST_PATH, null, com.google.api.services.tpu.v1alpha1.model.Node.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/nodes/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/** Required. The resource name. */
@com.google.api.client.util.Key
private java.lang.String name;
/** Required. The resource name.
*/
public java.lang.String getName() {
return name;
}
/** Required. The resource name. */
public Get setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/nodes/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
* Lists nodes.
*
* Create a request for the method "nodes.list".
*
* This request holds the parameters needed by the tpu server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param parent Required. The parent resource name.
* @return the request
*/
public List list(java.lang.String parent) throws java.io.IOException {
List result = new List(parent);
initialize(result);
return result;
}
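// Usage sketch (illustrative only): assumes a configured client "tpu" and a hypothetical parent;
// pass the returned next page token back via setPageToken to fetch further pages.
//
//   ListNodesResponse response = tpu.projects().locations().nodes()
//       .list("projects/my-project/locations/us-central1-b")
//       .execute();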
public class List extends TPURequest<com.google.api.services.tpu.v1alpha1.model.ListNodesResponse> {
private static final String REST_PATH = "v1alpha1/{+parent}/nodes";
private final java.util.regex.Pattern PARENT_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+$");
/**
* Lists nodes.
*
* Create a request for the method "nodes.list".
*
       * This request holds the parameters needed by the tpu server.  After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation. <p> {@link
* List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must be
* called to initialize this instance immediately after invoking the constructor. </p>
*
* @param parent Required. The parent resource name.
* @since 1.13
*/
protected List(java.lang.String parent) {
super(TPU.this, "GET", REST_PATH, null, com.google.api.services.tpu.v1alpha1.model.ListNodesResponse.class);
this.parent = com.google.api.client.util.Preconditions.checkNotNull(parent, "Required parameter parent must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/** Required. The parent resource name. */
@com.google.api.client.util.Key
private java.lang.String parent;
/** Required. The parent resource name.
*/
public java.lang.String getParent() {
return parent;
}
/** Required. The parent resource name. */
public List setParent(java.lang.String parent) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+$");
}
this.parent = parent;
return this;
}
/** The maximum number of items to return. */
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** The maximum number of items to return.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/** The maximum number of items to return. */
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/** The next_page_token value returned from a previous List request, if any. */
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** The next_page_token value returned from a previous List request, if any.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/** The next_page_token value returned from a previous List request, if any. */
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
/**
* Reimages a node's OS.
*
* Create a request for the method "nodes.reimage".
*
* This request holds the parameters needed by the tpu server. After setting any optional
* parameters, call the {@link Reimage#execute()} method to invoke the remote operation.
*
* @param name The resource name.
* @param content the {@link com.google.api.services.tpu.v1alpha1.model.ReimageNodeRequest}
* @return the request
*/
public Reimage reimage(java.lang.String name, com.google.api.services.tpu.v1alpha1.model.ReimageNodeRequest content) throws java.io.IOException {
Reimage result = new Reimage(name, content);
initialize(result);
return result;
}
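// Usage sketch (illustrative only): the client, resource name and request body contents are
// assumptions; populate the ReimageNodeRequest with the target software version before sending.
//
//   Operation operation = tpu.projects().locations().nodes()
//       .reimage("projects/my-project/locations/us-central1-b/nodes/my-tpu",
//           new ReimageNodeRequest())
//       .execute();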
public class Reimage extends TPURequest<com.google.api.services.tpu.v1alpha1.model.Operation> {
private static final String REST_PATH = "v1alpha1/{+name}:reimage";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+/nodes/[^/]+$");
/**
* Reimages a node's OS.
*
* Create a request for the method "nodes.reimage".
*
       * This request holds the parameters needed by the tpu server.  After setting any optional
* parameters, call the {@link Reimage#execute()} method to invoke the remote operation. <p>
* {@link
* Reimage#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name The resource name.
* @param content the {@link com.google.api.services.tpu.v1alpha1.model.ReimageNodeRequest}
* @since 1.13
*/
protected Reimage(java.lang.String name, com.google.api.services.tpu.v1alpha1.model.ReimageNodeRequest content) {
super(TPU.this, "POST", REST_PATH, content, com.google.api.services.tpu.v1alpha1.model.Operation.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/nodes/[^/]+$");
}
}
@Override
public Reimage set$Xgafv(java.lang.String $Xgafv) {
return (Reimage) super.set$Xgafv($Xgafv);
}
@Override
public Reimage setAccessToken(java.lang.String accessToken) {
return (Reimage) super.setAccessToken(accessToken);
}
@Override
public Reimage setAlt(java.lang.String alt) {
return (Reimage) super.setAlt(alt);
}
@Override
public Reimage setCallback(java.lang.String callback) {
return (Reimage) super.setCallback(callback);
}
@Override
public Reimage setFields(java.lang.String fields) {
return (Reimage) super.setFields(fields);
}
@Override
public Reimage setKey(java.lang.String key) {
return (Reimage) super.setKey(key);
}
@Override
public Reimage setOauthToken(java.lang.String oauthToken) {
return (Reimage) super.setOauthToken(oauthToken);
}
@Override
public Reimage setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Reimage) super.setPrettyPrint(prettyPrint);
}
@Override
public Reimage setQuotaUser(java.lang.String quotaUser) {
return (Reimage) super.setQuotaUser(quotaUser);
}
@Override
public Reimage setUploadType(java.lang.String uploadType) {
return (Reimage) super.setUploadType(uploadType);
}
@Override
public Reimage setUploadProtocol(java.lang.String uploadProtocol) {
return (Reimage) super.setUploadProtocol(uploadProtocol);
}
/** The resource name. */
@com.google.api.client.util.Key
private java.lang.String name;
/** The resource name.
*/
public java.lang.String getName() {
return name;
}
/** The resource name. */
public Reimage setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/nodes/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Reimage set(String parameterName, Object value) {
return (Reimage) super.set(parameterName, value);
}
}
/**
* Starts a node.
*
* Create a request for the method "nodes.start".
*
* This request holds the parameters needed by the tpu server. After setting any optional
* parameters, call the {@link Start#execute()} method to invoke the remote operation.
*
* @param name The resource name.
* @param content the {@link com.google.api.services.tpu.v1alpha1.model.StartNodeRequest}
* @return the request
*/
public Start start(java.lang.String name, com.google.api.services.tpu.v1alpha1.model.StartNodeRequest content) throws java.io.IOException {
Start result = new Start(name, content);
initialize(result);
return result;
}
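// Usage sketch (illustrative only): the client "tpu" and the node resource name are assumptions.
//
//   Operation operation = tpu.projects().locations().nodes()
//       .start("projects/my-project/locations/us-central1-b/nodes/my-tpu",
//           new StartNodeRequest())
//       .execute();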
public class Start extends TPURequest<com.google.api.services.tpu.v1alpha1.model.Operation> {
private static final String REST_PATH = "v1alpha1/{+name}:start";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+/nodes/[^/]+$");
/**
* Starts a node.
*
* Create a request for the method "nodes.start".
*
       * This request holds the parameters needed by the tpu server.  After setting any optional
* parameters, call the {@link Start#execute()} method to invoke the remote operation. <p> {@link
* Start#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name The resource name.
* @param content the {@link com.google.api.services.tpu.v1alpha1.model.StartNodeRequest}
* @since 1.13
*/
protected Start(java.lang.String name, com.google.api.services.tpu.v1alpha1.model.StartNodeRequest content) {
super(TPU.this, "POST", REST_PATH, content, com.google.api.services.tpu.v1alpha1.model.Operation.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/nodes/[^/]+$");
}
}
@Override
public Start set$Xgafv(java.lang.String $Xgafv) {
return (Start) super.set$Xgafv($Xgafv);
}
@Override
public Start setAccessToken(java.lang.String accessToken) {
return (Start) super.setAccessToken(accessToken);
}
@Override
public Start setAlt(java.lang.String alt) {
return (Start) super.setAlt(alt);
}
@Override
public Start setCallback(java.lang.String callback) {
return (Start) super.setCallback(callback);
}
@Override
public Start setFields(java.lang.String fields) {
return (Start) super.setFields(fields);
}
@Override
public Start setKey(java.lang.String key) {
return (Start) super.setKey(key);
}
@Override
public Start setOauthToken(java.lang.String oauthToken) {
return (Start) super.setOauthToken(oauthToken);
}
@Override
public Start setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Start) super.setPrettyPrint(prettyPrint);
}
@Override
public Start setQuotaUser(java.lang.String quotaUser) {
return (Start) super.setQuotaUser(quotaUser);
}
@Override
public Start setUploadType(java.lang.String uploadType) {
return (Start) super.setUploadType(uploadType);
}
@Override
public Start setUploadProtocol(java.lang.String uploadProtocol) {
return (Start) super.setUploadProtocol(uploadProtocol);
}
/** The resource name. */
@com.google.api.client.util.Key
private java.lang.String name;
/** The resource name.
*/
public java.lang.String getName() {
return name;
}
/** The resource name. */
public Start setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/nodes/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Start set(String parameterName, Object value) {
return (Start) super.set(parameterName, value);
}
}
/**
* Stops a node.
*
* Create a request for the method "nodes.stop".
*
* This request holds the parameters needed by the tpu server. After setting any optional
* parameters, call the {@link Stop#execute()} method to invoke the remote operation.
*
* @param name The resource name.
* @param content the {@link com.google.api.services.tpu.v1alpha1.model.StopNodeRequest}
* @return the request
*/
public Stop stop(java.lang.String name, com.google.api.services.tpu.v1alpha1.model.StopNodeRequest content) throws java.io.IOException {
Stop result = new Stop(name, content);
initialize(result);
return result;
}
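// Usage sketch (illustrative only): the client "tpu" and the node resource name are assumptions.
//
//   Operation operation = tpu.projects().locations().nodes()
//       .stop("projects/my-project/locations/us-central1-b/nodes/my-tpu",
//           new StopNodeRequest())
//       .execute();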
public class Stop extends TPURequest<com.google.api.services.tpu.v1alpha1.model.Operation> {
private static final String REST_PATH = "v1alpha1/{+name}:stop";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+/nodes/[^/]+$");
/**
* Stops a node.
*
* Create a request for the method "nodes.stop".
*
       * This request holds the parameters needed by the tpu server.  After setting any optional
* parameters, call the {@link Stop#execute()} method to invoke the remote operation. <p> {@link
* Stop#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must be
* called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name The resource name.
* @param content the {@link com.google.api.services.tpu.v1alpha1.model.StopNodeRequest}
* @since 1.13
*/
protected Stop(java.lang.String name, com.google.api.services.tpu.v1alpha1.model.StopNodeRequest content) {
super(TPU.this, "POST", REST_PATH, content, com.google.api.services.tpu.v1alpha1.model.Operation.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/nodes/[^/]+$");
}
}
@Override
public Stop set$Xgafv(java.lang.String $Xgafv) {
return (Stop) super.set$Xgafv($Xgafv);
}
@Override
public Stop setAccessToken(java.lang.String accessToken) {
return (Stop) super.setAccessToken(accessToken);
}
@Override
public Stop setAlt(java.lang.String alt) {
return (Stop) super.setAlt(alt);
}
@Override
public Stop setCallback(java.lang.String callback) {
return (Stop) super.setCallback(callback);
}
@Override
public Stop setFields(java.lang.String fields) {
return (Stop) super.setFields(fields);
}
@Override
public Stop setKey(java.lang.String key) {
return (Stop) super.setKey(key);
}
@Override
public Stop setOauthToken(java.lang.String oauthToken) {
return (Stop) super.setOauthToken(oauthToken);
}
@Override
public Stop setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Stop) super.setPrettyPrint(prettyPrint);
}
@Override
public Stop setQuotaUser(java.lang.String quotaUser) {
return (Stop) super.setQuotaUser(quotaUser);
}
@Override
public Stop setUploadType(java.lang.String uploadType) {
return (Stop) super.setUploadType(uploadType);
}
@Override
public Stop setUploadProtocol(java.lang.String uploadProtocol) {
return (Stop) super.setUploadProtocol(uploadProtocol);
}
/** The resource name. */
@com.google.api.client.util.Key
private java.lang.String name;
/** The resource name.
*/
public java.lang.String getName() {
return name;
}
/** The resource name. */
public Stop setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/nodes/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Stop set(String parameterName, Object value) {
return (Stop) super.set(parameterName, value);
}
}
}
/**
* An accessor for creating requests from the Operations collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code TPU tpu = new TPU(...);}
* {@code TPU.Operations.List request = tpu.operations().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public Operations operations() {
return new Operations();
}
/**
* The "operations" collection of methods.
*/
public class Operations {
/**
* Starts asynchronous cancellation on a long-running operation. The server makes a best effort to
* cancel the operation, but success is not guaranteed. If the server doesn't support this method,
* it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other
* methods to check whether the cancellation succeeded or whether the operation completed despite
* cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an
* operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to
* `Code.CANCELLED`.
*
* Create a request for the method "operations.cancel".
*
* This request holds the parameters needed by the tpu server. After setting any optional
* parameters, call the {@link Cancel#execute()} method to invoke the remote operation.
*
* @param name The name of the operation resource to be cancelled.
* @return the request
*/
public Cancel cancel(java.lang.String name) throws java.io.IOException {
Cancel result = new Cancel(name);
initialize(result);
return result;
}
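// Usage sketch (illustrative only): the client "tpu" and the operation name are assumptions.
//
//   tpu.projects().locations().operations()
//       .cancel("projects/my-project/locations/us-central1-b/operations/operation-123")
//       .execute();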
public class Cancel extends TPURequest<com.google.api.services.tpu.v1alpha1.model.Empty> {
private static final String REST_PATH = "v1alpha1/{+name}:cancel";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+/operations/[^/]+$");
/**
* Starts asynchronous cancellation on a long-running operation. The server makes a best effort to
* cancel the operation, but success is not guaranteed. If the server doesn't support this method,
* it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other
* methods to check whether the cancellation succeeded or whether the operation completed despite
* cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an
* operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to
* `Code.CANCELLED`.
*
* Create a request for the method "operations.cancel".
*
       * This request holds the parameters needed by the tpu server.  After setting any optional
* parameters, call the {@link Cancel#execute()} method to invoke the remote operation. <p> {@link
* Cancel#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name The name of the operation resource to be cancelled.
* @since 1.13
*/
protected Cancel(java.lang.String name) {
super(TPU.this, "POST", REST_PATH, null, com.google.api.services.tpu.v1alpha1.model.Empty.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/operations/[^/]+$");
}
}
@Override
public Cancel set$Xgafv(java.lang.String $Xgafv) {
return (Cancel) super.set$Xgafv($Xgafv);
}
@Override
public Cancel setAccessToken(java.lang.String accessToken) {
return (Cancel) super.setAccessToken(accessToken);
}
@Override
public Cancel setAlt(java.lang.String alt) {
return (Cancel) super.setAlt(alt);
}
@Override
public Cancel setCallback(java.lang.String callback) {
return (Cancel) super.setCallback(callback);
}
@Override
public Cancel setFields(java.lang.String fields) {
return (Cancel) super.setFields(fields);
}
@Override
public Cancel setKey(java.lang.String key) {
return (Cancel) super.setKey(key);
}
@Override
public Cancel setOauthToken(java.lang.String oauthToken) {
return (Cancel) super.setOauthToken(oauthToken);
}
@Override
public Cancel setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Cancel) super.setPrettyPrint(prettyPrint);
}
@Override
public Cancel setQuotaUser(java.lang.String quotaUser) {
return (Cancel) super.setQuotaUser(quotaUser);
}
@Override
public Cancel setUploadType(java.lang.String uploadType) {
return (Cancel) super.setUploadType(uploadType);
}
@Override
public Cancel setUploadProtocol(java.lang.String uploadProtocol) {
return (Cancel) super.setUploadProtocol(uploadProtocol);
}
/** The name of the operation resource to be cancelled. */
@com.google.api.client.util.Key
private java.lang.String name;
/** The name of the operation resource to be cancelled.
*/
public java.lang.String getName() {
return name;
}
/** The name of the operation resource to be cancelled. */
public Cancel setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/operations/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Cancel set(String parameterName, Object value) {
return (Cancel) super.set(parameterName, value);
}
}
/**
* Deletes a long-running operation. This method indicates that the client is no longer interested
* in the operation result. It does not cancel the operation. If the server doesn't support this
* method, it returns `google.rpc.Code.UNIMPLEMENTED`.
*
* Create a request for the method "operations.delete".
*
* This request holds the parameters needed by the tpu server. After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation.
*
* @param name The name of the operation resource to be deleted.
* @return the request
*/
public Delete delete(java.lang.String name) throws java.io.IOException {
Delete result = new Delete(name);
initialize(result);
return result;
}
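// Usage sketch (illustrative only): the client "tpu" and the operation name are assumptions.
//
//   tpu.projects().locations().operations()
//       .delete("projects/my-project/locations/us-central1-b/operations/operation-123")
//       .execute();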
public class Delete extends TPURequest<com.google.api.services.tpu.v1alpha1.model.Empty> {
private static final String REST_PATH = "v1alpha1/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+/operations/[^/]+$");
/**
* Deletes a long-running operation. This method indicates that the client is no longer interested
* in the operation result. It does not cancel the operation. If the server doesn't support this
* method, it returns `google.rpc.Code.UNIMPLEMENTED`.
*
* Create a request for the method "operations.delete".
*
       * This request holds the parameters needed by the tpu server.  After setting any optional
* parameters, call the {@link Delete#execute()} method to invoke the remote operation. <p> {@link
* Delete#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must
* be called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name The name of the operation resource to be deleted.
* @since 1.13
*/
protected Delete(java.lang.String name) {
super(TPU.this, "DELETE", REST_PATH, null, com.google.api.services.tpu.v1alpha1.model.Empty.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/operations/[^/]+$");
}
}
@Override
public Delete set$Xgafv(java.lang.String $Xgafv) {
return (Delete) super.set$Xgafv($Xgafv);
}
@Override
public Delete setAccessToken(java.lang.String accessToken) {
return (Delete) super.setAccessToken(accessToken);
}
@Override
public Delete setAlt(java.lang.String alt) {
return (Delete) super.setAlt(alt);
}
@Override
public Delete setCallback(java.lang.String callback) {
return (Delete) super.setCallback(callback);
}
@Override
public Delete setFields(java.lang.String fields) {
return (Delete) super.setFields(fields);
}
@Override
public Delete setKey(java.lang.String key) {
return (Delete) super.setKey(key);
}
@Override
public Delete setOauthToken(java.lang.String oauthToken) {
return (Delete) super.setOauthToken(oauthToken);
}
@Override
public Delete setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Delete) super.setPrettyPrint(prettyPrint);
}
@Override
public Delete setQuotaUser(java.lang.String quotaUser) {
return (Delete) super.setQuotaUser(quotaUser);
}
@Override
public Delete setUploadType(java.lang.String uploadType) {
return (Delete) super.setUploadType(uploadType);
}
@Override
public Delete setUploadProtocol(java.lang.String uploadProtocol) {
return (Delete) super.setUploadProtocol(uploadProtocol);
}
/** The name of the operation resource to be deleted. */
@com.google.api.client.util.Key
private java.lang.String name;
/** The name of the operation resource to be deleted.
*/
public java.lang.String getName() {
return name;
}
/** The name of the operation resource to be deleted. */
public Delete setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/operations/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Delete set(String parameterName, Object value) {
return (Delete) super.set(parameterName, value);
}
}
/**
* Gets the latest state of a long-running operation. Clients can use this method to poll the
* operation result at intervals as recommended by the API service.
*
* Create a request for the method "operations.get".
*
* This request holds the parameters needed by the tpu server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param name The name of the operation resource.
* @return the request
*/
public Get get(java.lang.String name) throws java.io.IOException {
Get result = new Get(name);
initialize(result);
return result;
}
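// Usage sketch (illustrative only): the client "tpu" and the operation name are assumptions;
// callers typically poll this method until the returned Operation reports completion.
//
//   Operation operation = tpu.projects().locations().operations()
//       .get("projects/my-project/locations/us-central1-b/operations/operation-123")
//       .execute();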
public class Get extends TPURequest<com.google.api.services.tpu.v1alpha1.model.Operation> {
private static final String REST_PATH = "v1alpha1/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+/operations/[^/]+$");
/**
* Gets the latest state of a long-running operation. Clients can use this method to poll the
* operation result at intervals as recommended by the API service.
*
* Create a request for the method "operations.get".
*
       * This request holds the parameters needed by the tpu server.  After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation. <p> {@link
* Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must be
* called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name The name of the operation resource.
* @since 1.13
*/
protected Get(java.lang.String name) {
super(TPU.this, "GET", REST_PATH, null, com.google.api.services.tpu.v1alpha1.model.Operation.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/operations/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/** The name of the operation resource. */
@com.google.api.client.util.Key
private java.lang.String name;
/** The name of the operation resource.
*/
public java.lang.String getName() {
return name;
}
/** The name of the operation resource. */
public Get setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/operations/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
* Lists operations that match the specified filter in the request. If the server doesn't support
* this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to override
* the binding to use different resource name schemes, such as `users/operations`. To override the
* binding, API services can add a binding such as `"/v1/{name=users}/operations"` to their service
* configuration. For backwards compatibility, the default name includes the operations collection
* id, however overriding users must ensure the name binding is the parent resource, without the
* operations collection id.
*
* Create a request for the method "operations.list".
*
* This request holds the parameters needed by the tpu server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param name The name of the operation's parent resource.
* @return the request
*/
public List list(java.lang.String name) throws java.io.IOException {
List result = new List(name);
initialize(result);
return result;
}
public class List extends TPURequest<com.google.api.services.tpu.v1alpha1.model.ListOperationsResponse> {
private static final String REST_PATH = "v1alpha1/{+name}/operations";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+$");
/**
* Lists operations that match the specified filter in the request. If the server doesn't support
* this method, it returns `UNIMPLEMENTED`. NOTE: the `name` binding allows API services to
* override the binding to use different resource name schemes, such as `users/operations`. To
* override the binding, API services can add a binding such as `"/v1/{name=users}/operations"` to
* their service configuration. For backwards compatibility, the default name includes the
* operations collection id, however overriding users must ensure the name binding is the parent
* resource, without the operations collection id.
*
* Create a request for the method "operations.list".
*
       * This request holds the parameters needed by the tpu server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation. <p> {@link
* List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must be
* called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name The name of the operation's parent resource.
* @since 1.13
*/
protected List(java.lang.String name) {
super(TPU.this, "GET", REST_PATH, null, com.google.api.services.tpu.v1alpha1.model.ListOperationsResponse.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/** The name of the operation's parent resource. */
@com.google.api.client.util.Key
private java.lang.String name;
/** The name of the operation's parent resource.
*/
public java.lang.String getName() {
return name;
}
/** The name of the operation's parent resource. */
public List setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+$");
}
this.name = name;
return this;
}
/** The standard list filter. */
@com.google.api.client.util.Key
private java.lang.String filter;
/** The standard list filter.
*/
public java.lang.String getFilter() {
return filter;
}
/** The standard list filter. */
public List setFilter(java.lang.String filter) {
this.filter = filter;
return this;
}
/** The standard list page size. */
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** The standard list page size.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/** The standard list page size. */
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/** The standard list page token. */
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** The standard list page token.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/** The standard list page token. */
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
}
/**
* An accessor for creating requests from the TensorflowVersions collection.
*
* <p>The typical use is:</p>
* <pre>
* {@code TPU tpu = new TPU(...);}
* {@code TPU.TensorflowVersions.List request = tpu.tensorflowVersions().list(parameters ...)}
* </pre>
*
* @return the resource collection
*/
public TensorflowVersions tensorflowVersions() {
return new TensorflowVersions();
}
/**
* The "tensorflowVersions" collection of methods.
*/
public class TensorflowVersions {
/**
* Gets TensorFlow Version.
*
* Create a request for the method "tensorflowVersions.get".
*
* This request holds the parameters needed by the tpu server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation.
*
* @param name Required. The resource name.
* @return the request
*/
public Get get(java.lang.String name) throws java.io.IOException {
Get result = new Get(name);
initialize(result);
return result;
}
public class Get extends TPURequest<com.google.api.services.tpu.v1alpha1.model.TensorFlowVersion> {
private static final String REST_PATH = "v1alpha1/{+name}";
private final java.util.regex.Pattern NAME_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+/tensorflowVersions/[^/]+$");
/**
* Gets TensorFlow Version.
*
* Create a request for the method "tensorflowVersions.get".
*
       * This request holds the parameters needed by the tpu server. After setting any optional
* parameters, call the {@link Get#execute()} method to invoke the remote operation. <p> {@link
* Get#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must be
* called to initialize this instance immediately after invoking the constructor. </p>
*
* @param name Required. The resource name.
* @since 1.13
*/
protected Get(java.lang.String name) {
super(TPU.this, "GET", REST_PATH, null, com.google.api.services.tpu.v1alpha1.model.TensorFlowVersion.class);
this.name = com.google.api.client.util.Preconditions.checkNotNull(name, "Required parameter name must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/tensorflowVersions/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public Get set$Xgafv(java.lang.String $Xgafv) {
return (Get) super.set$Xgafv($Xgafv);
}
@Override
public Get setAccessToken(java.lang.String accessToken) {
return (Get) super.setAccessToken(accessToken);
}
@Override
public Get setAlt(java.lang.String alt) {
return (Get) super.setAlt(alt);
}
@Override
public Get setCallback(java.lang.String callback) {
return (Get) super.setCallback(callback);
}
@Override
public Get setFields(java.lang.String fields) {
return (Get) super.setFields(fields);
}
@Override
public Get setKey(java.lang.String key) {
return (Get) super.setKey(key);
}
@Override
public Get setOauthToken(java.lang.String oauthToken) {
return (Get) super.setOauthToken(oauthToken);
}
@Override
public Get setPrettyPrint(java.lang.Boolean prettyPrint) {
return (Get) super.setPrettyPrint(prettyPrint);
}
@Override
public Get setQuotaUser(java.lang.String quotaUser) {
return (Get) super.setQuotaUser(quotaUser);
}
@Override
public Get setUploadType(java.lang.String uploadType) {
return (Get) super.setUploadType(uploadType);
}
@Override
public Get setUploadProtocol(java.lang.String uploadProtocol) {
return (Get) super.setUploadProtocol(uploadProtocol);
}
/** Required. The resource name. */
@com.google.api.client.util.Key
private java.lang.String name;
/** Required. The resource name.
*/
public java.lang.String getName() {
return name;
}
/** Required. The resource name. */
public Get setName(java.lang.String name) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(NAME_PATTERN.matcher(name).matches(),
"Parameter name must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+/tensorflowVersions/[^/]+$");
}
this.name = name;
return this;
}
@Override
public Get set(String parameterName, Object value) {
return (Get) super.set(parameterName, value);
}
}
/**
* Lists TensorFlow versions supported by this API.
*
* Create a request for the method "tensorflowVersions.list".
*
* This request holds the parameters needed by the tpu server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation.
*
* @param parent Required. The parent resource name.
* @return the request
*/
public List list(java.lang.String parent) throws java.io.IOException {
List result = new List(parent);
initialize(result);
return result;
}
public class List extends TPURequest<com.google.api.services.tpu.v1alpha1.model.ListTensorFlowVersionsResponse> {
private static final String REST_PATH = "v1alpha1/{+parent}/tensorflowVersions";
private final java.util.regex.Pattern PARENT_PATTERN =
java.util.regex.Pattern.compile("^projects/[^/]+/locations/[^/]+$");
/**
* Lists TensorFlow versions supported by this API.
*
* Create a request for the method "tensorflowVersions.list".
*
       * This request holds the parameters needed by the tpu server. After setting any optional
* parameters, call the {@link List#execute()} method to invoke the remote operation. <p> {@link
* List#initialize(com.google.api.client.googleapis.services.AbstractGoogleClientRequest)} must be
* called to initialize this instance immediately after invoking the constructor. </p>
*
* @param parent Required. The parent resource name.
* @since 1.13
*/
protected List(java.lang.String parent) {
super(TPU.this, "GET", REST_PATH, null, com.google.api.services.tpu.v1alpha1.model.ListTensorFlowVersionsResponse.class);
this.parent = com.google.api.client.util.Preconditions.checkNotNull(parent, "Required parameter parent must be specified.");
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+$");
}
}
@Override
public com.google.api.client.http.HttpResponse executeUsingHead() throws java.io.IOException {
return super.executeUsingHead();
}
@Override
public com.google.api.client.http.HttpRequest buildHttpRequestUsingHead() throws java.io.IOException {
return super.buildHttpRequestUsingHead();
}
@Override
public List set$Xgafv(java.lang.String $Xgafv) {
return (List) super.set$Xgafv($Xgafv);
}
@Override
public List setAccessToken(java.lang.String accessToken) {
return (List) super.setAccessToken(accessToken);
}
@Override
public List setAlt(java.lang.String alt) {
return (List) super.setAlt(alt);
}
@Override
public List setCallback(java.lang.String callback) {
return (List) super.setCallback(callback);
}
@Override
public List setFields(java.lang.String fields) {
return (List) super.setFields(fields);
}
@Override
public List setKey(java.lang.String key) {
return (List) super.setKey(key);
}
@Override
public List setOauthToken(java.lang.String oauthToken) {
return (List) super.setOauthToken(oauthToken);
}
@Override
public List setPrettyPrint(java.lang.Boolean prettyPrint) {
return (List) super.setPrettyPrint(prettyPrint);
}
@Override
public List setQuotaUser(java.lang.String quotaUser) {
return (List) super.setQuotaUser(quotaUser);
}
@Override
public List setUploadType(java.lang.String uploadType) {
return (List) super.setUploadType(uploadType);
}
@Override
public List setUploadProtocol(java.lang.String uploadProtocol) {
return (List) super.setUploadProtocol(uploadProtocol);
}
/** Required. The parent resource name. */
@com.google.api.client.util.Key
private java.lang.String parent;
/** Required. The parent resource name.
*/
public java.lang.String getParent() {
return parent;
}
/** Required. The parent resource name. */
public List setParent(java.lang.String parent) {
if (!getSuppressPatternChecks()) {
com.google.api.client.util.Preconditions.checkArgument(PARENT_PATTERN.matcher(parent).matches(),
"Parameter parent must conform to the pattern " +
"^projects/[^/]+/locations/[^/]+$");
}
this.parent = parent;
return this;
}
/** List filter. */
@com.google.api.client.util.Key
private java.lang.String filter;
/** List filter.
*/
public java.lang.String getFilter() {
return filter;
}
/** List filter. */
public List setFilter(java.lang.String filter) {
this.filter = filter;
return this;
}
/** Sort results. */
@com.google.api.client.util.Key
private java.lang.String orderBy;
/** Sort results.
*/
public java.lang.String getOrderBy() {
return orderBy;
}
/** Sort results. */
public List setOrderBy(java.lang.String orderBy) {
this.orderBy = orderBy;
return this;
}
/** The maximum number of items to return. */
@com.google.api.client.util.Key
private java.lang.Integer pageSize;
/** The maximum number of items to return.
*/
public java.lang.Integer getPageSize() {
return pageSize;
}
/** The maximum number of items to return. */
public List setPageSize(java.lang.Integer pageSize) {
this.pageSize = pageSize;
return this;
}
/** The next_page_token value returned from a previous List request, if any. */
@com.google.api.client.util.Key
private java.lang.String pageToken;
/** The next_page_token value returned from a previous List request, if any.
*/
public java.lang.String getPageToken() {
return pageToken;
}
/** The next_page_token value returned from a previous List request, if any. */
public List setPageToken(java.lang.String pageToken) {
this.pageToken = pageToken;
return this;
}
@Override
public List set(String parameterName, Object value) {
return (List) super.set(parameterName, value);
}
}
}
}
}
/**
* Builder for {@link TPU}.
*
* <p>
* Implementation is not thread-safe.
* </p>
*
* @since 1.3.0
*/
public static final class Builder extends com.google.api.client.googleapis.services.json.AbstractGoogleJsonClient.Builder {
private static String chooseEndpoint(com.google.api.client.http.HttpTransport transport) {
// If the GOOGLE_API_USE_MTLS_ENDPOINT environment variable value is "always", use mTLS endpoint.
// If the env variable is "auto", use mTLS endpoint if and only if the transport is mTLS.
// Use the regular endpoint for all other cases.
String useMtlsEndpoint = System.getenv("GOOGLE_API_USE_MTLS_ENDPOINT");
useMtlsEndpoint = useMtlsEndpoint == null ? "auto" : useMtlsEndpoint;
if ("always".equals(useMtlsEndpoint) || ("auto".equals(useMtlsEndpoint) && transport != null && transport.isMtls())) {
return DEFAULT_MTLS_ROOT_URL;
}
return DEFAULT_ROOT_URL;
}
/**
* Returns an instance of a new builder.
*
* @param transport HTTP transport, which should normally be:
* <ul>
* <li>Google App Engine:
* {@code com.google.api.client.extensions.appengine.http.UrlFetchTransport}</li>
* <li>Android: {@code newCompatibleTransport} from
* {@code com.google.api.client.extensions.android.http.AndroidHttp}</li>
* <li>Java: {@link com.google.api.client.googleapis.javanet.GoogleNetHttpTransport#newTrustedTransport()}
* </li>
* </ul>
* @param jsonFactory JSON factory, which may be:
* <ul>
* <li>Jackson: {@code com.google.api.client.json.jackson2.JacksonFactory}</li>
* <li>Google GSON: {@code com.google.api.client.json.gson.GsonFactory}</li>
* <li>Android Honeycomb or higher:
* {@code com.google.api.client.extensions.android.json.AndroidJsonFactory}</li>
* </ul>
* @param httpRequestInitializer HTTP request initializer or {@code null} for none
* @since 1.7
*/
public Builder(com.google.api.client.http.HttpTransport transport, com.google.api.client.json.JsonFactory jsonFactory,
com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
super(
transport,
jsonFactory,
Builder.chooseEndpoint(transport),
DEFAULT_SERVICE_PATH,
httpRequestInitializer,
false);
setBatchPath(DEFAULT_BATCH_PATH);
}
/** Builds a new instance of {@link TPU}. */
@Override
public TPU build() {
return new TPU(this);
}
@Override
public Builder setRootUrl(String rootUrl) {
return (Builder) super.setRootUrl(rootUrl);
}
@Override
public Builder setServicePath(String servicePath) {
return (Builder) super.setServicePath(servicePath);
}
@Override
public Builder setBatchPath(String batchPath) {
return (Builder) super.setBatchPath(batchPath);
}
@Override
public Builder setHttpRequestInitializer(com.google.api.client.http.HttpRequestInitializer httpRequestInitializer) {
return (Builder) super.setHttpRequestInitializer(httpRequestInitializer);
}
@Override
public Builder setApplicationName(String applicationName) {
return (Builder) super.setApplicationName(applicationName);
}
@Override
public Builder setSuppressPatternChecks(boolean suppressPatternChecks) {
return (Builder) super.setSuppressPatternChecks(suppressPatternChecks);
}
@Override
public Builder setSuppressRequiredParameterChecks(boolean suppressRequiredParameterChecks) {
return (Builder) super.setSuppressRequiredParameterChecks(suppressRequiredParameterChecks);
}
@Override
public Builder setSuppressAllChecks(boolean suppressAllChecks) {
return (Builder) super.setSuppressAllChecks(suppressAllChecks);
}
/**
* Set the {@link TPURequestInitializer}.
*
* @since 1.12
*/
public Builder setTPURequestInitializer(
TPURequestInitializer tpuRequestInitializer) {
return (Builder) super.setGoogleClientRequestInitializer(tpuRequestInitializer);
}
@Override
public Builder setGoogleClientRequestInitializer(
com.google.api.client.googleapis.services.GoogleClientRequestInitializer googleClientRequestInitializer) {
return (Builder) super.setGoogleClientRequestInitializer(googleClientRequestInitializer);
}
}
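  // Illustrative sketch (not part of the generated code): a caller would normally construct the
  // client through the Builder above; the transport, JSON factory, request initializer and
  // application name below are placeholders supplied by the application.
  //
  //   TPU tpu = new TPU.Builder(transport, jsonFactory, httpRequestInitializer)
  //       .setApplicationName("my-application")
  //       .build();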
}
| [
"\"GOOGLE_API_USE_MTLS_ENDPOINT\""
]
| []
| [
"GOOGLE_API_USE_MTLS_ENDPOINT"
]
| [] | ["GOOGLE_API_USE_MTLS_ENDPOINT"] | java | 1 | 0 | |
test/e2e/utils.go | package e2e
import (
goctx "context"
"crypto/tls"
"fmt"
"net/http"
"os"
"os/exec"
"runtime"
"strconv"
"strings"
"testing"
"time"
osv1 "github.com/openshift/api/route/v1"
osv1sec "github.com/openshift/api/security/v1"
framework "github.com/operator-framework/operator-sdk/pkg/test"
"github.com/operator-framework/operator-sdk/pkg/test/e2eutil"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/net/context"
corev1 "k8s.io/api/core/v1"
rbac "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/discovery"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"github.com/jaegertracing/jaeger-operator/pkg/apis"
v1 "github.com/jaegertracing/jaeger-operator/pkg/apis/jaegertracing/v1"
)
var (
retryInterval = time.Second * 5
timeout = time.Minute * 5
storageNamespace = os.Getenv("STORAGE_NAMESPACE")
kafkaNamespace = os.Getenv("KAFKA_NAMESPACE")
debugMode = getBoolEnv("DEBUG_MODE", false)
usingOLM = getBoolEnv("OLM", false)
esServerUrls = "http://elasticsearch." + storageNamespace + ".svc:9200"
cassandraServiceName = "cassandra." + storageNamespace + ".svc"
cassandraKeyspace = "jaeger_v1_datacenter1"
cassandraDatacenter = "datacenter1"
ctx *framework.TestCtx
fw *framework.Framework
namespace string
t *testing.T
)
func getBoolEnv(key string, defaultValue bool) bool {
if value, ok := os.LookupEnv(key); ok {
boolValue, err := strconv.ParseBool(value)
if err != nil {
logrus.Warnf("Error [%v] received converting environment variable [%s] using [%v]", err, key, boolValue)
}
return boolValue
}
return defaultValue
}
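// Illustrative usage of getBoolEnv (the variable name is only an example): with FEATURE_FLAG=true
// exported this returns true, with it unset it returns the supplied default, and with an
// unparsable value it logs a warning and returns false (the zero value from ParseBool).
//
//	enabled := getBoolEnv("FEATURE_FLAG", false)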
// GetPod returns the first pod in the given namespace whose name starts with namePrefix and whose
// containers include an image containing containsImage; the test fails if no such pod is found.
func GetPod(namespace, namePrefix, containsImage string, kubeclient kubernetes.Interface) corev1.Pod {
pods, err := kubeclient.CoreV1().Pods(namespace).List(metav1.ListOptions{})
if err != nil {
printTestStackTrace()
require.NoError(t, err)
}
for _, pod := range pods.Items {
if strings.HasPrefix(pod.Name, namePrefix) {
for _, c := range pod.Spec.Containers {
if strings.Contains(c.Image, containsImage) {
return pod
}
}
}
}
errorMessage := fmt.Sprintf("could not find pod in namespace %s with prefix %s and image %s", namespace, namePrefix, containsImage)
require.FailNow(t, errorMessage)
// We should never get here, but go requires a return statement
emptyPod := &corev1.Pod{}
return *emptyPod
}
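// Illustrative usage of GetPod (the name prefix and image substring are placeholders):
//
//	pod := GetPod(namespace, "jaeger-operator", "jaeger-operator", fw.KubeClient)
//	logrus.Infof("found pod %s", pod.Name)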
func prepare(t *testing.T) (*framework.TestCtx, error) {
logrus.Infof("Debug Mode? %v", debugMode)
ctx := framework.NewTestCtx(t)
// Install jaeger-operator unless we've installed it from OperatorHub
if !usingOLM {
err := ctx.InitializeClusterResources(&framework.CleanupOptions{TestContext: ctx, Timeout: timeout, RetryInterval: retryInterval})
if err != nil {
t.Fatalf("failed to initialize cluster resources: %v", err)
}
}
namespace, err := ctx.GetNamespace()
if err != nil {
t.Fatal(err)
}
roleName := namespace + "-jaeger-operator-cluster-role-crbs"
cr := &rbac.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: roleName,
},
Rules: []rbac.PolicyRule{{
APIGroups: []string{"rbac.authorization.k8s.io"},
Resources: []string{"clusterrolebindings"},
Verbs: []string{"*"},
}},
}
if _, err := framework.Global.KubeClient.RbacV1().ClusterRoles().Create(cr); err != nil {
t.Fatal(err)
}
crb := &rbac.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: namespace + "-jaeger-operator-cluster-admin",
},
Subjects: []rbac.Subject{{
Kind: "ServiceAccount",
Name: "jaeger-operator",
Namespace: namespace,
}},
RoleRef: rbac.RoleRef{Kind: "ClusterRole", Name: roleName},
}
if _, err := framework.Global.KubeClient.RbacV1().ClusterRoleBindings().Create(crb); err != nil {
t.Fatal(err)
}
t.Log("Initialized cluster resources. Namespace: " + namespace)
// get global framework variables
f := framework.Global
// wait for the operator to be ready
if !usingOLM {
err = e2eutil.WaitForDeployment(t, f.KubeClient, namespace, "jaeger-operator", 1, retryInterval, timeout)
if err != nil {
return nil, err
}
}
return ctx, nil
}
func getJaegerOperatorImages(kubeclient kubernetes.Interface, namespace string) (map[string]string, error) {
imageNamesMap := make(map[string]string)
deployment, err := kubeclient.AppsV1().Deployments(namespace).Get("jaeger-operator", metav1.GetOptions{})
if err != nil {
if strings.HasSuffix(err.Error(), "not found") {
return imageNamesMap, nil
}
return imageNamesMap, err
}
containers := deployment.Spec.Template.Spec.Containers
for _, container := range containers {
if container.Name == "jaeger-operator" {
for _, env := range container.Env {
if env.Name == "WATCH_NAMESPACE" {
imageNamesMap[container.Image] = env.Value
}
}
}
}
return imageNamesMap, nil
}
func isOpenShift(t *testing.T) bool {
apiList, err := availableAPIs(framework.Global.KubeConfig)
if err != nil {
t.Logf("Error trying to find APIs: %v\n", err)
}
apiGroups := apiList.Groups
for _, group := range apiGroups {
if group.Name == "route.openshift.io" {
return true
}
}
return false
}
func availableAPIs(kubeconfig *rest.Config) (*metav1.APIGroupList, error) {
discoveryClient, err := discovery.NewDiscoveryClientForConfig(kubeconfig)
if err != nil {
return nil, err
}
apiList, err := discoveryClient.ServerGroups()
if err != nil {
return nil, err
}
return apiList, nil
}
func addToFrameworkSchemeForSmokeTests(t *testing.T) {
assert.NoError(t, framework.AddToFrameworkScheme(apis.AddToScheme, &v1.JaegerList{
TypeMeta: metav1.TypeMeta{
Kind: "Jaeger",
APIVersion: "jaegertracing.io/v1",
},
}))
if isOpenShift(t) {
assert.NoError(t, framework.AddToFrameworkScheme(osv1.AddToScheme, &osv1.Route{}))
assert.NoError(t, framework.AddToFrameworkScheme(osv1sec.AddToScheme, &osv1sec.SecurityContextConstraints{}))
}
}
// Print a stack trace to help analyze test failures. This is shorter and easier to read than debug.printstack()
func printTestStackTrace() {
i := 1
for {
_, filename, lineNumber, ok := runtime.Caller(i)
if !ok || !strings.Contains(filename, "jaeger-operator") {
break
}
fmt.Printf("\t%s#%d\n", filename, lineNumber)
i++
}
}
func undeployJaegerInstance(jaeger *v1.Jaeger) {
if !debugMode || !t.Failed() {
err := fw.Client.Delete(goctx.TODO(), jaeger)
require.NoError(t, err, "Error undeploying Jaeger")
err = e2eutil.WaitForDeletion(t, fw.Client.Client, jaeger, retryInterval, timeout)
require.NoError(t, err)
}
}
func getJaegerInstance(name, namespace string) *v1.Jaeger {
jaegerInstance := &v1.Jaeger{}
key := types.NamespacedName{Name: name, Namespace: namespace}
err := fw.Client.Get(goctx.Background(), key, jaegerInstance)
require.NoError(t, err)
return jaegerInstance
}
// ValidateHTTPResponseFunc should determine whether the response contains the desired content
type ValidateHTTPResponseFunc func(response *http.Response) (done bool, err error)
// WaitAndPollForHTTPResponse will try the targetURL until it gets the desired response or times out
func WaitAndPollForHTTPResponse(targetURL string, condition ValidateHTTPResponseFunc) (err error) {
client := http.Client{Timeout: 5 * time.Second}
request, err := http.NewRequest(http.MethodGet, targetURL, nil)
require.NoError(t, err)
err = wait.Poll(retryInterval, timeout, func() (done bool, err error) {
response, err := client.Do(request)
require.NoError(t, err)
defer response.Body.Close()
return condition(response)
})
return err
}
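// Illustrative usage of WaitAndPollForHTTPResponse (the URL and the condition are placeholders,
// not something this test suite defines):
//
//	err := WaitAndPollForHTTPResponse("http://localhost:16686/api/services",
//		func(response *http.Response) (bool, error) {
//			return response.StatusCode == http.StatusOK, nil
//		})
//	require.NoError(t, err)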
func handleSuiteTearDown() {
logrus.Info("Entering TearDownSuite()")
if !debugMode || !t.Failed() {
ctx.Cleanup()
}
}
func handleTestFailure() {
if debugMode && t.Failed() {
logrus.Errorf("Test %s failed\n", t.Name())
// FIXME find a better way to terminate tests than os.Exit(1)
}
}
type resp struct {
Data []trace `json:"data"`
}
type trace struct {
TraceID string `json:"traceID"`
Spans []span `json:"spans"`
}
type span struct {
TraceID string `json:"traceID"`
SpanID string `json:"spanID"`
}
// services models a Jaeger query API response that lists service names.
type services struct {
	Data   []string    `json:"data"`
	total  int         `json:"total"`
	limit  int         `json:"limit"`
	offset int         `json:"offset"`
	errors interface{} `json:"errors"`
}
func createJaegerInstanceFromFile(name, filename string) *v1.Jaeger {
// #nosec G204: Subprocess launching should be audited
cmd := exec.Command("kubectl", "create", "--namespace", namespace, "--filename", filename)
output, err := cmd.CombinedOutput()
if err != nil && !strings.Contains(string(output), "AlreadyExists") {
require.NoError(t, err, "Failed creating Jaeger instance with: [%s]\n", string(output))
}
return getJaegerInstance(name, namespace)
}
func smokeTestAllInOneExample(name, yamlFileName string) {
smokeTestAllInOneExampleWithTimeout(name, yamlFileName, timeout+1*time.Minute)
}
func smokeTestAllInOneExampleWithTimeout(name, yamlFileName string, to time.Duration) {
jaegerInstance := createJaegerInstanceFromFile(name, yamlFileName)
defer undeployJaegerInstance(jaegerInstance)
err := WaitForDeployment(t, fw.KubeClient, namespace, name, 1, retryInterval, to)
require.NoErrorf(t, err, "Error waiting for %s to deploy", name)
AllInOneSmokeTest(name)
}
func smokeTestProductionExample(name, yamlFileName string) {
jaegerInstance := createJaegerInstanceFromFile(name, yamlFileName)
defer undeployJaegerInstance(jaegerInstance)
queryDeploymentName := name + "-query"
collectorDeploymentName := name + "-collector"
if jaegerInstance.Spec.Strategy == v1.DeploymentStrategyStreaming {
ingesterDeploymentName := name + "-ingester"
err := WaitForDeployment(t, fw.KubeClient, namespace, ingesterDeploymentName, 1, retryInterval, timeout)
require.NoErrorf(t, err, "Error waiting for %s to deploy", ingesterDeploymentName)
}
err := WaitForDeployment(t, fw.KubeClient, namespace, queryDeploymentName, 1, retryInterval, timeout)
require.NoErrorf(t, err, "Error waiting for %s to deploy", queryDeploymentName)
err = WaitForDeployment(t, fw.KubeClient, namespace, collectorDeploymentName, 1, retryInterval, timeout)
require.NoErrorf(t, err, "Error waiting for %s to deploy", collectorDeploymentName)
ProductionSmokeTest(name)
}
func findRoute(t *testing.T, f *framework.Framework, name string) *osv1.Route {
routeList := &osv1.RouteList{}
err := wait.Poll(retryInterval, timeout, func() (bool, error) {
if err := f.Client.List(context.Background(), routeList); err != nil {
return false, err
}
if len(routeList.Items) >= 1 {
return true, nil
}
return false, nil
})
if err != nil {
t.Fatalf("Failed waiting for route: %v", err)
}
for _, r := range routeList.Items {
if strings.HasPrefix(r.Spec.Host, name) {
return &r
}
}
t.Fatal("Could not find route")
return nil
}
func getQueryURLAndHTTPClient(jaegerInstanceName, urlPattern string, insecure bool) (string, http.Client) {
var url string
var httpClient http.Client
if isOpenShift(t) {
route := findRoute(t, fw, jaegerInstanceName)
require.Len(t, route.Status.Ingress, 1, "Wrong number of ingresses.")
url = fmt.Sprintf("https://"+urlPattern, route.Spec.Host)
// #nosec
transport := &http.Transport{
TLSClientConfig: &tls.Config{InsecureSkipVerify: insecure},
}
httpClient = http.Client{Timeout: 30 * time.Second, Transport: transport}
} else {
ingress, err := WaitForIngress(t, fw.KubeClient, namespace, jaegerInstanceName+"-query", retryInterval, timeout)
require.NoError(t, err, "Failed waiting for ingress")
require.Len(t, ingress.Status.LoadBalancer.Ingress, 1, "Wrong number of ingresses.")
address := ingress.Status.LoadBalancer.Ingress[0].IP
url = fmt.Sprintf("http://"+urlPattern, address)
httpClient = http.Client{Timeout: time.Second}
}
logrus.Infof("Using Query URL [%v]\n", url)
return url, httpClient
}
func createSecret(secretName, secretNamespace string, secretData map[string][]byte) *corev1.Secret {
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: secretName,
Namespace: secretNamespace,
},
Data: secretData,
}
createdSecret, err := fw.KubeClient.CoreV1().Secrets(secretNamespace).Create(secret)
require.NoError(t, err)
WaitForSecret(secretName, secretNamespace)
return createdSecret
}
func deletePersistentVolumeClaims(namespace string) {
pvcs, err := fw.KubeClient.CoreV1().PersistentVolumeClaims(kafkaNamespace).List(metav1.ListOptions{})
require.NoError(t, err)
emptyDeleteOptions := metav1.DeleteOptions{}
for _, pvc := range pvcs.Items {
logrus.Infof("Deleting PVC %s from namespace %s", pvc.Name, namespace)
fw.KubeClient.CoreV1().PersistentVolumeClaims(kafkaNamespace).Delete(pvc.Name, &emptyDeleteOptions)
}
}
| [
"\"STORAGE_NAMESPACE\"",
"\"KAFKA_NAMESPACE\""
]
| []
| [
"KAFKA_NAMESPACE",
"STORAGE_NAMESPACE"
]
| [] | ["KAFKA_NAMESPACE", "STORAGE_NAMESPACE"] | go | 2 | 0 | |
client_integration_test.go | // +build integration
package disgord
import (
"context"
"fmt"
"os"
"strings"
"sync"
"testing"
"time"
"github.com/andersfylling/disgord/internal/logger"
)
var token = os.Getenv("DISGORD_TOKEN_INTEGRATION_TEST")
var guildTypical = struct {
ID Snowflake
TextChannelGeneral Snowflake
VoiceChannelGeneral Snowflake
VoiceChannelOther1 Snowflake
VoiceChannelOther2 Snowflake
}{
ID: ParseSnowflakeString(os.Getenv("TEST_GUILD_TYPICAL_ID")),
TextChannelGeneral: ParseSnowflakeString(os.Getenv("TEST_GUILD_TYPICAL_TEXT_GENERAL")),
VoiceChannelGeneral: ParseSnowflakeString(os.Getenv("TEST_GUILD_TYPICAL_VOICE_GENERAL")),
VoiceChannelOther1: ParseSnowflakeString(os.Getenv("TEST_GUILD_TYPICAL_VOICE_1")),
VoiceChannelOther2: ParseSnowflakeString(os.Getenv("TEST_GUILD_TYPICAL_VOICE_2")),
}
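// The integration test is configured entirely through the environment; a local run might export
// values along these lines (the token and all IDs below are placeholders):
//
//	export DISGORD_TOKEN_INTEGRATION_TEST=<bot token>
//	export TEST_GUILD_TYPICAL_ID=<guild snowflake>
//	export TEST_GUILD_TYPICAL_TEXT_GENERAL=<text channel snowflake>
//	export TEST_GUILD_TYPICAL_VOICE_GENERAL=<voice channel snowflake>
//	export TEST_GUILD_TYPICAL_VOICE_1=<voice channel snowflake>
//	export TEST_GUILD_TYPICAL_VOICE_2=<voice channel snowflake>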
func validSnowflakes() {
if guildTypical.ID.IsZero() {
panic("missing id for typical guild")
}
if guildTypical.TextChannelGeneral.IsZero() {
panic("missing id for typical guild TextChannelGeneral")
}
if guildTypical.VoiceChannelGeneral.IsZero() {
panic("missing id for typical guild VoiceChannelGeneral")
}
if guildTypical.VoiceChannelOther1.IsZero() {
panic("missing id for typical guild VoiceChannelOther1")
}
if guildTypical.VoiceChannelOther2.IsZero() {
panic("missing id for typical guild VoiceChannelOther2")
}
}
func TestClient(t *testing.T) {
validSnowflakes()
wg := &sync.WaitGroup{}
status := &UpdateStatusPayload{
Status: StatusIdle,
Game: &Activity{
Name: "hello",
},
}
var c *Client
wg.Add(1)
t.Run("New", func(t *testing.T) {
defer wg.Done()
var err error
c, err = NewClient(context.Background(), Config{
BotToken: token,
DisableCache: true,
Logger: &logger.FmtPrinter{},
Presence: status,
})
		if err != nil {
			t.Fatal("failed to initiate a client:", err)
		}
})
wg.Wait()
wg.Add(1)
t.Run("premature-emit", func(t *testing.T) {
defer wg.Done()
if _, err := c.Gateway().Dispatch(UpdateStatus, &UpdateStatusPayload{}); err == nil {
t.Fatal("Emit should have failed as no shards have been connected (initialised)")
}
})
wg.Wait()
// We need this for later.
guildCreateEvent := make(chan *GuildCreate, 2)
c.Gateway().WithCtrl(&Ctrl{Runs: 1}).GuildCreate(func(_ Session, evt *GuildCreate) {
guildCreateEvent <- evt
})
defer c.Gateway().Disconnect()
wg.Add(1)
t.Run("connect", func(t *testing.T) {
defer wg.Done()
if err := c.Gateway().Connect(); err != nil {
t.Fatal(err)
}
})
wg.Wait()
wg.Add(1)
t.Run("ready", func(t *testing.T) {
defer wg.Done()
ready := make(chan interface{}, 2)
c.Gateway().BotReady(func() {
ready <- true
})
select {
case <-time.After(10 * time.Second):
t.Fatal("unable to connect within time frame of 10s")
case <-ready:
}
})
wg.Wait()
wg.Add(1)
t.Run("default-presence", func(t *testing.T) {
defer wg.Done()
done := make(chan bool, 2)
c.Gateway().PresenceUpdate(func(_ Session, evt *PresenceUpdate) {
if !evt.User.Bot {
c.Logger().Info("was not bot")
return
}
usr, err := c.CurrentUser().Get()
if err != nil {
done <- false
return
}
if evt.User.ID != usr.ID {
return
}
if evt.Status != StatusIdle {
done <- false
return
}
game, err := evt.Game()
if err != nil {
done <- false
return
}
if game.Name != "hello" {
done <- false
return
}
done <- true
})
if _, err := c.Gateway().Dispatch(UpdateStatus, status); err != nil {
t.Fatal(err)
}
select {
case <-time.After(10 * time.Second):
// yay
// if no presence update is fired after calling emit,
// that means that no change took place.
// TODO: this test is fragile
case success := <-done:
if success {
t.Fatal("unable to set presence at boot")
}
}
})
wg.Wait()
// Add the voice state channel for later.
voiceStateChan := make(chan *VoiceStateUpdate)
wg.Add(1)
t.Run("voice/MoveTo", func(t *testing.T) {
defer wg.Done()
deadline, _ := context.WithDeadline(context.Background(), time.Now().Add(25*time.Second))
oldChannelID := guildTypical.VoiceChannelGeneral
newChannelID := guildTypical.VoiceChannelOther1
connectedToVoiceChannel := make(chan bool)
successfullyMoved := make(chan bool, 2)
done := make(chan bool)
c.Gateway().VoiceStateUpdate(func(_ Session, evt *VoiceStateUpdate) {
myself, err := c.CurrentUser().Get()
if err != nil {
panic(err)
}
if evt.UserID != myself.ID {
return
}
if evt.ChannelID == oldChannelID {
connectedToVoiceChannel <- true
return
}
if evt.ChannelID == newChannelID {
successfullyMoved <- true
successfullyMoved <- true
} else {
successfullyMoved <- false
successfullyMoved <- false
}
voiceStateChan <- evt
})
go func() {
v, err := c.Guild(guildTypical.ID).VoiceChannel(oldChannelID).Connect(false, true)
if err != nil {
t.Fatal(err)
}
select {
case <-connectedToVoiceChannel:
case <-deadline.Done():
panic("connectedToVoiceChannel did not emit")
}
if err = v.MoveTo(newChannelID); err != nil {
t.Fatal(err)
}
select {
case <-successfullyMoved:
case <-deadline.Done():
panic("successfullyMoved did not emit")
}
defer func() {
close(done)
}()
if err = v.Close(); err != nil {
t.Fatal(err)
}
<-time.After(50 * time.Millisecond)
}()
testFinished := sync.WaitGroup{}
testFinished.Add(1)
go func() {
select {
case <-time.After(10 * time.Second):
t.Fatal("switching to a different voice channel failed")
case success, ok := <-successfullyMoved:
if !ok {
t.Fatal("unexpected close of channel")
}
if !success {
t.Fatal("did not go to a different voice channel")
}
}
testFinished.Done()
}()
testFinished.Wait()
select {
case <-done:
case <-deadline.Done():
panic("done did not emit")
}
})
wg.Wait()
wg.Add(1)
t.Run("middleware", func(t *testing.T) {
defer wg.Done()
deadline, _ := context.WithDeadline(context.Background(), time.Now().Add(10*time.Second))
const prefix = "test"
content := prefix + " sads sdfjsd fkjsdf"
channelID := guildTypical.TextChannelGeneral
gotMessage := make(chan *MessageCreate)
defer close(gotMessage)
filterTestPrefix := func(evt interface{}) (ret interface{}) {
msg := (evt.(*MessageCreate)).Message
if strings.HasPrefix(msg.Content, prefix) {
return evt
}
return nil
}
filterChannel := func(evt interface{}) (ret interface{}) {
msg := (evt.(*MessageCreate)).Message
if msg.ChannelID == channelID {
return evt
}
return nil
}
c.Gateway().WithMiddleware(filterChannel, filterTestPrefix).MessageCreateChan(gotMessage)
_, err := c.Channel(channelID).WithContext(deadline).CreateMessage(&CreateMessageParams{Content: content})
if err != nil {
panic(fmt.Errorf("unable to send message. %w", err))
}
select {
case msg := <-gotMessage:
if msg.Message.Content != content {
panic("unexpected message content")
}
case <-deadline.Done():
panic("message create event did not trigger within the deadline")
}
})
wg.Wait()
wg.Add(1)
t.Run("test-member-guild-user-id-non-zero", func(t *testing.T) {
defer wg.Done()
deadline, _ := context.WithDeadline(context.Background(), time.Now().Add(25*time.Second))
// Test guild create event
select {
case x := <-guildCreateEvent:
firstMember := x.Guild.Members[0]
if firstMember.GuildID == 0 {
panic("GuildID is zero")
} else if firstMember.UserID == 0 {
panic("UserID is zero")
}
case <-deadline.Done():
panic("guildCreateEvent did not emit")
}
// Test message create event
snowflakeChan := make(chan Snowflake, 2)
c.Gateway().WithCtrl(&Ctrl{Runs: 1}).MessageCreate(func(_ Session, evt *MessageCreate) {
if evt.Message.Author.Bot && evt.Message.Member != nil {
snowflakeChan <- evt.Message.Member.GuildID
snowflakeChan <- evt.Message.Member.UserID
}
})
msg, err := c.WithContext(deadline).SendMsg(guildTypical.TextChannelGeneral, "Hello World!")
if err != nil {
panic(err)
}
select {
case x := <-snowflakeChan:
if x == 0 {
panic("GuildID is zero")
}
case <-deadline.Done():
panic("snowflakeChan did not emit")
}
if <-snowflakeChan == 0 {
panic("UserID is zero")
}
// Test message update event
snowflakeChan = make(chan Snowflake, 2)
c.Gateway().WithCtrl(&Ctrl{Runs: 1}).MessageUpdate(func(_ Session, evt *MessageUpdate) {
if evt.Message.Author.Bot {
snowflakeChan <- evt.Message.Member.GuildID
snowflakeChan <- evt.Message.Member.UserID
}
})
_, err = c.Channel(guildTypical.TextChannelGeneral).Message(msg.ID).WithContext(deadline).UpdateBuilder().SetContent("world").Execute()
if err != nil {
panic(err)
}
select {
case x := <-snowflakeChan:
if x == 0 {
panic("GuildID is zero")
}
case <-deadline.Done():
panic("snowflakeChan did not emit")
}
if <-snowflakeChan == 0 {
panic("UserID is zero")
}
// GC the message
_ = c.Channel(guildTypical.TextChannelGeneral).Message(msg.ID).WithContext(deadline).Delete()
// Handle voice state update
select {
case x := <-voiceStateChan:
if x.Member.GuildID == 0 {
panic("GuildID is zero")
} else if x.Member.UserID == 0 {
panic("UserID is zero")
}
case <-deadline.Done():
panic("voiceStateChan did not emit")
}
// Test getting a member
member, err := c.Guild(guildTypical.ID).Member(c.botID).WithContext(deadline).Get(IgnoreCache)
if err != nil {
panic(err)
}
if member.GuildID == 0 {
panic("GuildID is zero")
} else if member.UserID == 0 {
panic("UserID is zero")
}
})
wg.Wait()
}
func TestConnectWithShards(t *testing.T) {
validSnowflakes()
<-time.After(6 * time.Second) // avoid identify abuse
c := New(Config{
BotToken: token,
DisableCache: true,
Logger: &logger.FmtPrinter{},
ShardConfig: ShardConfig{
ShardIDs: []uint{0, 1},
},
})
defer c.Gateway().Disconnect()
if err := c.Gateway().Connect(); err != nil {
t.Fatal(err)
}
done := make(chan interface{}, 2)
c.Gateway().BotReady(func() {
done <- true
})
select {
case <-time.After(15 * time.Second):
t.Fatal("unable to connect within time frame of 10s")
case <-done:
}
}
func TestConnectWithSeveralInstances(t *testing.T) {
validSnowflakes()
<-time.After(6 * time.Second) // avoid identify abuse
createInstance := func(shardIDs []uint, shardCount uint) *Client {
return New(Config{
BotToken: token,
DisableCache: true,
Logger: &logger.FmtPrinter{},
ShardConfig: ShardConfig{
ShardIDs: shardIDs,
ShardCount: shardCount,
},
})
}
ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(20*time.Second))
done := make(chan interface{}, 2)
instanceReady := make(chan interface{}, 3)
go func() {
untilZero := 2
for {
select {
case <-instanceReady:
untilZero--
case <-ctx.Done():
return
}
if untilZero == 0 {
done <- true
return
}
}
}()
shardCount := uint(2)
var instances []*Client
for i := uint(0); i < shardCount; i++ {
instance := createInstance([]uint{i}, shardCount)
instances = append(instances, instance)
instance.Gateway().BotReady(func() {
instanceReady <- true
})
if err := instance.Gateway().Connect(); err != nil {
cancel()
t.Error(err)
return
}
<-time.After(5 * time.Second)
}
defer func() {
for i := range instances {
_ = instances[i].Gateway().Disconnect()
}
}()
select {
case <-ctx.Done():
t.Error("unable to connect within time frame")
case <-done:
}
}
func TestREST(t *testing.T) {
const andersfylling = Snowflake(769640669135896586)
validSnowflakes()
c := New(Config{
BotToken: token,
DisableCache: true,
Logger: &logger.FmtPrinter{},
})
deadline, _ := context.WithDeadline(context.Background(), time.Now().Add(25*time.Second))
// -------------------
// CHANNELS
// -------------------
t.Run("channel", func(t *testing.T) {
func() {
channel, err := c.Channel(guildTypical.TextChannelGeneral).WithContext(deadline).Get()
if err != nil {
panic(err)
} else if channel == nil {
t.Error(fmt.Errorf("fetched channel is nil. %w", err))
} else if channel.ID != guildTypical.TextChannelGeneral {
t.Errorf("incorrect channel id. Got %s, wants %s", channel.ID.String(), guildTypical.TextChannelGeneral.String())
}
}()
// create DM & send a message
func() {
channel, err := c.User(andersfylling).WithContext(deadline).CreateDM()
if err != nil {
t.Error(fmt.Errorf("unable to create DM with user. %w", err))
} else if channel == nil {
t.Error(fmt.Errorf("returned DM channel is nil. %w", err))
}
content := "hi"
msg, err := c.Channel(channel.ID).WithContext(deadline).CreateMessage(&CreateMessageParams{Content: content})
if err != nil {
t.Error(fmt.Errorf("unable to create message in DM channel. %w", err))
}
if msg == nil {
t.Error("returned message was nil")
} else if msg.Content != content {
t.Errorf("unexpected message content from DM. Got %s, wants %s", msg.Content, content)
}
}()
})
// -------------------
// Current User
// -------------------
t.Run("current-user", func(t *testing.T) {
if _, err := c.CurrentUser().Get(IgnoreCache); err != nil {
t.Error(fmt.Errorf("unable to fetch current user. %w", err))
}
})
// -------------------
// User
// -------------------
t.Run("user", func(t *testing.T) {
const userID = andersfylling
user, err := c.User(userID).WithContext(deadline).Get(IgnoreCache)
if err != nil {
t.Error(fmt.Errorf("unable to fetch user. %w", err))
} else if user == nil {
t.Error("fetched user was nil")
} else if user.ID != userID {
t.Errorf("unexpected user id. Got %s, wants %s", user.ID.String(), userID.String())
}
})
// -------------------
// Voice Region
// -------------------
t.Run("voice-region", func(t *testing.T) {
regions, err := c.WithContext(deadline).GetVoiceRegions(IgnoreCache)
if err != nil {
t.Error(fmt.Errorf("unable to fetch voice regions. %w", err))
}
if len(regions) < 1 {
t.Error("expected at least one voice region")
}
})
// -------------------
// Members
// -------------------
t.Run("members", func(t *testing.T) {
members, err := c.Guild(guildTypical.ID).GetMembers(nil, IgnoreCache)
if err != nil {
t.Error("failed to fetched members over REST, ", err)
}
if len(members) == 0 {
t.Error("expected there to be members. None found.")
}
})
// -------------------
// Audit Logs
// -------------------
// t.Run("audit-logs", func(t *testing.T) {
// })
}
| [
"\"DISGORD_TOKEN_INTEGRATION_TEST\"",
"\"TEST_GUILD_TYPICAL_ID\"",
"\"TEST_GUILD_TYPICAL_TEXT_GENERAL\"",
"\"TEST_GUILD_TYPICAL_VOICE_GENERAL\"",
"\"TEST_GUILD_TYPICAL_VOICE_1\"",
"\"TEST_GUILD_TYPICAL_VOICE_2\""
]
| []
| [
"TEST_GUILD_TYPICAL_TEXT_GENERAL",
"TEST_GUILD_TYPICAL_ID",
"DISGORD_TOKEN_INTEGRATION_TEST",
"TEST_GUILD_TYPICAL_VOICE_1",
"TEST_GUILD_TYPICAL_VOICE_GENERAL",
"TEST_GUILD_TYPICAL_VOICE_2"
]
| [] | ["TEST_GUILD_TYPICAL_TEXT_GENERAL", "TEST_GUILD_TYPICAL_ID", "DISGORD_TOKEN_INTEGRATION_TEST", "TEST_GUILD_TYPICAL_VOICE_1", "TEST_GUILD_TYPICAL_VOICE_GENERAL", "TEST_GUILD_TYPICAL_VOICE_2"] | go | 6 | 0 | |
ATLASExperiment.py | # Class definition:
# ATLASExperiment
# This class is the ATLAS experiment class inheriting from Experiment
# Instances are generated with ExperimentFactory via pUtil::getExperiment()
# Implemented as a singleton class
# http://stackoverflow.com/questions/42558/python-and-the-singleton-pattern
# Import relevant python/pilot modules
from Experiment import Experiment # Main experiment class
from pUtil import tolog # Logging method that sends text to the pilot log
from pUtil import readpar # Used to read values from the schedconfig DB (queuedata)
from pUtil import isAnalysisJob # Is the current job a user analysis job or a production job?
from pUtil import grep # Grep function - reimplement using cli command
from pUtil import getCmtconfig # Get the cmtconfig from the job def or queuedata
from pUtil import getCmtconfigAlternatives # Get a list of locally available cmtconfigs
from pUtil import verifyReleaseString # To verify the release string (move to Experiment later)
from pUtil import getProperTimeout #
from pUtil import timedCommand # Protect cmd with timed_command
from pUtil import getSiteInformation # Get the SiteInformation object corresponding to the given experiment
from pUtil import isBuildJob # Is the current job a build job?
from pUtil import remove # Used to remove redundant file before log file creation
from pUtil import extractFilePaths # Used by verifySetupCommand
from pUtil import getInitialDirs # Used by getModernASetup()
from PilotErrors import PilotErrors # Error codes
from FileHandling import readFile, writeFile # File handling methods
from FileHandling import updatePilotErrorReport # Used to set the priority of an error
from RunJobUtilities import dumpOutput # ASCII dump
from RunJobUtilities import getStdoutFilename #
from RunJobUtilities import findVmPeaks #
from RunJobUtilities import getSourceSetup #
# Standard python modules
import re
import os
import time
import commands
from glob import glob
class ATLASExperiment(Experiment):
# private data members
__experiment = "ATLAS"
__instance = None # Boolean used by subclasses to become a Singleton
__warning = ""
__analysisJob = False
__job = None # Current Job object
__error = PilotErrors() # PilotErrors object
__doFileLookups = False # True for LFC based file lookups
__atlasEnv = False # True for releases beginning with "Atlas-"
# Required methods
# def __init__(self, *args, **kwargs):
def __init__(self):
""" Default initialization """
pass
# super(ATLASExperiment, self).__init__(self, *args, **kwargs)
def __new__(cls, *args, **kwargs):
""" Override the __new__ method to make the class a singleton """
if not cls.__instance:
cls.__instance = super(ATLASExperiment, cls).__new__(cls, *args, **kwargs)
# cls.__instance = super(ATLASExperiment, cls).__new__(cls)
return cls.__instance
def getExperiment(self):
""" Return a string with the experiment name """
return self.__experiment
def setParameters(self, *args, **kwargs):
""" Set any internally needed variables """
# set initial values
self.__job = kwargs.get('job', None)
if self.__job:
self.__analysisJob = isAnalysisJob(self.__job.trf)
else:
self.__warning = "setParameters found no job object"
def updateCmd1WithProject(self, cmd1, atlasProject):
""" Add the known project to the setup command """
if atlasProject != "" and atlasProject not in cmd1:
cmd1 = cmd1.replace("notest","%s,notest" % (atlasProject))
tolog("cmd1 = %s" % (cmd1))
return cmd1
def addMAKEFLAGS(self, jobCoreCount, cmd2):
""" Correct for multi-core if necessary (especially important in case coreCount=1 to limit parallel make) """
# ATHENA_PROC_NUMBER is set in Node.py using the schedconfig value
try:
coreCount = int(os.environ['ATHENA_PROC_NUMBER'])
except:
coreCount = -1
if coreCount == -1:
try:
coreCount = int(jobCoreCount)
except:
pass
else:
if coreCount >= 1:
cmd2 += 'export MAKEFLAGS="j%d QUICK=1 -l1";' % (coreCount)
tolog("Added multi-core support to cmd2: %s" % (cmd2))
# make sure that MAKEFLAGS is always set
if not "MAKEFLAGS=" in cmd2:
cmd2 += 'export MAKEFLAGS="j1 QUICK=1 -l1";'
return cmd2
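    # Illustrative examples (assuming ATHENA_PROC_NUMBER is not set in the environment):
    #   addMAKEFLAGS(4, "")  -> 'export MAKEFLAGS="j4 QUICK=1 -l1";'
    #   addMAKEFLAGS(-1, "") -> 'export MAKEFLAGS="j1 QUICK=1 -l1";'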
def getJobExecutionCommand(self, job, jobSite, pilot_initdir, _checkCMTCONFIG = True):
""" Define and test the command(s) that will be used to execute the payload """
# Input tuple: (method is called from RunJob*)
# job: Job object
# jobSite: Site object
# pilot_initdir: launch directory of pilot.py
#
# Return tuple:
# pilot_error_code, pilot_error_diagnostics, job_execution_command, special_setup_command, JEM, cmtconfig
# where
# pilot_error_code : self.__error.<PILOT ERROR CODE as defined in PilotErrors class> (value should be 0 for successful setup)
# pilot_error_diagnostics: any output from problematic command or explanatory error diagnostics
# job_execution_command : command to execute payload, e.g. cmd = "source <path>/setup.sh; <path>/python trf.py [options]"
        # special_setup_command : any special setup command that can be inserted into job_execution_command and is sent to stage-in/out methods
# JEM : Job Execution Monitor activation state (default value "NO", meaning JEM is not to be used. See JEMstub.py)
# cmtconfig : cmtconfig symbol from the job def or schedconfig, e.g. "x86_64-slc5-gcc43-opt"
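        # For illustration only (not an actual job): a successful call returns a tuple of the form
        #   (0, "", "<setup command>; <trf> [options]", "", "NO", "x86_64-slc5-gcc43-opt")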
pilotErrorDiag = ""
cmd = ""
special_setup_cmd = ""
pysiteroot = ""
siteroot = ""
JEM = "NO"
# Is it's an analysis job or not?
analysisJob = isAnalysisJob(job.trf)
# Set the INDS env variable (used by runAthena)
if analysisJob:
self.setINDS(job.realDatasetsIn)
# Command used to download runAthena or runGen
wgetCommand = 'wget'
# Get the cmtconfig value
cmtconfig = getCmtconfig(job.cmtconfig)
# Get the local path for the software
swbase = self.getSwbase(jobSite.appdir, job.release, job.homePackage, job.processingType, cmtconfig)
tolog("Local software path: swbase = %s" % (swbase))
# Get cmtconfig alternatives
cmtconfig_alternatives = getCmtconfigAlternatives(cmtconfig, swbase)
tolog("Found alternatives to cmtconfig: %s (the first item is the default cmtconfig value)" % str(cmtconfig_alternatives))
# Update the job parameters --input option for Merge trf's (to protect against potentially too long file lists)
# if "--input=" in job.jobPars and "Merge_tf" in job.trf:
# tolog("Will update input file list in job parameters and create input file list for merge job")
# job.jobPars = self.updateJobParameters4Input(job.jobPars)
# Is it a standard ATLAS job? (i.e. with swRelease = 'Atlas-...')
if self.__atlasEnv :
# Define the job runtime environment
if not analysisJob and job.trf.endswith('.py'): # for production python trf
tolog("Production python trf")
if os.environ.has_key('VO_ATLAS_SW_DIR'):
scappdir = readpar('appdir')
# is this release present in the tags file?
if scappdir == "":
rel_in_tags = self.verifyReleaseInTagsFile(os.environ['VO_ATLAS_SW_DIR'], job.release)
if not rel_in_tags:
tolog("WARNING: release was not found in tags file: %s" % (job.release))
# tolog("!!FAILED!!3000!! ...")
# failJob(0, self.__error.ERR_MISSINGINSTALLATION, job, pilotserver, pilotport, ins=ins)
# swbase = os.environ['VO_ATLAS_SW_DIR'] + '/software'
# Get the proper siteroot and cmtconfig
ec, pilotErrorDiag, status, siteroot, cmtconfig = self.getProperSiterootAndCmtconfig(swbase, job.release, job.homePackage, cmtconfig)
if not status:
tolog("!!WARNING!!3000!! Since setup encountered problems, any attempt of trf installation will fail (bailing out)")
tolog("ec=%d" % (ec))
tolog("pilotErrorDiag=%s" % (pilotErrorDiag))
# Set the error priority
updatePilotErrorReport(ec, pilotErrorDiag, "1", job.jobId, pilot_initdir)
return ec, pilotErrorDiag, "", special_setup_cmd, JEM, cmtconfig
else:
tolog("Will use SITEROOT=%s" % (siteroot))
pysiteroot = siteroot
else:
if verifyReleaseString(job.release) != "NULL":
_s = os.path.join(os.path.join(swbase, cmtconfig), job.release)
if os.path.exists(_s):
siteroot = _s
else:
siteroot = os.path.join(swbase, job.release)
else:
siteroot = swbase
siteroot = siteroot.replace('//','/')
# Get the install dir and update siteroot if necessary (dynamically install patch if not available)
ec, pilotErrorDiag, siteroot, installDir = self.getInstallDir(job, jobSite.workdir, siteroot, swbase, cmtconfig)
if ec != 0:
return ec, pilotErrorDiag, "", special_setup_cmd, JEM, cmtconfig
# Get the cmtsite setup command
ec, pilotErrorDiag, cmd1 = self.getCmtsiteCmd(swbase, job.release, job.homePackage, cmtconfig, siteroot=pysiteroot)
if ec != 0:
return ec, pilotErrorDiag, "", special_setup_cmd, JEM, cmtconfig
# Add the project to the setup (only for HLT jobs for now, both on cvmfs and afs)
if "AtlasP1HLT" in job.homePackage or "AtlasHLT" in job.homePackage:
cmd1 = self.updateCmd1WithProject(cmd1, atlasProject)
#########
#########
                # Dirty hack for Titan & Anselm to use local CondDB
if 'HPC_Titan' in readpar("catchall"):
cmd1 = cmd1 + ' --cmtextratags=ATLAS,useDBRelease'
if 'HPC_Anselm' in readpar("catchall"):
cmd1 = cmd1 + ' --cmtextratags=ATLAS,useDBRelease'
#######
#######
tolog("cmd1 = %s" % (cmd1))
# Get cmd2 for production jobs for set installDirs (not the case for unset homepackage strings)
if installDir != "":
cmd2, pilotErrorDiag = self.getProdCmd2(installDir, job.homePackage)
if pilotErrorDiag != "":
return self.__error.ERR_SETUPFAILURE, pilotErrorDiag, "", special_setup_cmd, JEM, cmtconfig
else:
cmd2 = ""
if 'HPC_HPC' in readpar("catchall"):
cmd2 = "export G4ATLAS_SKIPFILEPEEK=1"
# Set special_setup_cmd if necessary
special_setup_cmd = self.getSpecialSetupCommand()
else: # for analysis python trf
tolog("Preparing analysis job setup command")
# try alternatives to cmtconfig if necessary
first = True
first_ec = 0
first_pilotErrorDiag = ""
for cmtconfig in cmtconfig_alternatives:
ec = 0
pilotErrorDiag = ""
tolog("Testing cmtconfig=%s" % (cmtconfig))
# Get the cmd2 setup command before cmd1 is defined since cacheDir/Ver can be used in cmd1
cmd2, cacheDir, cacheVer = self.getAnalyCmd2(swbase, cmtconfig, job.homePackage, job.release)
# Add sub path in case of AnalysisTransforms homePackage
if verifyReleaseString(job.homePackage) != "NULL":
reSubDir = re.search('AnalysisTransforms[^/]*/(.+)', job.homePackage)
subDir = ""
if reSubDir != None:
subDir = reSubDir.group(1)
tolog("subDir = %s" % (subDir))
else:
subDir = ""
path = os.path.join(swbase, subDir)
# Define cmd0 and cmd1
if verifyReleaseString(job.release) != "NULL":
if job.release < "16.1.0":
cmd0 = "source %s/%s/setup.sh;" % (path, job.release)
tolog("cmd0 = %s" % (cmd0))
else:
cmd0 = ""
tolog("cmd0 will not be used for release %s" % (job.release))
else:
cmd0 = ""
# Get the cmtsite setup command
ec, pilotErrorDiag, cmd1 = \
self.getCmtsiteCmd(swbase, job.release, job.homePackage, cmtconfig, analysisJob=True, siteroot=siteroot, cacheDir=cacheDir, cacheVer=cacheVer)
if ec != 0:
# Store the first error code
if first:
first = False
first_ec = ec
first_pilotErrorDiag = pilotErrorDiag
# Function failed, try the next cmtconfig value or exit
continue
tolog("cmd1 = %s" % (cmd1))
# Exit if the tests above failed
if ec != 0:
# Use the first error code if set
if first_ec != 0:
tolog("Will report the first encountered problem: ec=%d, pilotErrorDiag=%s" % (first_ec, first_pilotErrorDiag))
ec = first_ec
pilotErrorDiag = first_pilotErrorDiag
return ec, pilotErrorDiag, "", special_setup_cmd, JEM, cmtconfig
# Cannot update cmd2/siteroot for unset release/homepackage strings
if verifyReleaseString(job.release) == "NULL" or verifyReleaseString(job.homePackage) == "NULL":
tolog("Will not update cmd2/siteroot since release/homepackage string is NULL")
else:
# Update cmd2 with AtlasVersion and AtlasProject from setup (and siteroot if not set)
_useAsetup = self.useAtlasSetup(swbase, job.release, job.homePackage, cmtconfig)
cmd2 = self.updateAnalyCmd2(cmd2, atlasVersion, atlasProject, _useAsetup)
tolog("cmd2 = %s" % (cmd2))
tolog("siteroot = %s" % (siteroot))
# Set special_setup_cmd if necessary
special_setup_cmd = self.getSpecialSetupCommand()
# correct for multi-core if necessary (especially important in case coreCount=1 to limit parallel make)
cmd2 = self.addMAKEFLAGS(job.coreCount, cmd2)
# Prepend cmd0 to cmd1 if set and if release < 16.1.0
if cmd0 != "" and job.release < "16.1.0":
cmd1 = cmd0 + cmd1
# construct the command of execution
if analysisJob:
# Try to download the trf
status, pilotErrorDiag, trfName = self.getAnalysisTrf(wgetCommand, job.trf, pilot_initdir)
if status != 0:
return status, pilotErrorDiag, "", special_setup_cmd, JEM, cmtconfig
# Set up runAthena
ec, pilotErrorDiag, cmd3 = self.getAnalysisRunCommand(job, jobSite, trfName)
if ec != 0:
return ec, pilotErrorDiag, "", special_setup_cmd, JEM, cmtconfig
# NOTE: if TURL based PFC creation fails, getAnalysisRunCommand() needs to be rerun
# Might not be possible, so if a user analysis job fails during TURL based PFC creation, fail the job
# Or can remote I/O features just be turned off and cmd3 corrected accordingly?
elif job.trf.endswith('.py'): # for python prod trf
if os.environ.has_key('VO_ATLAS_SW_DIR'):
# set python executable (after SITEROOT has been set)
if siteroot == "":
try:
siteroot = os.environ['SITEROOT']
except:
tolog("Warning: $SITEROOT unknown at this stage (2)")
if pysiteroot == "":
tolog("Will use SITEROOT: %s (2)" % (siteroot))
ec, pilotErrorDiag, pybin = self.setPython(siteroot, job.release, job.homePackage, cmtconfig, jobSite.sitename)
else:
tolog("Will use pysiteroot: %s (2)" % (pysiteroot))
ec, pilotErrorDiag, pybin = self.setPython(pysiteroot, job.release, job.homePackage, cmtconfig, jobSite.sitename)
if ec == self.__error.ERR_MISSINGINSTALLATION:
return ec, pilotErrorDiag, "", special_setup_cmd, JEM, cmtconfig
# Prepare the cmd3 command with the python from the release and the full path to the trf
_cmd = cmd1
if cmd2 != "": # could be unset (in the case of unset homepackage strings)
_cmd += ";" + cmd2
cmd3 = self.getProdCmd3(_cmd, pybin, job.trf, job.jobPars)
else:
cmd3 = "%s %s" % (job.trf, job.jobPars)
elif verifyReleaseString(job.homePackage) != 'NULL':
cmd3 = "%s/kitval/KitValidation/JobTransforms/%s/%s %s" %\
(swbase, job.homePackage, job.trf, job.jobPars)
else:
cmd3 = "%s/kitval/KitValidation/JobTransforms/%s %s" %\
(swbase, job.trf, job.jobPars)
tolog("cmd3 = %s" % (cmd3))
# Create the final command string
if 'HPC_' in readpar("catchall"):
cmd1 = cmd1.replace(';;',';')
cmd2 = cmd2.replace(';;',';')
a_cmd1 = cmd1.split(";")
a_cmd2 = cmd2.split(";")
a_cmd1.extend(a_cmd2)
# ensure each setup fragment ends with a semicolon
a_cmd1 = [s + ';' for s in a_cmd1]
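# on HPC sites the run command is a dictionary: interpreter, payload (the trf command), parameters and the list of setup/environment fragments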
cmd = { "interpreter": '',
"payload": cmd3,
"parameters": '',
"environment": a_cmd1 }
else:
cmd = cmd1
if cmd2 != "":
cmd += ";" + cmd2
if cmd3 != "":
cmd += ";" + cmd3
# cmd2 and MAKEFLAGS can add an extra ;-sign, remove it
cmd = cmd.replace(';;',';')
tolog("cmd = %s" % (cmd))
else: # Generic, non-ATLAS specific jobs, or at least a job with undefined swRelease
tolog("Generic job")
# Set python executable (after SITEROOT has been set)
if siteroot == "":
try:
siteroot = os.environ['SITEROOT']
except:
tolog("Warning: $SITEROOT unknown at this stage (3)")
tolog("Will use $SITEROOT: %s (3)" % (siteroot))
ec, pilotErrorDiag, pybin = self.setPython(siteroot, job.release, job.homePackage, cmtconfig, jobSite.sitename)
if ec == self.__error.ERR_MISSINGINSTALLATION:
return ec, pilotErrorDiag, "", special_setup_cmd, JEM, cmtconfig
if analysisJob:
# Try to download the analysis trf
status, pilotErrorDiag, trfName = self.getAnalysisTrf(wgetCommand, job.trf, pilot_initdir)
if status != 0:
return status, pilotErrorDiag, "", special_setup_cmd, JEM, cmtconfig
# Set up the run command
if job.prodSourceLabel == 'ddm' or job.prodSourceLabel == 'software':
cmd = '%s %s %s' % (pybin, trfName, job.jobPars)
else:
ec, pilotErrorDiag, cmd = self.getAnalysisRunCommand(job, jobSite, trfName)
if ec != 0:
return ec, pilotErrorDiag, "", special_setup_cmd, JEM, cmtconfig
# should asetup be used? If so, squeeze it into the run command (rather than moving the entire getAnalysisRunCommand() into this class)
m_cacheDirVer = re.search('AnalysisTransforms-([^/]+)', job.homePackage)
if m_cacheDirVer != None:
# homePackage="AnalysisTransforms-AthAnalysisBase_2.0.14"
# -> cacheDir = AthAnalysisBase, cacheVer = 2.0.14
cacheDir, cacheVer = self.getCacheInfo(m_cacheDirVer, "dummy_atlasRelease")
tolog("cacheDir = %s" % (cacheDir))
tolog("cacheVer = %s" % (cacheVer))
if cacheDir != "" and cacheVer != "":
#asetup = "export AtlasSetup=%s/%s/%s/%s/AtlasSetup; " % (swbase, cacheDir, cmtconfig, cacheVer)
#asetup += "source $AtlasSetup/scripts/asetup.sh %s,%s --cmtconfig=%s;" % (cacheDir, cacheVer, cmtconfig)
asetup = self.getModernASetup()
asetup += " %s,%s --cmtconfig=%s;" % (cacheDir, cacheVer, cmtconfig)
# now squeeze it back in
cmd = cmd.replace('./' + trfName, asetup + './' + trfName)
tolog("Updated run command for special homePackage: %s" % (cmd))
else:
tolog("asetup not needed (mo special home package: %s)" % (homePackage))
else:
tolog("asetup not needed (no special homePackage)")
elif verifyReleaseString(job.homePackage) != 'NULL' and job.homePackage != ' ':
if 'HPC_' in readpar("catchall"):
cmd = {"interpreter": pybin,
"payload": ("%s/%s" % (job.homePackage, job.trf)),
"parameters": job.jobPars,
"environment": [] }
else:
cmd = "%s %s/%s %s" % (pybin, job.homePackage, job.trf, job.jobPars)
else:
if 'HPC_' in readpar("catchall"):
cmd = {"interpreter": pybin,
"payload": job.trf,
"parameters": job.jobPars,
"environment": [] }
else:
cmd = "%s %s %s" % (pybin, job.trf, job.jobPars)
# Set special_setup_cmd if necessary
special_setup_cmd = self.getSpecialSetupCommand()
# add FRONTIER debugging and RUCIO env variables
if 'HPC_' in readpar("catchall"):
cmd['environment'] = cmd['environment'] + self.getEnvVars2Cmd(job.jobId, job.processingType, jobSite.sitename, analysisJob)
else:
cmd = self.addEnvVars2Cmd(cmd, job.jobId, job.processingType, jobSite.sitename, analysisJob)
# Is JEM allowed to be used?
if self.isJEMAllowed():
metaOut = {}
try:
import sys
from JEMstub import updateRunCommand4JEM
# If JEM should be used, the command will get updated by the JEMstub automatically.
cmd = updateRunCommand4JEM(cmd, job, jobSite, tolog, metaOut=metaOut)
except:
# On failure, cmd stays the same
tolog("Failed to update run command for JEM - will run unmonitored.")
# Is JEM to be used?
if metaOut.has_key("JEMactive"):
JEM = metaOut["JEMactive"]
tolog("Use JEM: %s (dictionary = %s)" % (JEM, str(metaOut)))
elif '--enable-jem' in cmd:
tolog("!!WARNING!!1111!! JEM can currently only be used on certain sites in DE")
# Pipe stdout/err for payload to files
if 'HPC_' not in readpar("catchall"):
cmd += " 1>%s 2>%s" % (job.stdout, job.stderr)
tolog("\nCommand to run the job is: \n%s" % (cmd))
tolog("ATLAS_PYTHON_PILOT = %s" % (os.environ['ATLAS_PYTHON_PILOT']))
if special_setup_cmd != "":
tolog("Special setup command: %s" % (special_setup_cmd))
return 0, pilotErrorDiag, cmd, special_setup_cmd, JEM, cmtconfig
def getFileLookups(self):
""" Return the file lookup boolean """
return self.__doFileLookups
def doFileLookups(self, doFileLookups):
""" Update the file lookups boolean """
self.__doFileLookups = doFileLookups
def willDoFileLookups(self):
""" Should (LFC) file lookups be done by the pilot or not? """
status = False
if readpar('lfchost') != "" and self.getFileLookups():
status = True
if status:
tolog("File lookups from %s" % (readpar('lfchost')))
else:
tolog("Will not do any file lookups")
return status
def willDoAlternativeFileLookups(self):
""" Should file lookups be done using alternative methods? """
# E.g. in the migration period where LFC lookups are halted in favour of other methods in the DQ2/Rucio API
# (for ATLAS), this method could be useful. See the usage in Mover::getReplicaDictionary() which is called
# after Experiment::willDoFileLookups() defined above. The motivation is that direct LFC calls are not to be
# used any longer by the pilot, and in the migration period the actual LFC calls will be done in the DQ2/Rucio
# API. Eventually this API will switch to alternative file lookups.
tolog("Using alternative file catalog lookups")
return True
def willDoFileRegistration(self):
""" Should (LFC) file registration be done by the pilot or not? """
status = False
# should the LFC file registration be done by the pilot or by the server?
if readpar('lfcregister') != "server":
status = True
# make sure that the lcgcpSiteMover (and thus lcg-cr) is not used
if readpar('copytool') == "lcgcp" or readpar('copytool') == "lcg-cp":
status = False
return status
# Additional optional methods
def removeRedundantFiles(self, workdir):
""" Remove redundant files and directories """
tolog("Removing redundant files prior to log creation")
dir_list = ["AtlasProduction*",
"AtlasPoint1",
"AtlasTier0",
"buildJob*",
"CDRelease*",
"csc*.log",
"DBRelease*",
"EvgenJobOptions",
"external",
"fort.*",
"geant4",
"geomDB",
"geomDB_sqlite",
"home",
"o..pacman..o",
"pacman-*",
"python",
"runAthena*",
"share",
"sources.*",
"sqlite*",
"sw",
"tcf_*",
"triggerDB",
"trusted.caches",
"workdir",
"*.data*",
"*.events",
"*.py",
"*.pyc",
"*.root*",
"JEM",
"tmp*",
"*.tmp",
"*.TMP",
"MC11JobOptions",
"scratch",
"jobState-*-test.pickle",
"*.writing",
"pwg*",
"pwhg*",
"*PROC*",
"HPC",
"saga",
"radical"]
# remove core and pool.root files from AthenaMP sub directories
try:
self.cleanupAthenaMP(workdir)
except Exception, e:
tolog("!!WARNING!!2341!! Failed to execure cleanupAthenaMP(): %s" % (e))
# note: these should be partial file/dir names, not containing any wildcards
exceptions_list = ["runargs", "runwrapper", "jobReport", "log."]
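# files/dirs matching an entry in exceptions_list are kept even if they match a pattern in dir_list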
for _dir in dir_list:
files = glob(os.path.join(workdir, _dir))
exclude = []
# remove any dirs/files from the exceptions list
if files:
for exc in exceptions_list:
for f in files:
if exc in f:
exclude.append(f)
if exclude != []:
tolog('To be excluded from removal: %s' % (exclude))
_files = []
for f in files:
if not f in exclude:
_files.append(f)
files = _files
tolog("To be removed: %s" % (files))
rc = remove(files)
if not rc:
tolog("IGNORE: Failed to remove redundant file(s): %s" % (files))
def getWarning(self):
""" Return any warning message passed to __warning """
return self.__warning
def displayChangeLog(self):
""" Display the cvmfs ChangeLog is possible """
# 'head' the ChangeLog on cvmfs (/cvmfs/atlas.cern.ch/repo/sw/ChangeLog)
# get the site information object
si = getSiteInformation(self.__experiment)
appdir = readpar('appdir')
if appdir == "":
if os.environ.has_key('VO_ATLAS_SW_DIR'):
appdir = os.environ['VO_ATLAS_SW_DIR']
else:
appdir = ""
if appdir != "":
# there might be more than one appdir, try them all
appdirs = si.getAppdirs(appdir)
tolog("appdirs = %s" % str(appdirs))
for appdir in appdirs:
path = os.path.join(appdir, 'ChangeLog')
if os.path.exists(path):
try:
rs = commands.getoutput("head %s" % (path))
except Exception, e:
tolog("!!WARNING!!1232!! Failed to read the ChangeLog: %s" % (e))
else:
rs = "\n"+"-"*80 + "\n" + rs
rs += "\n"+"-"*80
tolog("head of %s: %s" % (path, rs))
else:
tolog("No such path: %s (ignore)" % (path))
else:
tolog("Can not display ChangeLog: Found no appdir")
def testImportLFCModule(self):
""" Can the LFC module be imported? """
status = False
try:
import lfc
except Exception, e:
tolog("!!WARNING!!3111!! Failed to import the LFC module: %s" % (e))
else:
tolog("Successfully imported the LFC module")
status = True
return status
def getCVMFSPath(self):
""" Return the proper cvmfs path """
# get the site information object
si = getSiteInformation(self.__experiment)
return si.getFileSystemRootPath()
def testCVMFS(self):
""" Run the CVMFS diagnostics tool """
status = False
timeout = 5*60
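# run the checkValidity.sh diagnostics script from ATLASLocalRootBase with a five minute time-out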
cmd = "export ATLAS_LOCAL_ROOT_BASE=%s/atlas.cern.ch/repo/ATLASLocalRootBase;$ATLAS_LOCAL_ROOT_BASE/utilities/checkValidity.sh" % \
(self.getCVMFSPath())
tolog("Executing command: %s (time-out: %d)" % (cmd, timeout))
exitcode, output = timedCommand(cmd, timeout=timeout)
if exitcode != 0:
if "No such file or directory" in output:
tolog("!!WARNING!!1235!! Command checkValidity.sh was not found (can not run CVMFS validity test)")
status = True
elif "timed out" in output:
tolog("!!WARNING!!1236!! Command checkValidity.sh timed out: %s (ignore)" % (output))
status = True
else:
tolog("!!WARNING!!1234!! CVMFS diagnostics tool failed: %d, %s" % (exitcode, output))
else:
tolog("Diagnostics tool has verified CVMFS")
status = True
return status
def getNumberOfEvents(self, **kwargs):
""" Return the number of events """
# ..and a string of the form N|N|..|N with the number of processed events per trf
job = kwargs.get('job', None)
number_of_jobs = kwargs.get('number_of_jobs', 1)
if not job:
tolog("!!WARNING!!2332!! getNumberOfEvents did not receive a job object")
return 0, 0, ""
tolog("Looking for number of processed events (pass 0: metadata.xml)")
nEventsRead = self.processMetadata(job.workdir)
nEventsWritten = 0
if nEventsRead > 0:
return nEventsRead, nEventsWritten, str(nEventsRead)
else:
nEventsRead = 0
tolog("Looking for number of processed events (pass 1: Athena summary file(s))")
nEventsRead, nEventsWritten = self.processAthenaSummary(job.workdir)
if nEventsRead > 0:
return nEventsRead, nEventsWritten, str(nEventsRead)
tolog("Looking for number of processed events (pass 2: Resorting to brute force grepping of payload stdout)")
nEvents_str = ""
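# for multi-trf jobs the payload stdout files are named <stdout>_1.txt, <stdout>_2.txt, ...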
for i in range(number_of_jobs):
_stdout = job.stdout
if number_of_jobs > 1:
_stdout = _stdout.replace(".txt", "_%d.txt" % (i + 1))
filename = os.path.join(job.workdir, _stdout)
N = 0
if os.path.exists(filename):
tolog("Processing stdout file: %s" % (filename))
matched_lines = grep(["events processed so far"], filename)
if len(matched_lines) > 0:
if "events read and" in matched_lines[-1]:
# event #415044, run #142189 2 events read and 0 events processed so far
N = int(re.match('.* run #\d+ \d+ events read and (\d+) events processed so far.*', matched_lines[-1]).group(1))
else:
# event #4, run #0 3 events processed so far
N = int(re.match('.* run #\d+ (\d+) events processed so far.*', matched_lines[-1]).group(1))
if len(nEvents_str) == 0:
nEvents_str = str(N)
else:
nEvents_str += "|%d" % (N)
nEventsRead += N
return nEventsRead, nEventsWritten, nEvents_str
def processMetadata(self, workdir):
""" Extract number of events from metadata.xml """
N = 0
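# metadata.xml is expected to contain per-file <metadata att_name="events" att_value="..."/> entries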
filename = os.path.join(workdir, "metadata.xml")
if os.path.exists(filename):
# Get the metadata
try:
f = open(filename, "r")
except IOError, e:
tolog("!!WARNING!!1222!! Exception: %s" % (e))
else:
xmlIN = f.read()
f.close()
# Get the XML objects
from xml.dom import minidom
xmldoc = minidom.parseString(xmlIN)
fileList = xmldoc.getElementsByTagName("File")
# Loop over all files, assume that the number of events are the same in all files
for _file in fileList:
lrc_metadata_dom = _file.getElementsByTagName("metadata")
for i in range(len(lrc_metadata_dom)):
_key = str(_file.getElementsByTagName("metadata")[i].getAttribute("att_name"))
_value = str(_file.getElementsByTagName("metadata")[i].getAttribute("att_value"))
if _key == "events":
try:
N = int(_value)
except Exception, e:
tolog("!!WARNING!!1222!! Number of events not an integer: %s" % (e))
else:
tolog("Number of events from metadata file: %d" % (N))
break
else:
tolog("%s does not exist" % (filename))
return N
def processAthenaSummary(self, workdir):
""" extract number of events etc from athena summary file(s) """
N1 = 0
N2 = 0
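# N1 = number of events read, N2 = number of events written (extracted from the oldest summary file)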
file_pattern_list = ['AthSummary*', 'AthenaSummary*']
file_list = []
# loop over all patterns in the list to find all possible summary files
for file_pattern in file_pattern_list:
# get all the summary files for the current file pattern
files = glob(os.path.join(workdir, file_pattern))
# append all found files to the file list
for summary_file in files:
file_list.append(summary_file)
if file_list == [] or file_list == ['']:
tolog("Did not find any athena summary files")
else:
# find the most recent and the oldest files
oldest_summary_file = ""
recent_summary_file = ""
oldest_time = 9999999999
recent_time = 0
if len(file_list) > 1:
for summary_file in file_list:
# get the modification time
try:
st_mtime = os.path.getmtime(summary_file)
except Exception, e:
tolog("!!WARNING!!1800!! Could not read modification time of file %s: %s" % (summary_file, str(e)))
else:
if st_mtime > recent_time:
recent_time = st_mtime
recent_summary_file = summary_file
if st_mtime < oldest_time:
oldest_time = st_mtime
oldest_summary_file = summary_file
else:
oldest_summary_file = file_list[0]
recent_summary_file = oldest_summary_file
oldest_time = os.path.getmtime(oldest_summary_file)
recent_time = oldest_time
if oldest_summary_file == recent_summary_file:
tolog("Summary file: %s: Will be processed for errors and number of events" %\
(os.path.basename(oldest_summary_file)))
else:
tolog("Most recent summary file: %s (updated at %d): Will be processed for errors" %\
(os.path.basename(recent_summary_file), recent_time))
tolog("Oldest summary file: %s (updated at %d): Will be processed for number of events" %\
(os.path.basename(oldest_summary_file), oldest_time))
# Get the number of events from the oldest summary file
try:
f = open(oldest_summary_file, "r")
except Exception, e:
tolog("!!WARNING!!1800!! Failed to get number of events from summary file. Could not open file: %s" % str(e))
else:
lines = f.readlines()
f.close()
if len(lines) > 0:
for line in lines:
if "Events Read:" in line:
N1 = int(re.match('Events Read\: *(\d+)', line).group(1))
if "Events Written:" in line:
N2 = int(re.match('Events Written\: *(\d+)', line).group(1))
if N1 > 0 and N2 > 0:
break
else:
tolog("!!WARNING!!1800!! Failed to get number of events from summary file. Encountered an empty summary file.")
tolog("Number of events: %d (read)" % (N1))
tolog("Number of events: %d (written)" % (N2))
# Get the errors from the most recent summary file
# ...
return N1, N2
def isOutOfMemory(self, **kwargs):
""" Try to identify out of memory errors in the stderr/out """
# (Used by ErrorDiagnosis)
# make this function shorter, basically same code twice
out_of_memory = False
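# scan the stderr file(s) for MemoryRescueSvc fatal messages and the stdout file(s) for std::bad_alloc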
job = kwargs.get('job', None)
number_of_jobs = kwargs.get('number_of_jobs', 1)
if not job:
tolog("!!WARNING!!3222!! isOutOfMemory() did not receive a job object")
return False
tolog("Checking for memory errors in stderr")
for i in range(number_of_jobs):
_stderr = job.stderr
if number_of_jobs > 1:
_stderr = _stderr.replace(".txt", "_%d.txt" % (i + 1))
filename = os.path.join(job.workdir, _stderr)
if os.path.exists(filename):
tolog("Processing stderr file: %s" % (filename))
if os.path.getsize(filename) > 0:
tolog("WARNING: %s produced stderr, will dump to log" % (job.payload))
stderr_output = dumpOutput(filename)
if stderr_output.find("MemoryRescueSvc") >= 0 and \
stderr_output.find("FATAL out of memory: taking the application down") > 0:
out_of_memory = True
else:
tolog("Warning: File %s does not exist" % (filename))
# try to identify out of memory errors in the stdout
tolog("Checking for memory errors in stdout..")
for i in range(number_of_jobs):
_stdout = job.stdout
if number_of_jobs > 1:
_stdout = _stdout.replace(".txt", "_%d.txt" % (i + 1))
filename = os.path.join(job.workdir, _stdout)
if os.path.exists(filename):
tolog("Processing stdout file: %s" % (filename))
matched_lines = grep(["St9bad_alloc", "std::bad_alloc"], filename)
if len(matched_lines) > 0:
tolog("Identified an out of memory error in %s stdout:" % (job.payload))
for line in matched_lines:
tolog(line)
out_of_memory = True
else:
tolog("Warning: File %s does not exist" % (filename))
return out_of_memory
def verifyReleaseInTagsFile(self, vo_atlas_sw_dir, atlasRelease):
""" verify that the release is in the tags file """
status = False
# make sure the release is actually set
if verifyReleaseString(atlasRelease) == "NULL":
return status
tags = dumpOutput(vo_atlas_sw_dir + '/tags')
if tags != "":
# is the release among the tags?
if tags.find(atlasRelease) >= 0:
tolog("Release %s was found in tags file" % (atlasRelease))
status = True
else:
tolog("!!WARNING!!3000!! Release %s was not found in tags file" % (atlasRelease))
# error = PilotErrors()
# failJob(0, self.__error.ERR_MISSINGINSTALLATION, job, pilotserver, pilotport, ins=ins)
else:
tolog("!!WARNING!!3000!! Next pilot release might fail at this stage since there was no tags file")
return status
def getInstallDir(self, job, workdir, siteroot, swbase, cmtconfig):
""" Get the path to the release """
ec = 0
pilotErrorDiag = ""
# do not proceed for unset homepackage strings (treat as release strings in the following function)
if verifyReleaseString(job.homePackage) == "NULL":
return ec, pilotErrorDiag, siteroot, ""
# install the trf in the work dir if it is not installed on the site
# special case for nightlies (rel_N already in siteroot path, so do not add it)
if "rel_" in job.homePackage:
installDir = siteroot
else:
installDir = os.path.join(siteroot, job.homePackage)
installDir = installDir.replace('//','/')
tolog("Atlas release: %s" % (job.release))
tolog("Job home package: %s" % (job.homePackage))
tolog("Trf installation dir: %s" % (installDir))
return ec, pilotErrorDiag, siteroot, installDir
def getInstallDir2(self, job, workdir, siteroot, swbase, cmtconfig):
""" Get the path to the release, install patch if necessary """
ec = 0
pilotErrorDiag = ""
# do not proceed for unset homepackage strings (treat as release strings in the following function)
if verifyReleaseString(job.homePackage) == "NULL":
return ec, pilotErrorDiag, siteroot, ""
# install the trf in the work dir if it is not installed on the site
# special case for nightlies (rel_N already in siteroot path, so do not add it)
if "rel_" in job.homePackage:
installDir = siteroot
else:
installDir = os.path.join(siteroot, job.homePackage)
installDir = installDir.replace('//','/')
tolog("Atlas release: %s" % (job.release))
tolog("Job home package: %s" % (job.homePackage))
tolog("Trf installation dir: %s" % (installDir))
# special case for nightlies (no RunTime dir)
if "rel_" in job.homePackage:
sfile = os.path.join(installDir, "setup.sh")
else:
sfile = installDir + ('/%sRunTime/cmt/setup.sh' % job.homePackage.split('/')[0])
sfile = sfile.replace('//','/')
if not os.path.isfile(sfile):
# pilotErrorDiag = "Patch not available (will not attempt dynamic patch installation)"
# tolog("!!FAILED!!3000!! %s" % (pilotErrorDiag))
# ec = self.__error.ERR_DYNTRFINST
# uncomment this section (and remove the comments in the above three lines) for dynamic patch installation
tolog("!!WARNING!!3000!! Trf setup file does not exist at: %s" % (sfile))
tolog("Will try to install trf in work dir...")
# Install trf in the run dir
try:
ec, pilotErrorDiag = self.installPyJobTransforms(job.release, job.homePackage, swbase, cmtconfig)
except Exception, e:
pilotErrorDiag = "installPyJobTransforms failed: %s" % str(e)
tolog("!!FAILED!!3000!! %s" % (pilotErrorDiag))
ec = self.__error.ERR_DYNTRFINST
else:
if ec == 0:
tolog("Successfully installed trf")
installDir = workdir + "/" + job.homePackage
# replace siteroot="$SITEROOT" with siteroot=rundir
os.environ['SITEROOT'] = workdir
siteroot = workdir
# comment until here
else:
tolog("Found trf setup file: %s" % (sfile))
tolog("Using install dir = %s" % (installDir))
return ec, pilotErrorDiag, siteroot, installDir
def installPyJobTransforms(self, release, package, swbase, cmtconfig):
""" Install new python based TRFS """
status = False
pilotErrorDiag = ""
import string
if package.find('_') > 0: # jobdef style (e.g. "AtlasProduction_12_0_7_2")
ps = package.split('_')
if len(ps) == 5:
status = True
# dotver = string.join(ps[1:], '.')
# pth = 'AtlasProduction/%s' % dotver
else:
status = False
else: # Panda style (e.g. "AtlasProduction/12.0.3.2")
# Create pacman package = AtlasProduction_12_0_7_1
ps = package.split('/')
if len(ps) == 2:
ps2 = ps[1].split('.')
if len(ps2) == 4 or len(ps2) == 5:
dashver = string.join(ps2, '_')
pacpack = '%s_%s' % (ps[0], dashver)
tolog("Pacman package name: %s" % (pacpack))
status = True
else:
status = False
else:
status = False
if not status:
pilotErrorDiag = "installPyJobTransforms: Prod cache has incorrect format: %s" % (package)
tolog("!!FAILED!!2999!! %s" % (pilotErrorDiag))
return self.__error.ERR_DYNTRFINST, pilotErrorDiag
# Check if it exists already in rundir
tolog("Checking for path: %s" % (package))
# Current directory should be the job workdir at this point
if os.path.exists(package):
tolog("Found production cache, %s, in run directory" % (package))
return 0, pilotErrorDiag
# Install pacman
status, pilotErrorDiag = self.installPacman()
if status:
tolog("Pacman installed correctly")
else:
return self.__error.ERR_DYNTRFINST, pilotErrorDiag
# Prepare release setup command
if self.useAtlasSetup(swbase, release, package, cmtconfig):
setup_pbuild = self.getProperASetup(swbase, release, package, cmtconfig, source=False)
else:
setup_pbuild = '%s/%s/cmtsite/setup.sh -tag=%s,AtlasOffline,%s' % (swbase, release, release, cmtconfig)
got_JT = False
caches = [
'http://classis01.roma1.infn.it/pacman/Production/cache',
'http://atlas-computing.web.cern.ch/atlas-computing/links/kitsDirectory/Analysis/cache'
]
# shuffle list so same cache is not hit by all jobs
from random import shuffle
shuffle(caches)
for cache in caches:
# Need to set up some CMTROOT first
# Pretend platform for non-slc3, e.g. CentOS on westgrid
# Parasitically, while the release is set up, get the DBRELEASE version too
cmd = 'source %s' % (setup_pbuild)
cmd+= ';CMT_=`echo $CMTCONFIG | sed s/-/_/g`'
cmd+= ';cd pacman-*;source ./setup.sh;cd ..;echo "y"|'
cmd+= 'pacman -pretend-platform SLC -get %s:%s_$CMT_ -trust-all-caches'%\
(cache, pacpack)
tolog('Pacman installing JT %s from %s' % (pacpack, cache))
exitcode, output = timedCommand(cmd, timeout=60*60)
if exitcode != 0:
pilotErrorDiag = "installPyJobTransforms failed: %s" % str(output)
tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
else:
tolog('Installed JobTransforms %s from %s' % (pacpack, cache))
got_JT = True
break
if got_JT:
ec = 0
else:
ec = self.__error.ERR_DYNTRFINST
return ec, pilotErrorDiag
def installPacman(self):
""" Pacman installation """
pilotErrorDiag = ""
# Pacman version
pacman = 'pacman-3.18.3.tar.gz'
urlbases = [
'http://physics.bu.edu/~youssef/pacman/sample_cache/tarballs',
'http://atlas.web.cern.ch/Atlas/GROUPS/SOFTWARE/OO/Production'
]
# shuffle list so same server is not hit by all jobs
from random import shuffle
shuffle(urlbases)
got_tgz = False
for urlbase in urlbases:
url = urlbase + '/' + pacman
tolog('Downloading: %s' % (url))
try:
# returns httpMessage
from urllib import urlretrieve
(filename, msg) = urlretrieve(url, pacman)
if 'content-type' in msg.keys():
if msg['content-type'] == 'application/x-gzip':
got_tgz = True
tolog('Got %s' % (url))
break
else:
tolog('!!WARNING!!4000!! Failed to get %s' % (url))
except Exception, e:
tolog('!!WARNING!!4000!! URL: %s throws: %s' % (url, e))
if got_tgz:
tolog('Success')
cmd = 'tar -zxf %s' % (pacman)
tolog("Executing command: %s" % (cmd))
(exitcode, output) = commands.getstatusoutput(cmd)
if exitcode != 0:
# Got a tgz but can't unpack it. Could try another source but will fail instead.
pilotErrorDiag = "%s failed: %d : %s" % (cmd, exitcode, output)
tolog('!!FAILED!!4000!! %s' % (pilotErrorDiag))
return False, pilotErrorDiag
else:
tolog('Pacman tarball install succeeded')
return True, pilotErrorDiag
else:
pilotErrorDiag = "Failed to get %s from any source url" % (pacman)
tolog('!!FAILED!!4000!! %s' % (pilotErrorDiag))
return False, pilotErrorDiag
def getCmtsiteCmd(self, swbase, atlasRelease, homePackage, cmtconfig, siteroot=None, analysisJob=False, cacheDir=None, cacheVer=None):
""" Get the cmtsite setup command """
ec = 0
pilotErrorDiag = ""
cmd = ""
if verifyReleaseString(homePackage) == "NULL":
homePackage = ""
# Handle sites using builds area in a special way
if swbase[-len('builds'):] == 'builds' or verifyReleaseString(atlasRelease) == "NULL":
_path = swbase
else:
_path = os.path.join(swbase, atlasRelease)
if self.useAtlasSetup(swbase, atlasRelease, homePackage, cmtconfig):
# homePackage=AnalysisTransforms-AtlasTier0_15.5.1.6
# cacheDir = AtlasTier0
# cacheVer = 15.5.1.6
m_cacheDirVer = re.search('AnalysisTransforms-([^/]+)', homePackage)
if m_cacheDirVer != None:
cacheDir, cacheVer = self.getCacheInfo(m_cacheDirVer, atlasRelease)
elif "," in homePackage or "rel_" in homePackage: # if nightlies are used, e.g. homePackage = "AtlasProduction,rel_0"
cacheDir = homePackage
cmd = self.getProperASetup(swbase, atlasRelease, homePackage, cmtconfig, cacheVer=cacheVer, cacheDir=cacheDir)
else:
# Get the tags
tags = self.getTag(analysisJob, swbase, atlasRelease, cacheDir, cacheVer)
ec, pilotErrorDiag, status = self.isForceConfigCompatible(swbase, atlasRelease, homePackage, cmtconfig, siteroot=siteroot)
if ec == self.__error.ERR_MISSINGINSTALLATION:
return ec, pilotErrorDiag, cmd
else:
if status:
if "slc5" in cmtconfig and os.path.exists("%s/gcc43_inst" % (_path)):
cmd = "source %s/gcc43_inst/setup.sh;export CMTCONFIG=%s;" % (_path, cmtconfig)
elif "slc5" in cmtconfig and "slc5" in swbase and os.path.exists(_path):
cmd = "source %s/setup.sh;export CMTCONFIG=%s;" % (_path, cmtconfig)
else:
cmd = "export CMTCONFIG=%s;" % (cmtconfig)
cmd += "source %s/cmtsite/setup.sh %s,forceConfig" % (_path, tags)
else:
cmd = "source %s/cmtsite/setup.sh %s" % (_path, tags)
return ec, pilotErrorDiag, cmd
def getCacheInfo(self, m_cacheDirVer, atlasRelease):
""" Get the cacheDir and cacheVer """
cacheDirVer = m_cacheDirVer.group(1)
if re.search('_', cacheDirVer) != None:
cacheDir = cacheDirVer.split('_')[0]
cacheVer = re.sub("^%s_" % cacheDir, '', cacheDirVer)
else:
cacheDir = 'AtlasProduction'
if atlasRelease in ['13.0.25']:
cacheDir = 'AtlasPoint1'
cacheVer = cacheDirVer
tolog("cacheDir = %s" % (cacheDir))
tolog("cacheVer = %s" % (cacheVer))
return cacheDir, cacheVer
def getTag(self, analysisJob, path, release, cacheDir, cacheVer):
""" Define the setup tags """
_setup = False
tag = "-tag="
if analysisJob:
if cacheDir and cacheDir != "" and cacheVer and cacheVer != "" and cacheVer.count('.') < 4:
# E.g. -tag=AtlasTier0,15.5.1.6,32,setup
tag += "%s" % (cacheDir)
tag += ",%s" % (cacheVer)
_setup = True
else:
# E.g. -tag=AtlasOffline,15.5.1
tag += "AtlasOffline"
if verifyReleaseString(release) != "NULL":
tag += ",%s" % (release)
# only add the "32" part if CMTCONFIG has been out-commented in the requirements file
if self.isCMTCONFIGOutcommented(path, release):
tag += ",32"
if _setup:
tag += ",setup"
else:
# for production jobs
tag = "-tag=AtlasOffline"
if verifyReleaseString(release) != "NULL":
tag += ",%s" % (release)
# always add the runtime
tag += ",runtime"
return tag
def isCMTCONFIGOutcommented(self, path, release):
""" Is CMTCONFIG out-commented in requirements file? """
status = False
filename = "%s%s/cmtsite/requirements" % (path, release)
if os.path.exists(filename):
cmd = "grep CMTCONFIG %s" % (filename)
ec, rs = commands.getstatusoutput(cmd)
if ec == 0:
if rs.startswith("#"):
status = True
return status
def verifyCmtsiteCmd(self, exitcode, _pilotErrorDiag):
""" Verify the cmtsite command """
pilotErrorDiag = "unknown"
if "#CMT> Warning: template <src_dir> not expected in pattern install_scripts (from TDAQCPolicy)" in _pilotErrorDiag:
tolog("Detected CMT warning (return code %d)" % (exitcode))
tolog("Installation setup command passed test (with precaution)")
elif "Error:" in _pilotErrorDiag or "Fatal exception:" in _pilotErrorDiag:
pilotErrorDiag = "Detected severe CMT error: %d, %s" % (exitcode, _pilotErrorDiag)
tolog("!!WARNING!!2992!! %s" % (pilotErrorDiag))
elif exitcode != 0:
from futil import is_timeout
if is_timeout(exitcode):
pilotErrorDiag = "cmtsite command was timed out: %d, %s" % (exitcode, _pilotErrorDiag)
else:
if "timed out" in _pilotErrorDiag:
pilotErrorDiag = "cmtsite command was timed out: %d, %s" % (exitcode, _pilotErrorDiag)
else:
pilotErrorDiag = "cmtsite command failed: %d, %s" % (exitcode, _pilotErrorDiag)
tolog("!!WARNING!!2992!! %s" % (pilotErrorDiag))
else:
tolog("Release test command returned exit code %d" % (exitcode))
pilotErrorDiag = ""
return pilotErrorDiag
def getProdCmd2(self, installDir, homePackage):
""" Get cmd2 for production jobs """
pilotErrorDiag = ""
# Define cmd2
try:
# Special case for nightlies
if "rel_" in homePackage or "AtlasP1HLT" in homePackage or "AtlasHLT" in homePackage: # rel_N is already in installDir, do not add like below
cmd2 = '' #unset CMTPATH;'
else:
cmd2 = 'unset CMTPATH;cd %s/%sRunTime/cmt;source ./setup.sh;cd -;' % (installDir, homePackage.split('/')[0])
# Correct setup for athena post 14.5 (N.B. harmless for version < 14.5)
cmd2 += 'export AtlasVersion=%s;export AtlasPatchVersion=%s' % (homePackage.split('/')[-1], homePackage.split('/')[-1])
except Exception, e:
pilotErrorDiag = "Bad homePackage format: %s, %s" % (homePackage, str(e))
tolog("!!FAILED!!2999!! %s" % (pilotErrorDiag))
cmd2 = ""
return cmd2, pilotErrorDiag
def getSpecialSetupCommand(self):
""" Set special_setup_cmd if necessary """
# Note: this special setup command is hardly used and could probably be removed
# in case any special setup should be added to the setup string before the trf is executed, the command defined in this method
# could be added to the run command by using method addSPSetupToCmd().
# the special command is also forwarded to the get and put functions (currently not used)
special_setup_cmd = ""
# add envsetup to the special command setup on tier-3 sites
# (unknown if this is still needed)
# get the site information object
si = getSiteInformation(self.__experiment)
if si.isTier3():
_envsetup = readpar('envsetup')
if _envsetup != "":
special_setup_cmd += _envsetup
if not special_setup_cmd.endswith(';'):
special_setup_cmd += ";"
return special_setup_cmd
def getAnalyCmd2(self, swbase, cmtconfig, homePackage, atlasRelease):
""" Return a proper cmd2 setup command """
cacheDir = None
cacheVer = None
cmd2 = ""
# cannot set cmd2 for unset release/homepackage strings
if verifyReleaseString(atlasRelease) == "NULL" or verifyReleaseString(homePackage) == "NULL":
return cmd2, cacheDir, cacheVer
# homePackage=AnalysisTransforms-AtlasTier0_15.5.1.6
# cacheDir = AtlasTier0
# cacheVer = 15.5.1.6
m_cacheDirVer = re.search('AnalysisTransforms-([^/]+)', homePackage)
if m_cacheDirVer != None:
cacheDir, cacheVer = self.getCacheInfo(m_cacheDirVer, atlasRelease)
if not self.useAtlasSetup(swbase, atlasRelease, homePackage, cmtconfig):
cmd2 = "export CMTPATH=$SITEROOT/%s/%s" % (cacheDir, cacheVer)
return cmd2, cacheDir, cacheVer
def updateAnalyCmd2(self, cmd2, atlasVersion, atlasProject, useAsetup):
""" Add AtlasVersion and AtlasProject to cmd2 """
# Add everything to cmd2 unless AtlasSetup is used
if not useAsetup:
if atlasVersion != "" and atlasProject != "":
if cmd2 == "" or cmd2.endswith(";"):
pass
else:
cmd2 += ";"
cmd2 += "export AtlasVersion=%s;export AtlasProject=%s" % (atlasVersion, atlasProject)
return cmd2
def setPython(self, site_root, atlasRelease, homePackage, cmtconfig, sitename):
""" set the python executable """
ec = 0
pilotErrorDiag = ""
pybin = ""
if os.environ.has_key('VO_ATLAS_SW_DIR') and verifyReleaseString(atlasRelease) != "NULL":
#ec, pilotErrorDiag, _pybin = self.findPythonInRelease(site_root, atlasRelease, homePackage, cmtconfig, sitename)
#if ec == self.__error.ERR_MISSINGINSTALLATION:
# return ec, pilotErrorDiag, pybin
#if _pybin != "":
# pybin = _pybin
#pybin = "`which python`"
pybin = "python"
if pybin == "":
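# no python selected from the release; probe common python executables on the PATH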
python_list = ['python', 'python32', 'python2']
pybin = python_list[0]
for _python in python_list:
_pybin = commands.getoutput('which %s' % (_python))
if _pybin.startswith('/'):
# found python executable
pybin = _pybin
break
tolog("Using %s" % (pybin))
return ec, pilotErrorDiag, pybin
def findPythonInRelease(self, siteroot, atlasRelease, homePackage, cmtconfig, sitename):
""" Set the python executable in the release dir (LCG sites only) """
ec = 0
pilotErrorDiag = ""
py = ""
tolog("Trying to find a python executable for release: %s" % (atlasRelease))
scappdir = readpar('appdir')
# only use underscored cmtconfig paths on older cvmfs systems and only for now (remove at a later time)
_cmtconfig = cmtconfig.replace("-", "_")
if scappdir != "":
_swbase = self.getLCGSwbase(scappdir)
tolog("Using swbase: %s" % (_swbase))
# get the site information object
si = getSiteInformation(self.__experiment)
if self.useAtlasSetup(_swbase, atlasRelease, homePackage, cmtconfig):
cmd = self.getProperASetup(_swbase, atlasRelease, homePackage, cmtconfig, tailSemiColon=True)
tolog("Using new AtlasSetup: %s" % (cmd))
elif os.path.exists("%s/%s/%s/cmtsite/setup.sh" % (_swbase, _cmtconfig, atlasRelease)) and (si.isTier3() or "CERNVM" in sitename):
# use cmtconfig sub dir on CERNVM or tier3 (actually for older cvmfs systems)
cmd = "source %s/%s/%s/cmtsite/setup.sh -tag=%s,32,runtime;" % (_swbase, _cmtconfig, atlasRelease, atlasRelease)
else:
ec, pilotErrorDiag, status = self.isForceConfigCompatible(_swbase, atlasRelease, homePackage, cmtconfig, siteroot=siteroot)
if ec == self.__error.ERR_MISSINGINSTALLATION:
return ec, pilotErrorDiag, py
else:
if status:
if "slc5" in cmtconfig and os.path.exists("%s/%s/gcc43_inst" % (_swbase, atlasRelease)):
cmd = "source %s/%s/gcc43_inst/setup.sh;export CMTCONFIG=%s;" % (_swbase, atlasRelease, cmtconfig)
elif "slc5" in cmtconfig and "slc5" in _swbase and os.path.exists("%s/%s" % (_swbase, atlasRelease)):
cmd = "source %s/%s/setup.sh;export CMTCONFIG=%s;" % (_swbase, atlasRelease, cmtconfig)
else:
cmd = "export CMTCONFIG=%s;" % (cmtconfig)
cmd += "source %s/%s/cmtsite/setup.sh -tag=AtlasOffline,%s,forceConfig,runtime;" % (_swbase, atlasRelease, atlasRelease)
else:
cmd = "source %s/%s/cmtsite/setup.sh -tag=%s,32,runtime;" % (_swbase, atlasRelease, atlasRelease)
else:
vo_atlas_sw_dir = os.path.expandvars('$VO_ATLAS_SW_DIR')
if "gcc43" in cmtconfig and vo_atlas_sw_dir != '' and os.path.exists('%s/software/slc5' % (vo_atlas_sw_dir)):
cmd = "source $VO_ATLAS_SW_DIR/software/slc5/%s/setup.sh;" % (atlasRelease)
tolog("Found explicit slc5 dir in path: %s" % (cmd))
else:
# no known appdir, default to VO_ATLAS_SW_DIR
_appdir = vo_atlas_sw_dir
_swbase = self.getLCGSwbase(_appdir)
tolog("Using swbase: %s" % (_swbase))
if self.useAtlasSetup(_swbase, atlasRelease, homePackage, cmtconfig):
cmd = self.getProperASetup(_swbase, atlasRelease, homePackage, cmtconfig, tailSemiColon=True)
tolog("Using new AtlasSetup: %s" % (cmd))
else:
_path = os.path.join(_appdir, "software/%s/cmtsite/setup.sh" % (atlasRelease))
if os.path.exists(_path):
cmd = "source " + _path + ";"
else:
cmd = ""
tolog("!!WARNING!!1888!! No known path for setup script (using default python version)")
cmd += "which python"
exitcode, output = timedCommand(cmd, timeout=getProperTimeout(cmd))
if exitcode == 0:
if output.startswith('/'):
tolog("Found: %s" % (output))
py = output
else:
if '\n' in output:
output = output.split('\n')[-1]
if output.startswith('/'):
tolog("Found: %s" % (output))
py = output
else:
tolog("!!WARNING!!4000!! No python executable found in release dir: %s" % (output))
tolog("!!WARNING!!4000!! Will use default python")
py = "python"
else:
tolog("!!WARNING!!4000!! Find command failed: %d, %s" % (exitcode, output))
tolog("!!WARNING!!4000!! Will use default python")
py = "python"
return ec, pilotErrorDiag, py
def getLCGSwbase(self, scappdir):
""" Return the LCG swbase """
if os.path.exists(os.path.join(scappdir, 'software/releases')):
_swbase = os.path.join(scappdir, 'software/releases')
elif os.path.exists(os.path.join(scappdir, 'software')):
_swbase = os.path.join(scappdir, 'software')
else:
_swbase = scappdir
return _swbase
def getProdCmd3(self, cmd, pybin, jobtrf, jobPars):
""" Prepare the cmd3 command with the python from the release and the full path to the trf """
# When python is invoked using the full path, it also needs the full path to the script
return "%s %s" % (jobtrf, jobPars)
#return "%s `which %s` %s" % (pybin, jobtrf, jobPars)
#return "%s %s %s" % (pybin, jobtrf, jobPars)
def getProdCmd3Old(self, cmd, pybin, jobtrf, jobPars):
""" Prepare the cmd3 command with the python from the release and the full path to the trf """
# When python is invoked using the full path, it also needs the full path to the script
# First try to figure out where the trf is inside the release
if not cmd.endswith(";"):
cmd += ";"
_cmd = "%swhich %s" % (cmd, jobtrf)
_timedout = False
exitcode, _trf = timedCommand(_cmd, timeout=getProperTimeout(cmd))
if exitcode != 0:
_timedout = True
tolog("Trf: %s" % (_trf))
# split the output if necessary (the path should be the last entry)
if "\n" in _trf:
_trf = _trf.split("\n")[-1]
tolog("Trf: %s (extracted)" % (_trf))
# could the trf be found?
if "which: no" in _trf or jobtrf not in _trf or _timedout:
tolog("!!WARNING!!2999!! Will not use python from the release since the trf path could not be figured out")
cmd3 = "%s %s" % (jobtrf, jobPars)
else:
tolog("Will use python from the release: %s" % (pybin))
tolog("Path to trf: %s" % (_trf))
cmd3 = "%s %s %s" % (pybin, _trf, jobPars)
return cmd3
def addEnvVars2Cmd(self, cmd, jobId, processingType, sitename, analysisJob):
""" Add env variables """
_sitename = 'export PANDA_RESOURCE=\"%s\";' % (sitename)
_frontier1 = 'export FRONTIER_ID=\"[%s]\";' % (jobId)
_frontier2 = 'export CMSSW_VERSION=$FRONTIER_ID;'
_ttc = ''
# Unset ATHENA_PROC_NUMBER if set for event service Merge jobs
if "Merge_tf" in cmd and os.environ.has_key('ATHENA_PROC_NUMBER'):
_unset = "unset ATHENA_PROC_NUMBER;"
else:
_unset = ""
_coreCount = ""
if analysisJob:
_ttc = 'export ROOT_TTREECACHE_SIZE=1;'
try:
coreCount = int(os.environ['ATHENA_PROC_NUMBER'])
except:
_coreCount = 'export ROOTCORE_NCPUS=1;'
else:
_coreCount = 'export ROOTCORE_NCPUS=%d;' % (coreCount)
if processingType == "":
_rucio = ''
tolog("!!WARNING!!1887!! RUCIO_APPID needs job.processingType but it is not set!")
else:
_rucio = 'export RUCIO_APPID=\"%s\";' % (processingType)
_rucio += 'export RUCIO_ACCOUNT=\"pilot\";'
return _sitename + _ttc + _frontier1 + _frontier2 + _rucio + _coreCount + _unset + cmd
def getEnvVars2Cmd(self, jobId, processingType, sitename, analysisJob):
""" Return array with enviroment variables """
variables = []
variables.append('export PANDA_RESOURCE=\"%s\";' % (sitename))
if not 'HPC_Titan' in readpar("catchall"):
variables.append('export FRONTIER_ID=\"[%s]\";' % (jobId))
variables.append('export CMSSW_VERSION=$FRONTIER_ID;')
variables.append('export ROOT_TTREECACHE_SIZE=1;')
_coreCount = ""
if analysisJob:
try:
coreCount = int(os.environ['ATHENA_PROC_NUMBER'])
except:
pass
else:
_coreCount = 'export ROOTCORE_NCPUS=%d;' % (coreCount)
variables.append(_coreCount)
if processingType == "":
tolog("!!WARNING!!1887!! RUCIO_APPID needs job.processingType but it is not set!")
else:
variables.append('export RUCIO_APPID=\"%s\";' % (processingType))
variables.append('export RUCIO_ACCOUNT=\"pilot\";')
return variables
def isForceConfigCompatible(self, _dir, release, homePackage, cmtconfig, siteroot=None):
""" Test if the installed AtlasSettings and AtlasLogin versions are compatible with forceConfig """
# The forceConfig cmt tag was introduced in AtlasSettings-03-02-07 and AtlasLogin-00-03-07
status = True
ec = 0
pilotErrorDiag = ""
# only perform the forceConfig test for set release strings
if verifyReleaseString(release) == "NULL":
return ec, pilotErrorDiag, False
names = {"AtlasSettings":"AtlasSettings-03-02-07", "AtlasLogin":"AtlasLogin-00-03-07" }
for name in names.keys():
try:
ec, pilotErrorDiag, v, siteroot = self.getHighestVersionDir(release, homePackage, name, _dir, cmtconfig, siteroot=siteroot)
except Exception, e:
tolog("!!WARNING!!2999!! Exception caught: %s" % str(e))
v = None
if v:
if v >= names[name]:
tolog("%s version verified: %s" % (name, v))
else:
tolog("%s version too old: %s (older than %s, not forceConfig compatible)" % (name, v, names[name]))
status = False
else:
tolog("%s version not verified (not forceConfig compatible)" % (name))
status = False
return ec, pilotErrorDiag, status
def getHighestVersionDir(self, release, homePackage, name, swbase, cmtconfig, siteroot=None):
""" Grab the directory (AtlasLogin, AtlasSettings) with the highest version number """
# e.g. v = AtlasLogin-00-03-26
highestVersion = None
ec = 0
pilotErrorDiag = ""
# get the siteroot
if not siteroot:
ec, pilotErrorDiag, status, siteroot, cmtconfig = self.getProperSiterootAndCmtconfig(swbase, release, homePackage, cmtconfig)
else:
status = True
if ec != 0:
return ec, pilotErrorDiag, None, siteroot
if status and siteroot != "" and os.path.exists(os.path.join(siteroot, name)):
_dir = os.path.join(siteroot, name)
else:
if swbase[-len('builds'):] == 'builds':
_dir = os.path.join(swbase, name)
else:
_dir = os.path.join(swbase, release, name)
if not os.path.exists(_dir):
tolog("Directory does not exist: %s" % (_dir))
return ec, pilotErrorDiag, None, siteroot
tolog("Probing directory: %s" % (_dir))
if os.path.exists(_dir):
dirs = os.listdir(_dir)
_dirs = []
if dirs != []:
tolog("Found directories: %s" % str(dirs))
for d in dirs:
if d.startswith(name):
_dirs.append(d)
if _dirs != []:
# sort the directories
_dirs.sort()
# grab the directory with the highest version
highestVersion = _dirs[-1]
tolog("Directory with highest version: %s" % (highestVersion))
else:
tolog("WARNING: Found no %s dirs in %s" % (name, _dir))
else:
tolog("WARNING: Directory is empty: %s" % (_dir))
else:
tolog("Directory does not exist: %s" % (_dir))
return ec, pilotErrorDiag, highestVersion, siteroot
def getProperSiterootAndCmtconfig(self, swbase, release, homePackage, _cmtconfig, cmtconfig_alternatives=None):
""" return a proper $SITEROOT and cmtconfig """
status = False
siteroot = ""
ec = 0 # only non-zero for fatal errors (missing installation)
pilotErrorDiag = ""
# make sure the cmtconfig_alternatives is not empty/not set
if not cmtconfig_alternatives:
cmtconfig_alternatives = [_cmtconfig]
if readpar('region') == 'CERN':
if swbase[-len('builds'):] == 'builds':
status = True
return ec, pilotErrorDiag, status, swbase, _cmtconfig
# loop over all available cmtconfig's until a working one is found (the default cmtconfig value is the first to be tried)
for cmtconfig in cmtconfig_alternatives:
ec = 0
pilotErrorDiag = ""
tolog("Testing cmtconfig=%s" % (cmtconfig))
if self.useAtlasSetup(swbase, release, homePackage, cmtconfig):
cmd = self.getProperASetup(swbase, release, homePackage, cmtconfig, tailSemiColon=True)
elif "slc5" in cmtconfig and "gcc43" in cmtconfig:
cmd = "source %s/%s/cmtsite/setup.sh -tag=AtlasOffline,%s,%s,runtime" % (swbase, release, release, cmtconfig)
else:
cmd = "source %s/%s/cmtsite/setup.sh -tag=AtlasOffline,%s,runtime" % (swbase, release, release)
# verify that the setup path actually exists before attempting the source command
ec, pilotErrorDiag = self.verifySetupCommand(cmd)
if ec != 0:
pilotErrorDiag = "getProperSiterootAndCmtconfig: Missing installation: %s" % (pilotErrorDiag)
tolog("!!WARNING!!1996!! %s" % (pilotErrorDiag))
ec = self.__error.ERR_MISSINGINSTALLATION
continue
# do not test the source command, it is enough to verify its existence
(exitcode, output) = timedCommand(cmd, timeout=getProperTimeout(cmd))
if exitcode == 0:
# a proper cmtconfig has been found, now set the SITEROOT
# e.g. /cvmfs/atlas.cern.ch/repo/sw/software/i686-slc5-gcc43-opt/17.2.11
# note: this format (using cmtconfig) is only valid on CVMFS, not on AFS
# VO_ATLAS_RELEASE_DIR is only set on AFS (CERN)
if ("AtlasP1HLT" in homePackage or "AtlasHLT" in homePackage) and os.environ.has_key('VO_ATLAS_RELEASE_DIR'):
tolog("Encountered HLT homepackage: %s (must use special siteroot)" % (homePackage))
siteroot = os.path.join(swbase, release)
else:
# default SITEROOT on CVMFS
if "/cvmfs" in swbase:
siteroot = os.path.join(os.path.join(swbase, cmtconfig), release)
else:
siteroot = os.path.join(swbase, release)
siteroot = siteroot.replace('//','/')
# make sure that the path actually exists
if os.path.exists(siteroot):
tolog("SITEROOT path has been defined and exists: %s" % (siteroot))
status = True
break
else:
pilotErrorDiag = "getProperSiterootAndCmtconfig: cmtconfig %s has been confirmed but SITEROOT does not exist: %s" % (cmtconfig, siteroot)
tolog("!!WARNING!!1996!! %s" % (pilotErrorDiag))
ec = self.__error.ERR_MISSINGINSTALLATION
break
elif exitcode != 0 or "Error:" in output or "(ERROR):" in output:
# if time out error, don't bother with trying another cmtconfig
tolog("ATLAS setup for SITEROOT failed")
if "No such file or directory" in output:
pilotErrorDiag = "getProperSiterootAndCmtconfig: Missing installation: %s" % (output)
tolog("!!WARNING!!1996!! %s" % (pilotErrorDiag))
ec = self.__error.ERR_MISSINGINSTALLATION
continue
elif "Error:" in output:
pilotErrorDiag = "getProperSiterootAndCmtconfig: Caught CMT error: %s" % (output)
tolog("!!WARNING!!1996!! %s" % (pilotErrorDiag))
ec = self.__error.ERR_SETUPFAILURE
continue
elif "AtlasSetup(ERROR):" in output:
pilotErrorDiag = "getProperSiterootAndCmtconfig: Caught AtlasSetup error: %s" % (output)
tolog("!!WARNING!!1996!! %s" % (pilotErrorDiag))
ec = self.__error.ERR_SETUPFAILURE
continue
elif "timed out" in output:
# CVMFS problem, no point in continuing
pilotErrorDiag = "getProperSiterootAndCmtconfig: CVMFS setup command timed out: %s" % (output)
tolog("!!WARNING!!1996!! %s" % (pilotErrorDiag))
ec = self.__error.ERR_COMMANDTIMEOUT
break
# reset errors if siteroot was found
if status:
ec = 0
pilotErrorDiag = ""
return ec, pilotErrorDiag, status, siteroot, cmtconfig
def getProperSiterootAndCmtconfigOld(self, swbase, release, homePackage, _cmtconfig, cmtconfig_alternatives=None):
""" return a proper $SITEROOT and cmtconfig """
status = False
siteroot = ""
ec = 0 # only non-zero for fatal errors (missing installation)
pilotErrorDiag = ""
# make sure the cmtconfig_alternatives is not empty/not set
if not cmtconfig_alternatives:
cmtconfig_alternatives = [_cmtconfig]
if swbase[-len('builds'):] == 'builds':
status = True
return ec, pilotErrorDiag, status, swbase, _cmtconfig
# loop over all available cmtconfig's until a working one is found (the default cmtconfig value is the first to be tried)
for cmtconfig in cmtconfig_alternatives:
ec = 0
pilotErrorDiag = ""
tolog("Testing cmtconfig=%s" % (cmtconfig))
if self.useAtlasSetup(swbase, release, homePackage, cmtconfig):
cmd = self.getProperASetup(swbase, release, homePackage, cmtconfig, tailSemiColon=True)
cmd += " echo SITEROOT=$SITEROOT"
elif "slc5" in cmtconfig and "gcc43" in cmtconfig:
cmd = "source %s/%s/cmtsite/setup.sh -tag=AtlasOffline,%s,%s,runtime; echo SITEROOT=$SITEROOT" % (swbase, release, release, cmtconfig)
else:
cmd = "source %s/%s/cmtsite/setup.sh -tag=AtlasOffline,%s,runtime" % (swbase, release, release)
# verify that the setup path actually exists before attempting the source command
ec, pilotErrorDiag = self.verifySetupCommand(cmd)
if ec != 0:
pilotErrorDiag = "getProperSiterootAndCmtconfig: Missing installation: %s" % (pilotErrorDiag)
tolog("!!WARNING!!1996!! %s" % (pilotErrorDiag))
ec = self.__error.ERR_MISSINGINSTALLATION
continue
# do not test the source command, it is enough to verify its existence
(exitcode, output) = timedCommand(cmd, timeout=getProperTimeout(cmd))
if exitcode == 0:
# a proper cmtconfig has been found, now set the SITEROOT
# e.g. /cvmfs/atlas.cern.ch/repo/sw/software/i686-slc5-gcc43-opt/17.2.11
# note: this format (using cmtconfig) is only valid on CVMFS, not on AFS
# VO_ATLAS_RELEASE_DIR is only set on AFS (CERN)
if ("AtlasP1HLT" in homePackage or "AtlasHLT" in homePackage) and os.environ.has_key('VO_ATLAS_RELEASE_DIR'):
tolog("Encountered HLT homepackage: %s (must use special siteroot)" % (homePackage))
siteroot = os.path.join(swbase, release)
else:
# default SITEROOT on CVMFS
if "/cvmfs" in swbase:
siteroot = os.path.join(os.path.join(swbase, cmtconfig), release)
else:
siteroot = os.path.join(swbase, release)
siteroot = siteroot.replace('//','/')
# make sure that the path actually exists
if os.path.exists(siteroot):
tolog("SITEROOT path has been defined and exists: %s" % (siteroot))
status = True
break
else:
pilotErrorDiag = "getProperSiterootAndCmtconfig: cmtconfig %s has been confirmed but SITEROOT does not exist: %s" % (cmtconfig, siteroot)
tolog("!!WARNING!!1996!! %s" % (pilotErrorDiag))
ec = self.__error.ERR_MISSINGINSTALLATION
break
elif exitcode != 0 or "Error:" in output or "(ERROR):" in output:
# if time out error, don't bother with trying another cmtconfig
tolog("ATLAS setup for SITEROOT failed: ec=%d, output=%s" % (exitcode, output))
if "No such file or directory" in output:
pilotErrorDiag = "getProperSiterootAndCmtconfig: Missing installation: %s" % (output)
tolog("!!WARNING!!1996!! %s" % (pilotErrorDiag))
ec = self.__error.ERR_MISSINGINSTALLATION
continue
elif "Error:" in output:
pilotErrorDiag = "getProperSiterootAndCmtconfig: Caught CMT error: %s" % (output)
tolog("!!WARNING!!1996!! %s" % (pilotErrorDiag))
ec = self.__error.ERR_SETUPFAILURE
continue
elif "AtlasSetup(ERROR):" in output:
pilotErrorDiag = "getProperSiterootAndCmtconfig: Caught AtlasSetup error: %s" % (output)
tolog("!!WARNING!!1996!! %s" % (pilotErrorDiag))
ec = self.__error.ERR_SETUPFAILURE
continue
elif "timed out" in output:
# CVMFS problem, no point in continuing
pilotErrorDiag = "getProperSiterootAndCmtconfig: CVMFS setup command timed out: %s" % (output)
tolog("!!WARNING!!1996!! %s" % (pilotErrorDiag))
ec = self.__error.ERR_COMMANDTIMEOUT
break
# reset errors if siteroot was found
if status:
ec = 0
pilotErrorDiag = ""
return ec, pilotErrorDiag, status, siteroot, cmtconfig
def getVerifiedAtlasSetupPath(self, swbase, release, homePackage, cmtconfig):
""" Get a verified asetup path"""
rel_N = None
path = None
skipVerification = False # verification not possible for more complicated setup (nightlies)
if 'HPC_' in readpar("catchall"):
skipVerification = True # verification not possible for more complicated setup (nightlies)
# First try with the cmtconfig in the path. If that fails, try without it
# Are we using nightlies?
if "rel_" in homePackage:
# extract the rel_N bit and use it in the path
rel_N = self.extractRelN(homePackage)
tolog("Extracted %s from homePackage=%s" % (rel_N, homePackage))
if rel_N:
# path = "%s/%s/%s/%s/cmtsite/asetup.sh" % (swbase, cmtconfig, release, rel_N)
path = self.getModernASetup(swbase=swbase)
tolog("1. path = %s" % (path))
skipVerification = True
if not path:
path = "%s/%s/%s/cmtsite/asetup.sh" % (swbase, cmtconfig, release)
if not skipVerification:
status = os.path.exists(path)
if status:
tolog("Using AtlasSetup (%s exists with cmtconfig in the path)" % (path))
else:
tolog("%s does not exist (trying without cmtconfig in the path)" % (path))
if rel_N:
path = "%s/%s/%s/cmtsite/asetup.sh" % (swbase, release, rel_N)
else:
path = "%s/%s/cmtsite/asetup.sh" % (swbase, release)
status = os.path.exists(path)
if status:
tolog("Using AtlasSetup (%s exists)" % (path))
else:
tolog("Cannot use AtlasSetup since %s does not exist either" % (path))
else:
tolog("Skipping verification of asetup path for nightlies")
status = True
return status, path
def useAtlasSetup(self, swbase, release, homePackage, cmtconfig):
""" determine whether AtlasSetup is to be used """
status = False
# are we using at least release 16.1.0?
if release >= "16.1.0":
# the actual path is not needed in this method
status, _path = self.getVerifiedAtlasSetupPath(swbase, release, homePackage, cmtconfig)
else:
pass
# tolog("Release %s is too old for AtlasSetup (need at least 16.1.0)" % (release))
return status
def getProperASetup(self, swbase, atlasRelease, homePackage, cmtconfig, tailSemiColon=False, source=True, cacheVer=None, cacheDir=None):
""" return a proper asetup.sh command """
# handle sites using builds area in a special way
if swbase[-len('builds'):] == 'builds' or verifyReleaseString(atlasRelease) == "NULL":
path = swbase
else:
if os.path.exists(os.path.join(swbase, cmtconfig)):
if os.path.exists(os.path.join(os.path.join(swbase, cmtconfig), atlasRelease)):
path = os.path.join(os.path.join(swbase, cmtconfig), atlasRelease)
else:
path = os.path.join(swbase, atlasRelease)
else:
path = os.path.join(swbase, atlasRelease)
# need to tell asetup where the compiler is in the US (location of special config file)
_path = "%s/AtlasSite/AtlasSiteSetup" % (path)
if readpar('cloud') == "US" and os.path.exists(_path):
_input = "--input %s" % (_path)
else:
_input = ""
# add a tailing semicolon if needed
if tailSemiColon:
tail = ";"
else:
tail = ""
# define the setup options
if not cacheVer:
cacheVer = atlasRelease
# add the fast option if possible (for the moment, check for locally defined env variable)
if os.environ.has_key("ATLAS_FAST_ASETUP"):
options = cacheVer + ",notest,fast"
else:
options = cacheVer + ",notest"
if cacheDir and cacheDir != "":
options += ",%s" % (cacheDir)
# nightlies setup?
if "rel_" in homePackage:
# extract the rel_N bit and use it in the path
rel_N = self.extractRelN(homePackage) # e.g. rel_N = rel_0
if rel_N:
tolog("Extracted %s from homePackage=%s" % (rel_N, homePackage))
# asetup_path = "%s/%s/cmtsite/asetup.sh" % (path, rel_N)
asetup_path = self.getModernASetup(swbase=swbase)
tolog("2. path=%s" % (asetup_path))
# use special options for nightlies (not the release info set above)
# NOTE: 'HERE' IS NEEDED FOR MODERN SETUP
# Special case for AtlasDerivation. In this case cacheVer = rel_N, so we don't want to add both cacheVer and rel_N,
# and we need to add cacheDir and the release itself
special_cacheDirs = ['AtlasDerivation'] # Add more cases if necessary
if cacheDir in special_cacheDirs:
# strip any special cacheDirs from the release string, if present
for special_cacheDir in special_cacheDirs:
if special_cacheDir in atlasRelease:
tolog("Found special cacheDir=%s in release string: %s (will be removed)" % (special_cacheDir, atlasRelease)) # 19.1.X.Y-VAL-AtlasDerivation
atlasRelease = atlasRelease.replace('-' + special_cacheDir, '')
tolog("Release string updated: %s" % (atlasRelease))
options = cacheDir + "," + atlasRelease + "," + rel_N + ",notest,here" # E.g. AtlasDerivation,19.1.X.Y-VAL,rel_3,notest,here
else:
# correct an already set cacheVer if necessary
if cacheVer == rel_N:
tolog("Found a cacheVer set to %s: resetting to atlasRelease=%s" % (cacheVer, atlasRelease))
cacheVer = atlasRelease
options = cacheVer + "," + rel_N + ",notest,here"
tolog("Options: %s" % (options))
else:
tolog("!!WARNING!!1111!! Failed to extract rel_N from homePackage=%s (forced to use default cmtsite setup)" % (homePackage))
asetup_path = "%s/cmtsite/asetup.sh" % (path)
else:
asetup_path = "%s/cmtsite/asetup.sh" % (path)
# make sure that cmd doesn't start with 'source' if the asetup_path starts with 'export'; if so, reset it (cmd and asetup_path will be added below)
if asetup_path.startswith('export'):
cmd = ""
elif source:
# add the source command (default), not wanted for installPyJobTransforms()
cmd = "source"
else:
cmd = ""
# HLT on AFS
if ("AtlasP1HLT" in homePackage or "AtlasHLT" in homePackage):
try:
h = homePackage.split("/") # ['AtlasP1HLT', '18.1.0.1']
project = h[0]
patch = h[1]
except Exception, e:
tolog("!!WARNING!!1234!! Could not extract project and patch from %s" % (homePackage))
else:
tolog("Extracted %s, %s from homePackage=%s" % (patch, project, homePackage))
if os.environ.has_key('VO_ATLAS_RELEASE_DIR'):
cmd = "export AtlasSetup=%s/../dist/AtlasSetup; " % readpar('appdir')
options = "%s,%s,notest,afs" % (patch, project)
else:
cmd = "export AtlasSetup=%s/AtlasSetup; " % (path)
options = "%s,%s,notest" % (patch, project)
#cmd = "source"
#asetup_path = os.path.join(path, 'AtlasSetup/scripts/asetup.sh')
asetup_path = "source $AtlasSetup/scripts/asetup.sh"
# for HPC
if 'HPC_HPC' in readpar("catchall"):
quick_setup = "%s/setup-quick.sh" % (path)
tolog("quick setup path: %s" % quick_setup)
if os.path.exists(quick_setup):
cmd = "source %s" % (quick_setup)
asetup_path = ""
cmtconfig = cmtconfig + " --cmtextratags=ATLAS,useDBRelease "
return "%s %s %s --cmtconfig %s %s%s" % (cmd, asetup_path, options, cmtconfig, _input, tail)
def extractRelN(self, homePackage):
""" Extract the rel_N bit from the homePackage string """
# s = "AtlasProduction,rel_0"
# -> rel_N = "rel_0"
rel_N = None
if "AnalysisTransforms" in homePackage and "_rel_" in homePackage:
pattern = re.compile(r"AnalysisTransforms\-[A-Za-z0-9]+\_(rel\_\d+)")
found = re.findall(pattern, homePackage)
if len(found) > 0:
rel_N = found[0]
elif not "," in homePackage:
rel_N = homePackage
elif homePackage != "":
pattern = re.compile(r"[A-Za-z0-9]+,(rel\_\d+)")
found = re.findall(pattern, homePackage)
if len(found) > 0:
rel_N = found[0]
return rel_N
def dump(self, path, cmd="cat"):
""" Dump the content of path to the log """
if cmd != "cat":
_cmd = "%s %s" % (cmd, path)
tolog("%s:\n%s" % (_cmd, commands.getoutput(_cmd)))
else:
if os.path.exists(path):
_cmd = "%s %s" % (cmd, path)
tolog("%s:\n%s" % (_cmd, commands.getoutput(_cmd)))
else:
tolog("Path %s does not exist" % (path))
def displayArchitecture(self):
""" Display system architecture """
tolog("Architecture information:")
cmd = "lsb_release -a"
tolog("Excuting command: %s" % (cmd))
out = commands.getoutput(cmd)
if "Command not found" in out:
# Dump standard architecture info files if available
self.dump("/etc/lsb-release")
self.dump("/etc/SuSE-release")
self.dump("/etc/redhat-release")
self.dump("/etc/debian_version")
self.dump("/etc/issue")
self.dump("$MACHTYPE", cmd="echo")
else:
tolog("\n%s" % (out))
def specialChecks(self, **kwargs):
""" Implement special checks here """
status = False
# appdir = kwargs.get('appdir', '')
# Display system architecture
self.displayArchitecture()
# Display the cvmfs ChangeLog if possible
self.displayChangeLog()
# Set the python version used by the pilot
self.setPilotPythonVersion()
if ('HPC_' in readpar("catchall")) or ('ORNL_Titan_install' in readpar("nickname")):
status = True
else:
# Test the LFC module
status = self.testImportLFCModule()
# Test CVMFS
if status:
status = self.testCVMFS()
return status
def checkSpecialEnvVars(self, sitename):
""" Check special environment variables """
ec = 0
# check if ATLAS_POOLCOND_PATH is set
try:
if os.environ.has_key('ATLAS_POOLCOND_PATH'):
tolog("ATLAS_POOLCOND_PATH = %s" % (os.environ['ATLAS_POOLCOND_PATH']))
else:
tolog("ATLAS_POOLCOND_PATH not set by wrapper")
except Exception, e:
tolog("WARNING: os.environ.has_key failed: %s" % str(e))
if os.environ.has_key("VO_ATLAS_SW_DIR") and not "CERNVM" in sitename and not os.environ.has_key('Nordugrid_pilot'):
vo_atlas_sw_dir = os.environ["VO_ATLAS_SW_DIR"]
if vo_atlas_sw_dir != "":
# on cvmfs the following dirs are symbolic links, so all tests are needed
paths = [vo_atlas_sw_dir, os.path.join(vo_atlas_sw_dir, "software")]
for path in paths:
if os.path.exists(path):
tolog("%s confirmed" % (path))
else:
tolog("!!FAILED!!1777!! %s does not exist" % (path))
ec = self.__error.ERR_NOSUCHFILE
break
# require that the "local" directory exists on cvmfs
path = os.path.join(vo_atlas_sw_dir, "local")
if "cvmfs" in path:
if os.path.exists(path):
tolog("%s confirmed" % (path))
else:
tolog("!!FAILED!!1777!! %s does not exist" % (path))
ec = self.__error.ERR_NOSUCHFILE
else:
tolog("Skipping verification of %s on non-cvmfs" % (path))
else:
tolog("VO_ATLAS_SW_DIR set to empty string (ignore)")
return ec
def getPayloadName(self, job):
""" Figure out a suitable name for the payload stdout """
if job.processingType in ['prun']:
name = job.processingType
else:
jobtrf = job.trf.split(",")[0]
if jobtrf.find("panda") > 0 and jobtrf.find("mover") > 0:
name = "pandamover"
elif jobtrf.find("Athena") > 0 or jobtrf.find("trf") > 0 or jobtrf.find("_tf") > 0:
name = "athena"
else:
if isBuildJob(job.outFiles):
name = "buildjob"
else:
name = "payload"
return name
def getMetadataForRegistration(self, guid):
""" Return metadata (not known yet) for LFC registration """
# Use the GUID as identifier (the string "<GUID>-surltobeset" will later be replaced with the SURL)
return ' <metadata att_name="surl" att_value="%s-surltobeset"/>\n' % (guid)
def getAttrForRegistration(self):
""" Return the attribute of the metadata XML to be updated with surl value """
return 'surl'
def getFileCatalog(self):
""" Return the default file catalog to use (e.g. for replica lookups) """
# See usage in Mover.py
# Note: no longer needed since list_replicas() doesn't need to know the catalog
# Return a dummy default to allow the existing host loop to remain in Mover
fileCatalog = "<rucio default>"
# fileCatalog = ""
# try:
# ddm = readpar('ddm')
# # note that ddm can contain a comma separated list; if it does, get the first value
# if "," in ddm:
# ddm = ddm.split(',')[0]
# # Try to get the default file catalog from Rucio
# from dq2.info import TiersOfATLAS
# fileCatalog = TiersOfATLAS.getLocalCatalog(ddm)
# except:
# tolog("!!WARNING!!3333!! Failed to import TiersOfATLAS from dq2.info")
#
# # This should not be necessary post-LFC
# if fileCatalog == "":
# tolog("Did not get a file catalog from dq2.info. Trying to construct one from lfchost")
# lfchost = readpar('lfchost')
# if not "lfc://" in lfchost:
# lfchost = "lfc://" + lfchost
# fileCatalog = lfchost + ":/grid/atlas"
#
# # e.g. 'lfc://prod-lfc-atlas.cern.ch:/grid/atlas'
tolog("Using file catalog: %s" % (fileCatalog))
return fileCatalog
def getFileCatalogHosts(self):
""" Return a list of file catalog hosts """
file_catalog_hosts = []
# # Get the catalogTopology dictionary
# try:
# from dq2.info import TiersOfATLAS
# catalogsTopology_dict = TiersOfATLAS.ToACache.catalogsTopology
#
# # Extract all the LFC hosts from the catalogTopology dictionary
# file_catalog_hosts = catalogsTopology_dict.keys()
# tolog("catalogsTopology=%s" % str(file_catalog_hosts))
# except:
# import traceback
# tolog("!!WARNING!!3334!! Exception caught in Mover: %s" % str(traceback.format_exc()))
# tolog("!!WARNING!!1212!! catalogsTopology lookup failed")
# else:
# tolog("Extracted file catalog hosts: %s" % (file_catalog_hosts))
return file_catalog_hosts
def verifySwbase(self, appdir):
""" Confirm existence of appdir/swbase """
# appdir/swbase is a queuedata parameter specifying the base location of physics analysis / release software
# This method will simply verify that the corresponding directory exists
#
# Input:
# appdir = application/software/release directory (e.g. /cvmfs/atlas.cern.ch/repo/sw)
# Return:
# error code (0 for success)
ec = 0
if not "|" in appdir and not "^" in appdir: # as can be the case at CERN
swbase = self.getSwbase(appdir, "", "", "", "")
if os.path.exists(swbase):
tolog("Application dir confirmed: %s" % (swbase))
else:
tolog("!!FAILED!!1999!! Software directory does not exist: %s" % (swbase))
ec = self.__error.ERR_NOSOFTWAREDIR
else:
# get the site information object
si = getSiteInformation(self.__experiment)
tolog("Encountered complex appdir. Will verify each path individually (primary path must exist, other paths are optional)")
appdirs = si.getAppdirs(appdir)
tolog("appdirs = %s" % str(appdirs))
primary = True
for appdir in appdirs:
if os.path.exists(appdir):
if primary:
tolog("Primary application directory confirmed: %s" % (appdir))
primary = False
else:
tolog("Additional application directory confirmed: %s" % (appdir))
else:
if primary: # must exist
tolog("!!FAILED!!1999!! Primary application directory does not exist: %s" % (appdir))
ec = self.__error.ERR_NOSOFTWAREDIR
break
else:
tolog("Additional application directory does not exist: %s (ignore)" % (appdir))
return ec
def interpretPayloadStdout(self, job, res, getstatusoutput_was_interrupted, current_job_number, runCommandList, failureCode):
""" Payload error interpretation and handling """
error = PilotErrors()
transExitCode = res[0]%255
# Get the proper stdout filename
if type(runCommandList) is dict:
number_of_jobs = 1
else:
number_of_jobs = len(runCommandList)
filename = getStdoutFilename(job.workdir, job.stdout, current_job_number, number_of_jobs)
# Try to identify out of memory errors in the stderr
out_of_memory = self.isOutOfMemory(job=job, number_of_jobs=number_of_jobs)
failed = out_of_memory # failed boolean used below
# Always look for the max and average VmPeak?
''' Disabled while this is not fixed in a proper way
if not self.__analysisJob:
setup = getSourceSetup(runCommandList[0])
job.vmPeakMax, job.vmPeakMean, job.RSSMean = findVmPeaks(setup)
'''
# A killed job can have empty output but still transExitCode == 0
no_payload_output = False
installation_error = False
if getstatusoutput_was_interrupted:
if os.path.exists(filename):
if os.path.getsize(filename) > 0:
tolog("Payload produced stdout but was interrupted (getstatusoutput threw an exception)")
else:
no_payload_output = True
failed = True
else:
failed = True
no_payload_output = True
elif len(res[1]) < 20: # protect the following comparison against massive outputs
if res[1] == 'Undefined':
failed = True
no_payload_output = True
elif failureCode:
failed = True
else:
# check for installation error
res_tmp = res[1][:1024]
if res_tmp[0:3] == "sh:" and 'setup.sh' in res_tmp and 'No such file or directory' in res_tmp:
failed = True
installation_error = True
# handle non-zero failed job return code but do not set pilot error codes to all payload errors
if transExitCode or failed:
if failureCode:
job.pilotErrorDiag = "Payload failed: Interrupt failure code: %d" % (failureCode)
# (do not set pilot error code)
elif getstatusoutput_was_interrupted:
raise Exception, "Job execution was interrupted (see stderr)"
elif out_of_memory:
job.pilotErrorDiag = "Payload ran out of memory"
job.result[2] = error.ERR_PAYLOADOUTOFMEMORY
elif no_payload_output:
job.pilotErrorDiag = "Payload failed: No output"
job.result[2] = error.ERR_NOPAYLOADOUTPUT
elif installation_error:
job.pilotErrorDiag = "Payload failed: Missing installation"
job.result[2] = error.ERR_MISSINGINSTALLATION
elif transExitCode:
# Handle PandaMover errors
if transExitCode == 176:
job.pilotErrorDiag = "PandaMover staging error: File is not cached"
job.result[2] = error.ERR_PANDAMOVERFILENOTCACHED
elif transExitCode == 86:
job.pilotErrorDiag = "PandaMover transfer failure"
job.result[2] = error.ERR_PANDAMOVERTRANSFER
else:
# check for specific errors in athena stdout
if os.path.exists(filename):
e1 = "prepare 5 database is locked"
e2 = "Error SQLiteStatement"
_out = commands.getoutput('grep "%s" %s | grep "%s"' % (e1, filename, e2))
if 'sqlite' in _out:
job.pilotErrorDiag = "NFS/SQLite locking problems: %s" % (_out)
job.result[2] = error.ERR_NFSSQLITE
else:
job.pilotErrorDiag = "Job failed: Non-zero failed job return code: %d" % (transExitCode)
# (do not set a pilot error code)
else:
job.pilotErrorDiag = "Job failed: Non-zero failed job return code: %d (%s does not exist)" % (transExitCode, filename)
# (do not set a pilot error code)
else:
job.pilotErrorDiag = "Payload failed due to unknown reason (check payload stdout)"
job.result[2] = error.ERR_UNKNOWN
tolog("!!FAILED!!3000!! %s" % (job.pilotErrorDiag))
# set the trf diag error
if res[2] != "":
tolog("TRF diagnostics: %s" % (res[2]))
job.exeErrorDiag = res[2]
job.result[1] = transExitCode
return job
def isJEMAllowed(self):
""" Is it allowed to use JEM services? """
allowjem = False
_allowjem = readpar('allowjem')
if _allowjem == "":
# allowjem not added to queuedata yet, use old method
if readpar('cloud') == "DE":
allowjem = True
else:
if _allowjem.lower() == "true":
allowjem = True
if allowjem:
tolog("JEM is allowed")
else:
tolog("JEM is not allowed")
return allowjem
# Optional
def doSpecialLogFileTransfer(self, eventService=False):
""" Should the log file be transfered to a special SE? """
# At the end of the job, the log file can be stored in a special SE - in addition to the normal stage-out of the log file
# If this method returns True, the JobLog class will attempt to store the log file in a secondary SE after the transfer of
# the log to the primary/normal SE. Additional information about the secondary SE is required and can be specified in
# another optional method defined in the *Experiment classes
transferLogToObjectstore = False
if "log_to_objectstore" in readpar('catchall') or eventService:
transferLogToObjectstore = True
if 'HPC_HPC' in readpar('catchall'):
transferLogToObjectstore = True
return transferLogToObjectstore
def extractInputOption(self, jobParameters):
""" Extract the entire input file list from the job parameters including the input option """
# jobParameters = .. --input=file1,file2,file3 ..
# -> --input=file1,file2,file3
# The string is later replaced with a "@file.txt" argparser directive (necessary in case the file list
# is too long which would lead to an "argument list too long"-error)
inputOption = ""
# define the regexp pattern for the input option extraction
pattern = re.compile(r'(\-\-input\=[^\s]*)')
_option = re.findall(pattern, jobParameters)
if _option != []:
inputOption = _option[0]
return inputOption
def updateJobParameters4Input(self, jobParameters):
""" Replace '--input=..' with @file argparser instruction """
inputOption = self.extractInputOption(jobParameters)
if inputOption == "":
tolog("!!WARNING!!1223!! Option --file=.. could not be extracted from: %s" % (jobParameters))
else:
tolog("Extracted input option = %s" % (inputOption))
# Put the extracted info in a file (to be automatically read by the argparser when the trf is executed)
filename = "input_file_list.txt"
try:
f = open(filename, "w")
except IOError, e:
tolog("!!WARNING!!1234!! Failed to open input file list: %s" % (e))
else:
f.write(inputOption)
f.close()
tolog("Wrote extracted input file list to file %s" % (filename))
jobParameters = jobParameters.replace(inputOption, "@%s" % (filename))
tolog("updated job parameters = %s" % (jobParameters))
return jobParameters
def cleanupAthenaMP(self, workdir):
""" Cleanup AthenaMP sud directories prior to log file creation """
for ampdir in glob('%s/athenaMP-workers-*' % (workdir)):
tolog("Attempting to cleanup %s" % (ampdir))
for (p, d, f) in os.walk(ampdir):
for filename in f:
if 'core' in filename or 'pool.root' in filename or 'tmp.' in filename:
path = os.path.join(p, filename)
tolog("Cleaning up %s" % (path))
os.unlink(path)
def getJobExecutionCommand4EventService(self, pilot_initdir):
""" Define and test the command(s) that will be used to execute the payload for the event service """
# E.g. cmd = ["source <path>/setup.sh; <path>/python <script>"]
# The command returned from this method is executed using subprocess.Popen() from the runEvent module
# The actual command must be declared as a list since that is expected by Popen()
cmd = ["python %s/client_test.py 1>AthenaMP_stdout.txt 2>AthenaMP_stderr.txt" % (pilot_initdir)]
return cmd
# Optional
def postGetJobActions(self, job):
""" Perform any special post-job definition download actions here """
# This method is called after the getJob() method has successfully downloaded a new job (job definition) from
# the server. If the job definition e.g. contains information that contradicts WN specifics, this method can
# be used to fail the job
ec = 0
pilotErrorDiag = ""
# Make sure that ATHENA_PROC_NUMBER has a proper value for the current job
if job.prodSourceLabel != "install":
ec, pilotErrorDiag = self.verifyNCoresSettings(job.coreCount)
if ec != 0:
tolog("!!WARNING!!3222!! %s" % (pilotErrorDiag))
return ec, pilotErrorDiag
return ec, pilotErrorDiag
# Local method (not defined in Experiment)
def verifyNCoresSettings(self, jobCoreCount):
""" Verify that nCores settings are correct """
ec = 0
pilotErrorDiag = ""
try:
coreCount = int(jobCoreCount)
except:
coreCount = None
try:
athenaProcNumber = int(os.environ['ATHENA_PROC_NUMBER'])
except:
athenaProcNumber = None
# Make sure that ATHENA_PROC_NUMBER has a proper value for the current job
if (coreCount == 1 or coreCount == None) and (athenaProcNumber > 1):
ec = self.__error.ERR_CORECOUNTMISMATCH
pilotErrorDiag = "Encountered a mismatch between core count from schedconfig (%s) and job definition (%s)" % (str(athenaProcNumber), str(coreCount))
tolog("!!WARNING!!3333!! %s" % (pilotErrorDiag))
else:
tolog("Using core count values: %s (job definition), %s (schedconfig)" % (str(coreCount), str(athenaProcNumber)))
return ec, pilotErrorDiag
def getSwbase(self, appdir, release, homePackage, processingType, cmtconfig):
""" Return the swbase variable """
# appdir comes from thisSite.appdir (might not be set)
# release info is needed to figure out the correct path to use when schedconfig.appdir is set
swbase = ""
# Verify the validity of the release string in case it is not set (as can be the case for prun jobs)
release = verifyReleaseString(release)
if os.environ.has_key('Nordugrid_pilot'):
if os.environ.has_key('RUNTIME_CONFIG_DIR'):
_swbase = os.environ['RUNTIME_CONFIG_DIR']
if os.path.exists(_swbase):
swbase = _swbase
elif os.environ.has_key('VO_ATLAS_SW_DIR'):
# use the appdir from queuedata if available
scappdir = readpar('appdir')
# protect against complex appdir form
if "|" in scappdir and appdir != "":
#from SiteInformation import SiteInformation
#si = SiteInformation()
si = getSiteInformation(self.__experiment)
ec, _scappdir = si.extractAppdir(scappdir, processingType, homePackage)
if ec != 0:
tolog("!!WARNING!!2222!! Failed to extract complex appdir: %d, %s, %s, %s" % (ec, scappdir, processingType, homePackage))
else:
scappdir = _scappdir
tolog("Using alternative appdir=%s" % (scappdir))
elif scappdir != "":
tolog("Got a plain appdir from queuedata: %s" % (scappdir))
else:
tolog("Appdir not set in queuedata")
if scappdir != "":
# CERN-RELEASE:
# appdir=/afs/cern.ch/atlas/software/releases (full path to releases)
# CERN-UNVALID:
# appdir=/afs/cern.ch/atlas/software/unvalidated/caches (full path to releases)
# CERN-BUILDS:
# appdir=/afs/cern.ch/atlas/software/builds (already points to the latest release, do not add release)
# CERN-PROD:
# appdir=/afs/cern.ch/atlas/software/releases (full path to releases)
# Release can be added to appdir for CERN-RELEASE, CERN-UNVALID, CERN-PROD, but not to CERN-BUILDS
if os.path.exists(os.path.join(scappdir, release)):
swbase = scappdir
else:
# the build queue is special
if scappdir[-len('builds'):] == 'builds':
swbase = scappdir
# backup, old cases
elif os.path.exists(os.path.join(scappdir, 'software/releases')):
swbase = os.path.join(scappdir, 'software/releases')
# backup, for remaining LCG sites, only 'software' needs to be added
else:
swbase = os.path.join(scappdir, 'software')
if not os.path.exists(swbase):
swbase = scappdir
else:
tolog("VO_ATLAS_SW_DIR=%s" % (os.environ['VO_ATLAS_SW_DIR']))
# primary software base (search appdir for alternatives)
swbase = os.environ['VO_ATLAS_SW_DIR'] + '/software'
else:
# for non-LCG sites
if appdir.find('atlas_app/atlas_rel') < 0:
_swbase = os.path.join(appdir, 'atlas_app/atlas_rel')
if os.path.exists(_swbase):
swbase = _swbase
else:
swbase = appdir
else:
swbase = appdir
# add cmtconfig sub dir for CERNVM and for cvmfs systems
_cmtconfig = cmtconfig.replace("-", "_")
_swbase = os.path.join(swbase, _cmtconfig)
if os.path.exists(_swbase) and release != "" and release.upper() != "NULL":
swbase = _swbase
# uncomment if testing interactively at lxplus
# swbase = appdir
return swbase.replace('//','/')
def setPilotPythonVersion(self):
""" Set an environmental variable to the python version used by the pilot """
# Needed to disentangle which python version runAthena should fall back to in case of problems with LFC import
which_python = commands.getoutput("which python")
if which_python.startswith('/'):
os.environ['ATLAS_PYTHON_PILOT'] = which_python
tolog("ATLAS_PYTHON_PILOT set to %s" % (which_python))
else:
tolog("!!WARNING!!1111!! Could not set ATLAS_PYTHON_PILOT to %s" % (which_python))
# Optional
def updateJobSetupScript(self, workdir, create=False, to_script=None):
""" Create or update the job setup script (used to recreate the job locally if needed) """
# If create=True, this step will only create the file with the script header (bash info)
if create:
filename = os.path.basename(super(ATLASExperiment, self).getJobSetupScriptName(workdir))
tolog("Creating job setup script with stage-in and payload execution commands: %s" % (filename))
to_script = "#!/bin/bash\n# %s %s\n\n" % (filename, time.strftime("%d %b %Y %H:%M:%S", time.gmtime(time.time())))
# setup for EGI sites
if os.environ.has_key('VO_ATLAS_SW_DIR'):
to_script += "export VO_ATLAS_SW_DIR=%s\n" % (os.path.expandvars('$VO_ATLAS_SW_DIR'))
to_script += "if [ -f $VO_ATLAS_SW_DIR/local/setup.sh ]; then\n source $VO_ATLAS_SW_DIR/local/setup.sh\nfi"
# Add the string to the setup script
if to_script:
super(ATLASExperiment, self).addToJobSetupScript(to_script, workdir)
def verifyProxy(self, envsetup="", limit=None):
""" Check for a valid voms/grid proxy longer than N hours """
# Use 'limit' to set required length
error = PilotErrors()
pilotErrorDiag = ""
if limit == None:
limit = 48
from SiteMover import SiteMover
if envsetup == "":
envsetup = SiteMover.getEnvsetup()
# add setup for arcproxy if it exists
arcproxy_setup = "%s/atlas.cern.ch/repo/sw/arc/client/latest/slc6/x86_64/setup.sh" % (self.getCVMFSPath())
_envsetup = ""
if os.path.exists(arcproxy_setup):
if envsetup != "":
if not envsetup.endswith(";"):
envsetup += ";"
# but remove any existing setup file in this path since it will tamper with the arcproxy setup
pattern = re.compile(r'(source .+\;)')
s = re.findall(pattern, envsetup)
if s != []:
_envsetup = envsetup.replace(s[0], "")
else:
_envsetup = envsetup
_envsetup += ". %s;" % (arcproxy_setup)
# first try to use arcproxy since voms-proxy-info is not working properly on SL6 (memory issues on queues with limited memory)
# cmd = "%sarcproxy -I |grep 'AC:'|awk '{sum=$5*3600+$7*60+$9; print sum}'" % (envsetup)
cmd = "%sarcproxy -i vomsACvalidityLeft" % (_envsetup)
tolog("Executing command: %s" % (cmd))
exitcode, output = commands.getstatusoutput(cmd)
if "command not found" in output:
tolog("!!WARNING!!1234!! arcproxy is not available on this queue, this can lead to memory issues with voms-proxy-info on SL6: %s" % (output))
else:
ec, pilotErrorDiag = self.interpretProxyInfo(exitcode, output, limit)
if ec == 0:
tolog("Voms proxy verified using arcproxy")
return 0, pilotErrorDiag
elif ec == error.ERR_NOVOMSPROXY:
return ec, pilotErrorDiag
else:
tolog("Will try voms-proxy-info instead")
# -valid HH:MM is broken
cmd = "%svoms-proxy-info -actimeleft --file $X509_USER_PROXY" % (envsetup)
tolog("Executing command: %s" % (cmd))
exitcode, output = commands.getstatusoutput(cmd)
if "command not found" in output:
tolog("Skipping voms proxy check since command is not available")
else:
ec, pilotErrorDiag = self.interpretProxyInfo(exitcode, output, limit)
if ec == 0:
tolog("Voms proxy verified using voms-proxy-info")
return 0, pilotErrorDiag
if limit:
cmd = "%sgrid-proxy-info -exists -valid %s:00" % (envsetup, str(limit))
else:
cmd = "%sgrid-proxy-info -exists -valid 24:00" % (envsetup)
tolog("Executing command: %s" % (cmd))
exitcode, output = commands.getstatusoutput(cmd)
if exitcode != 0:
if output.find("command not found") > 0:
tolog("Skipping grid proxy check since command is not available")
else:
# Analyze exit code / output
from futil import check_syserr
check_syserr(exitcode, output)
pilotErrorDiag = "Grid proxy certificate does not exist or is too short: %d, %s" % (exitcode, output)
tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
return error.ERR_NOPROXY, pilotErrorDiag
else:
tolog("Grid proxy verified")
return 0, pilotErrorDiag
def interpretProxyInfo(self, ec, output, limit):
""" Interpret the output from arcproxy or voms-proxy-info """
exitcode = 0
pilotErrorDiag = ""
error = PilotErrors()
tolog("ec=%d output=%s" % (ec, output))
if ec != 0:
if "Unable to verify signature! Server certificate possibly not installed" in output:
tolog("!!WARNING!!2999!! Skipping voms proxy check: %s" % (output))
# test for command errors
elif "arcproxy:" in output:
pilotErrorDiag = "Arcproxy failed: %s" % (output)
tolog("!!WARNING!!2998!! %s" % (pilotErrorDiag))
exitcode = error.ERR_GENERALERROR
else:
# Analyze exit code / output
from futil import check_syserr
check_syserr(ec, output)
pilotErrorDiag = "Voms proxy certificate check failure: %d, %s" % (ec, output)
tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
exitcode = error.ERR_NOVOMSPROXY
else:
# remove any additional print-outs if present, assume that the last line is the time left
if "\n" in output:
output = output.split('\n')[-1]
# test for command errors
if "arcproxy:" in output:
pilotErrorDiag = "Arcproxy failed: %s" % (output)
tolog("!!WARNING!!2998!! %s" % (pilotErrorDiag))
exitcode = error.ERR_GENERALERROR
else:
# on EMI-3 the time output is different (HH:MM:SS as compared to SS on EMI-2)
if ":" in output:
ftr = [3600, 60, 1]
output = sum([a*b for a,b in zip(ftr, map(int,output.split(':')))])
try:
validity = int(output)
if validity >= limit * 3600:
tolog("Voms proxy verified (%ds)" % (validity))
else:
pilotErrorDiag = "Voms proxy certificate does not exist or is too short. Lifetime %ds" % (validity)
tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
exitcode = error.ERR_NOVOMSPROXY
except ValueError:
pilotErrorDiag = "Failed to evalute command output: %s" % (output)
tolog("!!WARNING!!2999!! %s" % (pilotErrorDiag))
exitcode = error.ERR_GENERALERROR
return exitcode, pilotErrorDiag
def getRelease(self, release):
""" Return a list of the software release id's """
# Assuming 'release' is a string that separates release id's with '\n'
# Used in the case of payload using multiple steps with different release versions
# E.g. release = "19.0.0\n19.1.0" -> ['19.0.0', '19.1.0']
if os.environ.has_key('Nordugrid_pilot') and os.environ.has_key('ATLAS_RELEASE'):
return os.environ['ATLAS_RELEASE'].split(",")
else:
return release.split("\n")
# Optional
def formatReleaseString(self, release):
""" Return a special formatted release string """
# E.g. release = "Atlas-19.0.0" -> "19.0.0"
self.__atlasEnv = release.startswith('Atlas-')
if self.__atlasEnv : # require atlas env. to be set up
if release.find("\n") > 0:
# E.g. multi-trf: swRelease = 'Atlas-14.1.0\nAtlas-14.1.0' (normally 'Atlas-14.1.0')
# We only want to keep the release number, not the 'Atlas' string
rm = release.split('-')[0] + '-' # 'Atlas-'
release = release.replace(rm, '')
else:
# update needed to handle cases like "Atlas-19.0.X.Y-VAL"
release = release[release.find('-')+1:]
return release
def getModernASetup(self, swbase=None):
""" Return the full modern setup for asetup """
# Handle nightlies correctly, since these releases will have different initial paths
path = "%s/atlas.cern.ch/repo" % (self.getCVMFSPath())
#if swbase:
# path = getInitialDirs(swbase, 3) # path = "/cvmfs/atlas-nightlies.cern.ch/repo"
# # correct for a possible change of the root directory (/cvmfs)
# path = path.replace("/cvmfs", self.getCVMFSPath())
#else:
# path = "%s/atlas.cern.ch/repo" % (self.getCVMFSPath())
cmd = "export ATLAS_LOCAL_ROOT_BASE=%s/ATLASLocalRootBase;" % (path)
cmd += "source ${ATLAS_LOCAL_ROOT_BASE}/user/atlasLocalSetup.sh --quiet;"
cmd += "source $AtlasSetup/scripts/asetup.sh"
return cmd
def verifySetupCommand(self, _setup_str):
""" Make sure the setup command exists """
ec = 0
pilotErrorDiag = ""
# remove any '-signs
_setup_str = _setup_str.replace("'", "")
tolog("Will verify: %s" % (_setup_str))
if _setup_str != "" and "source " in _setup_str:
# if a modern setup is used (i.e. a naked asetup instead of asetup.sh), then we need to verify that the entire setup string works
# and not just check the existence of the path (i.e. the modern setup is more complicated to test)
if self.getModernASetup() in _setup_str:
tolog("Modern asetup detected, need to verify entire setup (not just existance of path)")
tolog("Executing command: %s" % (_setup_str))
exitcode, output = timedCommand(_setup_str)
if exitcode != 0:
pilotErrorDiag = output
tolog('!!WARNING!!2991!! %s' % (pilotErrorDiag))
if "No such file or directory" in output:
ec = self.__error.ERR_NOSUCHFILE
elif "timed out" in output:
ec = self.__error.ERR_COMMANDTIMEOUT
else:
ec = self.__error.ERR_SETUPFAILURE
else:
tolog("Setup has been verified")
else:
# first extract the file paths from the source command(s)
setup_paths = extractFilePaths(_setup_str)
# only run test if string begins with an "/"
if setup_paths:
# verify that the file paths actually exists
for setup_path in setup_paths:
if os.path.exists(setup_path):
tolog("File %s has been verified" % (setup_path))
else:
pilotErrorDiag = "No such file or directory: %s" % (setup_path)
tolog('!!WARNING!!2991!! %s' % (pilotErrorDiag))
ec = self.__error.ERR_NOSUCHFILE
break
else:
# nothing left to test
pass
else:
tolog("Nothing to verify in setup: %s (either empty string or no source command)" % (_setup_str))
return ec, pilotErrorDiag
# Optional
def useTracingService(self):
""" Use the DQ2 Tracing Service """
# A service provided by the DQ2 system that allows for file transfer tracking; all file transfers
# are reported by the pilot to the DQ2 Tracing Service if this method returns True
return True
# Optional
def updateJobDefinition(self, job, filename):
""" Update the job definition file and object before using it in RunJob """
# This method is called from Monitor, before RunJob is launched, which makes it possible to change the job object after it was downloaded from the job dispatcher
# (used within Monitor) and the job definition file (which is used from RunJob to recreate the same job object as is used in Monitor).
# 'job' is the job object, defined in Job.py, while 'filename' is the name of the file containing the job definition information.
# Update the job definition in case ATHENA_PROC_NUMBER has been set
if os.environ.has_key('ATHENA_PROC_NUMBER'):
try:
coreCount = int(os.environ['ATHENA_PROC_NUMBER'])
except Exception, e:
tolog("!!WARNING!!2332!! ATHENA_PROC_NUMBER not an integer (can not update job definition): %s" % (e))
else:
tolog("ATHENA_PROC_NUMBER set to %d - will update job object and job definition file" % (coreCount))
job.coreCount = coreCount
# Get the contents of the job definition
contents = readFile(filename)
if contents != "":
# Parse the job definition and extract the coreCount string + value
pattern = re.compile(r"coreCount\=([A-Za-z0-9]+)?")
found = re.findall(pattern, contents)
if len(found) > 0: # ie found a least 'coreCount='
try:
coreCount_string = "coreCount=%s" % found[0] # might or might not add an int, or even NULL
except Exception, e:
tolog("!!WARNING!!2333!! Failed to extract coreCount from job definition: %s" % (e))
else:
tolog("Extracted \'%s\' from job definition" % (coreCount_string))
# Update the coreCount
new_coreCount = "coreCount=%d" % (coreCount)
updated_contents = contents.replace(coreCount_string, new_coreCount)
if writeFile(filename, updated_contents):
tolog("Updated job definition with: \'%s\'" % (new_coreCount))
else:
tolog("!!WARNING!!2336!! Failed to update coreCount in job definition")
else:
tolog("!!WARNING!!2334!! coreCount could not be extracted from job definition")
else:
tolog("!!WARNING!!2335!! Empty job definition")
else:
tolog("ATHENA_PROC_NUMBER is not set, will not update coreCount in job definition")
return job
# Optional
def shouldExecuteUtility(self):
""" Determine where a memory utility monitor should be executed """
# The RunJob class has the possibility to execute a memory utility monitor that can track the memory usage
# of the payload. The monitor is executed if this method returns True. The monitor is expected to produce
# a summary JSON file whose name is defined by the getMemoryMonitorJSONFilename() method. The contents of
# this file (ie. the full JSON dictionary) will be added to the jobMetrics at the end of the job (see
# PandaServerClient class).
return True
# Optional
def getUtilityJSONFilename(self):
""" Return the filename of the memory monitor JSON file """
# For explanation, see shouldExecuteUtility()
return "memory_monitor_summary.json"
# Optional
def getUtilityCommand(self, **argdict):
""" Prepare a utility command string """
# This method can be used to prepare a setup string for an optional utility tool, e.g. a memory monitor,
# that will be executed by the pilot in parallel with the payload.
# The pilot will look for an output JSON file (summary.json) and will extract pre-determined fields
# from it and report them with the job updates. Currently the pilot expects to find fields related
# to memory information.
pid = argdict.get('pid', 0)
release = argdict.get('release', '')
homePackage = argdict.get('homePackage', '')
cmtconfig = argdict.get('cmtconfig', '')
summary = self.getUtilityJSONFilename()
interval = 60
default_release = "20.1.5"
default_patch_release = "20.1.5.2" #"20.1.4.1"
default_cmtconfig = "x86_64-slc6-gcc48-opt"
default_swbase = "%s/atlas.cern.ch/repo/sw/software" % (self.getCVMFSPath())
#default_path = "%s/%s/%s/AtlasProduction/%s/InstallArea/%s/bin/MemoryMonitor" % (default_swbase, default_cmtconfig, default_release, default_patch_release, default_cmtconfig)
default_setup = "source %s/%s/%s/cmtsite/asetup.sh %s,notest --cmtconfig %s" % (default_swbase, default_cmtconfig, default_release, default_patch_release, default_cmtconfig)
# Construct the name of the output file using the summary variable
if summary.endswith('.json'):
output = summary.replace('.json', '.txt')
else:
output = summary + '.txt'
# Get the standard setup
cacheVer = homePackage.split('/')[-1]
# Could anything be extracted?
if homePackage == cacheVer: # (no)
# This means there is no patched release available, ie. we need to use the fallback
useDefault = True
else:
useDefault = False
if useDefault:
tolog("Will use default (fallback) setup for MemoryMonitor since patched release number is needed for the setup, and none is available")
cmd = default_setup
else:
# get the standard setup
standard_setup = self.getProperASetup(default_swbase, release, homePackage, cmtconfig, cacheVer=cacheVer)
_cmd = standard_setup + "; which MemoryMonitor"
# Can the MemoryMonitor be found?
try:
ec, output = timedCommand(_cmd, timeout=60)
except Exception, e:
tolog("!!WARNING!!3434!! Failed to locate MemoryMonitor: will use default (for patch release %s): %s" % (default_patch_release, e))
cmd = default_setup
else:
if "which: no MemoryMonitor in" in output:
tolog("Failed to locate MemoryMonitor: will use default (for patch release %s)" % (default_patch_release))
cmd = default_setup
else:
# Standard setup passed the test
cmd = standard_setup
# Now add the MemoryMonitor command
cmd += "; MemoryMonitor --pid %d --filename %s --json-summary %s --interval %d" % (pid, "memory_monitor_output.txt", summary, interval)
return cmd
# Optional
def getGUIDSourceFilename(self):
""" Return the filename of the file containing the GUIDs for the output files """
# In the case of ATLAS, Athena produces an XML file containing the GUIDs of the output files. The name of this
# file is PoolFileCatalog.xml. If this method returns an empty string (ie the default), the GUID generation will
# be done by the pilot in RunJobUtilities::getOutFilesGuids()
return "PoolFileCatalog.xml"
if __name__ == "__main__":
a=ATLASExperiment()
#a.specialChecks()
print a.formatReleaseString("Atlas-19.0.X.Y-VAL")
#appdir='/cvmfs/atlas.cern.ch/repo/sw'
#a.specialChecks(appdir=appdir)
#def useTracingService(self):
# return True
#def sendTracingReport(self, exitCode):
# ts = TracingService()
# ts.send
| []
| []
| [
"ATLAS_RELEASE",
"SITEROOT",
"ATLAS_PYTHON_PILOT",
"ATLAS_POOLCOND_PATH",
"VO_ATLAS_SW_DIR",
"ATHENA_PROC_NUMBER",
"RUNTIME_CONFIG_DIR"
]
| [] | ["ATLAS_RELEASE", "SITEROOT", "ATLAS_PYTHON_PILOT", "ATLAS_POOLCOND_PATH", "VO_ATLAS_SW_DIR", "ATHENA_PROC_NUMBER", "RUNTIME_CONFIG_DIR"] | python | 7 | 0 | |
lambda_function.py | import os
import logging
from typing import List
from datetime import datetime
from dataclasses import dataclass
import httpx
from slack_sdk import WebClient
from slack_sdk.errors import SlackApiError
# Logging setup
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# Model setup
@dataclass
class Game:
title: str
store_link: str
image_url: str
# Epic functionality
def get_free_epic_games() -> List[Game]:
"""Uses an API from Epic to parse a list of free games to find this week's free games."""
# HTTP params for the US free games
free_games_params = {
"locale": "en-US",
"country": "US",
"allowCountries": "US",
}
# Epic's backend API URL for the free games promotion
epic_api_url = "https://store-site-backend-static.ak.epicgames.com/freeGamesPromotions"
# backend API request
response = httpx.get(epic_api_url, params=free_games_params)
logger.info("## HTTP response code")
logger.info(response)
logger.info("## HTTP response body")
logger.info(response.json())
# list of dictionaries containing information about the free games
free_games: List[Game] = []
# create Game objects for each entry found
for game in response.json()["data"]["Catalog"]["searchStore"]["elements"]:
if len(game["promotions"]["promotionalOffers"]) == 0:
continue
else:
discount_price = game["price"]["totalPrice"]["discountPrice"]
promo_start_date = datetime.strptime(
game["promotions"]["promotionalOffers"][0][
"promotionalOffers"
][0]["startDate"],
"%Y-%m-%dT%H:%M:%S.000%z",
).replace(tzinfo=None)
promo_end_date = datetime.strptime(
game["promotions"]["promotionalOffers"][0][
"promotionalOffers"
][0]["endDate"],
"%Y-%m-%dT%H:%M:%S.000%z",
).replace(tzinfo=None)
if (
discount_price == 0
and promo_start_date <= datetime.now() <= promo_end_date
):
free_games.append(
Game(
title=game["title"],
store_link=f"https://www.epicgames.com/store/en-US/p/{game['productSlug']}"
if game["productSlug"]
else "https://www.epicgames.com/store/en-US/p/",
image_url=[
image["url"]
for image in game["keyImages"]
if image["type"] == "OfferImageWide"
][0],
)
)
logger.info("## Free game(s)")
logger.info(free_games)
return free_games
# Slack setup
def lambda_handler(event, context):
slack_client = WebClient(token=os.getenv("SLACK_TOKEN"))
free_epic_games = get_free_epic_games()
for entry in free_epic_games:
game_blocks = [
{
"type": "header",
"text": {"type": "plain_text", "text": f"{entry.title}"},
},
{
"type": "image",
"title": {"type": "plain_text", "text": f"{entry.title}"},
"image_url": f"{entry.image_url}",
"alt_text": f"{entry.title}",
},
{
"type": "actions",
"elements": [
{
"type": "button",
"text": {"type": "plain_text", "text": "Click Me"},
"value": "click_me_123",
"url": f"{entry.store_link}",
"action_id": "button-action",
}
],
},
{"type": "divider"},
]
print(game_blocks)
try:
slack_client.chat_postMessage(
channel=str(
os.getenv("SLACK_CHANNEL_ID")
), # get this from an env variable
blocks=game_blocks,
)
logger.info("## Slack message")
logger.info("Slack message successfully sent")
except SlackApiError as e:
logger.error("## Slack message ERROR")
logger.error(e.response["error"])
assert e.response["error"]
if __name__ == "__main__":
lambda_handler("", "")
| []
| []
| [
"SLACK_CHANNEL_ID",
"SLACK_TOKEN"
]
| [] | ["SLACK_CHANNEL_ID", "SLACK_TOKEN"] | python | 2 | 0 | |
pyexp/files_os_practice.py | data_dir = '/home/cn/data/sample_tick/'
import os
import shutil # higher level file operations - more choices for error handling
os.path.join('usr', 'bin', 'spam') # join path
cur_dir = os.getcwd() # current working dir
os.chdir('/tmp'); os.getcwd() # move around
os.chdir('/home/cn/program/python/sandbox');
os.getcwd()
if not os.path.exists('/tmp/blah'):
os.mkdir('/tmp/blah')
os.rmdir('/tmp/blah') # only works if the dir is empty
shutil.rmtree('/tmp/blah', ignore_errors=True) # works for most dirs - shutil is more adaptable
## ABS and REL paths
os.path.abspath('.')
os.path.isabs('.')
os.path.relpath('/tmp/blah')
## deal with names - split names etc.
os.path.basename(os.path.join(os.getcwd(), 'test_file.py'))
os.path.dirname(os.path.join(os.getcwd(), 'test_file.py'))
os.path.split(os.path.join(os.getcwd(), 'test_file.py'))
# zip, unzip, tar, untar etc.
shutil.disk_usage('.')
# create a new file
if not os.path.exists('/tmp/to_arc'):
os.mkdir('/tmp/to_arc')
to_arc = '/tmp/to_arc/test_arc.txt'
with open(to_arc, 'a') as fh: # touch behavior - will throw if the immediate parent dir does not exist
os.utime(to_arc, times=None)
fh.writelines('\n'.join(['ha', 'asdfjalsdjadf'])) # writelines does NOT add new lines. Genius!
shutil.get_archive_formats() # all supported formats - depending on other tools in the os
# make archive needs a dir to archive so you need to move everything into that dir first
# syntax is quite tricky
shutil.make_archive('/tmp/test_arc.txt', base_dir='to_arc', root_dir='/tmp', format='gztar') # zip or tar work too
shutil.unpack_archive(('/tmp/test_arc.txt.tar.gz'), extract_dir='/tmp/unpack/crazy')
for root, dirs, files in os.walk('/tmp/unpack/crazy'): ## hmm - need to review os.walk()
print(files)
# finding directory contents
base_dir = os.environ['HOME'] + '/data/sample_tick'
# first way:
kk = os.listdir(base_dir) # list things in that directory only - level 1
for name in kk:
name = os.path.join(base_dir, name)
print( name, ", is dir:", os.path.isdir(name), ", is file:", os.path.isfile(name))
# second way:
for cur_dir, subdirs, filenames in os.walk(base_dir):
""" per iteration, list all subdirs and filenames under cur_dir, then go deeper into subdirs in the
next iterations. It basically does a tree_walk
"""
print( 'the current dir is %s' % cur_dir)
for subdir in subdirs:
print('\tthe current subdir is %s' % subdir)
for filename in filenames:
print('\tthe current filename is %s' % filename)
# TODO: could use regex to detect if a file is a .gz or .csv file and then do some stuff with it
| []
| []
| [
"HOME"
]
| [] | ["HOME"] | python | 1 | 0 |