filename stringlengths 4-198 | content stringlengths 25-939k | environment list | variablearg list | constarg list | variableargjson stringclasses 1 value | constargjson stringlengths 2-3.9k | lang stringclasses 3 values | constargcount float64 0-129 ⌀ | variableargcount float64 0-0 ⌀ | sentence stringclasses 1 value |
---|---|---|---|---|---|---|---|---|---|---|
Python/pythonLevel1/python0811_file.py
|
# -*- coding: utf-8 -*-
import os
print '------- Working with files and directories -------'
# Operating system name
print os.name + '\n'
print '\n' + 'Detailed system information'
print os.uname()
print '\n' + 'Environment variables'
print os.environ
print '\n' + 'Get the value of a single environment variable'
print os.getenv('PATH')
print '\n'
# Absolute path of the current directory:
print os.path.abspath('.')
selfAbsPath = os.path.abspath('.')
# To create a new directory inside another directory,
# first build the full path of the new directory:
filePathDir = os.path.join(selfAbsPath, 'testdir')
# '/Users/michael/testdir'
# Then create the directory:
os.mkdir(filePathDir)
# Delete the directory:
os.rmdir(filePathDir)
print '------- The os.path.join() function -------'
# It joins path components using the correct separator for the current operating system
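# For example (illustrative values, not from the original script):
# os.path.join('part-1', 'part-2') yields 'part-1/part-2' on Linux/macOS
# and 'part-1\\part-2' on Windows.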
print '------- os.path.split() splits a path into directory and file name -------'
print os.path.split('/Users/michael/testdir/file.txt')
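# Note: os.path.splitext() is the call that returns the file extension:
print os.path.splitext('/Users/michael/testdir/file.txt')  # ('/Users/michael/testdir/file', '.txt')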
# Rename a file:
# os.rename('test.txt', 'test.py')
# Delete a file:
# os.remove('test.py')
print '-------shutil-------'
# The shutil module provides a copyfile() function, along with many other utility functions that can be seen as a complement to the os module.
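# For illustration (hypothetical paths, calls left commented out so the script still runs):
# import shutil
# shutil.copyfile('test.txt', 'test_copy.txt')    # copy the contents of one file
# shutil.copytree('testdir', 'testdir_backup')    # recursively copy a whole directory tree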
# All directories under the current directory
print [x for x in os.listdir('.') if os.path.isdir(x)]
# All Python files in the current folder
# print [x for x in os.listdir('.') if os.path.isfile(x) and
# os.path.splitext(x)[1]=='.py']
# print os.listdir('.')
# print dir(os.path)
# Write a search(s) function that looks in the current directory and all of its subdirectories for files whose names contain the given string, and prints their full paths:
def search(fileName, path='.'):
    for x in os.listdir(path):
        fullPath = os.path.join(os.path.abspath(path), x)
        if os.path.isfile(fullPath) and fileName in os.path.splitext(x)[0]:
            print fullPath
        if os.path.isdir(fullPath):
            # recurse into the subdirectory
            search(fileName, fullPath)
print '-------search start-------'
search('0810')
|
[] | [] | ["PATH"] | [] | ["PATH"] | python | 1 | 0 | |
pkg/typrls/github.go
|
package typrls
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"github.com/google/go-github/v33/github"
"golang.org/x/oauth2"
)
type (
// Github to publish
Github struct {
Owner string
Repo string
Draft bool
}
)
var _ Publisher = (*Github)(nil)
// Publish to github
func (g *Github) Publish(c *Context) (err error) {
githubToken := os.Getenv("GITHUB_TOKEN")
if githubToken == "" {
return errors.New("github-release: missing $GITHUB_TOKEN")
}
token := &oauth2.Token{AccessToken: githubToken}
oauth := oauth2.NewClient(c.Ctx(), oauth2.StaticTokenSource(token))
repo := github.NewClient(oauth).Repositories
if _, _, err = g.getReleaseByTag(c, repo); err == nil {
return fmt.Errorf("github-release: Can't publish to existing tag '%s'", c.TagName)
}
c.Infof("Create github release for %s/%s\n", g.Owner, g.Repo)
rls := &github.RepositoryRelease{
Name: github.String(fmt.Sprintf("%s - %s", c.Descriptor.ProjectName, c.TagName)),
TagName: github.String(c.TagName),
Body: github.String(c.Summary),
Draft: github.Bool(g.Draft),
Prerelease: github.Bool(c.Alpha),
}
if rls, _, err = g.createRelease(c, repo, rls); err != nil {
return
}
files, _ := ioutil.ReadDir(c.ReleaseFolder)
for _, fileInfo := range files {
path := c.ReleaseFolder + "/" + fileInfo.Name()
c.Infof("Upload '%s'\n", path)
var file *os.File
if file, err = os.Open(path); err != nil {
return
}
defer file.Close()
opt := &github.UploadOptions{Name: filepath.Base(path)}
if _, _, err := g.uploadReleaseAsset(c, repo, *rls.ID, opt, file); err != nil {
c.Infof("WARN: %s\n", err.Error())
}
}
return
}
func (g *Github) getReleaseByTag(c *Context, repo *github.RepositoriesService) (*github.RepositoryRelease, *github.Response, error) {
return repo.GetReleaseByTag(c.Ctx(), g.Owner, g.Repo, c.TagName)
}
func (g *Github) createRelease(c *Context, repo *github.RepositoriesService, rls *github.RepositoryRelease) (*github.RepositoryRelease, *github.Response, error) {
return repo.CreateRelease(c.Ctx(), g.Owner, g.Repo, rls)
}
func (g *Github) uploadReleaseAsset(c *Context, repo *github.RepositoriesService, id int64, opt *github.UploadOptions, file *os.File) (*github.ReleaseAsset, *github.Response, error) {
return repo.UploadReleaseAsset(c.Ctx(), g.Owner, g.Repo, id, opt, file)
}
|
["\"GITHUB_TOKEN\""] | [] | ["GITHUB_TOKEN"] | [] | ["GITHUB_TOKEN"] | go | 1 | 0 | |
spreadsheets/spreadsheet.go
|
package spreadsheets
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"github.com/cloudevents/sdk-go"
"github.com/oms-services/google-sheets/result"
"golang.org/x/oauth2/google"
driveV3 "google.golang.org/api/drive/v3"
sheetsV4 "google.golang.org/api/sheets/v4"
"log"
"net/http"
"net/url"
"os"
"regexp"
"strconv"
"strings"
"time"
)
//ArgsData struct
type ArgsData struct {
RowLength *sheetsV4.Request `json:"rowLength"`
ColumnLength *sheetsV4.Request `json:"columnLength"`
Title string `json:"title"`
ID string `json:"spreadsheetId"`
SheetID int64 `json:"sheetId"`
SheetIndex int `json:"sheetIndex"`
SheetTitle string `json:"sheetTitle"`
Row int64 `json:"row"`
Column int64 `json:"column"`
Content string `json:"content"`
Start int `json:"start"`
End int `json:"end"`
EmailAddress string `json:"emailAddress"`
IsTesting bool `json:"isTesting"`
Role string `json:"role"`
Type string `json:"type"`
CellNumber string `json:"cellNumber"`
}
//Subscribe struct
type Subscribe struct {
Data RequestParam `json:"data"`
Endpoint string `json:"endpoint"`
ID string `json:"id"`
IsTesting bool `json:"istesting"`
}
//SubscribeReturn struct
type SubscribeReturn struct {
SpreadsheetID string `json:"spreadsheetID"`
SheetTitle string `json:"sheetTitle"`
TwitterCell string `json:"twitterCell"`
EmailAddress string `json:"emailAddress"`
}
//RequestParam struct
type RequestParam struct {
SpreadsheetID string `json:"spreadsheetID"`
SheetTitle string `json:"sheetTitle"`
}
//Message struct
type Message struct {
Success bool `json:"success"`
Message string `json:"message"`
StatusCode int `json:"statusCode"`
}
//SheetScope Spreadsheet
const (
SheetScope = "https://www.googleapis.com/auth/spreadsheets"
DriveScope = "https://www.googleapis.com/auth/drive.file"
)
//Global Variables
var (
Listener = make(map[string]Subscribe)
rtmStarted bool
sheetService *sheetsV4.Service
sheetServiceErr error
oldRowCount int
twitterIndex int
subReturn SubscribeReturn
count int
)
//HealthCheck Google-Sheets
func HealthCheck(responseWriter http.ResponseWriter, request *http.Request) {
bytes, _ := json.Marshal("OK")
result.WriteJSONResponse(responseWriter, bytes, http.StatusOK)
}
//CreateSpreadsheet func
func CreateSpreadsheet(responseWriter http.ResponseWriter, request *http.Request) {
var key = os.Getenv("CREDENTIAL_JSON")
decodedJSON, err := base64.StdEncoding.DecodeString(key)
if err != nil {
result.WriteErrorResponseString(responseWriter, err.Error())
return
}
decoder := json.NewDecoder(request.Body)
var argsdata ArgsData
decodeErr := decoder.Decode(&argsdata)
if decodeErr != nil {
result.WriteErrorResponseString(responseWriter, decodeErr.Error())
return
}
sheetConf, sheetConfErr := google.JWTConfigFromJSON(decodedJSON, SheetScope)
if sheetConfErr != nil {
result.WriteErrorResponseString(responseWriter, sheetConfErr.Error())
return
}
sheetClient := sheetConf.Client(context.TODO())
sheetService, sheetServiceErr := sheetsV4.New(sheetClient)
if sheetServiceErr != nil {
result.WriteErrorResponseString(responseWriter, sheetServiceErr.Error())
return
}
sheetProperties := sheetsV4.Spreadsheet{
Properties: &sheetsV4.SpreadsheetProperties{
Title: argsdata.Title,
},
}
newSpreadsheet := sheetService.Spreadsheets.Create(&sheetProperties)
spreadsheet, sheetErr := newSpreadsheet.Do()
if sheetErr != nil {
result.WriteErrorResponseString(responseWriter, sheetErr.Error())
return
}
spreadsheetID := spreadsheet.SpreadsheetId
driveConf, driveConfErr := google.JWTConfigFromJSON(decodedJSON, DriveScope)
if driveConfErr != nil {
result.WriteErrorResponseString(responseWriter, driveConfErr.Error())
return
}
driveClient := driveConf.Client(context.TODO())
driveService, driveServiceErr := driveV3.New(driveClient)
if driveServiceErr != nil {
result.WriteErrorResponseString(responseWriter, driveServiceErr.Error())
return
}
driveProperties := driveV3.Permission{
EmailAddress: argsdata.EmailAddress,
Role: argsdata.Role,
Type: argsdata.Type,
}
if spreadsheetID != "" {
permission := driveService.Permissions.Create(spreadsheetID, &driveProperties)
_, doErr := permission.Do()
if doErr != nil && argsdata.IsTesting == false {
result.WriteErrorResponseString(responseWriter, doErr.Error())
return
}
}
bytes, _ := json.Marshal(spreadsheet)
result.WriteJSONResponse(responseWriter, bytes, http.StatusOK)
}
//FindSpreadsheet func
func FindSpreadsheet(responseWriter http.ResponseWriter, request *http.Request) {
var key = os.Getenv("CREDENTIAL_JSON")
decoder := json.NewDecoder(request.Body)
var argsdata ArgsData
decodeErr := decoder.Decode(&argsdata)
if decodeErr != nil {
result.WriteErrorResponse(responseWriter, decodeErr)
return
}
decodedJSON, decodeErr := base64.StdEncoding.DecodeString(key)
if decodeErr != nil {
result.WriteErrorResponse(responseWriter, decodeErr)
return
}
sheetConf, sheetConfErr := google.JWTConfigFromJSON(decodedJSON, SheetScope)
if sheetConfErr != nil {
result.WriteErrorResponseString(responseWriter, sheetConfErr.Error())
return
}
sheetClient := sheetConf.Client(context.TODO())
sheetService, sheetServiceErr := sheetsV4.New(sheetClient)
if sheetServiceErr != nil {
result.WriteErrorResponseString(responseWriter, sheetServiceErr.Error())
return
}
getSpreadsheet := sheetService.Spreadsheets.Get(argsdata.ID)
spreadsheet, sheetErr := getSpreadsheet.Do()
if sheetErr != nil {
result.WriteErrorResponseString(responseWriter, sheetErr.Error())
return
}
bytes, _ := json.Marshal(spreadsheet)
result.WriteJSONResponse(responseWriter, bytes, http.StatusOK)
}
//AddSheet func
func AddSheet(responseWriter http.ResponseWriter, request *http.Request) {
var key = os.Getenv("CREDENTIAL_JSON")
decoder := json.NewDecoder(request.Body)
var argsdata ArgsData
decodeErr := decoder.Decode(&argsdata)
if decodeErr != nil {
result.WriteErrorResponse(responseWriter, decodeErr)
return
}
decodedJSON, decodeErr := base64.StdEncoding.DecodeString(key)
if decodeErr != nil {
result.WriteErrorResponse(responseWriter, decodeErr)
return
}
sheetConf, sheetConfErr := google.JWTConfigFromJSON(decodedJSON, SheetScope)
if sheetConfErr != nil {
result.WriteErrorResponseString(responseWriter, sheetConfErr.Error())
return
}
sheetClient := sheetConf.Client(context.TODO())
sheetService, sheetServiceErr := sheetsV4.New(sheetClient)
if sheetServiceErr != nil {
result.WriteErrorResponseString(responseWriter, sheetServiceErr.Error())
return
}
addSheet := sheetsV4.BatchUpdateSpreadsheetRequest{
Requests: []*sheetsV4.Request{
&sheetsV4.Request{
AddSheet: &sheetsV4.AddSheetRequest{
Properties: &sheetsV4.SheetProperties{
Title: argsdata.SheetTitle,
},
},
},
},
}
addSpreadsheet := sheetService.Spreadsheets.BatchUpdate(argsdata.ID, &addSheet)
spreadsheet, sheetErr := addSpreadsheet.Do()
if sheetErr != nil {
result.WriteErrorResponseString(responseWriter, sheetErr.Error())
return
}
bytes, _ := json.Marshal(spreadsheet)
result.WriteJSONResponse(responseWriter, bytes, http.StatusOK)
}
//FindSheet func
func FindSheet(responseWriter http.ResponseWriter, request *http.Request) {
var key = os.Getenv("CREDENTIAL_JSON")
decoder := json.NewDecoder(request.Body)
var argsdata ArgsData
decodeErr := decoder.Decode(&argsdata)
if decodeErr != nil {
result.WriteErrorResponse(responseWriter, decodeErr)
return
}
if argsdata.SheetID <= 0 && argsdata.SheetIndex <= 0 && argsdata.SheetTitle == "" {
message := Message{false, "Please provide at least one argument(sheet Id, title or index)", http.StatusBadRequest}
bytes, _ := json.Marshal(message)
result.WriteJSONResponse(responseWriter, bytes, http.StatusBadRequest)
return
}
decodedJSON, decodeErr := base64.StdEncoding.DecodeString(key)
if decodeErr != nil {
result.WriteErrorResponse(responseWriter, decodeErr)
return
}
sheetConf, sheetConfErr := google.JWTConfigFromJSON(decodedJSON, SheetScope)
if sheetConfErr != nil {
result.WriteErrorResponseString(responseWriter, sheetConfErr.Error())
return
}
sheetClient := sheetConf.Client(context.TODO())
sheetService, sheetServiceErr := sheetsV4.New(sheetClient)
if sheetServiceErr != nil {
result.WriteErrorResponseString(responseWriter, sheetServiceErr.Error())
return
}
getSheet := sheetService.Spreadsheets.Values.Get(argsdata.ID, argsdata.SheetTitle)
sheet, sheetErr := getSheet.Do()
if sheetErr != nil {
result.WriteErrorResponseString(responseWriter, sheetErr.Error())
return
}
bytes, _ := json.Marshal(sheet)
result.WriteJSONResponse(responseWriter, bytes, http.StatusOK)
}
//UpdateSheetSize func
func UpdateSheetSize(responseWriter http.ResponseWriter, request *http.Request) {
var key = os.Getenv("CREDENTIAL_JSON")
decoder := json.NewDecoder(request.Body)
var argsdata ArgsData
decodeErr := decoder.Decode(&argsdata)
if decodeErr != nil {
result.WriteErrorResponse(responseWriter, decodeErr)
return
}
decodedJSON, decodeErr := base64.StdEncoding.DecodeString(key)
if decodeErr != nil {
result.WriteErrorResponse(responseWriter, decodeErr)
return
}
sheetConf, sheetConfErr := google.JWTConfigFromJSON(decodedJSON, SheetScope)
if sheetConfErr != nil {
result.WriteErrorResponseString(responseWriter, sheetConfErr.Error())
return
}
sheetClient := sheetConf.Client(context.TODO())
sheetService, sheetServiceErr := sheetsV4.New(sheetClient)
if sheetServiceErr != nil {
result.WriteErrorResponseString(responseWriter, sheetServiceErr.Error())
return
}
resizeValues := sheetsV4.BatchUpdateSpreadsheetRequest{
Requests: []*sheetsV4.Request{
&sheetsV4.Request{
AppendDimension: &sheetsV4.AppendDimensionRequest{
Length: argsdata.Row,
Dimension: "ROWS",
SheetId: argsdata.SheetID,
},
},
&sheetsV4.Request{
AppendDimension: &sheetsV4.AppendDimensionRequest{
Length: argsdata.Column,
Dimension: "COLUMNS",
SheetId: argsdata.SheetID,
},
},
},
}
resizeSheet := sheetService.Spreadsheets.BatchUpdate(argsdata.ID, &resizeValues)
_, sheetErr := resizeSheet.Do()
if sheetErr != nil {
result.WriteErrorResponseString(responseWriter, sheetErr.Error())
return
}
message := Message{Success: true, Message: "Updated sheet size successfully", StatusCode: http.StatusOK}
bytes, _ := json.Marshal(message)
result.WriteJSONResponse(responseWriter, bytes, http.StatusOK)
}
//UpdateCell func
func UpdateCell(responseWriter http.ResponseWriter, request *http.Request) {
var key = os.Getenv("CREDENTIAL_JSON")
decoder := json.NewDecoder(request.Body)
var argsdata ArgsData
decodeErr := decoder.Decode(&argsdata)
if decodeErr != nil {
result.WriteErrorResponse(responseWriter, decodeErr)
return
}
decodedJSON, decodeErr := base64.StdEncoding.DecodeString(key)
if decodeErr != nil {
result.WriteErrorResponse(responseWriter, decodeErr)
return
}
sheetConf, sheetConfErr := google.JWTConfigFromJSON(decodedJSON, SheetScope)
if sheetConfErr != nil {
result.WriteErrorResponseString(responseWriter, sheetConfErr.Error())
return
}
sheetClient := sheetConf.Client(context.TODO())
sheetService, sheetServiceErr := sheetsV4.New(sheetClient)
if sheetServiceErr != nil {
result.WriteErrorResponseString(responseWriter, sheetServiceErr.Error())
return
}
writeProp := sheetsV4.ValueRange{
MajorDimension: "ROWS",
Values: [][]interface{}{{argsdata.Content}},
}
writeSheet := sheetService.Spreadsheets.Values.Update(argsdata.ID, argsdata.SheetTitle+"!"+argsdata.CellNumber, &writeProp)
writeSheet.ValueInputOption("USER_ENTERED")
sheet, sheetErr := writeSheet.Do()
if sheetErr != nil {
result.WriteErrorResponseString(responseWriter, sheetErr.Error())
return
}
bytes, _ := json.Marshal(sheet)
result.WriteJSONResponse(responseWriter, bytes, http.StatusOK)
}
//DeleteSheet func
func DeleteSheet(responseWriter http.ResponseWriter, request *http.Request) {
var key = os.Getenv("CREDENTIAL_JSON")
decoder := json.NewDecoder(request.Body)
var argsdata ArgsData
decodeErr := decoder.Decode(&argsdata)
if decodeErr != nil {
result.WriteErrorResponse(responseWriter, decodeErr)
return
}
decodedJSON, decodeErr := base64.StdEncoding.DecodeString(key)
if decodeErr != nil {
result.WriteErrorResponse(responseWriter, decodeErr)
return
}
sheetConf, sheetConfErr := google.JWTConfigFromJSON(decodedJSON, SheetScope)
if sheetConfErr != nil {
result.WriteErrorResponseString(responseWriter, sheetConfErr.Error())
return
}
sheetClient := sheetConf.Client(context.TODO())
sheetService, sheetServiceErr := sheetsV4.New(sheetClient)
if sheetServiceErr != nil {
result.WriteErrorResponseString(responseWriter, sheetServiceErr.Error())
return
}
deleteProperties := sheetsV4.BatchUpdateSpreadsheetRequest{
Requests: []*sheetsV4.Request{
&sheetsV4.Request{
DeleteSheet: &sheetsV4.DeleteSheetRequest{
SheetId: argsdata.SheetID,
},
},
},
}
deleteSheet := sheetService.Spreadsheets.BatchUpdate(argsdata.ID, &deleteProperties)
_, sheetErr := deleteSheet.Do()
if sheetErr != nil {
result.WriteErrorResponseString(responseWriter, sheetErr.Error())
return
}
message := Message{true, "Sheet deleted successfully", http.StatusOK}
bytes, _ := json.Marshal(message)
result.WriteJSONResponse(responseWriter, bytes, http.StatusOK)
}
//SheetSubscribe func
func SheetSubscribe(responseWriter http.ResponseWriter, request *http.Request) {
var key = os.Getenv("CREDENTIAL_JSON")
decodedJSON, err := base64.StdEncoding.DecodeString(key)
if err != nil {
result.WriteErrorResponseString(responseWriter, err.Error())
return
}
decoder := json.NewDecoder(request.Body)
var sub Subscribe
decodeError := decoder.Decode(&sub)
if decodeError != nil {
result.WriteErrorResponseString(responseWriter, decodeError.Error())
return
}
sheetConf, sheetConfErr := google.JWTConfigFromJSON(decodedJSON, SheetScope)
if sheetConfErr != nil {
result.WriteErrorResponseString(responseWriter, sheetConfErr.Error())
return
}
sheetClient := sheetConf.Client(context.TODO())
sheetService, sheetServiceErr = sheetsV4.New(sheetClient)
if sheetServiceErr != nil {
result.WriteErrorResponseString(responseWriter, sheetServiceErr.Error())
return
}
Listener[sub.Data.SpreadsheetID] = sub
if !rtmStarted {
go SheetRTM()
rtmStarted = true
}
bytes, _ := json.Marshal("Subscribed")
result.WriteJSONResponse(responseWriter, bytes, http.StatusOK)
}
//SheetRTM func
func SheetRTM() {
isTest := false
for {
if len(Listener) > 0 {
for k, v := range Listener {
go getNewRowUpdate(k, v)
isTest = v.IsTesting
}
} else {
rtmStarted = false
break
}
time.Sleep(10 * time.Second)
if isTest {
break
}
}
}
func getNewRowUpdate(spreadsheetID string, sub Subscribe) {
subReturn.SpreadsheetID = spreadsheetID
subReturn.SheetTitle = sub.Data.SheetTitle
readSheet := sheetService.Spreadsheets.Values.Get(spreadsheetID, sub.Data.SheetTitle)
sheet, readSheetErr := readSheet.Do()
if readSheetErr != nil {
fmt.Println("Read Sheet error: ", readSheetErr)
return
}
currentRowCount := len(sheet.Values)
if currentRowCount > 0 {
sheetData := sheet.Values
columnHeading := sheetData[0]
for index, value := range columnHeading {
columnContent := fmt.Sprintf("%v", value)
if strings.EqualFold(columnContent, "twitter") {
twitterIndex = index + 1
letter := toCharStr(twitterIndex)
subReturn.TwitterCell = letter + strconv.FormatInt(int64(currentRowCount), 10)
}
}
if currentRowCount >= 2 {
list := sheet.Values
extractedList := list[currentRowCount-1]
for _, v := range extractedList {
columnContent := fmt.Sprintf("%v", v)
match, _ := regexp.MatchString("^\\w+([-+.']\\w+)*@[A-Za-z\\d]+\\.com$", columnContent)
if match {
subReturn.EmailAddress = columnContent
}
}
}
contentType := "application/json"
transport, err := cloudevents.NewHTTPTransport(cloudevents.WithTarget(sub.Endpoint), cloudevents.WithStructuredEncoding())
if err != nil {
fmt.Println("failed to create transport : ", err)
return
}
client, err := cloudevents.NewClient(transport, cloudevents.WithTimeNow())
if err != nil {
fmt.Println("failed to create client : ", err)
return
}
source, err := url.Parse(sub.Endpoint)
event := cloudevents.Event{
Context: cloudevents.EventContextV01{
EventID: sub.ID,
EventType: "listener",
Source: cloudevents.URLRef{URL: *source},
ContentType: &contentType,
}.AsV01(),
Data: subReturn,
}
if (oldRowCount == 0 || oldRowCount < currentRowCount) && currentRowCount >= 2 {
oldRowCount = currentRowCount
_, resp, err := client.Send(context.Background(), event)
if err != nil {
log.Printf("failed to send: %v", err)
}
subReturn.EmailAddress = ""
subReturn.SheetTitle = ""
subReturn.SpreadsheetID = ""
subReturn.TwitterCell = ""
fmt.Printf("Response: \n%s\n", resp)
}
}
}
func toCharStr(i int) string {
return string('A' - 1 + i)
}
|
["\"CREDENTIAL_JSON\"", "\"CREDENTIAL_JSON\"", "\"CREDENTIAL_JSON\"", "\"CREDENTIAL_JSON\"", "\"CREDENTIAL_JSON\"", "\"CREDENTIAL_JSON\"", "\"CREDENTIAL_JSON\"", "\"CREDENTIAL_JSON\""] | [] | ["CREDENTIAL_JSON"] | [] | ["CREDENTIAL_JSON"] | go | 1 | 0 | |
examples/oauthcheck/main.go
|
package main
import (
"fmt"
"os"
"github.com/zeroxx1986/flickr"
"github.com/zeroxx1986/flickr/auth/oauth"
)
func main() {
// retrieve Flickr credentials from env vars
apik := os.Getenv("FLICKRGO_API_KEY")
apisec := os.Getenv("FLICKRGO_API_SECRET")
token := os.Getenv("FLICKRGO_OAUTH_TOKEN")
// do not proceed if credentials were not provided
if apik == "" || apisec == "" || token == "" {
fmt.Fprintln(os.Stderr, "Please set FLICKRGO_API_KEY, FLICKRGO_API_SECRET "+
"and FLICKRGO_OAUTH_TOKEN env vars")
os.Exit(1)
}
// create an API client with credentials
client := flickr.NewFlickrClient(apik, apisec)
response, _ := oauth.CheckToken(client, token)
fmt.Println(fmt.Sprintf("%+v", *response))
}
|
["\"FLICKRGO_API_KEY\"", "\"FLICKRGO_API_SECRET\"", "\"FLICKRGO_OAUTH_TOKEN\""] | [] | ["FLICKRGO_API_SECRET", "FLICKRGO_OAUTH_TOKEN", "FLICKRGO_API_KEY"] | [] | ["FLICKRGO_API_SECRET", "FLICKRGO_OAUTH_TOKEN", "FLICKRGO_API_KEY"] | go | 3 | 0 | |
blur-mapred/src/test/java/org/apache/blur/mapreduce/lib/BlurInputFormatTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.blur.mapreduce.lib;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.TreeMap;
import java.util.UUID;
import org.apache.blur.MiniCluster;
import org.apache.blur.store.buffer.BufferStore;
import org.apache.blur.thirdparty.thrift_0_9_0.TException;
import org.apache.blur.thrift.BlurClient;
import org.apache.blur.thrift.generated.Blur.Iface;
import org.apache.blur.thrift.generated.BlurException;
import org.apache.blur.thrift.generated.Column;
import org.apache.blur.thrift.generated.ColumnDefinition;
import org.apache.blur.thrift.generated.Record;
import org.apache.blur.thrift.generated.RecordMutation;
import org.apache.blur.thrift.generated.RecordMutationType;
import org.apache.blur.thrift.generated.RowMutation;
import org.apache.blur.thrift.generated.TableDescriptor;
import org.apache.blur.utils.BlurConstants;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.SequenceFile.Reader;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
public class BlurInputFormatTest {
private static Configuration conf = new Configuration();
private static MiniCluster miniCluster;
@BeforeClass
public static void setupTest() throws Exception {
setupJavaHome();
File file = new File("./target/tmp/BlurInputFormatTest_tmp");
String pathStr = file.getAbsoluteFile().toURI().toString();
System.setProperty("test.build.data", pathStr + "/data");
System.setProperty("hadoop.log.dir", pathStr + "/hadoop_log");
miniCluster = new MiniCluster();
miniCluster.startBlurCluster(pathStr + "/blur", 2, 2);
miniCluster.startMrMiniCluster();
conf = miniCluster.getMRConfiguration();
BufferStore.initNewBuffer(128, 128 * 128);
}
public static void setupJavaHome() {
String str = System.getenv("JAVA_HOME");
if (str == null) {
String property = System.getProperty("java.home");
if (property != null) {
throw new RuntimeException("JAVA_HOME not set should probably be [" + property + "].");
}
throw new RuntimeException("JAVA_HOME not set.");
}
}
@AfterClass
public static void teardown() throws IOException {
if (miniCluster != null) {
miniCluster.stopMrMiniCluster();
}
rm(new File("build"));
}
private static void rm(File file) {
if (!file.exists()) {
return;
}
if (file.isDirectory()) {
for (File f : file.listFiles()) {
rm(f);
}
}
file.delete();
}
@Test
public void testBlurInputFormatFastDisabledNoFileCache() throws IOException, BlurException, TException,
ClassNotFoundException, InterruptedException {
String tableName = "testBlurInputFormatFastDisabledNoFileCache";
runTest(tableName, true, null);
}
@Test
public void testBlurInputFormatFastEnabledNoFileCache() throws IOException, BlurException, TException,
ClassNotFoundException, InterruptedException {
String tableName = "testBlurInputFormatFastEnabledNoFileCache";
runTest(tableName, false, null);
}
@Test
public void testBlurInputFormatFastDisabledFileCache() throws IOException, BlurException, TException,
ClassNotFoundException, InterruptedException {
String tableName = "testBlurInputFormatFastDisabledFileCache";
Path fileCache = new Path(miniCluster.getFileSystemUri() + "/filecache");
runTest(tableName, true, fileCache);
FileSystem fileSystem = miniCluster.getFileSystem();
// @TODO write some assertions.
// RemoteIterator<LocatedFileStatus> listFiles =
// fileSystem.listFiles(fileCache, true);
// while (listFiles.hasNext()) {
// LocatedFileStatus locatedFileStatus = listFiles.next();
// System.out.println(locatedFileStatus.getPath());
// }
}
@Test
public void testBlurInputFormatFastEnabledFileCache() throws IOException, BlurException, TException,
ClassNotFoundException, InterruptedException {
String tableName = "testBlurInputFormatFastEnabledFileCache";
Path fileCache = new Path(miniCluster.getFileSystemUri() + "/filecache");
runTest(tableName, false, fileCache);
FileSystem fileSystem = miniCluster.getFileSystem();
// @TODO write some assertions.
// RemoteIterator<LocatedFileStatus> listFiles =
// fileSystem.listFiles(fileCache, true);
// while (listFiles.hasNext()) {
// LocatedFileStatus locatedFileStatus = listFiles.next();
// System.out.println(locatedFileStatus.getPath());
// }
}
private void runTest(String tableName, boolean disableFast, Path fileCache) throws IOException, BlurException,
TException, InterruptedException, ClassNotFoundException {
FileSystem fileSystem = miniCluster.getFileSystem();
Path root = new Path(fileSystem.getUri() + "/");
creatTable(tableName, new Path(root, "tables"), disableFast);
loadTable(tableName, 100, 100);
Iface client = getClient();
TableDescriptor tableDescriptor = client.describe(tableName);
Job job = Job.getInstance(conf, "Read Data");
job.setJarByClass(BlurInputFormatTest.class);
job.setMapperClass(TestMapper.class);
job.setInputFormatClass(BlurInputFormat.class);
job.setOutputFormatClass(SequenceFileOutputFormat.class);
job.setNumReduceTasks(0);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(TableBlurRecord.class);
Path output = new Path(new Path(root, "output"), tableName);
String snapshot = UUID.randomUUID().toString();
client.createSnapshot(tableName, snapshot);
if (fileCache != null) {
BlurInputFormat.setLocalCachePath(job, fileCache);
}
BlurInputFormat.addTable(job, tableDescriptor, snapshot);
FileOutputFormat.setOutputPath(job, output);
try {
assertTrue(job.waitForCompletion(true));
} finally {
client.removeSnapshot(tableName, snapshot);
}
final Map<Text, TableBlurRecord> results = new TreeMap<Text, TableBlurRecord>();
walkOutput(output, conf, new ResultReader() {
@Override
public void read(Text rowId, TableBlurRecord tableBlurRecord) {
results.put(new Text(rowId), new TableBlurRecord(tableBlurRecord));
}
});
int rowId = 100;
for (Entry<Text, TableBlurRecord> e : results.entrySet()) {
Text r = e.getKey();
assertEquals(new Text("row-" + rowId), r);
BlurRecord blurRecord = new BlurRecord();
blurRecord.setRowId("row-" + rowId);
blurRecord.setRecordId("record-" + rowId);
blurRecord.setFamily("fam0");
blurRecord.addColumn("col0", "value-" + rowId);
TableBlurRecord tableBlurRecord = new TableBlurRecord(new Text(tableName), blurRecord);
assertEquals(tableBlurRecord, e.getValue());
rowId++;
}
assertEquals(200, rowId);
}
public interface ResultReader {
void read(Text rowId, TableBlurRecord tableBlurRecord);
}
private void walkOutput(Path output, Configuration conf, ResultReader resultReader) throws IOException {
FileSystem fileSystem = output.getFileSystem(conf);
FileStatus fileStatus = fileSystem.getFileStatus(output);
if (fileStatus.isDir()) {
FileStatus[] listStatus = fileSystem.listStatus(output, new PathFilter() {
@Override
public boolean accept(Path path) {
return !path.getName().startsWith("_");
}
});
for (FileStatus fs : listStatus) {
walkOutput(fs.getPath(), conf, resultReader);
}
} else {
Reader reader = new SequenceFile.Reader(fileSystem, output, conf);
Text rowId = new Text();
TableBlurRecord tableBlurRecord = new TableBlurRecord();
while (reader.next(rowId, tableBlurRecord)) {
resultReader.read(rowId, tableBlurRecord);
}
reader.close();
}
}
private Iface getClient() {
return BlurClient.getClientFromZooKeeperConnectionStr(miniCluster.getZkConnectionString());
}
private void loadTable(String tableName, int startId, int numb) throws BlurException, TException {
Iface client = getClient();
List<RowMutation> batch = new ArrayList<RowMutation>();
for (int i = 0; i < numb; i++) {
int id = startId + i;
RowMutation rowMutation = new RowMutation();
rowMutation.setTable(tableName);
rowMutation.setRowId("row-" + Integer.toString(id));
Record record = new Record();
record.setFamily("fam0");
record.setRecordId("record-" + id);
record.addToColumns(new Column("col0", "value-" + id));
rowMutation.addToRecordMutations(new RecordMutation(RecordMutationType.REPLACE_ENTIRE_RECORD, record));
batch.add(rowMutation);
}
client.mutateBatch(batch);
}
private void creatTable(String tableName, Path tables, boolean fastDisable) throws BlurException, TException {
Path tablePath = new Path(tables, tableName);
Iface client = getClient();
TableDescriptor tableDescriptor = new TableDescriptor();
tableDescriptor.setTableUri(tablePath.toString());
tableDescriptor.setName(tableName);
tableDescriptor.setShardCount(2);
tableDescriptor.putToTableProperties(BlurConstants.BLUR_TABLE_DISABLE_FAST_DIR, Boolean.toString(fastDisable));
client.createTable(tableDescriptor);
ColumnDefinition colDef = new ColumnDefinition();
colDef.setFamily("fam0");
colDef.setColumnName("col0");
colDef.setFieldType("string");
client.addColumnDefinition(tableName, colDef);
}
public static class TestMapper extends Mapper<Text, TableBlurRecord, Text, TableBlurRecord> {
@Override
protected void map(Text key, TableBlurRecord value, Context context) throws IOException, InterruptedException {
context.write(key, value);
}
}
}
|
["\"JAVA_HOME\""] | [] | ["JAVA_HOME"] | [] | ["JAVA_HOME"] | java | 1 | 0 | |
pkg/yara/scanner_test.go
|
//go:build yara
// +build yara
/*
* Copyright 2019-2020 by Nedim Sabic Sabic
* https://www.fibratus.io
* All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package yara
import (
"os"
"path/filepath"
"syscall"
"testing"
"time"
"github.com/hillu/go-yara/v4"
"github.com/rabbitstack/fibratus/pkg/kevent"
"github.com/rabbitstack/fibratus/pkg/kevent/ktypes"
"github.com/rabbitstack/fibratus/pkg/alertsender"
htypes "github.com/rabbitstack/fibratus/pkg/handle/types"
"github.com/rabbitstack/fibratus/pkg/kevent/kparams"
"github.com/rabbitstack/fibratus/pkg/pe"
"github.com/rabbitstack/fibratus/pkg/ps"
pstypes "github.com/rabbitstack/fibratus/pkg/ps/types"
"github.com/rabbitstack/fibratus/pkg/syscall/handle"
"github.com/rabbitstack/fibratus/pkg/syscall/process"
"github.com/rabbitstack/fibratus/pkg/yara/config"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
var yaraAlert *alertsender.Alert
type mockSender struct{}
func (s *mockSender) Send(a alertsender.Alert) error {
yaraAlert = &a
return nil
}
func makeSender(config alertsender.Config) (alertsender.Sender, error) {
return &mockSender{}, nil
}
func init() {
alertsender.Register(alertsender.Noop, makeSender)
}
func TestScan(t *testing.T) {
psnap := new(ps.SnapshotterMock)
require.NoError(t, alertsender.LoadAll([]alertsender.Config{{Type: alertsender.Noop}}))
s, err := NewScanner(psnap, config.Config{
Enabled: true,
AlertVia: "noop",
Rule: config.Rule{
Paths: []config.RulePath{
{
Namespace: "default",
Path: "_fixtures/rules",
},
},
},
})
require.NoError(t, err)
var si syscall.StartupInfo
var pi syscall.ProcessInformation
argv := syscall.StringToUTF16Ptr(filepath.Join(os.Getenv("windir"), "notepad.exe"))
err = syscall.CreateProcess(
nil,
argv,
nil,
nil,
true,
0,
nil,
nil,
&si,
&pi)
require.NoError(t, err)
defer syscall.TerminateProcess(pi.Process, uint32(257))
proc := &pstypes.PS{
Name: "notepad.exe",
PID: pi.ProcessId,
Ppid: 2434,
Exe: `C:\Windows\notepad.exe`,
Comm: `C:\Windows\notepad.exe`,
SID: "archrabbit\\SYSTEM",
Cwd: `C:\Windows\`,
SessionID: 1,
Threads: map[uint32]pstypes.Thread{
3453: {Tid: 3453, Entrypoint: kparams.Hex("0x7ffe2557ff80"), IOPrio: 2, PagePrio: 5, KstackBase: kparams.Hex("0xffffc307810d6000"), KstackLimit: kparams.Hex("0xffffc307810cf000"), UstackLimit: kparams.Hex("0x5260000"), UstackBase: kparams.Hex("0x525f000")},
3455: {Tid: 3455, Entrypoint: kparams.Hex("0x5efe2557ff80"), IOPrio: 3, PagePrio: 5, KstackBase: kparams.Hex("0xffffc307810d6000"), KstackLimit: kparams.Hex("0xffffc307810cf000"), UstackLimit: kparams.Hex("0x5260000"), UstackBase: kparams.Hex("0x525f000")},
},
Envs: map[string]string{"ProgramData": "C:\\ProgramData", "COMPUTRENAME": "archrabbit"},
Modules: []pstypes.Module{
{Name: "kernel32.dll", Size: 12354, Checksum: 23123343, BaseAddress: kparams.Hex("fff23fff"), DefaultBaseAddress: kparams.Hex("fff124fd")},
{Name: "user32.dll", Size: 212354, Checksum: 33123343, BaseAddress: kparams.Hex("fef23fff"), DefaultBaseAddress: kparams.Hex("fff124fd")},
},
Handles: []htypes.Handle{
{Num: handle.Handle(0xffffd105e9baaf70),
Name: `\REGISTRY\MACHINE\SYSTEM\ControlSet001\Services\Tcpip\Parameters\Interfaces\{b677c565-6ca5-45d3-b618-736b4e09b036}`,
Type: "Key",
Object: 777488883434455544,
Pid: uint32(1023),
},
{
Num: handle.Handle(0xffffd105e9adaf70),
Name: `\RPC Control\OLEA61B27E13E028C4EA6C286932E80`,
Type: "ALPC Port",
Pid: uint32(1023),
MD: &htypes.AlpcPortInfo{
Seqno: 1,
Context: 0x0,
Flags: 0x0,
},
Object: 457488883434455544,
},
{
Num: handle.Handle(0xeaffd105e9adaf30),
Name: `C:\Users\bunny`,
Type: "File",
Pid: uint32(1023),
MD: &htypes.FileInfo{
IsDirectory: true,
},
Object: 357488883434455544,
},
},
PE: &pe.PE{
NumberOfSections: 2,
NumberOfSymbols: 10,
EntryPoint: "0x20110",
ImageBase: "0x140000000",
LinkTime: time.Now(),
Sections: []pe.Sec{
{Name: ".text", Size: 132608, Entropy: 6.368381, Md5: "db23dce3911a42e987041d98abd4f7cd"},
{Name: ".rdata", Size: 35840, Entropy: 5.996976, Md5: "ffa5c960b421ca9887e54966588e97e8"},
},
Symbols: []string{"SelectObject", "GetTextFaceW", "EnumFontsW", "TextOutW", "GetProcessHeap"},
Imports: []string{"GDI32.dll", "USER32.dll", "msvcrt.dll", "api-ms-win-core-libraryloader-l1-2-0.dl"},
VersionResources: map[string]string{"CompanyName": "Microsoft Corporation", "FileDescription": "Notepad", "FileVersion": "10.0.18362.693"},
},
}
psnap.On("Find", mock.Anything).Return(proc)
for {
if process.IsAlive(handle.Handle(pi.Process)) {
break
}
time.Sleep(time.Millisecond * 100)
}
kevt := &kevent.Kevent{
Type: ktypes.CreateProcess,
Name: "CreateProcess",
Tid: 2484,
PID: 859,
Kparams: kevent.Kparams{
kparams.ProcessName: {Name: kparams.ProcessName, Type: kparams.UnicodeString, Value: "svchost.exe"},
},
Metadata: make(map[string]string),
}
// test attaching on pid
require.NoError(t, s.ScanProc(pi.ProcessId, kevt))
require.NotNil(t, yaraAlert)
assert.Equal(t, "YARA alert on process notepad.exe", yaraAlert.Title)
assert.NotEmpty(t, yaraAlert.Text)
assert.Contains(t, yaraAlert.Tags, "notepad")
// test file scanning on DLL that merely contains
// the fmt.Println("Go Yara DLL Test") statement
require.NoError(t, s.ScanFile("_fixtures/yara-test.dll", kevt))
require.NotNil(t, yaraAlert)
assert.Equal(t, "YARA alert on file _fixtures/yara-test.dll", yaraAlert.Title)
assert.Contains(t, yaraAlert.Tags, "dll")
}
func TestMatchesMeta(t *testing.T) {
yaraMatches := []yara.MatchRule{
{Rule: "test", Namespace: "ns1"},
{Rule: "test2", Namespace: "ns2", Tags: []string{"dropper"}, Metas: []yara.Meta{{Identifier: "author", Value: "rabbit"}}},
}
kevt := &kevent.Kevent{
Type: ktypes.CreateProcess,
Name: "CreateProcess",
Tid: 2484,
PID: 859,
Kparams: kevent.Kparams{
kparams.ProcessName: {Name: kparams.ProcessName, Type: kparams.UnicodeString, Value: "svchost.exe"},
},
Metadata: make(map[string]string),
}
assert.Empty(t, kevt.Metadata)
putMatchesMeta(yaraMatches, kevt)
assert.NotEmpty(t, kevt.Metadata)
assert.Contains(t, kevt.Metadata, matchesMeta)
}
|
["\"windir\""] | [] | ["windir"] | [] | ["windir"] | go | 1 | 0 | |
test/torch_ucc_test_setup.py
|
#
# Copyright (C) Mellanox Technologies Ltd. 2001-2021.
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import torch
import torch.distributed as dist
import torch_ucc
import sys
import os
from datetime import timedelta
def parse_test_args():
parser = argparse.ArgumentParser(description="PG UCC Test")
parser.add_argument("--backend", type=str, default='mpi')
parser.add_argument("--use-cuda", default=False, action='store_true')
parser.add_argument("--enable-prof",default=False, action='store_true')
args = parser.parse_args()
if args.use_cuda and not torch.cuda.is_available():
print("CUDA is not available")
sys.exit(0)
return args
def get_tensor(count, is_cuda):
dev = torch.device('cuda') if is_cuda else torch.device('cpu')
t = torch.randint(0, 100, (count,), dtype=torch.int, device=dev)
return t
def init_process_groups(bend, use_cuda, to=timedelta(seconds=60)):
try:
comm_size = int(os.environ['OMPI_COMM_WORLD_SIZE'])
comm_rank = int(os.environ['OMPI_COMM_WORLD_RANK'])
local_rank = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
except:
print('OMPI env variables are not found')
sys.exit(1)
if use_cuda:
torch.cuda.set_device(local_rank)
os.environ['MASTER_PORT'] = '32167'
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['RANK'] = str(comm_rank)
os.environ['WORLD_SIZE'] = str(comm_size)
dist.init_process_group('ucc', rank=comm_rank, world_size=comm_size, timeout=to)
pg = dist.new_group(backend=bend)
return pg
def check_tensor_equal(t1, t2):
if torch.all(torch.eq(t1, t2)):
return torch.tensor(1, device=t1.device)
else:
print("failed on rank {}".format(os.environ['RANK']))
return torch.tensor(0, device=t1.device)
def check_tensor_list_equal(t1, t2):
num_tensors = len(t1)
for i in range(num_tensors):
if not torch.all(torch.eq(t1[i], t2[i])):
return torch.tensor(0, device=t1[i].device)
return torch.tensor(1, device=t1[i].device)
def print_test_head(test_name, comm_rank):
if comm_rank == 0:
print("{} test".format(test_name))
print("{0:20} {1}".format("count", "result"))
def print_test_result(status, count, comm_rank, comm_size):
if comm_rank == 0:
result = "OK" if status == comm_size else "Failed"
print("{0:20} {1}".format(str(count), result))
if status != comm_size:
sys.exit(1)
def do_compute(t):
return torch.topk(t, t.size()[0])[0]
|
[] | [] | ["MASTER_ADDR", "MASTER_PORT", "OMPI_COMM_WORLD_LOCAL_RANK", "OMPI_COMM_WORLD_SIZE", "OMPI_COMM_WORLD_RANK", "RANK", "WORLD_SIZE"] | [] | ["MASTER_ADDR", "MASTER_PORT", "OMPI_COMM_WORLD_LOCAL_RANK", "OMPI_COMM_WORLD_SIZE", "OMPI_COMM_WORLD_RANK", "RANK", "WORLD_SIZE"] | python | 7 | 0 | |
main.go
|
package main
import (
"context"
"os"
"github.com/alcarria/data-marketplace-chaincode-rest/controller"
"github.com/alcarria/data-marketplace-chaincode-rest/rest"
"github.com/alcarria/data-marketplace-chaincode-rest/utils"
)
func main() {
ctx := context.Background()
logger := utils.CreateLogger("data-marketplace-chaincode-rest")
config, err := utils.LoadConfig()
if err != nil {
logger.Printf("no-port-specified-using-default-9090")
config.Port = 9090
}
fabricSetup := controller.FabricSetup{
// Network parameters
//OrdererID: "orderer.example.com",
//OrdererURL: "blockchain-orderer:31010",
OrdererID: "orderer.example.com",
OrdererURL: "orderer.example.com:7050",
// Channel parameters
//ChannelID: "dmp",
//ChannelConfig: "/shared/dmp.tx",
ChannelID: "mychannel",
ChannelConfig: "/channel-artifacts/channel.tx",
// Chaincode parameters
//ChainCodeID: "dmp",
//ChaincodeGoPath: os.Getenv("GOPATH"),
//ChaincodePath: "github.com/alcarria/data-marketplace-chaincode",
//OrgAdmin: "Admin",
//OrgName: "Org1",
//ConfigFile: "/shared/artifacts/config.yaml",
//UserName: "Admin",
ChainCodeID: "data-marketplace-chaincode",
ChaincodeGoPath: os.Getenv("GOPATH"),
ChaincodePath: "github.com/alcarria/data-marketplace-chaincode",
OrgAdmin: "Admin",
OrgName: "Org1",
ConfigFile: "/shared/artifacts/config.yaml",
UserName: "Admin",
}
// Initialization of the Fabric SDK from the previously set properties
// err = fSDKSetup.Initialize()
// if err != nil {
// fmt.Printf("Unable to initialize the Fabric SDK: %v\n", err)
// panic("Unable to initialize the Fabric SDK")
// }
controller := controller.NewPeerController(ctx, logger, fabricSetup)
handler := rest.NewCCHandler(ctx, logger, controller)
server := rest.NewCCServer(ctx, logger, handler, config)
logger.Printf("starting-server", server.Start())
}
|
["\"GOPATH\"", "\"GOPATH\""] | [] | ["GOPATH"] | [] | ["GOPATH"] | go | 1 | 0 | |
commands/kubeapi/auth.go
|
package kubeapi
/*
Copyright 2017 - 2019 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import (
"flag"
"path/filepath"
"os"
log "github.com/sirupsen/logrus"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/rest"
)
func GetClientConfig (oocFlag bool, namespaceFlag string)(* kubernetes.Clientset, string, error) {
var kubeconfig *string
var config *rest.Config
var err error
namespace := getNamespace(oocFlag, namespaceFlag) // this may call os.Exit(non-zero)
if !oocFlag {
config, err = rest.InClusterConfig()
if err != nil {
log.Error(err.Error())
log.Info("If running outside of container, use [ -r | --remote ] flag")
os.Exit(-1)
}
} else if home := homeDir(); home != "" {
kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "")
// use the current context in kubeconfig
config, err = clientcmd.BuildConfigFromFlags("", *kubeconfig)
if err != nil {
panic(err.Error())
}
} else {
panic("Unable to obtain a cluster configuration. Exiting.")
}
flag.Parse()
// create the clientset
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
panic(err.Error())
}
return clientset, namespace, err
}
func homeDir() string {
if h := os.Getenv("HOME"); h != "" {
return h
}
return os.Getenv("USERPROFILE") // windows
}
func getNamespace(outOfContainer bool, namespaceFlag string) string {
if namespaceFlag != "" {
return namespaceFlag
}
if ns := os.Getenv("CCP_NAMESPACE"); ns != "" || outOfContainer {
return ns
}
log.Error("CCP_NAMESPACE must be set.")
// if namespace not set, exit
os.Exit(-1)
return "" // make compiler happy - never executed
}
|
["\"HOME\"", "\"USERPROFILE\"", "\"CCP_NAMESPACE\""] | [] | ["HOME", "CCP_NAMESPACE", "USERPROFILE"] | [] | ["HOME", "CCP_NAMESPACE", "USERPROFILE"] | go | 3 | 0 | |
pkg/storage/storage.go
|
package storage
import (
"context"
"errors"
"fmt"
"math/big"
"os"
"path/filepath"
"runtime"
"sort"
"sync"
"time"
"github.com/dgraph-io/badger/v2"
"github.com/dgraph-io/badger/v2/options"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/sirupsen/logrus"
"github.com/pyroscope-io/pyroscope/pkg/config"
"github.com/pyroscope-io/pyroscope/pkg/flameql"
"github.com/pyroscope-io/pyroscope/pkg/storage/cache"
"github.com/pyroscope-io/pyroscope/pkg/storage/dict"
"github.com/pyroscope-io/pyroscope/pkg/storage/dimension"
"github.com/pyroscope-io/pyroscope/pkg/storage/labels"
"github.com/pyroscope-io/pyroscope/pkg/storage/segment"
"github.com/pyroscope-io/pyroscope/pkg/storage/tree"
"github.com/pyroscope-io/pyroscope/pkg/structs/merge"
"github.com/pyroscope-io/pyroscope/pkg/util/bytesize"
"github.com/pyroscope-io/pyroscope/pkg/util/disk"
"github.com/pyroscope-io/pyroscope/pkg/util/slices"
)
var (
errOutOfSpace = errors.New("running out of space")
errRetention = errors.New("could not write because of retention settings")
evictInterval = 20 * time.Second
writeBackInterval = time.Second
retentionInterval = time.Minute
badgerGCInterval = 5 * time.Minute
)
type Storage struct {
putMutex sync.Mutex
config *config.Server
segments *cache.Cache
dimensions *cache.Cache
dicts *cache.Cache
trees *cache.Cache
labels *labels.Labels
db *badger.DB
dbTrees *badger.DB
dbDicts *badger.DB
dbDimensions *badger.DB
dbSegments *badger.DB
localProfilesDir string
stop chan struct{}
wg sync.WaitGroup
// prometheus metrics
storageWritesTotal prometheus.Counter
writeBackTotal prometheus.Counter
evictionsTotal prometheus.Counter
retentionCount prometheus.Counter
storageReadsTotal prometheus.Counter
evictionsAllocBytes prometheus.Gauge
evictionsTotalBytes prometheus.Gauge
storageCachesFlushTimer prometheus.Histogram
storageBadgerCloseTimer prometheus.Histogram
evictionsTimer prometheus.Histogram
writeBackTimer prometheus.Histogram
retentionTimer prometheus.Histogram
}
func (s *Storage) newBadger(name string) (*badger.DB, error) {
badgerPath := filepath.Join(s.config.StoragePath, name)
err := os.MkdirAll(badgerPath, 0o755)
if err != nil {
return nil, err
}
badgerOptions := badger.DefaultOptions(badgerPath)
badgerOptions = badgerOptions.WithTruncate(!s.config.BadgerNoTruncate)
badgerOptions = badgerOptions.WithSyncWrites(false)
badgerOptions = badgerOptions.WithCompactL0OnClose(false)
badgerOptions = badgerOptions.WithCompression(options.ZSTD)
badgerLevel := logrus.ErrorLevel
if l, err := logrus.ParseLevel(s.config.BadgerLogLevel); err == nil {
badgerLevel = l
}
badgerOptions = badgerOptions.WithLogger(badgerLogger{name: name, logLevel: badgerLevel})
db, err := badger.Open(badgerOptions)
if err != nil {
return nil, err
}
s.wg.Add(1)
go s.periodicTask(badgerGCInterval, s.badgerGCTask(db))
return db, nil
}
func New(c *config.Server, reg prometheus.Registerer) (*Storage, error) {
s := &Storage{
config: c,
stop: make(chan struct{}),
localProfilesDir: filepath.Join(c.StoragePath, "local-profiles"),
storageWritesTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "pyroscope_storage_writes_total",
Help: "number of calls to storage.Put",
}),
storageReadsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "pyroscope_storage_reads_total",
Help: "number of calls to storage.Get",
}),
// Evictions
evictionsTimer: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
Name: "pyroscope_storage_evictions_duration_seconds",
Help: "duration of evictions (triggered when there's memory pressure)",
Buckets: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10},
}),
// The following 2 metrics are somewhat broad
// Nevertheless they are still useful to grasp evictions
evictionsAllocBytes: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "pyroscope_storage_evictions_alloc_bytes",
Help: "number of bytes allocated in the heap",
}),
evictionsTotalBytes: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "pyroscope_storage_evictions_total_mem_bytes",
Help: "total number of memory bytes",
}),
storageCachesFlushTimer: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
Name: "pyroscope_storage_caches_flush_duration_seconds",
Help: "duration of storage caches flush (triggered when server is closing)",
Buckets: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10},
}),
storageBadgerCloseTimer: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
Name: "pyroscope_storage_db_close_duration_seconds",
Help: "duration of db close (triggered when server is closing)",
Buckets: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10},
}),
writeBackTimer: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
Name: "pyroscope_storage_writeback_duration_seconds",
Help: "duration of write-back writes (triggered periodically)",
Buckets: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10},
}),
retentionTimer: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{
Name: "pyroscope_storage_retention_duration_seconds",
Help: "duration of old data deletion",
// TODO what buckets to use here?
Buckets: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10},
}),
}
var err error
s.db, err = s.newBadger("main")
if err != nil {
return nil, err
}
s.labels = labels.New(s.db)
s.dbTrees, err = s.newBadger("trees")
if err != nil {
return nil, err
}
s.dbDicts, err = s.newBadger("dicts")
if err != nil {
return nil, err
}
s.dbDimensions, err = s.newBadger("dimensions")
if err != nil {
return nil, err
}
s.dbSegments, err = s.newBadger("segments")
if err != nil {
return nil, err
}
if err = os.MkdirAll(s.localProfilesDir, 0o755); err != nil {
return nil, err
}
hitCounterMetrics := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "pyroscope_storage_cache_hits_total",
Help: "total number of cache hits",
}, []string{"name"})
missCounterMetrics := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "pyroscope_storage_cache_misses_total",
Help: "total number of cache misses",
}, []string{"name"})
storageReadCounterMetrics := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "pyroscope_storage_cache_reads_total",
Help: "total number of cache queries",
}, []string{"name"})
writesToDiskCounterMetrics := promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "pyroscope_storage_cache_persisted_total",
Help: "number of items persisted from cache to disk",
}, []string{"name"})
s.dimensions = cache.New(s.dbDimensions, "i:", &cache.Metrics{
HitCounter: hitCounterMetrics.With(prometheus.Labels{"name": "dimensions"}),
MissCounter: missCounterMetrics.With(prometheus.Labels{"name": "dimensions"}),
ReadCounter: storageReadCounterMetrics.With(prometheus.Labels{"name": "dimensions"}),
WritesToDiskCounter: writesToDiskCounterMetrics.With(prometheus.Labels{"name": "dimensions"}),
})
s.dimensions.Bytes = func(k string, v interface{}) ([]byte, error) {
return v.(*dimension.Dimension).Bytes()
}
s.dimensions.FromBytes = func(k string, v []byte) (interface{}, error) {
return dimension.FromBytes(v)
}
s.dimensions.New = func(k string) interface{} {
return dimension.New()
}
s.segments = cache.New(s.dbSegments, "s:", &cache.Metrics{
HitCounter: hitCounterMetrics.With(prometheus.Labels{"name": "segments"}),
MissCounter: missCounterMetrics.With(prometheus.Labels{"name": "segments"}),
ReadCounter: storageReadCounterMetrics.With(prometheus.Labels{"name": "segments"}),
WritesToDiskCounter: writesToDiskCounterMetrics.With(prometheus.Labels{"name": "segments"}),
})
s.segments.Bytes = func(k string, v interface{}) ([]byte, error) {
return v.(*segment.Segment).Bytes()
}
s.segments.FromBytes = func(k string, v []byte) (interface{}, error) {
// TODO:
// these configuration params should be saved in db when it initializes
return segment.FromBytes(v)
}
s.segments.New = func(k string) interface{} {
return segment.New()
}
s.dicts = cache.New(s.dbDicts, "d:", &cache.Metrics{
HitCounter: hitCounterMetrics.With(prometheus.Labels{"name": "dicts"}),
MissCounter: missCounterMetrics.With(prometheus.Labels{"name": "dicts"}),
ReadCounter: storageReadCounterMetrics.With(prometheus.Labels{"name": "dicts"}),
WritesToDiskCounter: writesToDiskCounterMetrics.With(prometheus.Labels{"name": "dicts"}),
})
s.dicts.Bytes = func(k string, v interface{}) ([]byte, error) {
return v.(*dict.Dict).Bytes()
}
s.dicts.FromBytes = func(k string, v []byte) (interface{}, error) {
return dict.FromBytes(v)
}
s.dicts.New = func(k string) interface{} {
return dict.New()
}
s.trees = cache.New(s.dbTrees, "t:", &cache.Metrics{
HitCounter: hitCounterMetrics.With(prometheus.Labels{"name": "trees"}),
MissCounter: missCounterMetrics.With(prometheus.Labels{"name": "trees"}),
ReadCounter: storageReadCounterMetrics.With(prometheus.Labels{"name": "trees"}),
WritesToDiskCounter: writesToDiskCounterMetrics.With(prometheus.Labels{"name": "trees"}),
})
s.trees.Bytes = s.treeBytes
s.trees.FromBytes = s.treeFromBytes
s.trees.New = func(k string) interface{} {
return tree.New()
}
memTotal, err := getMemTotal()
if err != nil {
return nil, err
}
s.wg.Add(2)
go s.periodicTask(evictInterval, s.evictionTask(memTotal))
go s.periodicTask(writeBackInterval, s.writeBackTask)
if s.config.Retention > 0 {
s.wg.Add(1)
go s.periodicTask(retentionInterval, s.retentionTask)
}
if err = s.migrate(); err != nil {
return nil, err
}
return s, nil
}
type PutInput struct {
StartTime time.Time
EndTime time.Time
Key *segment.Key
Val *tree.Tree
SpyName string
SampleRate uint32
Units string
AggregationType string
}
func (s *Storage) treeFromBytes(k string, v []byte) (interface{}, error) {
key := segment.FromTreeToDictKey(k)
d, err := s.dicts.GetOrCreate(key)
if err != nil {
return nil, fmt.Errorf("dicts cache for %v: %v", key, err)
}
return tree.FromBytes(d.(*dict.Dict), v)
}
func (s *Storage) treeBytes(k string, v interface{}) ([]byte, error) {
key := segment.FromTreeToDictKey(k)
d, err := s.dicts.GetOrCreate(key)
if err != nil {
return nil, fmt.Errorf("dicts cache for %v: %v", key, err)
}
b, err := v.(*tree.Tree).Bytes(d.(*dict.Dict), s.config.MaxNodesSerialization)
if err != nil {
return nil, fmt.Errorf("dicts cache for %v: %v", key, err)
}
s.dicts.Put(key, d)
return b, nil
}
var OutOfSpaceThreshold = 512 * bytesize.MB
func (s *Storage) Put(po *PutInput) error {
// TODO: This is a pretty broad lock. We should find a way to make these locks more selective.
s.putMutex.Lock()
defer s.putMutex.Unlock()
if err := s.performFreeSpaceCheck(); err != nil {
return err
}
if po.StartTime.Before(s.lifetimeBasedRetentionThreshold()) {
return errRetention
}
logrus.WithFields(logrus.Fields{
"startTime": po.StartTime.String(),
"endTime": po.EndTime.String(),
"key": po.Key.Normalized(),
"samples": po.Val.Samples(),
"units": po.Units,
"aggregationType": po.AggregationType,
}).Debug("storage.Put")
s.storageWritesTotal.Add(1.0)
for k, v := range po.Key.Labels() {
s.labels.Put(k, v)
}
sk := po.Key.SegmentKey()
for k, v := range po.Key.Labels() {
key := k + ":" + v
r, err := s.dimensions.GetOrCreate(key)
if err != nil {
logrus.Errorf("dimensions cache for %v: %v", key, err)
continue
}
r.(*dimension.Dimension).Insert([]byte(sk))
s.dimensions.Put(key, r)
}
r, err := s.segments.GetOrCreate(sk)
if err != nil {
return fmt.Errorf("segments cache for %v: %v", sk, err)
}
st := r.(*segment.Segment)
st.SetMetadata(po.SpyName, po.SampleRate, po.Units, po.AggregationType)
samples := po.Val.Samples()
st.Put(po.StartTime, po.EndTime, samples, func(depth int, t time.Time, r *big.Rat, addons []segment.Addon) {
tk := po.Key.TreeKey(depth, t)
res, err := s.trees.GetOrCreate(tk)
if err != nil {
logrus.Errorf("trees cache for %v: %v", tk, err)
return
}
cachedTree := res.(*tree.Tree)
treeClone := po.Val.Clone(r)
for _, addon := range addons {
if res, ok := s.trees.Lookup(po.Key.TreeKey(addon.Depth, addon.T)); ok {
ta := res.(*tree.Tree)
ta.RLock()
treeClone.Merge(ta)
ta.RUnlock()
}
}
cachedTree.Lock()
cachedTree.Merge(treeClone)
cachedTree.Unlock()
s.trees.Put(tk, cachedTree)
})
s.segments.Put(sk, st)
return nil
}
type GetInput struct {
StartTime time.Time
EndTime time.Time
Key *segment.Key
Query *flameql.Query
}
type GetOutput struct {
Tree *tree.Tree
Timeline *segment.Timeline
SpyName string
SampleRate uint32
Units string
}
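// Get is the read path: it resolves the matching dimension keys, walks the
// corresponding segments over [StartTime, EndTime], clones the relevant trees,
// and merges them into a single result tree (rescaled when averaging).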
func (s *Storage) Get(gi *GetInput) (*GetOutput, error) {
logger := logrus.WithFields(logrus.Fields{
"startTime": gi.StartTime.String(),
"endTime": gi.EndTime.String(),
})
var dimensionKeys func() []dimension.Key
switch {
case gi.Key != nil:
logger = logger.WithField("key", gi.Key.Normalized())
dimensionKeys = s.dimensionKeysByKey(gi.Key)
case gi.Query != nil:
logger = logger.WithField("query", gi.Query)
dimensionKeys = s.dimensionKeysByQuery(gi.Query)
default:
// Should never happen.
return nil, fmt.Errorf("key or query must be specified")
}
logger.Debug("storage.Get")
s.storageReadsTotal.Add(1)
var (
triesToMerge []merge.Merger
lastSegment *segment.Segment
writesTotal uint64
timeline = segment.GenerateTimeline(gi.StartTime, gi.EndTime)
aggregationType = "sum"
)
for _, k := range dimensionKeys() {
// TODO: refactor, store `Key`s in dimensions
parsedKey, err := segment.ParseKey(string(k))
if err != nil {
logrus.Errorf("parse key: %v: %v", string(k), err)
continue
}
key := parsedKey.SegmentKey()
res, ok := s.segments.Lookup(key)
if !ok {
continue
}
st := res.(*segment.Segment)
if st.AggregationType() == "average" {
aggregationType = "average"
}
timeline.PopulateTimeline(st)
lastSegment = st
st.Get(gi.StartTime, gi.EndTime, func(depth int, samples, writes uint64, t time.Time, r *big.Rat) {
if res, ok = s.trees.Lookup(parsedKey.TreeKey(depth, t)); ok {
triesToMerge = append(triesToMerge, res.(*tree.Tree).Clone(r))
writesTotal += writes
}
})
}
resultTrie := merge.MergeTriesSerially(runtime.NumCPU(), triesToMerge...)
if resultTrie == nil {
return nil, nil
}
t := resultTrie.(*tree.Tree)
if writesTotal > 0 && aggregationType == "average" {
t = t.Clone(big.NewRat(1, int64(writesTotal)))
}
return &GetOutput{
Tree: t,
Timeline: timeline,
SpyName: lastSegment.SpyName(),
SampleRate: lastSegment.SampleRate(),
Units: lastSegment.Units(),
}, nil
}
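// dimensionKeysByKey resolves a key to segment keys by intersecting the
// application dimension with one dimension per (tag, value) pair of the key.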
func (s *Storage) dimensionKeysByKey(key *segment.Key) func() []dimension.Key {
return func() []dimension.Key {
d, ok := s.lookupAppDimension(key.AppName())
if !ok {
return nil
}
l := key.Labels()
if len(l) == 1 {
// No tags specified: return application dimension keys.
return d.Keys
}
dimensions := []*dimension.Dimension{d}
for k, v := range l {
if flameql.IsTagKeyReserved(k) {
continue
}
if d, ok = s.lookupDimensionKV(k, v); ok {
dimensions = append(dimensions, d)
}
}
if len(dimensions) == 1 {
// Tags specified but not found.
return nil
}
return dimension.Intersection(dimensions...)
}
}
func (s *Storage) dimensionKeysByQuery(qry *flameql.Query) func() []dimension.Key {
return func() []dimension.Key { return s.exec(context.TODO(), qry) }
}
func (s *Storage) iterateOverAllSegments(cb func(*segment.Key, *segment.Segment) error) error {
nameKey := "__name__"
var dimensions []*dimension.Dimension
s.labels.GetValues(nameKey, func(v string) bool {
dmInt, ok := s.dimensions.Lookup(nameKey + ":" + v)
if !ok {
return true
}
dimensions = append(dimensions, dmInt.(*dimension.Dimension))
return true
})
for _, rawSk := range dimension.Union(dimensions...) {
sk, _ := segment.ParseKey(string(rawSk))
stInt, ok := s.segments.Lookup(sk.SegmentKey())
if !ok {
continue
}
st := stInt.(*segment.Segment)
if err := cb(sk, st); err != nil {
return err
}
}
return nil
}
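// DeleteDataBefore drops tree nodes older than the threshold from every segment;
// when a segment's root is deleted, the segment and its related data are removed too.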
func (s *Storage) DeleteDataBefore(threshold time.Time) error {
return s.iterateOverAllSegments(func(sk *segment.Key, st *segment.Segment) error {
var err error
deletedRoot := st.DeleteDataBefore(threshold, func(depth int, t time.Time) {
tk := sk.TreeKey(depth, t)
if delErr := s.trees.Delete(tk); delErr != nil {
err = delErr
}
})
if err != nil {
return err
}
if deletedRoot {
s.deleteSegmentAndRelatedData(sk)
}
return nil
})
}
type DeleteInput struct {
Key *segment.Key
}
var maxTime = time.Unix(1<<62, 999999999)
func (s *Storage) Delete(di *DeleteInput) error {
var dimensions []*dimension.Dimension
for k, v := range di.Key.Labels() {
dInt, ok := s.dimensions.Lookup(k + ":" + v)
if !ok {
return nil
}
dimensions = append(dimensions, dInt.(*dimension.Dimension))
}
for _, sk := range dimension.Intersection(dimensions...) {
skk, _ := segment.ParseKey(string(sk))
stInt, ok := s.segments.Lookup(skk.SegmentKey())
if !ok {
continue
}
st := stInt.(*segment.Segment)
var err error
st.Get(zeroTime, maxTime, func(depth int, _, _ uint64, t time.Time, _ *big.Rat) {
treeKey := skk.TreeKey(depth, t)
err = s.trees.Delete(treeKey)
})
if err != nil {
return err
}
s.deleteSegmentAndRelatedData(skk)
}
return nil
}
func (s *Storage) deleteSegmentAndRelatedData(key *segment.Key) error {
s.dicts.Delete(key.DictKey())
s.segments.Delete(key.SegmentKey())
for k, v := range key.Labels() {
dInt, ok := s.dimensions.Lookup(k + ":" + v)
if !ok {
continue
}
d := dInt.(*dimension.Dimension)
d.Delete(dimension.Key(key.SegmentKey()))
}
return nil
}
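// Close stops the periodic tasks, flushes the caches (dictionaries last, since
// trees write to them), and then closes the underlying badger databases.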
func (s *Storage) Close() error {
close(s.stop)
s.wg.Wait()
func() {
timer := prometheus.NewTimer(prometheus.ObserverFunc(s.storageCachesFlushTimer.Observe))
defer timer.ObserveDuration()
wg := sync.WaitGroup{}
wg.Add(3)
go func() { defer wg.Done(); s.dimensions.Flush() }()
go func() { defer wg.Done(); s.segments.Flush() }()
go func() { defer wg.Done(); s.trees.Flush() }()
wg.Wait()
// dictionary has to flush last because trees write to dictionaries
s.dicts.Flush()
}()
func() {
timer := prometheus.NewTimer(prometheus.ObserverFunc(s.storageBadgerCloseTimer.Observe))
defer timer.ObserveDuration()
wg := sync.WaitGroup{}
wg.Add(5)
go func() { defer wg.Done(); s.dbTrees.Close() }()
go func() { defer wg.Done(); s.dbDicts.Close() }()
go func() { defer wg.Done(); s.dbDimensions.Close() }()
go func() { defer wg.Done(); s.dbSegments.Close() }()
go func() { defer wg.Done(); s.db.Close() }()
wg.Wait()
}()
// this allows prometheus to collect metrics before pyroscope exits
if os.Getenv("PYROSCOPE_WAIT_AFTER_STOP") != "" {
time.Sleep(5 * time.Second)
}
return nil
}
func (s *Storage) GetKeys(cb func(_k string) bool) {
s.labels.GetKeys(cb)
}
func (s *Storage) GetValues(key string, cb func(v string) bool) {
s.labels.GetValues(key, func(v string) bool {
if key != "__name__" || !slices.StringContains(s.config.HideApplications, v) {
return cb(v)
}
return true
})
}
func (s *Storage) GetKeysByQuery(query string, cb func(_k string) bool) error {
parsedQuery, err := flameql.ParseQuery(query)
if err != nil {
return err
}
segmentKey, err := segment.ParseKey(parsedQuery.AppName + "{}")
if err != nil {
return err
}
dimensionKeys := s.dimensionKeysByKey(segmentKey)
resultSet := map[string]bool{}
for _, dk := range dimensionKeys() {
dkParsed, _ := segment.ParseKey(string(dk))
if dkParsed.AppName() == parsedQuery.AppName {
for k := range dkParsed.Labels() {
resultSet[k] = true
}
}
}
resultList := []string{}
for v := range resultSet {
resultList = append(resultList, v)
}
sort.Strings(resultList)
for _, v := range resultList {
if !cb(v) {
break
}
}
return nil
}
func (s *Storage) GetValuesByQuery(label string, query string, cb func(v string) bool) error {
parsedQuery, err := flameql.ParseQuery(query)
if err != nil {
return err
}
segmentKey, err := segment.ParseKey(parsedQuery.AppName + "{}")
if err != nil {
return err
}
dimensionKeys := s.dimensionKeysByKey(segmentKey)
resultSet := map[string]bool{}
for _, dk := range dimensionKeys() {
dkParsed, _ := segment.ParseKey(string(dk))
if v, ok := dkParsed.Labels()[label]; ok {
resultSet[v] = true
}
}
resultList := []string{}
for v := range resultSet {
resultList = append(resultList, v)
}
sort.Strings(resultList)
for _, v := range resultList {
if !cb(v) {
break
}
}
return nil
}
func (s *Storage) DiskUsage() map[string]bytesize.ByteSize {
res := map[string]bytesize.ByteSize{
"main": 0,
"trees": 0,
"dicts": 0,
"dimensions": 0,
"segments": 0,
}
for k := range res {
res[k] = dirSize(filepath.Join(s.config.StoragePath, k))
}
return res
}
func dirSize(path string) (result bytesize.ByteSize) {
filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if !info.IsDir() {
result += bytesize.ByteSize(info.Size())
}
return nil
})
return result
}
func (s *Storage) CacheStats() map[string]interface{} {
return map[string]interface{}{
"dimensions_size": s.dimensions.Size(),
"segments_size": s.segments.Size(),
"dicts_size": s.dicts.Size(),
"trees_size": s.trees.Size(),
}
}
var zeroTime time.Time
func (s *Storage) lifetimeBasedRetentionThreshold() time.Time {
var t time.Time
if s.config.Retention != 0 {
t = time.Now().Add(-1 * s.config.Retention)
}
return t
}
func (s *Storage) performFreeSpaceCheck() error {
freeSpace, err := disk.FreeSpace(s.config.StoragePath)
if err == nil {
if freeSpace < OutOfSpaceThreshold {
return errOutOfSpace
}
}
return nil
}
|
[
"\"PYROSCOPE_WAIT_AFTER_STOP\""
] |
[] |
[
"PYROSCOPE_WAIT_AFTER_STOP"
] |
[]
|
["PYROSCOPE_WAIT_AFTER_STOP"]
|
go
| 1 | 0 | |
qa/rpc-tests/maxuploadtarget.py
|
#!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
'''
Test behavior of -maxuploadtarget.
* Verify that getdata requests for old blocks (>1week) are dropped
if uploadtarget has been reached.
* Verify that getdata requests for recent blocks are respected even
if uploadtarget has been reached.
* Verify that the upload counters are reset after 24 hours.
'''
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to sending
# p2p messages to a node, generating the messages in the main testing logic.
class TestNode(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
self.block_receive_map = {}
def add_connection(self, conn):
self.connection = conn
self.peer_disconnected = False
def on_inv(self, conn, message):
pass
# Track the last getdata message we receive (used in the test)
def on_getdata(self, conn, message):
self.last_getdata = message
def on_block(self, conn, message):
message.block.calc_sha256()
try:
self.block_receive_map[message.block.sha256] += 1
except KeyError as e:
self.block_receive_map[message.block.sha256] = 1
# Spin until verack message is received from the node.
# We use this to signal that our test can begin. This
# is called from the testing thread, so it needs to acquire
# the global lock.
def wait_for_verack(self):
def veracked():
return self.verack_received
return wait_until(veracked, timeout=10)
def wait_for_disconnect(self):
def disconnected():
return self.peer_disconnected
return wait_until(disconnected, timeout=10)
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
def on_close(self, conn):
self.peer_disconnected = True
# Sync up with the node after delivery of a block
def sync_with_ping(self, timeout=30):
def received_pong():
return (self.last_pong.nonce == self.ping_counter)
self.connection.send_message(msg_ping(nonce=self.ping_counter))
success = wait_until(received_pong, timeout)
self.ping_counter += 1
return success
class MaxUploadTest(BitcoinTestFramework):
def __init__(self):
self.utxo = []
self.txouts = gen_return_txouts()
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("MOGWAID", "mogwaid"),
help="mogwaid binary to test")
def setup_chain(self):
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
# Start a node with maxuploadtarget of 200 MB (/24h)
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-maxuploadtarget=200", "-blockmaxsize=999000"]))
def mine_full_block(self, node, address):
# Want to create a full block
# We'll generate a 66k transaction below, and 14 of them are close to the 1MB block limit
for j in xrange(14):
if len(self.utxo) < 14:
self.utxo = node.listunspent()
inputs=[]
outputs = {}
t = self.utxo.pop()
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
remchange = t["amount"] - Decimal("0.001000")
outputs[address]=remchange
# Create a basic transaction that will send change back to ourselves after accounting for a fee,
# and then insert the 128 generated transaction outputs in the middle. rawtx[92] is where the
# number of txouts is stored and is the only thing we overwrite from the original transaction.
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + self.txouts
newtx = newtx + rawtx[94:]
# Appears to be ever so slightly faster to sign with SIGHASH_NONE
signresult = node.signrawtransaction(newtx,None,None,"NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
# Mine a full sized block which will be these transactions we just created
node.generate(1)
def run_test(self):
# Before we connect anything, we first set the time on the node
# to be in the past, otherwise things break because the CNode
# time counters can't be reset backward after initialization
old_time = int(time.time() - 2*60*60*24*7)
self.nodes[0].setmocktime(old_time)
# Generate some old blocks
self.nodes[0].generate(130)
# test_nodes[0] will only request old blocks
# test_nodes[1] will only request new blocks
# test_nodes[2] will test resetting the counters
test_nodes = []
connections = []
for i in xrange(3):
test_nodes.append(TestNode())
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
test_nodes[i].add_connection(connections[i])
NetworkThread().start() # Start up network handling in another thread
[x.wait_for_verack() for x in test_nodes]
# Test logic begins here
# Now mine a big block
self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
# Store the hash; we'll request this later
big_old_block = self.nodes[0].getbestblockhash()
old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
big_old_block = int(big_old_block, 16)
# Advance to two days ago
self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
# Mine one more block, so that the prior block looks old
self.mine_full_block(self.nodes[0], self.nodes[0].getnewaddress())
# We'll be requesting this new block too
big_new_block = self.nodes[0].getbestblockhash()
new_block_size = self.nodes[0].getblock(big_new_block)['size']
big_new_block = int(big_new_block, 16)
# test_nodes[0] will test what happens if we just keep requesting the
# same big old block too many times (expect: disconnect)
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(2, big_old_block))
max_bytes_per_day = 200*1024*1024
daily_buffer = 144 * MAX_BLOCK_SIZE
max_bytes_available = max_bytes_per_day - daily_buffer
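# Rough arithmetic behind success_count (assuming MAX_BLOCK_SIZE is 1,000,000 bytes):
# 200*1024*1024 = 209,715,200 bytes/day target, minus a 144,000,000 byte buffer reserved
# for relaying new blocks, leaves ~65 MB -- roughly 70 serves of a ~1 MB old block.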
success_count = max_bytes_available // old_block_size
# 144MB will be reserved for relaying new blocks, so expect this to
# succeed for ~70 tries.
for i in xrange(success_count):
test_nodes[0].send_message(getdata_request)
test_nodes[0].sync_with_ping()
assert_equal(test_nodes[0].block_receive_map[big_old_block], i+1)
assert_equal(len(self.nodes[0].getpeerinfo()), 3)
# At most a couple more tries should succeed (depending on how long
# the test has been running so far).
for i in xrange(3):
test_nodes[0].send_message(getdata_request)
test_nodes[0].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
print "Peer 0 disconnected after downloading old block too many times"
# Requesting the current block on test_nodes[1] should succeed indefinitely,
# even when over the max upload target.
# We'll try 200 times
getdata_request.inv = [CInv(2, big_new_block)]
for i in xrange(200):
test_nodes[1].send_message(getdata_request)
test_nodes[1].sync_with_ping()
assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
print "Peer 1 able to repeatedly download new block"
# But if test_nodes[1] tries for an old block, it gets disconnected too.
getdata_request.inv = [CInv(2, big_old_block)]
test_nodes[1].send_message(getdata_request)
test_nodes[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 1)
print "Peer 1 disconnected after trying to download old block"
print "Advancing system time on node to clear counters..."
# If we advance the time by 24 hours, then the counters should reset,
# and test_nodes[2] should be able to retrieve the old block.
self.nodes[0].setmocktime(int(time.time()))
test_nodes[2].sync_with_ping()
test_nodes[2].send_message(getdata_request)
test_nodes[2].sync_with_ping()
assert_equal(test_nodes[2].block_receive_map[big_old_block], 1)
print "Peer 2 able to download old block"
[c.disconnect_node() for c in connections]
# Stop and restart node 0 with a 1MB maxuploadtarget, whitelisting 127.0.0.1
print "Restarting nodes with -whitelist=127.0.0.1"
stop_node(self.nodes[0], 0)
self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", "-whitelist=127.0.0.1", "-maxuploadtarget=1", "-blockmaxsize=999000"])
#recreate/reconnect 3 test nodes
test_nodes = []
connections = []
for i in xrange(3):
test_nodes.append(TestNode())
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_nodes[i]))
test_nodes[i].add_connection(connections[i])
NetworkThread().start() # Start up network handling in another thread
[x.wait_for_verack() for x in test_nodes]
#retrieve 20 blocks which should be enough to break the 1MB limit
getdata_request.inv = [CInv(2, big_new_block)]
for i in xrange(20):
test_nodes[1].send_message(getdata_request)
test_nodes[1].sync_with_ping()
assert_equal(test_nodes[1].block_receive_map[big_new_block], i+1)
getdata_request.inv = [CInv(2, big_old_block)]
test_nodes[1].send_message(getdata_request)
test_nodes[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 3) #node is still connected because of the whitelist
print "Peer 1 still connected after trying to download old block (whitelisted)"
[c.disconnect_node() for c in connections]
if __name__ == '__main__':
MaxUploadTest().main()
|
[] |
[] |
[
"MOGWAID"
] |
[]
|
["MOGWAID"]
|
python
| 1 | 0 | |
test/test_treestore.py
|
import os, tempfile, unittest, shutil, subprocess, datetime, time
from s3ts.filestore import LocalFileStore
from s3ts.s3filestore import S3FileStore
from s3ts.config import TreeStoreConfig, readInstallProperties, S3TS_PROPERTIES
from s3ts.treestore import TreeStore
from s3ts.utils import datetimeFromIso
from s3ts.package import PackageJS, S3TS_PACKAGEFILE
from s3ts.metapackage import MetaPackage, SubPackage
import boto
import logging
# boto.set_stream_logger('boto')
class CaptureDownloadProgress:
def __init__( self ):
self.recorded = []
def __call__( self, bytesDownloaded, bytesFromCache ):
self.recorded.append( bytesDownloaded + bytesFromCache )
CaptureUploadProgress = CaptureDownloadProgress
class CaptureInstallProgress:
def __init__( self ):
self.recorded = []
def __call__( self, nBytes ):
self.recorded.append( nBytes )
class EmptyS3Bucket:
def __init__( self, bucket ):
self.bucket = bucket
def __enter__(self):
# Ensure the bucket starts empty
assert len(list(self.bucket.list()))==0, "S3 bucket is not empty"
def __exit__(self, type, value, traceback):
# Clean the bucket (ok, as we know it started empty)
self.bucket.delete_keys( self.bucket.list() )
class TestTreeStore(unittest.TestCase):
def setUp(self):
self.workdir = tempfile.mkdtemp()
if os.path.exists( self.workdir ):
shutil.rmtree( self.workdir )
os.makedirs( self.workdir )
self.FILE1 = b'#!/bin/env python\n def main(): print "hello"\n'
self.FILE2 = b'#!/bin/env python\n def main(): print "goodbye"\n'
self.FILE2_A = b'#!/bin/env python\n def main(): print "goodbye foreever"\n'
self.FILE3 = b'#!/bin/env python\n def main(): print "goodbye foreever"\n'
self.FILE4 = b'#!/bin/env python\n def main(): print "what now"\n'
self.FILE5 = b'Just text'
self.CAR01 = (
b'Some big and complicated data structure goes here, hopefully big enough that it requires chunking and compression.\n'
b'sydney london paris port moresby okinawa st petersburg salt lake city new york whitehorse mawson woy woy st louis\n'
)
# Create some test input data
self.srcTree = makeEmptyDir( os.path.join( self.workdir, 'src-1' ) )
fs = LocalFileStore( self.srcTree )
fs.put( 'code/file1.py', self.FILE1)
fs.put( 'code/file2.py', self.FILE2)
fs.put( 'assets/car-01.db', self.CAR01)
self.srcTree2 = makeEmptyDir( os.path.join( self.workdir, 'src-2' ) )
fs = LocalFileStore( self.srcTree2 )
fs.put( 'code/file1.py', self.FILE1 )
fs.put( 'code/file3.py', self.FILE3 )
fs.put( 'code/file4.py', self.FILE4)
fs.put( 'assets/car-01.db', self.CAR01 )
self.srcTree3 = makeEmptyDir( os.path.join( self.workdir, 'src-3' ) )
fs = LocalFileStore( self.srcTree3 )
fs.put( 'code/file1.py', self.FILE1 )
fs.put( 'code/file2.py', self.FILE2_A )
fs.put( 'code/file4.py', self.FILE4 )
fs.put( 'text/text', self.FILE5 )
self.srcTree4 = makeEmptyDir( os.path.join( self.workdir, 'src-4' ) )
fs = LocalFileStore( self.srcTree4 )
fs.put( 'file1.py', self.FILE1 )
fs.put( 'code/file2.py', self.FILE2_A )
fs.put( 'code/file4.py', self.FILE4 )
fs.put( 'text', self.FILE5 )
self.srcVariant = makeEmptyDir( os.path.join( self.workdir, 'src1-kiosk' ) )
fs = LocalFileStore( self.srcVariant )
fs.put( 'kiosk-01/key', b'this is the key src1:kiosk-01' )
fs.put( 'kiosk-02/key', b'this is the key src1:kiosk-02' )
def tearDown(self):
shutil.rmtree( self.workdir )
def test_fs_treestore(self):
# Create a file system backed treestore
fileStore = LocalFileStore( makeEmptyDir( os.path.join( self.workdir, 'fs' ) ) )
localCache = LocalFileStore( makeEmptyDir( os.path.join( self.workdir, 'cache' ) ) )
treestore = TreeStore.create( fileStore, localCache, TreeStoreConfig( 100, True ) )
# Upload 2 trees
creationTime = datetimeFromIso( '2015-01-01T00:00:00.0' )
treestore.upload( 'v1.0', '', creationTime, self.srcTree, CaptureUploadProgress() )
pkg = treestore.findPackage( 'v1.0' )
# Confirm it's in the index
self.assertEqual( treestore.listPackages(), ['v1.0'] )
# Verify it
treestore.verify( pkg )
# Test the cache priming function
treestore.prime( self.srcTree2, CaptureUploadProgress() )
# Test that validateLocalCache works
corruptedFiles = treestore.validateLocalCache()
self.assertEqual( len(corruptedFiles), 0)
# Download it, checking we get expected progress callbacks
# The order of the callbacks will depend on the order of the
# chunks in the package definition, which will depend on the
# iteration order of the file system when the package was created.
# So check independently of ordering.
cb = CaptureDownloadProgress()
treestore.download( pkg, cb )
self.assertEqual( sorted(cb.recorded), [30, 45, 47, 100, 100] )
# Verify it locally
treestore.verifyLocal( pkg )
# Install it
destTree = os.path.join( self.workdir, 'dest-1' )
treestore.install( pkg, destTree, CaptureInstallProgress() )
# Check that the installed tree is the same as the source tree
self.assertEqual( subprocess.call( 'diff -r -x {0} {1} {2}'.format(S3TS_PROPERTIES,self.srcTree,destTree), shell=True ), 0 )
# Rename the tree, and check that installing that is the same
treestore.rename( 'v1.0', 'v1.0x' )
pkg = treestore.findPackage( 'v1.0x' )
treestore.download( pkg, CaptureDownloadProgress() )
destTree = os.path.join( self.workdir, 'dest-2' )
treestore.install( pkg, destTree, CaptureInstallProgress() )
self.assertEqual( subprocess.call( 'diff -r -x {0} {1} {2}'.format(S3TS_PROPERTIES,self.srcTree,destTree), shell=True ), 0 )
# Test that the flushStore function has nothing to remove
treestore.upload( 'extra', '', creationTime, self.srcTree2, CaptureUploadProgress() )
removed = treestore.flushStore()
self.assertEqual(len(removed), 0)
# Remove a tree
treestore.remove( 'v1.0x' )
# Test that the store now has dangling chunks which can be removed
removed = treestore.flushStore()
self.assertTrue(len(removed) > 0)
treestore.upload( 'v1.0', '', creationTime, self.srcTree, CaptureUploadProgress() )
# Initially the local cache should contain chunks for v1.0 and extra. Empty
# the local cache by successive flush operations
removed = treestore.flushLocalCache(['extra'])
self.assertTrue(len(removed) > 0)
removed = treestore.flushLocalCache(['v1.0'])
self.assertTrue(len(removed) > 0)
# Confirm that removing everything from the local cache is refused
with self.assertRaises(RuntimeError):
treestore.flushLocalCache([])
def test_sync(self):
# Create a file system backed treestore
fileStore = LocalFileStore( makeEmptyDir( os.path.join( self.workdir, 'fs' ) ) )
localCache = LocalFileStore( makeEmptyDir( os.path.join( self.workdir, 'cache' ) ) )
treestore = TreeStore.create( fileStore, localCache, TreeStoreConfig( 10, True ) )
creationTime = datetimeFromIso( '2015-01-01T00:00:00.0' )
treestore.upload( 'v1.0', '', creationTime, self.srcTree, CaptureUploadProgress() )
treestore.upload( 'v1.3', '', creationTime, self.srcTree3, CaptureUploadProgress() )
treestore.upload( 'v1.4', '', creationTime, self.srcTree4, CaptureUploadProgress() )
testdir = makeEmptyDir( os.path.join( self.workdir, 'test' ) )
def assertExists( path ):
self.assertTrue( os.path.exists( os.path.join(testdir, path) ) )
def assertContains( path, data ):
with open( os.path.join(testdir, path), 'rb' ) as f:
self.assertEqual( f.read(), data )
def assertDoesntExist( path ):
self.assertFalse( os.path.exists( os.path.join(testdir, path) ) )
def assertInstalled(pkg, testdir):
result = treestore.compareInstall(pkg, testdir)
self.assertEqual( result.missing, set() )
self.assertEqual( result.extra, set() )
self.assertEqual( result.diffs, set() )
# sync a package to an empty directory
pkg = treestore.findPackage('v1.0')
treestore.download( pkg, CaptureDownloadProgress() )
treestore.sync( pkg, testdir, CaptureInstallProgress() )
assertContains( "code/file1.py", self.FILE1 )
assertContains( "code/file2.py", self.FILE2 )
assertContains( "assets/car-01.db", self.CAR01 )
assertExists( S3TS_PACKAGEFILE )
assertInstalled( pkg, testdir )
# Re-sync the same package
pkg = treestore.findPackage('v1.0')
treestore.download( pkg, CaptureDownloadProgress() )
treestore.sync( pkg, testdir, CaptureInstallProgress() )
assertContains( "code/file1.py", self.FILE1 )
assertContains( "code/file2.py", self.FILE2 )
assertContains( "assets/car-01.db", self.CAR01 )
assertExists( S3TS_PACKAGEFILE )
assertInstalled( pkg, testdir )
# Sync to a different package
pkg = treestore.findPackage('v1.3')
treestore.download( pkg, CaptureDownloadProgress() )
treestore.sync( pkg, testdir, CaptureInstallProgress() )
assertContains( "code/file1.py", self.FILE1 )
assertContains( "code/file2.py", self.FILE2_A )
assertDoesntExist( "assets/car-01.db" )
assertContains( "code/file4.py", self.FILE4 )
assertContains( "text/text", self.FILE5 )
assertExists( S3TS_PACKAGEFILE )
assertInstalled( pkg, testdir )
# Sync back to the first package
pkg = treestore.findPackage('v1.0')
treestore.download( pkg, CaptureDownloadProgress() )
treestore.sync( pkg, testdir, CaptureInstallProgress() )
assertContains( "code/file1.py", self.FILE1 )
assertContains( "code/file2.py", self.FILE2 )
assertContains( "assets/car-01.db", self.CAR01 )
assertDoesntExist( "code/file4.py" )
assertExists( S3TS_PACKAGEFILE )
assertInstalled( pkg, testdir )
# Remove the package file, and sync the second package again
os.unlink( os.path.join( testdir, S3TS_PACKAGEFILE ) )
pkg = treestore.findPackage('v1.3')
treestore.download( pkg, CaptureDownloadProgress() )
treestore.sync( pkg, testdir, CaptureInstallProgress() )
assertContains( "code/file1.py", self.FILE1 )
assertContains( "code/file2.py", self.FILE2_A )
assertDoesntExist( "assets/car-01.db" )
assertContains( "code/file4.py", self.FILE4 )
assertExists( S3TS_PACKAGEFILE )
assertInstalled( pkg, testdir )
# Add an extra file not in the package, and ensure
# that syncing deletes it
with open( os.path.join(testdir, "debug.log"), 'w') as f:
f.write( "something" )
pkg = treestore.findPackage('v1.3')
treestore.sync( pkg, testdir, CaptureInstallProgress() )
assertInstalled( pkg, testdir )
# Sync to test replacing a directory with a file
pkg = treestore.findPackage('v1.4')
treestore.download( pkg, CaptureDownloadProgress() )
treestore.sync( pkg, testdir, CaptureInstallProgress() )
assertContains( "text", self.FILE5 )
assertInstalled( pkg, testdir )
def test_metapackages(self):
# Create a file system backed treestore
fileStore = LocalFileStore( makeEmptyDir( os.path.join( self.workdir, 'fs' ) ) )
localCache = LocalFileStore( makeEmptyDir( os.path.join( self.workdir, 'cache' ) ) )
treestore = TreeStore.create( fileStore, localCache, TreeStoreConfig( 10, True ) )
creationTime = datetimeFromIso( '2015-01-01T00:00:00.0' )
treestore.upload( 'v1.0', '', creationTime, self.srcTree, CaptureUploadProgress() )
treestore.upload( 'v1.3', '', creationTime, self.srcTree3, CaptureUploadProgress() )
treestore.upload( 'v1.4', '', creationTime, self.srcTree4, CaptureUploadProgress() )
meta1 = MetaPackage(
name = 'meta1',
description = '',
creationTime = creationTime,
components = [
SubPackage( 'dir-1', 'v1.0' ),
SubPackage( 'dir-2', 'v1.3' ),
]
)
meta1.verify(treestore,{})
treestore.uploadMetaPackage(meta1)
meta1p = treestore.find( 'meta1', {})
treestore.download(meta1p, CaptureDownloadProgress() )
# Install it
destTree = os.path.join( self.workdir, 'dest-1' )
treestore.install(meta1p, destTree, CaptureInstallProgress() )
def assertContains( path, text ):
with open( os.path.join(destTree, path), 'rb' ) as f:
self.assertEqual( f.read(), text )
assertContains("dir-1/code/file1.py", self.FILE1)
assertContains("dir-2/text/text", self.FILE5)
def test_s3_treestore(self):
# Create an s3 backed treestore
# Requires these environment variables set
#
# AWS_ACCESS_KEY_ID
# AWS_SECRET_ACCESS_KEY
# S3TS_BUCKET
#
# NB: **this will only work if the bucket is empty
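# Example invocation (all values are placeholders, not real credentials):
#   AWS_ACCESS_KEY_ID=<key> AWS_SECRET_ACCESS_KEY=<secret> S3TS_BUCKET=<empty-test-bucket> \
#       python -m unittest test.test_treestore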
s3c = boto.connect_s3()
bucket = s3c.get_bucket( os.environ['S3TS_BUCKET'] )
with EmptyS3Bucket(bucket):
fileStore = S3FileStore( bucket )
localCache = LocalFileStore( makeEmptyDir( os.path.join( self.workdir, 'cache' ) ) )
treestore = TreeStore.create( fileStore, localCache, TreeStoreConfig( 100, True ) )
# Upload it as a tree
creationTime = datetimeFromIso( '2015-01-01T00:00:00.0' )
treestore.upload( 'v1.0', '', creationTime, self.srcTree, CaptureUploadProgress() )
pkg = treestore.findPackage( 'v1.0' )
# Confirm it's in the index
self.assertEqual( treestore.listPackages(), ['v1.0'] )
# Verify it
treestore.verify( pkg )
# Download it, checking we get expected progress callbacks
cb = CaptureDownloadProgress()
treestore.download( pkg, cb )
self.assertEqual( sorted(cb.recorded), [30, 45, 47, 100, 100] )
# Verify it locally
treestore.verifyLocal( pkg )
# Install it
destTree = os.path.join( self.workdir, 'dest-1' )
treestore.install( pkg, destTree, CaptureInstallProgress() )
# Check that the installed tree is the same as the source tree
self.assertEqual( subprocess.call( 'diff -r -x {0} {1} {2}'.format(S3TS_PROPERTIES,self.srcTree,destTree), shell=True ), 0 )
self.assertEqual( readInstallProperties(destTree).treeName, 'v1.0' )
# Use the compareInstall function to confirm the installed package is ok, and
# then check that modified files show up in the comparison
result = treestore.compareInstall( pkg, destTree )
self.assertEqual( len(result.missing), 0 )
self.assertEqual( len(result.extra), 0 )
self.assertEqual( len(result.diffs), 0 )
with open( os.path.join(destTree,"code/file1.py"), "w" ) as f:
f.write("x")
with open( os.path.join(destTree,"code/file3.py"), "w" ) as f:
f.write("y")
os.unlink(os.path.join(destTree,'assets/car-01.db'))
result = treestore.compareInstall( pkg, destTree )
self.assertEqual( result.missing, set(['assets/car-01.db']) )
self.assertEqual( result.extra, set(['code/file3.py']) )
self.assertEqual( result.diffs, set(['code/file1.py']) )
# Reinstall to fix directory content
shutil.rmtree( destTree )
treestore.install( pkg, destTree, CaptureInstallProgress() )
result = treestore.compareInstall( pkg, destTree )
self.assertEqual( len(result.missing), 0 )
self.assertEqual( len(result.extra), 0 )
self.assertEqual( len(result.diffs), 0 )
# Now create a pre-signed version of the package
pkg = treestore.findPackage( 'v1.0' )
treestore.addUrls( pkg, 3600 )
self.assertEqual( len(result.missing), 0 )
self.assertEqual( len(result.extra), 0 )
self.assertEqual( len(result.diffs), 0 )
# And download it directly via http. Create a new local cache
# to ensure that we actually redownload each chunk
localCache = LocalFileStore( makeEmptyDir( os.path.join( self.workdir, 'cache' ) ) )
treestore2 = TreeStore.forHttpOnly( localCache )
cb = CaptureDownloadProgress()
treestore2.downloadHttp( pkg, cb )
self.assertEqual( sorted(cb.recorded), [30, 45, 47, 100, 100] )
# Install it
destTree2 = os.path.join( self.workdir, 'dest-2' )
treestore2.install( pkg, destTree2, CaptureInstallProgress() )
# Check that the new installed tree is the same as the source tree
self.assertEqual( subprocess.call( 'diff -r -x {0} {1} {2}'.format(S3TS_PROPERTIES,self.srcTree,destTree2), shell=True ), 0 )
# Rename the tree, and check that installing that is the same
treestore.rename( 'v1.0', 'v1.0x' )
pkg = treestore.findPackage( 'v1.0x' )
treestore.download( pkg, CaptureDownloadProgress() )
destTree = os.path.join( self.workdir, 'dest-3' )
treestore.install( pkg, destTree, CaptureInstallProgress() )
self.assertEqual( subprocess.call( 'diff -r -x {0} {1} {2}'.format(S3TS_PROPERTIES,self.srcTree,destTree), shell=True ), 0 )
# Remove the tree
treestore.remove( 'v1.0x' )
def test_s3_prefixes(self):
# Requires these environment variables set
#
# AWS_ACCESS_KEY_ID
# AWS_SECRET_ACCESS_KEY
# S3TS_BUCKET
#
# NB: **this will only work if the bucket is empty
s3c = boto.connect_s3()
bucket = s3c.get_bucket( os.environ['S3TS_BUCKET'] )
with EmptyS3Bucket(bucket):
localCache = LocalFileStore( makeEmptyDir( os.path.join( self.workdir, 'cache' ) ) )
treestore1 = TreeStore.create( S3FileStore( bucket, "prefix1" ), localCache, TreeStoreConfig( 100, True ) )
treestore2 = TreeStore.create( S3FileStore( bucket, "prefix2" ), localCache, TreeStoreConfig( 100, True ) )
# Confirm we can write the different values to the same path in both treestores,
# and the different prefixes keep them separate and independent
creationTime = datetimeFromIso( '2015-01-01T00:00:00.0' )
treestore1.upload( 'release', '', creationTime, self.srcTree, CaptureUploadProgress() )
treestore2.upload( 'release', '', creationTime, self.srcTree2, CaptureUploadProgress() )
pkg1 = treestore1.findPackage( 'release' )
pkg2 = treestore2.findPackage( 'release' )
self.assertEqual(len(pkg1.files),3)
self.assertEqual(len(pkg2.files),4)
def test_s3_merged_package(self):
# Test the creation and subsequent installation of merged packages
# Requires these environment variables set
#
# AWS_ACCESS_KEY_ID
# AWS_SECRET_ACCESS_KEY
# S3TS_BUCKET
#
# NB: **this will only work if the bucket is empty
s3c = boto.connect_s3()
bucket = s3c.get_bucket( os.environ['S3TS_BUCKET'] )
with EmptyS3Bucket(bucket):
localCache = LocalFileStore( makeEmptyDir( os.path.join( self.workdir, 'cache' ) ) )
treestore = TreeStore.create( S3FileStore( bucket), localCache, TreeStoreConfig( 100, True ) )
creationTime = datetimeFromIso( '2015-01-01T00:00:00.0' )
treestore.upload( 'src1', '', creationTime, self.srcTree, CaptureUploadProgress() )
treestore.upload( 'src2', '', creationTime, self.srcTree2, CaptureUploadProgress() )
treestore.upload( 'src3', '', creationTime, self.srcTree3, CaptureUploadProgress() )
treestore.createMerged( 'merged', creationTime, { '.' : 'src1', 'subdir-a' : 'src2', 'subdir-b' : 'src3'})
pkg = treestore.findPackage( 'merged' )
treestore.download( pkg, CaptureDownloadProgress() )
destTree = os.path.join( self.workdir, 'merged' )
treestore.install( pkg, destTree, CaptureInstallProgress() )
def assertSameContent( path1, path2 ):
with open(path1) as f1:
with open(path2) as f2:
self.assertEqual( f1.read(), f2.read() )
assertSameContent(os.path.join(destTree, "code/file1.py"), os.path.join(self.srcTree, "code/file1.py"))
assertSameContent(os.path.join(destTree, "subdir-a/code/file4.py"), os.path.join(self.srcTree2, "code/file4.py"))
assertSameContent(os.path.join(destTree, "subdir-b/text/text"), os.path.join(self.srcTree3, "text/text"))
def test_s3_many_treestore(self):
# Create an s3 backed treestore
# Requires these environment variables set
#
# AWS_ACCESS_KEY_ID
# AWS_SECRET_ACCESS_KEY
# S3TS_BUCKET
#
# NB: **this will only work if the bucket is empty
s3c = boto.connect_s3()
bucket = s3c.get_bucket( os.environ['S3TS_BUCKET'] )
with EmptyS3Bucket(bucket):
fileStore = S3FileStore( bucket )
localCache = LocalFileStore( makeEmptyDir( os.path.join( self.workdir, 'cache' ) ) )
treestore = TreeStore.create( fileStore, localCache, TreeStoreConfig( 100, True ) )
# Upload it as a tree
creationTime = datetimeFromIso( '2015-01-01T00:00:00.0' )
treestore.uploadMany( 'v1.0', '', creationTime, self.srcTree, self.srcVariant, CaptureUploadProgress() )
pkg = treestore.findPackage( 'v1.0:kiosk-01' )
# Confirm it's in the index
self.assertEqual( treestore.listPackages(), ['v1.0:kiosk-01', 'v1.0:kiosk-02'] )
# Verify it
treestore.verify( pkg )
# Download it, checking we get expected progress callbacks
cb = CaptureDownloadProgress()
treestore.download( pkg, cb )
self.assertEqual( sorted(cb.recorded), [29, 30, 45, 47, 100, 100] )
# Verify it locally
treestore.verifyLocal( pkg )
# Install it
destTree = os.path.join( self.workdir, 'dest-1' )
treestore.install( pkg, destTree, CaptureInstallProgress() )
# Check that the installed tree is the same as the source tree
self.assertEqual( subprocess.call( 'diff -r -x {0} {1} {2}'.format(S3TS_PROPERTIES,self.srcTree + '/assets',destTree + '/assets'), shell=True ), 0 )
self.assertEqual( subprocess.call( 'diff -r -x {0} {1} {2}'.format(S3TS_PROPERTIES,self.srcTree + '/code',destTree + '/code'), shell=True ), 0 )
self.assertEqual( readInstallProperties(destTree).treeName, 'v1.0:kiosk-01' )
def makeEmptyDir( path ):
if os.path.exists( path ):
shutil.rmtree( path )
os.makedirs( path )
return path
if __name__ == '__main__':
unittest.main()
|
[] |
[] |
[
"S3TS_BUCKET"
] |
[]
|
["S3TS_BUCKET"]
|
python
| 1 | 0 | |
tensorflow_net/E2E-ARNN/train_arnn_sleep.py
|
import os
os.environ["CUDA_VISIBLE_DEVICES"]="0,-1"
import numpy as np
import tensorflow as tf
#from tensorflow.python.client import device_lib
#print(device_lib.list_local_devices())
import shutil, sys
from datetime import datetime
import h5py
from arnn_sleep import ARNN_Sleep
from arnn_sleep_config import Config
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import cohen_kappa_score
from datagenerator_from_list_v2 import DataGenerator
#from scipy.io import loadmat
# Parameters
# ==================================================
# Misc Parameters
tf.app.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.app.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
# My Parameters
tf.app.flags.DEFINE_string("eeg_train_data", "../train_data.mat", "Point to directory of input data")
tf.app.flags.DEFINE_string("eeg_eval_data", "../data/eval_data_1.mat", "Point to directory of input data")
tf.app.flags.DEFINE_string("eeg_test_data", "../test_data.mat", "Point to directory of input data")
tf.app.flags.DEFINE_string("eog_train_data", "../train_data.mat", "Point to directory of input data")
tf.app.flags.DEFINE_string("eog_eval_data", "../data/eval_data_1.mat", "Point to directory of input data")
tf.app.flags.DEFINE_string("eog_test_data", "../test_data.mat", "Point to directory of input data")
tf.app.flags.DEFINE_string("emg_train_data", "../train_data.mat", "Point to directory of input data")
tf.app.flags.DEFINE_string("emg_eval_data", "../data/eval_data_1.mat", "Point to directory of input data")
tf.app.flags.DEFINE_string("emg_test_data", "../test_data.mat", "Point to directory of input data")
tf.app.flags.DEFINE_string("out_dir", "./output/", "Point to output directory")
tf.app.flags.DEFINE_string("checkpoint_dir", "./checkpoint/", "Point to checkpoint directory")
tf.app.flags.DEFINE_float("dropout_keep_prob_rnn", 0.75, "Dropout keep probability (default: 0.75)")
tf.app.flags.DEFINE_integer("seq_len", 32, "Sequence length (default: 32)")
tf.app.flags.DEFINE_integer("nfilter", 20, "Sequence length (default: 20)")
tf.app.flags.DEFINE_integer("nhidden1", 64, "Sequence length (default: 20)")
tf.app.flags.DEFINE_integer("attention_size1", 32, "Sequence length (default: 20)")
FLAGS = tf.app.flags.FLAGS
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()): # python3
print("{}={}".format(attr.upper(), value))
print("")
# Data Preparation
# ==================================================
# path where some outputs are stored
out_path = os.path.abspath(os.path.join(os.path.curdir,FLAGS.out_dir))
# path where checkpoint models are stored
checkpoint_path = os.path.abspath(os.path.join(out_path,FLAGS.checkpoint_dir))
if not os.path.isdir(os.path.abspath(out_path)): os.makedirs(os.path.abspath(out_path))
if not os.path.isdir(os.path.abspath(checkpoint_path)): os.makedirs(os.path.abspath(checkpoint_path))
config = Config()
config.dropout_keep_prob_rnn = FLAGS.dropout_keep_prob_rnn
config.epoch_seq_len = FLAGS.seq_len
config.epoch_step = FLAGS.seq_len
config.nfilter = FLAGS.nfilter
config.nhidden1 = FLAGS.nhidden1
config.attention_size1 = FLAGS.attention_size1
eeg_active = ((FLAGS.eeg_train_data != "") and (FLAGS.eeg_test_data != ""))
eog_active = ((FLAGS.eog_train_data != "") and (FLAGS.eog_test_data != ""))
emg_active = ((FLAGS.emg_train_data != "") and (FLAGS.emg_test_data != ""))
if (eeg_active):
print("eeg active")
# Initialize the data generators separately for the training, validation, and test sets
eeg_train_gen = DataGenerator(os.path.abspath(FLAGS.eeg_train_data), data_shape=[config.frame_seq_len, config.ndim], shuffle = False)
eeg_test_gen = DataGenerator(os.path.abspath(FLAGS.eeg_test_data), data_shape=[config.frame_seq_len, config.ndim], shuffle = False)
eeg_eval_gen = DataGenerator(os.path.abspath(FLAGS.eeg_eval_data), data_shape=[config.frame_seq_len, config.ndim], shuffle = False)
# data normalization here
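# meanX/stdX are computed on the training split only and then reused to normalise
# the eval and test splits below, so no eval/test statistics leak into training.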
X = eeg_train_gen.X
X = np.reshape(X,(eeg_train_gen.data_size*eeg_train_gen.data_shape[0], eeg_train_gen.data_shape[1]))
meanX = X.mean(axis=0)
stdX = X.std(axis=0)
X = (X - meanX) / stdX
eeg_train_gen.X = np.reshape(X, (eeg_train_gen.data_size, eeg_train_gen.data_shape[0], eeg_train_gen.data_shape[1]))
X = eeg_eval_gen.X
X = np.reshape(X,(eeg_eval_gen.data_size*eeg_eval_gen.data_shape[0], eeg_eval_gen.data_shape[1]))
X = (X - meanX) / stdX
eeg_eval_gen.X = np.reshape(X, (eeg_eval_gen.data_size, eeg_eval_gen.data_shape[0], eeg_eval_gen.data_shape[1]))
X = eeg_test_gen.X
X = np.reshape(X,(eeg_test_gen.data_size*eeg_test_gen.data_shape[0], eeg_test_gen.data_shape[1]))
X = (X - meanX) / stdX
eeg_test_gen.X = np.reshape(X, (eeg_test_gen.data_size, eeg_test_gen.data_shape[0], eeg_test_gen.data_shape[1]))
if (eog_active):
print("eog active")
# Initialize the data generators separately for the training, validation, and test sets
eog_train_gen = DataGenerator(os.path.abspath(FLAGS.eog_train_data), data_shape=[config.frame_seq_len, config.ndim], shuffle = False)
eog_test_gen = DataGenerator(os.path.abspath(FLAGS.eog_test_data), data_shape=[config.frame_seq_len, config.ndim], shuffle = False)
eog_eval_gen = DataGenerator(os.path.abspath(FLAGS.eog_eval_data), data_shape=[config.frame_seq_len, config.ndim], shuffle = False)
# data normalization here
X = eog_train_gen.X
X = np.reshape(X,(eog_train_gen.data_size*eog_train_gen.data_shape[0], eog_train_gen.data_shape[1]))
meanX = X.mean(axis=0)
stdX = X.std(axis=0)
X = (X - meanX) / stdX
eog_train_gen.X = np.reshape(X, (eog_train_gen.data_size, eog_train_gen.data_shape[0], eog_train_gen.data_shape[1]))
X = eog_eval_gen.X
X = np.reshape(X,(eog_eval_gen.data_size*eog_eval_gen.data_shape[0], eog_eval_gen.data_shape[1]))
X = (X - meanX) / stdX
eog_eval_gen.X = np.reshape(X, (eog_eval_gen.data_size, eog_eval_gen.data_shape[0], eog_eval_gen.data_shape[1]))
X = eog_test_gen.X
X = np.reshape(X,(eog_test_gen.data_size*eog_test_gen.data_shape[0], eog_test_gen.data_shape[1]))
X = (X - meanX) / stdX
eog_test_gen.X = np.reshape(X, (eog_test_gen.data_size, eog_test_gen.data_shape[0], eog_test_gen.data_shape[1]))
if (emg_active):
print("emg active")
# Initialize the data generators separately for the training, validation, and test sets
emg_train_gen = DataGenerator(os.path.abspath(FLAGS.emg_train_data), data_shape=[config.frame_seq_len, config.ndim], shuffle = False)
emg_test_gen = DataGenerator(os.path.abspath(FLAGS.emg_test_data), data_shape=[config.frame_seq_len, config.ndim], shuffle = False)
emg_eval_gen = DataGenerator(os.path.abspath(FLAGS.emg_eval_data), data_shape=[config.frame_seq_len, config.ndim], shuffle = False)
# data normalization here
X = emg_train_gen.X
X = np.reshape(X,(emg_train_gen.data_size*emg_train_gen.data_shape[0], emg_train_gen.data_shape[1]))
meanX = X.mean(axis=0)
stdX = X.std(axis=0)
X = (X - meanX) / stdX
emg_train_gen.X = np.reshape(X, (emg_train_gen.data_size, emg_train_gen.data_shape[0], emg_train_gen.data_shape[1]))
X = emg_eval_gen.X
X = np.reshape(X,(emg_eval_gen.data_size*emg_eval_gen.data_shape[0], emg_eval_gen.data_shape[1]))
X = (X - meanX) / stdX
emg_eval_gen.X = np.reshape(X, (emg_eval_gen.data_size, emg_eval_gen.data_shape[0], emg_eval_gen.data_shape[1]))
X = emg_test_gen.X
X = np.reshape(X,(emg_test_gen.data_size*emg_test_gen.data_shape[0], emg_test_gen.data_shape[1]))
X = (X - meanX) / stdX
emg_test_gen.X = np.reshape(X, (emg_test_gen.data_size, emg_test_gen.data_shape[0], emg_test_gen.data_shape[1]))
# eeg always active
train_generator = eeg_train_gen
test_generator = eeg_test_gen
eval_generator = eeg_eval_gen
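# Build the channel axis from whichever modalities are active:
# EEG only -> expand to (N, frame_seq_len, ndim, 1); EEG+EOG -> stack to (..., 2);
# EEG+EOG+EMG -> stack to (..., 3). config.nchannel is set accordingly below.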
if (not(eog_active) and not(emg_active)):
train_generator.X = np.expand_dims(train_generator.X, axis=-1) # expand channel dimension
train_generator.data_shape = train_generator.X.shape[1:]
test_generator.X = np.expand_dims(test_generator.X, axis=-1) # expand channel dimension
test_generator.data_shape = test_generator.X.shape[1:]
eval_generator.X = np.expand_dims(eval_generator.X, axis=-1) # expand channel dimension
eval_generator.data_shape = eval_generator.X.shape[1:]
nchannel = 1
print(train_generator.X.shape)
if (eog_active and not(emg_active)):
print(train_generator.X.shape)
print(eog_train_gen.X.shape)
train_generator.X = np.stack((train_generator.X, eog_train_gen.X), axis=-1) # merge and make new dimension
train_generator.data_shape = train_generator.X.shape[1:]
test_generator.X = np.stack((test_generator.X, eog_test_gen.X), axis=-1) # merge and make new dimension
test_generator.data_shape = test_generator.X.shape[1:]
eval_generator.X = np.stack((eval_generator.X, eog_eval_gen.X), axis=-1) # merge and make new dimension
eval_generator.data_shape = eval_generator.X.shape[1:]
nchannel = 2
print(train_generator.X.shape)
if (eog_active and emg_active):
print(train_generator.X.shape)
print(eog_train_gen.X.shape)
print(emg_train_gen.X.shape)
train_generator.X = np.stack((train_generator.X, eog_train_gen.X, emg_train_gen.X), axis=-1) # merge and make new dimension
train_generator.data_shape = train_generator.X.shape[1:]
test_generator.X = np.stack((test_generator.X, eog_test_gen.X, emg_test_gen.X), axis=-1) # merge and make new dimension
test_generator.data_shape = test_generator.X.shape[1:]
eval_generator.X = np.stack((eval_generator.X, eog_eval_gen.X, emg_eval_gen.X), axis=-1) # merge and make new dimension
eval_generator.data_shape = eval_generator.X.shape[1:]
nchannel = 3
print(train_generator.X.shape)
config.nchannel = nchannel
del eeg_train_gen
del eeg_test_gen
del eeg_eval_gen
if (eog_active):
del eog_train_gen
del eog_test_gen
del eog_eval_gen
if (emg_active):
del emg_train_gen
del emg_test_gen
del emg_eval_gen
# shuffle training data here
train_generator.shuffle_data()
train_batches_per_epoch = np.floor(len(train_generator.data_index) / config.batch_size).astype(np.uint32)
eval_batches_per_epoch = np.floor(len(eval_generator.data_index) / config.batch_size).astype(np.uint32)
test_batches_per_epoch = np.floor(len(test_generator.data_index) / config.batch_size).astype(np.uint32)
print("Train/Eval/Test set: {:d}/{:d}/{:d}".format(train_generator.data_size, eval_generator.data_size, test_generator.data_size))
print("Train/Eval/Test batches per epoch: {:d}/{:d}/{:d}".format(train_batches_per_epoch, eval_batches_per_epoch, test_batches_per_epoch))
# variable to keep track of best fscore
best_fscore = 0.0
best_acc = 0.0
best_kappa = 0.0
min_loss = float("inf")
# Training
# ==================================================
with tf.Graph().as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
session_conf.gpu_options.allow_growth = True
sess = tf.Session(config=session_conf)
with sess.as_default():
arnn = ARNN_Sleep(config=config)
# Define Training procedure
global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = tf.train.AdamOptimizer(config.learning_rate)
grads_and_vars = optimizer.compute_gradients(arnn.loss)
train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
out_dir = os.path.abspath(os.path.join(os.path.curdir,FLAGS.out_dir))
print("Writing to {}\n".format(out_dir))
saver = tf.train.Saver(tf.all_variables(), max_to_keep=1)
# initialize all variables
print("Model initialized")
sess.run(tf.initialize_all_variables())
def train_step(x_batch, y_batch):
"""
A single training step
"""
frame_seq_len = np.ones(len(x_batch),dtype=int) * config.frame_seq_len
feed_dict = {
arnn.input_x: x_batch,
arnn.input_y: y_batch,
arnn.dropout_keep_prob_rnn: config.dropout_keep_prob_rnn,
arnn.frame_seq_len: frame_seq_len
}
_, step, output_loss, total_loss, accuracy = sess.run(
[train_op, global_step, arnn.output_loss, arnn.loss, arnn.accuracy],
feed_dict)
return step, output_loss, total_loss, accuracy
def dev_step(x_batch, y_batch):
frame_seq_len = np.ones(len(x_batch),dtype=int) * config.frame_seq_len
feed_dict = {
arnn.input_x: x_batch,
arnn.input_y: y_batch,
arnn.dropout_keep_prob_rnn: 1.0,
arnn.frame_seq_len: frame_seq_len
}
output_loss, total_loss, yhat = sess.run(
[arnn.output_loss, arnn.loss, arnn.prediction], feed_dict)
return output_loss, total_loss, yhat
def evaluate(gen, log_filename):
# Validate the model on the entire evaluation test set after each epoch
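# Iterate over the full batches first, then use rest_batch() for the remainder so
# that every example in gen.data_index receives a prediction before scoring.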
output_loss =0
total_loss = 0
yhat = np.zeros([len(gen.data_index)])
num_batch_per_epoch = np.floor(len(gen.data_index) / (config.batch_size)).astype(np.uint32)
test_step = 1
while test_step < num_batch_per_epoch:
x_batch, y_batch, label_batch_ = gen.next_batch(config.batch_size)
output_loss_, total_loss_, yhat_ = dev_step(x_batch, y_batch)
output_loss += output_loss_
total_loss += total_loss_
yhat[(test_step-1)*config.batch_size : test_step*config.batch_size] = yhat_
test_step += 1
if(gen.pointer < len(gen.data_index)):
actual_len, x_batch, y_batch, label_batch_ = gen.rest_batch(config.batch_size)
output_loss_, total_loss_, yhat_ = dev_step(x_batch, y_batch)
yhat[(test_step-1)*config.batch_size : len(gen.data_index)] = yhat_
output_loss += output_loss_
total_loss += total_loss_
yhat = yhat + 1
acc = accuracy_score(gen.label, yhat)
with open(os.path.join(out_dir, log_filename), "a") as text_file:
text_file.write("{:g} {:g} {:g}\n".format(output_loss, total_loss, acc))
return acc, yhat, output_loss, total_loss
# Loop over number of epochs
for epoch in range(config.training_epoch):
print("{} Epoch number: {}".format(datetime.now(), epoch + 1))
step = 1
while step < train_batches_per_epoch:
# Get a batch
x_batch, y_batch, label_batch = train_generator.next_batch(config.batch_size)
train_step_, train_output_loss_, train_total_loss_, train_acc_ = train_step(x_batch, y_batch)
time_str = datetime.now().isoformat()
print("{}: step {}, output_loss {}, total_loss {} acc {}".format(time_str, train_step_, train_output_loss_, train_total_loss_, train_acc_))
step += 1
current_step = tf.train.global_step(sess, global_step)
if current_step % config.evaluate_every == 0:
# Validate the model on the entire evaluation test set after each epoch
print("{} Start validation".format(datetime.now()))
eval_acc, eval_yhat, eval_output_loss, eval_total_loss = evaluate(gen=eval_generator, log_filename="eval_result_log.txt")
test_acc, test_yhat, test_output_loss, test_total_loss = evaluate(gen=test_generator, log_filename="test_result_log.txt")
if(eval_acc >= best_acc):
best_acc = eval_acc
checkpoint_name = os.path.join(checkpoint_path, 'model_step' + str(current_step) +'.ckpt')
save_path = saver.save(sess, checkpoint_name)
print("Best model updated")
source_file = checkpoint_name
dest_file = os.path.join(checkpoint_path, 'best_model_acc')
shutil.copy(source_file + '.data-00000-of-00001', dest_file + '.data-00000-of-00001')
shutil.copy(source_file + '.index', dest_file + '.index')
shutil.copy(source_file + '.meta', dest_file + '.meta')
test_generator.reset_pointer()
eval_generator.reset_pointer()
train_generator.reset_pointer()
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
tempita/__init__.py
|
#!/usr/bin/env python
"""
A small templating language
This implements a small templating language. This language implements
if/elif/else, for/continue/break, expressions, and blocks of Python
code. The syntax is::
{{any expression (function calls etc)}}
{{any expression | filter}}
{{for x in y}}...{{endfor}}
{{if x}}x{{elif y}}y{{else}}z{{endif}}
{{py:x=1}}
{{py:
def foo(bar):
return 'baz'
}}
{{default var = default_value}}
{{# comment}}
You use this with the ``Template`` class or the ``sub`` shortcut.
The ``Template`` class takes the template string and the name of
the template (for errors) and a default namespace. Then (like
``string.Template``) you can call the ``tmpl.substitute(**kw)``
method to make a substitution (or ``tmpl.substitute(a_dict)``).
``sub(content, **kw)`` substitutes the template immediately. You
can use ``__name='tmpl.html'`` to set the name of the template.
If there are syntax errors ``TemplateError`` will be raised.
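A minimal usage sketch::
    from tempita import Template
    tmpl = Template('Hello {{name}}!')
    print(tmpl.substitute(name='world'))  # -> Hello world!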
"""
import cgi
import os
import re
import sys
import tokenize
if sys.version_info[0] == 2:
from cStringIO import StringIO
from urllib import quote as url_quote
else:
from io import StringIO
from urllib.parse import quote as url_quote
from tempita._looper import looper
from tempita.compat3 import bytes, basestring_, next, is_unicode, coerce_text
__all__ = ['TemplateError', 'Template', 'sub', 'HTMLTemplate',
'sub_html', 'html', 'bunch']
in_re = re.compile(r'\s+in\s+')
var_re = re.compile(r'^[a-z_][a-z0-9_]*$', re.I)
class TemplateError(Exception):
"""Exception raised while parsing a template
"""
def __init__(self, message, position, name=None):
Exception.__init__(self, message)
self.position = position
self.name = name
def __str__(self):
msg = ' '.join(self.args)
if self.position:
msg = '%s at line %s column %s' % (
msg, self.position[0], self.position[1])
if self.name:
msg += ' in %s' % self.name
return msg
class _TemplateContinue(Exception):
pass
class _TemplateBreak(Exception):
pass
def get_file_template(name, from_template):
path = os.path.join(os.path.dirname(from_template.name), name)
return from_template.__class__.from_filename(
path, namespace=from_template.namespace,
get_template=from_template.get_template)
class Template(object):
default_namespace = {
'start_braces': '{{',
'end_braces': '}}',
'looper': looper,
}
default_encoding = 'utf8'
default_inherit = None
def __init__(self, content, name=None, namespace=None, stacklevel=None,
get_template=None, default_inherit=None, line_offset=0,
delimiters=None):
self.content = content
# set delimiters
if delimiters is None:
delimiters = (self.default_namespace['start_braces'],
self.default_namespace['end_braces'])
else:
assert len(delimiters) == 2 and all([isinstance(delimiter, basestring_)
for delimiter in delimiters])
self.default_namespace = self.__class__.default_namespace.copy()
self.default_namespace['start_braces'] = delimiters[0]
self.default_namespace['end_braces'] = delimiters[1]
self.delimiters = delimiters
self._unicode = is_unicode(content)
if name is None and stacklevel is not None:
try:
caller = sys._getframe(stacklevel)
except ValueError:
pass
else:
globals = caller.f_globals
lineno = caller.f_lineno
if '__file__' in globals:
name = globals['__file__']
if name.endswith('.pyc') or name.endswith('.pyo'):
name = name[:-1]
elif '__name__' in globals:
name = globals['__name__']
else:
name = '<string>'
if lineno:
name += ':%s' % lineno
self.name = name
self._parsed = parse(content, name=name, line_offset=line_offset, delimiters=self.delimiters)
if namespace is None:
namespace = {}
self.namespace = namespace
self.get_template = get_template
if default_inherit is not None:
self.default_inherit = default_inherit
def from_filename(cls, filename, namespace=None, encoding=None,
default_inherit=None, get_template=get_file_template):
f = open(filename, 'rb')
c = f.read()
f.close()
if encoding:
c = c.decode(encoding)
return cls(content=c, name=filename, namespace=namespace,
default_inherit=default_inherit, get_template=get_template)
from_filename = classmethod(from_filename)
def __repr__(self):
return '<%s %s name=%r>' % (
self.__class__.__name__,
hex(id(self))[2:], self.name)
def substitute(self, *args, **kw):
if args:
if kw:
raise TypeError(
"You can only give positional *or* keyword arguments")
if len(args) > 1:
raise TypeError(
"You can only give one positional argument")
if not hasattr(args[0], 'items'):
raise TypeError(
("If you pass in a single argument, you must pass in a ",
"dict-like object (with a .items() method); you gave %r")
% (args[0],))
kw = args[0]
ns = kw
ns['__template_name__'] = self.name
if self.namespace:
ns.update(self.namespace)
result, defs, inherit = self._interpret(ns)
if not inherit:
inherit = self.default_inherit
if inherit:
result = self._interpret_inherit(result, defs, inherit, ns)
return result
def _interpret(self, ns):
__traceback_hide__ = True
parts = []
defs = {}
self._interpret_codes(self._parsed, ns, out=parts, defs=defs)
if '__inherit__' in defs:
inherit = defs.pop('__inherit__')
else:
inherit = None
return ''.join(parts), defs, inherit
def _interpret_inherit(self, body, defs, inherit_template, ns):
__traceback_hide__ = True
if not self.get_template:
raise TemplateError(
'You cannot use inheritance without passing in get_template',
position=None, name=self.name)
templ = self.get_template(inherit_template, self)
self_ = TemplateObject(self.name)
for name, value in defs.items():
setattr(self_, name, value)
self_.body = body
ns = ns.copy()
ns['self'] = self_
return templ.substitute(ns)
def _interpret_codes(self, codes, ns, out, defs):
__traceback_hide__ = True
for item in codes:
if isinstance(item, basestring_):
out.append(item)
else:
self._interpret_code(item, ns, out, defs)
def _interpret_code(self, code, ns, out, defs):
__traceback_hide__ = True
name, pos = code[0], code[1]
if name == 'py':
self._exec(code[2], ns, pos)
elif name == 'continue':
raise _TemplateContinue()
elif name == 'break':
raise _TemplateBreak()
elif name == 'for':
vars, expr, content = code[2], code[3], code[4]
expr = self._eval(expr, ns, pos)
self._interpret_for(vars, expr, content, ns, out, defs)
elif name == 'cond':
parts = code[2:]
self._interpret_if(parts, ns, out, defs)
elif name == 'expr':
parts = code[2].split('|')
base = self._eval(parts[0], ns, pos)
for part in parts[1:]:
func = self._eval(part, ns, pos)
base = func(base)
out.append(self._repr(base, pos))
elif name == 'default':
var, expr = code[2], code[3]
if var not in ns:
result = self._eval(expr, ns, pos)
ns[var] = result
elif name == 'inherit':
expr = code[2]
value = self._eval(expr, ns, pos)
defs['__inherit__'] = value
elif name == 'def':
name = code[2]
signature = code[3]
parts = code[4]
ns[name] = defs[name] = TemplateDef(self, name, signature, body=parts, ns=ns,
pos=pos)
elif name == 'comment':
return
else:
assert 0, "Unknown code: %r" % name
def _interpret_for(self, vars, expr, content, ns, out, defs):
__traceback_hide__ = True
for item in expr:
if len(vars) == 1:
ns[vars[0]] = item
else:
if len(vars) != len(item):
raise ValueError(
'Need %i items to unpack (got %i items)'
% (len(vars), len(item)))
for name, value in zip(vars, item):
ns[name] = value
try:
self._interpret_codes(content, ns, out, defs)
except _TemplateContinue:
continue
except _TemplateBreak:
break
def _interpret_if(self, parts, ns, out, defs):
__traceback_hide__ = True
# @@: if/else/else gets through
for part in parts:
assert not isinstance(part, basestring_)
name, pos = part[0], part[1]
if name == 'else':
result = True
else:
result = self._eval(part[2], ns, pos)
if result:
self._interpret_codes(part[3], ns, out, defs)
break
def _eval(self, code, ns, pos):
__traceback_hide__ = True
try:
try:
value = eval(code, self.default_namespace, ns)
except SyntaxError as e:
raise SyntaxError(
'invalid syntax in expression: %s' % code)
return value
except:
exc_info = sys.exc_info()
e = exc_info[1]
if getattr(e, 'args', None):
arg0 = e.args[0]
else:
arg0 = coerce_text(e)
e.args = (self._add_line_info(arg0, pos),)
raise
def _exec(self, code, ns, pos):
__traceback_hide__ = True
try:
exec(code, self.default_namespace, ns)
except:
exc_info = sys.exc_info()
e = exc_info[1]
if e.args:
e.args = (self._add_line_info(e.args[0], pos),)
else:
e.args = (self._add_line_info(None, pos),)
raise
def _repr(self, value, pos):
__traceback_hide__ = True
try:
if value is None:
return ''
if self._unicode:
try:
value = unicode(value)
except NameError:
# Python 3 has no unicode() builtin; str is already text
value = str(value)
except UnicodeDecodeError:
value = bytes(value)
else:
if not isinstance(value, basestring_):
value = coerce_text(value)
if (is_unicode(value)
and self.default_encoding):
value = value.encode(self.default_encoding)
except:
exc_info = sys.exc_info()
e = exc_info[1]
e.args = (self._add_line_info(e.args[0], pos),)
raise
else:
if self._unicode and isinstance(value, bytes):
if not self.default_encoding:
raise UnicodeDecodeError(
'Cannot decode bytes value %r into unicode '
'(no default_encoding provided)' % value)
try:
value = value.decode(self.default_encoding)
except UnicodeDecodeError as e:
raise UnicodeDecodeError(
e.encoding,
e.object,
e.start,
e.end,
e.reason + ' in string %r' % value)
elif not self._unicode and is_unicode(value):
if not self.default_encoding:
raise UnicodeEncodeError(
'Cannot encode unicode value %r into bytes '
'(no default_encoding provided)' % value)
value = value.encode(self.default_encoding)
return value
def _add_line_info(self, msg, pos):
msg = "%s at line %s column %s" % (
msg, pos[0], pos[1])
if self.name:
msg += " in file %s" % self.name
return msg
def sub(content, delimiters=None, **kw):
name = kw.get('__name')
tmpl = Template(content, name=name, delimiters=delimiters)
return tmpl.substitute(kw)
def paste_script_template_renderer(content, vars, filename=None):
tmpl = Template(content, name=filename)
return tmpl.substitute(vars)
class bunch(dict):
def __init__(self, **kw):
for name, value in kw.items():
setattr(self, name, value)
def __setattr__(self, name, value):
self[name] = value
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __getitem__(self, key):
if 'default' in self:
try:
return dict.__getitem__(self, key)
except KeyError:
return dict.__getitem__(self, 'default')
else:
return dict.__getitem__(self, key)
def __repr__(self):
items = [
(k, v) for k, v in self.items()]
items.sort()
return '<%s %s>' % (
self.__class__.__name__,
' '.join(['%s=%r' % (k, v) for k, v in items]))
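# Illustrative bunch usage (a sketch, not part of the original module):
# attribute access maps to dict keys, and a 'default' key, when present,
# serves as a fallback for missing keys.
#
# >>> b = bunch(default=1, x=2)
# >>> b.x
# 2
# >>> b['missing']
# 1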
############################################################
## HTML Templating
############################################################
class html(object):
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
def __html__(self):
return self.value
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__, self.value)
def html_quote(value, force=True):
if not force and hasattr(value, '__html__'):
return value.__html__()
if value is None:
return ''
if not isinstance(value, basestring_):
value = coerce_text(value)
if sys.version >= "3" and isinstance(value, bytes):
value = cgi.escape(value.decode('latin1'), 1)
value = value.encode('latin1')
else:
value = cgi.escape(value, 1)
if sys.version < "3":
if is_unicode(value):
value = value.encode('ascii', 'xmlcharrefreplace')
return value
def url(v):
v = coerce_text(v)
if is_unicode(v):
v = v.encode('utf8')
return url_quote(v)
def attr(**kw):
kw = list(kw.items())
kw.sort()
parts = []
for name, value in kw:
if value is None:
continue
if name.endswith('_'):
name = name[:-1]
parts.append('%s="%s"' % (html_quote(name), html_quote(value)))
return html(' '.join(parts))
class HTMLTemplate(Template):
default_namespace = Template.default_namespace.copy()
default_namespace.update(dict(
html=html,
attr=attr,
url=url,
html_quote=html_quote,
))
def _repr(self, value, pos):
if hasattr(value, '__html__'):
value = value.__html__()
quote = False
else:
quote = True
plain = Template._repr(self, value, pos)
if quote:
return html_quote(plain)
else:
return plain
def sub_html(content, **kw):
name = kw.get('__name')
tmpl = HTMLTemplate(content, name=name)
return tmpl.substitute(kw)
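# Illustrative HTML escaping (a sketch, not part of the original module):
# substituted values are quoted unless wrapped in html() or providing __html__.
#
# >>> sub_html('<b>{{x}}</b>', x='<i>hi</i>')
# '<b>&lt;i&gt;hi&lt;/i&gt;</b>'
# >>> sub_html('<b>{{x}}</b>', x=html('<i>hi</i>'))
# '<b><i>hi</i></b>'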
class TemplateDef(object):
def __init__(self, template, func_name, func_signature,
body, ns, pos, bound_self=None):
self._template = template
self._func_name = func_name
self._func_signature = func_signature
self._body = body
self._ns = ns
self._pos = pos
self._bound_self = bound_self
def __repr__(self):
return '<tempita function %s(%s) at %s:%s>' % (
self._func_name, self._func_signature,
self._template.name, self._pos)
def __str__(self):
return self()
def __call__(self, *args, **kw):
values = self._parse_signature(args, kw)
ns = self._ns.copy()
ns.update(values)
if self._bound_self is not None:
ns['self'] = self._bound_self
out = []
subdefs = {}
self._template._interpret_codes(self._body, ns, out, subdefs)
return ''.join(out)
def __get__(self, obj, type=None):
if obj is None:
return self
return self.__class__(
self._template, self._func_name, self._func_signature,
self._body, self._ns, self._pos, bound_self=obj)
def _parse_signature(self, args, kw):
values = {}
sig_args, var_args, var_kw, defaults = self._func_signature
extra_kw = {}
for name, value in kw.items():
if not var_kw and name not in sig_args:
raise TypeError(
'Unexpected argument %s' % name)
if name in sig_args:
values[name] = value
else:
extra_kw[name] = value
args = list(args)
sig_args = list(sig_args)
while args:
while sig_args and sig_args[0] in values:
sig_args.pop(0)
if sig_args:
name = sig_args.pop(0)
values[name] = args.pop(0)
elif var_args:
values[var_args] = tuple(args)
break
else:
raise TypeError(
'Extra position arguments: %s'
% ', '.join(repr(v) for v in args))
for name, value_expr in defaults.items():
if name not in values:
values[name] = self._template._eval(
value_expr, self._ns, self._pos)
for name in sig_args:
if name not in values:
raise TypeError(
'Missing argument: %s' % name)
if var_kw:
values[var_kw] = extra_kw
return values
class TemplateObject(object):
def __init__(self, name):
self.__name = name
self.get = TemplateObjectGetter(self)
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.__name)
class TemplateObjectGetter(object):
def __init__(self, template_obj):
self.__template_obj = template_obj
def __getattr__(self, attr):
return getattr(self.__template_obj, attr, Empty)
def __repr__(self):
return '<%s around %r>' % (self.__class__.__name__, self.__template_obj)
class _Empty(object):
def __call__(self, *args, **kw):
return self
def __str__(self):
return ''
def __repr__(self):
return 'Empty'
def __unicode__(self):
return u''
def __iter__(self):
return iter(())
def __bool__(self):
return False
if sys.version < "3":
__nonzero__ = __bool__
Empty = _Empty()
del _Empty
############################################################
## Lexing and Parsing
############################################################
def lex(s, name=None, trim_whitespace=True, line_offset=0, delimiters=None):
"""
Lex a string into chunks:
>>> lex('hey')
['hey']
>>> lex('hey {{you}}')
['hey ', ('you', (1, 7))]
>>> lex('hey {{')
Traceback (most recent call last):
...
TemplateError: No }} to finish last expression at line 1 column 7
>>> lex('hey }}')
Traceback (most recent call last):
...
TemplateError: }} outside expression at line 1 column 7
>>> lex('hey {{ {{')
Traceback (most recent call last):
...
TemplateError: {{ inside expression at line 1 column 10
"""
if delimiters is None:
delimiters = ( Template.default_namespace['start_braces'],
Template.default_namespace['end_braces'] )
in_expr = False
chunks = []
last = 0
last_pos = (line_offset + 1, 1)
token_re = re.compile(r'%s|%s' % (re.escape(delimiters[0]),
re.escape(delimiters[1])))
for match in token_re.finditer(s):
expr = match.group(0)
pos = find_position(s, match.end(), line_offset, last_pos)
if expr == delimiters[0] and in_expr:
raise TemplateError('%s inside expression' % delimiters[0],
position=pos,
name=name)
elif expr == delimiters[1] and not in_expr:
raise TemplateError('%s outside expression' % delimiters[1],
position=pos,
name=name)
if expr == delimiters[0]:
part = s[last:match.start()]
if part:
chunks.append(part)
in_expr = True
else:
chunks.append((s[last:match.start()], last_pos))
in_expr = False
last = match.end()
last_pos = pos
if in_expr:
raise TemplateError('No %s to finish last expression' % delimiters[1],
name=name, position=last_pos)
part = s[last:]
if part:
chunks.append(part)
if trim_whitespace:
chunks = trim_lex(chunks)
return chunks
statement_re = re.compile(r'^(?:if |elif |for |def |inherit |default |py:)')
single_statements = ['else', 'endif', 'endfor', 'enddef', 'continue', 'break']
trail_whitespace_re = re.compile(r'\n\r?[\t ]*$')
lead_whitespace_re = re.compile(r'^[\t ]*\n')
def trim_lex(tokens):
r"""
Takes a lexed set of tokens, and removes whitespace when there is
a directive on a line by itself:
>>> tokens = lex('{{if x}}\nx\n{{endif}}\ny', trim_whitespace=False)
>>> tokens
[('if x', (1, 3)), '\nx\n', ('endif', (3, 3)), '\ny']
>>> trim_lex(tokens)
[('if x', (1, 3)), 'x\n', ('endif', (3, 3)), 'y']
"""
last_trim = None
for i in range(len(tokens)):
current = tokens[i]
if isinstance(tokens[i], basestring_):
# we don't trim this
continue
item = current[0]
if not statement_re.search(item) and item not in single_statements:
continue
if not i:
prev = ''
else:
prev = tokens[i - 1]
if i + 1 >= len(tokens):
next_chunk = ''
else:
next_chunk = tokens[i + 1]
if (not isinstance(next_chunk, basestring_)
or not isinstance(prev, basestring_)):
continue
prev_ok = not prev or trail_whitespace_re.search(prev)
if i == 1 and not prev.strip():
prev_ok = True
if last_trim is not None and last_trim + 2 == i and not prev.strip():
prev_ok = 'last'
if (prev_ok
and (not next_chunk or lead_whitespace_re.search(next_chunk)
or (i == len(tokens) - 2 and not next_chunk.strip()))):
if prev:
if ((i == 1 and not prev.strip())
or prev_ok == 'last'):
tokens[i - 1] = ''
else:
m = trail_whitespace_re.search(prev)
# +1 to leave the leading \n on:
prev = prev[:m.start() + 1]
tokens[i - 1] = prev
if next_chunk:
last_trim = i
if i == len(tokens) - 2 and not next_chunk.strip():
tokens[i + 1] = ''
else:
m = lead_whitespace_re.search(next_chunk)
next_chunk = next_chunk[m.end():]
tokens[i + 1] = next_chunk
return tokens
def find_position(string, index, last_index, last_pos=(1, 1)):
"""
Given a string and index, return (line, column)
"""
lines = string.count('\n', last_index, index)
if lines > 0:
column = index - string.rfind('\n', last_index, index)
else:
column = last_pos[1] + (index - last_index)
return (last_pos[0] + lines, column)
def parse(s, name=None, line_offset=0, delimiters=None):
r"""
Parses a string into a kind of AST
>>> parse('{{x}}')
[('expr', (1, 3), 'x')]
>>> parse('foo')
['foo']
>>> parse('{{if x}}test{{endif}}')
[('cond', (1, 3), ('if', (1, 3), 'x', ['test']))]
>>> parse('series->{{for x in y}}x={{x}}{{endfor}}')
['series->', ('for', (1, 11), ('x',), 'y', ['x=', ('expr', (1, 27), 'x')])]
>>> parse('{{for x, y in z:}}{{continue}}{{endfor}}')
[('for', (1, 3), ('x', 'y'), 'z', [('continue', (1, 21))])]
>>> parse('{{py:x=1}}')
[('py', (1, 3), 'x=1')]
>>> parse('{{if x}}a{{elif y}}b{{else}}c{{endif}}')
[('cond', (1, 3), ('if', (1, 3), 'x', ['a']), ('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))]
Some exceptions::
>>> parse('{{continue}}')
Traceback (most recent call last):
...
TemplateError: continue outside of for loop at line 1 column 3
>>> parse('{{if x}}foo')
Traceback (most recent call last):
...
TemplateError: No {{endif}} at line 1 column 3
>>> parse('{{else}}')
Traceback (most recent call last):
...
TemplateError: else outside of an if block at line 1 column 3
>>> parse('{{if x}}{{for x in y}}{{endif}}{{endfor}}')
Traceback (most recent call last):
...
TemplateError: Unexpected endif at line 1 column 25
>>> parse('{{if}}{{endif}}')
Traceback (most recent call last):
...
TemplateError: if with no expression at line 1 column 3
>>> parse('{{for x y}}{{endfor}}')
Traceback (most recent call last):
...
TemplateError: Bad for (no "in") in 'x y' at line 1 column 3
>>> parse('{{py:x=1\ny=2}}')
Traceback (most recent call last):
...
TemplateError: Multi-line py blocks must start with a newline at line 1 column 3
"""
if delimiters is None:
delimiters = ( Template.default_namespace['start_braces'],
Template.default_namespace['end_braces'] )
tokens = lex(s, name=name, line_offset=line_offset, delimiters=delimiters)
result = []
while tokens:
next_chunk, tokens = parse_expr(tokens, name)
result.append(next_chunk)
return result
def parse_expr(tokens, name, context=()):
if isinstance(tokens[0], basestring_):
return tokens[0], tokens[1:]
expr, pos = tokens[0]
expr = expr.strip()
if expr.startswith('py:'):
expr = expr[3:].lstrip(' \t')
if expr.startswith('\n') or expr.startswith('\r'):
expr = expr.lstrip('\r\n')
if '\r' in expr:
expr = expr.replace('\r\n', '\n')
expr = expr.replace('\r', '')
expr += '\n'
else:
if '\n' in expr:
raise TemplateError(
'Multi-line py blocks must start with a newline',
position=pos, name=name)
return ('py', pos, expr), tokens[1:]
elif expr in ('continue', 'break'):
if 'for' not in context:
raise TemplateError(
'%s outside of for loop' % expr,
position=pos, name=name)
return (expr, pos), tokens[1:]
elif expr.startswith('if '):
return parse_cond(tokens, name, context)
elif (expr.startswith('elif ')
or expr == 'else'):
raise TemplateError(
'%s outside of an if block' % expr.split()[0],
position=pos, name=name)
elif expr in ('if', 'elif', 'for'):
raise TemplateError(
'%s with no expression' % expr,
position=pos, name=name)
elif expr in ('endif', 'endfor', 'enddef'):
raise TemplateError(
'Unexpected %s' % expr,
position=pos, name=name)
elif expr.startswith('for '):
return parse_for(tokens, name, context)
elif expr.startswith('default '):
return parse_default(tokens, name, context)
elif expr.startswith('inherit '):
return parse_inherit(tokens, name, context)
elif expr.startswith('def '):
return parse_def(tokens, name, context)
elif expr.startswith('#'):
return ('comment', pos, tokens[0][0]), tokens[1:]
return ('expr', pos, tokens[0][0]), tokens[1:]
def parse_cond(tokens, name, context):
start = tokens[0][1]
pieces = []
context = context + ('if',)
while 1:
if not tokens:
raise TemplateError(
'Missing {{endif}}',
position=start, name=name)
if (isinstance(tokens[0], tuple)
and tokens[0][0] == 'endif'):
return ('cond', start) + tuple(pieces), tokens[1:]
next_chunk, tokens = parse_one_cond(tokens, name, context)
pieces.append(next_chunk)
def parse_one_cond(tokens, name, context):
(first, pos), tokens = tokens[0], tokens[1:]
content = []
if first.endswith(':'):
first = first[:-1]
if first.startswith('if '):
part = ('if', pos, first[3:].lstrip(), content)
elif first.startswith('elif '):
part = ('elif', pos, first[5:].lstrip(), content)
elif first == 'else':
part = ('else', pos, None, content)
else:
assert 0, "Unexpected token %r at %s" % (first, pos)
while 1:
if not tokens:
raise TemplateError(
'No {{endif}}',
position=pos, name=name)
if (isinstance(tokens[0], tuple)
and (tokens[0][0] == 'endif'
or tokens[0][0].startswith('elif ')
or tokens[0][0] == 'else')):
return part, tokens
next_chunk, tokens = parse_expr(tokens, name, context)
content.append(next_chunk)
def parse_for(tokens, name, context):
first, pos = tokens[0]
tokens = tokens[1:]
context = ('for',) + context
content = []
assert first.startswith('for ')
if first.endswith(':'):
first = first[:-1]
first = first[3:].strip()
match = in_re.search(first)
if not match:
raise TemplateError(
'Bad for (no "in") in %r' % first,
position=pos, name=name)
vars = first[:match.start()]
if '(' in vars:
raise TemplateError(
'You cannot have () in the variable section of a for loop (%r)'
% vars, position=pos, name=name)
vars = tuple([
v.strip() for v in first[:match.start()].split(',')
if v.strip()])
expr = first[match.end():]
while 1:
if not tokens:
raise TemplateError(
'No {{endfor}}',
position=pos, name=name)
if (isinstance(tokens[0], tuple)
and tokens[0][0] == 'endfor'):
return ('for', pos, vars, expr, content), tokens[1:]
next_chunk, tokens = parse_expr(tokens, name, context)
content.append(next_chunk)
def parse_default(tokens, name, context):
first, pos = tokens[0]
assert first.startswith('default ')
first = first.split(None, 1)[1]
parts = first.split('=', 1)
if len(parts) == 1:
raise TemplateError(
"Expression must be {{default var=value}}; no = found in %r" % first,
position=pos, name=name)
var = parts[0].strip()
if ',' in var:
raise TemplateError(
"{{default x, y = ...}} is not supported",
position=pos, name=name)
if not var_re.search(var):
raise TemplateError(
"Not a valid variable name for {{default}}: %r"
% var, position=pos, name=name)
expr = parts[1].strip()
return ('default', pos, var, expr), tokens[1:]
def parse_inherit(tokens, name, context):
first, pos = tokens[0]
assert first.startswith('inherit ')
expr = first.split(None, 1)[1]
return ('inherit', pos, expr), tokens[1:]
def parse_def(tokens, name, context):
first, start = tokens[0]
tokens = tokens[1:]
assert first.startswith('def ')
first = first.split(None, 1)[1]
if first.endswith(':'):
first = first[:-1]
if '(' not in first:
func_name = first
sig = ((), None, None, {})
elif not first.endswith(')'):
raise TemplateError("Function definition doesn't end with ): %s" % first,
position=start, name=name)
else:
first = first[:-1]
func_name, sig_text = first.split('(', 1)
sig = parse_signature(sig_text, name, start)
context = context + ('def',)
content = []
while 1:
if not tokens:
raise TemplateError(
'Missing {{enddef}}',
position=start, name=name)
if (isinstance(tokens[0], tuple)
and tokens[0][0] == 'enddef'):
return ('def', start, func_name, sig, content), tokens[1:]
next_chunk, tokens = parse_expr(tokens, name, context)
content.append(next_chunk)
def parse_signature(sig_text, name, pos):
tokens = tokenize.generate_tokens(StringIO(sig_text).readline)
sig_args = []
var_arg = None
var_kw = None
defaults = {}
def get_token(pos=False):
try:
tok_type, tok_string, (srow, scol), (erow, ecol), line = next(tokens)
except StopIteration:
return tokenize.ENDMARKER, ''
if pos:
return tok_type, tok_string, (srow, scol), (erow, ecol)
else:
return tok_type, tok_string
while 1:
var_arg_type = None
tok_type, tok_string = get_token()
if tok_type == tokenize.ENDMARKER:
break
if tok_type == tokenize.OP and (tok_string == '*' or tok_string == '**'):
var_arg_type = tok_string
tok_type, tok_string = get_token()
if tok_type != tokenize.NAME:
raise TemplateError('Invalid signature: (%s)' % sig_text,
position=pos, name=name)
var_name = tok_string
tok_type, tok_string = get_token()
if tok_type == tokenize.ENDMARKER or (tok_type == tokenize.OP and tok_string == ','):
if var_arg_type == '*':
var_arg = var_name
elif var_arg_type == '**':
var_kw = var_name
else:
sig_args.append(var_name)
if tok_type == tokenize.ENDMARKER:
break
continue
if var_arg_type is not None:
raise TemplateError('Invalid signature: (%s)' % sig_text,
position=pos, name=name)
if tok_type == tokenize.OP and tok_string == '=':
nest_type = None
unnest_type = None
nest_count = 0
start_pos = end_pos = None
parts = []
while 1:
tok_type, tok_string, s, e = get_token(True)
if start_pos is None:
start_pos = s
end_pos = e
if tok_type == tokenize.ENDMARKER and nest_count:
raise TemplateError('Invalid signature: (%s)' % sig_text,
position=pos, name=name)
if (not nest_count and
(tok_type == tokenize.ENDMARKER or (tok_type == tokenize.OP and tok_string == ','))):
default_expr = isolate_expression(sig_text, start_pos, end_pos)
defaults[var_name] = default_expr
sig_args.append(var_name)
break
parts.append((tok_type, tok_string))
if nest_count and tok_type == tokenize.OP and tok_string == nest_type:
nest_count += 1
elif nest_count and tok_type == tokenize.OP and tok_string == unnest_type:
nest_count -= 1
if not nest_count:
nest_type = unnest_type = None
elif not nest_count and tok_type == tokenize.OP and tok_string in ('(', '[', '{'):
nest_type = tok_string
nest_count = 1
unnest_type = {'(': ')', '[': ']', '{': '}'}[nest_type]
return sig_args, var_arg, var_kw, defaults
def isolate_expression(string, start_pos, end_pos):
srow, scol = start_pos
srow -= 1
erow, ecol = end_pos
erow -= 1
lines = string.splitlines(True)
if srow == erow:
return lines[srow][scol:ecol]
parts = [lines[srow][scol:]]
parts.extend(lines[srow+1:erow])
if erow < len(lines):
# It'll sometimes give (end_row_past_finish, 0)
parts.append(lines[erow][:ecol])
return ''.join(parts)
_fill_command_usage = """\
%prog [OPTIONS] TEMPLATE arg=value
Use py:arg=value to set a Python value; otherwise all values are
strings.
"""
def fill_command(args=None):
import sys
import optparse
import os
if args is None:
args = sys.argv[1:]
kwargs = dict(usage=_fill_command_usage)
try:
import pkg_resources
dist = pkg_resources.get_distribution('tempita')
kwargs['version'] = coerce_text(dist)
except ImportError:
# pkg_resources not available
pass
parser = optparse.OptionParser(**kwargs)
parser.add_option(
'-o', '--output',
dest='output',
metavar="FILENAME",
help="File to write output to (default stdout)")
parser.add_option(
'--html',
dest='use_html',
action='store_true',
help="Use HTML style filling (including automatic HTML quoting)")
parser.add_option(
'--env',
dest='use_env',
action='store_true',
help="Put the environment in as top-level variables")
options, args = parser.parse_args(args)
if len(args) < 1:
print('You must give a template filename')
sys.exit(2)
template_name = args[0]
args = args[1:]
vars = {}
if options.use_env:
vars.update(os.environ)
for value in args:
if '=' not in value:
print('Bad argument: %r' % value)
sys.exit(2)
name, value = value.split('=', 1)
if name.startswith('py:'):
name = name[3:]
value = eval(value)
vars[name] = value
if template_name == '-':
template_content = sys.stdin.read()
template_name = '<stdin>'
else:
f = open(template_name, 'rb')
template_content = f.read()
f.close()
if options.use_html:
TemplateClass = HTMLTemplate
else:
TemplateClass = Template
template = TemplateClass(template_content, name=template_name)
result = template.substitute(vars)
if options.output:
f = open(options.output, 'wb')
f.write(result)
f.close()
else:
sys.stdout.write(result)
if __name__ == '__main__':
fill_command()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
terminfo/terminfo.go
|
// Copyright 2019 The TCell Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package terminfo
import (
"bytes"
"errors"
"fmt"
"io"
"os"
"strconv"
"strings"
"sync"
"time"
)
var (
// ErrTermNotFound indicates that a suitable terminal entry could
// not be found. This can result from either not having TERM set,
// or from the TERM failing to support certain minimal functionality,
// in particular absolute cursor addressability (the cup capability)
// is required. For example, legacy "adm3" lacks this capability,
// whereas the slightly newer "adm3a" supports it. This failure
// occurs most often with "dumb".
ErrTermNotFound = errors.New("terminal entry not found")
)
// Terminfo represents a terminfo entry. Note that we use friendly names
// in Go, but when we write out JSON, we use the same names as terminfo.
// The name, aliases and smous, rmous fields do not come from terminfo directly.
type Terminfo struct {
Name string
Aliases []string
Columns int // cols
Lines int // lines
Colors int // colors
Bell string // bell
Clear string // clear
EnterCA string // smcup
ExitCA string // rmcup
ShowCursor string // cnorm
HideCursor string // civis
AttrOff string // sgr0
Underline string // smul
Bold string // bold
Blink string // blink
Reverse string // rev
Dim string // dim
EnterKeypad string // smkx
ExitKeypad string // rmkx
SetFg string // setaf
SetBg string // setab
SetCursor string // cup
CursorBack1 string // cub1
CursorUp1 string // cuu1
PadChar string // pad
KeyBackspace string // kbs
KeyF1 string // kf1
KeyF2 string // kf2
KeyF3 string // kf3
KeyF4 string // kf4
KeyF5 string // kf5
KeyF6 string // kf6
KeyF7 string // kf7
KeyF8 string // kf8
KeyF9 string // kf9
KeyF10 string // kf10
KeyF11 string // kf11
KeyF12 string // kf12
KeyF13 string // kf13
KeyF14 string // kf14
KeyF15 string // kf15
KeyF16 string // kf16
KeyF17 string // kf17
KeyF18 string // kf18
KeyF19 string // kf19
KeyF20 string // kf20
KeyF21 string // kf21
KeyF22 string // kf22
KeyF23 string // kf23
KeyF24 string // kf24
KeyF25 string // kf25
KeyF26 string // kf26
KeyF27 string // kf27
KeyF28 string // kf28
KeyF29 string // kf29
KeyF30 string // kf30
KeyF31 string // kf31
KeyF32 string // kf32
KeyF33 string // kf33
KeyF34 string // kf34
KeyF35 string // kf35
KeyF36 string // kf36
KeyF37 string // kf37
KeyF38 string // kf38
KeyF39 string // kf39
KeyF40 string // kf40
KeyF41 string // kf41
KeyF42 string // kf42
KeyF43 string // kf43
KeyF44 string // kf44
KeyF45 string // kf45
KeyF46 string // kf46
KeyF47 string // kf47
KeyF48 string // kf48
KeyF49 string // kf49
KeyF50 string // kf50
KeyF51 string // kf51
KeyF52 string // kf52
KeyF53 string // kf53
KeyF54 string // kf54
KeyF55 string // kf55
KeyF56 string // kf56
KeyF57 string // kf57
KeyF58 string // kf58
KeyF59 string // kf59
KeyF60 string // kf60
KeyF61 string // kf61
KeyF62 string // kf62
KeyF63 string // kf63
KeyF64 string // kf64
KeyInsert string // kich1
KeyDelete string // kdch1
KeyHome string // khome
KeyEnd string // kend
KeyHelp string // khlp
KeyPgUp string // kpp
KeyPgDn string // knp
KeyUp string // kcuu1
KeyDown string // kcud1
KeyLeft string // kcub1
KeyRight string // kcuf1
KeyBacktab string // kcbt
KeyExit string // kext
KeyClear string // kclr
KeyPrint string // kprt
KeyCancel string // kcan
Mouse string // kmous
MouseMode string // XM
AltChars string // acsc
EnterAcs string // smacs
ExitAcs string // rmacs
EnableAcs string // enacs
KeyShfRight string // kRIT
KeyShfLeft string // kLFT
KeyShfHome string // kHOM
KeyShfEnd string // kEND
// These are non-standard extensions to terminfo. This includes
// true color support, and some additional keys. It's kind of bizarre
// that shifted variants of left and right exist, but not up and down.
// Terminal support for these is going to vary amongst XTerm
// emulations, so don't depend too much on them in your application.
SetFgBg string // setfgbg
SetFgBgRGB string // setfgbgrgb
SetFgRGB string // setfrgb
SetBgRGB string // setbrgb
KeyShfUp string // shift-up
KeyShfDown string // shift-down
KeyCtrlUp string // ctrl-up
KeyCtrlDown string // ctrl-down
KeyCtrlRight string // ctrl-right
KeyCtrlLeft string // ctrl-left
KeyMetaUp string // meta-up
KeyMetaDown string // meta-down
KeyMetaRight string // meta-right
KeyMetaLeft string // meta-left
KeyAltUp string // alt-up
KeyAltDown string // alt-down
KeyAltRight string // alt-right
KeyAltLeft string // alt-left
KeyCtrlHome string
KeyCtrlEnd string
KeyMetaHome string
KeyMetaEnd string
KeyAltHome string
KeyAltEnd string
KeyAltShfUp string
KeyAltShfDown string
KeyAltShfLeft string
KeyAltShfRight string
KeyMetaShfUp string
KeyMetaShfDown string
KeyMetaShfLeft string
KeyMetaShfRight string
KeyCtrlShfUp string
KeyCtrlShfDown string
KeyCtrlShfLeft string
KeyCtrlShfRight string
KeyCtrlShfHome string
KeyCtrlShfEnd string
KeyAltShfHome string
KeyAltShfEnd string
KeyMetaShfHome string
KeyMetaShfEnd string
KeyPasteBegin string
KeyPasteEnd string
}
type stackElem struct {
s string
i int
isStr bool
isInt bool
}
type stack []stackElem
func (st stack) Push(v string) stack {
e := stackElem{
s: v,
isStr: true,
}
return append(st, e)
}
func (st stack) Pop() (string, stack) {
v := ""
if len(st) > 0 {
e := st[len(st)-1]
st = st[:len(st)-1]
if e.isStr {
v = e.s
} else {
v = strconv.Itoa(e.i)
}
}
return v, st
}
func (st stack) PopInt() (int, stack) {
if len(st) > 0 {
e := st[len(st)-1]
st = st[:len(st)-1]
if e.isInt {
return e.i, st
} else if e.isStr {
i, _ := strconv.Atoi(e.s)
return i, st
}
}
return 0, st
}
func (st stack) PopBool() (bool, stack) {
if len(st) > 0 {
e := st[len(st)-1]
st = st[:len(st)-1]
if e.isStr {
if e.s == "1" {
return true, st
}
return false, st
} else if e.i == 1 {
return true, st
} else {
return false, st
}
}
return false, st
}
func (st stack) PushInt(i int) stack {
e := stackElem{
i: i,
isInt: true,
}
return append(st, e)
}
func (st stack) PushBool(i bool) stack {
if i {
return st.PushInt(1)
}
return st.PushInt(0)
}
func nextch(s string, index int) (byte, int) {
if index < len(s) {
return s[index], index + 1
}
return 0, index
}
// static vars
var svars [26]string
// paramsBuffer handles some persistent state for TParam. Technically we
// could probably dispense with this, but caching buffer arrays gives us
// a nice little performance boost. Furthermore, we know that TParam is
// rarely (never?) called re-entrantly, so we can just reuse the same
// buffers, making it thread-safe by stashing a lock.
type paramsBuffer struct {
out bytes.Buffer
buf bytes.Buffer
lk sync.Mutex
}
// Start initializes the params buffer with the initial string data.
// It also locks the paramsBuffer. The caller must call End() when
// finished.
func (pb *paramsBuffer) Start(s string) {
pb.lk.Lock()
pb.out.Reset()
pb.buf.Reset()
pb.buf.WriteString(s)
}
// End returns the final output from TParam, but it also releases the lock.
func (pb *paramsBuffer) End() string {
s := pb.out.String()
pb.lk.Unlock()
return s
}
// NextCh returns the next input character to the expander.
func (pb *paramsBuffer) NextCh() (byte, error) {
return pb.buf.ReadByte()
}
// PutCh "emits" (rather schedules for output) a single byte character.
func (pb *paramsBuffer) PutCh(ch byte) {
pb.out.WriteByte(ch)
}
// PutString schedules a string for output.
func (pb *paramsBuffer) PutString(s string) {
pb.out.WriteString(s)
}
var pb = &paramsBuffer{}
// TParm takes a terminfo parameterized string, such as setaf or cup, and
// evaluates the string, and returns the result with the parameter
// applied.
func (t *Terminfo) TParm(s string, p ...int) string {
var stk stack
var a, b string
var ai, bi int
var ab bool
var dvars [26]string
var params [9]int
pb.Start(s)
// make sure we always have 9 parameters -- makes it easier
// later to skip checks
for i := 0; i < len(params) && i < len(p); i++ {
params[i] = p[i]
}
nest := 0
for {
ch, err := pb.NextCh()
if err != nil {
break
}
if ch != '%' {
pb.PutCh(ch)
continue
}
ch, err = pb.NextCh()
if err != nil {
// XXX Error
break
}
switch ch {
case '%': // quoted %
pb.PutCh(ch)
case 'i': // increment both parameters (ANSI cup support)
params[0]++
params[1]++
case 'c', 's':
// NB: these, and 'd' below are special cased for
// efficiency. They could be handled by the richer
// format support below, less efficiently.
a, stk = stk.Pop()
pb.PutString(a)
case 'd':
ai, stk = stk.PopInt()
pb.PutString(strconv.Itoa(ai))
case '0', '1', '2', '3', '4', 'x', 'X', 'o', ':':
// This is pretty suboptimal, but this is rarely used.
// None of the mainstream terminals use any of this,
// and it would surprise me if this code is ever
// executed outside of test cases.
f := "%"
if ch == ':' {
ch, _ = pb.NextCh()
}
f += string(ch)
for ch == '+' || ch == '-' || ch == '#' || ch == ' ' {
ch, _ = pb.NextCh()
f += string(ch)
}
for (ch >= '0' && ch <= '9') || ch == '.' {
ch, _ = pb.NextCh()
f += string(ch)
}
switch ch {
case 'd', 'x', 'X', 'o':
ai, stk = stk.PopInt()
pb.PutString(fmt.Sprintf(f, ai))
case 'c', 's':
a, stk = stk.Pop()
pb.PutString(fmt.Sprintf(f, a))
}
case 'p': // push parameter
ch, _ = pb.NextCh()
ai = int(ch - '1')
if ai >= 0 && ai < len(params) {
stk = stk.PushInt(params[ai])
} else {
stk = stk.PushInt(0)
}
case 'P': // pop & store variable
ch, _ = pb.NextCh()
if ch >= 'A' && ch <= 'Z' {
svars[int(ch-'A')], stk = stk.Pop()
} else if ch >= 'a' && ch <= 'z' {
dvars[int(ch-'a')], stk = stk.Pop()
}
case 'g': // recall & push variable
ch, _ = pb.NextCh()
if ch >= 'A' && ch <= 'Z' {
stk = stk.Push(svars[int(ch-'A')])
} else if ch >= 'a' && ch <= 'z' {
stk = stk.Push(dvars[int(ch-'a')])
}
case '\'': // push(char)
ch, _ = pb.NextCh()
pb.NextCh() // must be ' but we don't check
stk = stk.Push(string(ch))
case '{': // push(int)
ai = 0
ch, _ = pb.NextCh()
for ch >= '0' && ch <= '9' {
ai *= 10
ai += int(ch - '0')
ch, _ = pb.NextCh()
}
// ch must be '}' but no verification
stk = stk.PushInt(ai)
case 'l': // push(strlen(pop))
a, stk = stk.Pop()
stk = stk.PushInt(len(a))
case '+':
bi, stk = stk.PopInt()
ai, stk = stk.PopInt()
stk = stk.PushInt(ai + bi)
case '-':
bi, stk = stk.PopInt()
ai, stk = stk.PopInt()
stk = stk.PushInt(ai - bi)
case '*':
bi, stk = stk.PopInt()
ai, stk = stk.PopInt()
stk = stk.PushInt(ai * bi)
case '/':
bi, stk = stk.PopInt()
ai, stk = stk.PopInt()
if bi != 0 {
stk = stk.PushInt(ai / bi)
} else {
stk = stk.PushInt(0)
}
case 'm': // push(pop mod pop)
bi, stk = stk.PopInt()
ai, stk = stk.PopInt()
if bi != 0 {
stk = stk.PushInt(ai % bi)
} else {
stk = stk.PushInt(0)
}
case '&': // AND
bi, stk = stk.PopInt()
ai, stk = stk.PopInt()
stk = stk.PushInt(ai & bi)
case '|': // OR
bi, stk = stk.PopInt()
ai, stk = stk.PopInt()
stk = stk.PushInt(ai | bi)
case '^': // XOR
bi, stk = stk.PopInt()
ai, stk = stk.PopInt()
stk = stk.PushInt(ai ^ bi)
case '~': // bit complement
ai, stk = stk.PopInt()
stk = stk.PushInt(ai ^ -1)
case '!': // logical NOT
ai, stk = stk.PopInt()
stk = stk.PushBool(ai != 0)
case '=': // numeric compare or string compare
b, stk = stk.Pop()
a, stk = stk.Pop()
stk = stk.PushBool(a == b)
case '>': // greater than, numeric
bi, stk = stk.PopInt()
ai, stk = stk.PopInt()
stk = stk.PushBool(ai > bi)
case '<': // less than, numeric
bi, stk = stk.PopInt()
ai, stk = stk.PopInt()
stk = stk.PushBool(ai < bi)
case '?': // start conditional
case 't':
ab, stk = stk.PopBool()
if ab {
// just keep going
break
}
nest = 0
ifloop:
// this loop consumes everything until we hit our else,
// or the end of the conditional
for {
ch, err = pb.NextCh()
if err != nil {
break
}
if ch != '%' {
continue
}
ch, _ = pb.NextCh()
switch ch {
case ';':
if nest == 0 {
break ifloop
}
nest--
case '?':
nest++
case 'e':
if nest == 0 {
break ifloop
}
}
}
case 'e':
// if we got here, it means we didn't use the else
// in the 't' case above, and we should skip until
// the end of the conditional
nest = 0
elloop:
for {
ch, err = pb.NextCh()
if err != nil {
break
}
if ch != '%' {
continue
}
ch, _ = pb.NextCh()
switch ch {
case ';':
if nest == 0 {
break elloop
}
nest--
case '?':
nest++
}
}
case ';': // endif
}
}
return pb.End()
}
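// Illustrative use of TParm (a sketch, not from the original source); the
// cup-style capability below is an assumed example string, not a real entry.
//
//	ti := &Terminfo{SetCursor: "\x1b[%i%p1%d;%p2%dH"}
//	_ = ti.TParm(ti.SetCursor, 4, 9) // "\x1b[5;10H", since %i increments both params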
// TPuts emits the string to the writer, but expands inline padding
// indications (of the form $<[delay]> where [delay] is msec) to
// a suitable time (unless the terminfo string indicates this isn't needed
// by specifying npc - no padding). All Terminfo based strings should be
// emitted using this function.
func (t *Terminfo) TPuts(w io.Writer, s string) {
for {
beg := strings.Index(s, "$<")
if beg < 0 {
// Most strings don't need padding, which is good news!
io.WriteString(w, s)
return
}
io.WriteString(w, s[:beg])
s = s[beg+2:]
end := strings.Index(s, ">")
if end < 0 {
// unterminated.. just emit bytes unadulterated
io.WriteString(w, "$<"+s)
return
}
val := s[:end]
s = s[end+1:]
padus := 0
unit := time.Millisecond
dot := false
loop:
for i := range val {
switch val[i] {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
padus *= 10
padus += int(val[i] - '0')
if dot {
unit /= 10
}
case '.':
if !dot {
dot = true
} else {
break loop
}
default:
break loop
}
}
// Curses historically uses padding to achieve "fine grained"
// delays. We have much better clocks these days, and so we
// do not rely on padding but simply sleep a bit.
if len(t.PadChar) > 0 {
time.Sleep(unit * time.Duration(padus))
}
}
}
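// Illustrative padding expansion (a sketch, not from the original source):
// with a non-empty PadChar, the "$<5>" marker below causes a ~5ms sleep and
// is stripped from the output, so only "ab" is written.
//
//	var buf bytes.Buffer
//	ti := &Terminfo{PadChar: "\x00"}
//	ti.TPuts(&buf, "a$<5>b") // buf holds "ab"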
// TGoto returns a string suitable for addressing the cursor at the given
// row and column. The origin 0, 0 is in the upper left corner of the screen.
func (t *Terminfo) TGoto(col, row int) string {
return t.TParm(t.SetCursor, row, col)
}
// TColor returns a string corresponding to the given foreground and background
// colors. Either fg or bg can be set to -1 to elide.
func (t *Terminfo) TColor(fi, bi int) string {
rv := ""
// As a special case, we map bright colors to lower versions if the
// color table only holds 8. For the remaining 240 colors, the user
// is out of luck. Someday we could create a mapping table, but it's
// not worth it.
if t.Colors == 8 {
if fi > 7 && fi < 16 {
fi -= 8
}
if bi > 7 && bi < 16 {
bi -= 8
}
}
if t.Colors > fi && fi >= 0 {
rv += t.TParm(t.SetFg, fi)
}
if t.Colors > bi && bi >= 0 {
rv += t.TParm(t.SetBg, bi)
}
return rv
}
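// Illustrative color mapping (a sketch, not from the original source); the
// SetFg string is an assumed 8-color example. Bright red (9) maps down to red (1).
//
//	ti := &Terminfo{Colors: 8, SetFg: "\x1b[3%p1%dm"}
//	_ = ti.TColor(9, -1) // "\x1b[31m"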
var (
dblock sync.Mutex
terminfos = make(map[string]*Terminfo)
aliases = make(map[string]string)
)
// AddTerminfo can be called to register a new Terminfo entry.
func AddTerminfo(t *Terminfo) {
dblock.Lock()
terminfos[t.Name] = t
for _, x := range t.Aliases {
terminfos[x] = t
}
dblock.Unlock()
}
// LookupTerminfo attempts to find a definition for the named $TERM.
func LookupTerminfo(name string) (*Terminfo, error) {
if name == "" {
// else on windows: index out of bounds
// on the name[0] reference below
return nil, ErrTermNotFound
}
addtruecolor := false
switch os.Getenv("COLORTERM") {
case "truecolor", "24bit", "24-bit":
addtruecolor = true
}
dblock.Lock()
t := terminfos[name]
dblock.Unlock()
// If the name ends in -truecolor, then fabricate an entry
// from the corresponding -256color, -color, or bare terminal.
if t == nil && strings.HasSuffix(name, "-truecolor") {
suffixes := []string{
"-256color",
"-88color",
"-color",
"",
}
base := name[:len(name)-len("-truecolor")]
for _, s := range suffixes {
if t, _ = LookupTerminfo(base + s); t != nil {
addtruecolor = true
break
}
}
}
if t == nil {
return nil, ErrTermNotFound
}
switch os.Getenv("TCELL_TRUECOLOR") {
case "":
case "disable":
addtruecolor = false
default:
addtruecolor = true
}
// If the user has requested 24-bit color with $COLORTERM, then
// amend the value (unless already present). This means we don't
// need to have a value present.
if addtruecolor &&
t.SetFgBgRGB == "" &&
t.SetFgRGB == "" &&
t.SetBgRGB == "" {
// Supply vanilla ISO 8613-6:1994 24-bit color sequences.
t.SetFgRGB = "\x1b[38;2;%p1%d;%p2%d;%p3%dm"
t.SetBgRGB = "\x1b[48;2;%p1%d;%p2%d;%p3%dm"
t.SetFgBgRGB = "\x1b[38;2;%p1%d;%p2%d;%p3%d;" +
"48;2;%p4%d;%p5%d;%p6%dm"
}
return t, nil
}
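// Illustrative lookup (a sketch, not from the original source): resolve the
// entry for $TERM, assuming one was registered via AddTerminfo, and home the
// cursor.
//
//	if ti, err := LookupTerminfo(os.Getenv("TERM")); err == nil {
//		ti.TPuts(os.Stdout, ti.TGoto(0, 0))
//	}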
|
[
"\"COLORTERM\"",
"\"TCELL_TRUECOLOR\""
] |
[] |
[
"TCELL_TRUECOLOR",
"COLORTERM"
] |
[]
|
["TCELL_TRUECOLOR", "COLORTERM"]
|
go
| 2 | 0 | |
imagenet.py
|
'''
Training script for ImageNet
Copyright (c) Wei YANG, 2017
'''
from __future__ import print_function
import argparse
import os
import shutil
import time
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data as data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
# import torchvision.models as models
import models.imagenet as models
from utils import Bar, Logger, AverageMeter, accuracy, mkdir_p, savefig
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
print('model names: \t', model_names)
# Parse arguments
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
# Datasets
parser.add_argument('-d', '--data', default='path to dataset', type=str)
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
# Optimization options
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('--train-batch', default=256, type=int, metavar='N',
help='train batchsize (default: 256)')
parser.add_argument('--test-batch', default=256, type=int, metavar='N',
help='test batchsize (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--drop', '--dropout', default=0, type=float,
metavar='Dropout', help='Dropout ratio')
parser.add_argument('--schedule', type=int, nargs='+', default=[150, 225],
help='Decrease learning rate at these epochs.')
parser.add_argument('--gamma', type=float, default=0.1, help='LR is multiplied by gamma on schedule.')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
# Checkpoints
parser.add_argument('-c', '--checkpoint', default='checkpoint', type=str, metavar='PATH',
help='path to save checkpoint (default: checkpoint)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
# Architecture
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet18',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet18)')
parser.add_argument('--depth', type=int, default=29, help='Model depth.')
parser.add_argument('--cardinality', type=int, default=32, help='ResNet cardinality (group).')
parser.add_argument('--base-width', type=int, default=4, help='ResNet base width.')
parser.add_argument('--widen-factor', type=int, default=4, help='Widen factor. 4 -> 64, 8 -> 128, ...')
# Miscs
parser.add_argument('--manualSeed', type=int, help='manual seed')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
#Device options
parser.add_argument('--gpu-id', default='0,1,2,3', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
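# Example invocation (illustrative; the dataset path and flag values below are
# placeholders, not defaults of this script):
#   python imagenet.py -a resnet18 -d /path/to/tiny-imagenet-200 --epochs 90 \
#       --schedule 31 61 --gamma 0.1 -c checkpoints/resnet18 --gpu-id 0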
args = parser.parse_args()
state = {k: v for k, v in args._get_kwargs()}
# Use CUDA
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
use_cuda = torch.cuda.is_available()
# Random seed
if args.manualSeed is None:
args.manualSeed = random.randint(1, 10000)
random.seed(args.manualSeed)
torch.manual_seed(args.manualSeed)
if use_cuda:
torch.cuda.manual_seed_all(args.manualSeed)
best_acc = 0 # best test accuracy
def main():
global best_acc
start_epoch = args.start_epoch # start from epoch 0 or last checkpoint epoch
if not os.path.isdir(args.checkpoint):
mkdir_p(args.checkpoint)
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
# transforms.Resize(128),
# transforms.RandomSizedCrop(64), # tiny imagenet 64x64
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])),
batch_size=args.train_batch, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
# transforms.Resize(128),
# transforms.CenterCrop(64),
transforms.ToTensor(),
normalize,
])),
batch_size=args.test_batch, shuffle=False,
num_workers=args.workers, pin_memory=True)
# create model
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch](pretrained=True)
elif args.arch.startswith('resnext'):
model = models.__dict__[args.arch](
baseWidth=args.base_width,
cardinality=args.cardinality,
)
else:
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch](num_classes=200) # tiny-imagenet 200 classes
# if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
# model.features = torch.nn.DataParallel(model.features)
# model.cuda()
# else:
# model = torch.nn.DataParallel(model).cuda()
model = torch.nn.DataParallel(model).cuda()
cudnn.benchmark = True
for name, parameter in model.named_parameters():
if parameter.requires_grad:
print(name, parameter.shape)
print('Total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0))
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
# Resume
title = 'ImageNet-' + args.arch
if args.resume:
# Load checkpoint.
print('==> Resuming from checkpoint..')
assert os.path.isfile(args.resume), 'Error: no checkpoint directory found!'
args.checkpoint = os.path.dirname(args.resume)
checkpoint = torch.load(args.resume)
best_acc = checkpoint['best_acc']
start_epoch = checkpoint['epoch']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title, resume=True)
else:
logger = Logger(os.path.join(args.checkpoint, 'log.txt'), title=title)
logger.set_names(['Learning Rate', 'Train Loss', 'Valid Loss', 'Train Acc.', 'Valid Acc.'])
if args.evaluate:
print('\nEvaluation only')
test_loss, test_acc = test(val_loader, model, criterion, start_epoch, use_cuda)
print(' Test Loss: %.8f, Test Acc: %.2f' % (test_loss, test_acc))
return
# Train and val
for epoch in range(start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch)
print('\nEpoch: [%d | %d] LR: %f' % (epoch + 1, args.epochs, state['lr']))
train_loss, train_acc = train(train_loader, model, criterion, optimizer, epoch, use_cuda)
test_loss, test_acc = test(val_loader, model, criterion, epoch, use_cuda)
# append logger file
logger.append([state['lr'], train_loss, test_loss, train_acc, test_acc])
# save model
is_best = test_acc > best_acc
best_acc = max(test_acc, best_acc)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'acc': test_acc,
'best_acc': best_acc,
'optimizer' : optimizer.state_dict(),
}, is_best, checkpoint=args.checkpoint)
logger.close()
logger.plot()
savefig(os.path.join(args.checkpoint, 'log.eps'))
print('Best acc:')
print(best_acc)
def train(train_loader, model, criterion, optimizer, epoch, use_cuda):
# switch to train mode
model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
end = time.time()
bar = Bar('Processing', max=len(train_loader))
for batch_idx, (inputs, targets) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
# print ('input {} \t output {}'. format(inputs.size(), targets.size()))
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda(non_blocking=True)
# inputs, targets = torch.autograd.Variable(inputs), torch.autograd.Variable(targets)
# compute output
outputs = model(inputs)
loss = criterion(outputs, targets)
# measure accuracy and record loss
prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
losses.update(loss.item(), inputs.size(0))
top1.update(prec1[0], inputs.size(0))
top5.update(prec5[0], inputs.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# plot progress
bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
batch=batch_idx + 1,
size=len(train_loader),
data=data_time.val,
bt=batch_time.val,
total=bar.elapsed_td,
eta=bar.eta_td,
loss=losses.avg,
top1=top1.avg,
top5=top5.avg,
)
bar.next()
bar.finish()
return (losses.avg, top1.avg)
def test(val_loader, model, criterion, epoch, use_cuda):
global best_acc
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
bar = Bar('Processing', max=len(val_loader))
for batch_idx, (inputs, targets) in enumerate(val_loader):
# measure data loading time
data_time.update(time.time() - end)
# print ('input {} \t output {}'. format(inputs.size(), targets.size()))
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
# inputs, targets = torch.autograd.Variable(inputs, volatile=True), torch.autograd.Variable(targets)
# compute output
outputs = model(inputs)
loss = criterion(outputs, targets)
# measure accuracy and record loss
prec1, prec5 = accuracy(outputs.data, targets.data, topk=(1, 5))
losses.update(loss.item(), inputs.size(0))
top1.update(prec1[0], inputs.size(0))
top5.update(prec5[0], inputs.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# plot progress
bar.suffix = '({batch}/{size}) Data: {data:.3f}s | Batch: {bt:.3f}s | Total: {total:} | ETA: {eta:} | Loss: {loss:.4f} | top1: {top1: .4f} | top5: {top5: .4f}'.format(
batch=batch_idx + 1,
size=len(val_loader),
data=data_time.avg,
bt=batch_time.avg,
total=bar.elapsed_td,
eta=bar.eta_td,
loss=losses.avg,
top1=top1.avg,
top5=top5.avg,
)
bar.next()
bar.finish()
print('losses.avg {} \t top1.avg {}'.format(losses.avg, top1.avg) )
return (losses.avg, top1.avg)
def save_checkpoint(state, is_best, checkpoint='checkpoint', filename='checkpoint.pth.tar'):
filepath = os.path.join(checkpoint, filename)
torch.save(state, filepath)
if is_best:
shutil.copyfile(filepath, os.path.join(checkpoint, 'model_best.pth.tar'))
def adjust_learning_rate(optimizer, epoch):
global state
if epoch in args.schedule:
state['lr'] *= args.gamma
for param_group in optimizer.param_groups:
param_group['lr'] = state['lr']
if __name__ == '__main__':
main()
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
api/client/commands.go
|
package client
import (
"bufio"
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
"strconv"
"strings"
"syscall"
"text/tabwriter"
"text/template"
"time"
"github.com/docker/docker/api"
"github.com/docker/docker/dockerversion"
"github.com/docker/docker/engine"
"github.com/docker/docker/graph"
"github.com/docker/docker/nat"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/log"
flag "github.com/docker/docker/pkg/mflag"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/parsers/filters"
"github.com/docker/docker/pkg/promise"
"github.com/docker/docker/pkg/signal"
"github.com/docker/docker/pkg/term"
"github.com/docker/docker/pkg/timeutils"
"github.com/docker/docker/pkg/units"
"github.com/docker/docker/registry"
"github.com/docker/docker/runconfig"
"github.com/docker/docker/utils"
)
const (
tarHeaderSize = 512
)
func (cli *DockerCli) CmdHelp(args ...string) error {
if len(args) > 1 {
method, exists := cli.getMethod(args[:2]...)
if exists {
method("--help")
return nil
}
}
if len(args) > 0 {
method, exists := cli.getMethod(args[0])
if !exists {
fmt.Fprintf(cli.err, "Error: Command not found: %s\n", args[0])
} else {
method("--help")
return nil
}
}
flag.Usage()
return nil
}
func (cli *DockerCli) CmdBuild(args ...string) error {
cmd := cli.Subcmd("build", "PATH | URL | -", "Build a new image from the source code at PATH")
tag := cmd.String([]string{"t", "-tag"}, "", "Repository name (and optionally a tag) to be applied to the resulting image in case of success")
suppressOutput := cmd.Bool([]string{"q", "-quiet"}, false, "Suppress the verbose output generated by the containers")
noCache := cmd.Bool([]string{"#no-cache", "-no-cache"}, false, "Do not use cache when building the image")
rm := cmd.Bool([]string{"#rm", "-rm"}, true, "Remove intermediate containers after a successful build")
forceRm := cmd.Bool([]string{"-force-rm"}, false, "Always remove intermediate containers, even after unsuccessful builds")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
var (
context archive.Archive
isRemote bool
err error
)
_, err = exec.LookPath("git")
hasGit := err == nil
if cmd.Arg(0) == "-" {
// As a special case, 'docker build -' will build from either an empty context with the
// contents of stdin as a Dockerfile, or a tar-ed context from stdin.
buf := bufio.NewReader(cli.in)
magic, err := buf.Peek(tarHeaderSize)
if err != nil && err != io.EOF {
return fmt.Errorf("failed to peek context header from STDIN: %v", err)
}
if !archive.IsArchive(magic) {
dockerfile, err := ioutil.ReadAll(buf)
if err != nil {
return fmt.Errorf("failed to read Dockerfile from STDIN: %v", err)
}
context, err = archive.Generate("Dockerfile", string(dockerfile))
} else {
context = ioutil.NopCloser(buf)
}
} else if utils.IsURL(cmd.Arg(0)) && (!utils.IsGIT(cmd.Arg(0)) || !hasGit) {
isRemote = true
} else {
root := cmd.Arg(0)
if utils.IsGIT(root) {
remoteURL := cmd.Arg(0)
if !strings.HasPrefix(remoteURL, "git://") && !strings.HasPrefix(remoteURL, "git@") && !utils.IsURL(remoteURL) {
remoteURL = "https://" + remoteURL
}
root, err = ioutil.TempDir("", "docker-build-git")
if err != nil {
return err
}
defer os.RemoveAll(root)
if output, err := exec.Command("git", "clone", "--recursive", remoteURL, root).CombinedOutput(); err != nil {
return fmt.Errorf("Error trying to use git: %s (%s)", err, output)
}
}
if _, err := os.Stat(root); err != nil {
return err
}
filename := path.Join(root, "Dockerfile")
if _, err = os.Stat(filename); os.IsNotExist(err) {
return fmt.Errorf("no Dockerfile found in %s", cmd.Arg(0))
}
var excludes []string
ignore, err := ioutil.ReadFile(path.Join(root, ".dockerignore"))
if err != nil && !os.IsNotExist(err) {
return fmt.Errorf("Error reading .dockerignore: '%s'", err)
}
for _, pattern := range strings.Split(string(ignore), "\n") {
ok, err := filepath.Match(pattern, "Dockerfile")
if err != nil {
return fmt.Errorf("Bad .dockerignore pattern: '%s', error: %s", pattern, err)
}
if ok {
return fmt.Errorf("Dockerfile was excluded by .dockerignore pattern '%s'", pattern)
}
excludes = append(excludes, pattern)
}
if err = utils.ValidateContextDirectory(root, excludes); err != nil {
return fmt.Errorf("Error checking context is accessible: '%s'. Please check permissions and try again.", err)
}
options := &archive.TarOptions{
Compression: archive.Uncompressed,
Excludes: excludes,
}
context, err = archive.TarWithOptions(root, options)
if err != nil {
return err
}
}
var body io.Reader
// Setup an upload progress bar
// FIXME: ProgressReader shouldn't be this annoying to use
if context != nil {
sf := utils.NewStreamFormatter(false)
body = utils.ProgressReader(context, 0, cli.err, sf, true, "", "Sending build context to Docker daemon")
}
// Send the build context
v := &url.Values{}
//Check if the given image name can be resolved
if *tag != "" {
repository, tag := parsers.ParseRepositoryTag(*tag)
if _, _, err := registry.ResolveRepositoryName(repository); err != nil {
return err
}
if len(tag) > 0 {
if err := graph.ValidateTagName(tag); err != nil {
return err
}
}
}
v.Set("t", *tag)
if *suppressOutput {
v.Set("q", "1")
}
if isRemote {
v.Set("remote", cmd.Arg(0))
}
if *noCache {
v.Set("nocache", "1")
}
if *rm {
v.Set("rm", "1")
} else {
v.Set("rm", "0")
}
if *forceRm {
v.Set("forcerm", "1")
}
cli.LoadConfigFile()
headers := http.Header(make(map[string][]string))
buf, err := json.Marshal(cli.configFile)
if err != nil {
return err
}
headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))
if context != nil {
headers.Set("Content-Type", "application/tar")
}
err = cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), body, cli.out, headers)
if jerr, ok := err.(*utils.JSONError); ok {
// If no error code is set, default to 1
if jerr.Code == 0 {
jerr.Code = 1
}
return &utils.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
}
return err
}
// 'docker login': log in or register a user with a registry service.
func (cli *DockerCli) CmdLogin(args ...string) error {
cmd := cli.Subcmd("login", "[SERVER]", "Register or log in to a Docker registry server, if no server is specified \""+registry.IndexServerAddress()+"\" is the default.")
var username, password, email string
cmd.StringVar(&username, []string{"u", "-username"}, "", "Username")
cmd.StringVar(&password, []string{"p", "-password"}, "", "Password")
cmd.StringVar(&email, []string{"e", "-email"}, "", "Email")
err := cmd.Parse(args)
if err != nil {
return nil
}
serverAddress := registry.IndexServerAddress()
if len(cmd.Args()) > 0 {
serverAddress = cmd.Arg(0)
}
promptDefault := func(prompt string, configDefault string) {
if configDefault == "" {
fmt.Fprintf(cli.out, "%s: ", prompt)
} else {
fmt.Fprintf(cli.out, "%s (%s): ", prompt, configDefault)
}
}
readInput := func(in io.Reader, out io.Writer) string {
reader := bufio.NewReader(in)
line, _, err := reader.ReadLine()
if err != nil {
fmt.Fprintln(out, err.Error())
os.Exit(1)
}
return string(line)
}
cli.LoadConfigFile()
authconfig, ok := cli.configFile.Configs[serverAddress]
if !ok {
authconfig = registry.AuthConfig{}
}
if username == "" {
promptDefault("Username", authconfig.Username)
username = readInput(cli.in, cli.out)
if username == "" {
username = authconfig.Username
}
}
// Assume that a different username means they may not want to use
// the password or email from the config file, so prompt them
if username != authconfig.Username {
if password == "" {
oldState, _ := term.SaveState(cli.inFd)
fmt.Fprintf(cli.out, "Password: ")
term.DisableEcho(cli.inFd, oldState)
password = readInput(cli.in, cli.out)
fmt.Fprint(cli.out, "\n")
term.RestoreTerminal(cli.inFd, oldState)
if password == "" {
return fmt.Errorf("Error : Password Required")
}
}
if email == "" {
promptDefault("Email", authconfig.Email)
email = readInput(cli.in, cli.out)
if email == "" {
email = authconfig.Email
}
}
} else {
// However, if they don't override the username, use the
// password or email from the command line if specified. IOW, allow
// them to change/override them. And if not specified, just
// use what's in the config file.
if password == "" {
password = authconfig.Password
}
if email == "" {
email = authconfig.Email
}
}
authconfig.Username = username
authconfig.Password = password
authconfig.Email = email
authconfig.ServerAddress = serverAddress
cli.configFile.Configs[serverAddress] = authconfig
stream, statusCode, err := cli.call("POST", "/auth", cli.configFile.Configs[serverAddress], false)
if statusCode == 401 {
delete(cli.configFile.Configs, serverAddress)
registry.SaveConfig(cli.configFile)
return err
}
if err != nil {
return err
}
var out2 engine.Env
err = out2.Decode(stream)
if err != nil {
cli.configFile, _ = registry.LoadConfig(os.Getenv("HOME"))
return err
}
registry.SaveConfig(cli.configFile)
if out2.Get("Status") != "" {
fmt.Fprintf(cli.out, "%s\n", out2.Get("Status"))
}
return nil
}
// log out from a Docker registry
func (cli *DockerCli) CmdLogout(args ...string) error {
cmd := cli.Subcmd("logout", "[SERVER]", "Log out from a Docker registry, if no server is specified \""+registry.IndexServerAddress()+"\" is the default.")
if err := cmd.Parse(args); err != nil {
return nil
}
serverAddress := registry.IndexServerAddress()
if len(cmd.Args()) > 0 {
serverAddress = cmd.Arg(0)
}
cli.LoadConfigFile()
if _, ok := cli.configFile.Configs[serverAddress]; !ok {
fmt.Fprintf(cli.out, "Not logged in to %s\n", serverAddress)
} else {
fmt.Fprintf(cli.out, "Remove login credentials for %s\n", serverAddress)
delete(cli.configFile.Configs, serverAddress)
if err := registry.SaveConfig(cli.configFile); err != nil {
return fmt.Errorf("Failed to save docker config: %v", err)
}
}
return nil
}
// 'docker wait': block until a container stops
func (cli *DockerCli) CmdWait(args ...string) error {
cmd := cli.Subcmd("wait", "CONTAINER [CONTAINER...]", "Block until a container stops, then print its exit code.")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
var encounteredError error
for _, name := range cmd.Args() {
status, err := waitForExit(cli, name)
if err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to wait one or more containers")
} else {
fmt.Fprintf(cli.out, "%d\n", status)
}
}
return encounteredError
}
// 'docker version': show version information
func (cli *DockerCli) CmdVersion(args ...string) error {
cmd := cli.Subcmd("version", "", "Show the Docker version information.")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() > 0 {
cmd.Usage()
return nil
}
if dockerversion.VERSION != "" {
fmt.Fprintf(cli.out, "Client version: %s\n", dockerversion.VERSION)
}
fmt.Fprintf(cli.out, "Client API version: %s\n", api.APIVERSION)
fmt.Fprintf(cli.out, "Go version (client): %s\n", runtime.Version())
if dockerversion.GITCOMMIT != "" {
fmt.Fprintf(cli.out, "Git commit (client): %s\n", dockerversion.GITCOMMIT)
}
fmt.Fprintf(cli.out, "OS/Arch (client): %s/%s\n", runtime.GOOS, runtime.GOARCH)
body, _, err := readBody(cli.call("GET", "/version", nil, false))
if err != nil {
return err
}
out := engine.NewOutput()
remoteVersion, err := out.AddEnv()
if err != nil {
log.Errorf("Error reading remote version: %s", err)
return err
}
if _, err := out.Write(body); err != nil {
log.Errorf("Error reading remote version: %s", err)
return err
}
out.Close()
fmt.Fprintf(cli.out, "Server version: %s\n", remoteVersion.Get("Version"))
if apiVersion := remoteVersion.Get("ApiVersion"); apiVersion != "" {
fmt.Fprintf(cli.out, "Server API version: %s\n", apiVersion)
}
fmt.Fprintf(cli.out, "Go version (server): %s\n", remoteVersion.Get("GoVersion"))
fmt.Fprintf(cli.out, "Git commit (server): %s\n", remoteVersion.Get("GitCommit"))
return nil
}
// 'docker info': display system-wide information.
func (cli *DockerCli) CmdInfo(args ...string) error {
cmd := cli.Subcmd("info", "", "Display system-wide information")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() > 0 {
cmd.Usage()
return nil
}
body, _, err := readBody(cli.call("GET", "/info", nil, false))
if err != nil {
return err
}
out := engine.NewOutput()
remoteInfo, err := out.AddEnv()
if err != nil {
return err
}
if _, err := out.Write(body); err != nil {
log.Errorf("Error reading remote info: %s", err)
return err
}
out.Close()
fmt.Fprintf(cli.out, "Containers: %d\n", remoteInfo.GetInt("Containers"))
fmt.Fprintf(cli.out, "Images: %d\n", remoteInfo.GetInt("Images"))
fmt.Fprintf(cli.out, "Storage Driver: %s\n", remoteInfo.Get("Driver"))
var driverStatus [][2]string
if err := remoteInfo.GetJson("DriverStatus", &driverStatus); err != nil {
return err
}
for _, pair := range driverStatus {
fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1])
}
fmt.Fprintf(cli.out, "Execution Driver: %s\n", remoteInfo.Get("ExecutionDriver"))
fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion"))
fmt.Fprintf(cli.out, "Operating System: %s\n", remoteInfo.Get("OperatingSystem"))
if remoteInfo.GetBool("Debug") || os.Getenv("DEBUG") != "" {
fmt.Fprintf(cli.out, "Debug mode (server): %v\n", remoteInfo.GetBool("Debug"))
fmt.Fprintf(cli.out, "Debug mode (client): %v\n", os.Getenv("DEBUG") != "")
fmt.Fprintf(cli.out, "Fds: %d\n", remoteInfo.GetInt("NFd"))
fmt.Fprintf(cli.out, "Goroutines: %d\n", remoteInfo.GetInt("NGoroutines"))
fmt.Fprintf(cli.out, "EventsListeners: %d\n", remoteInfo.GetInt("NEventsListener"))
if initSha1 := remoteInfo.Get("InitSha1"); initSha1 != "" {
fmt.Fprintf(cli.out, "Init SHA1: %s\n", initSha1)
}
if initPath := remoteInfo.Get("InitPath"); initPath != "" {
fmt.Fprintf(cli.out, "Init Path: %s\n", initPath)
}
}
if len(remoteInfo.GetList("IndexServerAddress")) != 0 {
cli.LoadConfigFile()
u := cli.configFile.Configs[remoteInfo.Get("IndexServerAddress")].Username
if len(u) > 0 {
fmt.Fprintf(cli.out, "Username: %v\n", u)
fmt.Fprintf(cli.out, "Registry: %v\n", remoteInfo.GetList("IndexServerAddress"))
}
}
if !remoteInfo.GetBool("MemoryLimit") {
fmt.Fprintf(cli.err, "WARNING: No memory limit support\n")
}
if !remoteInfo.GetBool("SwapLimit") {
fmt.Fprintf(cli.err, "WARNING: No swap limit support\n")
}
if !remoteInfo.GetBool("IPv4Forwarding") {
fmt.Fprintf(cli.err, "WARNING: IPv4 forwarding is disabled.\n")
}
return nil
}
func (cli *DockerCli) CmdStop(args ...string) error {
cmd := cli.Subcmd("stop", "CONTAINER [CONTAINER...]", "Stop a running container by sending SIGTERM and then SIGKILL after a grace period")
nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to wait for the container to stop before killing it. Default is 10 seconds.")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
v := url.Values{}
v.Set("t", strconv.Itoa(*nSeconds))
var encounteredError error
for _, name := range cmd.Args() {
_, _, err := readBody(cli.call("POST", "/containers/"+name+"/stop?"+v.Encode(), nil, false))
if err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to stop one or more containers")
} else {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
return encounteredError
}
func (cli *DockerCli) CmdRestart(args ...string) error {
cmd := cli.Subcmd("restart", "CONTAINER [CONTAINER...]", "Restart a running container")
nSeconds := cmd.Int([]string{"t", "-time"}, 10, "Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default is 10 seconds.")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
v := url.Values{}
v.Set("t", strconv.Itoa(*nSeconds))
var encounteredError error
for _, name := range cmd.Args() {
_, _, err := readBody(cli.call("POST", "/containers/"+name+"/restart?"+v.Encode(), nil, false))
if err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to restart one or more containers")
} else {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
return encounteredError
}
func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal {
sigc := make(chan os.Signal, 128)
signal.CatchAll(sigc)
go func() {
for s := range sigc {
if s == syscall.SIGCHLD {
continue
}
var sig string
for sigStr, sigN := range signal.SignalMap {
if sigN == s {
sig = sigStr
break
}
}
if sig == "" {
log.Errorf("Unsupported signal: %d. Discarding.", s)
}
if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", cid, sig), nil, false)); err != nil {
log.Debugf("Error sending signal: %s", err)
}
}
}()
return sigc
}
func (cli *DockerCli) CmdStart(args ...string) error {
var (
cErr chan error
tty bool
cmd = cli.Subcmd("start", "CONTAINER [CONTAINER...]", "Restart a stopped container")
attach = cmd.Bool([]string{"a", "-attach"}, false, "Attach container's STDOUT and STDERR and forward all signals to the process")
openStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Attach container's STDIN")
)
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
if *attach || *openStdin {
if cmd.NArg() > 1 {
return fmt.Errorf("You cannot start and attach multiple containers at once.")
}
stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false)
if err != nil {
return err
}
env := engine.Env{}
if err := env.Decode(stream); err != nil {
return err
}
config := env.GetSubEnv("Config")
tty = config.GetBool("Tty")
if !tty {
sigc := cli.forwardAllSignals(cmd.Arg(0))
defer signal.StopCatch(sigc)
}
var in io.ReadCloser
v := url.Values{}
v.Set("stream", "1")
if *openStdin && config.GetBool("OpenStdin") {
v.Set("stdin", "1")
in = cli.in
}
v.Set("stdout", "1")
v.Set("stderr", "1")
cErr = promise.Go(func() error {
return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, nil, nil)
})
}
var encounteredError error
for _, name := range cmd.Args() {
_, _, err := readBody(cli.call("POST", "/containers/"+name+"/start", nil, false))
if err != nil {
if !*attach || !*openStdin {
fmt.Fprintf(cli.err, "%s\n", err)
}
encounteredError = fmt.Errorf("Error: failed to start one or more containers")
} else {
if !*attach || !*openStdin {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
}
if encounteredError != nil {
if *openStdin || *attach {
cli.in.Close()
}
return encounteredError
}
if *openStdin || *attach {
if tty && cli.isTerminalOut {
if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil {
log.Errorf("Error monitoring TTY size: %s", err)
}
}
return <-cErr
}
return nil
}
func (cli *DockerCli) CmdUnpause(args ...string) error {
cmd := cli.Subcmd("unpause", "CONTAINER", "Unpause all processes within a container")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
var encounteredError error
for _, name := range cmd.Args() {
if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/unpause", name), nil, false)); err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to unpause container named %s", name)
} else {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
return encounteredError
}
func (cli *DockerCli) CmdPause(args ...string) error {
cmd := cli.Subcmd("pause", "CONTAINER", "Pause all processes within a container")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
var encounteredError error
for _, name := range cmd.Args() {
if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/pause", name), nil, false)); err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to pause container named %s", name)
} else {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
return encounteredError
}
func (cli *DockerCli) CmdInspect(args ...string) error {
cmd := cli.Subcmd("inspect", "CONTAINER|IMAGE [CONTAINER|IMAGE...]", "Return low-level information on a container or image")
tmplStr := cmd.String([]string{"f", "#format", "-format"}, "", "Format the output using the given go template.")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
var tmpl *template.Template
if *tmplStr != "" {
var err error
if tmpl, err = template.New("").Funcs(funcMap).Parse(*tmplStr); err != nil {
fmt.Fprintf(cli.err, "Template parsing error: %v\n", err)
return &utils.StatusError{StatusCode: 64,
Status: "Template parsing error: " + err.Error()}
}
}
indented := new(bytes.Buffer)
indented.WriteByte('[')
status := 0
for _, name := range cmd.Args() {
obj, _, err := readBody(cli.call("GET", "/containers/"+name+"/json", nil, false))
if err != nil {
obj, _, err = readBody(cli.call("GET", "/images/"+name+"/json", nil, false))
if err != nil {
if strings.Contains(err.Error(), "No such") {
fmt.Fprintf(cli.err, "Error: No such image or container: %s\n", name)
} else {
fmt.Fprintf(cli.err, "%s", err)
}
status = 1
continue
}
}
if tmpl == nil {
if err = json.Indent(indented, obj, "", " "); err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
status = 1
continue
}
} else {
// Has template, will render
var value interface{}
if err := json.Unmarshal(obj, &value); err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
status = 1
continue
}
if err := tmpl.Execute(cli.out, value); err != nil {
return err
}
cli.out.Write([]byte{'\n'})
}
indented.WriteString(",")
}
if indented.Len() > 1 {
// Remove trailing ','
indented.Truncate(indented.Len() - 1)
}
indented.WriteByte(']')
if tmpl == nil {
if _, err := io.Copy(cli.out, indented); err != nil {
return err
}
}
if status != 0 {
return &utils.StatusError{StatusCode: status}
}
return nil
}
func (cli *DockerCli) CmdTop(args ...string) error {
cmd := cli.Subcmd("top", "CONTAINER [ps OPTIONS]", "Display the running processes of a container")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() == 0 {
cmd.Usage()
return nil
}
val := url.Values{}
if cmd.NArg() > 1 {
val.Set("ps_args", strings.Join(cmd.Args()[1:], " "))
}
stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/top?"+val.Encode(), nil, false)
if err != nil {
return err
}
var procs engine.Env
if err := procs.Decode(stream); err != nil {
return err
}
w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
fmt.Fprintln(w, strings.Join(procs.GetList("Titles"), "\t"))
processes := [][]string{}
if err := procs.GetJson("Processes", &processes); err != nil {
return err
}
for _, proc := range processes {
fmt.Fprintln(w, strings.Join(proc, "\t"))
}
w.Flush()
return nil
}
func (cli *DockerCli) CmdPort(args ...string) error {
cmd := cli.Subcmd("port", "CONTAINER [PRIVATE_PORT[/PROTO]]", "List port mappings for the CONTAINER, or lookup the public-facing port that is NAT-ed to the PRIVATE_PORT")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
stream, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil, false)
if err != nil {
return err
}
env := engine.Env{}
if err := env.Decode(stream); err != nil {
return err
}
ports := nat.PortMap{}
if err := env.GetSubEnv("NetworkSettings").GetJson("Ports", &ports); err != nil {
return err
}
if cmd.NArg() == 2 {
var (
port = cmd.Arg(1)
proto = "tcp"
parts = strings.SplitN(port, "/", 2)
)
if len(parts) == 2 && len(parts[1]) != 0 {
port = parts[0]
proto = parts[1]
}
natPort := port + "/" + proto
if frontends, exists := ports[nat.Port(port+"/"+proto)]; exists && frontends != nil {
for _, frontend := range frontends {
fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIp, frontend.HostPort)
}
return nil
}
return fmt.Errorf("Error: No public port '%s' published for %s", natPort, cmd.Arg(0))
}
for from, frontends := range ports {
for _, frontend := range frontends {
fmt.Fprintf(cli.out, "%s -> %s:%s\n", from, frontend.HostIp, frontend.HostPort)
}
}
return nil
}
// 'docker rmi IMAGE' removes all images with the name IMAGE
func (cli *DockerCli) CmdRmi(args ...string) error {
var (
cmd = cli.Subcmd("rmi", "IMAGE [IMAGE...]", "Remove one or more images")
force = cmd.Bool([]string{"f", "-force"}, false, "Force removal of the image")
noprune = cmd.Bool([]string{"-no-prune"}, false, "Do not delete untagged parents")
)
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
v := url.Values{}
if *force {
v.Set("force", "1")
}
if *noprune {
v.Set("noprune", "1")
}
var encounteredError error
for _, name := range cmd.Args() {
body, _, err := readBody(cli.call("DELETE", "/images/"+name+"?"+v.Encode(), nil, false))
if err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to remove one or more images")
} else {
outs := engine.NewTable("Created", 0)
if _, err := outs.ReadListFrom(body); err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to remove one or more images")
continue
}
for _, out := range outs.Data {
if out.Get("Deleted") != "" {
fmt.Fprintf(cli.out, "Deleted: %s\n", out.Get("Deleted"))
} else {
fmt.Fprintf(cli.out, "Untagged: %s\n", out.Get("Untagged"))
}
}
}
}
return encounteredError
}
func (cli *DockerCli) CmdHistory(args ...string) error {
cmd := cli.Subcmd("history", "IMAGE", "Show the history of an image")
quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs")
noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
body, _, err := readBody(cli.call("GET", "/images/"+cmd.Arg(0)+"/history", nil, false))
if err != nil {
return err
}
outs := engine.NewTable("Created", 0)
if _, err := outs.ReadListFrom(body); err != nil {
return err
}
w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
if !*quiet {
fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE")
}
for _, out := range outs.Data {
outID := out.Get("Id")
if !*quiet {
if *noTrunc {
fmt.Fprintf(w, "%s\t", outID)
} else {
fmt.Fprintf(w, "%s\t", utils.TruncateID(outID))
}
fmt.Fprintf(w, "%s ago\t", units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))))
if *noTrunc {
fmt.Fprintf(w, "%s\t", out.Get("CreatedBy"))
} else {
fmt.Fprintf(w, "%s\t", utils.Trunc(out.Get("CreatedBy"), 45))
}
fmt.Fprintf(w, "%s\n", units.HumanSize(out.GetInt64("Size")))
} else {
if *noTrunc {
fmt.Fprintln(w, outID)
} else {
fmt.Fprintln(w, utils.TruncateID(outID))
}
}
}
w.Flush()
return nil
}
func (cli *DockerCli) CmdRm(args ...string) error {
cmd := cli.Subcmd("rm", "CONTAINER [CONTAINER...]", "Remove one or more containers")
v := cmd.Bool([]string{"v", "-volumes"}, false, "Remove the volumes associated with the container")
link := cmd.Bool([]string{"l", "#link", "-link"}, false, "Remove the specified link and not the underlying container")
force := cmd.Bool([]string{"f", "-force"}, false, "Force the removal of a running container (uses SIGKILL)")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
val := url.Values{}
if *v {
val.Set("v", "1")
}
if *link {
val.Set("link", "1")
}
if *force {
val.Set("force", "1")
}
var encounteredError error
for _, name := range cmd.Args() {
_, _, err := readBody(cli.call("DELETE", "/containers/"+name+"?"+val.Encode(), nil, false))
if err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to remove one or more containers")
} else {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
return encounteredError
}
// 'docker kill NAME' kills a running container
func (cli *DockerCli) CmdKill(args ...string) error {
cmd := cli.Subcmd("kill", "CONTAINER [CONTAINER...]", "Kill a running container using SIGKILL or a specified signal")
signal := cmd.String([]string{"s", "-signal"}, "KILL", "Signal to send to the container")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
var encounteredError error
for _, name := range cmd.Args() {
if _, _, err := readBody(cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%s", name, *signal), nil, false)); err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to kill one or more containers")
} else {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
return encounteredError
}
func (cli *DockerCli) CmdImport(args ...string) error {
cmd := cli.Subcmd("import", "URL|- [REPOSITORY[:TAG]]", "Create an empty filesystem image and import the contents of the tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it.")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
var (
v = url.Values{}
src = cmd.Arg(0)
repository = cmd.Arg(1)
)
v.Set("fromSrc", src)
v.Set("repo", repository)
if cmd.NArg() == 3 {
fmt.Fprintf(cli.err, "[DEPRECATED] The format 'URL|- [REPOSITORY [TAG]]' as been deprecated. Please use URL|- [REPOSITORY[:TAG]]\n")
v.Set("tag", cmd.Arg(2))
}
if repository != "" {
//Check if the given image name can be resolved
repo, _ := parsers.ParseRepositoryTag(repository)
if _, _, err := registry.ResolveRepositoryName(repo); err != nil {
return err
}
}
var in io.Reader
if src == "-" {
in = cli.in
}
return cli.stream("POST", "/images/create?"+v.Encode(), in, cli.out, nil)
}
func (cli *DockerCli) CmdPush(args ...string) error {
cmd := cli.Subcmd("push", "NAME[:TAG]", "Push an image or a repository to the registry")
if err := cmd.Parse(args); err != nil {
return nil
}
name := cmd.Arg(0)
if name == "" {
cmd.Usage()
return nil
}
cli.LoadConfigFile()
remote, tag := parsers.ParseRepositoryTag(name)
// Resolve the Repository name from fqn to hostname + name
hostname, _, err := registry.ResolveRepositoryName(remote)
if err != nil {
return err
}
// Resolve the Auth config relevant for this server
authConfig := cli.configFile.ResolveAuthConfig(hostname)
// If we're not using a custom registry, we know the restrictions
// applied to repository names and can warn the user in advance.
// Custom repositories can have different rules, and we must also
// allow pushing by image ID.
if len(strings.SplitN(name, "/", 2)) == 1 {
username := cli.configFile.Configs[registry.IndexServerAddress()].Username
if username == "" {
username = "<user>"
}
return fmt.Errorf("You cannot push a \"root\" repository. Please rename your repository in <user>/<repo> (ex: %s/%s)", username, name)
}
v := url.Values{}
v.Set("tag", tag)
push := func(authConfig registry.AuthConfig) error {
buf, err := json.Marshal(authConfig)
if err != nil {
return err
}
registryAuthHeader := []string{
base64.URLEncoding.EncodeToString(buf),
}
return cli.stream("POST", "/images/"+remote+"/push?"+v.Encode(), nil, cli.out, map[string][]string{
"X-Registry-Auth": registryAuthHeader,
})
}
if err := push(authConfig); err != nil {
if strings.Contains(err.Error(), "Status 401") {
fmt.Fprintln(cli.out, "\nPlease login prior to push:")
if err := cli.CmdLogin(hostname); err != nil {
return err
}
authConfig := cli.configFile.ResolveAuthConfig(hostname)
return push(authConfig)
}
return err
}
return nil
}
func (cli *DockerCli) CmdPull(args ...string) error {
cmd := cli.Subcmd("pull", "NAME[:TAG]", "Pull an image or a repository from the registry")
allTags := cmd.Bool([]string{"a", "-all-tags"}, false, "Download all tagged images in the repository")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
var (
v = url.Values{}
remote = cmd.Arg(0)
newRemote = remote
)
taglessRemote, tag := parsers.ParseRepositoryTag(remote)
if tag == "" && !*allTags {
newRemote = taglessRemote + ":latest"
}
if tag != "" && *allTags {
return fmt.Errorf("tag can't be used with --all-tags/-a")
}
v.Set("fromImage", newRemote)
// Resolve the Repository name from fqn to hostname + name
hostname, _, err := registry.ResolveRepositoryName(taglessRemote)
if err != nil {
return err
}
cli.LoadConfigFile()
// Resolve the Auth config relevant for this server
authConfig := cli.configFile.ResolveAuthConfig(hostname)
pull := func(authConfig registry.AuthConfig) error {
buf, err := json.Marshal(authConfig)
if err != nil {
return err
}
registryAuthHeader := []string{
base64.URLEncoding.EncodeToString(buf),
}
return cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.out, map[string][]string{
"X-Registry-Auth": registryAuthHeader,
})
}
if err := pull(authConfig); err != nil {
if strings.Contains(err.Error(), "Status 401") {
fmt.Fprintln(cli.out, "\nPlease login prior to pull:")
if err := cli.CmdLogin(hostname); err != nil {
return err
}
authConfig := cli.configFile.ResolveAuthConfig(hostname)
return pull(authConfig)
}
return err
}
return nil
}
func (cli *DockerCli) CmdImages(args ...string) error {
cmd := cli.Subcmd("images", "[NAME]", "List images")
quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only show numeric IDs")
all := cmd.Bool([]string{"a", "-all"}, false, "Show all images (by default filter out the intermediate image layers)")
noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
// FIXME: --viz and --tree are deprecated. Remove them in a future version.
flViz := cmd.Bool([]string{"#v", "#viz", "#-viz"}, false, "Output graph in graphviz format")
flTree := cmd.Bool([]string{"#t", "#tree", "#-tree"}, false, "Output graph in tree format")
flFilter := opts.NewListOpts(nil)
cmd.Var(&flFilter, []string{"f", "-filter"}, "Provide filter values (i.e. 'dangling=true')")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() > 1 {
cmd.Usage()
return nil
}
// Consolidate all filter flags, and sanity check them early.
// They'll get processed in the daemon/server.
imageFilterArgs := filters.Args{}
for _, f := range flFilter.GetAll() {
var err error
imageFilterArgs, err = filters.ParseFlag(f, imageFilterArgs)
if err != nil {
return err
}
}
matchName := cmd.Arg(0)
// FIXME: --viz and --tree are deprecated. Remove them in a future version.
if *flViz || *flTree {
v := url.Values{
"all": []string{"1"},
}
if len(imageFilterArgs) > 0 {
filterJson, err := filters.ToParam(imageFilterArgs)
if err != nil {
return err
}
v.Set("filters", filterJson)
}
body, _, err := readBody(cli.call("GET", "/images/json?"+v.Encode(), nil, false))
if err != nil {
return err
}
outs := engine.NewTable("Created", 0)
if _, err := outs.ReadListFrom(body); err != nil {
return err
}
var (
printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string)
startImage *engine.Env
roots = engine.NewTable("Created", outs.Len())
byParent = make(map[string]*engine.Table)
)
for _, image := range outs.Data {
if image.Get("ParentId") == "" {
roots.Add(image)
} else {
if children, exists := byParent[image.Get("ParentId")]; exists {
children.Add(image)
} else {
byParent[image.Get("ParentId")] = engine.NewTable("Created", 1)
byParent[image.Get("ParentId")].Add(image)
}
}
if matchName != "" {
if matchName == image.Get("Id") || matchName == utils.TruncateID(image.Get("Id")) {
startImage = image
}
for _, repotag := range image.GetList("RepoTags") {
if repotag == matchName {
startImage = image
}
}
}
}
if *flViz {
fmt.Fprintf(cli.out, "digraph docker {\n")
printNode = (*DockerCli).printVizNode
} else {
printNode = (*DockerCli).printTreeNode
}
if startImage != nil {
root := engine.NewTable("Created", 1)
root.Add(startImage)
cli.WalkTree(*noTrunc, root, byParent, "", printNode)
} else if matchName == "" {
cli.WalkTree(*noTrunc, roots, byParent, "", printNode)
}
if *flViz {
fmt.Fprintf(cli.out, " base [style=invisible]\n}\n")
}
} else {
v := url.Values{}
if len(imageFilterArgs) > 0 {
filterJson, err := filters.ToParam(imageFilterArgs)
if err != nil {
return err
}
v.Set("filters", filterJson)
}
if cmd.NArg() == 1 {
// FIXME rename this parameter, to not be confused with the filters flag
v.Set("filter", matchName)
}
if *all {
v.Set("all", "1")
}
body, _, err := readBody(cli.call("GET", "/images/json?"+v.Encode(), nil, false))
if err != nil {
return err
}
outs := engine.NewTable("Created", 0)
if _, err := outs.ReadListFrom(body); err != nil {
return err
}
w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
if !*quiet {
fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tVIRTUAL SIZE")
}
for _, out := range outs.Data {
for _, repotag := range out.GetList("RepoTags") {
repo, tag := parsers.ParseRepositoryTag(repotag)
outID := out.Get("Id")
if !*noTrunc {
outID = utils.TruncateID(outID)
}
if !*quiet {
fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, outID, units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), units.HumanSize(out.GetInt64("VirtualSize")))
} else {
fmt.Fprintln(w, outID)
}
}
}
if !*quiet {
w.Flush()
}
}
return nil
}
// FIXME: --viz and --tree are deprecated. Remove them in a future version.
func (cli *DockerCli) WalkTree(noTrunc bool, images *engine.Table, byParent map[string]*engine.Table, prefix string, printNode func(cli *DockerCli, noTrunc bool, image *engine.Env, prefix string)) {
length := images.Len()
if length > 1 {
for index, image := range images.Data {
if index+1 == length {
printNode(cli, noTrunc, image, prefix+"└─")
if subimages, exists := byParent[image.Get("Id")]; exists {
cli.WalkTree(noTrunc, subimages, byParent, prefix+" ", printNode)
}
} else {
printNode(cli, noTrunc, image, prefix+"\u251C─")
if subimages, exists := byParent[image.Get("Id")]; exists {
cli.WalkTree(noTrunc, subimages, byParent, prefix+"\u2502 ", printNode)
}
}
}
} else {
for _, image := range images.Data {
printNode(cli, noTrunc, image, prefix+"└─")
if subimages, exists := byParent[image.Get("Id")]; exists {
cli.WalkTree(noTrunc, subimages, byParent, prefix+" ", printNode)
}
}
}
}
// FIXME: --viz and --tree are deprecated. Remove them in a future version.
func (cli *DockerCli) printVizNode(noTrunc bool, image *engine.Env, prefix string) {
var (
imageID string
parentID string
)
if noTrunc {
imageID = image.Get("Id")
parentID = image.Get("ParentId")
} else {
imageID = utils.TruncateID(image.Get("Id"))
parentID = utils.TruncateID(image.Get("ParentId"))
}
if parentID == "" {
fmt.Fprintf(cli.out, " base -> \"%s\" [style=invis]\n", imageID)
} else {
fmt.Fprintf(cli.out, " \"%s\" -> \"%s\"\n", parentID, imageID)
}
if image.GetList("RepoTags")[0] != "<none>:<none>" {
fmt.Fprintf(cli.out, " \"%s\" [label=\"%s\\n%s\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n",
imageID, imageID, strings.Join(image.GetList("RepoTags"), "\\n"))
}
}
// FIXME: --viz and --tree are deprecated. Remove them in a future version.
func (cli *DockerCli) printTreeNode(noTrunc bool, image *engine.Env, prefix string) {
var imageID string
if noTrunc {
imageID = image.Get("Id")
} else {
imageID = utils.TruncateID(image.Get("Id"))
}
fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, units.HumanSize(image.GetInt64("VirtualSize")))
if image.GetList("RepoTags")[0] != "<none>:<none>" {
fmt.Fprintf(cli.out, " Tags: %s\n", strings.Join(image.GetList("RepoTags"), ", "))
} else {
fmt.Fprint(cli.out, "\n")
}
}
func (cli *DockerCli) CmdPs(args ...string) error {
cmd := cli.Subcmd("ps", "", "List containers")
quiet := cmd.Bool([]string{"q", "-quiet"}, false, "Only display numeric IDs")
size := cmd.Bool([]string{"s", "-size"}, false, "Display sizes")
all := cmd.Bool([]string{"a", "-all"}, false, "Show all containers. Only running containers are shown by default.")
noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
nLatest := cmd.Bool([]string{"l", "-latest"}, false, "Show only the latest created container, include non-running ones.")
since := cmd.String([]string{"#sinceId", "#-since-id", "-since"}, "", "Show only containers created since Id or Name, include non-running ones.")
before := cmd.String([]string{"#beforeId", "#-before-id", "-before"}, "", "Show only containers created before Id or Name, include non-running ones.")
last := cmd.Int([]string{"n"}, -1, "Show n last created containers, include non-running ones.")
flFilter := opts.NewListOpts(nil)
cmd.Var(&flFilter, []string{"f", "-filter"}, "Provide filter values. Valid filters:\nexited=<int> - containers with exit code of <int>\nstatus=(restarting|running|paused|exited)")
if err := cmd.Parse(args); err != nil {
return nil
}
v := url.Values{}
if *last == -1 && *nLatest {
*last = 1
}
if *all {
v.Set("all", "1")
}
if *last != -1 {
v.Set("limit", strconv.Itoa(*last))
}
if *since != "" {
v.Set("since", *since)
}
if *before != "" {
v.Set("before", *before)
}
if *size {
v.Set("size", "1")
}
// Consolidate all filter flags, and sanity check them.
// They'll get processed in the daemon/server.
psFilterArgs := filters.Args{}
for _, f := range flFilter.GetAll() {
var err error
psFilterArgs, err = filters.ParseFlag(f, psFilterArgs)
if err != nil {
return err
}
}
if len(psFilterArgs) > 0 {
filterJson, err := filters.ToParam(psFilterArgs)
if err != nil {
return err
}
v.Set("filters", filterJson)
}
body, _, err := readBody(cli.call("GET", "/containers/json?"+v.Encode(), nil, false))
if err != nil {
return err
}
outs := engine.NewTable("Created", 0)
if _, err := outs.ReadListFrom(body); err != nil {
return err
}
w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
if !*quiet {
fmt.Fprint(w, "CONTAINER ID\tIMAGE\tCOMMAND\tCREATED\tSTATUS\tPORTS\tNAMES")
if *size {
fmt.Fprintln(w, "\tSIZE")
} else {
fmt.Fprint(w, "\n")
}
}
for _, out := range outs.Data {
var (
outID = out.Get("Id")
outNames = out.GetList("Names")
)
if !*noTrunc {
outID = utils.TruncateID(outID)
}
// Remove the leading / from the names
for i := 0; i < len(outNames); i++ {
outNames[i] = outNames[i][1:]
}
if !*quiet {
var (
outCommand = out.Get("Command")
ports = engine.NewTable("", 0)
outNamesList = strings.Join(outNames, ",")
)
outCommand = strconv.Quote(outCommand)
if !*noTrunc {
outCommand = utils.Trunc(outCommand, 20)
outNamesList = outNames[0]
}
ports.ReadListFrom([]byte(out.Get("Ports")))
fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t%s\t", outID, out.Get("Image"), outCommand, units.HumanDuration(time.Now().UTC().Sub(time.Unix(out.GetInt64("Created"), 0))), out.Get("Status"), api.DisplayablePorts(ports), outNamesList)
if *size {
if out.GetInt("SizeRootFs") > 0 {
fmt.Fprintf(w, "%s (virtual %s)\n", units.HumanSize(out.GetInt64("SizeRw")), units.HumanSize(out.GetInt64("SizeRootFs")))
} else {
fmt.Fprintf(w, "%s\n", units.HumanSize(out.GetInt64("SizeRw")))
}
} else {
fmt.Fprint(w, "\n")
}
} else {
fmt.Fprintln(w, outID)
}
}
if !*quiet {
w.Flush()
}
return nil
}
func (cli *DockerCli) CmdCommit(args ...string) error {
cmd := cli.Subcmd("commit", "CONTAINER [REPOSITORY[:TAG]]", "Create a new image from a container's changes")
flPause := cmd.Bool([]string{"p", "-pause"}, true, "Pause container during commit")
flComment := cmd.String([]string{"m", "-message"}, "", "Commit message")
flAuthor := cmd.String([]string{"a", "#author", "-author"}, "", "Author (e.g., \"John Hannibal Smith <[email protected]>\")")
// FIXME: --run is deprecated, it will be replaced with inline Dockerfile commands.
flConfig := cmd.String([]string{"#run", "#-run"}, "", "This option is deprecated and will be removed in a future version in favor of inline Dockerfile-compatible commands")
if err := cmd.Parse(args); err != nil {
return nil
}
var (
name = cmd.Arg(0)
repository, tag = parsers.ParseRepositoryTag(cmd.Arg(1))
)
if name == "" || len(cmd.Args()) > 2 {
cmd.Usage()
return nil
}
//Check if the given image name can be resolved
if repository != "" {
if _, _, err := registry.ResolveRepositoryName(repository); err != nil {
return err
}
}
v := url.Values{}
v.Set("container", name)
v.Set("repo", repository)
v.Set("tag", tag)
v.Set("comment", *flComment)
v.Set("author", *flAuthor)
if *flPause != true {
v.Set("pause", "0")
}
var (
config *runconfig.Config
env engine.Env
)
if *flConfig != "" {
config = &runconfig.Config{}
if err := json.Unmarshal([]byte(*flConfig), config); err != nil {
return err
}
}
stream, _, err := cli.call("POST", "/commit?"+v.Encode(), config, false)
if err != nil {
return err
}
if err := env.Decode(stream); err != nil {
return err
}
fmt.Fprintf(cli.out, "%s\n", env.Get("Id"))
return nil
}
func (cli *DockerCli) CmdEvents(args ...string) error {
cmd := cli.Subcmd("events", "", "Get real time events from the server")
since := cmd.String([]string{"#since", "-since"}, "", "Show all events created since timestamp")
until := cmd.String([]string{"-until"}, "", "Stream events until this timestamp")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 0 {
cmd.Usage()
return nil
}
var (
v = url.Values{}
loc = time.FixedZone(time.Now().Zone())
)
var setTime = func(key, value string) {
format := timeutils.RFC3339NanoFixed
if len(value) < len(format) {
format = format[:len(value)]
}
if t, err := time.ParseInLocation(format, value, loc); err == nil {
v.Set(key, strconv.FormatInt(t.Unix(), 10))
} else {
v.Set(key, value)
}
}
if *since != "" {
setTime("since", *since)
}
if *until != "" {
setTime("until", *until)
}
if err := cli.stream("GET", "/events?"+v.Encode(), nil, cli.out, nil); err != nil {
return err
}
return nil
}
func (cli *DockerCli) CmdExport(args ...string) error {
cmd := cli.Subcmd("export", "CONTAINER", "Export the contents of a filesystem as a tar archive to STDOUT")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
if err := cli.stream("GET", "/containers/"+cmd.Arg(0)+"/export", nil, cli.out, nil); err != nil {
return err
}
return nil
}
func (cli *DockerCli) CmdDiff(args ...string) error {
cmd := cli.Subcmd("diff", "CONTAINER", "Inspect changes on a container's filesystem")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
body, _, err := readBody(cli.call("GET", "/containers/"+cmd.Arg(0)+"/changes", nil, false))
if err != nil {
return err
}
outs := engine.NewTable("", 0)
if _, err := outs.ReadListFrom(body); err != nil {
return err
}
for _, change := range outs.Data {
var kind string
switch change.GetInt("Kind") {
case archive.ChangeModify:
kind = "C"
case archive.ChangeAdd:
kind = "A"
case archive.ChangeDelete:
kind = "D"
}
fmt.Fprintf(cli.out, "%s %s\n", kind, change.Get("Path"))
}
return nil
}
func (cli *DockerCli) CmdLogs(args ...string) error {
var (
cmd = cli.Subcmd("logs", "CONTAINER", "Fetch the logs of a container")
follow = cmd.Bool([]string{"f", "-follow"}, false, "Follow log output")
times = cmd.Bool([]string{"t", "-timestamps"}, false, "Show timestamps")
tail = cmd.String([]string{"-tail"}, "all", "Output the specified number of lines at the end of logs (defaults to all logs)")
)
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
name := cmd.Arg(0)
stream, _, err := cli.call("GET", "/containers/"+name+"/json", nil, false)
if err != nil {
return err
}
env := engine.Env{}
if err := env.Decode(stream); err != nil {
return err
}
v := url.Values{}
v.Set("stdout", "1")
v.Set("stderr", "1")
if *times {
v.Set("timestamps", "1")
}
if *follow {
v.Set("follow", "1")
}
v.Set("tail", *tail)
return cli.streamHelper("GET", "/containers/"+name+"/logs?"+v.Encode(), env.GetSubEnv("Config").GetBool("Tty"), nil, cli.out, cli.err, nil)
}
func (cli *DockerCli) CmdAttach(args ...string) error {
var (
cmd = cli.Subcmd("attach", "CONTAINER", "Attach to a running container")
noStdin = cmd.Bool([]string{"#nostdin", "-no-stdin"}, false, "Do not attach STDIN")
proxy = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy all received signals to the process (even in non-TTY mode). SIGCHLD, SIGKILL, and SIGSTOP are not proxied.")
)
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
name := cmd.Arg(0)
stream, _, err := cli.call("GET", "/containers/"+name+"/json", nil, false)
if err != nil {
return err
}
env := engine.Env{}
if err := env.Decode(stream); err != nil {
return err
}
if !env.GetSubEnv("State").GetBool("Running") {
return fmt.Errorf("You cannot attach to a stopped container, start it first")
}
var (
config = env.GetSubEnv("Config")
tty = config.GetBool("Tty")
)
if tty && cli.isTerminalOut {
if err := cli.monitorTtySize(cmd.Arg(0), false); err != nil {
log.Debugf("Error monitoring TTY size: %s", err)
}
}
var in io.ReadCloser
v := url.Values{}
v.Set("stream", "1")
if !*noStdin && config.GetBool("OpenStdin") {
v.Set("stdin", "1")
in = cli.in
}
v.Set("stdout", "1")
v.Set("stderr", "1")
if *proxy && !tty {
sigc := cli.forwardAllSignals(cmd.Arg(0))
defer signal.StopCatch(sigc)
}
if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), tty, in, cli.out, cli.err, nil, nil); err != nil {
return err
}
_, status, err := getExitCode(cli, cmd.Arg(0))
if err != nil {
return err
}
if status != 0 {
return &utils.StatusError{StatusCode: status}
}
return nil
}
func (cli *DockerCli) CmdSearch(args ...string) error {
cmd := cli.Subcmd("search", "TERM", "Search the Docker Hub for images")
noTrunc := cmd.Bool([]string{"#notrunc", "-no-trunc"}, false, "Don't truncate output")
trusted := cmd.Bool([]string{"#t", "#trusted", "#-trusted"}, false, "Only show trusted builds")
automated := cmd.Bool([]string{"-automated"}, false, "Only show automated builds")
stars := cmd.Int([]string{"s", "#stars", "-stars"}, 0, "Only display images with at least x stars")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
v := url.Values{}
v.Set("term", cmd.Arg(0))
body, _, err := readBody(cli.call("GET", "/images/search?"+v.Encode(), nil, true))
if err != nil {
return err
}
outs := engine.NewTable("star_count", 0)
if _, err := outs.ReadListFrom(body); err != nil {
return err
}
w := tabwriter.NewWriter(cli.out, 10, 1, 3, ' ', 0)
fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tAUTOMATED\n")
for _, out := range outs.Data {
if ((*automated || *trusted) && (!out.GetBool("is_trusted") && !out.GetBool("is_automated"))) || (*stars > out.GetInt("star_count")) {
continue
}
desc := strings.Replace(out.Get("description"), "\n", " ", -1)
desc = strings.Replace(desc, "\r", " ", -1)
if !*noTrunc && len(desc) > 45 {
desc = utils.Trunc(desc, 42) + "..."
}
fmt.Fprintf(w, "%s\t%s\t%d\t", out.Get("name"), desc, out.GetInt("star_count"))
if out.GetBool("is_official") {
fmt.Fprint(w, "[OK]")
}
fmt.Fprint(w, "\t")
if out.GetBool("is_automated") || out.GetBool("is_trusted") {
fmt.Fprint(w, "[OK]")
}
fmt.Fprint(w, "\n")
}
w.Flush()
return nil
}
// Ports type - Used to parse multiple -p flags
type ports []int
func (cli *DockerCli) CmdTag(args ...string) error {
cmd := cli.Subcmd("tag", "IMAGE[:TAG] [REGISTRYHOST/][USERNAME/]NAME[:TAG]", "Tag an image into a repository")
force := cmd.Bool([]string{"f", "#force", "-force"}, false, "Force")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 2 {
cmd.Usage()
return nil
}
var (
repository, tag = parsers.ParseRepositoryTag(cmd.Arg(1))
v = url.Values{}
)
//Check if the given image name can be resolved
if _, _, err := registry.ResolveRepositoryName(repository); err != nil {
return err
}
v.Set("repo", repository)
v.Set("tag", tag)
if *force {
v.Set("force", "1")
}
if _, _, err := readBody(cli.call("POST", "/images/"+cmd.Arg(0)+"/tag?"+v.Encode(), nil, false)); err != nil {
return err
}
return nil
}
func (cli *DockerCli) pullImage(image string) error {
v := url.Values{}
repos, tag := parsers.ParseRepositoryTag(image)
// pull only the image tagged 'latest' if no tag was specified
if tag == "" {
tag = "latest"
}
v.Set("fromImage", repos)
v.Set("tag", tag)
// Resolve the Repository name from fqn to hostname + name
hostname, _, err := registry.ResolveRepositoryName(repos)
if err != nil {
return err
}
// Load the auth config file, to be able to pull the image
cli.LoadConfigFile()
// Resolve the Auth config relevant for this server
authConfig := cli.configFile.ResolveAuthConfig(hostname)
buf, err := json.Marshal(authConfig)
if err != nil {
return err
}
registryAuthHeader := []string{
base64.URLEncoding.EncodeToString(buf),
}
if err = cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.out, map[string][]string{"X-Registry-Auth": registryAuthHeader}); err != nil {
return err
}
return nil
}
type cidFile struct {
path string
file *os.File
written bool
}
func newCIDFile(path string) (*cidFile, error) {
if _, err := os.Stat(path); err == nil {
return nil, fmt.Errorf("Container ID file found, make sure the other container isn't running or delete %s", path)
}
f, err := os.Create(path)
if err != nil {
return nil, fmt.Errorf("Failed to create the container ID file: %s", err)
}
return &cidFile{path: path, file: f}, nil
}
func (cid *cidFile) Close() error {
cid.file.Close()
if !cid.written {
if err := os.Remove(cid.path); err != nil {
return fmt.Errorf("failed to remove the CID file '%s': %s \n", cid.path, err)
}
}
return nil
}
func (cid *cidFile) Write(id string) error {
if _, err := cid.file.Write([]byte(id)); err != nil {
return fmt.Errorf("Failed to write the container ID to the file: %s", err)
}
cid.written = true
return nil
}
func (cli *DockerCli) createContainer(config *runconfig.Config, hostConfig *runconfig.HostConfig, cidfile, name string) (engine.Env, error) {
containerValues := url.Values{}
if name != "" {
containerValues.Set("name", name)
}
mergedConfig := runconfig.MergeConfigs(config, hostConfig)
var containerIDFile *cidFile
if cidfile != "" {
var err error
if containerIDFile, err = newCIDFile(cidfile); err != nil {
return nil, err
}
defer containerIDFile.Close()
}
//create the container
stream, statusCode, err := cli.call("POST", "/containers/create?"+containerValues.Encode(), mergedConfig, false)
//if image not found try to pull it
if statusCode == 404 {
fmt.Fprintf(cli.err, "Unable to find image '%s' locally\n", config.Image)
if err = cli.pullImage(config.Image); err != nil {
return nil, err
}
// Retry
if stream, _, err = cli.call("POST", "/containers/create?"+containerValues.Encode(), mergedConfig, false); err != nil {
return nil, err
}
} else if err != nil {
return nil, err
}
var result engine.Env
if err := result.Decode(stream); err != nil {
return nil, err
}
for _, warning := range result.GetList("Warnings") {
fmt.Fprintf(cli.err, "WARNING: %s\n", warning)
}
if containerIDFile != nil {
if err = containerIDFile.Write(result.Get("Id")); err != nil {
return nil, err
}
}
return result, nil
}
func (cli *DockerCli) CmdCreate(args ...string) error {
cmd := cli.Subcmd("create", "IMAGE [COMMAND] [ARG...]", "Create a new container")
// These are flags not stored in Config/HostConfig
var (
flName = cmd.String([]string{"-name"}, "", "Assign a name to the container")
)
config, hostConfig, cmd, err := runconfig.Parse(cmd, args, nil)
if err != nil {
return err
}
if config.Image == "" {
cmd.Usage()
return nil
}
createResult, err := cli.createContainer(config, hostConfig, hostConfig.ContainerIDFile, *flName)
if err != nil {
return err
}
fmt.Fprintf(cli.out, "%s\n", createResult.Get("Id"))
return nil
}
func (cli *DockerCli) CmdRun(args ...string) error {
// FIXME: just use runconfig.Parse already
cmd := cli.Subcmd("run", "IMAGE [COMMAND] [ARG...]", "Run a command in a new container")
// These are flags not stored in Config/HostConfig
var (
flAutoRemove = cmd.Bool([]string{"#rm", "-rm"}, false, "Automatically remove the container when it exits (incompatible with -d)")
flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: run the container in the background and print the new container ID")
flSigProxy = cmd.Bool([]string{"#sig-proxy", "-sig-proxy"}, true, "Proxy received signals to the process (even in non-TTY mode). SIGCHLD, SIGSTOP, and SIGKILL are not proxied.")
flName = cmd.String([]string{"#name", "-name"}, "", "Assign a name to the container")
flAttach *opts.ListOpts
ErrConflictAttachDetach = fmt.Errorf("Conflicting options: -a and -d")
ErrConflictRestartPolicyAndAutoRemove = fmt.Errorf("Conflicting options: --restart and --rm")
ErrConflictDetachAutoRemove = fmt.Errorf("Conflicting options: --rm and -d")
)
config, hostConfig, cmd, err := runconfig.Parse(cmd, args, nil)
if err != nil {
return err
}
if config.Image == "" {
cmd.Usage()
return nil
}
if *flDetach {
if fl := cmd.Lookup("attach"); fl != nil {
flAttach = fl.Value.(*opts.ListOpts)
if flAttach.Len() != 0 {
return ErrConflictAttachDetach
}
}
if *flAutoRemove {
return ErrConflictDetachAutoRemove
}
config.AttachStdin = false
config.AttachStdout = false
config.AttachStderr = false
config.StdinOnce = false
}
// Disable flSigProxy when in TTY mode
sigProxy := *flSigProxy
if config.Tty {
sigProxy = false
}
runResult, err := cli.createContainer(config, hostConfig, hostConfig.ContainerIDFile, *flName)
if err != nil {
return err
}
if sigProxy {
sigc := cli.forwardAllSignals(runResult.Get("Id"))
defer signal.StopCatch(sigc)
}
var (
waitDisplayId chan struct{}
errCh chan error
)
if !config.AttachStdout && !config.AttachStderr {
// Make this asynchronous in order to let the client write to stdin before having to read the ID
waitDisplayId = make(chan struct{})
go func() {
defer close(waitDisplayId)
fmt.Fprintf(cli.out, "%s\n", runResult.Get("Id"))
}()
}
if *flAutoRemove && (hostConfig.RestartPolicy.Name == "always" || hostConfig.RestartPolicy.Name == "on-failure") {
return ErrConflictRestartPolicyAndAutoRemove
}
// We need to instantiate the chan because the select needs it. It can
// be closed but can't be uninitialized.
hijacked := make(chan io.Closer)
// Block the return until the chan gets closed
defer func() {
log.Debugf("End of CmdRun(), Waiting for hijack to finish.")
if _, ok := <-hijacked; ok {
log.Errorf("Hijack did not finish (chan still open)")
}
}()
if config.AttachStdin || config.AttachStdout || config.AttachStderr {
var (
out, stderr io.Writer
in io.ReadCloser
v = url.Values{}
)
v.Set("stream", "1")
if config.AttachStdin {
v.Set("stdin", "1")
in = cli.in
}
if config.AttachStdout {
v.Set("stdout", "1")
out = cli.out
}
if config.AttachStderr {
v.Set("stderr", "1")
if config.Tty {
stderr = cli.out
} else {
stderr = cli.err
}
}
errCh = promise.Go(func() error {
return cli.hijack("POST", "/containers/"+runResult.Get("Id")+"/attach?"+v.Encode(), config.Tty, in, out, stderr, hijacked, nil)
})
} else {
close(hijacked)
}
// Acknowledge the hijack before starting
select {
case closer := <-hijacked:
// Make sure that hijack gets closed when returning (this results
// in closing the hijack chan and freeing the server's goroutines).
if closer != nil {
defer closer.Close()
}
case err := <-errCh:
if err != nil {
log.Debugf("Error hijack: %s", err)
return err
}
}
//start the container
if _, _, err = readBody(cli.call("POST", "/containers/"+runResult.Get("Id")+"/start", hostConfig, false)); err != nil {
return err
}
if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminalOut {
if err := cli.monitorTtySize(runResult.Get("Id"), false); err != nil {
log.Errorf("Error monitoring TTY size: %s", err)
}
}
if errCh != nil {
if err := <-errCh; err != nil {
log.Debugf("Error hijack: %s", err)
return err
}
}
// Detached mode: wait for the id to be displayed and return.
if !config.AttachStdout && !config.AttachStderr {
// Detached mode
<-waitDisplayId
return nil
}
var status int
// Attached mode
if *flAutoRemove {
// Autoremove: wait for the container to finish, retrieve
// the exit code and remove the container
if _, _, err := readBody(cli.call("POST", "/containers/"+runResult.Get("Id")+"/wait", nil, false)); err != nil {
return err
}
if _, status, err = getExitCode(cli, runResult.Get("Id")); err != nil {
return err
}
if _, _, err := readBody(cli.call("DELETE", "/containers/"+runResult.Get("Id")+"?v=1", nil, false)); err != nil {
return err
}
} else {
if !config.Tty {
// In non-TTY mode, we can't detach, so we know we need to wait.
if status, err = waitForExit(cli, runResult.Get("Id")); err != nil {
return err
}
} else {
// In TTY mode there is a race: if the process dies too slowly, the state can be updated after the getExitCode call
// and result in a wrong exit code.
// No Autoremove: Simply retrieve the exit code
if _, status, err = getExitCode(cli, runResult.Get("Id")); err != nil {
return err
}
}
}
if status != 0 {
return &utils.StatusError{StatusCode: status}
}
return nil
}
func (cli *DockerCli) CmdCp(args ...string) error {
cmd := cli.Subcmd("cp", "CONTAINER:PATH HOSTPATH", "Copy files/folders from the PATH to the HOSTPATH")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 2 {
cmd.Usage()
return nil
}
var copyData engine.Env
info := strings.Split(cmd.Arg(0), ":")
if len(info) != 2 {
return fmt.Errorf("Error: Path not specified")
}
copyData.Set("Resource", info[1])
copyData.Set("HostPath", cmd.Arg(1))
stream, statusCode, err := cli.call("POST", "/containers/"+info[0]+"/copy", copyData, false)
if stream != nil {
defer stream.Close()
}
if statusCode == 404 {
return fmt.Errorf("No such container: %v", info[0])
}
if err != nil {
return err
}
if statusCode == 200 {
if err := archive.Untar(stream, copyData.Get("HostPath"), &archive.TarOptions{NoLchown: true}); err != nil {
return err
}
}
return nil
}
func (cli *DockerCli) CmdSave(args ...string) error {
cmd := cli.Subcmd("save", "IMAGE [IMAGE...]", "Save an image(s) to a tar archive (streamed to STDOUT by default)")
outfile := cmd.String([]string{"o", "-output"}, "", "Write to a file, instead of STDOUT")
if err := cmd.Parse(args); err != nil {
return err
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
var (
output io.Writer = cli.out
err error
)
if *outfile != "" {
output, err = os.Create(*outfile)
if err != nil {
return err
}
}
if len(cmd.Args()) == 1 {
image := cmd.Arg(0)
if err := cli.stream("GET", "/images/"+image+"/get", nil, output, nil); err != nil {
return err
}
} else {
v := url.Values{}
for _, arg := range cmd.Args() {
v.Add("names", arg)
}
if err := cli.stream("GET", "/images/get?"+v.Encode(), nil, output, nil); err != nil {
return err
}
}
return nil
}
func (cli *DockerCli) CmdLoad(args ...string) error {
cmd := cli.Subcmd("load", "", "Load an image from a tar archive on STDIN")
infile := cmd.String([]string{"i", "-input"}, "", "Read from a tar archive file, instead of STDIN")
if err := cmd.Parse(args); err != nil {
return err
}
if cmd.NArg() != 0 {
cmd.Usage()
return nil
}
var (
input io.Reader = cli.in
err error
)
if *infile != "" {
input, err = os.Open(*infile)
if err != nil {
return err
}
}
if err := cli.stream("POST", "/images/load", input, cli.out, nil); err != nil {
return err
}
return nil
}
func (cli *DockerCli) CmdExec(args ...string) error {
cmd := cli.Subcmd("exec", "CONTAINER COMMAND [ARG...]", "Run a command in an existing container")
execConfig, err := runconfig.ParseExec(cmd, args)
if err != nil {
return err
}
if execConfig.Container == "" {
cmd.Usage()
return nil
}
stream, _, err := cli.call("POST", "/containers/"+execConfig.Container+"/exec", execConfig, false)
if err != nil {
return err
}
var execResult engine.Env
if err := execResult.Decode(stream); err != nil {
return err
}
execID := execResult.Get("Id")
if execID == "" {
fmt.Fprintf(cli.out, "exec ID empty")
return nil
}
if execConfig.Detach {
if _, _, err := readBody(cli.call("POST", "/exec/"+execID+"/start", execConfig, false)); err != nil {
return err
}
return nil
}
// Interactive exec requested.
var (
out, stderr io.Writer
in io.ReadCloser
hijacked = make(chan io.Closer)
errCh chan error
)
// Block the return until the chan gets closed
defer func() {
log.Debugf("End of CmdExec(), Waiting for hijack to finish.")
if _, ok := <-hijacked; ok {
log.Errorf("Hijack did not finish (chan still open)")
}
}()
if execConfig.AttachStdin {
in = cli.in
}
if execConfig.AttachStdout {
out = cli.out
}
if execConfig.AttachStderr {
if execConfig.Tty {
stderr = cli.out
} else {
stderr = cli.err
}
}
errCh = promise.Go(func() error {
return cli.hijack("POST", "/exec/"+execID+"/start", execConfig.Tty, in, out, stderr, hijacked, execConfig)
})
// Acknowledge the hijack before starting
select {
case closer := <-hijacked:
// Make sure that the hijack gets closed when returning (results
// in closing the hijack chan and freeing the server's goroutines).
if closer != nil {
defer closer.Close()
}
case err := <-errCh:
if err != nil {
log.Debugf("Error hijack: %s", err)
return err
}
}
if execConfig.Tty && cli.isTerminalIn {
if err := cli.monitorTtySize(execID, true); err != nil {
log.Errorf("Error monitoring TTY size: %s", err)
}
}
if err := <-errCh; err != nil {
log.Debugf("Error hijack: %s", err)
return err
}
return nil
}
|
[
"\"HOME\"",
"\"DEBUG\"",
"\"DEBUG\""
] |
[] |
[
"HOME",
"DEBUG"
] |
[]
|
["HOME", "DEBUG"]
|
go
| 2 | 0 | |
frappe/app.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import os
import MySQLdb
from six import iteritems
import logging
from werkzeug.wrappers import Request
from werkzeug.local import LocalManager
from werkzeug.exceptions import HTTPException, NotFound
from werkzeug.contrib.profiler import ProfilerMiddleware
from werkzeug.wsgi import SharedDataMiddleware
import frappe
import frappe.handler
import frappe.auth
import frappe.api
import frappe.async
import frappe.utils.response
import frappe.website.render
from frappe.utils import get_site_name
from frappe.middlewares import StaticDataMiddleware
from frappe.utils.error import make_error_snapshot
from frappe.core.doctype.communication.comment import update_comments_in_parent_after_request
from frappe import _
local_manager = LocalManager([frappe.local])
_site = None
_sites_path = os.environ.get("SITES_PATH", ".")
class RequestContext(object):
def __init__(self, environ):
self.request = Request(environ)
def __enter__(self):
init_request(self.request)
def __exit__(self, type, value, traceback):
frappe.destroy()
@Request.application
def application(request):
response = None
try:
rollback = True
init_request(request)
if frappe.local.form_dict.cmd:
response = frappe.handler.handle()
elif frappe.request.path.startswith("/api/"):
if frappe.local.form_dict.data is None:
frappe.local.form_dict.data = request.get_data()
response = frappe.api.handle()
elif frappe.request.path.startswith('/backups'):
response = frappe.utils.response.download_backup(request.path)
elif frappe.request.path.startswith('/private/files/'):
response = frappe.utils.response.download_private_file(request.path)
elif frappe.local.request.method in ('GET', 'HEAD', 'POST'):
response = frappe.website.render.render()
else:
raise NotFound
except HTTPException as e:
return e
except frappe.SessionStopped as e:
response = frappe.utils.response.handle_session_stopped()
except Exception as e:
response = handle_exception(e)
else:
rollback = after_request(rollback)
finally:
if frappe.local.request.method in ("POST", "PUT") and frappe.db and rollback:
frappe.db.rollback()
# set cookies
if response and hasattr(frappe.local, 'cookie_manager'):
frappe.local.cookie_manager.flush_cookies(response=response)
frappe.destroy()
return response
def init_request(request):
frappe.local.request = request
frappe.local.is_ajax = frappe.get_request_header("X-Requested-With")=="XMLHttpRequest"
site = _site or request.headers.get('X-Frappe-Site-Name') or get_site_name(request.host)
frappe.init(site=site, sites_path=_sites_path)
if not (frappe.local.conf and frappe.local.conf.db_name):
# site does not exist
raise NotFound
if frappe.local.conf.get('maintenance_mode'):
raise frappe.SessionStopped
make_form_dict(request)
frappe.local.http_request = frappe.auth.HTTPRequest()
def make_form_dict(request):
frappe.local.form_dict = frappe._dict({ k:v[0] if isinstance(v, (list, tuple)) else v \
for k, v in iteritems(request.form or request.args) })
if "_" in frappe.local.form_dict:
# _ is passed by $.ajax so that the request is not cached by the browser. So, remove _ from form_dict
frappe.local.form_dict.pop("_")
def handle_exception(e):
response = None
http_status_code = getattr(e, "http_status_code", 500)
return_as_message = False
if frappe.local.is_ajax or 'application/json' in frappe.local.request.headers.get('Accept', ''):
# handle ajax responses first
# if the request is ajax, send back the trace or error message
response = frappe.utils.response.report_error(http_status_code)
elif (http_status_code==500
and isinstance(e, MySQLdb.OperationalError)
and e.args[0] in (1205, 1213)):
# 1205 = lock wait timeout
# 1213 = deadlock
# code 409 represents conflict
http_status_code = 508
elif http_status_code==401:
frappe.respond_as_web_page(_("Session Expired"),
_("Your session has expired, please login again to continue."),
http_status_code=http_status_code, indicator_color='red')
return_as_message = True
elif http_status_code==403:
frappe.respond_as_web_page(_("Not Permitted"),
_("You do not have enough permissions to complete the action"),
http_status_code=http_status_code, indicator_color='red')
return_as_message = True
elif http_status_code==404:
frappe.respond_as_web_page(_("Not Found"),
_("The resource you are looking for is not available"),
http_status_code=http_status_code, indicator_color='red')
return_as_message = True
else:
traceback = "<pre>"+frappe.get_traceback()+"</pre>"
if frappe.local.flags.disable_traceback:
traceback = ""
frappe.respond_as_web_page("Server Error",
traceback, http_status_code=http_status_code,
indicator_color='red', width=640)
return_as_message = True
if e.__class__ == frappe.AuthenticationError:
if hasattr(frappe.local, "login_manager"):
frappe.local.login_manager.clear_cookies()
if http_status_code >= 500:
frappe.logger().error('Request Error', exc_info=True)
make_error_snapshot(e)
if return_as_message:
response = frappe.website.render.render("message",
http_status_code=http_status_code)
return response
def after_request(rollback):
if (frappe.local.request.method in ("POST", "PUT") or frappe.local.flags.commit) and frappe.db:
if frappe.db.transaction_writes:
frappe.db.commit()
rollback = False
# update session
if getattr(frappe.local, "session_obj", None):
updated_in_db = frappe.local.session_obj.update()
if updated_in_db:
frappe.db.commit()
rollback = False
update_comments_in_parent_after_request()
return rollback
application = local_manager.make_middleware(application)
def serve(port=8000, profile=False, site=None, sites_path='.'):
global application, _site, _sites_path
_site = site
_sites_path = sites_path
from werkzeug.serving import run_simple
if profile:
application = ProfilerMiddleware(application, sort_by=('cumtime', 'calls'))
if not os.environ.get('NO_STATICS'):
application = SharedDataMiddleware(application, {
'/assets': os.path.join(sites_path, 'assets'),
})
application = StaticDataMiddleware(application, {
'/files': os.path.abspath(sites_path)
})
application.debug = True
application.config = {
'SERVER_NAME': 'localhost:8000'
}
in_test_env = os.environ.get('CI')
if in_test_env:
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
run_simple('0.0.0.0', int(port), application,
use_reloader=not in_test_env,
use_debugger=not in_test_env,
use_evalex=not in_test_env,
threaded=True)
|
[] |
[] |
[
"NO_STATICS",
"CI",
"SITES_PATH"
] |
[]
|
["NO_STATICS", "CI", "SITES_PATH"]
|
python
| 3 | 0 | |
twoCharacter.java
|
import java.io.*;
import java.math.*;
import java.security.*;
import java.text.*;
import java.util.*;
import java.util.concurrent.*;
import java.util.function.*;
import java.util.regex.*;
import java.util.stream.*;
import static java.util.stream.Collectors.joining;
import static java.util.stream.Collectors.toList;
public class Solution {
// Complete the alternate function below.
static int alternate(String s) {
/*
collect the distinct characters of s in a set,
try every pair of distinct characters (a, b),
strip every other character from a copy of s; if the result alternates, keep the best length
*/
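// Illustrative example (not from the original source): for s = "beabeefeab",
// keeping only the pair {'a', 'b'} leaves "babab", which alternates and has
// length 5 -- the value alternate() would return for that input.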
Set<Character> set = new HashSet<>();
for (int i = 0; i < s.length(); i++) {
set.add(s.charAt(i));
}
int res = 0;
for (Character a: set) {
for (Character b : set) {
if (!a.equals(b)) { // compare boxed Characters with equals() rather than reference equality
StringBuilder dup_s = new StringBuilder(s);
int ct = dup_s.length();
int i = 0;
int length = dup_s.length();
while (i < length) {
if (dup_s.charAt(i) != a && dup_s.charAt(i) != b) {
dup_s = dup_s.deleteCharAt(i);
ct--;
length--;
if (i > 0) {
i--;
}
} else {
i++;
}
}
if (!hasRepeat(dup_s)) {
System.out.println(dup_s);
System.out.println("no repeat");
res = Math.max(ct, res);
}
}
}
}
return res;
}
static boolean hasRepeat(StringBuilder dup_s) {
for (int i = 0; i < dup_s.length() - 1; i++) {
if (dup_s.charAt(i) == dup_s.charAt(i+1)) {
return true;
}
}
return false;
}
public static void main(String[] args) throws IOException {
BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(System.in));
BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH")));
int l = Integer.parseInt(bufferedReader.readLine().trim());
String s = bufferedReader.readLine();
int result = alternate(s);
bufferedWriter.write(String.valueOf(result));
bufferedWriter.newLine();
bufferedReader.close();
bufferedWriter.close();
}
}
/*
import java.io.*;
import java.math.*;
import java.security.*;
import java.text.*;
import java.util.*;
import java.util.concurrent.*;
import java.util.regex.*;
public class Solution {
// get keys whose value is min
// static void printKeys(Map<Character,Integer> map) {
// // System.out.println("map content is:\n");
// for (Character key : map.keySet()) {
// // System.out.println("map key is " + key + map.get(key));
// }
// }
// static void test (Map<Character,Integer> map) {
// // System.out.println("removing e\n");
// String h = "e";
// map.remove(h.charAt(0));
// printKeys(map);
// }
public static Character getKey(Map<Character, Integer> map) {
int curMin = Integer.MAX_VALUE;
Character curKey = '\0';
// System.out.println("map passed to getKey is\n");
// printKeys(map);
for (Character key : map.keySet()) {
// System.out.println("map key is " + key);
// System.out.println("keys are " + keys);
// System.out.println("while (map_size > 2)" + map_size);
// if (map_size <= 2) return keys;
if (map.get(key) < curMin) {
// System.out.println("key val in comparison is " + map.get(key));
// keys.add(key);
curMin = map.get(key);
curKey = key;
// map_size--;
}
}
if (map.size() < 4) {
// System.out.println("key to remove is " + curKey);
}
return curKey;
}
static String removeDup(String sb) {
int cur_sz = sb.length();
Character cur_ch = '\0';
for (int i = 0; i < cur_sz; i++) { // removes dups
for (int j = i; j < cur_sz; j++) {
// inc num of occurrences of char
// int num = (map.get(sb.charAt(j)) == null) ? 1 : (map.get(sb.charAt(j)) + 1);
// map stores char and num of occurrences
// map.put(sb.charAt(j), num);
if ((j < cur_sz -1) && sb.charAt(j) == sb.charAt(j+1)) {
cur_ch = sb.charAt(j); // get cur_ch
sb = sb.replace(Character.toString(cur_ch), "");
cur_sz = sb.length(); // gets sz of str after removing repeated ch
// map.remove(cur_ch); // removes repeated ch from map
}
}
}
}
// Complete the twoCharaters function below.
static int twoCharaters(String s) {
if (s == null || s.length() == 0) {
return 0;
}
String sb = s;
Character cur_ch = '\0';
int cur_sz = sb.length();
// StringBuilder sb = new StringBuilder(s);
String sb = removeDup(sb);
Map<Character, Integer> map = new HashMap<>();
int num = (map.get(sb.charAt(i)) == null) ? 1 : (map.get(sb.charAt(i)) + 1);
for (int i = 0; i < sb.length(); i++) {
map.put(sb.charAt(i), num);
}
int k = 0;
char[] keys= new char[map.size()+1];
int mapSz = map.size();
while (mapSz > 2) {
keys[k] = getKey(map);
map.remove(keys[k]); // removing val from map immediately after finding it to be of least occurrence
k++;
mapSz--;
// System.out.println("mapSz is " + mapSz);
// System.out.println("key in returned getKey is " + keys[j-1]);
}
cur_sz = sb.length();
for (Character c : keys) {
// System.out.println("sb sz is " + sb.length());
sb = sb.replace(Character.toString(c), "");
// System.out.println("c is " + Character.toString(c) + " sb is " + sb);
}
// System.out.println(sb);
return sb.length();
}
private static final Scanner scanner = new Scanner(System.in);
public static void main(String[] args) throws IOException {
BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH")));
int l = scanner.nextInt();
scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");
String s = scanner.nextLine();
int result = twoCharaters(s);
bufferedWriter.write(String.valueOf(result));
bufferedWriter.newLine();
bufferedWriter.close();
scanner.close();
}
}
*/
|
[
"\"OUTPUT_PATH\"",
"\"OUTPUT_PATH\""
] |
[] |
[
"OUTPUT_PATH"
] |
[]
|
["OUTPUT_PATH"]
|
java
| 1 | 0 | |
vendor/cloud.google.com/go/profiler/profiler_test.go
|
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package profiler
import (
"bytes"
"compress/gzip"
"errors"
"fmt"
"io"
"log"
"math/rand"
"os"
"strings"
"testing"
"time"
"cloud.google.com/go/internal/testutil"
"cloud.google.com/go/profiler/mocks"
"github.com/golang/mock/gomock"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"github.com/google/pprof/profile"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
gtransport "google.golang.org/api/transport/grpc"
pb "google.golang.org/genproto/googleapis/devtools/cloudprofiler/v2"
edpb "google.golang.org/genproto/googleapis/rpc/errdetails"
"google.golang.org/grpc/codes"
grpcmd "google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
const (
testProjectID = "test-project-ID"
testInstance = "test-instance"
testZone = "test-zone"
testTarget = "test-target"
testService = "test-service"
testSvcVersion = "test-service-version"
testProfileDuration = time.Second * 10
testServerTimeout = time.Second * 15
wantFunctionName = "profilee"
)
func createTestDeployment() *pb.Deployment {
labels := map[string]string{
zoneNameLabel: testZone,
versionLabel: testSvcVersion,
}
return &pb.Deployment{
ProjectId: testProjectID,
Target: testService,
Labels: labels,
}
}
func createTestAgent(psc pb.ProfilerServiceClient) *agent {
c := &client{client: psc}
return &agent{
client: c,
deployment: createTestDeployment(),
profileLabels: map[string]string{instanceLabel: testInstance},
}
}
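// createTrailers builds gRPC trailer metadata carrying a serialized RetryInfo detail with the given retry delay.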
func createTrailers(dur time.Duration) map[string]string {
b, _ := proto.Marshal(&edpb.RetryInfo{
RetryDelay: ptypes.DurationProto(dur),
})
return map[string]string{
retryInfoMetadata: string(b),
}
}
func TestCreateProfile(t *testing.T) {
ctx := context.Background()
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mpc := mocks.NewMockProfilerServiceClient(ctrl)
a := createTestAgent(mpc)
p := &pb.Profile{Name: "test_profile"}
wantRequest := pb.CreateProfileRequest{
Deployment: a.deployment,
ProfileType: []pb.ProfileType{pb.ProfileType_CPU, pb.ProfileType_HEAP},
}
mpc.EXPECT().CreateProfile(ctx, gomock.Eq(&wantRequest), gomock.Any()).Times(1).Return(p, nil)
gotP := a.createProfile(ctx)
if !testutil.Equal(gotP, p) {
t.Errorf("CreateProfile() got wrong profile, got %v, want %v", gotP, p)
}
}
func TestProfileAndUpload(t *testing.T) {
oldStartCPUProfile, oldStopCPUProfile, oldWriteHeapProfile, oldSleep := startCPUProfile, stopCPUProfile, writeHeapProfile, sleep
defer func() {
startCPUProfile, stopCPUProfile, writeHeapProfile, sleep = oldStartCPUProfile, oldStopCPUProfile, oldWriteHeapProfile, oldSleep
}()
ctx := context.Background()
ctrl := gomock.NewController(t)
defer ctrl.Finish()
errFunc := func(io.Writer) error { return errors.New("") }
testDuration := time.Second * 5
tests := []struct {
profileType pb.ProfileType
duration *time.Duration
startCPUProfileFunc func(io.Writer) error
writeHeapProfileFunc func(io.Writer) error
wantBytes []byte
}{
{
profileType: pb.ProfileType_CPU,
duration: &testDuration,
startCPUProfileFunc: func(w io.Writer) error {
w.Write([]byte{1})
return nil
},
writeHeapProfileFunc: errFunc,
wantBytes: []byte{1},
},
{
profileType: pb.ProfileType_CPU,
startCPUProfileFunc: errFunc,
writeHeapProfileFunc: errFunc,
},
{
profileType: pb.ProfileType_CPU,
duration: &testDuration,
startCPUProfileFunc: func(w io.Writer) error {
w.Write([]byte{2})
return nil
},
writeHeapProfileFunc: func(w io.Writer) error {
w.Write([]byte{3})
return nil
},
wantBytes: []byte{2},
},
{
profileType: pb.ProfileType_HEAP,
startCPUProfileFunc: errFunc,
writeHeapProfileFunc: func(w io.Writer) error {
w.Write([]byte{4})
return nil
},
wantBytes: []byte{4},
},
{
profileType: pb.ProfileType_HEAP,
startCPUProfileFunc: errFunc,
writeHeapProfileFunc: errFunc,
},
{
profileType: pb.ProfileType_HEAP,
startCPUProfileFunc: func(w io.Writer) error {
w.Write([]byte{5})
return nil
},
writeHeapProfileFunc: func(w io.Writer) error {
w.Write([]byte{6})
return nil
},
wantBytes: []byte{6},
},
{
profileType: pb.ProfileType_PROFILE_TYPE_UNSPECIFIED,
startCPUProfileFunc: func(w io.Writer) error {
w.Write([]byte{7})
return nil
},
writeHeapProfileFunc: func(w io.Writer) error {
w.Write([]byte{8})
return nil
},
},
}
for _, tt := range tests {
mpc := mocks.NewMockProfilerServiceClient(ctrl)
a := createTestAgent(mpc)
startCPUProfile = tt.startCPUProfileFunc
stopCPUProfile = func() {}
writeHeapProfile = tt.writeHeapProfileFunc
var gotSleep *time.Duration
sleep = func(ctx context.Context, d time.Duration) error {
gotSleep = &d
return nil
}
p := &pb.Profile{ProfileType: tt.profileType}
if tt.duration != nil {
p.Duration = ptypes.DurationProto(*tt.duration)
}
if tt.wantBytes != nil {
wantProfile := &pb.Profile{
ProfileType: p.ProfileType,
Duration: p.Duration,
ProfileBytes: tt.wantBytes,
Labels: a.profileLabels,
}
wantRequest := pb.UpdateProfileRequest{
Profile: wantProfile,
}
mpc.EXPECT().UpdateProfile(ctx, gomock.Eq(&wantRequest)).Times(1)
} else {
mpc.EXPECT().UpdateProfile(gomock.Any(), gomock.Any()).MaxTimes(0)
}
a.profileAndUpload(ctx, p)
if tt.duration == nil {
if gotSleep != nil {
t.Errorf("profileAndUpload(%v) slept for: %v, want no sleep", p, gotSleep)
}
} else {
if gotSleep == nil {
t.Errorf("profileAndUpload(%v) didn't sleep, want sleep for: %v", p, tt.duration)
} else if *gotSleep != *tt.duration {
t.Errorf("profileAndUpload(%v) slept for wrong duration, got: %v, want: %v", p, gotSleep, tt.duration)
}
}
}
}
func TestRetry(t *testing.T) {
normalDuration := time.Second * 3
negativeDuration := time.Second * -3
tests := []struct {
trailers map[string]string
wantPause *time.Duration
}{
{
createTrailers(normalDuration),
&normalDuration,
},
{
createTrailers(negativeDuration),
nil,
},
{
map[string]string{retryInfoMetadata: "wrong format"},
nil,
},
{
map[string]string{},
nil,
},
}
for _, tt := range tests {
md := grpcmd.New(tt.trailers)
r := &retryer{
backoff: gax.Backoff{
Initial: initialBackoff,
Max: maxBackoff,
Multiplier: backoffMultiplier,
},
md: md,
}
pause, shouldRetry := r.Retry(status.Error(codes.Aborted, ""))
if !shouldRetry {
t.Error("retryer.Retry() returned shouldRetry false, want true")
}
if tt.wantPause != nil {
if pause != *tt.wantPause {
t.Errorf("retryer.Retry() returned wrong pause, got: %v, want: %v", pause, tt.wantPause)
}
} else {
if pause > initialBackoff {
t.Errorf("retryer.Retry() returned wrong pause, got: %v, want: < %v", pause, initialBackoff)
}
}
}
md := grpcmd.New(map[string]string{})
r := &retryer{
backoff: gax.Backoff{
Initial: initialBackoff,
Max: maxBackoff,
Multiplier: backoffMultiplier,
},
md: md,
}
for i := 0; i < 100; i++ {
pause, shouldRetry := r.Retry(errors.New(""))
if !shouldRetry {
t.Errorf("retryer.Retry() called %v times, returned shouldRetry false, want true", i)
}
if pause > maxBackoff {
t.Errorf("retryer.Retry() called %v times, returned wrong pause, got: %v, want: < %v", i, pause, maxBackoff)
}
}
}
func TestInitializeResources(t *testing.T) {
d := createTestDeployment()
l := map[string]string{instanceLabel: testInstance}
ctx := context.Background()
a, ctx := initializeResources(ctx, nil, d, l)
if xg := a.client.xGoogHeader; len(xg) == 0 {
t.Errorf("initializeResources() sets empty xGoogHeader")
} else {
if !strings.Contains(xg[0], "gl-go/") {
t.Errorf("initializeResources() sets wrong xGoogHeader, got: %v, want gl-go key", xg[0])
}
if !strings.Contains(xg[0], "gccl/") {
t.Errorf("initializeResources() sets wrong xGoogHeader, got: %v, want gccl key", xg[0])
}
if !strings.Contains(xg[0], "gax/") {
t.Errorf("initializeResources() sets wrong xGoogHeader, got: %v, want gax key", xg[0])
}
if !strings.Contains(xg[0], "grpc/") {
t.Errorf("initializeResources() sets wrong xGoogHeader, got: %v, want grpc key", xg[0])
}
}
md, _ := grpcmd.FromOutgoingContext(ctx)
if !testutil.Equal(md[xGoogAPIMetadata], a.client.xGoogHeader) {
t.Errorf("md[%v] = %v, want equal xGoogHeader = %v", xGoogAPIMetadata, md[xGoogAPIMetadata], a.client.xGoogHeader)
}
}
func TestInitializeDeployment(t *testing.T) {
oldGetProjectID, oldGetZone, oldConfig := getProjectID, getZone, config
defer func() {
getProjectID, getZone, config = oldGetProjectID, oldGetZone, oldConfig
}()
getProjectID = func() (string, error) {
return testProjectID, nil
}
getZone = func() (string, error) {
return testZone, nil
}
cfg := Config{Service: testService, ServiceVersion: testSvcVersion}
initializeConfig(cfg)
d, err := initializeDeployment()
if err != nil {
t.Errorf("initializeDeployment() got error: %v, want no error", err)
}
if want := createTestDeployment(); !testutil.Equal(d, want) {
t.Errorf("createTestDeployment() got: %v, want %v", d, want)
}
}
func TestInitializeConfig(t *testing.T) {
oldConfig, oldService, oldVersion := config, os.Getenv("GAE_SERVICE"), os.Getenv("GAE_VERSION")
defer func() {
config = oldConfig
if err := os.Setenv("GAE_SERVICE", oldService); err != nil {
t.Fatal(err)
}
if err := os.Setenv("GAE_VERSION", oldVersion); err != nil {
t.Fatal(err)
}
}()
testGAEService := "test-gae-service"
testGAEVersion := "test-gae-version"
for _, tt := range []struct {
config Config
wantTarget string
wantErrorString string
wantSvcVersion string
onGAE bool
}{
{
Config{Service: testService},
testService,
"",
"",
false,
},
{
Config{Target: testTarget},
testTarget,
"",
"",
false,
},
{
Config{},
"",
"service name must be specified in the configuration",
"",
false,
},
{
Config{Service: testService},
testService,
"",
testGAEVersion,
true,
},
{
Config{Target: testTarget},
testTarget,
"",
testGAEVersion,
true,
},
{
Config{},
testGAEService,
"",
testGAEVersion,
true,
},
{
Config{Service: testService, ServiceVersion: testSvcVersion},
testService,
"",
testSvcVersion,
false,
},
{
Config{Service: testService, ServiceVersion: testSvcVersion},
testService,
"",
testSvcVersion,
true,
},
} {
envService, envVersion := "", ""
if tt.onGAE {
envService, envVersion = testGAEService, testGAEVersion
}
if err := os.Setenv("GAE_SERVICE", envService); err != nil {
t.Fatal(err)
}
if err := os.Setenv("GAE_VERSION", envVersion); err != nil {
t.Fatal(err)
}
errorString := ""
if err := initializeConfig(tt.config); err != nil {
errorString = err.Error()
}
if errorString != tt.wantErrorString {
t.Errorf("initializeConfig(%v) got error: %v, want %v", tt.config, errorString, tt.wantErrorString)
}
if config.Target != tt.wantTarget {
t.Errorf("initializeConfig(%v) got target: %v, want %v", tt.config, config.Target, tt.wantTarget)
}
if config.ServiceVersion != tt.wantSvcVersion {
t.Errorf("initializeConfig(%v) got service version: %v, want %v", tt.config, config.ServiceVersion, tt.wantSvcVersion)
}
}
}
func TestInitializeProfileLabels(t *testing.T) {
oldGetInstanceName := getInstanceName
defer func() {
getInstanceName = oldGetInstanceName
}()
getInstanceName = func() (string, error) {
return testInstance, nil
}
l := initializeProfileLabels()
want := map[string]string{instanceLabel: testInstance}
if !testutil.Equal(l, want) {
t.Errorf("initializeProfileLabels() got: %v, want %v", l, want)
}
}
type fakeProfilerServer struct {
pb.ProfilerServiceServer
count int
gotCPUProfile []byte
gotHeapProfile []byte
done chan bool
}
func (fs *fakeProfilerServer) CreateProfile(ctx context.Context, in *pb.CreateProfileRequest) (*pb.Profile, error) {
fs.count++
switch fs.count {
case 1:
return &pb.Profile{Name: "testCPU", ProfileType: pb.ProfileType_CPU, Duration: ptypes.DurationProto(testProfileDuration)}, nil
case 2:
return &pb.Profile{Name: "testHeap", ProfileType: pb.ProfileType_HEAP}, nil
default:
select {}
}
}
func (fs *fakeProfilerServer) UpdateProfile(ctx context.Context, in *pb.UpdateProfileRequest) (*pb.Profile, error) {
switch in.Profile.ProfileType {
case pb.ProfileType_CPU:
fs.gotCPUProfile = in.Profile.ProfileBytes
case pb.ProfileType_HEAP:
fs.gotHeapProfile = in.Profile.ProfileBytes
fs.done <- true
}
return in.Profile, nil
}
func profileeLoop(quit chan bool) {
for {
select {
case <-quit:
return
default:
profileeWork()
}
}
}
func profileeWork() {
data := make([]byte, 1024*1024)
rand.Read(data)
var b bytes.Buffer
gz := gzip.NewWriter(&b)
if _, err := gz.Write(data); err != nil {
log.Printf("failed to write to gzip stream", err)
return
}
if err := gz.Flush(); err != nil {
log.Printf("failed to flush to gzip stream", err)
return
}
if err := gz.Close(); err != nil {
log.Printf("failed to close gzip stream", err)
}
}
func checkSymbolization(p *profile.Profile) error {
for _, l := range p.Location {
if len(l.Line) > 0 && l.Line[0].Function != nil && strings.Contains(l.Line[0].Function.Name, wantFunctionName) {
return nil
}
}
return fmt.Errorf("want function name %v not found in profile", wantFunctionName)
}
func validateProfile(rawData []byte) error {
p, err := profile.ParseData(rawData)
if err != nil {
return fmt.Errorf("ParseData failed: %v", err)
}
if len(p.Sample) == 0 {
return fmt.Errorf("profile contains zero samples: %v", p)
}
if len(p.Location) == 0 {
return fmt.Errorf("profile contains zero locations: %v", p)
}
if len(p.Function) == 0 {
return fmt.Errorf("profile contains zero functions: %v", p)
}
if err := checkSymbolization(p); err != nil {
return fmt.Errorf("checkSymbolization failed: %v for %v", err, p)
}
return nil
}
func TestAgentWithServer(t *testing.T) {
oldDialGRPC, oldConfig := dialGRPC, config
defer func() {
dialGRPC, config = oldDialGRPC, oldConfig
}()
srv, err := testutil.NewServer()
if err != nil {
t.Fatalf("testutil.NewServer(): %v", err)
}
fakeServer := &fakeProfilerServer{done: make(chan bool)}
pb.RegisterProfilerServiceServer(srv.Gsrv, fakeServer)
srv.Start()
dialGRPC = gtransport.DialInsecure
if err := Start(Config{
Target: testTarget,
ProjectID: testProjectID,
Instance: testInstance,
Zone: testZone,
APIAddr: srv.Addr,
}); err != nil {
t.Fatalf("Start(): %v", err)
}
quitProfilee := make(chan bool)
go profileeLoop(quitProfilee)
select {
case <-fakeServer.done:
case <-time.After(testServerTimeout):
t.Errorf("got timeout after %v, want fake server done", testServerTimeout)
}
quitProfilee <- true
if err := validateProfile(fakeServer.gotCPUProfile); err != nil {
t.Errorf("validateProfile(gotCPUProfile): %v", err)
}
if err := validateProfile(fakeServer.gotHeapProfile); err != nil {
t.Errorf("validateProfile(gotHeapProfile): %v", err)
}
}
|
[
"\"GAE_SERVICE\"",
"\"GAE_VERSION\""
] |
[] |
[
"GAE_SERVICE",
"GAE_VERSION"
] |
[]
|
["GAE_SERVICE", "GAE_VERSION"]
|
go
| 2 | 0 | |
feapder/commands/create/create_item.py
|
# -*- coding: utf-8 -*-
"""
Created on 2018-08-28 17:38:43
---------
@summary: create an item
---------
@author: Boris
@email: [email protected]
"""
import getpass
import os
import feapder.utils.tools as tools
from feapder import setting
from feapder.db.mysqldb import MysqlDB
from .create_init import CreateInit
def deal_file_info(file):
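# Fill template placeholders: {DATE} with the current date and {USER} with the
# FEAPDER_USER environment variable, falling back to the current system user.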
file = file.replace("{DATE}", tools.get_current_date())
file = file.replace("{USER}", os.getenv("FEAPDER_USER") or getpass.getuser())
return file
class CreateItem:
def __init__(self):
self._db = MysqlDB()
self._create_init = CreateInit()
def select_columns(self, table_name):
# sql = 'SHOW COLUMNS FROM ' + table_name
sql = f"SELECT COLUMN_NAME, COLUMN_TYPE, IS_NULLABLE, COLUMN_DEFAULT, EXTRA, COLUMN_KEY, COLUMN_COMMENT FROM INFORMATION_SCHEMA.Columns WHERE table_name = '{table_name}' and table_schema = '{setting.MYSQL_DB}'"
columns = self._db.find(sql)
return columns
def select_tables_name(self, tables_name):
"""
@summary:
---------
@param tables_name: a family of tables, e.g. qidian*
---------
@result:
"""
sql = f"select table_name from information_schema.tables where table_name like '{tables_name}' and table_schema = '{setting.MYSQL_DB}'"
tables_name = self._db.find(sql)
return tables_name
def convert_table_name_to_hump(self, table_name):
"""
@summary: format the table name into CamelCase (hump) format
---------
@param table:
---------
@result:
"""
table_hump_format = ""
words = table_name.split("_")
for word in words:
table_hump_format += word.capitalize()  # capitalize the first letter of each word
return table_hump_format
def get_item_template(self):
template_path = os.path.abspath(
os.path.join(__file__, "../../../templates/item_template.tmpl")
)
with open(template_path, "r", encoding="utf-8") as file:
item_template = file.read()
return item_template
def create_item(self, item_template, columns, table_name, support_dict):
table_name_hump_format = self.convert_table_name_to_hump(table_name)
# assemble the class name
item_template = item_template.replace("${item_name}", table_name_hump_format)
if support_dict:
item_template = item_template.replace("${command}", table_name + " 1")
else:
item_template = item_template.replace("${command}", table_name)
item_template = item_template.replace("${table_name}", table_name)
# assemble the attributes
propertys = ""
for column in columns:
column_name = column[0]
column_type = column[1]
is_nullable = column[2]
column_default = column[3]
column_extra = column[4]
column_key = column[5]
column_comment = column[6]
try:
column_default = None if column_default == "NULL" else column_default
value = (
"kwargs.get('{column_name}')".format(column_name=column_name)
if support_dict
else (
column_default != "CURRENT_TIMESTAMP" and column_default or None
)
and eval(column_default)
)
except:
value = (
"kwargs.get('{column_name}')".format(column_name=column_name)
if support_dict
else (
column_default != "CURRENT_TIMESTAMP" and column_default or None
)
and column_default
)
if column_extra == "auto_increment" or column_default is not None:
propertys += f"# self.{column_name} = {value}"
else:
if value is None or isinstance(value, (float, int)) or support_dict:
propertys += f"self.{column_name} = {value}"
else:
propertys += f"self.{column_name} = '{value}'"
if column_comment:
propertys += f" # {column_comment}"
propertys += "\n" + " " * 8
item_template = item_template.replace("${propertys}", propertys.strip())
item_template = deal_file_info(item_template)
return item_template
def save_template_to_file(self, item_template, table_name):
item_file = table_name + "_item.py"
if os.path.exists(item_file):
confirm = input("%s 文件已存在 是否覆盖 (y/n). " % item_file)
if confirm != "y":
print("取消覆盖 退出")
return
with open(item_file, "w", encoding="utf-8") as file:
file.write(item_template)
print("\n%s 生成成功" % item_file)
if os.path.basename(os.path.dirname(os.path.abspath(item_file))) == "items":
self._create_init.create()
def create(self, tables_name, support_dict):
input_tables_name = tables_name
tables_name = self.select_tables_name(tables_name)
if not tables_name:
print(tables_name)
tip = "mysql数据库中无 %s 表 " % input_tables_name
raise KeyError(tip)
for table_name in tables_name:
table_name = table_name[0]
columns = self.select_columns(table_name)
item_template = self.get_item_template()
item_template = self.create_item(
item_template, columns, table_name, support_dict
)
self.save_template_to_file(item_template, table_name)
|
[] |
[] |
[
"FEAPDER_USER"
] |
[]
|
["FEAPDER_USER"]
|
python
| 1 | 0 | |
project.py
|
# -*- coding: utf-8 -*-
import json
import os.path
import sys
from lemoncheesecake.project import Project
class MyProject(Project):
def build_report_title(self):
return "ECHO tests (ECHO v. 0.10.0)"
project_dir = os.path.dirname(__file__)
sys.path.append(project_dir)
project = MyProject(project_dir)
project.metadata_policy.add_property_rule("main", "type", on_suite=True, required=False)
project.metadata_policy.add_property_rule("positive", "type", on_suite=True, required=False)
project.metadata_policy.add_property_rule("negative", "type", on_suite=True, required=False)
RESOURCES_DIR = os.path.join(os.path.dirname(__file__), "resources")
GENESIS = json.load(open(os.path.join(os.path.dirname(__file__), "genesis.json")))
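# Feature flags from the environment: any value other than "false" (case-insensitive) enables the flag.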
if "ROPSTEN" in os.environ and os.environ["ROPSTEN"].lower() != "false":
ROPSTEN = True
else:
ROPSTEN = False
if "DEBUG" in os.environ and os.environ["DEBUG"].lower() != "false":
DEBUG = True
else:
DEBUG = False
if "BASE_URL" not in os.environ:
BASE_URL = json.load(open(os.path.join(RESOURCES_DIR, "urls.json")))["BASE_URL"]
else:
BASE_URL = os.environ["BASE_URL"]
if "ETHEREUM_URL" not in os.environ:
ETHEREUM_URL = json.load(open(os.path.join(RESOURCES_DIR, "urls.json")))["ETHEREUM_URL"]
else:
ETHEREUM_URL = os.environ["ETHEREUM_URL"]
if "NATHAN_PK" not in os.environ:
NATHAN_PK = json.load(open(os.path.join(RESOURCES_DIR, "private_keys.json")))["NATHAN_PK"]
else:
NATHAN_PK = os.environ["NATHAN_PK"]
if "INIT0_PK" not in os.environ:
INIT0_PK = json.load(open(os.path.join(RESOURCES_DIR, "private_keys.json")))["INIT0_PK"]
else:
INIT0_PK = os.environ["INIT0_PK"]
ECHO_OPERATIONS = json.load(open(os.path.join(RESOURCES_DIR, "echo_operations.json")))
ECHO_CONTRACTS = json.load(open(os.path.join(RESOURCES_DIR, "echo_contracts.json")))
WALLETS = os.path.join(RESOURCES_DIR, "wallets.json")
UTILS = os.path.join(RESOURCES_DIR, "utils.json")
ECHO_INITIAL_BALANCE = int(GENESIS["initial_balances"][0]["amount"])
ECHO_ASSET_SYMBOL = GENESIS["initial_balances"][0]["asset_symbol"]
INITIAL_ACCOUNTS = GENESIS["initial_accounts"]
INITIAL_COMMITTEE_CANDIDATES = GENESIS["initial_committee_candidates"]
INITIAL_ACCOUNTS_COUNT = len(INITIAL_ACCOUNTS)
INITIAL_ACCOUNTS_NAMES = []
for i in range(INITIAL_ACCOUNTS_COUNT):
INITIAL_ACCOUNTS_NAMES.append(INITIAL_ACCOUNTS[i]["name"])
INITIAL_COMMITTEE_ETH_ADDRESSES = []
for i, initial_committee_candidate in enumerate(INITIAL_COMMITTEE_CANDIDATES):
if initial_committee_candidate["owner_name"] == INITIAL_ACCOUNTS_NAMES[i]:
INITIAL_COMMITTEE_ETH_ADDRESSES.append(initial_committee_candidate["eth_address"])
ACCOUNT_PREFIX = "account"
DEFAULT_ACCOUNTS_COUNT = 1000
MAIN_TEST_ACCOUNT_COUNT = 1
# TODO: delete. Block_interval = 5
BLOCK_RELEASE_INTERVAL = 5
BLOCKS_NUM_TO_WAIT = 45
BASE_ASSET_SYMBOL, ETH_ASSET_SYMBOL = "ECHO", "EETH"
ETH_ASSET_ID = GENESIS["initial_parameters"]["sidechain_config"]["ETH_asset_id"]
ETH_CONTRACT_ADDRESS = "0x" + GENESIS["initial_parameters"]["sidechain_config"]["eth_contract_address"]
UNPAID_FEE_METHOD = "0x19c4518a"
COMMITTEE = "0x130f679d"
ETHEREUM_OPERATIONS = json.load(open(os.path.join(RESOURCES_DIR, "ethereum_transactions.json")))
ETHEREUM_CONTRACTS = json.load(open(os.path.join(RESOURCES_DIR, "ethereum_contracts.json")))
with open(".env") as env_file:
GANACHE_PK = (env_file.readline().split('RPC_ACCOUNT=')[1]).split(",")[0]
with open(".env") as env_file:
ROPSTEN_PK = env_file.readlines()[-1].split('ROPSTEN_PRIVATE_KEY=')[1]
|
[] |
[] |
[
"BASE_URL",
"INIT0_PK",
"ETHEREUM_URL",
"NATHAN_PK",
"DEBUG",
"ROPSTEN"
] |
[]
|
["BASE_URL", "INIT0_PK", "ETHEREUM_URL", "NATHAN_PK", "DEBUG", "ROPSTEN"]
|
python
| 6 | 0 | |
cmd/trojansourcedetector/main.go
|
package main
import (
"encoding/json"
"errors"
"flag"
"fmt"
"os"
"github.com/haveyoudebuggedit/trojansourcedetector"
)
const defaultConfigFile = ".trojansourcedetector.json"
func main() {
configFile := defaultConfigFile
flag.StringVar(&configFile, "config", configFile, "JSON file containing the configuration.")
flag.Parse()
cfg, err := readConfigFile(configFile)
if err != nil {
if !errors.Is(err, os.ErrNotExist) || configFile != defaultConfigFile {
panic(err)
}
}
detector := trojansourcedetector.New(cfg)
errs := detector.Run()
if err := writeErrors(errs); err != nil {
panic(err)
}
if len(errs.Get()) > 0 {
os.Exit(1)
} else {
fmt.Printf("No errors found.")
}
}
func writeErrors(errors trojansourcedetector.Errors) error {
for _, e := range errors.Get() {
if os.Getenv("GITHUB_ACTIONS") != "" {
fmt.Printf(
"::error file=%s,line=%d,col=%d,title=%s::%s\n",
e.File(),
e.Line(),
e.Column(),
e.Code(),
fmt.Sprintf("%s: %s", e.Code(), e.Details()),
)
fmt.Printf("%s in %s line %d column %d (%s)\n", e.Code(), e.File(), e.Line(), e.Column(), e.Details())
} else {
encoded, err := e.JSON()
if err != nil {
return fmt.Errorf("bug: failed to encode error entry (%w)", err)
}
fmt.Printf("%s\n", encoded)
}
}
return nil
}
func readConfigFile(file string) (*trojansourcedetector.Config, error) {
result := &trojansourcedetector.Config{}
result.Defaults()
fh, err := os.Open(file) //nolint:gosec
if err != nil {
return nil, fmt.Errorf("failed to open config file %s (%w)", file, err)
}
defer func() {
_ = fh.Close()
}()
decoder := json.NewDecoder(fh)
decoder.DisallowUnknownFields()
if err := decoder.Decode(&result); err != nil {
return nil, fmt.Errorf("failed to decode configuration file %s (%w)", file, err)
}
return result, nil
}
|
[
"\"GITHUB_ACTIONS\""
] |
[] |
[
"GITHUB_ACTIONS"
] |
[]
|
["GITHUB_ACTIONS"]
|
go
| 1 | 0 | |
vendor/github.com/kun-lun/common/configuration/configuration.go
|
package configuration
import "github.com/kun-lun/common/storage"
type GlobalConfiguration struct {
StateDir string
Debug bool
Name string
}
type StringSlice []string
func (s StringSlice) ContainsAny(targets ...string) bool {
for _, target := range targets {
for _, element := range s {
if element == target {
return true
}
}
}
return false
}
type Configuration struct {
Global GlobalConfiguration
Command string
SubcommandFlags StringSlice
State storage.State
ShowCommandHelp bool
CommandModifiesState bool
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
cmd/vartojson/main.go
|
package main
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"go/ast"
"go/parser"
"go/printer"
"go/token"
"go/types"
"io/ioutil"
"os"
"os/exec"
"os/signal"
"runtime"
"strings"
"sync"
"github.com/rogpeppe/go-internal/imports"
)
//go:generate gobin -m -run myitcv.io/cmd/helpflagtopkgdoc
type tagsFlag struct {
vals []string
}
func (e *tagsFlag) String() string {
return fmt.Sprintf("%v", e.vals)
}
func (e *tagsFlag) Set(v string) error {
e.vals = append(e.vals, v)
return nil
}
func main() {
os.Exit(main1())
}
func main1() int {
switch err := mainerr(); err {
case nil:
return 0
case flag.ErrHelp:
return 2
default:
fmt.Fprintln(os.Stderr, err)
return 1
}
}
func mainerr() (retErr error) {
fs := flag.NewFlagSet(os.Args[0], flag.ContinueOnError)
fs.Usage = func() {
mainUsage(os.Stderr)
}
var tagsVals tagsFlag
fs.Var(&tagsVals, "tags", "tags for build list")
if err := fs.Parse(os.Args[1:]); err != nil {
return err
}
if len(fs.Args()) != 1 {
return fmt.Errorf("expected a single arg; the variable to marshal")
}
varName := fs.Arg(0)
goos := os.Getenv("GOOS")
if goos == "" {
goos = runtime.GOOS
}
goarch := os.Getenv("GOARCH")
if goarch == "" {
goarch = runtime.GOARCH
}
tags := map[string]bool{
goos: true,
goarch: true,
}
for _, v := range tagsVals.vals {
for _, vv := range strings.Fields(v) {
tags[vv] = true
}
}
fset := token.NewFileSet()
matchFile := func(fi os.FileInfo) bool {
return imports.MatchFile(fi.Name(), tags)
}
pkgs, err := parser.ParseDir(fset, ".", matchFile, 0)
if err != nil {
return fmt.Errorf("failed to parse current directory: %v", err)
}
pkgName := os.Getenv("GOPACKAGE")
pkg := pkgs[pkgName]
if pkg == nil {
return fmt.Errorf("failed to find package for package name %v", pkgName)
}
type match struct {
file *ast.File
expr ast.Expr
}
var matches []match
typeDecls := make(map[string]*ast.TypeSpec)
for _, f := range pkg.Files {
var comments bytes.Buffer
for _, cg := range f.Comments {
if cg == f.Doc || cg.Pos() > f.Package {
break
}
for _, cm := range cg.List {
comments.WriteString(cm.Text + "\n")
}
comments.WriteString("\n")
}
if !imports.ShouldBuild(comments.Bytes(), tags) {
continue
}
for _, gd := range f.Decls {
switch gd := gd.(type) {
case *ast.GenDecl:
for _, s := range gd.Specs {
switch gd.Tok {
case token.VAR:
vs := s.(*ast.ValueSpec)
if len(vs.Values) == 0 {
// no value; nothing to do
continue
}
for i, name := range vs.Names {
expr := vs.Values[i]
if varName == name.Name {
matches = append(matches, match{
file: f,
expr: expr,
})
}
}
case token.TYPE:
ts := s.(*ast.TypeSpec)
typeDecls[ts.Name.Name] = ts
}
}
}
}
}
switch len(matches) {
case 0:
return fmt.Errorf("failed to find declaration of %v", varName)
case 1:
default:
var dups []string
for _, m := range matches {
dups = append(dups, fmt.Sprintf("found declaration of %v at %v", varName, fset.Position(m.expr.Pos())))
}
return fmt.Errorf("%v", strings.Join(dups, "\n"))
}
theMatch := matches[0]
imports := make(map[*ast.ImportSpec]bool)
usedTypes := make(map[*ast.TypeSpec]bool)
work := []ast.Node{theMatch.expr}
visitType := func(node ast.Node) bool {
switch node := node.(type) {
case *ast.SelectorExpr:
if x, ok := node.X.(*ast.Ident); ok {
for _, imp := range pkg.Files[fset.File(node.Pos()).Name()].Imports {
if imp.Name != nil {
if x.Name == imp.Name.Name {
imports[imp] = true
}
} else {
cleanPath := strings.Trim(imp.Path.Value, "\"")
parts := strings.Split(cleanPath, "/")
if x.Name == parts[len(parts)-1] {
imports[imp] = true
}
}
}
}
// we have handled the qualified identifier; do not inspect its Idents
return false
case *ast.Ident:
typ := typeDecls[node.Name]
if typ != nil {
usedTypes[typ] = true
} else if types.Universe.Lookup(node.Name) == nil {
panic(fmt.Errorf("failed to find type declaration for %v", node.Name))
}
work = append(work, node)
}
return true
}
visitVal := func(node ast.Node) bool {
switch node := node.(type) {
case *ast.CompositeLit:
if node.Type != nil {
ast.Inspect(node.Type, visitType)
}
case *ast.BasicLit:
ast.Inspect(node, visitType)
}
return true
}
err = func() (err error) {
defer func() {
if r := recover(); r != nil {
err = r.(error)
}
}()
for len(work) > 0 {
w := work[0]
work = work[1:]
ast.Inspect(w, visitVal)
}
return
}()
if err != nil {
return fmt.Errorf("failed to walk AST: %v", err)
}
var tempFile string
var lock sync.Mutex
ctrlc := make(chan os.Signal, 1) // signal.Notify requires a buffered channel
signal.Notify(ctrlc, os.Interrupt)
go func() {
<-ctrlc
lock.Lock()
if tempFile != "" {
os.Remove(tempFile)
}
os.Exit(1)
}()
lock.Lock()
tf, err := ioutil.TempFile(".", "vartojson.*.go")
if err != nil {
lock.Unlock()
return fmt.Errorf("failed to create temp file: %v", err)
}
tempFile = tf.Name()
defer func() {
lock.Lock()
defer lock.Unlock()
os.Remove(tempFile)
}()
lock.Unlock()
var buf bytes.Buffer
p := func(format string, args ...interface{}) {
fmt.Fprintf(&buf, format, args...)
}
pp := func(node interface{}) string {
var sb strings.Builder
printer.Fprint(&sb, fset, node)
return sb.String()
}
p(`
package main
import (
"fmt"
"encoding/json"
)
`[1:])
for i := range imports {
p("import %v\n", pp(i))
}
for t := range usedTypes {
p("type %v\n", pp(t))
}
p(`
func main() {
v := %v
byts, err := json.MarshalIndent(&v, "", " ")
if err != nil {
panic(err)
}
fmt.Println(string(byts))
}
`[1:], pp(theMatch.expr))
if err := ioutil.WriteFile(tempFile, buf.Bytes(), 0666); err != nil {
return fmt.Errorf("failed to write temp main")
}
var stdout, stderr bytes.Buffer
cmd := exec.Command("go", "run", tempFile)
cmd.Stdout = &stdout
cmd.Stderr = &stderr
if err := cmd.Run(); err != nil {
return fmt.Errorf("failed to %v: %v\n%s", strings.Join(cmd.Args, " "), err, stderr.Bytes())
}
// reparse into an interface{} and write out again... so that we get consistently formatted JSON
// output
var i interface{}
if err := json.Unmarshal(stdout.Bytes(), &i); err != nil {
return fmt.Errorf("failed to Unmarshal JSON: %v", err)
}
toWrite, err := json.MarshalIndent(i, "", " ")
if err != nil {
return fmt.Errorf("failed to re-Marshal JSON: %v", err)
}
fn := "gen_" + varName + "_vartojson.json"
if err := ioutil.WriteFile(fn, append(toWrite, '\n'), 0666); err != nil {
return fmt.Errorf("failed to write %v: %v", fn, err)
}
return nil
}
|
[
"\"GOOS\"",
"\"GOARCH\"",
"\"GOPACKAGE\""
] |
[] |
[
"GOARCH",
"GOOS",
"GOPACKAGE"
] |
[]
|
["GOARCH", "GOOS", "GOPACKAGE"]
|
go
| 3 | 0 | |
pipenv/vendor/pythonfinder/pythonfinder.py
|
# -*- coding=utf-8 -*-
from __future__ import print_function, absolute_import
import os
import six
import operator
from .models import SystemPath
class Finder(object):
def __init__(self, path=None, system=False, global_search=True):
"""Finder A cross-platform Finder for locating python and other executables.
Searches for python and other specified binaries starting in `path`, if supplied,
but searching the bin path of `sys.executable` if `system=True`, and then
searching in the `os.environ['PATH']` if `global_search=True`. When `global_search`
is `False`, this search operation is restricted to the allowed locations of
`path` and `system`.
:param path: A bin-directory search location, defaults to None
:param path: str, optional
:param system: Whether to include the bin-dir of `sys.executable`, defaults to False
:param system: bool, optional
:param global_search: Whether to search the global path from os.environ, defaults to True
:param global_search: bool, optional
:returns: a :class:`~pythonfinder.pythonfinder.Finder` object.
"""
self.path_prepend = path
self.global_search = global_search
self.system = system
self._system_path = None
self._windows_finder = None
@property
def system_path(self):
if not self._system_path:
self._system_path = SystemPath.create(
path=self.path_prepend,
system=self.system,
global_search=self.global_search,
)
return self._system_path
@property
def windows_finder(self):
if os.name == "nt" and not self._windows_finder:
from .models import WindowsFinder
self._windows_finder = WindowsFinder()
return self._windows_finder
def which(self, exe):
return self.system_path.which(exe)
def find_python_version(
self, major, minor=None, patch=None, pre=None, dev=None, arch=None
):
from .models import PythonVersion
if (
isinstance(major, six.string_types)
and pre is None
and minor is None
and dev is None
and patch is None
):
if arch is None and "-" in major:
major, arch = major.rsplit("-", 1)
if not arch.isdigit():
major = "{0}-{1}".format(major, arch)
else:
arch = "{0}bit".format(arch)
version_dict = PythonVersion.parse(major)
major = version_dict.get("major", major)
minor = version_dict.get("minor", minor)
patch = version_dict.get("patch", patch)
pre = version_dict.get("is_prerelease", pre) if pre is None else pre
dev = version_dict.get("is_devrelease", dev) if dev is None else dev
arch = version_dict.get("architecture", arch) if arch is None else arch
if os.name == "nt":
match = self.windows_finder.find_python_version(
major, minor=minor, patch=patch, pre=pre, dev=dev, arch=arch
)
if match:
return match
return self.system_path.find_python_version(
major=major, minor=minor, patch=patch, pre=pre, dev=dev, arch=arch
)
def find_all_python_versions(
self, major=None, minor=None, patch=None, pre=None, dev=None, arch=None
):
version_sort = operator.attrgetter("as_python.version_sort")
python_version_dict = getattr(self.system_path, "python_version_dict")
if python_version_dict:
paths = filter(
None,
[
path
for version in python_version_dict.values()
for path in version
if path.as_python
],
)
paths = sorted(paths, key=version_sort, reverse=True)
return paths
versions = self.system_path.find_all_python_versions(
major=major, minor=minor, patch=patch, pre=pre, dev=dev, arch=arch
)
if not isinstance(versions, list):
versions = [versions]
paths = sorted(versions, key=version_sort, reverse=True)
path_map = {}
for path in paths:
try:
resolved_path = path.path.resolve()
except OSError:
resolved_path = path.path.absolute()
if not path_map.get(resolved_path.as_posix()):
path_map[resolved_path.as_posix()] = path
return list(path_map.values())
|
[] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
python
| 1 | 0 | |
bot/bot.go
|
package bot
import (
"github.com/jonas747/discordgo"
"github.com/jonas747/dshardmanager"
"github.com/jonas747/dutil/dstate"
"github.com/jonas747/yagpdb/bot/eventsystem"
"github.com/jonas747/yagpdb/common"
log "github.com/sirupsen/logrus"
"os"
"strconv"
"sync"
"time"
)
var (
// When the bot was started
Started = time.Now()
Running bool
State *dstate.State
ShardManager *dshardmanager.Manager
StateHandlerPtr *eventsystem.Handler
)
func Setup() {
// Things may rely on state being available at this point for initialization
State = dstate.NewState()
eventsystem.AddHandler(HandleReady, eventsystem.EventReady)
StateHandlerPtr = eventsystem.AddHandler(StateHandler, eventsystem.EventAll)
eventsystem.ConcurrentAfter = StateHandlerPtr
// eventsystem.AddHandler(HandlePresenceUpdate, eventsystem.EventPresenceUpdate)
eventsystem.AddHandler(ConcurrentEventHandler(EventLogger.handleEvent), eventsystem.EventAll)
eventsystem.AddHandler(RedisWrapper(HandleGuildCreate), eventsystem.EventGuildCreate)
eventsystem.AddHandler(RedisWrapper(HandleGuildDelete), eventsystem.EventGuildDelete)
eventsystem.AddHandler(RedisWrapper(HandleGuildUpdate), eventsystem.EventGuildUpdate)
eventsystem.AddHandler(RedisWrapper(HandleGuildRoleCreate), eventsystem.EventGuildRoleCreate)
eventsystem.AddHandler(RedisWrapper(HandleGuildRoleUpdate), eventsystem.EventGuildRoleUpdate)
eventsystem.AddHandler(RedisWrapper(HandleGuildRoleRemove), eventsystem.EventGuildRoleDelete)
eventsystem.AddHandler(RedisWrapper(HandleChannelCreate), eventsystem.EventChannelCreate)
eventsystem.AddHandler(RedisWrapper(HandleChannelUpdate), eventsystem.EventChannelUpdate)
eventsystem.AddHandler(RedisWrapper(HandleChannelDelete), eventsystem.EventChannelDelete)
eventsystem.AddHandler(RedisWrapper(HandleGuildMemberUpdate), eventsystem.EventGuildMemberUpdate)
log.Info("Initializing bot plugins")
for _, plugin := range common.Plugins {
if botPlugin, ok := plugin.(Plugin); ok {
botPlugin.InitBot()
log.Info("Initialized bot plugin ", plugin.Name())
}
}
log.Printf("Registered %d event handlers", eventsystem.NumHandlers(eventsystem.EventAll))
}
func Run() {
log.Println("Running bot")
connEvtChannel, _ := strconv.ParseInt(os.Getenv("YAGPDB_CONNEVT_CHANNEL"), 10, 64)
connStatusChannel, _ := strconv.ParseInt(os.Getenv("YAGPDB_CONNSTATUS_CHANNEL"), 10, 64)
// Set up shard manager
ShardManager = dshardmanager.New(common.Conf.BotToken)
ShardManager.LogChannel = connEvtChannel
ShardManager.StatusMessageChannel = connStatusChannel
ShardManager.Name = "YAGPDB"
ShardManager.GuildCountsFunc = GuildCountsFunc
ShardManager.SessionFunc = func(token string) (session *discordgo.Session, err error) {
session, err = discordgo.New(token)
if err != nil {
return
}
session.StateEnabled = false
session.LogLevel = discordgo.LogInformational
session.SyncEvents = true
return
}
// Only handler
ShardManager.AddHandler(eventsystem.HandleEvent)
shardCount, err := ShardManager.GetRecommendedCount()
if err != nil {
panic("Failed getting shard count: " + err.Error())
}
go EventLogger.run(shardCount)
for i := 0; i < shardCount; i++ {
waitingReadies = append(waitingReadies, i)
}
State.MaxChannelMessages = 1000
State.MaxMessageAge = time.Hour
// State.Debug = true
Running = true
go ShardManager.Start()
go MemberFetcher.Run()
go mergedMessageSender()
go MonitorLoading()
for _, p := range common.Plugins {
starter, ok := p.(BotStarterHandler)
if ok {
starter.StartBot()
log.Debug("Ran StartBot for ", p.Name())
}
}
}
func MonitorLoading() {
t := time.NewTicker(time.Second)
defer t.Stop()
for {
<-t.C
waitingGuildsMU.Lock()
numWaitingGuilds := len(waitingGuilds)
numWaitingShards := len(waitingReadies)
waitingGuildsMU.Unlock()
log.Infof("Starting up... GC's Remaining: %d, Shards remaining: %d", numWaitingGuilds, numWaitingShards)
if numWaitingGuilds == 0 && numWaitingShards == 0 {
return
}
}
}
func Stop(wg *sync.WaitGroup) {
for _, v := range common.Plugins {
stopper, ok := v.(BotStopperHandler)
if !ok {
continue
}
wg.Add(1)
log.Debug("Sending stop event to stopper: ", v.Name())
go stopper.StopBot(wg)
}
ShardManager.StopAll()
wg.Done()
}
// checks all connected guilds and emits a guild-removed event for those no longer connected
func checkConnectedGuilds() {
log.Info("Checking joined guilds")
client, err := common.RedisPool.Get()
if err != nil {
log.WithError(err).Error("Failed retrieving connection from redis pool")
return
}
defer common.RedisPool.Put(client)
currentlyConnected, err := client.Cmd("SMEMBERS", "connected_guilds").List()
if err != nil {
log.WithError(err).Error("Failed retrieving currently connected guilds")
return
}
guilds := make([]*discordgo.UserGuild, 0)
var after int64
for {
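// Page through the bot's guilds 100 at a time, using the last guild ID as the
// "after" cursor; a short page means we've reached the end.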
g, err := common.BotSession.UserGuilds(100, 0, after)
if err != nil {
log.WithError(err).Error("Userguilds failed")
return
}
guilds = append(guilds, g...)
if len(g) < 100 {
break
}
after = g[len(g)-1].ID
}
OUTER:
for _, gID := range currentlyConnected {
parsedGID, _ := strconv.ParseInt(gID, 10, 64)
for _, g := range guilds {
if g.ID == parsedGID {
continue OUTER
}
}
err := client.Cmd("SREM", "connected_guilds", gID).Err
if err != nil {
log.WithError(err).Error("Failed removing guild from connected guilds")
} else {
EmitGuildRemoved(client, parsedGID)
log.WithField("guild", gID).Info("Removed from guild when offline")
}
}
}
func GuildCountsFunc() []int {
numShards := ShardManager.GetNumShards()
result := make([]int, numShards)
State.RLock()
for _, v := range State.Guilds {
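// Discord assigns guilds to shards as (guild_id >> 22) % num_shards,
// so tally each guild under the shard that owns it.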
shard := (v.ID() >> 22) % int64(numShards)
result[shard]++
}
State.RUnlock()
return result
}
|
[
"\"YAGPDB_CONNEVT_CHANNEL\"",
"\"YAGPDB_CONNSTATUS_CHANNEL\""
] |
[] |
[
"YAGPDB_CONNEVT_CHANNEL",
"YAGPDB_CONNSTATUS_CHANNEL"
] |
[]
|
["YAGPDB_CONNEVT_CHANNEL", "YAGPDB_CONNSTATUS_CHANNEL"]
|
go
| 2 | 0 | |
app/bot/bot.py
|
import json
import logging
import os
from time import time
from apiai import ApiAI
from raven.contrib.django.raven_compat.models import client as error_client
from lib.config import FB_PAGE_TOKEN
from lib.response import Replyable
from lib.redis import redis_connection, HANDLED_UPDATES_FB
from metrics.models.activity import UserActivity
from metrics.models.unique_users import UserListing
# dirty
from .callbacks import dirty
from .callbacks import result, calendar, news, \
subscription, video, medal, athlete, standing, sport
from .callbacks import testing
from .callbacks.default import (
get_started, greetings, push, push_step, subscribe, unsubscribe, share_bot,
apiai_fulfillment, wiki, countdown, korea_standard_time, story, story_payload, report,
report_step, how_to, privacy, about_bot, company_details, btn_send_report)
from .handlers.apiaihandler import ApiAiHandler
from .handlers.payloadhandler import PayloadHandler
from .handlers.texthandler import TextHandler
logger = logging.getLogger(__name__)
DIALOGFLOW_TOKEN = os.environ.get('DIALOGFLOW_TOKEN', 'na')
ADMINS = [
1947930888581193, #Lisa
]
def make_event_handler():
ai = ApiAI(DIALOGFLOW_TOKEN)
handlers = []
# testing
handlers.extend(testing.handlers)
handlers.extend(subscription.handlers)
handlers.extend(result.handlers)
handlers.extend(video.handlers)
handlers.extend(medal.handlers)
handlers.extend(athlete.handlers)
handlers.extend(standing.handlers)
handlers.extend(calendar.handlers)
handlers.extend(news.handlers)
handlers.extend([
ApiAiHandler(greetings, 'gruss'),
PayloadHandler(greetings, ['gruss']),
PayloadHandler(get_started, ['start']),
ApiAiHandler(subscribe, 'anmelden'),
PayloadHandler(subscribe, ['subscribe']),
ApiAiHandler(unsubscribe, 'abmelden'),
PayloadHandler(unsubscribe, ['unsubscribe']),
# menu handler
PayloadHandler(privacy, ['privacy']),
PayloadHandler(company_details, ['company_details']),
PayloadHandler(about_bot, ['about']),
PayloadHandler(how_to, ['how_to']),
PayloadHandler(share_bot, ['share_bot']),
ApiAiHandler(share_bot, 'share_bot'),
ApiAiHandler(push, 'push.highlight'),
PayloadHandler(push_step, ['push', 'report', 'next_state']),
ApiAiHandler(report, 'push.report'),
PayloadHandler(btn_send_report, ['report_sport', 'report_discipline']),
PayloadHandler(report_step, ['report', 'next_state']),
ApiAiHandler(korea_standard_time, 'korea_standard_time'),
ApiAiHandler(countdown, 'countdown'),
ApiAiHandler(countdown, 'info.olympia.countown_days'),
ApiAiHandler(wiki, 'wiki'),
#story
PayloadHandler(story_payload, ['story', 'fragment']),
# info.general
ApiAiHandler(sport.api_sport, 'info.general.sport', follow_up=True),
# ApiAiHandler(sport.api_discipline,'info.general.discipline'),
# ApiAiHandler(calendar.api_next, 'info.general.sport'),
ApiAiHandler(calendar.api_next, 'info.general.discipline'),
# info.match.result
ApiAiHandler(result.api_winner, 'info.match.result.winner', follow_up=True),
ApiAiHandler(result.api_podium, 'info.match.result.podium', follow_up=True),
# info.match.calendar
ApiAiHandler(calendar.api_next, 'info.match.calendar.next', follow_up=True),
PayloadHandler(calendar.pl_entry_by_matchmeta, ['calendar.entry_by_matchmeta']),
# info.medal
ApiAiHandler(medal.medals, 'info.medals.filtered'),
ApiAiHandler(medal.medals_table, 'info.medals.table'),
# dirty
ApiAiHandler(dirty.force_start, 'dirty.force_start'),
TextHandler(apiai_fulfillment, '.*')
])
def query_api_ai(event):
"""
Runs the message text through api.ai if the message is a regular text message and returns
the response dict. Returns None if the message is not a regular text message (buttons etc).
"""
message = event['message']
text = message.get('text')
if (text is not None
and event.get('postback') is None
and message.get('quick_reply') is None):
request = ai.text_request()
request.lang = 'de'
request.query = text
request.session_id = event['sender']['id']
response = request.getresponse()
nlp = json.loads(response.read().decode())
nlp['result']['parameters'] = {
k: v or None for k, v in nlp['result']['parameters'].items()}
if nlp['result']['contexts']:
temp = nlp['result']['parameters']
temp = {
i[:-2]: temp[i]
for i in temp
if i[:-2] in temp and not temp[i[:-2]]
}
nlp['result']['parameters'].update(temp)
logging.debug(nlp)
return nlp
else:
return None
def api_ai_story_hook(event, nlp):
"""Checks if the api.ai intent is a story hook and runs it.
:returns True if the intent was a story hook and it was processed successfully, else False
"""
try:
if nlp and nlp['result']['metadata']['intentName'].startswith('story:'):
slug = nlp['result']['metadata']['intentName'][len('story:'):]
story(event, slug, fragment_nr=None)
return True
except:
logging.exception("Story failed")
return False
def event_handler(events, type):
"""handle all incoming messages"""
for event in events:
logging.debug('Incoming message : ' + str(event))
message = event.get('message')
sender_id = event['sender']['id']
if message:
msg_id = message['mid']
else:
msg_id = f'{sender_id}.{event["timestamp"]}'
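# Deduplicate incoming events: ZADD returns 0 when the member already exists in
# the sorted set, which means this message id was handled before.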
with redis_connection() as redis:
already_handled = (redis.zadd(HANDLED_UPDATES_FB, time(), msg_id) == 0)
if already_handled:
logger.warning('Skipping duplicate event: %s', event)
continue
event = Replyable(event, type)
UserListing.capture(sender_id)
if message:
nlp = query_api_ai(event)
if nlp:
message['nlp'] = nlp
api_ai_story_hook(event, nlp)
for handler in handlers:
try:
if handler.check_event(event):
try:
handler.handle_event(event)
except Exception as e:
error_client.captureException()
logging.exception("Handling event failed")
try:
event.send_text('Huppsala, das hat nicht funktioniert :(')
if int(sender_id) in ADMINS:
txt = str(e)
txt = txt.replace(FB_PAGE_TOKEN, '[redacted]')
txt = txt.replace(DIALOGFLOW_TOKEN, '[redacted]')
event.send_text(txt)
except:
pass
finally:
break
except:
logging.exception("Testing handler failed")
return event_handler
handle_events = make_event_handler()
|
[] |
[] |
[
"DIALOGFLOW_TOKEN"
] |
[]
|
["DIALOGFLOW_TOKEN"]
|
python
| 1 | 0 | |
backend-service/internal/billing/billing.go
|
package billing
import (
"encoding/json"
"fmt"
"net/http"
"net/url"
"os"
"strings"
Stripe "github.com/stripe/stripe-go/v72"
"github.com/stripe/stripe-go/v72/client"
"github.com/image-village/backend-service/internal/auth"
)
var backendHost = os.Getenv("BACKEND_HOST")
var frontendHost = os.Getenv("FRONTEND_HOST")
var Handler = getHandler()
var stripe *client.API
// Subscriptions describes the plans available for checkout, for the frontend to display
type Subscriptions struct {
Nickname string `json:"nickname"`
Interval string `json:"interval"`
Type string `json:"type"`
ID string `json:"id"`
Price string `json:"price"`
}
func getHandler() http.Handler {
setupStripe()
mux := http.NewServeMux()
mux.HandleFunc("/billing/products", getProducts)
mux.HandleFunc("/billing/checkout", checkout)
mux.HandleFunc("/billing/success", success)
mux.HandleFunc("/billing/cancel", cancel)
mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("Not found"))
})
return mux
}
func setupStripe() {
stripe = &client.API{}
apiKey := os.Getenv("STRIPE_API_SECRET_KEY")
stripe.Init(apiKey, nil)
}
func getProducts(w http.ResponseWriter, r *http.Request) {
active := true
listParams := &Stripe.PriceListParams{Active: &active}
stripePriceIterator := stripe.Prices.List(listParams).Iter
subs := []Subscriptions{}
// Build the subscription display data returned to the frontend
for stripePriceIterator.Next() {
stripePrice := stripePriceIterator.Current().(*Stripe.Price)
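// Stripe prices are denominated in the currency's smallest unit (e.g. cents),
// so divide by 100 for a human-readable amount.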
amount := float64(stripePrice.UnitAmount) / 100
displayPrice := fmt.Sprintf("%s $%.2f/%s", strings.ToUpper(string(stripePrice.Currency)), amount, string(stripePrice.Recurring.Interval))
sub := Subscriptions{
Nickname: stripePrice.Nickname,
Interval: string(stripePrice.Recurring.Interval),
Type: string(stripePrice.Type),
ID: stripePrice.ID,
Price: displayPrice,
}
subs = append(subs, sub)
}
output, err := json.Marshal(&subs)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
w.Write(output)
}
func checkout(w http.ResponseWriter, r *http.Request) {
if r.Method != "POST" {
http.Error(w, "Incorrect method", http.StatusBadRequest)
return
}
price := struct {
ID string `json:"price_id"`
}{}
err := json.NewDecoder(r.Body).Decode(&price)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
paymentMethod := "card"
paymentMode := "subscription"
checkoutQuantity := int64(1)
lineItem := &Stripe.CheckoutSessionLineItemParams{
Price: &price.ID,
Quantity: &checkoutQuantity,
}
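// Stripe substitutes the literal {CHECKOUT_SESSION_ID} placeholder in these
// URLs with the real session id when redirecting after checkout.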
successURL := "https://" + backendHost + "/billing/success?session_id={CHECKOUT_SESSION_ID}"
cancelURL := "https://" + backendHost + "/billing/cancel?session_id={CHECKOUT_SESSION_ID}"
authErr, userInfo := auth.GetUserInfoFromHeaders(r)
clientReferenceID := userInfo.ID
if authErr != nil {
http.Error(w, authErr.Error(), http.StatusUnauthorized)
return
}
checkoutParams := &Stripe.CheckoutSessionParams{
Mode: &paymentMode,
PaymentMethodTypes: []*string{&paymentMethod},
ClientReferenceID: &clientReferenceID,
LineItems: []*Stripe.CheckoutSessionLineItemParams{lineItem},
SuccessURL: &successURL,
CancelURL: &cancelURL,
}
session, err := stripe.CheckoutSessions.New(checkoutParams)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
fmt.Fprintf(w, `{"sessionId": "%s"}`, session.ID)
}
func success(w http.ResponseWriter, r *http.Request) {
sessionId := string(r.URL.Query().Get("session_id"))
session, err := stripe.CheckoutSessions.Get(sessionId, &Stripe.CheckoutSessionParams{})
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
data := map[string]string{
"payment_status": string(session.PaymentStatus),
"amount": fmt.Sprintf("%d", session.AmountSubtotal),
"currency": string(session.Currency),
"customer": string(session.CustomerDetails.Email),
"reference": string(session.ClientReferenceID),
}
baseUrl := url.URL{
Scheme: "https",
Host: frontendHost,
Path: "/billing/confirmation",
}
redirectURL := fmt.Sprintf("%s?%s", baseUrl.String(), mapToQueryString(data))
http.Redirect(w, r, redirectURL, 302)
}
func cancel(w http.ResponseWriter, r *http.Request) {
sessionId := string(r.URL.Query().Get("session_id"))
session, err := stripe.CheckoutSessions.Get(sessionId, &Stripe.CheckoutSessionParams{})
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
data := map[string]string{
"payment_status": string(session.PaymentStatus),
"amount": fmt.Sprintf("%d", session.AmountSubtotal),
"currency": string(session.Currency),
"reference": string(session.ClientReferenceID),
}
baseUrl := url.URL{
Scheme: "https",
Host: frontendHost,
Path: "/billing/confirmation",
}
redirectURL := fmt.Sprintf("%s?%s", baseUrl.String(), mapToQueryString(data))
http.Redirect(w, r, redirectURL, 302)
}
func mapToQueryString(data map[string]string) string {
queryString := url.Values{}
for k, v := range data {
queryString.Add(k, v)
}
return queryString.Encode()
}
|
[
"\"BACKEND_HOST\"",
"\"FRONTEND_HOST\"",
"\"STRIPE_API_SECRET_KEY\""
] |
[] |
[
"BACKEND_HOST",
"STRIPE_API_SECRET_KEY",
"FRONTEND_HOST"
] |
[]
|
["BACKEND_HOST", "STRIPE_API_SECRET_KEY", "FRONTEND_HOST"]
|
go
| 3 | 0 | |
trax/layers/assert_shape.py
|
# coding=utf-8
# Copyright 2020 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Trax decorators and layers for asserts on tensor shapes."""
import functools
import inspect
import string
from absl import logging
from trax.layers import base
from trax.layers import combinators
def assert_shape(specification):
"""Decorator for checking the input and output shapes of Layer.
Decorator can be applied on trax.base.Layer class, or a function returning
a trax.base.Layer class. It uses notation similar to einsum (Einstein
summation convention), achieving a concise and simple representation of tensor shapes.
For example 'ij,jh->ih' is a valid representation of a function taking two
2D matrices as input, and returning a single output, also a 2D matrix.
It improves readability and puts three levels of asserts on the function:
first level is the number of input tensors and output tensors; second level is
the rank of each tensor; third level is the size of each dimension of each
tensor. The decorator inserts those asserts right before and right after
'forward' call.
First level, assert on number of inputs and outputs. In the representation
input tensors are separated from output tensors by an arrow '->'. For layers
taking multiple input tensors or returning multiple output tensors, those
tensors will be separated by a comma ','.
For example, specification 'bsd,df->bsf' asserts that there will be two
input tensors, with shapes represented by 'bsd' and 'df' respectively; and
a single output tensor with shape represented by 'bsf'.
Second level, asserts on possible rank of each tensor. Most commonly,
each letter represents a single dimension. For example, the tensor with shape
represented by 'bsd' has rank three; with 'df' it has rank two. The special
case is an ellipsis ('...'), which expands to an arbitrary number of dimensions,
including zero. For example, the tensor with specification '...sf' has at
least two dimensions. Each tensor may have at most one ellipsis in its representation.
Third level, asserts the size of each dimension. If two dimensions in any
of input or output tensors have the same letter in the representation then
they must have the same size. For example, with a tensor A represented by 'df'
and a tensor B represented by 'bsf', the size of the second dimension of A
must equal the size of the third dimension of B. Another example: with a
tensor C represented by '...dv' and a tensor D represented by 'd', the size of
the first and only dimension of D must be equal to the size of the second to
last dimension of tensor C.
If two distinct tensors have an ellipsis in their representation, then all of the
dimensions covered by those ellipses must match. For example, with a tensor E
represented by '...d' and tensor F represented by '...x' then E and F must
have the same rank, and the sizes of all but the last dimensions must match.
Examples:
# In Dense layer there is a single input and single output; the last dimension
# may change in size, while the sizes of all previous dimensions, marked by
# an ellipsis, will stay the same.
@assert_shape('...a->...b')
class Dense(base.Layer):
(...)
# DotProductCausalAttention takes three tensors as input: Queries, Keys, and
# Values, and outputs a single tensor. Sizes of the first two dimensions in
# all those tensors must match, while the last dimension must match only
# between Queries and Keys, and separately between Values and output tensor.
@assert_shape('blk,blk,bld->bld')
class DotProductCausalAttention(base.Layer):
(...)
# assert_shape can also be placed before the function returning base.Layer.
@assert_shape('...d->...')
def ReduceSum():
return Fn('ReduceSum', lambda x: jnp.sum(x, axis=-1, keepdims=False))
Args:
specification: A text specification for the input/output tensors.
Returns:
The decorator changing the class or function.
"""
caller = inspect.getframeinfo(inspect.stack()[1][0])
message = f'Defined at {caller.filename}:{caller.lineno}'
def wrap_cls(cls):
forward = getattr(cls, 'forward')
init = getattr(cls, '__init__')
before_spec, after_spec = specification.split('->')
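# The input and output AssertShape layers share one defined_shapes dict (via
# _create_link), so dimension letters bound while checking the inputs also
# constrain the outputs.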
@functools.wraps(init)
def init_wrapper(self, *args, **kwargs):
before_assert = AssertShape(before_spec,
message=message + ' function input')
after_assert = AssertShape(after_spec,
message=message + ' function output')
after_assert._create_link(before_assert) # pylint: disable=protected-access
out = init(self, *args, **kwargs)
self._before_assert_fun = before_assert # pylint: disable=protected-access
self._after_assert_fun = after_assert # pylint: disable=protected-access
return out
@functools.wraps(forward)
def forward_wrapper(self, x, *args, **kwargs):
x = self._before_assert_fun.forward(x) # pylint: disable=protected-access
y = forward(self, x, *args, **kwargs)
y = self._after_assert_fun.forward(y) # pylint: disable=protected-access
return y
setattr(cls, 'forward', forward_wrapper)
setattr(cls, '__init__', init_wrapper)
return cls
# TODO(jaszczur): replace this with forward/init override.
def wrap_fun(fun):
@functools.wraps(fun)
def fun_wrapper(*args, **kwargs):
layer = fun(*args, **kwargs)
return AssertFunction(specification, layer, message)
return fun_wrapper
def wrap_fun_or_cls(fun_or_cls):
return (wrap_cls(fun_or_cls) if inspect.isclass(fun_or_cls) else
wrap_fun(fun_or_cls))
return wrap_fun_or_cls
def AssertFunction(specification, layer, message=None): # pylint: disable=invalid-name
"""AssertFunction asserts shapes on the input/output tensors of a layer.
It passes all inputs to the layer, and returns all outputs of the layer
unchanged.
Args:
specification: A specification. See assert_shape decorator for a full
documentation.
layer: A base.Layer to wrap around.
message: An optional message to print if an assert fails. By default it will
print the filename and the line number where AssertFunction was called.
Returns:
The given layer wrapped in asserts on its inputs and outputs.
"""
if message is None:
caller = inspect.getframeinfo(inspect.stack()[1][0])
message = f'Defined at {caller.filename}:{caller.lineno}'
before_spec, after_spec = specification.split('->')
before_assert = AssertShape(before_spec, message=message + ' function input')
after_assert = AssertShape(after_spec, message=message + ' function output')
after_assert._create_link(before_assert) # pylint: disable=protected-access
return combinators.Serial(
before_assert, layer, after_assert)
class AssertShape(base.Layer):
"""Layer which put asserts on shapes of tensors, and returns them unchanged.
It borrows the notation from assert_shape decorator, except it doesn't have
the arrow '->' special character, as the input tensors are the same as output.
"""
def __init__(self, spec, message=None, visible_layer=False):
"""Creates AssertShape layer.
Args:
spec: Specification for input tensors. See assert_shape decorator for the
full documentation.
message: An optional message to include when assert fails. By default it
includes the filename and line number where this function was called.
visible_layer: If true, print this layer inside the model (default: False)
"""
name = 'AssertShape' if visible_layer else ''
super().__init__(name=name)
spec = spec.replace('...', '.')
for letter in spec:
assert letter in string.ascii_letters + string.digits + '.' + ','
self._specs = spec.split(',')
self._n_in = self._n_out = len(self._specs)
self.defined_shapes = {str(i): i for i in range(10)}
self.linked = False
if message is None:
caller = inspect.getframeinfo(inspect.stack()[1][0])
self.message = f'Defined at {caller.filename}:{caller.lineno}'
else:
self.message = message
def forward(self, xs):
if not self.linked:
for k in list(self.defined_shapes.keys()):
if not k.isdigit():
del self.defined_shapes[k]
if not isinstance(xs, (list, tuple)):
xs = [xs]
# Try-except below checks if something is wrong with shapes. It can happen
# e.g. when using trax2keras. If this is the case we cannot check if shapes
# are correct or not
try:
for x in xs:
for i in range(len(x.shape)):
if x.shape[i] != x.shape[i]:
raise TypeError()
except TypeError:
message = ('AssertShape cannot check shapes. This often happens when'
' using trax2keras. Shape asserts are skipped.')
print(message)
logging.warning(message)
if len(xs) == 1:
return xs[0]
else:
return xs
# helper functions
def assert_true(cond):
if not cond:
shapes = [x.shape for x in xs]
defined_shapes_dict_without_digits = {
k: v for k, v in self.defined_shapes.items() if not k.isdigit()}
raise ValueError(
f'AssertShape Error. Expected {self._specs}, got {shapes} with dict'
f' {defined_shapes_dict_without_digits}. {self.message}')
def assert_equal(a, b):
assert_true(a == b)
return a
def check_shape(shape, spec):
assert_equal(len(shape), len(spec))
for shape_dim, letter in zip(shape, spec):
if letter in self.defined_shapes:
self.defined_shapes[letter] = assert_equal(
self.defined_shapes[letter], shape_dim)
else:
self.defined_shapes[letter] = shape_dim
def check_ellipsys(shape):
if '.' not in self.defined_shapes:
self.defined_shapes['.'] = shape
else:
assert_equal(len(shape), len(self.defined_shapes['.']))
for s1, s2 in zip(shape, self.defined_shapes['.']):
assert_equal(s1, s2)
# actual asserts
assert_equal(len(xs), len(self._specs))
for x, spec in zip(xs, self._specs):
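# '...' was normalized to '.' in __init__, so a spec like 'b.d' means: check the
# leading dims against 'b', the trailing dims against 'd', and compare the middle
# dims against any previously recorded ellipsis dims.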
if '.' in spec:
assert_true(len(x.shape) >= (len(spec) - 1))
before, after = spec.split('.')
check_shape(x.shape[:len(before)], before)
if after:
check_shape(x.shape[-len(after):], after)
check_ellipsys(x.shape[len(before):-len(after)])
else:
# if len(after) == 0 then -len(after) in indices evaluates badly.
check_ellipsys(x.shape[len(before):])
else:
check_shape(x.shape, spec)
if len(xs) == 1:
return xs[0]
else:
return xs
def _create_link(self, other):
"""Internal. Used to create a shared dictionary."""
# This works well for assert_shape and AssertFunction; but it can break
# easily if the order of calls to forward() is not known in advance.
self.linked = True
self.defined_shapes = other.defined_shapes
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
tests/awsf3/test_utils.py
|
import os
import pytest
import json
import boto3
from datetime import datetime
from awsf3.utils import (
create_env_def_file,
create_mount_command_list,
create_download_command_list,
create_download_cmd,
add_download_cmd,
determine_key_type,
create_output_files_dict,
read_md5file,
update_postrun_json_init,
update_postrun_json_upload_output,
postrun_json_final,
upload_postrun_json,
upload_to_output_target,
upload_output
)
from awsf3.log import (
parse_commands,
read_logfile_by_line
)
from tibanna.awsem import (
AwsemRunJson,
AwsemRunJsonInput,
AwsemPostRunJsonOutput,
AwsemPostRunJson
)
from tests.awsf3.conftest import upload_test_bucket
def test_create_env_def_file_cwl():
"""testing create_env_def_file with cwl option and an input Env variable"""
envfilename = 'someenvfile'
runjson_dict = {'Job': {'App': {'language': 'cwl_v1',
'cwl_url': 'someurl',
'main_cwl': 'somecwl',
'other_cwl_files': 'othercwl1,othercwl2'},
'Input': {'Env': {'SOME_ENV': '1234'}},
'Output': {'output_bucket_directory': 'somebucket'},
'JOBID': 'somejobid'},
'config': {'log_bucket': 'somebucket'}}
runjson = AwsemRunJson(**runjson_dict)
create_env_def_file(envfilename, runjson, 'cwl')
with open(envfilename, 'r') as f:
envfile_content = f.read()
right_content = ('export LANGUAGE=cwl_v1\n'
'export CWL_URL=someurl\n'
'export MAIN_CWL=somecwl\n'
'export CWL_FILES="othercwl1 othercwl2"\n'
'export SOME_ENV=1234\n'
'export PRESERVED_ENV_OPTION="--preserve-environment SOME_ENV "\n'
'export DOCKER_ENV_OPTION="-e SOME_ENV "\n')
assert envfile_content == right_content
os.remove(envfilename)
def test_create_env_def_file_wdl_v1():
"""testing create_env_def_file with wdl option and no input Env variable"""
envfilename = 'someenvfile'
runjson_dict = {'Job': {'App': {'language': 'wdl_v1',
'wdl_url': 'someurl',
'main_wdl': 'somewdl',
'other_wdl_files': 'otherwdl1,otherwdl2'},
'Input': {'Env': {}},
'Output': {'output_bucket_directory': 'somebucket'},
'JOBID': 'somejobid'},
'config': {'log_bucket': 'somebucket'}}
runjson = AwsemRunJson(**runjson_dict)
create_env_def_file(envfilename, runjson, 'wdl_v1')
with open(envfilename, 'r') as f:
envfile_content = f.read()
right_content = ('export LANGUAGE=wdl_v1\n'
'export WDL_URL=someurl\n'
'export MAIN_WDL=somewdl\n'
'export WDL_FILES="otherwdl1 otherwdl2"\n'
'export PRESERVED_ENV_OPTION=""\n'
'export DOCKER_ENV_OPTION=""\n')
assert envfile_content == right_content
os.remove(envfilename)
def test_create_env_def_file_wdl_draft2():
"""testing create_env_def_file with wdl option and no input Env variable"""
envfilename = 'someenvfile'
runjson_dict = {'Job': {'App': {'language': 'wdl_draft2',
'wdl_url': 'someurl',
'main_wdl': 'somewdl',
'other_wdl_files': 'otherwdl1,otherwdl2'},
'Input': {'Env': {}},
'Output': {'output_bucket_directory': 'somebucket'},
'JOBID': 'somejobid'},
'config': {'log_bucket': 'somebucket'}}
runjson = AwsemRunJson(**runjson_dict)
create_env_def_file(envfilename, runjson, 'wdl_draft2')
with open(envfilename, 'r') as f:
envfile_content = f.read()
right_content = ('export LANGUAGE=wdl_draft2\n'
'export WDL_URL=someurl\n'
'export MAIN_WDL=somewdl\n'
'export WDL_FILES="otherwdl1 otherwdl2"\n'
'export PRESERVED_ENV_OPTION=""\n'
'export DOCKER_ENV_OPTION=""\n')
assert envfile_content == right_content
os.remove(envfilename)
def test_create_env_def_file_wdl():
"""testing create_env_def_file with wdl option and no input Env variable"""
envfilename = 'someenvfile'
runjson_dict = {'Job': {'App': {'language': 'wdl',
'wdl_url': 'someurl',
'main_wdl': 'somewdl',
'other_wdl_files': 'otherwdl1,otherwdl2'},
'Input': {'Env': {}},
'Output': {'output_bucket_directory': 'somebucket'},
'JOBID': 'somejobid'},
'config': {'log_bucket': 'somebucket'}}
runjson = AwsemRunJson(**runjson_dict)
create_env_def_file(envfilename, runjson, 'wdl')
with open(envfilename, 'r') as f:
envfile_content = f.read()
right_content = ('export LANGUAGE=wdl\n'
'export WDL_URL=someurl\n'
'export MAIN_WDL=somewdl\n'
'export WDL_FILES="otherwdl1 otherwdl2"\n'
'export PRESERVED_ENV_OPTION=""\n'
'export DOCKER_ENV_OPTION=""\n')
assert envfile_content == right_content
os.remove(envfilename)
def test_create_env_def_file_shell():
"""testing create_env_def_file with shell option and two input Env variables"""
envfilename = 'someenvfile'
runjson_dict = {'Job': {'App': {'language': 'shell',
'command': 'com1;com2',
'container_image': 'someimage'},
'Input': {'Env': {'ENV1': '1234', 'ENV2': '5678'}},
'Output': {'output_bucket_directory': 'somebucket'},
'JOBID': 'somejobid'},
'config': {'log_bucket': 'somebucket'}}
runjson = AwsemRunJson(**runjson_dict)
create_env_def_file(envfilename, runjson, 'shell')
with open(envfilename, 'r') as f:
envfile_content = f.read()
right_content = ('export LANGUAGE=shell\n'
'export COMMAND="com1;com2"\n'
'export CONTAINER_IMAGE=someimage\n'
'export ENV1=1234\n'
'export ENV2=5678\n'
'export PRESERVED_ENV_OPTION="--preserve-environment ENV1 --preserve-environment ENV2 "\n'
'export DOCKER_ENV_OPTION="-e ENV1 -e ENV2 "\n')
assert envfile_content == right_content
os.remove(envfilename)
def test_create_env_def_file_shell2():
"""testing create_env_def_file with shell option with complex commands and an env variable"""
envfilename = 'someenvfile'
complex_command = 'echo $SOME_ENV | xargs -i echo {} > somedir/somefile'
runjson_dict = {'Job': {'App': {'language': 'shell',
'command': complex_command,
'container_image': 'someimage'},
'Input': {'Env': {'SOME_ENV': '1234'}},
'Output': {'output_bucket_directory': 'somebucket'},
'JOBID': 'somejobid'},
'config': {'log_bucket': 'somebucket'}}
runjson = AwsemRunJson(**runjson_dict)
create_env_def_file(envfilename, runjson, 'shell')
with open(envfilename, 'r') as f:
envfile_content = f.read()
right_content = ('export LANGUAGE=shell\n'
'export COMMAND="echo $SOME_ENV | xargs -i echo {} > somedir/somefile"\n'
'export CONTAINER_IMAGE=someimage\n'
'export SOME_ENV=1234\n'
'export PRESERVED_ENV_OPTION="--preserve-environment SOME_ENV "\n'
'export DOCKER_ENV_OPTION="-e SOME_ENV "\n')
assert envfile_content == right_content
os.remove(envfilename)
def test_create_env_def_file_shell3():
"""testing create_env_def_file with shell option with complex commands and an env variable.
double-quotes are escaped when written to the env file ('"' -> '\"')"""
envfilename = 'someenvfile'
complex_command = 'echo "haha" > somefile; ls -1 [st]*'
runjson_dict = {'Job': {'App': {'language': 'shell',
'command': complex_command,
'container_image': 'someimage'},
'Input': {'Env': {}},
'Output': {'output_bucket_directory': 'somebucket'},
'JOBID': 'somejobid'},
'config': {'log_bucket': 'somebucket'}}
runjson = AwsemRunJson(**runjson_dict)
create_env_def_file(envfilename, runjson, 'shell')
with open(envfilename, 'r') as f:
envfile_content = f.read()
right_content = ('export LANGUAGE=shell\n'
'export COMMAND="echo \\"haha\\" > somefile; ls -1 [st]*"\n'
'export CONTAINER_IMAGE=someimage\n'
'export PRESERVED_ENV_OPTION=""\n'
'export DOCKER_ENV_OPTION=""\n')
assert envfile_content == right_content
os.remove(envfilename)
def test_create_env_def_file_snakemake():
"""testing create_env_def_file with shell option and two input Env variables"""
envfilename = 'someenvfile'
runjson_dict = {'Job': {'App': {'language': 'snakemake',
'command': 'com1;com2',
'container_image': 'someimage',
'snakemake_url': 'someurl',
'main_snakemake': 'somesnakemake',
'other_snakemake_files': 'othersnakemake1,othersnakemake2'},
'Input': {},
'Output': {'output_bucket_directory': 'somebucket'},
'JOBID': 'somejobid'},
'config': {'log_bucket': 'somebucket'}}
runjson = AwsemRunJson(**runjson_dict)
create_env_def_file(envfilename, runjson, 'snakemake')
with open(envfilename, 'r') as f:
envfile_content = f.read()
right_content = ('export LANGUAGE=snakemake\n'
'export SNAKEMAKE_URL=someurl\n'
'export MAIN_SNAKEMAKE=somesnakemake\n'
'export SNAKEMAKE_FILES="othersnakemake1 othersnakemake2"\n'
'export COMMAND="com1;com2"\n'
'export CONTAINER_IMAGE=someimage\n'
'export PRESERVED_ENV_OPTION=""\n'
'export DOCKER_ENV_OPTION=""\n')
assert envfile_content == right_content
os.remove(envfilename)
def test_create_mount_command_list():
mountcommand_filename = 'some_mountcommand_filename'
rji_dict = {'arg1': {'path': 'somefile', 'dir': 'somebucket', 'mount': True},
'arg2': {'path': 'somefile2', 'dir': 'somebucket', 'mount': True},
'arg3': {'path': 'whatever', 'dir': 'do_not_mount_this_bucket', 'mount': False},
'arg4': {'path': 'somefile3', 'dir': 'somebucket2', 'mount': True}}
runjson_input = AwsemRunJsonInput(**{'Input_files_data': rji_dict})
create_mount_command_list(mountcommand_filename, runjson_input)
with open(mountcommand_filename, 'r') as f:
mcfile_content = f.read()
right_content = ('mkdir -p /data1/input-mounted-somebucket\n'
'goofys -f somebucket /data1/input-mounted-somebucket &\n'
'mkdir -p /data1/input-mounted-somebucket2\n'
'goofys -f somebucket2 /data1/input-mounted-somebucket2 &\n')
assert mcfile_content == right_content
os.remove(mountcommand_filename)
def test_create_download_command_list_args(mocker):
dl_command_filename = 'some_dlcommand_filename'
rji_dict = {'arg1': {'path': 'somefile', 'dir': 'somebucket', 'mount': False},
'arg2': {'path': 'somefile2.gz', 'dir': 'somebucket', 'mount': False, 'unzip': 'gz'},
'arg3': {'path': 'whatever', 'dir': 'mount_this_bucket', 'mount': True},
'arg4': {'path': 'somefile3', 'dir': 'somebucket2', 'mount': False}}
runjson_input = AwsemRunJsonInput(**{'Input_files_data': rji_dict})
mocker.patch('awsf3.utils.determine_key_type', return_value='File')
create_download_command_list(dl_command_filename, runjson_input)
with open(dl_command_filename, 'r') as f:
dcfile_content = f.read()
right_content = ('aws s3 cp s3://somebucket/somefile /data1/input/somefile; \n'
'aws s3 cp s3://somebucket/somefile2.gz /data1/input/somefile2.gz; '
'gunzip /data1/input/somefile2.gz\n'
'aws s3 cp s3://somebucket2/somefile3 /data1/input/somefile3; \n')
assert dcfile_content == right_content
os.remove(dl_command_filename)
def test_create_download_command_list_args_rename(mocker):
dl_command_filename = 'some_dlcommand_filename'
rji_dict = {'arg1': {'path': 'somefile', 'dir': 'somebucket', 'mount': False, 'rename': 'renamed_file'},
'arg2': {'path': 'somefile2.gz', 'dir': 'somebucket', 'mount': False, 'unzip': 'gz'},
'arg3': {'path': 'whatever', 'dir': 'mount_this_bucket', 'mount': True},
'arg4': {'path': 'somefile3', 'dir': 'somebucket2', 'mount': False, 'rename': 'renamed_file2'}}
runjson_input = AwsemRunJsonInput(**{'Input_files_data': rji_dict})
mocker.patch('awsf3.utils.determine_key_type', return_value='File')
create_download_command_list(dl_command_filename, runjson_input)
with open(dl_command_filename, 'r') as f:
dcfile_content = f.read()
right_content = ('aws s3 cp s3://somebucket/somefile /data1/input/renamed_file; \n'
'aws s3 cp s3://somebucket/somefile2.gz /data1/input/somefile2.gz; '
'gunzip /data1/input/somefile2.gz\n'
'aws s3 cp s3://somebucket2/somefile3 /data1/input/renamed_file2; \n')
assert dcfile_content == right_content
os.remove(dl_command_filename)
def test_create_download_command_list_args_array(mocker):
dl_command_filename = 'some_dlcommand_filename'
rji_dict = {'arg1': {'path': [['somefilea', 'somefileb'], ['somefilec']], 'dir': 'somebucket', 'mount': False,
'rename': [['renameda', 'renamedb'], ['renamedc']]},
'arg2': {'path': [['anotherfilea', 'anotherfileb'], ['anotherfilec']], 'dir': 'somebucket', 'mount': False,
'rename': ''}}
runjson_input = AwsemRunJsonInput(**{'Input_files_data': rji_dict})
mocker.patch('awsf3.utils.determine_key_type', return_value='File')
create_download_command_list(dl_command_filename, runjson_input)
with open(dl_command_filename, 'r') as f:
dcfile_content = f.read()
right_content = ('aws s3 cp s3://somebucket/somefilea /data1/input/renameda; \n'
'aws s3 cp s3://somebucket/somefileb /data1/input/renamedb; \n'
'aws s3 cp s3://somebucket/somefilec /data1/input/renamedc; \n'
'aws s3 cp s3://somebucket/anotherfilea /data1/input/anotherfilea; \n'
'aws s3 cp s3://somebucket/anotherfileb /data1/input/anotherfileb; \n'
'aws s3 cp s3://somebucket/anotherfilec /data1/input/anotherfilec; \n')
assert dcfile_content == right_content
os.remove(dl_command_filename)
def test_create_download_command_list_file_uri(mocker):
dl_command_filename = 'some_dlcommand_filename'
rji_dict = {'file:///data1/input/file1': {'path': 'somefile', 'dir': 'somebucket', 'mount': False},
'file:///data1/input/file2.gz': {'path': 'somefile2.gz', 'dir': 'somebucket', 'mount': False, 'unzip': 'gz'},
'file:///data1/input/haha': {'path': 'whatever', 'dir': 'mount_this_bucket', 'mount': True},
'file:///data1/input/file3': {'path': 'somefile3', 'dir': 'somebucket2', 'mount': False}}
runjson_input = AwsemRunJsonInput(**{'Input_files_data': rji_dict})
mocker.patch('awsf3.utils.determine_key_type', return_value='File')
create_download_command_list(dl_command_filename, runjson_input)
with open(dl_command_filename, 'r') as f:
dcfile_content = f.read()
right_content = ('aws s3 cp s3://somebucket/somefile /data1/input/file1; \n'
'aws s3 cp s3://somebucket/somefile2.gz /data1/input/file2.gz; '
'gunzip /data1/input/file2.gz\n'
'aws s3 cp s3://somebucket2/somefile3 /data1/input/file3; \n')
assert dcfile_content == right_content
os.remove(dl_command_filename)
def test_create_download_cmd_unzip_bz2(mocker):
mocker.patch('awsf3.utils.determine_key_type', return_value='File')
dc_cmd = create_download_cmd('somebucket', 'somefile.bz2', 'sometarget.bz2', '', 'bz2')
assert dc_cmd == 'aws s3 cp s3://somebucket/somefile.bz2 sometarget.bz2; bzip2 -d sometarget.bz2; '
def test_create_download_cmd_unzip_gz(mocker):
mocker.patch('awsf3.utils.determine_key_type', return_value='File')
dc_cmd = create_download_cmd('somebucket', 'somefile.gz', 'sometarget.gz', '', 'gz')
assert dc_cmd == 'aws s3 cp s3://somebucket/somefile.gz sometarget.gz; gunzip sometarget.gz'
def test_create_download_cmd_nounzip(mocker):
mocker.patch('awsf3.utils.determine_key_type', return_value='File')
dc_cmd = create_download_cmd('somebucket', 'somefile.gz', 'sometarget.gz', '', '')
assert dc_cmd == 'aws s3 cp s3://somebucket/somefile.gz sometarget.gz; '
def test_create_download_cmd_nounzip_profile(mocker):
mocker.patch('awsf3.utils.determine_key_type', return_value='File')
dc_cmd = create_download_cmd('somebucket', 'somefile.gz', 'sometarget.gz', 'user1', '')
assert dc_cmd == 'aws s3 cp s3://somebucket/somefile.gz sometarget.gz --profile user1; '
def test_create_download_cmd_unzip_bz2_dir(mocker):
mocker.patch('awsf3.utils.determine_key_type', return_value='Folder')
dc_cmd = create_download_cmd('somebucket', 'somedir', 'sometarget', '', 'bz2')
right_cmd = ('aws s3 cp --recursive s3://somebucket/somedir sometarget; '
'for f in `find sometarget -type f`; '
'do if [[ $f =~ \\.bz2$ ]]; then bzip2 $f; fi; done;')
assert dc_cmd == right_cmd
def test_create_download_cmd_unzip_gz_dir(mocker):
mocker.patch('awsf3.utils.determine_key_type', return_value='Folder')
dc_cmd = create_download_cmd('somebucket', 'somedir', 'sometarget', '', 'gz')
right_cmd = ('aws s3 cp --recursive s3://somebucket/somedir sometarget; '
'for f in `find sometarget -type f`; '
'do if [[ $f =~ \\.gz$ ]]; then gunzip $f; fi; done;')
assert dc_cmd == right_cmd
def test_create_download_cmd_nounzip_dir(mocker):
mocker.patch('awsf3.utils.determine_key_type', return_value='Folder')
dc_cmd = create_download_cmd('somebucket', 'somedir', 'sometarget', '', '')
assert dc_cmd == 'aws s3 cp --recursive s3://somebucket/somedir sometarget; '
def test_create_download_cmd_nounzip_profile_dir(mocker):
mocker.patch('awsf3.utils.determine_key_type', return_value='Folder')
dc_cmd = create_download_cmd('somebucket', 'somedir', 'sometarget', 'user1', '')
assert dc_cmd == 'aws s3 cp --recursive s3://somebucket/somedir sometarget --profile user1; '
def test_read_md5file():
test_md5file_name = 'some_test_md5_file'
with open(test_md5file_name, 'w') as fo:
fo.write('62449071d08c9a9dfa0efbaaa82a62f3\tsomefile\n') # could be tab-delimited
fo.write('d41d8cd98f00b204e9800998ecf8427e anotherfile\n') # could be space-delimited
md5dict = read_md5file(test_md5file_name)
assert md5dict == {'somefile': '62449071d08c9a9dfa0efbaaa82a62f3',
'anotherfile': 'd41d8cd98f00b204e9800998ecf8427e'}
os.remove(test_md5file_name)
def test_read_logfile_by_line():
test_logfile_name = 'some_test_log_file'
with open(test_logfile_name, 'w') as fo:
fo.write('1\n2\n3\n')
log_content = read_logfile_by_line(test_logfile_name)
assert next(log_content) == '1\n'
assert next(log_content) == '2\n'
assert next(log_content) == '3\n'
assert next(log_content) is None
os.remove(test_logfile_name)
def test_parse_commands():
def log_gen():
log = ['Status: Downloaded newer image',
'[job clip] /data1/tmpQM7Ol5$ docker \\',
'run \\',
'-i \\',
'duplexa/4dn-repliseq:v13 \\',
'clip \\',
'VFL.fastq.gz',
'Pulled Docker image node:slim',
'f2b6b4884fc8: Pulling fs layer',
'[job align] /data1/tmp2EQtm2$ docker \\',
'run \\',
'-i \\',
'duplexa/4dn-repliseq:v14 \\',
'run-align.sh']
for line in log:
yield line
yield None
log_content = log_gen()
commands = parse_commands(log_content)
assert commands == [['docker', 'run', '-i', 'duplexa/4dn-repliseq:v13', 'clip', 'VFL.fastq.gz'],
['docker', 'run', '-i', 'duplexa/4dn-repliseq:v14', 'run-align.sh']]
def test_create_output_files_dict_cwl():
md5dict = {'path1': '683153f0051fef9e778ce0866cfd97e9', 'path2': 'c14105f8209836cd3b1cc1b63b906fed'}
outmeta = create_output_files_dict('cwl', {'arg1': {'path': 'path1'}, 'arg2': {'path': 'path2'}}, md5dict=md5dict)
assert outmeta == {'arg1': {'path': 'path1', 'md5sum': md5dict['path1']},
'arg2': {'path': 'path2', 'md5sum': md5dict['path2']}}
def test_create_output_files_dict_cwl_secondary_files():
md5dict = {'path1': '683153f0051fef9e778ce0866cfd97e9', 'path2': 'c14105f8209836cd3b1cc1b63b906fed'}
outmeta = create_output_files_dict('cwl', {'arg1': {'path': 'path1', 'secondaryFiles': [{'path': 'path2'}]}}, md5dict=md5dict)
assert outmeta == {'arg1': {'path': 'path1', 'md5sum': md5dict['path1'],
'secondaryFiles': [{'path': 'path2', 'md5sum': md5dict['path2']}]}}
def test_create_output_files_dict_cwl_no_md5():
outmeta = create_output_files_dict('cwl', {'arg1': {'path': 'path1'}, 'arg2': {'path': 'path2'}})
assert outmeta == {'arg1': {'path': 'path1'}, 'arg2': {'path': 'path2'}}
def test_create_output_files_dict_cwl_no_execution_metadata():
with pytest.raises(Exception) as ex:
outmeta = create_output_files_dict('cwl')
assert 'execution_metadata' in str(ex.value)
def test_create_output_files_dict_wdl():
md5dict = {'path1': '683153f0051fef9e778ce0866cfd97e9', 'path2': 'c14105f8209836cd3b1cc1b63b906fed'}
outmeta = create_output_files_dict('wdl', {'outputs': {'arg1': 'path1', 'arg2': 'path2'}}, md5dict=md5dict)
assert outmeta == {'arg1': {'path': 'path1', 'md5sum': md5dict['path1']},
'arg2': {'path': 'path2', 'md5sum': md5dict['path2']}}
def test_create_output_files_dict_wdl_no_md5():
outmeta = create_output_files_dict('wdl', {'outputs': {'arg1': 'path1', 'arg2': 'path2'}})
assert outmeta == {'arg1': {'path': 'path1'}, 'arg2': {'path': 'path2'}}
def test_create_output_files_dict_wdl_no_execution_metadata():
with pytest.raises(Exception) as ex:
outmeta = create_output_files_dict('wdl')
assert 'execution_metadata' in str(ex.value)
def test_create_output_files_dict_snakemake():
outmeta = create_output_files_dict('snakemake')
assert outmeta == {}
def test_create_output_files_dict_shell():
outmeta = create_output_files_dict('shell')
assert outmeta == {}
def test_postrun_json_final():
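# postrun_json_final() fills the final job stats from these environment
# variables, so set them before calling it.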
os.environ['JOB_STATUS'] = '0'
os.environ['INPUTSIZE'] = '34K'
os.environ['TEMPSIZE'] = '56M'
os.environ['OUTPUTSIZE'] = '78K'
prj = AwsemPostRunJson(**{"Job": {"App": {"App_name": "repliseq-parta"}, "JOBID": "alw3r78v3"}}, strict=False)
postrun_json_final(prj)
d_job = prj.Job.as_dict()
for k in ['end_time', 'status', 'instance_id', 'total_input_size',
'total_tmp_size', 'total_output_size', 'App', 'JOBID']:
assert k in d_job
today = datetime.now().strftime('%Y%m%d')
assert d_job['end_time'].startswith(today)
assert len(d_job['end_time'].split('-')) == 3
assert d_job['status'] == '0'
assert d_job['total_input_size'] == '34K'
assert d_job['total_tmp_size'] == '56M'
assert d_job['total_output_size'] == '78K'
def test_upload_to_output_target():
"""testing comprehensively that includes custom target (file://),
cwl with two secondary file, wdl with conditional arg names"""
testfiledir = 'tests/awsf3/test_files/'
localfile1 = testfiledir + 'some_test_file_to_upload'
localfile2 = testfiledir + 'some_test_file_to_upload2'
localfile3 = testfiledir + 'some_test_file_to_upload3.abc'
localfile4 = testfiledir + 'some_test_file_to_upload3.def'
localfile5 = testfiledir + 'some_test_file_to_upload3.ghi'
# prep prjo (postrun_json_output)
output_target = {'file://' + localfile1: 'somekey',
'arg1': 'somekey2',
'arg2': 'somekey3.abc'}
secondary_output_target = {'arg2': ['somekey3.def', 'somekey3.ghi']}
output_files = {'file://' + localfile1: {'path': localfile1},
'arg1b': {'path': localfile2},
'arg2': {'path': localfile3,
'secondaryFiles': [{'path': localfile4},
{'path': localfile5}]}}
alt_cond_output_argnames = {'arg1': ['arg1a', 'arg1b']}
prjo_dict = {'output_target': output_target,
'Output files': output_files,
'secondary_output_target': secondary_output_target,
'alt_cond_output_argnames': alt_cond_output_argnames,
'output_bucket_directory': upload_test_bucket}
prjo = AwsemPostRunJsonOutput(**prjo_dict)
# run function upload_to_output_target
upload_to_output_target(prjo)
# verify each uploaded key exists in the test bucket, then clean it up
s3 = boto3.client('s3')
def test_and_delete_key(key):
res = s3.get_object(Bucket=upload_test_bucket, Key=key)
assert res['Body'].read()
s3.delete_object(Bucket=upload_test_bucket, Key=key)
with pytest.raises(Exception) as ex:
res = s3.get_object(Bucket=upload_test_bucket, Key=key)
assert 'NoSuchKey' in str(ex.value)
test_and_delete_key('somekey2')
test_and_delete_key('somekey3.abc')
test_and_delete_key('somekey3.def')
test_and_delete_key('somekey3.ghi')
test_and_delete_key('somekey')
def test_upload_postrun_json():
prjfile = "tests/awsf3/postrunjson/bqLd8oa7Tdz1.postrun.json"
upload_postrun_json(prjfile)
# get the result to check
s3 = boto3.client('s3')
res = s3.head_object(Bucket="soos-4dn-bucket", Key='tibanna-test/bqLd8oa7Tdz1.postrun.json')
# cleanup
s3.delete_object(Bucket="soos-4dn-bucket", Key='tibanna-test/bqLd8oa7Tdz1.postrun.json')
# check the result
assert 'ServerSideEncryption' not in res
def test_upload_postrun_json_encrypt_s3_upload():
prjfile = "tests/awsf3/postrunjson/bqLd8oa7Tdz2.postrun.json"
upload_postrun_json(prjfile)
# get the result to check
s3 = boto3.client('s3')
res = s3.head_object(Bucket="soos-4dn-bucket", Key='tibanna-test/bqLd8oa7Tdz2.postrun.json')
# cleanup
s3.delete_object(Bucket="soos-4dn-bucket", Key='tibanna-test/bqLd8oa7Tdz2.postrun.json')
# check the result
assert res['ServerSideEncryption'] == 'aws:kms'
def test_update_postrun_json_upload_output():
rjfile = "tests/awsf3/runjson/GBPtlqb2rFGH.run.json"
md5file = "tests/awsf3/runjson/GBPtlqb2rFGH.md5"
execfile = "tests/awsf3/runjson/GBPtlqb2rFGH.cwllog.json"
prjfile = "tests/awsf3/postrunjson/GBPtlqb2rFGH.postrun.json"
update_postrun_json_upload_output(rjfile, execfile, md5file, prjfile, upload=False)
with open(prjfile, 'r') as f:
d = json.load(f)
print(d)
assert 'md5sum' in d['Job']['Output']['Output files']['vcf']
def test_upload_output():
prjfile = "tests/awsf3/postrunjson/testJob-ABC.postrun.json"
with open(prjfile, 'r') as f:
d = json.load(f)
prj = AwsemPostRunJson(**d)
upload_output(prj)
# get the results to check
s3 = boto3.client('s3')
res = s3.head_object(Bucket="soos-4dn-bucket", Key='tibanna-test/some_test_file_to_upload')
assert 'ServerSideEncryption' not in res
res = s3.head_object(Bucket="soos-4dn-bucket", Key='tibanna-test/some_test_file_to_upload2')
assert 'ServerSideEncryption' not in res
res = s3.head_object(Bucket="soos-4dn-bucket", Key='tibanna-test/some_test_file_to_upload3.abc')
assert 'ServerSideEncryption' not in res
res = s3.head_object(Bucket="soos-4dn-bucket", Key='tibanna-test/some_zip_file_to_upload/dir1/file1')
assert 'ServerSideEncryption' not in res
res = s3.head_object(Bucket="soos-4dn-bucket", Key='tibanna-test/some_zip_file_to_upload/file1')
assert 'ServerSideEncryption' not in res
# clean up
s3.delete_object(Bucket="soos-4dn-bucket", Key='tibanna-test/some_test_file_to_upload')
s3.delete_object(Bucket="soos-4dn-bucket", Key='tibanna-test/some_test_file_to_upload2')
s3.delete_object(Bucket="soos-4dn-bucket", Key='tibanna-test/some_test_file_to_upload3.abc')
s3.delete_object(Bucket="soos-4dn-bucket", Key='tibanna-test/some_zip_file_to_upload/dir1/file1')
s3.delete_object(Bucket="soos-4dn-bucket", Key='tibanna-test/some_zip_file_to_upload/file1')
s3.delete_object(Bucket="soos-4dn-bucket", Key='tibanna-test/some_zip_file_to_upload/file2')
def test_upload_output_encrypt_s3_upload():
prjfile = "tests/awsf3/postrunjson/testJob-ABC.postrun.json"
with open(prjfile, 'r') as f:
d = json.load(f)
d['config']['encrypt_s3_upload'] = True
prj = AwsemPostRunJson(**d)
upload_output(prj)
# get the results to check
s3 = boto3.client('s3')
res = s3.head_object(Bucket="soos-4dn-bucket", Key='tibanna-test/some_test_file_to_upload')
assert res['ServerSideEncryption'] == 'aws:kms'
res = s3.head_object(Bucket="soos-4dn-bucket", Key='tibanna-test/some_test_file_to_upload2')
assert res['ServerSideEncryption'] == 'aws:kms'
res = s3.head_object(Bucket="soos-4dn-bucket", Key='tibanna-test/some_test_file_to_upload3.abc')
assert res['ServerSideEncryption'] == 'aws:kms'
res = s3.head_object(Bucket="soos-4dn-bucket", Key='tibanna-test/some_zip_file_to_upload/dir1/file1')
assert res['ServerSideEncryption'] == 'aws:kms'
res = s3.head_object(Bucket="soos-4dn-bucket", Key='tibanna-test/some_zip_file_to_upload/file1')
assert res['ServerSideEncryption'] == 'aws:kms'
# clean up
s3.delete_object(Bucket="soos-4dn-bucket", Key='tibanna-test/some_test_file_to_upload')
s3.delete_object(Bucket="soos-4dn-bucket", Key='tibanna-test/some_test_file_to_upload2')
s3.delete_object(Bucket="soos-4dn-bucket", Key='tibanna-test/some_test_file_to_upload3.abc')
s3.delete_object(Bucket="soos-4dn-bucket", Key='tibanna-test/some_zip_file_to_upload/dir1/file1')
s3.delete_object(Bucket="soos-4dn-bucket", Key='tibanna-test/some_zip_file_to_upload/file1')
s3.delete_object(Bucket="soos-4dn-bucket", Key='tibanna-test/some_zip_file_to_upload/file2')
|
[] |
[] |
[
"JOB_STATUS",
"INPUTSIZE",
"TEMPSIZE",
"OUTPUTSIZE"
] |
[]
|
["JOB_STATUS", "INPUTSIZE", "TEMPSIZE", "OUTPUTSIZE"]
|
python
| 4 | 0 | |
cache.go
|
package makeaddon
import (
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"math/rand"
"os"
"path/filepath"
"runtime"
)
var (
cacheDir = flag.String("cache-dir", "", "Directory to cache VCS checkouts in")
cache *Cache
)
func init() {
cache = NewCache()
}
// Cache provides persistent directories for checking out dependencies.
type Cache struct {
dir string
content map[string]string
}
// NewCache creates a new cache, loading the previously saved index if it exists.
func NewCache() *Cache {
cache := &Cache{
dir: findCacheDir(),
content: map[string]string{},
}
cache.loadIndex()
return cache
}
// Dir provides a directory to use for the given url/tag combo. The second return parameter indicates whether the
// directory was newly created (true), or has previously been cached (false).
func (c *Cache) Dir(url, tag string) (string, bool) {
key := fmt.Sprintf("%s %s", url, tag)
if existing, ok := c.content[key]; ok {
return filepath.Join(c.dir, existing), false
}
for {
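// Keep drawing random directory names until one does not exist yet, then
// create it and record the mapping in the index.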
dir := randomDirName()
fullDir := filepath.Join(c.dir, dir)
_, err := os.Stat(fullDir)
if os.IsNotExist(err) {
if err := os.MkdirAll(fullDir, os.FileMode(0755)); err != nil {
log.Panicf("Unable to create cache dir: %v", err)
}
c.content[key] = dir
c.saveIndex()
return filepath.Join(c.dir, dir), true
}
}
}
func (c *Cache) loadIndex() {
index := filepath.Join(c.dir, "index.json")
if _, err := os.Stat(index); err == nil {
b, err := ioutil.ReadFile(index)
if err != nil {
log.Printf("Unable to read cache index file: %v", err)
return
}
if err := json.Unmarshal(b, &c.content); err != nil {
log.Printf("Unable to deserialise cache index file: %v", err)
return
}
}
}
func (c *Cache) saveIndex() {
index := filepath.Join(c.dir, "index.json")
b, err := json.Marshal(c.content)
if err != nil {
log.Printf("Unable to serialise cache index file: %v", err)
return
}
if err := ioutil.WriteFile(index, b, os.FileMode(0755)); err != nil {
log.Printf("Unable to write cache index file: %v", err)
}
}
func findCacheDir() string {
if *cacheDir != "" {
return *cacheDir
}
dir := ""
switch runtime.GOOS {
case "windows":
dir = os.Getenv("LocalAppData")
case "darwin":
dir = filepath.Join(os.Getenv("home"), "lib", "cache")
default:
dir = os.Getenv("XDG_CACHE_HOME")
if dir == "" {
dir = filepath.Join(os.Getenv("HOME"), ".cache")
}
}
if dir == "" {
dir = os.TempDir()
}
return filepath.Join(dir, "makeaddon")
}
func randomDirName() string {
const (
chars = "abcdefghijklmnopqrstuvwxyz0123456789"
length = 10
)
b := make([]byte, length)
for i := range b {
b[i] = chars[rand.Intn(len(chars))]
}
return string(b)
}
|
[
"\"LocalAppData\"",
"\"home\"",
"\"XDG_CACHE_HOME\"",
"\"HOME\""
] |
[] |
[
"home",
"LocalAppData",
"HOME",
"XDG_CACHE_HOME"
] |
[]
|
["home", "LocalAppData", "HOME", "XDG_CACHE_HOME"]
|
go
| 4 | 0 | |
go/libkb/home_nix_test.go
|
// Copyright 2015 Keybase, Inc. All rights reserved. Use of
// this source code is governed by the included BSD license.
// +build !windows
package libkb
import (
"os"
"path"
"strings"
"testing"
"github.com/stretchr/testify/require"
)
func TestPosix(t *testing.T) {
hf := NewHomeFinder("tester", nil, nil, nil, "posix", func() RunMode { return ProductionRunMode },
makeLogGetter(t), nil)
d := hf.CacheDir()
if !strings.Contains(d, ".cache/tester") {
t.Errorf("Bad Cache dir: %s", d)
}
d = hf.DataDir()
if !strings.Contains(d, ".local/share/tester") {
t.Errorf("Bad Data dir: %s", d)
}
d = hf.ConfigDir()
if !strings.Contains(d, ".config/tester") {
t.Errorf("Bad Config dir: %s", d)
}
}
func TestDarwinHomeFinder(t *testing.T) {
hf := NewHomeFinder("keybase", nil, nil, nil, "darwin", func() RunMode { return ProductionRunMode }, makeLogGetter(t), nil)
d := hf.ConfigDir()
if !strings.HasSuffix(d, "Library/Application Support/Keybase") {
t.Errorf("Bad config dir: %s", d)
}
d = hf.CacheDir()
if !strings.HasSuffix(d, "Library/Caches/Keybase") {
t.Errorf("Bad cache dir: %s", d)
}
hfInt := NewHomeFinder("keybase", func() string { return "home" }, nil, func() string { return "mobilehome" },
"darwin", func() RunMode { return ProductionRunMode }, makeLogGetter(t), nil)
hfDarwin := hfInt.(Darwin)
hfDarwin.forceIOS = true
hf = hfDarwin
d = hf.ConfigDir()
require.True(t, strings.HasSuffix(d, "Library/Application Support/Keybase"))
require.True(t, strings.HasPrefix(d, "mobilehome"))
d = hf.DataDir()
require.True(t, strings.HasSuffix(d, "Library/Application Support/Keybase"))
require.False(t, strings.HasPrefix(d, "mobilehome"))
require.True(t, strings.HasPrefix(d, "home"))
}
func TestDarwinHomeFinderInDev(t *testing.T) {
devHomeFinder := NewHomeFinder("keybase", nil, nil, nil, "darwin", func() RunMode { return DevelRunMode }, makeLogGetter(t), nil)
configDir := devHomeFinder.ConfigDir()
if !strings.HasSuffix(configDir, "Library/Application Support/KeybaseDevel") {
t.Errorf("Bad config dir: %s", configDir)
}
cacheDir := devHomeFinder.CacheDir()
if !strings.HasSuffix(cacheDir, "Library/Caches/KeybaseDevel") {
t.Errorf("Bad cache dir: %s", cacheDir)
}
}
func TestPosixRuntimeDir(t *testing.T) {
var cmdHome string
env := make(map[string]string)
ge := func(s string) string { return env[s] }
hf := NewHomeFinder("tester", func() string { return cmdHome }, nil, nil, "posix", func() RunMode { return ProductionRunMode }, makeLogGetter(t), ge)
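// The cases below check precedence: the command-line home override wins, then a
// custom HOME from the env, then XDG_RUNTIME_DIR, then ~/.config/tester as the fallback.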
origHomeEnv := os.Getenv("HOME")
// Custom env, custom cmd, XDG set
cmdHome = "/footown"
env["HOME"] = "/yoyo"
env["XDG_RUNTIME_DIR"] = "/barland"
require.Equal(t, "/footown/.config/tester", hf.RuntimeDir(), "expect custom cmd to win")
// Custom env, no cmd, XDG set
cmdHome = ""
env["HOME"] = "/yoyo"
env["XDG_RUNTIME_DIR"] = "/barland"
require.Equal(t, "/yoyo/.config/tester", hf.RuntimeDir(), "expect custom env to win")
// Standard env, no cmd, XDG set
cmdHome = ""
env["HOME"] = origHomeEnv
env["XDG_RUNTIME_DIR"] = "/barland"
require.Equal(t, "/barland/tester", hf.RuntimeDir(), "expect xdg to win")
// Standard env, no cmd, XDG unset
cmdHome = ""
env["HOME"] = origHomeEnv
delete(env, "XDG_RUNTIME_DIR")
require.Equal(t, path.Join(origHomeEnv, ".config", "tester"), hf.RuntimeDir(), "expect home to win")
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
Application/app.go
|
package main
import (
"fmt"
"html/template"
"log"
"net/http"
"os"
"github.com/Microsoft/ApplicationInsights-Go/appinsights"
)
type PageVars struct {
Message string
Language string
}
func main() {
client := appinsights.NewTelemetryClient(os.Getenv("APPINSIGHTS_INSTRUMENTATIONKEY"))
request := appinsights.NewRequestTelemetry("GET", "https://myapp.azurewebsites.net/", 1, "Success")
client.Track(request)
http.Handle("/css/", http.StripPrefix("/css/", http.FileServer(http.Dir("css"))))
http.Handle("/img/", http.StripPrefix("/img/", http.FileServer(http.Dir("img"))))
http.Handle("/fonts/", http.StripPrefix("/fonts/", http.FileServer(http.Dir("fonts"))))
http.HandleFunc("/", Home)
log.Fatal(http.ListenAndServe(getPort(), nil))
}
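// getPort returns the listen address, preferring HTTP_PLATFORM_PORT (typically injected by Azure App Service) and falling back to :80.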
func getPort() string {
p := os.Getenv("HTTP_PLATFORM_PORT")
if p != "" {
return ":" + p
}
return ":80"
}
func render(w http.ResponseWriter, tmpl string, pageVars PageVars) {
tmpl = fmt.Sprintf("views/%s", tmpl)
t, err := template.ParseFiles(tmpl)
if err != nil { // if there is an error
log.Print("template parsing error: ", err) // log it
}
err = t.Execute(w, pageVars) //execute the template and pass in the variables to fill the gaps
if err != nil { // if there is an error
log.Print("template executing error: ", err) //log it
}
}
func Home(w http.ResponseWriter, req *http.Request) {
pageVars := PageVars{
Message: "Success!",
Language: "Go Lang",
}
render(w, "index.html", pageVars)
}
|
[
"\"APPINSIGHTS_INSTRUMENTATIONKEY\"",
"\"HTTP_PLATFORM_PORT\""
] |
[] |
[
"APPINSIGHTS_INSTRUMENTATIONKEY",
"HTTP_PLATFORM_PORT"
] |
[]
|
["APPINSIGHTS_INSTRUMENTATIONKEY", "HTTP_PLATFORM_PORT"]
|
go
| 2 | 0 | |
python/elm_license_finder/main.py
|
import os
import sys
import re
import json
import platform
try:
from pathlib import Path
from colorclass import Color
from terminaltables import SingleTable
import semver
except ImportError:
print("ERROR: Need to install required modules.")
print("python3 -m pip install colorclass terminaltables semver")
sys.exit(1)
VERSION_REGEX = re.compile(r'^(\d+)\.(\d+)\.(\d+)$')
VERSION_REGEX_NOCAP = r'\d+\.\d+\.\d+'
COMPARATOR_REGEX = r'(?:<|>)=?'
RANGE = f'({VERSION_REGEX_NOCAP})\\s+' + \
f'({COMPARATOR_REGEX})\\s+' + \
'v\\s+' + \
f'({COMPARATOR_REGEX})\\s+' + \
f'({VERSION_REGEX_NOCAP})'
VRANGE_REGEX = re.compile(RANGE)
CMP = {
"<": [-1],
"<=": [-1, 0],
">": [1],
">=": [0, 1]
}
# will handle all dependencies
PROJECT_DEPS = {}
def get_versions_from_home(elm_home):
dirs = filter(
lambda d: os.path.isdir(os.path.join(elm_home, d)),
[dir for dir in os.listdir(elm_home)]
)
return [v for v in dirs if re.match(VERSION_REGEX_NOCAP, v)]
def version_in_range(low, low_op, version, high_op, high):
compare_low = semver.compare(low, version) in CMP[low_op]
compare_high = semver.compare(version, high) in CMP[high_op]
return compare_low and compare_high
def get_highest_version_from_dir(dir, cmp_version):
low, low_op, high_op, high = VRANGE_REGEX.findall(cmp_version)[0]
all_versions = [v for v in get_versions_from_home(dir)]
return max(list(filter(
lambda v: version_in_range(low, low_op, v, high_op, high),
all_versions
)))
def add_dep_to_dict(pkg_home, who, what, pkg, version, type):
with open(
os.path.join(pkg_home, who, what, version, "elm.json"), "r"
) as dep_file:
license = json.load(dep_file)["license"]
PROJECT_DEPS[pkg] = {
"version": version,
"license": license,
"type": type
}
def get_project_dependencies(json_directory):
json_path = os.path.join(
json_directory if json_directory else os.getcwd(),
"elm.json"
)
if platform.system() == "Windows":
ELM_HOME = os.path.join(str(Path.home()), "AppData", "Roaming", "elm")
else:
ELM_HOME = os.path.join(str(Path.home()), ".elm")
ELM_HOME = os.getenv("ELM_HOME", ELM_HOME)
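# An explicit ELM_HOME environment variable overrides the platform default chosen above.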
with open(json_path, "r") as elm_file:
json_data = json.load(elm_file)
dependencies = json_data["dependencies"]
type = json_data["type"]
elm_version = json_data["elm-version"]
if type == "package":
elm_version = get_highest_version_from_dir(ELM_HOME, elm_version)
package_home = os.path.join(ELM_HOME, elm_version, "packages")
if not os.path.exists(package_home):
print(f"I'm unable to find your package home: {package_home}")
raise
if type == "application":
for type in ["direct", "indirect"]:
deps = dependencies[type]
for pkg, ver in deps.items():
who, what = pkg.split("/")
add_dep_to_dict(package_home, who, what, pkg, ver, type)
elif type == "package":
for pkg, ver in dependencies.items():
who, what = pkg.split("/")
high_ver = get_highest_version_from_dir(
os.path.join(package_home, who, what),
ver
)
add_dep_to_dict(package_home, who, what, pkg, high_ver, "direct")
else:
print(f"""Unknown Elm project type of {type}.
Expected your elm.json to have either:
\"type\": \"application\"
or
\"type\": \"package\"
""")
raise
return PROJECT_DEPS
def output_tables(deps):
# Build Table Headers
lsc_table_data = [
[Color("{red}License{/red}"), Color("{red}Count{/red}")]
]
large_table_data = [
[
Color("{red}Package{/red}"),
Color("{red}Version{/red}"),
Color("{red}License{/red}"),
Color("{red}Type{/red}")
]
]
# Build table bodies
packages = list(deps.keys())
lsc_count_data = {"total": 0, "direct": 0, "indirect": 0}
for pkg in packages:
pkg_data = deps[pkg]
license = pkg_data["license"]
if license not in lsc_count_data.keys():
lsc_count_data[license] = 0
lsc_count_data[license] += 1
lsc_count_data["total"] += 1
lsc_count_data[pkg_data["type"]] += 1
large_table_data.append(
[pkg, pkg_data["version"], license, pkg_data["type"]]
)
for l, c in lsc_count_data.items():
if l not in ["total", "direct", "indirect"]:
lsc_table_data.append([l, str(c)])
# Format Tables
lsc_table = SingleTable(lsc_table_data)
lsc_table.inner_row_border = True
lsc_table.justify_columns = {0: 'center', 1: 'center'}
print("Dependencies:")
print(f"Total: {lsc_count_data['total']}")
print(f"Direct: {lsc_count_data['direct']}")
print(f"Indirect: {lsc_count_data['indirect']}")
print(lsc_table.table)
large_table = SingleTable(large_table_data)
large_table.inner_row_border = True
large_table.justify_columns = {
0: 'center',
1: 'center',
2: 'center',
3: 'center'
}
print(large_table.table)
|
[] |
[] |
[
"ELM_HOME"
] |
[]
|
["ELM_HOME"]
|
python
| 1 | 0 | |
bin/duplicates.py
|
#!/usr/bin/env python
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import with_statement
import argparse
import logging
import os
# import sys
# import platform
from imagededup.methods import PHash
from imagededup.methods import AHash
from imagededup.methods import DHash
from imagededup.methods import WHash
from imagededup.methods import CNN
__header__ = """
-`
... .o+`
.+++s+ .h`. `ooo/
`+++%++ .h+++ `+oooo:
+++o+++ .hhs++. `+oooooo:
+s%%so%.hohhoo' 'oooooo+:
`+ooohs+h+sh++`/: ++oooo+:
hh+o+hoso+h+`/++++.+++++++:
`+h+++h.+ `/++++++++++++++:
`/+++ooooooooooooo/`
./ooosssso++osssssso+`
.oossssso-````/osssss::`
-osssssso. :ssss``to.
:osssssss/ Mike osssl +
/ossssssss/ 8a +sssslb
`/ossssso+/:- -:/+ossss'.-
`+sso+:-` `.-/+oso:
`++:. `-/+/
.` `/
"""
_version = "0.1.0"
_author = "Mike"
_mail = "[email protected]"
def _parseArgs():
"""Parse CLI arguments
:returns: argparse.ArgumentParser class instance
"""
parser = argparse.ArgumentParser()
home = os.environ["HOME"]
parser.add_argument(
"--version",
dest="show_version",
action="store_true",
help="print script version and exit",
)
parser.add_argument(
"-l",
"--logging",
dest="logging",
default="INFO",
type=str,
help="Enable debug messages",
)
parser.add_argument(
"-o",
"--output",
dest="output",
default="duplicates.json",
type=str,
help="Output file",
)
parser.add_argument(
"-d",
"--image_dir",
dest="dir",
default=os.path.join(home, "Pictures"),
type=str,
help="Directory to inspect",
)
parser.add_argument(
"-t",
"--threshold",
dest="threshold",
default=12,
type=int,
help="Image duplicate threshold",
)
parser.add_argument("-s", "--score", dest="score", action="store_true", help="Enable scores")
parser.add_argument(
"-m",
"--method",
dest="method",
choices=["phash", "ahash", "dhash", "whash", "cnn"],
default="phash",
help="Hash method",
)
return parser.parse_args()
def main():
"""Main function
:returns: TODO
"""
args = _parseArgs()
if args.show_version:
print(_version)
return 0
if args.logging:
try:
level = int(args.logging)
except Exception:
if args.logging.lower() == "debug":
level = logging.DEBUG
elif args.logging.lower() == "info":
level = logging.INFO
elif args.logging.lower() == "warn" or args.logging.lower() == "warning":
level = logging.WARN
elif args.logging.lower() == "error":
level = logging.ERROR
elif args.logging.lower() == "critical":
level = logging.CRITICAL
else:
level = 0
logging.basicConfig(level=level, format="[%(levelname)s] - %(message)s")
outfile = args.output
image_dir = args.dir
threshold = args.threshold
score = args.score
logging.debug(f"Looking duplicates in {image_dir}")
logging.debug(f"Using threshold {threshold}")
logging.debug(f"Output file {outfile}")
methods = {
"phash": PHash,
"ahash": AHash,
"dhash": DHash,
"whash": WHash,
"cnn": CNN,
}
logging.debug(f"Using Hash method {args.method}")
hasher = methods[args.method.lower()]()
hasher.find_duplicates(
image_dir=image_dir,
max_distance_threshold=threshold,
scores=score,
outfile=outfile,
)
logging.info("Finished")
return 0
if __name__ == "__main__":
main()
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
Cloud-Scale_Text_Classification_with_CNNs_on_Azure/data/download_amazon_review_polarity.py
|
# Download dataset
import sys
import os
current_path = os.path.dirname(os.path.abspath(__file__))
root_path = current_path.rsplit('solutions')[0]
sys.path.insert(0,root_path)
from solutions.utils.python_utils import download_file
url = 'https://mxnetstorage.blob.core.windows.net/public/nlp/amazon_review_polarity_csv.tar.gz'
print("Downloading file %s" % url)
download_file(url)
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
readchat.py
|
try:
import time, os, requests, re
from discord_webhook import DiscordWebhook, DiscordEmbed
except:
import pip
pip.main(['install', 'requests'])
pip.main(['install', 'discord_webhook'])
import time, os, requests, re
from discord_webhook import DiscordWebhook, DiscordEmbed
################################################################ !variables!
# the url of your webhook
url = 'your webhook'
# text/author when its missing
missing = "none"
# standard author (when the messages doesn't come from a player)
standardAuthor = "System"
############################################################################
# Base from Stack Overflow
# https://stackoverflow.com/questions/13655083/automatically-read-chat-text-from-minecraft
def follow(thefile):
thefile.seek(0,2)
while True:
line = thefile.readline()
if not line:
time.sleep(0.1)
continue
yield line
if __name__ == "__main__":
logfile = open(os.getenv("APPDATA")+"/.minecraft/logs/latest.log", "r")
loglines = follow(logfile)
for line in loglines:
print(line)
if "[Client thread/INFO]: [CHAT]" in line:
author = missing
text = missing
raw = line.split("[Client thread/INFO]: [CHAT] ")
try:
message = raw[1].split(":",1)
author = message[0]
text = message[1]
x = re.findall("§.", author)
y = re.findall("§.", text)
for i in range(len(x)):
author = author.replace(x[i],"")
for i in range(len(y)):
text = text.replace(y[i],"")
except:
author = standardAuthor
text = raw[1]
y = re.findall("§.", text)
for i in range(len(y)):
text = text.replace(y[i],"")
webhook = DiscordWebhook(url=url, rate_limit_retry=True)
embed = DiscordEmbed(title=author, description=text, color='03b2f8')
webhook.add_embed(embed)
response = webhook.execute()
if response.status_code != 200:
print("Warning Webhook wasn't sent")
|
[] |
[] |
[
"APPDATA"
] |
[]
|
["APPDATA"]
|
python
| 1 | 0 | |
artifacts/term_python_docker_challenge/i_escaped.py
|
# uncompyle6 version 2.11.5
# Python bytecode 3.5 (3351)
# Decompiled from: Python 3.6.7 (default, Oct 22 2018, 11:32:17)
# [GCC 8.2.0]
# Embedded file name: i_escaped.py
# Compiled at: 2017-11-16 12:09:38
# Size of source mod 2**32: 1323 bytes
import json
import sys
import os
import time
import signal
from hashlib import sha256
import hmac
def calcHmac(secret, resourceId):
return hmac.new(secret.encode('utf8'), resourceId.encode('utf8'), sha256).hexdigest()
def printResponse(hash, resourceId):
print('#####hhc:%s#####' % json.dumps({'hash': hash,
'resourceId': resourceId}))
def signal_handler(signal, frame):
print('')
sys.exit(0)
def errorandexit(msg2):
error = "\nI'm very sorry, but we seem to have an internal issue preventing the successful\ncompletion of this challenge. Please email [email protected] with\na screen-shot or any other details you can provide. Thank you!\n\n"
print(error)
if msg2 != '':
print(msg2)
sys.exit(-1)
if __name__ == '__main__':
debuggin = False
r = None
signal.signal(signal.SIGINT, signal_handler)
try:
RESOURCEID = os.environ.get('RESOURCE_ID')
if RESOURCEID == '' or RESOURCEID == None:
errorandexit('Unable to obtain resource ID information.')
if debuggin:
print('\nRESOURCEID = ' + RESOURCEID)
key = '09f90c21d59845a7b0c972b8e871e8fe'
h = hmac.new(key.encode('utf8'), RESOURCEID.encode('utf8'), sha256)
payload = {'hash': h.hexdigest(),'resourceid': RESOURCEID}
sys.stdout.write('Loading, please wait.')
sys.stdout.flush()
for i in range(0, 5):
if not debuggin:
time.sleep(1)
sys.stdout.write('.')
sys.stdout.flush()
print('\n')
if 1 == 1:
hmac256 = calcHmac(key, RESOURCEID)
printResponse(hmac256, RESOURCEID)
time.sleep(0.5)
print(" \x1b[32m\n ____ _ _ \n | _ \\ _ _| |_| |__ ___ _ __ \n | |_) | | | | __| '_ \\ / _ \\| '_ \\ \n | __/| |_| | |_| | | | (_) | | | | \n |_|___ \\__, |\\__|_| |_|\\___/|_| |_| _ _ \n | ____||___/___ __ _ _ __ ___ __| | |\n | _| / __|/ __/ _` | '_ \\ / _ \\/ _` | |\n | |___\\__ \\ (_| (_| | |_) | __/ (_| |_|\n |_____|___/\\___\\__,_| .__/ \\___|\\__,_(_)\n |_| \n\n\x1b[91m\nThat's some fancy Python hacking -\nYou have sent that lizard packing!\n\x1b[92m\n-SugarPlum Mary\n \nYou escaped! Congratulations!\n")
else:
print("Sorry, I don't think that is correct answer.")
except Exception as e:
errorandexit(str(e))
sys.exit(0)
# okay decompiling i_escaped.extracted.pyc
|
[] |
[] |
[
"RESOURCE_ID"
] |
[]
|
["RESOURCE_ID"]
|
python
| 1 | 0 | |
src/cmd/link/dwarf_test.go
|
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"cmd/internal/objfile"
"debug/dwarf"
"internal/testenv"
"io"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"runtime"
"strings"
"testing"
)
func testDWARF(t *testing.T, buildmode string, expectDWARF bool, env ...string) {
testenv.MustHaveCGO(t)
testenv.MustHaveGoBuild(t)
if runtime.GOOS == "plan9" {
t.Skip("skipping on plan9; no DWARF symbol table in executables")
}
out, err := exec.Command(testenv.GoToolPath(t), "list", "-f", "{{.Stale}}", "cmd/link").CombinedOutput()
if err != nil {
t.Fatalf("go list: %v\n%s", err, out)
}
if string(out) != "false\n" {
if os.Getenv("GOROOT_FINAL_OLD") != "" {
t.Skip("cmd/link is stale, but $GOROOT_FINAL_OLD is set")
}
t.Fatalf("cmd/link is stale - run go install cmd/link")
}
tmpDir, err := ioutil.TempDir("", "go-link-TestDWARF")
if err != nil {
t.Fatal("TempDir failed: ", err)
}
defer os.RemoveAll(tmpDir)
for _, prog := range []string{"testprog", "testprogcgo"} {
t.Run(prog, func(t *testing.T) {
exe := filepath.Join(tmpDir, prog+".exe")
dir := "../../runtime/testdata/" + prog
cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", exe)
if buildmode != "" {
cmd.Args = append(cmd.Args, "-buildmode", buildmode)
}
cmd.Args = append(cmd.Args, dir)
if env != nil {
cmd.Env = append(os.Environ(), env...)
}
out, err := cmd.CombinedOutput()
if err != nil {
t.Fatalf("go build -o %v %v: %v\n%s", exe, dir, err, out)
}
if buildmode == "c-archive" {
// Extract the archive and use the go.o object within.
cmd := exec.Command("ar", "-x", exe)
cmd.Dir = tmpDir
if out, err := cmd.CombinedOutput(); err != nil {
t.Fatalf("ar -x %s: %v\n%s", exe, err, out)
}
exe = filepath.Join(tmpDir, "go.o")
}
f, err := objfile.Open(exe)
if err != nil {
t.Fatal(err)
}
defer f.Close()
syms, err := f.Symbols()
if err != nil {
t.Fatal(err)
}
var addr uint64
for _, sym := range syms {
if sym.Name == "main.main" {
addr = sym.Addr
break
}
}
if addr == 0 {
t.Fatal("cannot find main.main in symbols")
}
d, err := f.DWARF()
if err != nil {
if expectDWARF {
t.Fatal(err)
}
return
} else {
if !expectDWARF {
t.Fatal("unexpected DWARF section")
}
}
// TODO: We'd like to use filepath.Join here.
// Also related: golang.org/issue/19784.
wantFile := path.Join(prog, "main.go")
wantLine := 24
r := d.Reader()
var line dwarf.LineEntry
for {
cu, err := r.Next()
if err != nil {
t.Fatal(err)
}
if cu == nil {
break
}
if cu.Tag != dwarf.TagCompileUnit {
r.SkipChildren()
continue
}
lr, err := d.LineReader(cu)
if err != nil {
t.Fatal(err)
}
for {
err := lr.Next(&line)
if err == io.EOF {
break
}
if err != nil {
t.Fatal(err)
}
if line.Address == addr {
if !strings.HasSuffix(line.File.Name, wantFile) || line.Line != wantLine {
t.Errorf("%#x is %s:%d, want %s:%d", addr, line.File.Name, line.Line, filepath.Join("...", wantFile), wantLine)
}
return
}
}
}
t.Fatalf("did not find file:line for %#x (main.main)", addr)
})
}
}
func TestDWARF(t *testing.T) {
testDWARF(t, "", true)
}
func TestDWARFiOS(t *testing.T) {
// Normally we run TestDWARF on native platform. But on iOS we don't have
// go build, so we do this test with a cross build.
// Only run this on darwin/amd64, where we can cross build for iOS.
if testing.Short() {
t.Skip("skipping in short mode")
}
if runtime.GOARCH != "amd64" || runtime.GOOS != "darwin" {
t.Skip("skipping on non-darwin/amd64 platform")
}
if err := exec.Command("xcrun", "--help").Run(); err != nil {
t.Skipf("error running xcrun, required for iOS cross build: %v", err)
}
cc := "CC=" + runtime.GOROOT() + "/misc/ios/clangwrap.sh"
// iOS doesn't allow unmapped segments, so iOS executables don't have DWARF.
testDWARF(t, "", false, cc, "CGO_ENABLED=1", "GOOS=darwin", "GOARCH=arm", "GOARM=7")
testDWARF(t, "", false, cc, "CGO_ENABLED=1", "GOOS=darwin", "GOARCH=arm64")
// However, c-archive iOS objects have embedded DWARF.
testDWARF(t, "c-archive", true, cc, "CGO_ENABLED=1", "GOOS=darwin", "GOARCH=arm", "GOARM=7")
testDWARF(t, "c-archive", true, cc, "CGO_ENABLED=1", "GOOS=darwin", "GOARCH=arm64")
}
|
[
"\"GOROOT_FINAL_OLD\""
] |
[] |
[
"GOROOT_FINAL_OLD"
] |
[]
|
["GOROOT_FINAL_OLD"]
|
go
| 1 | 0 | |
settings.py
|
# -*- coding: utf-8 -*-
"""
Please do not modify this file.
If you need to change anything in the settings, add it to the config/default.py file instead.
If you have any questions, please contact the BlueKing Assistant (蓝鲸助手).
"""
import os
run_env = ""
# In V3, the environment is indicated by the BKPAAS_ENVIRONMENT environment variable
if 'BKPAAS_ENVIRONMENT' in os.environ:
ENVIRONMENT = os.getenv('BKPAAS_ENVIRONMENT', 'dev')
run_env = "dev"
# In V2, the environment is indicated by the BK_ENV environment variable
else:
PAAS_V2_ENVIRONMENT = os.environ.get('BK_ENV', 'development')
ENVIRONMENT = {
'development': 'dev',
'testing': 'stag',
'production': 'prod',
}.get(PAAS_V2_ENVIRONMENT)
run_env = ENVIRONMENT
DJANGO_CONF_MODULE = 'config.{env}'.format(env=ENVIRONMENT)
try:
_module = __import__(DJANGO_CONF_MODULE, globals(), locals(), ['*'])
except ImportError as e:
raise ImportError("Could not import config '%s' (Is it on sys.path?): %s"
% (DJANGO_CONF_MODULE, e))
for _setting in dir(_module):
if _setting == _setting.upper():
locals()[_setting] = getattr(_module, _setting)
|
[] |
[] |
[
"BKPAAS_ENVIRONMENT",
"BK_ENV"
] |
[]
|
["BKPAAS_ENVIRONMENT", "BK_ENV"]
|
python
| 2 | 0 | |
kubetest/kubeadmdind/kubeadm_dind.go
|
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package kubeadmdind implements a kubetest deployer based on the scripts
// in the github.com/kubernetes-sigs/kubeadm-dind-cluster repo.
// This deployer can be used to create a multinode, containerized Kubernetes
// cluster that runs inside a Prow DinD container.
package kubeadmdind
import (
"bytes"
"errors"
"flag"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"k8s.io/test-infra/kubetest/process"
)
var (
// Names that are fixed in the Kubeadm DinD scripts
kubeMasterPrefix = "kube-master"
kubeNodePrefix = "kube-node"
// Systemd service logs to collect on the host container
hostServices = []string{
"docker",
}
// Docker commands to run on the host container and embedded node
// containers for log dump
dockerCommands = []struct {
cmd string
logFile string
}{
{"docker images", "docker_images.log"},
{"docker ps -a", "docker_ps.log"},
}
// Systemd service logs to collect on the master and worker embedded
// node containers for log dump
systemdServices = []string{
"kubelet",
"docker",
}
masterKubePods = []string{
"kube-apiserver",
"kube-scheduler",
"kube-controller-manager",
"kube-proxy",
"etcd",
}
nodeKubePods = []string{
"kube-proxy",
"kube-dns",
}
// Where to look for (nested) container log files on the node containers
nodeLogDir = "/var/log"
// Relative path to Kubernetes source tree
kubeOrg = "k8s.io"
kubeRepo = "kubernetes"
// Kubeadm-DinD-Cluster (kdc) repo and main script
kdcOrg = "github.com/kubernetes-sigs"
kdcRepo = "kubeadm-dind-cluster"
kdcScript = "fixed/dind-cluster-stable.sh"
// Number of worker nodes to create for testing
numWorkerNodes = "2"
// Kubeadm-DinD specific flags
kubeadmDinDIPMode = flag.String("kubeadm-dind-ip-mode", "ipv4", "(Kubeadm-DinD only) IP Mode. Can be 'ipv4' (default), 'ipv6', or 'dual-stack'.")
kubeadmDinDK8sTarFile = flag.String("kubeadm-dind-k8s-tar-file", "", "(Kubeadm-DinD only) Location of tar file containing Kubernetes server binaries.")
k8sExtractSubDir = "kubernetes/server/bin"
k8sTestBinSubDir = "platforms/linux/amd64"
testBinDir = "/usr/bin"
ipv6EnableCmd = "sysctl -w net.ipv6.conf.all.disable_ipv6=0"
)
// Deployer is used to implement a kubetest deployer interface
type Deployer struct {
ipMode string
k8sTarFile string
hostCmder execCmder
control *process.Control
}
// NewDeployer returns a new Kubeadm-DinD Deployer
func NewDeployer(control *process.Control) (*Deployer, error) {
d := &Deployer{
ipMode: *kubeadmDinDIPMode,
k8sTarFile: *kubeadmDinDK8sTarFile,
hostCmder: new(hostCmder),
control: control,
}
switch d.ipMode {
case "ipv4":
// Valid value
case "ipv6", "dual-stack":
log.Printf("Enabling IPv6")
if err := d.run(ipv6EnableCmd); err != nil {
return nil, err
}
default:
return nil, fmt.Errorf("configured --ip-mode=%s is not supported for --deployment=kubeadmdind", d.ipMode)
}
return d, nil
}
// execCmd executes a command on the host container.
func (d *Deployer) execCmd(cmd string) *exec.Cmd {
return d.hostCmder.execCmd(cmd)
}
// run runs a command on the host container, and prints any errors.
func (d *Deployer) run(cmd string) error {
err := d.control.FinishRunning(d.execCmd(cmd))
if err != nil {
fmt.Printf("Error: '%v'", err)
}
return err
}
// getOutput runs a command on the host container, prints any errors,
// and returns command output.
func (d *Deployer) getOutput(cmd string) ([]byte, error) {
execCmd := d.execCmd(cmd)
o, err := d.control.Output(execCmd)
if err != nil {
log.Printf("Error: '%v'", err)
return nil, err
}
return o, nil
}
// outputWithStderr runs a command on the host container and returns
// combined stdout and stderr.
func (d *Deployer) outputWithStderr(cmd *exec.Cmd) ([]byte, error) {
var stdOutErr bytes.Buffer
cmd.Stdout = &stdOutErr
cmd.Stderr = &stdOutErr
err := d.control.FinishRunning(cmd)
return stdOutErr.Bytes(), err
}
// Up brings up a multinode, containerized Kubernetes cluster inside a
// Prow DinD container.
func (d *Deployer) Up() error {
var binDir string
if d.k8sTarFile != "" {
// Extract Kubernetes server binaries
cmd := fmt.Sprintf("tar -xvf %s", *kubeadmDinDK8sTarFile)
if err := d.run(cmd); err != nil {
return err
}
// Derive the location of the extracted binaries
cwd, err := os.Getwd()
if err != nil {
return err
}
binDir = filepath.Join(cwd, k8sExtractSubDir)
} else {
// K-D-C scripts must be run from Kubernetes source tree for
// building binaries.
kubeDir, err := findPath(kubeOrg, kubeRepo, "")
if err == nil {
err = os.Chdir(kubeDir)
}
if err != nil {
return err
}
}
if err := d.setEnv(binDir); err != nil {
return err
}
// Bring up a cluster inside the host Prow container
script, err := findPath(kdcOrg, kdcRepo, kdcScript)
if err != nil {
return err
}
return d.run(script + " up")
}
// setEnv sets environment variables for building and testing
// a cluster.
func (d *Deployer) setEnv(k8sBinDir string) error {
var doBuild string
switch {
case k8sBinDir == "":
doBuild = "y"
default:
doBuild = "n"
}
// Set KUBERNETES_CONFORMANCE_TEST so that the master IP address
// is derived from kube config rather than through gcloud.
envMap := map[string]string{
"NUM_NODES": numWorkerNodes,
"DIND_K8S_BIN_DIR": k8sBinDir,
"BUILD_KUBEADM": doBuild,
"BUILD_HYPERKUBE": doBuild,
"IP_MODE": d.ipMode,
"KUBERNETES_CONFORMANCE_TEST": "y",
"NAT64_V4_SUBNET_PREFIX": "172.20",
}
for env, val := range envMap {
if err := os.Setenv(env, val); err != nil {
return err
}
}
return nil
}
// IsUp determines if a cluster is up based on whether one or more nodes
// is ready.
func (d *Deployer) IsUp() error {
n, err := d.clusterSize()
if err != nil {
return err
}
if n <= 0 {
return fmt.Errorf("cluster found, but %d nodes reported", n)
}
return nil
}
// DumpClusterLogs copies dumps docker state and service logs for:
// - Host Prow container
// - Kube master node container(s)
// - Kube worker node containers
// to a local artifacts directory.
func (d *Deployer) DumpClusterLogs(localPath, gcsPath string) error {
// Save logs from the host container
if err := d.saveHostLogs(localPath); err != nil {
return err
}
// Save logs from master node container(s)
if err := d.saveMasterNodeLogs(localPath); err != nil {
return err
}
// Save logs from worker node containers
return d.saveWorkerNodeLogs(localPath)
}
// TestSetup builds end-to-end test and ginkgo binaries.
func (d *Deployer) TestSetup() error {
if d.k8sTarFile == "" {
// Build e2e.test and ginkgo binaries
if err := d.run("make WHAT=test/e2e/e2e.test"); err != nil {
return err
}
return d.run("make WHAT=vendor/github.com/onsi/ginkgo/ginkgo")
}
// Copy downloaded e2e.test and ginkgo binaries
for _, file := range []string{"e2e.test", "ginkgo"} {
srcPath := filepath.Join(k8sTestBinSubDir, file)
cmd := fmt.Sprintf("cp %s %s", srcPath, testBinDir)
if err := d.run(cmd); err != nil {
return err
}
}
return nil
}
// Down brings the DinD-based cluster down and cleans up any DinD state
func (d *Deployer) Down() error {
// Bring the cluster down and clean up kubeadm-dind-cluster state
script, err := findPath(kdcOrg, kdcRepo, kdcScript)
if err != nil {
return err
}
clusterDownCommands := []string{
script + " down",
script + " clean",
}
for _, cmd := range clusterDownCommands {
if err := d.run(cmd); err != nil {
return err
}
}
return nil
}
// GetClusterCreated is not yet implemented.
func (d *Deployer) GetClusterCreated(gcpProject string) (time.Time, error) {
return time.Time{}, errors.New("not implemented")
}
// findPath looks for the existence of a file or directory based on a
// a github organization, github repo, and a relative path. It looks
// for the file/directory in this order:
// - $WORKSPACE/<gitOrg>/<gitRepo>/<gitFile>
// - $GOPATH/src/<gitOrg>/<gitRepo>/<gitFile>
// - ./<gitRepo>/<gitFile>
// - ./<gitFile>
// - ../<gitFile>
// and returns the path for the first match or returns an error.
func findPath(gitOrg, gitRepo, gitFile string) (string, error) {
workPath := os.Getenv("WORKSPACE")
if workPath != "" {
workPath = filepath.Join(workPath, gitOrg, gitRepo, gitFile)
}
goPath := os.Getenv("GOPATH")
if goPath != "" {
goPath = filepath.Join(goPath, "src", gitOrg, gitRepo, gitFile)
}
relPath := filepath.Join(gitRepo, gitFile)
cwd, err := os.Getwd()
if err != nil {
return "", err
}
parentDir := filepath.Dir(cwd)
parentPath := filepath.Join(parentDir, gitFile)
paths := []string{workPath, goPath, relPath, gitFile, parentPath}
for _, path := range paths {
_, err := os.Stat(path)
if err == nil {
return path, nil
}
}
err = fmt.Errorf("could not locate %s/%s/%s", gitOrg, gitRepo, gitFile)
return "", err
}
// execCmder defines an interface for providing a wrapper for processing
// command line strings before calling os/exec.Command().
// There are two implementations of this interface defined below:
// - hostCmder: For executing commands locally (e.g. in Prow container).
// - nodeCmder: For executing commands on node containers embedded
// in the Prow container.
type execCmder interface {
execCmd(cmd string) *exec.Cmd
}
// hostCmder implements the execCmder interface for processing commands
// locally (e.g. in Prow container).
type hostCmder struct{}
// execCmd splits a command line string into a command (first word) and
// remaining arguments in variadic form, as required by exec.Command().
func (h *hostCmder) execCmd(cmd string) *exec.Cmd {
words := strings.Fields(cmd)
return exec.Command(words[0], words[1:]...)
}
// nodeCmder implements the nodeExecCmder interface for processing
// commands in an embedded node container.
type nodeCmder struct {
node string
}
func newNodeCmder(node string) *nodeCmder {
cmder := new(nodeCmder)
cmder.node = node
return cmder
}
// execCmd creates an exec.Cmd structure for running a command line on a
// nested node container in the host container. It is equivalent to running
// a command via 'docker exec <node-container-name> <cmd>'.
func (n *nodeCmder) execCmd(cmd string) *exec.Cmd {
args := strings.Fields(fmt.Sprintf("exec %s %s", n.node, cmd))
return exec.Command("docker", args...)
}
// getNode returns the node name for a nodeExecCmder
func (n *nodeCmder) getNode() string {
return n.node
}
// execCmdSaveLog executes a command either in the host container or
// in an embedded node container, and writes the combined stdout and
// stderr to a log file in a local artifacts directory. (Stderr is
// required because running 'docker logs ...' on nodes sometimes
// returns results as stderr).
func (d *Deployer) execCmdSaveLog(cmder execCmder, cmd string, logDir string, logFile string) error {
execCmd := cmder.execCmd(cmd)
o, err := d.outputWithStderr(execCmd)
if err != nil {
log.Printf("%v", err)
if len(o) > 0 {
log.Printf("%s", o)
}
// Ignore the command error and continue collecting logs
return nil
}
logPath := filepath.Join(logDir, logFile)
return ioutil.WriteFile(logPath, o, 0644)
}
// saveDockerState saves docker state for either a host Prow container
// or an embedded node container.
func (d *Deployer) saveDockerState(cmder execCmder, logDir string) error {
for _, dockerCommand := range dockerCommands {
if err := d.execCmdSaveLog(cmder, dockerCommand.cmd, logDir, dockerCommand.logFile); err != nil {
return err
}
}
return nil
}
// saveServiceLogs saves logs for a list of systemd services on either
// a host Prow container or an embedded node container.
func (d *Deployer) saveServiceLogs(cmder execCmder, services []string, logDir string) error {
for _, svc := range services {
cmd := fmt.Sprintf("journalctl -u %s.service", svc)
logFile := fmt.Sprintf("%s.log", svc)
if err := d.execCmdSaveLog(cmder, cmd, logDir, logFile); err != nil {
return err
}
}
return nil
}
// clusterSize determines the number of nodes in a cluster.
func (d *Deployer) clusterSize() (int, error) {
o, err := d.getOutput("kubectl get nodes --no-headers")
if err != nil {
return -1, fmt.Errorf("kubectl get nodes failed: %s\n%s", err, string(o))
}
trimmed := strings.TrimSpace(string(o))
if trimmed != "" {
return len(strings.Split(trimmed, "\n")), nil
}
return 0, nil
}
// Create a local log artifacts directory
func (d *Deployer) makeLogDir(logDir string) error {
cmd := fmt.Sprintf("mkdir -p %s", logDir)
execCmd := d.execCmd(cmd)
return d.control.FinishRunning(execCmd)
}
// saveHostLogs collects service logs and docker state from the host
// container, and saves the logs in a local artifacts directory.
func (d *Deployer) saveHostLogs(artifactsDir string) error {
log.Printf("Saving logs from host container")
// Create directory for the host container artifacts
logDir := filepath.Join(artifactsDir, "host-container")
if err := d.run("mkdir -p " + logDir); err != nil {
return err
}
// Save docker state for the host container
if err := d.saveDockerState(d.hostCmder, logDir); err != nil {
return err
}
// Copy service logs from the node container
return d.saveServiceLogs(d.hostCmder, hostServices, logDir)
}
// saveMasterNodeLogs collects docker state, service logs, and Kubernetes
// system pod logs from all nested master node containers that are running
// on the host container, and saves the logs in a local artifacts directory.
func (d *Deployer) saveMasterNodeLogs(artifactsDir string) error {
masters, err := d.detectNodeContainers(kubeMasterPrefix)
if err != nil {
return err
}
for _, master := range masters {
if err := d.saveNodeLogs(master, artifactsDir, systemdServices, masterKubePods); err != nil {
return err
}
}
return nil
}
// saveWorkerNodeLogs collects docker state, service logs, and Kubernetes
// system pod logs from all nested worker node containers that are running
// on the host container, and saves the logs in a local artifacts directory.
func (d *Deployer) saveWorkerNodeLogs(artifactsDir string) error {
nodes, err := d.detectNodeContainers(kubeNodePrefix)
if err != nil {
return err
}
for _, node := range nodes {
if err := d.saveNodeLogs(node, artifactsDir, systemdServices, nodeKubePods); err != nil {
return err
}
}
return nil
}
// detectNodeContainers creates a list of names for either all master or all
// worker node containers. It does this by running 'kubectl get nodes ... '
// and searching for container names that begin with a specified name prefix.
func (d *Deployer) detectNodeContainers(namePrefix string) ([]string, error) {
log.Printf("Looking for container names beginning with '%s'", namePrefix)
o, err := d.getOutput("kubectl get nodes --no-headers")
if err != nil {
return nil, err
}
var nodes []string
trimmed := strings.TrimSpace(string(o))
if trimmed != "" {
lines := strings.Split(trimmed, "\n")
for _, line := range lines {
fields := strings.Fields(line)
name := fields[0]
if strings.Contains(name, namePrefix) {
nodes = append(nodes, name)
}
}
}
return nodes, nil
}
// detectKubeContainers creates a list of containers (either running or
// exited) on a master or worker node whose names contain any of a list of
// Kubernetes system pod name substrings.
func (d *Deployer) detectKubeContainers(nodeCmder execCmder, node string, kubePods []string) ([]string, error) {
// Run 'docker ps -a' on the node container
cmd := fmt.Sprintf("docker ps -a")
execCmd := nodeCmder.execCmd(cmd)
o, err := d.control.Output(execCmd)
if err != nil {
log.Printf("Error running '%s' on %s: '%v'", cmd, node, err)
return nil, err
}
// Find container names that contain any of a list of pod name substrings
var containers []string
if trimmed := strings.TrimSpace(string(o)); trimmed != "" {
lines := strings.Split(trimmed, "\n")
for _, line := range lines {
if fields := strings.Fields(line); len(fields) > 0 {
name := fields[len(fields)-1]
if strings.Contains(name, "_POD_") {
// Ignore infra containers
continue
}
for _, pod := range kubePods {
if strings.Contains(name, pod) {
containers = append(containers, name)
break
}
}
}
}
}
return containers, nil
}
// saveNodeLogs collects docker state, service logs, and Kubernetes
// system pod logs for a given node container, and saves the logs in a local
// artifacts directory.
func (d *Deployer) saveNodeLogs(node string, artifactsDir string, services []string, kubePods []string) error {
log.Printf("Saving logs from node container %s", node)
// Create directory for node container artifacts
logDir := filepath.Join(artifactsDir, node)
if err := d.run("mkdir -p " + logDir); err != nil {
return err
}
cmder := newNodeCmder(node)
// Save docker state for this node
if err := d.saveDockerState(cmder, logDir); err != nil {
return err
}
// Copy service logs from the node container
if err := d.saveServiceLogs(cmder, services, logDir); err != nil {
return err
}
// Copy log files for kube system pod containers (running or exited)
// from this node container.
containers, err := d.detectKubeContainers(cmder, node, kubePods)
if err != nil {
return err
}
for _, container := range containers {
cmd := fmt.Sprintf("docker logs %s", container)
logFile := fmt.Sprintf("%s.log", container)
if err := d.execCmdSaveLog(cmder, cmd, logDir, logFile); err != nil {
return err
}
}
return nil
}
|
[
"\"WORKSPACE\"",
"\"GOPATH\""
] |
[] |
[
"GOPATH",
"WORKSPACE"
] |
[]
|
["GOPATH", "WORKSPACE"]
|
go
| 2 | 0 | |
addfeed.py
|
import yaml, os
input = os.environ.get('relative') + os.environ.get('config')
output = os.environ.get('ini')
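# 'relative' + 'config' locate the YAML feed definition; 'ini' is the feeds file that the new section is appended to.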
submission = "\n"
with open(input,'r') as file:
feed = yaml.load(file, Loader=yaml.FullLoader)
for k, v in feed.items():
if k == "_id":
submission += '[' + v + ']'
if k == "title":
submission += '\n title = ' + v
if k == "link":
submission += '\n link = ' + v
if k == "feed":
submission += '\n feed = ' + v
file.close()
print(submission)
with open(output,'a') as ini:
ini.write(submission)
ini.close()
|
[] |
[] |
[
"config",
"relative",
"ini"
] |
[]
|
["config", "relative", "ini"]
|
python
| 3 | 0 | |
main_test.go
|
package main
import (
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"os"
"testing"
"time"
)
func Test_cors(t *testing.T) {
for _, method := range []string{
http.MethodGet,
http.MethodHead,
http.MethodOptions,
http.MethodPut,
http.MethodPost,
http.MethodPatch,
http.MethodDelete,
} {
t.Run(method, func(t *testing.T) {
req := httptest.NewRequest(http.MethodOptions, "http://foo.co", nil)
req.Header.Set("Origin", "http://foo.co")
rec := httptest.NewRecorder()
cors(rec, req)
resp := rec.Result()
if resp.StatusCode != http.StatusOK {
t.Fatalf("expected %d; got %d", http.StatusOK, rec.Code)
}
v := resp.Header.Get("Access-Control-Allow-Origin")
if v != "http://foo.co" {
t.Fatalf("expected %q, got %q", "http://foo.co", v)
}
v = resp.Header.Get("Access-Control-Allow-Methods")
if v != AllowMethods.String() {
t.Fatalf("expected %q, got %q", AllowMethods.String(), v)
}
v = resp.Header.Get("Access-Control-Allow-Headers")
if v != AllowHeaders.String() {
t.Fatalf("expected %q, got %q", AllowHeaders.String(), v)
}
})
}
}
func Test_getEnvString(t *testing.T) {
expected := os.Getenv("GOPATH")
t.Run("existing values are returned", func(t *testing.T) {
if actual := getEnvString("GOPATH", "!wrong!"); actual != expected {
t.Fatalf("expected %q, got %q", expected, actual)
}
})
t.Run("missing values use the fallback", func(t *testing.T) {
expected := "fallback value"
if actual := getEnvString("blob blub", expected); actual != expected {
t.Fatalf("expected %q, got %q", expected, actual)
}
})
}
func Test_getEnvDuration(t *testing.T) {
expected := time.Second * 7
t.Run("gets a duration from env or fallback", func(t *testing.T) {
if actual := getEnvDuration("missing value", expected); actual != expected {
t.Fatalf("expected %v, got %v", expected, actual)
}
})
}
func Test_get(t *testing.T) {
t.Run("gives 400 if the URI isn't supplied", func(t *testing.T) {
w := httptest.NewRecorder()
r := httptest.NewRequest(http.MethodGet, "https://example.com/", nil)
get(w, r)
expected := http.StatusBadRequest
resp := w.Result()
if resp.StatusCode != expected {
t.Fatalf("expected %d, got %d", expected, resp.StatusCode)
}
})
t.Run("gives 400 if the URI is malformed", func(t *testing.T) {
w := httptest.NewRecorder()
r := httptest.NewRequest(http.MethodGet, "https://example.com/", nil)
r.URL.Query().Set("uri", "bad uri")
get(w, r)
expected := http.StatusBadRequest
resp := w.Result()
if resp.StatusCode != expected {
t.Fatalf("expected %d, got %d", expected, resp.StatusCode)
}
})
t.Run("proxies to the given URI ", func(t *testing.T) {
expected := http.StatusOK
ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
http.Error(w, "proxied!", expected)
}))
u := fmt.Sprintf("https://example.com/?uri=%s", url.QueryEscape(ts.URL))
w := httptest.NewRecorder()
r := httptest.NewRequest(http.MethodGet, u, nil)
get(w, r)
resp := w.Result()
if resp.StatusCode != expected {
t.Errorf("expected %d, got %d", expected, resp.StatusCode)
}
body, _ := ioutil.ReadAll(resp.Body)
if string(body) != "proxied!\n" {
t.Errorf("expected %q, got %q", "proxied!\n", string(body))
}
})
}
func Test_convert(t *testing.T) {
t.Run("gives 400 if the URI isn't supplied", func(t *testing.T) {
w := httptest.NewRecorder()
r := httptest.NewRequest(http.MethodGet, "https://example.com/", nil)
convert(w, r)
expected := http.StatusBadRequest
resp := w.Result()
if resp.StatusCode != expected {
t.Fatalf("expected %d, got %d", expected, resp.StatusCode)
}
})
t.Run("gives 400 if the URI is malformed", func(t *testing.T) {
w := httptest.NewRecorder()
r := httptest.NewRequest(http.MethodGet, "https://example.com/", nil)
r.URL.Query().Set("uri", "bad uri")
convert(w, r)
expected := http.StatusBadRequest
resp := w.Result()
if resp.StatusCode != expected {
t.Fatalf("expected %d, got %d", expected, resp.StatusCode)
}
})
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
database/es6/esutil/bulk_indexer_internal_test.go
|
// Licensed to Elasticsearch B.V. under one or more agreements.
// Elasticsearch B.V. licenses this file to you under the Apache 2.0 License.
// See the LICENSE file in the project root for more information.
// +build !integration
package esutil
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"reflect"
"strconv"
"strings"
"sync"
"sync/atomic"
"testing"
"time"
"github.com/jinycoo/jinygo/database/es6"
"github.com/jinycoo/jinygo/database/es6/estransport"
)
var defaultRoundTripFunc = func(*http.Request) (*http.Response, error) {
return &http.Response{Body: ioutil.NopCloser(strings.NewReader(`{}`))}, nil
}
type mockTransport struct {
RoundTripFunc func(*http.Request) (*http.Response, error)
}
func (t *mockTransport) RoundTrip(req *http.Request) (*http.Response, error) {
if t.RoundTripFunc == nil {
return defaultRoundTripFunc(req)
}
return t.RoundTripFunc(req)
}
func TestBulkIndexer(t *testing.T) {
t.Run("Basic", func(t *testing.T) {
var (
wg sync.WaitGroup
countReqs int
testfile string
numItems = 6
)
es, _ := es6.NewClient(es6.Config{Transport: &mockTransport{
RoundTripFunc: func(*http.Request) (*http.Response, error) {
countReqs++
switch countReqs {
case 1:
testfile = "testdata/bulk_response_1a.json"
case 2:
testfile = "testdata/bulk_response_1b.json"
case 3:
testfile = "testdata/bulk_response_1c.json"
}
bodyContent, _ := ioutil.ReadFile(testfile)
return &http.Response{Body: ioutil.NopCloser(bytes.NewBuffer(bodyContent))}, nil
},
}})
cfg := BulkIndexerConfig{
NumWorkers: 1,
FlushBytes: 75,
FlushInterval: time.Hour, // Disable auto-flushing, because response doesn't match number of items
Client: es}
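// Setting DEBUG in the environment enables verbose bulk indexer logging during local test runs.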
if os.Getenv("DEBUG") != "" {
cfg.DebugLogger = log.New(os.Stdout, "", 0)
}
bi, _ := NewBulkIndexer(cfg)
for i := 1; i <= numItems; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
err := bi.Add(context.Background(), BulkIndexerItem{
Action: "foo",
DocumentType: "bar",
DocumentID: strconv.Itoa(i),
Body: strings.NewReader(fmt.Sprintf(`{"title":"foo-%d"}`, i)),
})
if err != nil {
t.Fatalf("Unexpected error: %s", err)
}
}(i)
}
wg.Wait()
if err := bi.Close(context.Background()); err != nil {
t.Errorf("Unexpected error: %s", err)
}
stats := bi.Stats()
// added = numitems
if stats.NumAdded != uint64(numItems) {
t.Errorf("Unexpected NumAdded: want=%d, got=%d", numItems, stats.NumAdded)
}
// flushed = numitems - 1x conflict + 1x not_found
if stats.NumFlushed != uint64(numItems-2) {
t.Errorf("Unexpected NumFlushed: want=%d, got=%d", numItems-2, stats.NumFlushed)
}
// failed = 1x conflict + 1x not_found
if stats.NumFailed != 2 {
t.Errorf("Unexpected NumFailed: want=%d, got=%d", 2, stats.NumFailed)
}
// indexed = 1x
if stats.NumIndexed != 1 {
t.Errorf("Unexpected NumIndexed: want=%d, got=%d", 1, stats.NumIndexed)
}
// created = 1x
if stats.NumCreated != 1 {
t.Errorf("Unexpected NumCreated: want=%d, got=%d", 1, stats.NumCreated)
}
// deleted = 1x
if stats.NumDeleted != 1 {
t.Errorf("Unexpected NumDeleted: want=%d, got=%d", 1, stats.NumDeleted)
}
if stats.NumUpdated != 1 {
t.Errorf("Unexpected NumUpdated: want=%d, got=%d", 1, stats.NumUpdated)
}
// 3 items * 40 bytes, 2 workers, 1 request per worker
if stats.NumRequests != 3 {
t.Errorf("Unexpected NumRequests: want=%d, got=%d", 3, stats.NumRequests)
}
})
t.Run("Add() Timeout", func(t *testing.T) {
es, _ := es6.NewClient(es6.Config{Transport: &mockTransport{}})
bi, _ := NewBulkIndexer(BulkIndexerConfig{NumWorkers: 1, Client: es})
ctx, cancel := context.WithTimeout(context.Background(), time.Nanosecond)
defer cancel()
time.Sleep(100 * time.Millisecond)
var errs []error
for i := 0; i < 10; i++ {
errs = append(errs, bi.Add(ctx, BulkIndexerItem{Action: "delete", DocumentID: "timeout"}))
}
if err := bi.Close(context.Background()); err != nil {
t.Errorf("Unexpected error: %s", err)
}
var gotError bool
for _, err := range errs {
if err != nil && err.Error() == "context deadline exceeded" {
gotError = true
}
}
if !gotError {
t.Errorf("Expected timeout error, but none in: %q", errs)
}
})
t.Run("Close() Cancel", func(t *testing.T) {
es, _ := es6.NewClient(es6.Config{Transport: &mockTransport{}})
bi, _ := NewBulkIndexer(BulkIndexerConfig{
NumWorkers: 1,
FlushBytes: 1,
Client: es,
})
for i := 0; i < 10; i++ {
bi.Add(context.Background(), BulkIndexerItem{Action: "foo"})
}
ctx, cancel := context.WithCancel(context.Background())
cancel()
if err := bi.Close(ctx); err == nil {
t.Errorf("Expected context cancelled error, but got: %v", err)
}
})
t.Run("Indexer Callback", func(t *testing.T) {
esCfg := es6.Config{
Transport: &mockTransport{
RoundTripFunc: func(*http.Request) (*http.Response, error) {
return nil, fmt.Errorf("Mock transport error")
},
},
}
if os.Getenv("DEBUG") != "" {
esCfg.Logger = &estransport.ColorLogger{
Output: os.Stdout,
EnableRequestBody: true,
EnableResponseBody: true,
}
}
es, _ := es6.NewClient(esCfg)
var indexerError error
biCfg := BulkIndexerConfig{
NumWorkers: 1,
Client: es,
OnError: func(ctx context.Context, err error) { indexerError = err },
}
if os.Getenv("DEBUG") != "" {
biCfg.DebugLogger = log.New(os.Stdout, "", 0)
}
bi, _ := NewBulkIndexer(biCfg)
if err := bi.Add(context.Background(), BulkIndexerItem{
Action: "foo",
}); err != nil {
t.Fatalf("Unexpected error: %s", err)
}
bi.Close(context.Background())
if indexerError == nil {
t.Errorf("Expected indexerError to not be nil")
}
})
t.Run("Item Callbacks", func(t *testing.T) {
var (
countSuccessful uint64
countFailed uint64
failedIDs []string
numItems = 4
numFailed = 2
bodyContent, _ = ioutil.ReadFile("testdata/bulk_response_2.json")
)
es, _ := es6.NewClient(es6.Config{Transport: &mockTransport{
RoundTripFunc: func(*http.Request) (*http.Response, error) {
return &http.Response{Body: ioutil.NopCloser(bytes.NewBuffer(bodyContent))}, nil
},
}})
cfg := BulkIndexerConfig{NumWorkers: 1, Client: es}
if os.Getenv("DEBUG") != "" {
cfg.DebugLogger = log.New(os.Stdout, "", 0)
}
bi, _ := NewBulkIndexer(cfg)
successFunc := func(ctx context.Context, item BulkIndexerItem, res BulkIndexerResponseItem) {
atomic.AddUint64(&countSuccessful, 1)
}
failureFunc := func(ctx context.Context, item BulkIndexerItem, res BulkIndexerResponseItem, err error) {
atomic.AddUint64(&countFailed, 1)
failedIDs = append(failedIDs, item.DocumentID)
}
if err := bi.Add(context.Background(), BulkIndexerItem{
Action: "index",
DocumentID: "1",
Body: strings.NewReader(`{"title":"foo"}`),
OnSuccess: successFunc,
OnFailure: failureFunc,
}); err != nil {
t.Fatalf("Unexpected error: %s", err)
}
if err := bi.Add(context.Background(), BulkIndexerItem{
Action: "create",
DocumentID: "1",
Body: strings.NewReader(`{"title":"bar"}`),
OnSuccess: successFunc,
OnFailure: failureFunc,
}); err != nil {
t.Fatalf("Unexpected error: %s", err)
}
if err := bi.Add(context.Background(), BulkIndexerItem{
Action: "delete",
DocumentID: "2",
OnSuccess: successFunc,
OnFailure: failureFunc,
}); err != nil {
t.Fatalf("Unexpected error: %s", err)
}
if err := bi.Add(context.Background(), BulkIndexerItem{
Action: "update",
DocumentID: "3",
Body: strings.NewReader(`{"doc":{"title":"qux"}}`),
OnSuccess: successFunc,
OnFailure: failureFunc,
}); err != nil {
t.Fatalf("Unexpected error: %s", err)
}
if err := bi.Close(context.Background()); err != nil {
t.Errorf("Unexpected error: %s", err)
}
stats := bi.Stats()
if stats.NumAdded != uint64(numItems) {
t.Errorf("Unexpected NumAdded: %d", stats.NumAdded)
}
// Two failures are expected:
//
// * Operation #2: document can't be created, because a document with the same ID already exists.
// * Operation #3: document can't be deleted, because it doesn't exist.
if stats.NumFailed != uint64(numFailed) {
t.Errorf("Unexpected NumFailed: %d", stats.NumFailed)
}
if stats.NumFlushed != 2 {
t.Errorf("Unexpected NumFailed: %d", stats.NumFailed)
}
if stats.NumIndexed != 1 {
t.Errorf("Unexpected NumIndexed: %d", stats.NumIndexed)
}
if stats.NumUpdated != 1 {
t.Errorf("Unexpected NumUpdated: %d", stats.NumUpdated)
}
if countSuccessful != uint64(numItems-numFailed) {
t.Errorf("Unexpected countSuccessful: %d", countSuccessful)
}
if countFailed != uint64(numFailed) {
t.Errorf("Unexpected countFailed: %d", countFailed)
}
if !reflect.DeepEqual(failedIDs, []string{"1", "2"}) {
t.Errorf("Unexpected failedIDs: %#v", failedIDs)
}
})
t.Run("OnFlush callbacks", func(t *testing.T) {
type contextKey string
es, _ := es6.NewClient(es6.Config{Transport: &mockTransport{}})
bi, _ := NewBulkIndexer(BulkIndexerConfig{
Client: es,
Index: "foo",
OnFlushStart: func(ctx context.Context) context.Context {
fmt.Println(">>> Flush started")
return context.WithValue(ctx, contextKey("start"), time.Now().UTC())
},
OnFlushEnd: func(ctx context.Context) {
var duration time.Duration
if v := ctx.Value("start"); v != nil {
duration = time.Since(v.(time.Time))
}
fmt.Printf(">>> Flush finished (duration: %s)\n", duration)
},
})
err := bi.Add(context.Background(), BulkIndexerItem{
Action: "index",
Body: strings.NewReader(`{"title":"foo"}`),
})
if err != nil {
t.Fatalf("Unexpected error: %s", err)
}
if err := bi.Close(context.Background()); err != nil {
t.Errorf("Unexpected error: %s", err)
}
stats := bi.Stats()
if stats.NumAdded != uint64(1) {
t.Errorf("Unexpected NumAdded: %d", stats.NumAdded)
}
})
t.Run("Automatic flush", func(t *testing.T) {
es, _ := es6.NewClient(es6.Config{Transport: &mockTransport{
RoundTripFunc: func(*http.Request) (*http.Response, error) {
return &http.Response{
StatusCode: http.StatusOK,
Status: "200 OK",
Body: ioutil.NopCloser(strings.NewReader(`{"items":[{"index": {}}]}`))}, nil
},
}})
cfg := BulkIndexerConfig{
NumWorkers: 1,
Client: es,
FlushInterval: 50 * time.Millisecond, // Decrease the flush timeout
}
if os.Getenv("DEBUG") != "" {
cfg.DebugLogger = log.New(os.Stdout, "", 0)
}
bi, _ := NewBulkIndexer(cfg)
bi.Add(context.Background(),
BulkIndexerItem{Action: "index", Body: strings.NewReader(`{"title":"foo"}`)})
// Allow some time for auto-flush to kick in
time.Sleep(250 * time.Millisecond)
stats := bi.Stats()
expected := uint64(1)
if stats.NumAdded != expected {
t.Errorf("Unexpected NumAdded: want=%d, got=%d", expected, stats.NumAdded)
}
if stats.NumFailed != 0 {
t.Errorf("Unexpected NumFailed: want=%d, got=%d", 0, stats.NumFlushed)
}
if stats.NumFlushed != expected {
t.Errorf("Unexpected NumFlushed: want=%d, got=%d", expected, stats.NumFlushed)
}
if stats.NumIndexed != expected {
t.Errorf("Unexpected NumIndexed: want=%d, got=%d", expected, stats.NumIndexed)
}
// Wait some time before closing the indexer to clear the timer
time.Sleep(200 * time.Millisecond)
bi.Close(context.Background())
})
t.Run("TooManyRequests", func(t *testing.T) {
var (
wg sync.WaitGroup
countReqs int
numItems = 2
)
esCfg := es6.Config{
Transport: &mockTransport{
RoundTripFunc: func(*http.Request) (*http.Response, error) {
countReqs++
if countReqs <= 4 {
return &http.Response{
StatusCode: http.StatusTooManyRequests,
Status: "429 TooManyRequests",
Body: ioutil.NopCloser(strings.NewReader(`{"took":1}`))}, nil
}
bodyContent, _ := ioutil.ReadFile("testdata/bulk_response_1c.json")
return &http.Response{
StatusCode: http.StatusOK,
Status: "200 OK",
Body: ioutil.NopCloser(bytes.NewBuffer(bodyContent)),
}, nil
},
},
MaxRetries: 5,
RetryOnStatus: []int{502, 503, 504, 429},
RetryBackoff: func(i int) time.Duration {
if os.Getenv("DEBUG") != "" {
fmt.Printf("*** Retry #%d\n", i)
}
return time.Duration(i) * 100 * time.Millisecond
},
}
if os.Getenv("DEBUG") != "" {
esCfg.Logger = &estransport.ColorLogger{Output: os.Stdout}
}
es, _ := es6.NewClient(esCfg)
biCfg := BulkIndexerConfig{NumWorkers: 1, FlushBytes: 50, Client: es}
if os.Getenv("DEBUG") != "" {
biCfg.DebugLogger = log.New(os.Stdout, "", 0)
}
bi, _ := NewBulkIndexer(biCfg)
for i := 1; i <= numItems; i++ {
wg.Add(1)
go func(i int) {
defer wg.Done()
err := bi.Add(context.Background(), BulkIndexerItem{
Action: "foo",
Body: strings.NewReader(`{"title":"foo"}`),
})
if err != nil {
t.Fatalf("Unexpected error: %s", err)
}
}(i)
}
wg.Wait()
if err := bi.Close(context.Background()); err != nil {
t.Errorf("Unexpected error: %s", err)
}
stats := bi.Stats()
if stats.NumAdded != uint64(numItems) {
t.Errorf("Unexpected NumAdded: want=%d, got=%d", numItems, stats.NumAdded)
}
if stats.NumFlushed != uint64(numItems) {
t.Errorf("Unexpected NumFlushed: want=%d, got=%d", numItems, stats.NumFlushed)
}
if stats.NumFailed != 0 {
t.Errorf("Unexpected NumFailed: want=%d, got=%d", 0, stats.NumFailed)
}
// Stats don't include the retries in client
if stats.NumRequests != 1 {
t.Errorf("Unexpected NumRequests: want=%d, got=%d", 3, stats.NumRequests)
}
})
t.Run("Custom JSON Decoder", func(t *testing.T) {
es, _ := es6.NewClient(es6.Config{Transport: &mockTransport{}})
bi, _ := NewBulkIndexer(BulkIndexerConfig{Client: es, Decoder: customJSONDecoder{}})
err := bi.Add(context.Background(), BulkIndexerItem{
Action: "index",
DocumentID: "1",
Body: strings.NewReader(`{"title":"foo"}`),
})
if err != nil {
t.Fatalf("Unexpected error: %s", err)
}
if err := bi.Close(context.Background()); err != nil {
t.Errorf("Unexpected error: %s", err)
}
stats := bi.Stats()
if stats.NumAdded != uint64(1) {
t.Errorf("Unexpected NumAdded: %d", stats.NumAdded)
}
})
t.Run("Worker.writeMeta()", func(t *testing.T) {
type args struct {
item BulkIndexerItem
}
tests := []struct {
name string
args args
want string
}{
{
"without _index and _id",
args{BulkIndexerItem{Action: "index"}},
`{"index":{}}` + "\n",
},
{
"with _id",
args{BulkIndexerItem{
Action: "index",
DocumentID: "42",
}},
`{"index":{"_id":"42"}}` + "\n",
},
{
"with _index",
args{BulkIndexerItem{
Action: "index",
Index: "test",
}},
`{"index":{"_index":"test"}}` + "\n",
},
{
"with _index and _id",
args{BulkIndexerItem{
Action: "index",
DocumentID: "42",
Index: "test",
}},
`{"index":{"_id":"42","_index":"test"}}` + "\n",
},
}
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
w := &worker{
buf: bytes.NewBuffer(make([]byte, 0, 5e+6)),
aux: make([]byte, 0, 512),
}
if err := w.writeMeta(tt.args.item); err != nil {
t.Errorf("Unexpected error: %v", err)
}
if w.buf.String() != tt.want {
t.Errorf("worker.writeMeta() %s = got [%s], want [%s]", tt.name, w.buf.String(), tt.want)
}
})
}
})
}
type customJSONDecoder struct{}
func (d customJSONDecoder) UnmarshalFromReader(r io.Reader, blk *BulkIndexerResponse) error {
return json.NewDecoder(r).Decode(blk)
}
|
[
"\"DEBUG\"",
"\"DEBUG\"",
"\"DEBUG\"",
"\"DEBUG\"",
"\"DEBUG\"",
"\"DEBUG\"",
"\"DEBUG\"",
"\"DEBUG\""
] |
[] |
[
"DEBUG"
] |
[]
|
["DEBUG"]
|
go
| 1 | 0 | |
metadata-ingestion/src/datahub/entrypoints.py
|
import logging
import os
import sys
import click
import stackprinter
import datahub as datahub_package
from datahub.cli.check_cli import check
from datahub.cli.cli_utils import DATAHUB_CONFIG_PATH, write_datahub_config
from datahub.cli.delete_cli import delete
from datahub.cli.docker import docker
from datahub.cli.get_cli import get
from datahub.cli.ingest_cli import ingest
from datahub.cli.migrate import migrate
from datahub.cli.put_cli import put
from datahub.cli.telemetry import telemetry as telemetry_cli
from datahub.cli.timeline_cli import timeline
from datahub.configuration import SensitiveError
from datahub.telemetry import telemetry
logger = logging.getLogger(__name__)
# Configure some loggers.
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("snowflake").setLevel(level=logging.WARNING)
# logging.getLogger("botocore").setLevel(logging.INFO)
# logging.getLogger("google").setLevel(logging.INFO)
# Configure logger.
BASE_LOGGING_FORMAT = (
"[%(asctime)s] %(levelname)-8s {%(name)s:%(lineno)d} - %(message)s"
)
logging.basicConfig(format=BASE_LOGGING_FORMAT)
MAX_CONTENT_WIDTH = 120
@click.group(
context_settings=dict(
# Avoid truncation of help text.
# See https://github.com/pallets/click/issues/486.
max_content_width=MAX_CONTENT_WIDTH,
)
)
@click.option("--debug/--no-debug", default=False)
@click.version_option(
version=datahub_package.nice_version_name(),
prog_name=datahub_package.__package_name__,
)
@click.option(
"-dl",
"--detect-memory-leaks",
type=bool,
is_flag=True,
default=False,
help="Run memory leak detection.",
)
@click.pass_context
def datahub(ctx: click.Context, debug: bool, detect_memory_leaks: bool) -> None:
# Insulate 'datahub' and all child loggers from inadvertent changes to the
# root logger by the external site packages that we import.
# (Eg: https://github.com/reata/sqllineage/commit/2df027c77ea0a8ea4909e471dcd1ecbf4b8aeb2f#diff-30685ea717322cd1e79c33ed8d37903eea388e1750aa00833c33c0c5b89448b3R11
# changes the root logger's handler level to WARNING, causing any message below
# WARNING level to be dropped after this module is imported, irrespective
# of the logger's logging level! The lookml source was affected by this).
# 1. Create 'datahub' parent logger.
datahub_logger = logging.getLogger("datahub")
# 2. Setup the stream handler with formatter.
stream_handler = logging.StreamHandler()
formatter = logging.Formatter(BASE_LOGGING_FORMAT)
stream_handler.setFormatter(formatter)
datahub_logger.addHandler(stream_handler)
# 3. Turn off propagation to the root handler.
datahub_logger.propagate = False
# 4. Adjust log-levels.
if debug or os.getenv("DATAHUB_DEBUG", False):
logging.getLogger().setLevel(logging.INFO)
datahub_logger.setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.WARNING)
datahub_logger.setLevel(logging.INFO)
# loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict]
# print(loggers)
# Setup the context for the memory_leak_detector decorator.
ctx.ensure_object(dict)
ctx.obj["detect_memory_leaks"] = detect_memory_leaks
@datahub.command()
@telemetry.with_telemetry
def version() -> None:
"""Print version number and exit."""
click.echo(f"DataHub CLI version: {datahub_package.nice_version_name()}")
click.echo(f"Python version: {sys.version}")
@datahub.command()
@telemetry.with_telemetry
def init() -> None:
"""Configure which datahub instance to connect to"""
if os.path.isfile(DATAHUB_CONFIG_PATH):
click.confirm(f"{DATAHUB_CONFIG_PATH} already exists. Overwrite?", abort=True)
click.echo("Configure which datahub instance to connect to")
host = click.prompt(
"Enter your DataHub host", type=str, default="http://localhost:8080"
)
token = click.prompt(
"Enter your DataHub access token (Supports env vars via `{VAR_NAME}` syntax)",
type=str,
default="",
)
write_datahub_config(host, token)
click.echo(f"Written to {DATAHUB_CONFIG_PATH}")
datahub.add_command(check)
datahub.add_command(docker)
datahub.add_command(ingest)
datahub.add_command(delete)
datahub.add_command(get)
datahub.add_command(put)
datahub.add_command(telemetry_cli)
datahub.add_command(migrate)
datahub.add_command(timeline)
def main(**kwargs):
# This wrapper prevents click from suppressing errors.
try:
sys.exit(datahub(standalone_mode=False, **kwargs))
except click.exceptions.Abort:
# Click already automatically prints an abort message, so we can just exit.
sys.exit(1)
except click.ClickException as error:
error.show()
sys.exit(1)
except Exception as exc:
kwargs = {}
sensitive_cause = SensitiveError.get_sensitive_cause(exc)
if sensitive_cause:
kwargs = {"show_vals": None}
exc = sensitive_cause
logger.error(
stackprinter.format(
exc,
line_wrap=MAX_CONTENT_WIDTH,
truncate_vals=10 * MAX_CONTENT_WIDTH,
suppressed_paths=[r"lib/python.*/site-packages/click/"],
**kwargs,
)
)
sys.exit(1)
|
[] |
[] |
[
"DATAHUB_DEBUG"
] |
[]
|
["DATAHUB_DEBUG"]
|
python
| 1 | 0 | |
submissions/reusable/CovTesting/Coverage VS. # Adv Examples/tknp_testing.py
|
import argparse
import os
import random
import shutil
import warnings
import sys
warnings.filterwarnings("ignore")
from keras import backend as K
import numpy as np
from PIL import Image, ImageFilter
from skimage.measure import compare_ssim as SSIM
import keras
import tensorflow as tf
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
#### TensorFlow session setup (allow GPU memory growth); incidental to the experiment
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
# helper function
def get_layer_i_output(model, i, data):
layer_model = K.function([model.layers[0].input], [model.layers[i].output])
ret = layer_model([data])[0]
num = data.shape[0]
ret = np.reshape(ret, (num, -1))
return ret
# the data is in range(-.5, .5)
def load_data(name):
assert (name.upper() in ['MNIST', 'CIFAR', 'SVHN'])
name = name.lower()
x_train = np.load('../data/' + name + '_data/' + name + '_x_train.npy')
y_train = np.load('../data/' + name + '_data/' + name + '_y_train.npy')
x_test = np.load('../data/' + name + '_data/' + name + '_x_test.npy')
y_test = np.load('../data/' + name + '_data/' + name + '_y_test.npy')
return x_train, y_train, x_test, y_test
class Coverage:
def __init__(self, model, x_train, y_train, x_test, y_test, x_adv):
self.model = model
self.x_train = x_train
self.y_train = y_train
self.x_test = x_test
self.y_test = y_test
self.x_adv = x_adv
# find scale factors and min num
def scale(self, layers, batch=1024):
data_num = self.x_adv.shape[0]
factors = dict()
for i in layers:
begin, end = 0, batch
max_num, min_num = np.NINF, np.inf
while begin < data_num:
layer_output = get_layer_i_output(self.model, i, self.x_adv[begin:end])
tmp = layer_output.max()
max_num = tmp if tmp > max_num else max_num
tmp = layer_output.min()
min_num = tmp if tmp < min_num else min_num
begin += batch
end += batch
factors[i] = (max_num - min_num, min_num)
return factors
# 1 Neuron Coverage
def NC(self, layers, threshold=0., batch=1024):
factors = self.scale(layers, batch=batch)
neuron_num = 0
for i in layers:
out_shape = self.model.layers[i].output.shape
neuron_num += np.prod(out_shape[1:])
neuron_num = int(neuron_num)
activate_num = 0
data_num = self.x_adv.shape[0]
for i in layers:
neurons = np.prod(self.model.layers[i].output.shape[1:])
buckets = np.zeros(neurons).astype('bool')
begin, end = 0, batch
while begin < data_num:
layer_output = get_layer_i_output(self.model, i, self.x_adv[begin:end])
# scale the layer output to (0, 1)
layer_output -= factors[i][1]
layer_output /= factors[i][0]
col_max = np.max(layer_output, axis=0)
begin += batch
end += batch
buckets[col_max > threshold] = True
activate_num += np.sum(buckets)
# print('NC:\t{:.3f} activate_num:\t{} neuron_num:\t{}'.format(activate_num / neuron_num, activate_num, neuron_num))
return activate_num / neuron_num, activate_num, neuron_num
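# Illustrative sketch (not part of the original script): for a single layer with
# four scaled activations and threshold 0.0, column-wise maxima of
# [0.7, 0.0, 0.2, 0.0] mark neurons 0 and 2 as activated, so NC = 2 / 4 = 0.5.
# The loop above computes the same thing batch by batch over self.x_adv.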
# 2 k-multisection neuron coverage, neuron boundary coverage and strong activation neuron coverage
def KMNC(self, layers, k=10, batch=1024):
neuron_num = 0
for i in layers:
out_shape = self.model.layers[i].output.shape
neuron_num += np.prod(out_shape[1:])
neuron_num = int(neuron_num)
covered_num = 0
l_covered_num = 0
u_covered_num = 0
for i in layers:
neurons = np.prod(self.model.layers[i].output.shape[1:])
print(neurons)
begin, end = 0, batch
data_num = self.x_train.shape[0]
neuron_max = np.full(neurons, np.NINF).astype('float')
neuron_min = np.full(neurons, np.inf).astype('float')
while begin < data_num:
layer_output_train = get_layer_i_output(self.model, i, self.x_train[begin:end])
batch_neuron_max = np.max(layer_output_train, axis=0)
batch_neuron_min = np.min(layer_output_train, axis=0)
neuron_max = np.maximum(batch_neuron_max, neuron_max)
neuron_min = np.minimum(batch_neuron_min, neuron_min)
begin += batch
end += batch
buckets = np.zeros((neurons, k + 2)).astype('bool')
interval = (neuron_max - neuron_min) / k
# print(interval[8], neuron_max[8], neuron_min[8])
begin, end = 0, batch
data_num = self.x_adv.shape[0]
while begin < data_num:
layer_output_adv = get_layer_i_output(self.model, i, self.x_adv[begin: end])
layer_output_adv -= neuron_min
layer_output_adv /= (interval + 10 ** (-100))
layer_output_adv[layer_output_adv < 0.] = -1
layer_output_adv[layer_output_adv >= k / 1.0] = k
layer_output_adv = layer_output_adv.astype('int')
# index 0 for lower, 1 to k for between, k + 1 for upper
layer_output_adv = layer_output_adv + 1
for j in range(neurons):
uniq = np.unique(layer_output_adv[:, j])
# print(layer_output_adv[:, j])
buckets[j, uniq] = True
begin += batch
end += batch
covered_num += np.sum(buckets[:, 1:-1])
u_covered_num += np.sum(buckets[:, -1])
l_covered_num += np.sum(buckets[:, 0])
print('KMNC:\t{:.3f} covered_num:\t{}'.format(covered_num / (neuron_num * k), covered_num))
print(
'NBC:\t{:.3f} l_covered_num:\t{}'.format((l_covered_num + u_covered_num) / (neuron_num * 2), l_covered_num))
print('SNAC:\t{:.3f} u_covered_num:\t{}'.format(u_covered_num / neuron_num, u_covered_num))
return covered_num / (neuron_num * k), (l_covered_num + u_covered_num) / (
neuron_num * 2), u_covered_num / neuron_num, covered_num, l_covered_num, u_covered_num, neuron_num * k
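# Illustrative sketch (not part of the original script): with k = 10 and a
# training-time range of [0, 1] for one neuron, an adversarial activation of
# 0.25 scales to 2.5, truncates to 2 and shifts to bucket index 3 (buckets
# 1..k are "between", 0 is below the training minimum, k + 1 is above the
# maximum), so it counts toward KMNC; values outside [0, 1] would instead
# contribute to NBC/SNAC.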
# 3 top-k neuron coverage
def TKNC(self, layers, k=2, batch=1024):
def top_k(x, k):
ind = np.argpartition(x, -k)[-k:]
return ind[np.argsort((-x)[ind])]
neuron_num = 0
for i in layers:
out_shape = self.model.layers[i].output.shape
neuron_num += np.prod(out_shape[1:])
neuron_num = int(neuron_num)
pattern_num = 0
data_num = self.x_adv.shape[0]
for i in layers:
pattern_set = set()
begin, end = 0, batch
while begin < data_num:
layer_output = get_layer_i_output(self.model, i, self.x_adv[begin:end])
topk = np.argpartition(layer_output, -k, axis=1)[:, -k:]
topk = np.sort(topk, axis=1)
# or in order
# topk = np.apply_along_axis[lambda x: top_k(layer_output, k), 1, layer_output]
for j in range(topk.shape[0]):
pattern_set.add(tuple(topk[j]))
begin += batch
end += batch
pattern_num += len(pattern_set)
print(
'TKNC:\t{:.3f} pattern_num:\t{} neuron_num:\t{}'.format(pattern_num / neuron_num, pattern_num, neuron_num))
return pattern_num / neuron_num, pattern_num, neuron_num
# 4 top-k neuron patterns
def TKNP(self, layers, k=2, batch=1024):
def top_k(x, k):
ind = np.argpartition(x, -k)[-k:]
return ind[np.argsort((-x)[ind])]
def to_tuple(x):
l = list()
for row in x:
l.append(tuple(row))
return tuple(l)
pattern_set = set()
layer_num = len(layers)
data_num = self.x_adv.shape[0]
patterns = np.zeros((data_num, layer_num, k))
layer_cnt = 0
for i in layers:
neurons = np.prod(self.model.layers[i].output.shape[1:])
begin, end = 0, batch
while begin < data_num:
layer_output = get_layer_i_output(self.model, i, self.x_adv[begin:end])
topk = np.argpartition(layer_output, -k, axis=1)[:, -k:]
topk = np.sort(topk, axis=1)
# or in order
# topk = np.apply_along_axis[lambda x: top_k(layer_output, k), 1, layer_output]
patterns[begin:end, layer_cnt, :] = topk
begin += batch
end += batch
layer_cnt += 1
for i in range(patterns.shape[0]):
pattern_set.add(to_tuple(patterns[i]))
pattern_num = len(pattern_set)
print('TKNP:\t{}'.format(pattern_num))
return pattern_num
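# Illustrative sketch (not part of the original script): TKNC counts distinct
# top-k index sets per layer, while TKNP treats the tuple of top-k sets across
# all selected layers as one pattern per input. With layers [0, 8] and k = 2,
# two inputs whose top-2 neurons are (1, 5) in layer 0 but differ in layer 8
# add one entry to layer 0's TKNC set and two distinct TKNP patterns.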
def all(self, layers, batch=100):
self.NC(layers, batch=batch)
self.KMNC(layers, batch=batch)
self.TKNC(layers, batch=batch)
self.TKNP(layers, batch=batch)
if __name__ == '__main__':
dataset = 'mnist'
model_name = 'lenet1'
l = [0, 8]
x_train, y_train, x_test, y_test = load_data(dataset)
# ## load mine trained model
from keras.models import load_model
model = load_model('../data/' + dataset + '_data/model/' + model_name + '.h5')
model.summary()
tknp_all = np.array([])
for num in range(0, 50):
coverage = Coverage(model, x_train, y_train, x_test, y_test, x_test[0: 200*num])
tknp = coverage.TKNP(l)
tknp_all = np.append(tknp_all, tknp)
with open("testing_coverage_result.txt", "a") as f:
f.write("\n------------------------------------------------------------------------------\n")
f.write('x: {} \n'.format(num*200+1))
f.write('TKNP: {} \n'.format(tknp))
np.save('Q2_original/tknp_all.npy', tknp_all)
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
tinygrad/tensor.py
|
# inspired by https://github.com/karpathy/micrograd/blob/master/micrograd/engine.py
import sys
import inspect
import functools
import os
from collections import defaultdict
import numpy as np
# **** profiler ****
DEBUG = os.getenv("DEBUG", None) is not None
if DEBUG:
import atexit, time
debug_counts, debug_times = defaultdict(int), defaultdict(float)
def print_debug_exit():
for name, _ in sorted(debug_times.items(), key=lambda x: -x[1]):
print(f"{name:>20} : {debug_counts[name]:>6} {debug_times[name]:>10.2f} ms")
atexit.register(print_debug_exit)
class ProfileOp:
def __init__(self, name, x, backward=False):
self.name, self.x = f"back_{name}" if backward else name, x
def __enter__(self):
if DEBUG: self.st = time.time()
def __exit__(self, *junk):
if DEBUG:
if cl_queue is not None:
cl_queue.finish()
et = (time.time()-self.st)*1000.
debug_counts[self.name] += 1
debug_times[self.name] += et
print(f"{self.name:>20} : {et:>7.2f} ms {[y.shape for y in self.x]}")
# **** GPU functions ****
cl_ctx, cl_queue = None, None
def require_init_gpu():
if not GPU: raise Exception("No GPU Support, install pyopencl")
global cl_ctx, cl_queue
if cl_queue is None:
devices = cl.get_platforms()[0].get_devices(device_type=cl.device_type.GPU)
if len(devices) == 0:
devices = cl.get_platforms()[0].get_devices(device_type=cl.device_type.CPU)
cl_ctx = cl.Context(devices=devices)
# this is an in-order command queue
cl_queue = cl.CommandQueue(cl_ctx)
class GPUBuffer:
def __init__(self, shape, hostbuf=None):
self.shape, self.dtype = tuple(shape), np.float32
self.cl = hostbuf.cl if isinstance(hostbuf, GPUBuffer) else \
cl.Buffer(cl_ctx, cl.mem_flags.READ_WRITE | (cl.mem_flags.COPY_HOST_PTR if hostbuf is not None else 0), 4*np.prod(shape),
hostbuf=hostbuf.astype(np.float32).ravel() if hostbuf is not None else None)
def __repr__(self):
return f"<GPUBuffer with shape {self.shape!r}>"
# **** ANE functions ****
ane = None
def require_init_ane():
global ane
if ane is None:
import ane.lib.ane, tinygrad.ops_ane
ane = ane.lib.ane.ANE()
# **** start with two base classes, Tensor and Function ****
class Device: CPU, GPU, ANE = 0, 1, 2
DEFAULT_DEVICE = Device.CPU if os.environ.get("GPU", 0) != "1" else Device.GPU
class Tensor:
did_float_warning = False
training = True
ops = defaultdict(dict)
def __init__(self, data, device=DEFAULT_DEVICE, requires_grad=True):
self.device, self.data = device, self._move_data(data, device)
self.grad, self.requires_grad = None, requires_grad
# internal variables used for autograd graph construction
self._ctx = None
def __repr__(self):
return f"<Tensor {self.data!r} with grad {(self.grad.data if self.grad else None)!r}>"
def assign(self, x):
self.data = x.data
@property
def shape(self):
return self.data.shape
@property
def dtype(self):
return self.data.dtype
# ***** creation helper functions *****
@classmethod
def zeros(cls, *shape, **kwargs):
return cls(np.zeros(shape, dtype=np.float32), **kwargs)
@classmethod
def ones(cls, *shape, **kwargs):
return cls(np.ones(shape, dtype=np.float32), **kwargs)
@classmethod
def randn(cls, *shape, **kwargs):
return cls(np.random.randn(*shape).astype(np.float32), **kwargs)
@classmethod
def uniform(cls, *shape, **kwargs):
return cls((np.random.uniform(-1., 1., size=shape)/np.sqrt(np.prod(shape))).astype(np.float32), **kwargs)
@classmethod
def eye(cls, dim, **kwargs):
return cls(np.eye(dim).astype(np.float32), **kwargs)
# ***** toposort and backward pass *****
def deepwalk(self, visited: set, nodes: list):
visited.add(self)
if self._ctx:
[i.deepwalk(visited, nodes) for i in self._ctx.parents if i not in visited]
nodes.append(self)
return nodes
def backward(self):
assert self.shape == (1,)
# fill in the first grad with one
# this is "implicit gradient creation"
self.grad = Tensor(np.ones(self.shape, dtype=self.dtype), device=self.device, requires_grad=False)
for t0 in reversed(self.deepwalk(set(), [])):
assert (t0.grad is not None)
with ProfileOp(t0._ctx.__class__.__name__, [t0.grad], backward=True):
grads = t0._ctx.backward(t0._ctx, t0.grad.data)
if len(t0._ctx.parents) == 1:
grads = [grads]
for t, g in zip(t0._ctx.parents, grads):
if g is not None:
assert g.shape == t.shape, \
f"grad shape must match tensor shape in {self._ctx!r}, {g.shape!r} != {t.shape!r}"
gt = Tensor(g, device=self.device, requires_grad=False)
t.grad = gt if t.grad is None else (t.grad + gt)
# ***** tinygrad supports CPU and GPU *****
@staticmethod
def _move_data(data, device):
if isinstance(data, GPUBuffer):
if device == Device.GPU: return data
old = data
data = np.empty(old.shape, dtype=np.float32)
with ProfileOp("toCPU", [data]):
cl.enqueue_copy(cl_queue, data, old.cl, is_blocking=True)
elif "ANETensor" in str(type(data)):
if device == Device.ANE: return data
with ProfileOp("toCPU", [data]):
data = data.data().astype(np.float32)
if not isinstance(data, np.ndarray):
data = np.array(data, dtype=np.float32)
if data.dtype != np.float32 and not Tensor.did_float_warning:
# warning? float64 is actually needed for numerical jacobian
print(f"warning, {data.shape!r} isn't float32")
Tensor.did_float_warning = True
if device == Device.GPU:
require_init_gpu()
with ProfileOp("toGPU", [data]):
return GPUBuffer(data.shape, data)
elif device == Device.ANE:
require_init_ane()
with ProfileOp("toANE", [data]):
ndata = ane.tensor(data.shape)
ndata.data()[:] = data
return ndata
return data
def to_(self, device):
self.data, self.device = self._move_data(self.data, device), device
if self.grad: self.grad.to_(device)
def to(self, device):
ret = Tensor(self.data, device)
if self.grad: ret.grad = self.grad.to(device)
return ret
def detach(self):
return Tensor(self.data, device=self.device)
# ***** non first class ops *****
def __getitem__(self, val):
arg = []
for i,s in enumerate(val if type(val) in [list, tuple] else ([] if val is None else [val])):
arg.append((s.start if s.start is not None else 0,
(s.stop if s.stop >=0 else self.shape[i]+s.stop) if s.stop is not None else self.shape[i]))
assert s.step is None or s.step == 1
return self.slice(arg = arg+[(0,self.shape[i]) for i in range(len(arg), len(self.shape))])
def pad2d(self, padding):
return self[:, :, -padding[2]:self.shape[2]+padding[3], -padding[0]:self.shape[3]+padding[1]]
def dot(self, w):
return self.matmul(w)
def mean(self, axis=None):
out = self.sum(axis=axis)
return out * (np.prod(out.shape)/np.prod(self.shape))
def sqrt(self):
return self.pow(0.5)
def div(self, y):
return self * (y ** -1.0)
def sigmoid(self):
e = self.exp()
return e.div(1 + e)
def swish(self):
return self * self.sigmoid()
def tanh(self):
return 2.0 * ((2.0 * self).sigmoid()) - 1.0
def leakyrelu(self, neg_slope=0.01):
return self.relu() - (-neg_slope*self).relu()
def softmax(self):
ns = list(self.shape)[:-1]+[1]
m = self.max(axis=len(self.shape)-1).reshape(shape=ns)
e = (self - m).exp()
ss = e.sum(axis=len(self.shape)-1).reshape(shape=ns)
return e.div(ss)
def logsoftmax(self):
ns = list(self.shape)[:-1]+[1]
m = self.max(axis=len(self.shape)-1).reshape(shape=ns)
ss = m + (self-m).exp().sum(axis=len(self.shape)-1).reshape(shape=ns).log()
return self - ss
def dropout(self, p=0.5):
# TODO: this needs a test
if Tensor.training:
_mask = np.asarray(np.random.binomial(1, 1.0-p, size=self.shape), dtype=self.dtype)
return self * Tensor(_mask, requires_grad=False, device=self.device) * (1/(1.0 - p))
else:
return self
def abs(self):
return self.relu() + (-1.0*self).relu()
def _pool2d(self, py, px):
xup = self[:, :, :self.shape[2]-self.shape[2]%py, :self.shape[3]-self.shape[3]%px]
return xup.reshape(shape=(xup.shape[0], xup.shape[1], xup.shape[2]//py, py, xup.shape[3]//px, px))
def avg_pool2d(self, kernel_size=(2,2)):
return self._pool2d(*kernel_size).mean(axis=(3,5))
def max_pool2d(self, kernel_size=(2,2)):
return self._pool2d(*kernel_size).max(axis=(3,5))
# An instantiation of the Function is the Context
class Function:
def __init__(self, *tensors):
self.parents = tensors
self.saved_tensors = []
def save_for_backward(self, *x):
self.saved_tensors.extend(x)
def apply(self, *x, **kwargs):
ctx = self(*x) # self - operation i.e 'add', 'sub', etc.
# use default params
params = inspect.signature(self.forward).parameters
for p in params.values():
if p.default is not p.empty:
setattr(ctx, p.name, p.default)
# overwrite with passed params
for k, v in kwargs.items():
setattr(ctx, k, v)
with ProfileOp(ctx.__class__.__name__, x):
ret = Tensor(self.forward(ctx, *[t.data for t in x], **kwargs),
device=ctx.device, requires_grad=any([t.requires_grad for t in x]))
if ret.requires_grad:
ret._ctx = ctx
return ret
def register(name, fxn, device=Device.CPU):
Tensor.ops[device][name] = fxn
def dispatch(*x, **kwargs):
tt = [arg for arg in x if isinstance(arg, Tensor)][0]
x = [Tensor(np.array([arg], dtype=tt.dtype), device=tt.device, requires_grad=False) if not isinstance(arg, Tensor) else arg for arg in x]
f = Tensor.ops[tt.device][name]
f.cl_ctx, f.cl_queue, f.ane, f.device = cl_ctx, cl_queue, ane, tt.device
return f.apply(f, *x, **kwargs)
setattr(Tensor, name, dispatch)
# TODO: div is a second class op, so it doesn't work here
if name in ['add', 'sub', 'mul', 'pow', 'matmul']:
setattr(Tensor, f"__{name}__", dispatch)
setattr(Tensor, f"__i{name}__", lambda self,x: self.assign(dispatch(self,x)))
setattr(Tensor, f"__r{name}__", lambda self,x: dispatch(x,self))
for device in [device for device in Device.__dict__.keys() if device[0] != "_"]:
setattr(Tensor, f"{device.lower()}", functools.partialmethod(Tensor.to, Device.__dict__[device]))
setattr(Tensor, f"{device.lower()}_", functools.partialmethod(Tensor.to_, Device.__dict__[device]))
# this registers all the operations
def _register_ops(namespace, device=Device.CPU):
for name, cls in inspect.getmembers(namespace, inspect.isclass):
if name[0] != "_": register(name.lower(), cls, device=device)
from tinygrad import ops_cpu
_register_ops(ops_cpu)
try:
import pyopencl as cl
# TODO: move this import to require_init_gpu?
from tinygrad import ops_gpu
_register_ops(ops_gpu, device=Device.GPU)
GPU = True
except ImportError:
# no GPU support
GPU = False
ANE = False
|
[] |
[] |
[
"GPU",
"DEBUG"
] |
[]
|
["GPU", "DEBUG"]
|
python
| 2 | 0 | |
example/multi-data-source/source-data-b/main.go
|
package main
import (
"fmt"
"io"
"log"
"math/rand"
"os"
"strconv"
"strings"
"time"
y3 "github.com/yomorun/y3-codec-golang"
"github.com/yomorun/yomo"
)
var serverAddr = os.Getenv("YOMO_SERVER_ENDPOINT")
func main() {
if serverAddr == "" {
serverAddr = "localhost:9000"
}
err := emit(serverAddr)
if err != nil {
log.Printf("❌ Emit the data to YoMo-Zipper %s failure with err: %v", serverAddr, err)
}
}
func emit(addr string) error {
splits := strings.Split(addr, ":")
if len(splits) != 2 {
return fmt.Errorf(`❌ The format of url "%s" is incorrect, it should be "host:port", f.e. localhost:9000`, addr)
}
host := splits[0]
port, err := strconv.Atoi(splits[1])
if err != nil {
	return fmt.Errorf(`❌ The port in "%s" is not a number: %v`, addr, err)
}
cli, err := yomo.NewSource(yomo.WithName("source-b")).Connect(host, port)
if err != nil {
panic(err)
}
defer cli.Close()
generateAndSendData(cli)
return nil
}
var codec = y3.NewCodec(0x12)
func generateAndSendData(writer io.Writer) {
for {
time.Sleep(200 * time.Millisecond)
num := rand.New(rand.NewSource(time.Now().UnixNano())).Float32() * 200
sendingBuf, _ := codec.Marshal(num)
_, err := writer.Write(sendingBuf)
if err != nil {
log.Printf("❌ Emit %v to YoMo-Zipper failure with err: %f", num, err)
} else {
log.Printf("✅ Emit %f to YoMo-Zipper", num)
}
}
}
|
[
"\"YOMO_SERVER_ENDPOINT\""
] |
[] |
[
"YOMO_SERVER_ENDPOINT"
] |
[]
|
["YOMO_SERVER_ENDPOINT"]
|
go
| 1 | 0 | |
lambda_function.py
|
import json
import urllib
import urllib.request
import boto3
import os
# S3 high-level API
s3 = boto3.resource('s3')
client = boto3.client('ses', region_name=os.environ['REGION'])
pattern_confirmed = ''
pattern_unconfirmed = ''
def post_slack(webfook_url, user_name, message):
send_data = {
"username": user_name,
"text": message,
}
send_text = json.dumps(send_data)
request = urllib.request.Request(
webfook_url,
data=send_text.encode('utf-8'),
method="POST"
)
with urllib.request.urlopen(request) as response:
response.read().decode('utf-8')
def lambda_handler(event, context):
# Get the bucket name
bucket = event['Records'][0]['s3']['bucket']['name']
# Get the object key
key = urllib.parse.unquote_plus(event['Records'][0]['s3']['object']['key'])
# Fetch the object
s3_object = s3.Object(bucket, key)
# Read the object contents
s3_object_response = s3_object.get()
info_body = s3_object_response['Body'].read()
bodystr = info_body.decode('utf-8')
array = bodystr.split('\n')
li = array[-2].split(',')
if li[3].replace('\"', '') == 'EstimatedDisclaimer':
# Before the bill is finalized (estimated)
cost_idx = -3
whens = array[-2].split(',')
wi = whens[18].replace('This report reflects your estimated monthly bill for activity through approximately ', '').replace('\"', '')
estimated = '\n' + wi + '時点情報'
else:
# After the bill is finalized (confirmed)
cost_idx = -2
estimated = '\n確定情報'
costs = array[cost_idx].split(',')
currency = costs[23].replace('\"', '')
to = costs[24].replace('\"', '')
ti = costs[28].replace('\"', '')
period = costs[18].replace('Total statement amount for period ', '').replace('\"', '')
u = os.environ['SLACK_URL']
user = 'aws billing : ' + os.environ['ACCOUNT']
m = period
m += '\n'
m += '税抜 : '+ to + currency
m += '\n'
m += '税込 : '+ ti + currency
m += estimated
post_slack(u, user, m)
|
[] |
[] |
[
"REGION",
"ACCOUNT",
"SLACK_URL"
] |
[]
|
["REGION", "ACCOUNT", "SLACK_URL"]
|
python
| 3 | 0 | |
logs.go
|
package main
import (
"io"
"log"
"os"
"os/exec"
"runtime"
"runtime/debug"
"github.com/Nv7-Github/Nv7Haven/eod/logs"
"github.com/go-sql-driver/mysql"
"github.com/gofiber/fiber/v2"
)
var monitors = [][]string{{"measure_temp"}, {"measure_volts"}, {"get_mem", "arm"} /*, {"get_mem", "gpu"}, {"get_throttled"}*/} // Commented out part 1 gets VRAM, commented out part 2 gets if throttled
func systemHandlers(app *fiber.App) {
if runtime.GOOS == "linux" {
app.Get("/temp", func(c *fiber.Ctx) error {
for _, m := range monitors {
cmd := exec.Command("vcgencmd", m...)
cmd.Stdout = c
err := cmd.Run()
if err != nil {
return err
}
}
return nil
})
}
app.Get("/freememory", func(c *fiber.Ctx) error {
debug.FreeOSMemory()
return nil
})
app.Get("/kill/:password", func(c *fiber.Ctx) error {
if c.Params("password") == os.Getenv("PASSWORD") {
os.Exit(2)
}
return nil
})
app.Get("/logs", func(c *fiber.Ctx) error {
file, err := os.Open("logs.txt")
if err != nil {
return err
}
_, err = io.Copy(c, file)
if err != nil {
return err
}
file.Close()
return nil
})
app.Get("/createlogs", func(c *fiber.Ctx) error {
file, err := os.Open("createlogs.txt")
if err != nil {
return err
}
_, err = io.Copy(c, file)
if err != nil {
return err
}
file.Close()
return nil
})
app.Get("/discordlogs", func(c *fiber.Ctx) error {
file, err := os.Open("discordlogs.txt")
if err != nil {
return err
}
_, err = io.Copy(c, file)
if err != nil {
return err
}
file.Close()
return nil
})
mysql.SetLogger(&Logger{})
app.Get("/mysqlogs", func(c *fiber.Ctx) error {
file, err := os.Open("mysqlogs.txt")
if err != nil {
return err
}
_, err = io.Copy(c, file)
if err != nil {
return err
}
file.Close()
return nil
})
}
type Logger struct{}
func (l *Logger) Print(args ...interface{}) {
log.SetOutput(logs.MysqLogs)
log.Print(args...)
}
|
[
"\"PASSWORD\""
] |
[] |
[
"PASSWORD"
] |
[]
|
["PASSWORD"]
|
go
| 1 | 0 | |
app/src/main/java/net/oschina/ecust/util/FileUtil.java
|
package net.oschina.ecust.util;
import android.content.Context;
import android.os.Environment;
import android.os.StatFs;
import android.util.Log;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
/**
* File operation utility toolkit
*
* @author liux (http://my.oschina.net/liux)
* @version 1.0
* @created 2012-3-21
*/
public class FileUtil {
/**
* Write a text file. On Android, the file is saved under the /data/data/PACKAGE_NAME/files directory
*
* @param context
* @param fileName
* @param content
*/
public static void write(Context context, String fileName, String content) {
if (content == null)
content = "";
try {
FileOutputStream fos = context.openFileOutput(fileName,
Context.MODE_PRIVATE);
fos.write(content.getBytes());
fos.close();
} catch (Exception e) {
e.printStackTrace();
}
}
/**
* Read a text file
*
* @param context
* @param fileName
* @return
*/
public static String read(Context context, String fileName) {
try {
FileInputStream in = context.openFileInput(fileName);
return readInStream(in);
} catch (Exception e) {
e.printStackTrace();
}
return "";
}
public static String readInStream(InputStream inStream) {
try {
ByteArrayOutputStream outStream = new ByteArrayOutputStream();
byte[] buffer = new byte[512];
int length = -1;
while ((length = inStream.read(buffer)) != -1) {
outStream.write(buffer, 0, length);
}
outStream.close();
inStream.close();
return outStream.toString();
} catch (IOException e) {
Log.i("FileTest", e.getMessage());
}
return null;
}
public static File createFile(String folderPath, String fileName) {
File destDir = new File(folderPath);
if (!destDir.exists()) {
destDir.mkdirs();
}
return new File(folderPath, fileName);
}
/**
* Write an image to the device
*
* @param buffer
* @param folder
* @param fileName
* @return
*/
public static boolean writeFile(byte[] buffer, String folder,
String fileName) {
boolean writeSucc = false;
boolean sdCardExist = Environment.getExternalStorageState().equals(
android.os.Environment.MEDIA_MOUNTED);
String folderPath = "";
if (sdCardExist) {
folderPath = Environment.getExternalStorageDirectory()
+ File.separator + folder + File.separator;
} else {
writeSucc = false;
}
File fileDir = new File(folderPath);
if (!fileDir.exists()) {
fileDir.mkdirs();
}
File file = new File(folderPath + fileName);
FileOutputStream out = null;
try {
out = new FileOutputStream(file);
out.write(buffer);
writeSucc = true;
} catch (Exception e) {
e.printStackTrace();
} finally {
try {
out.close();
} catch (IOException e) {
e.printStackTrace();
}
}
return writeSucc;
}
/**
* Get the file name from an absolute file path
*
* @param filePath
* @return
*/
public static String getFileName(String filePath) {
if (StringUtils.isEmpty(filePath))
return "";
return filePath.substring(filePath.lastIndexOf(File.separator) + 1);
}
/**
* Get the file name, without its extension, from an absolute file path
*
* @param filePath
* @return
*/
public static String getFileNameNoFormat(String filePath) {
if (StringUtils.isEmpty(filePath)) {
return "";
}
int point = filePath.lastIndexOf('.');
return filePath.substring(filePath.lastIndexOf(File.separator) + 1,
point);
}
/**
* Get the file extension
*
* @param fileName
* @return
*/
public static String getFileFormat(String fileName) {
if (StringUtils.isEmpty(fileName))
return "";
int point = fileName.lastIndexOf('.');
return fileName.substring(point + 1);
}
/**
* Get the file size
*
* @param filePath
* @return
*/
public static long getFileSize(String filePath) {
long size = 0;
File file = new File(filePath);
if (file != null && file.exists()) {
size = file.length();
}
return size;
}
/**
* Get the file size as a readable string
*
* @param size size in bytes
* @return
*/
public static String getFileSize(long size) {
if (size <= 0)
return "0";
java.text.DecimalFormat df = new java.text.DecimalFormat("##.##");
float temp = (float) size / 1024;
if (temp >= 1024) {
return df.format(temp / 1024) + "M";
} else {
return df.format(temp) + "K";
}
}
/**
* Convert a file size into a human-readable string
*
* @param fileS
* @return B/KB/MB/GB
*/
public static String formatFileSize(long fileS) {
java.text.DecimalFormat df = new java.text.DecimalFormat("#.00");
String fileSizeString = "";
if (fileS < 1024) {
fileSizeString = df.format((double) fileS) + "B";
} else if (fileS < 1048576) {
fileSizeString = df.format((double) fileS / 1024) + "KB";
} else if (fileS < 1073741824) {
fileSizeString = df.format((double) fileS / 1048576) + "MB";
} else {
fileSizeString = df.format((double) fileS / 1073741824) + "G";
}
return fileSizeString;
}
/**
* Get the total size of a directory
*
* @param dir
* @return
*/
public static long getDirSize(File dir) {
if (dir == null) {
return 0;
}
if (!dir.isDirectory()) {
return 0;
}
long dirSize = 0;
File[] files = dir.listFiles();
if (files != null) {
for (File file : files) {
if (file.isFile()) {
dirSize += file.length();
} else if (file.isDirectory()) {
dirSize += file.length();
dirSize += getDirSize(file); // recurse into subdirectories
}
}
}
return dirSize;
}
/**
* Get the number of files under a directory
*
* @param dir
* @return
*/
public long getFileList(File dir) {
long count = 0;
File[] files = dir.listFiles();
count = files.length;
for (File file : files) {
if (file.isDirectory()) {
count = count + getFileList(file);// recurse
count--;
}
}
return count;
}
public static byte[] toBytes(InputStream in) throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
int ch;
while ((ch = in.read()) != -1) {
out.write(ch);
}
byte buffer[] = out.toByteArray();
out.close();
return buffer;
}
/**
* Check whether a file exists
*
* @param name
* @return
*/
public static boolean checkFileExists(String name) {
boolean status;
if (!name.equals("")) {
File path = Environment.getExternalStorageDirectory();
File newPath = new File(path.toString() + name);
status = newPath.exists();
} else {
status = false;
}
return status;
}
/**
* Check whether a path exists
*
* @param path
* @return
*/
public static boolean checkFilePathExists(String path) {
return new File(path).exists();
}
/**
* Calculate the remaining space on the SD card
*
* @return -1 if no SD card is installed
*/
public static long getFreeDiskSpace() {
String status = Environment.getExternalStorageState();
long freeSpace = 0;
if (status.equals(Environment.MEDIA_MOUNTED)) {
try {
File path = Environment.getExternalStorageDirectory();
StatFs stat = new StatFs(path.getPath());
long blockSize = stat.getBlockSize();
long availableBlocks = stat.getAvailableBlocks();
freeSpace = availableBlocks * blockSize / 1024;
} catch (Exception e) {
e.printStackTrace();
}
} else {
return -1;
}
return (freeSpace);
}
/**
* Create a new directory
*
* @param directoryName
* @return
*/
public static boolean createDirectory(String directoryName) {
boolean status;
if (!directoryName.equals("")) {
File path = Environment.getExternalStorageDirectory();
File newPath = new File(path.toString() + directoryName);
status = newPath.mkdir();
} else
status = false;
return status;
}
/**
* Check whether an SD card is installed
*
* @return
*/
public static boolean checkSaveLocationExists() {
String sDCardStatus = Environment.getExternalStorageState();
boolean status;
if (sDCardStatus.equals(Environment.MEDIA_MOUNTED)) {
status = true;
} else
status = false;
return status;
}
/**
* Check whether an external SD card is installed
*
* @return
*/
public static boolean checkExternalSDExists() {
Map<String, String> evn = System.getenv();
return evn.containsKey("SECONDARY_STORAGE");
}
/**
* Delete a directory (including all files inside it)
*
* @param fileName
* @return
*/
public static boolean deleteDirectory(String fileName) {
boolean status;
SecurityManager checker = new SecurityManager();
if (!fileName.equals("")) {
File path = Environment.getExternalStorageDirectory();
File newPath = new File(path.toString() + fileName);
checker.checkDelete(newPath.toString());
if (newPath.isDirectory()) {
String[] listfile = newPath.list();
try {
for (int i = 0; i < listfile.length; i++) {
File deletedFile = new File(newPath.toString() + "/"
+ listfile[i].toString());
deletedFile.delete();
}
newPath.delete();
Log.i("DirectoryManager deleteDirectory", fileName);
status = true;
} catch (Exception e) {
e.printStackTrace();
status = false;
}
} else
status = false;
} else
status = false;
return status;
}
/**
* Delete a file
*
* @param fileName
* @return
*/
public static boolean deleteFile(String fileName) {
boolean status;
SecurityManager checker = new SecurityManager();
if (!fileName.equals("")) {
File path = Environment.getExternalStorageDirectory();
File newPath = new File(path.toString() + fileName);
checker.checkDelete(newPath.toString());
if (newPath.isFile()) {
try {
Log.i("DirectoryManager deleteFile", fileName);
newPath.delete();
status = true;
} catch (SecurityException se) {
se.printStackTrace();
status = false;
}
} else
status = false;
} else
status = false;
return status;
}
/**
* Delete an empty directory
* <p/>
* Returns 0 for success, 1 if there is no delete permission, 2 if the directory is not empty, 3 for an unknown error
*
* @return
*/
public static int deleteBlankPath(String path) {
File f = new File(path);
if (!f.canWrite()) {
return 1;
}
if (f.list() != null && f.list().length > 0) {
return 2;
}
if (f.delete()) {
return 0;
}
return 3;
}
/**
* Rename a file or directory
*
* @param oldName
* @param newName
* @return
*/
public static boolean reNamePath(String oldName, String newName) {
File f = new File(oldName);
return f.renameTo(new File(newName));
}
/**
* Delete a file
*
* @param filePath
*/
public static boolean deleteFileWithPath(String filePath) {
SecurityManager checker = new SecurityManager();
File f = new File(filePath);
checker.checkDelete(filePath);
if (f.isFile()) {
Log.i("DirectoryManager deleteFile", filePath);
f.delete();
return true;
}
return false;
}
/**
* Empty a folder (delete everything inside it)
*
* @param filePath
*/
public static void clearFileWithPath(String filePath) {
List<File> files = FileUtil.listPathFiles(filePath);
if (files.isEmpty()) {
return;
}
for (File f : files) {
if (f.isDirectory()) {
clearFileWithPath(f.getAbsolutePath());
} else {
f.delete();
}
}
}
/**
* Get the root directory of the SD card
*
* @return
*/
public static String getSDRoot() {
return Environment.getExternalStorageDirectory().getAbsolutePath();
}
/**
* Get the root directory of the phone's external SD card
*
* @return
*/
public static String getExternalSDRoot() {
Map<String, String> evn = System.getenv();
return evn.get("SECONDARY_STORAGE");
}
/**
* List all subdirectories under the given root directory
*
* @param root
* @return absolute paths
*/
public static List<String> listPath(String root) {
List<String> allDir = new ArrayList<String>();
SecurityManager checker = new SecurityManager();
File path = new File(root);
checker.checkRead(root);
// skip folders whose names start with "."
if (path.isDirectory()) {
for (File f : path.listFiles()) {
if (f.isDirectory() && !f.getName().startsWith(".")) {
allDir.add(f.getAbsolutePath());
}
}
}
return allDir;
}
/**
* Get all files under a folder
*
* @param root
* @return
*/
public static List<File> listPathFiles(String root) {
List<File> allDir = new ArrayList<File>();
SecurityManager checker = new SecurityManager();
File path = new File(root);
checker.checkRead(root);
File[] files = path.listFiles();
for (File f : files) {
if (f.isFile())
allDir.add(f);
else
listPath(f.getAbsolutePath());
}
return allDir;
}
public enum PathStatus {
SUCCESS, EXITS, ERROR
}
/**
* Create a directory
*
* @param path
*/
public static PathStatus createPath(String newPath) {
File path = new File(newPath);
if (path.exists()) {
return PathStatus.EXITS;
}
if (path.mkdir()) {
return PathStatus.SUCCESS;
} else {
return PathStatus.ERROR;
}
}
/**
* Extract the last segment of a path
*
* @return
*/
public static String getPathName(String absolutePath) {
int start = absolutePath.lastIndexOf(File.separator) + 1;
int end = absolutePath.length();
return absolutePath.substring(start, end);
}
/**
* Get the specified directory under the application cache folder
*
* @param context
* @param dir
* @return
*/
public static String getAppCache(Context context, String dir) {
String savePath = context.getCacheDir().getAbsolutePath() + "/" + dir + "/";
File savedir = new File(savePath);
if (!savedir.exists()) {
savedir.mkdirs();
}
savedir = null;
return savePath;
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
payments/lister_test.go
|
package payments
import (
"context"
"errors"
"fmt"
"net/http"
"os"
"testing"
"time"
"github.com/cpurta/go-raiden-client/config"
"github.com/ethereum/go-ethereum/common"
"github.com/jarcoal/httpmock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func ExampleLister() {
var (
paymentClient *Client
config = &config.Config{
Host: "http://localhost:5001",
APIVersion: "v1",
}
tokenAddress = common.HexToAddress("0x89d24a6b4ccb1b6faa2625fe562bdd9a23260359") // DAI Stablecoin
targetAddress = common.HexToAddress("")
events []*Event
err error
)
paymentClient = NewClient(config, http.DefaultClient)
if events, err = paymentClient.List(context.Background(), tokenAddress, targetAddress); err != nil {
panic(fmt.Sprintf("unable to list payments: %s", err.Error()))
}
fmt.Printf("successfully listed payment: %+v\n", events)
}
func TestLister(t *testing.T) {
var (
localhostIP = "[::1]"
config = &config.Config{
Host: "http://localhost:5001",
APIVersion: "v1",
}
time1, _ = time.Parse(time.RFC3339, "2018-10-30T07:03:52.193Z")
time2, _ = time.Parse(time.RFC3339, "2018-10-30T07:04:22.293Z")
time3, _ = time.Parse(time.RFC3339, "2018-10-30T07:10:13.122Z")
)
if os.Getenv("USE_IPV4") != "" {
localhostIP = "127.0.0.1"
}
type testcase struct {
name string
prepHTTPMock func()
expectedEvents []*Event
expectedError error
}
testcases := []testcase{
testcase{
name: "successfully returns at least one pending transfer",
prepHTTPMock: func() {
httpmock.RegisterResponder(
"GET",
"http://localhost:5001/api/v1/payments/0x0f114A1E9Db192502E7856309cc899952b3db1ED/0x82641569b2062B545431cF6D7F0A418582865ba7",
httpmock.NewStringResponder(
http.StatusOK,
`[{"event":"EventPaymentReceivedSuccess","amount":5,"initiator":"0x82641569b2062B545431cF6D7F0A418582865ba7","identifier":1,"log_time":"2018-10-30T07:03:52.193Z"},{"event":"EventPaymentSentSuccess","amount":35,"target":"0x82641569b2062B545431cF6D7F0A418582865ba7","identifier":2,"log_time":"2018-10-30T07:04:22.293Z"},{"event":"EventPaymentSentSuccess","amount":20,"target":"0x82641569b2062B545431cF6D7F0A418582865ba7","identifier":3,"log_time":"2018-10-30T07:10:13.122Z"}]`,
),
)
},
expectedError: nil,
expectedEvents: []*Event{
&Event{
EventName: "EventPaymentReceivedSuccess",
Amount: int64(5),
Initiator: common.HexToAddress("0x82641569b2062B545431cF6D7F0A418582865ba7"),
Identifier: int64(1),
LogTime: time1,
},
&Event{
EventName: "EventPaymentSentSuccess",
Amount: int64(35),
Target: common.HexToAddress("0x82641569b2062B545431cF6D7F0A418582865ba7"),
Identifier: int64(2),
LogTime: time2,
},
&Event{
EventName: "EventPaymentSentSuccess",
Amount: int64(20),
Target: common.HexToAddress("0x82641569b2062B545431cF6D7F0A418582865ba7"),
Identifier: int64(3),
LogTime: time3,
},
},
},
testcase{
name: "unexpected 500 response",
prepHTTPMock: func() {
httpmock.RegisterResponder(
"GET",
"http://localhost:5001/api/v1/payments/0x0f114A1E9Db192502E7856309cc899952b3db1ED/0x82641569b2062B545431cF6D7F0A418582865ba7",
httpmock.NewStringResponder(
http.StatusInternalServerError,
``,
),
)
},
expectedError: errors.New("EOF"),
expectedEvents: nil,
},
testcase{
name: "unable to make http request",
prepHTTPMock: func() {
httpmock.Deactivate()
},
expectedError: fmt.Errorf("Get http://localhost:5001/api/v1/payments/0x0f114A1E9Db192502E7856309cc899952b3db1ED/0x82641569b2062B545431cF6D7F0A418582865ba7: dial tcp %s:5001: connect: connection refused", localhostIP),
expectedEvents: nil,
},
}
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
var (
err error
events []*Event
lister = NewLister(config, http.DefaultClient)
ctx = context.Background()
tokenAddress = common.HexToAddress("0x0f114A1E9Db192502E7856309cc899952b3db1ED")
partnerAddress = common.HexToAddress("0x82641569b2062B545431cF6D7F0A418582865ba7")
)
httpmock.Activate()
defer httpmock.Deactivate()
tc.prepHTTPMock()
// test list all
events, err = lister.List(ctx, tokenAddress, partnerAddress)
if tc.expectedError != nil {
assert.EqualError(t, err, tc.expectedError.Error())
return
}
require.NoError(t, err)
assert.Equal(t, tc.expectedEvents, events)
})
}
}
|
[
"\"USE_IPV4\""
] |
[] |
[
"USE_IPV4"
] |
[]
|
["USE_IPV4"]
|
go
| 1 | 0 | |
contrib/for-tests/network-tester/webserver.go
|
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// A tiny web server for checking networking connectivity.
//
// Will dial out to, and expect to hear from, every pod that is a member of
// the service passed in the flag -service.
//
// Will serve a webserver on given -port.
//
// Visit /read to see the current state, or /quit to shut down.
//
// Visit /status to see pass/running/fail determination. (literally, it will
// return one of those words.)
//
// /write is used by other network test pods to register connectivity.
package main
import (
"bytes"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"log"
"math/rand"
"net/http"
"net/url"
"os"
"sync"
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
)
var (
port = flag.Int("port", 8080, "Port number to serve at.")
peerCount = flag.Int("peers", 8, "Must find at least this many peers for the test to pass.")
service = flag.String("service", "nettest", "Service to find other network test pods in.")
namespace = flag.String("namespace", "default", "Namespace of this pod. TODO: kubernetes should make this discoverable.")
)
// State tracks the internal state of our little http server.
// It's returned verbatim over the /read endpoint.
type State struct {
// Hostname is set once and never changed-- it's always safe to read.
Hostname string
// The below fields require that lock is held before reading or writing.
Sent map[string]int
Received map[string]int
Errors []string
Log []string
StillContactingPeers bool
lock sync.Mutex
}
func (s *State) doneContactingPeers() {
s.lock.Lock()
defer s.lock.Unlock()
s.StillContactingPeers = false
}
// serveStatus returns "pass", "running", or "fail".
func (s *State) serveStatus(w http.ResponseWriter, r *http.Request) {
s.lock.Lock()
defer s.lock.Unlock()
if len(s.Sent) >= *peerCount && len(s.Received) >= *peerCount {
fmt.Fprintf(w, "pass")
return
}
if s.StillContactingPeers {
fmt.Fprintf(w, "running")
return
}
s.Logf("Declaring failure for %s/%s with %d sent and %d received and %d peers", *namespace, *service, s.Sent, s.Received, *peerCount)
fmt.Fprintf(w, "fail")
}
// serveRead writes our json encoded state
func (s *State) serveRead(w http.ResponseWriter, r *http.Request) {
s.lock.Lock()
defer s.lock.Unlock()
w.WriteHeader(http.StatusOK)
b, err := json.MarshalIndent(s, "", "\t")
s.appendErr(err)
_, err = w.Write(b)
s.appendErr(err)
}
// WritePost is the format that (json encoded) requests to the /write handler should take.
type WritePost struct {
Source string
Dest string
}
// WriteResp is returned by /write
type WriteResp struct {
Hostname string
}
// serveWrite records the contact in our state.
func (s *State) serveWrite(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
s.lock.Lock()
defer s.lock.Unlock()
w.WriteHeader(http.StatusOK)
var wp WritePost
s.appendErr(json.NewDecoder(r.Body).Decode(&wp))
if wp.Source == "" {
s.appendErr(fmt.Errorf("%v: Got request with no source", s.Hostname))
} else {
if s.Received == nil {
s.Received = map[string]int{}
}
s.Received[wp.Source] += 1
}
s.appendErr(json.NewEncoder(w).Encode(&WriteResp{Hostname: s.Hostname}))
}
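// Illustrative example (not part of the original file): a peer POSTs a body like
//	{"Source":"nettest-abc12","Dest":"http://10.0.0.7:8080"}
// to /write and receives {"Hostname":"nettest-def34"} back; the sender then
// records that hostname via appendSuccessfulSend. The hostnames and IP shown
// here are made up for illustration.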
// appendErr adds err to the list, if err is not nil. s must be locked.
func (s *State) appendErr(err error) {
if err != nil {
s.Errors = append(s.Errors, err.Error())
}
}
// Logf writes to the log message list. s must not be locked.
// s's Log member will drop an old message if it would otherwise
// become longer than 500 messages.
func (s *State) Logf(format string, args ...interface{}) {
s.lock.Lock()
defer s.lock.Unlock()
s.Log = append(s.Log, fmt.Sprintf(format, args...))
if len(s.Log) > 500 {
s.Log = s.Log[1:]
}
}
// s must not be locked
func (s *State) appendSuccessfulSend(toHostname string) {
s.lock.Lock()
defer s.lock.Unlock()
if s.Sent == nil {
s.Sent = map[string]int{}
}
s.Sent[toHostname] += 1
}
var (
// Our one and only state object
state State
)
func main() {
flag.Parse()
if *service == "" {
log.Fatal("Must provide -service flag.")
}
hostname, err := os.Hostname()
if err != nil {
log.Fatalf("Error getting hostname: %v", err)
}
state := State{
Hostname: hostname,
StillContactingPeers: true,
}
go contactOthers(&state)
http.HandleFunc("/quit", func(w http.ResponseWriter, r *http.Request) {
os.Exit(0)
})
http.HandleFunc("/read", state.serveRead)
http.HandleFunc("/write", state.serveWrite)
http.HandleFunc("/status", state.serveStatus)
go log.Fatal(http.ListenAndServe(fmt.Sprintf("0.0.0.0:%d", *port), nil))
select {}
}
// Find all sibling pods in the service and post to their /write handler.
func contactOthers(state *State) {
defer state.doneContactingPeers()
masterRO := url.URL{
Scheme: "http",
Host: os.Getenv("KUBERNETES_RO_SERVICE_HOST") + ":" + os.Getenv("KUBERNETES_RO_SERVICE_PORT"),
Path: "/api/" + latest.Version,
}
client := &client.Client{client.NewRESTClient(&masterRO, latest.Version, latest.Codec, false, 5, 10)}
// Do this repeatedly, in case there's some propagation delay with getting
// newly started pods into the endpoints list.
for i := 0; i < 15; i++ {
endpoints, err := client.Endpoints(*namespace).Get(*service)
if err != nil {
	state.Logf("Unable to read the endpoints for %v/%v: %v; will try again.", *namespace, *service, err)
	time.Sleep(time.Duration(1+rand.Intn(10)) * time.Second)
	continue
}
eps := util.StringSet{}
for _, ss := range endpoints.Subsets {
for _, a := range ss.Addresses {
for _, p := range ss.Ports {
eps.Insert(fmt.Sprintf("http://%s:%d", a.IP, p.Port))
}
}
}
for ep := range eps {
state.Logf("Attempting to contact %s", ep)
contactSingle(ep, state)
}
time.Sleep(5 * time.Second)
}
}
// contactSingle dials the address 'e' and tries to POST to its /write address.
func contactSingle(e string, state *State) {
body, err := json.Marshal(&WritePost{
Dest: e,
Source: state.Hostname,
})
if err != nil {
log.Fatalf("json marshal error: %v", err)
}
resp, err := http.Post(e+"/write", "application/json", bytes.NewReader(body))
if err != nil {
state.Logf("Warning: unable to contact the endpoint %q: %v", e, err)
return
}
defer resp.Body.Close()
body, err = ioutil.ReadAll(resp.Body)
if err != nil {
state.Logf("Warning: unable to read response from '%v': '%v'", e, err)
return
}
var wr WriteResp
err = json.Unmarshal(body, &wr)
if err != nil {
state.Logf("Warning: unable to unmarshal response (%v) from '%v': '%v'", string(body), e, err)
return
}
state.appendSuccessfulSend(wr.Hostname)
}
|
[
"\"KUBERNETES_RO_SERVICE_HOST\"",
"\"KUBERNETES_RO_SERVICE_PORT\""
] |
[] |
[
"KUBERNETES_RO_SERVICE_HOST",
"KUBERNETES_RO_SERVICE_PORT"
] |
[]
|
["KUBERNETES_RO_SERVICE_HOST", "KUBERNETES_RO_SERVICE_PORT"]
|
go
| 2 | 0 | |
code/weather.py
|
import os
import requests
url = "http://api.openweathermap.org/data/2.5/weather?q={city_name}&appid={API_key}&units=metric"
try:
from keys import weather_key
key = weather_key()
except:
key = os.environ.get('WEATHER_KEY')
def currentweather(City):
if key is None:
return "None", "None"
query = url.format(city_name=City, API_key=key)
ans = requests.get(query)
if ans.status_code == 200:
data = ans.json()
temp = data['main']['temp']
temp = round(temp)
temp = str(temp) + "°C"
weather_type = data['weather'][0]['main']
return temp, weather_type
else:
print(f"The city {str(City)} you entered is not valid")
return None, None
if __name__ == "__main__":
City = input("Enter the name of the city: ")
temp, weather_type = currentweather(City)
if temp is None:
print("The city you entered is not valid")
else:
print("The current weather is " + weather_type + " in " + City + " is " + temp)
|
[] |
[] |
[
"WEATHER_KEY"
] |
[]
|
["WEATHER_KEY"]
|
python
| 1 | 0 | |
UNIX/extract_benchmark_data.py
|
#!/usr/bin/python
import argparse
import azure_table
import getpass
import json
import os
import re
import time
from datetime import date
parser = argparse.ArgumentParser(
description='Extract benchmark data from a log file and store to database'
)
parser.add_argument('--logfile',
type=str,
required=True,
help='path to log file containing benchmark data')
parser.add_argument('--pretty-print',
default=False,
action='store_true',
help='print the benchmark config and test data to stdout')
parser.add_argument('--store-to-db',
default=False,
action='store_true',
help='store the benchmark data to an azure table storage')
args = parser.parse_args()
class LogFile:
def __init__(self, logFile):
self.logFile = logFile
self.totalCompileTime = 0.0
def splitEntry(self, entry):
return entry.split(':')
def getValue(self, arr, i):
return arr[i].strip() \
.replace('"', '') \
.replace("'", '')
def getNameValue(self, entry):
arr = self.splitEntry(entry)
if len(arr) < 2:
return (False, None, None)
return (True, self.getValue(arr, 0), self.getValue(arr, 1))
def prettyPrint(self, data):
print json.dumps(data, sort_keys=True, indent=4)
def getRunData(self, configData):
runData = {}
runData['partitionkey'] = os.environ['RUNPARTITIONKEY']
runData['rowkey'] = os.environ['RUNROWKEY']
runData['date'] = date.today().strftime('%Y-%m-%d')
runData['config'] = {}
for option, value in configData.items():
runData['config'][option] = value
runData['config']['TEST_TARGET'] = os.getenv('TEST_TARGET')
return runData
def getTestConfigData(self):
testData = {}
configData = {}
beginConfig = False
with open(self.logFile) as lines:
for line in lines:
line = line.strip()
if line.startswith('***'):
line = line.replace('*', '') \
.replace('::', ':') \
.replace("TEST 'test-suite", 'TEST') \
.replace("' RESULTS", '') \
.replace('.test', '')
(res, name, value) = self.getNameValue(line)
if not res:
continue
# New test record begins.
if name == 'TEST':
testName = value
testData[testName] = {}
testData[testName]['exec_time'] = {}
testData[testName]['section_sizes'] = {}
microTestName = value
elif line.startswith('compile_time:') or \
line.startswith('link_time:') or \
line.startswith('exec_time:') or \
line.startswith('size:') or \
line.startswith('size.'):
(res, name, value) = self.getNameValue(line)
if not res:
continue
if testName not in testData:
continue
if line.startswith('exec_time:'):
testData[testName][name][microTestName] = value
elif line.startswith('size.'):
testData[testName]['section_sizes'][name] = value
else:
testData[testName][name] = value
# Sum the compile times.
if line.startswith('compile_time:'):
self.totalCompileTime += float(value)
elif 'INFO: Configuring with' in line:
beginConfig = True
elif 'INFO: }' in line:
beginConfig = False
if beginConfig:
line = re.sub('^.*INFO:', '', line)
line = line.replace('FILEPATH:', '')
(res, name, value) = self.getNameValue(line)
if not res:
continue
configData[name] = value
return (testData, configData)
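# Illustrative sketch (not part of the original script) of log lines the parser
# above expects; exact spacing and test names are assumptions beyond the
# substrings the code strips or matches:
#
#   *** TEST 'test-suite :: SingleSource/Benchmarks/Foo.test' RESULTS ***
#   compile_time: 1.2345
#   exec_time: 0.6789
#   size.text: 12345
#
# Config lines between an "INFO: Configuring with" line and an "INFO: }" line
# are split into name/value pairs that end up in runData['config'].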
# Read user options.
logFilePath = args.logfile
shouldPrint = args.pretty_print
storeToDB = args.store_to_db
# Test data are the results of the benchmarks like test name, compile time, code
# size, etc.
# Config data are the compiler flags and other options like test target.
# Run data is the config data plus timestamp, username, etc.
logFile = LogFile(logFilePath)
(testData, configData) = logFile.getTestConfigData()
runData = logFile.getRunData(configData)
totalCompileTime = logFile.totalCompileTime
if shouldPrint:
logFile.prettyPrint(runData)
logFile.prettyPrint(testData)
print ('Total compile time: {0} s'.format(totalCompileTime))
if storeToDB:
azure_table.put(runData, testData)
|
[] |
[] |
[
"TEST_TARGET",
"RUNPARTITIONKEY",
"RUNROWKEY"
] |
[]
|
["TEST_TARGET", "RUNPARTITIONKEY", "RUNROWKEY"]
|
python
| 3 | 0 | |
scripts/gen_vimdoc.py
|
#!/usr/bin/env python3
"""Generates Nvim :help docs from C/Lua docstrings, using Doxygen.
Also generates *.mpack files. To inspect the *.mpack structure:
:new | put=v:lua.vim.inspect(msgpackparse(readfile('runtime/doc/api.mpack')))
Flow:
main
extract_from_xml
fmt_node_as_vimhelp \
para_as_map } recursive
update_params_map /
render_node
This would be easier using lxml and XSLT, but:
1. This should avoid needing Python dependencies, especially ones that are
C modules that have library dependencies (lxml requires libxml and
libxslt).
2. I wouldn't know how to deal with nested indentation in <para> tags using
XSLT.
Each function :help block is formatted as follows:
- Max width of 78 columns (`text_width`).
- Indent with spaces (not tabs).
- Indent of 16 columns for body text.
- Function signature and helptag (right-aligned) on the same line.
- Signature and helptag must have a minimum of 8 spaces between them.
- If the signature is too long, it is placed on the line after the helptag.
Signature wraps at `text_width - 8` characters with subsequent
lines indented to the open parenthesis.
- Subsection bodies are indented an additional 4 spaces.
- Body consists of function description, parameters, return description, and
C declaration (`INCLUDE_C_DECL`).
- Parameters are omitted for the `void` and `Error *` types, or if the
parameter is marked as [out].
- Each function documentation is separated by a single line.
"""
import argparse
import os
import re
import sys
import shutil
import textwrap
import subprocess
import collections
import msgpack
from xml.dom import minidom
MIN_PYTHON_VERSION = (3, 5)
if sys.version_info < MIN_PYTHON_VERSION:
print("requires Python {}.{}+".format(*MIN_PYTHON_VERSION))
sys.exit(1)
DEBUG = ('DEBUG' in os.environ)
INCLUDE_C_DECL = ('INCLUDE_C_DECL' in os.environ)
INCLUDE_DEPRECATED = ('INCLUDE_DEPRECATED' in os.environ)
fmt_vimhelp = False # HACK
text_width = 78
script_path = os.path.abspath(__file__)
base_dir = os.path.dirname(os.path.dirname(script_path))
out_dir = os.path.join(base_dir, 'tmp-{target}-doc')
filter_cmd = '%s %s' % (sys.executable, script_path)
seen_funcs = set()
msgs = [] # Messages to show on exit.
lua2dox_filter = os.path.join(base_dir, 'scripts', 'lua2dox_filter')
CONFIG = {
'api': {
'mode': 'c',
'filename': 'api.txt',
# String used to find the start of the generated part of the doc.
'section_start_token': '*api-global*',
# Section ordering.
'section_order': [
'vim.c',
'buffer.c',
'window.c',
'tabpage.c',
'ui.c',
],
# List of files/directories for doxygen to read, separated by blanks
'files': os.path.join(base_dir, 'src/nvim/api'),
# file patterns used by doxygen
'file_patterns': '*.h *.c',
# Only function with this prefix are considered
'fn_name_prefix': 'nvim_',
# Section name overrides.
'section_name': {
'vim.c': 'Global',
},
# For generated section names.
'section_fmt': lambda name: f'{name} Functions',
# Section helptag.
'helptag_fmt': lambda name: f'*api-{name.lower()}*',
# Per-function helptag.
'fn_helptag_fmt': lambda fstem, name: f'*{name}()*',
# Module name overrides (for Lua).
'module_override': {},
# Append the docs for these modules, do not start a new section.
'append_only': [],
},
'lua': {
'mode': 'lua',
'filename': 'lua.txt',
'section_start_token': '*lua-vim*',
'section_order': [
'vim.lua',
'shared.lua',
],
'files': ' '.join([
os.path.join(base_dir, 'src/nvim/lua/vim.lua'),
os.path.join(base_dir, 'runtime/lua/vim/shared.lua'),
]),
'file_patterns': '*.lua',
'fn_name_prefix': '',
'section_name': {
'lsp.lua': 'core',
},
'section_fmt': lambda name: f'Lua module: {name.lower()}',
'helptag_fmt': lambda name: f'*lua-{name.lower()}*',
'fn_helptag_fmt': lambda fstem, name: f'*{fstem}.{name}()*',
'module_override': {
# `shared` functions are exposed on the `vim` module.
'shared': 'vim',
},
'append_only': [
'shared.lua',
],
},
'lsp': {
'mode': 'lua',
'filename': 'lsp.txt',
'section_start_token': '*lsp-core*',
'section_order': [
'lsp.lua',
'protocol.lua',
'buf.lua',
'callbacks.lua',
'log.lua',
'rpc.lua',
'util.lua'
],
'files': ' '.join([
os.path.join(base_dir, 'runtime/lua/vim/lsp'),
os.path.join(base_dir, 'runtime/lua/vim/lsp.lua'),
]),
'file_patterns': '*.lua',
'fn_name_prefix': '',
'section_name': {},
'section_fmt': lambda name: (
'Lua module: vim.lsp'
if name.lower() == 'lsp'
else f'Lua module: vim.lsp.{name.lower()}'),
'helptag_fmt': lambda name: (
'*lsp-core*'
if name.lower() == 'lsp'
else f'*lsp-{name.lower()}*'),
'fn_helptag_fmt': lambda fstem, name: (
f'*vim.lsp.{name}()*'
if fstem == 'lsp' and name != 'client'
else (
'*vim.lsp.client*'
# HACK. TODO(justinmk): class/structure support in lua2dox
if 'lsp.client' == f'{fstem}.{name}'
else f'*vim.lsp.{fstem}.{name}()*')),
'module_override': {},
'append_only': [],
},
}
param_exclude = (
'channel_id',
)
# Annotations are displayed as line items after API function descriptions.
annotation_map = {
'FUNC_API_FAST': '{fast}',
}
# Tracks `xrefsect` titles. As of this writing, used only for separating
# deprecated functions.
xrefs = set()
# Raises an error with details about `o` if `cond` is callable and returns
# True, if `cond` is otherwise truthy, or if `cond` is contained in `o`.
def debug_this(o, cond=True):
name = ''
if not isinstance(o, str):
try:
name = o.nodeName
o = o.toprettyxml(indent=' ', newl='\n')
except Exception:
pass
if ((callable(cond) and cond())
or (not callable(cond) and cond)
or (not callable(cond) and cond in o)):
raise RuntimeError('xxx: {}\n{}'.format(name, o))
# Appends a message to a list which will be printed on exit.
def msg(s):
msgs.append(s)
# Print all collected messages.
def msg_report():
for m in msgs:
print(f' {m}')
# Print collected messages, then throw an exception.
def fail(s):
msg_report()
raise RuntimeError(s)
def find_first(parent, name):
"""Finds the first matching node within parent."""
sub = parent.getElementsByTagName(name)
if not sub:
return None
return sub[0]
def iter_children(parent, name):
"""Yields matching child nodes within parent."""
for child in parent.childNodes:
if child.nodeType == child.ELEMENT_NODE and child.nodeName == name:
yield child
def get_child(parent, name):
"""Gets the first matching child node."""
for child in iter_children(parent, name):
return child
return None
def self_or_child(n):
"""Gets the first child node, or self."""
if len(n.childNodes) == 0:
return n
return n.childNodes[0]
def clean_text(text):
"""Cleans text.
Only cleans superfluous whitespace at the moment.
"""
return ' '.join(text.split()).strip()
def clean_lines(text):
"""Removes superfluous lines.
The beginning and end of the string is trimmed. Empty lines are collapsed.
"""
return re.sub(r'\A\n\s*\n*|\n\s*\n*\Z', '', re.sub(r'(\n\s*\n+)+', '\n\n', text))
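# For example (illustrative, traced by hand from the regexes above):
# clean_lines('\n\na\n\n\nb\n') returns 'a\n\nb' -- leading/trailing blank
# lines are trimmed and interior runs of blank lines collapse to a single
# blank line.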
def is_blank(text):
return '' == clean_lines(text)
def get_text(n, preformatted=False):
"""Recursively concatenates all text in a node tree."""
text = ''
if n.nodeType == n.TEXT_NODE:
return n.data
if n.nodeName == 'computeroutput':
for node in n.childNodes:
text += get_text(node)
return '`{}` '.format(text)
for node in n.childNodes:
if node.nodeType == node.TEXT_NODE:
text += node.data if preformatted else clean_text(node.data)
elif node.nodeType == node.ELEMENT_NODE:
text += ' ' + get_text(node, preformatted)
return text
# Gets the length of the last line in `text`, excluding newline ("\n") char.
def len_lastline(text):
lastnl = text.rfind('\n')
if -1 == lastnl:
return len(text)
if '\n' == text[-1]:
return lastnl - (1 + text.rfind('\n', 0, lastnl))
return len(text) - (1 + lastnl)
def len_lastline_withoutindent(text, indent):
n = len_lastline(text)
return (n - len(indent)) if n > len(indent) else 0
# Returns True if node `n` contains only inline (not block-level) elements.
def is_inline(n):
# if len(n.childNodes) == 0:
# return n.nodeType == n.TEXT_NODE or n.nodeName == 'computeroutput'
for c in n.childNodes:
if c.nodeType != c.TEXT_NODE and c.nodeName != 'computeroutput':
return False
if not is_inline(c):
return False
return True
def doc_wrap(text, prefix='', width=70, func=False, indent=None):
"""Wraps text to `width`.
First line is prefixed with `prefix`, subsequent lines are aligned.
If `func` is True, only wrap at commas.
"""
if not width:
# return prefix + text
return text
# Whitespace used to indent all lines except the first line.
indent = ' ' * len(prefix) if indent is None else indent
indent_only = (prefix == '' and indent is not None)
if func:
lines = [prefix]
for part in text.split(', '):
if part[-1] not in ');':
part += ', '
if len(lines[-1]) + len(part) > width:
lines.append(indent)
lines[-1] += part
return '\n'.join(x.rstrip() for x in lines).rstrip()
# XXX: Dummy prefix to force TextWrapper() to wrap the first line.
if indent_only:
prefix = indent
tw = textwrap.TextWrapper(break_long_words=False,
break_on_hyphens=False,
width=width,
initial_indent=prefix,
subsequent_indent=indent)
result = '\n'.join(tw.wrap(text.strip()))
# XXX: Remove the dummy prefix.
if indent_only:
result = result[len(indent):]
return result
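# Illustrative usage (hypothetical names, traced by hand rather than executed):
# with func=True the text is wrapped only at commas and continuation lines are
# aligned under the opening parenthesis, e.g.
#
#   doc_wrap('{buffer}, {name}, {value}, {opts})',
#            prefix='nvim_example_func(', width=40, func=True)
#
# should produce roughly:
#
#   nvim_example_func({buffer}, {name},
#                     {value}, {opts})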
def max_name(names):
if len(names) == 0:
return 0
return max(len(name) for name in names)
def update_params_map(parent, ret_map, width=62):
"""Updates `ret_map` with name:desc key-value pairs extracted
from Doxygen XML node `parent`.
"""
params = collections.OrderedDict()
for node in parent.childNodes:
if node.nodeType == node.TEXT_NODE:
continue
name_node = find_first(node, 'parametername')
if name_node.getAttribute('direction') == 'out':
continue
name = get_text(name_node)
if name in param_exclude:
continue
params[name.strip()] = node
max_name_len = max_name(params.keys()) + 8
# `ret_map` is a name:desc map.
for name, node in params.items():
desc = ''
desc_node = get_child(node, 'parameterdescription')
if desc_node:
desc = fmt_node_as_vimhelp(
desc_node, width=width, indent=(' ' * max_name_len))
ret_map[name] = desc
return ret_map
def render_node(n, text, prefix='', indent='', width=62):
"""Renders a node as Vim help text, recursively traversing all descendants."""
global fmt_vimhelp
global has_seen_preformatted
def ind(s):
return s if fmt_vimhelp else ''
text = ''
# space_preceding = (len(text) > 0 and ' ' == text[-1][-1])
# text += (int(not space_preceding) * ' ')
if n.nodeName == 'preformatted':
o = get_text(n, preformatted=True)
ensure_nl = '' if o[-1] == '\n' else '\n'
text += '>{}{}\n<'.format(ensure_nl, o)
elif is_inline(n):
text = doc_wrap(get_text(n), indent=indent, width=width)
elif n.nodeName == 'verbatim':
# TODO: currently we don't use this. The "[verbatim]" hint is there as
# a reminder that we must decide how to format this if we do use it.
text += ' [verbatim] {}'.format(get_text(n))
elif n.nodeName == 'listitem':
for c in n.childNodes:
result = render_node(
c,
text,
indent=indent + (' ' * len(prefix)),
width=width
)
if is_blank(result):
continue
text += indent + prefix + result
elif n.nodeName in ('para', 'heading'):
for c in n.childNodes:
text += render_node(c, text, indent=indent, width=width)
elif n.nodeName == 'itemizedlist':
for c in n.childNodes:
text += '{}\n'.format(render_node(c, text, prefix='• ',
indent=indent, width=width))
elif n.nodeName == 'orderedlist':
i = 1
for c in n.childNodes:
if is_blank(get_text(c)):
text += '\n'
continue
text += '{}\n'.format(render_node(c, text, prefix='{}. '.format(i),
indent=indent, width=width))
i = i + 1
elif n.nodeName == 'simplesect' and 'note' == n.getAttribute('kind'):
text += 'Note:\n '
for c in n.childNodes:
text += render_node(c, text, indent=' ', width=width)
text += '\n'
elif n.nodeName == 'simplesect' and 'warning' == n.getAttribute('kind'):
text += 'Warning:\n '
for c in n.childNodes:
text += render_node(c, text, indent=' ', width=width)
text += '\n'
elif (n.nodeName == 'simplesect'
and n.getAttribute('kind') in ('return', 'see')):
text += ind(' ')
for c in n.childNodes:
text += render_node(c, text, indent=' ', width=width)
else:
raise RuntimeError('unhandled node type: {}\n{}'.format(
n.nodeName, n.toprettyxml(indent=' ', newl='\n')))
return text
def para_as_map(parent, indent='', width=62):
"""Extracts a Doxygen XML <para> node to a map.
Keys:
'text': Text from this <para> element
'params': <parameterlist> map
'return': List of @return strings
'seealso': List of @see strings
'xrefs': ?
"""
chunks = {
'text': '',
'params': collections.OrderedDict(),
'return': [],
'seealso': [],
'xrefs': []
}
# Ordered dict of ordered lists.
groups = collections.OrderedDict([
('params', []),
('return', []),
('seealso', []),
('xrefs', []),
])
# Gather nodes into groups. Mostly this is because we want "parameterlist"
# nodes to appear together.
text = ''
kind = ''
last = ''
if is_inline(parent):
# Flatten inline text from a tree of non-block nodes.
text = doc_wrap(render_node(parent, ""), indent=indent, width=width)
else:
prev = None # Previous node
for child in parent.childNodes:
if child.nodeName == 'parameterlist':
groups['params'].append(child)
elif child.nodeName == 'xrefsect':
groups['xrefs'].append(child)
elif child.nodeName == 'simplesect':
last = kind
kind = child.getAttribute('kind')
if kind == 'return' or (kind == 'note' and last == 'return'):
groups['return'].append(child)
elif kind == 'see':
groups['seealso'].append(child)
elif kind in ('note', 'warning'):
text += render_node(child, text, indent=indent, width=width)
else:
raise RuntimeError('unhandled simplesect: {}\n{}'.format(
child.nodeName, child.toprettyxml(indent=' ', newl='\n')))
else:
if (prev is not None
and is_inline(self_or_child(prev))
and is_inline(self_or_child(child))
and '' != get_text(self_or_child(child)).strip()
and ' ' != text[-1]):
text += ' '
text += render_node(child, text, indent=indent, width=width)
prev = child
chunks['text'] += text
# Generate map from the gathered items.
if len(groups['params']) > 0:
for child in groups['params']:
update_params_map(child, ret_map=chunks['params'], width=width)
for child in groups['return']:
chunks['return'].append(render_node(
child, '', indent=indent, width=width))
for child in groups['seealso']:
chunks['seealso'].append(render_node(
child, '', indent=indent, width=width))
for child in groups['xrefs']:
# XXX: Add a space (or any char) to `title` here, otherwise xrefs
# ("Deprecated" section) acts very weird...
title = get_text(get_child(child, 'xreftitle')) + ' '
xrefs.add(title)
xrefdesc = get_text(get_child(child, 'xrefdescription'))
chunks['xrefs'].append(doc_wrap(xrefdesc, prefix='{}: '.format(title),
width=width) + '\n')
return chunks
def fmt_node_as_vimhelp(parent, width=62, indent=''):
"""Renders (nested) Doxygen <para> nodes as Vim :help text.
NB: Blank lines in a docstring manifest as <para> tags.
"""
rendered_blocks = []
def fmt_param_doc(m):
"""Renders a params map as Vim :help text."""
max_name_len = max_name(m.keys()) + 4
out = ''
for name, desc in m.items():
name = ' {}'.format('{{{}}}'.format(name).ljust(max_name_len))
out += '{}{}\n'.format(name, desc)
return out.rstrip()
def has_nonexcluded_params(m):
"""Returns true if any of the given params has at least
one non-excluded item."""
if fmt_param_doc(m) != '':
return True
for child in parent.childNodes:
para = para_as_map(child, indent, width)
# Generate text from the gathered items.
chunks = [para['text']]
if len(para['params']) > 0 and has_nonexcluded_params(para['params']):
chunks.append('\nParameters: ~')
chunks.append(fmt_param_doc(para['params']))
if len(para['return']) > 0:
chunks.append('\nReturn: ~')
for s in para['return']:
chunks.append(s)
if len(para['seealso']) > 0:
chunks.append('\nSee also: ~')
for s in para['seealso']:
chunks.append(s)
for s in para['xrefs']:
chunks.append(s)
rendered_blocks.append(clean_lines('\n'.join(chunks).strip()))
rendered_blocks.append('')
return clean_lines('\n'.join(rendered_blocks).strip())
def extract_from_xml(filename, target, width):
"""Extracts Doxygen info as maps without formatting the text.
Returns two maps:
1. Functions
2. Deprecated functions
The `fmt_vimhelp` global controls some special cases for use by
fmt_doxygen_xml_as_vimhelp(). (TODO: ugly :)
"""
global xrefs
global fmt_vimhelp
xrefs.clear()
fns = {} # Map of func_name:docstring.
deprecated_fns = {} # Map of func_name:docstring.
dom = minidom.parse(filename)
compoundname = get_text(dom.getElementsByTagName('compoundname')[0])
for member in dom.getElementsByTagName('memberdef'):
if member.getAttribute('static') == 'yes' or \
member.getAttribute('kind') != 'function' or \
member.getAttribute('prot') == 'private' or \
get_text(get_child(member, 'name')).startswith('_'):
continue
loc = find_first(member, 'location')
if 'private' in loc.getAttribute('file'):
continue
return_type = get_text(get_child(member, 'type'))
if return_type == '':
continue
if return_type.startswith(('ArrayOf', 'DictionaryOf')):
parts = return_type.strip('_').split('_')
return_type = '{}({})'.format(parts[0], ', '.join(parts[1:]))
name = get_text(get_child(member, 'name'))
annotations = get_text(get_child(member, 'argsstring'))
if annotations and ')' in annotations:
annotations = annotations.rsplit(')', 1)[-1].strip()
# XXX: (doxygen 1.8.11) 'argsstring' only includes attributes of
# non-void functions. Special-case void functions here.
if name == 'nvim_get_mode' and len(annotations) == 0:
annotations += 'FUNC_API_FAST'
annotations = filter(None, map(lambda x: annotation_map.get(x),
annotations.split()))
if not fmt_vimhelp:
pass
else:
fstem = '?'
if '.' in compoundname:
fstem = compoundname.split('.')[0]
fstem = CONFIG[target]['module_override'].get(fstem, fstem)
vimtag = CONFIG[target]['fn_helptag_fmt'](fstem, name)
params = []
type_length = 0
for param in iter_children(member, 'param'):
param_type = get_text(get_child(param, 'type')).strip()
param_name = ''
declname = get_child(param, 'declname')
if declname:
param_name = get_text(declname).strip()
elif CONFIG[target]['mode'] == 'lua':
# XXX: this is what lua2dox gives us...
param_name = param_type
param_type = ''
if param_name in param_exclude:
continue
if fmt_vimhelp and param_type.endswith('*'):
param_type = param_type.strip('* ')
param_name = '*' + param_name
type_length = max(type_length, len(param_type))
params.append((param_type, param_name))
c_args = []
for param_type, param_name in params:
c_args.append((' ' if fmt_vimhelp else '') + (
'%s %s' % (param_type.ljust(type_length), param_name)).strip())
prefix = '%s(' % name
suffix = '%s)' % ', '.join('{%s}' % a[1] for a in params
if a[0] not in ('void', 'Error'))
if not fmt_vimhelp:
c_decl = '%s %s(%s);' % (return_type, name, ', '.join(c_args))
signature = prefix + suffix
else:
c_decl = textwrap.indent('%s %s(\n%s\n);' % (return_type, name,
',\n'.join(c_args)),
' ')
# Minimum 8 chars between signature and vimtag
lhs = (width - 8) - len(vimtag)
if len(prefix) + len(suffix) > lhs:
signature = vimtag.rjust(width) + '\n'
signature += doc_wrap(suffix, width=width-8, prefix=prefix,
func=True)
else:
signature = prefix + suffix
signature += vimtag.rjust(width - len(signature))
paras = []
brief_desc = find_first(member, 'briefdescription')
if brief_desc:
for child in brief_desc.childNodes:
paras.append(para_as_map(child))
desc = find_first(member, 'detaileddescription')
if desc:
for child in desc.childNodes:
paras.append(para_as_map(child))
if DEBUG:
print(textwrap.indent(
re.sub(r'\n\s*\n+', '\n',
desc.toprettyxml(indent=' ', newl='\n')), ' ' * 16))
fn = {
'annotations': list(annotations),
'signature': signature,
'parameters': params,
'parameters_doc': collections.OrderedDict(),
'doc': [],
'return': [],
'seealso': [],
}
if fmt_vimhelp:
fn['desc_node'] = desc # HACK :(
for m in paras:
if 'text' in m:
if not m['text'] == '':
fn['doc'].append(m['text'])
if 'params' in m:
# Merge OrderedDicts.
fn['parameters_doc'].update(m['params'])
if 'return' in m and len(m['return']) > 0:
fn['return'] += m['return']
if 'seealso' in m and len(m['seealso']) > 0:
fn['seealso'] += m['seealso']
if INCLUDE_C_DECL:
fn['c_decl'] = c_decl
if 'Deprecated' in str(xrefs):
deprecated_fns[name] = fn
elif name.startswith(CONFIG[target]['fn_name_prefix']):
fns[name] = fn
xrefs.clear()
fns = collections.OrderedDict(sorted(fns.items()))
deprecated_fns = collections.OrderedDict(sorted(deprecated_fns.items()))
return (fns, deprecated_fns)
def fmt_doxygen_xml_as_vimhelp(filename, target):
"""Entrypoint for generating Vim :help from from Doxygen XML.
Returns 3 items:
1. Vim help text for functions found in `filename`.
2. Vim help text for deprecated functions.
"""
global fmt_vimhelp
fmt_vimhelp = True
fns_txt = {} # Map of func_name:vim-help-text.
deprecated_fns_txt = {} # Map of func_name:vim-help-text.
fns, _ = extract_from_xml(filename, target, width=text_width)
for name, fn in fns.items():
# Generate Vim :help for parameters.
if fn['desc_node']:
doc = fmt_node_as_vimhelp(fn['desc_node'])
if not doc:
doc = 'TODO: Documentation'
annotations = '\n'.join(fn['annotations'])
if annotations:
annotations = ('\n\nAttributes: ~\n' +
textwrap.indent(annotations, ' '))
i = doc.rfind('Parameters: ~')
if i == -1:
doc += annotations
else:
doc = doc[:i] + annotations + '\n\n' + doc[i:]
if INCLUDE_C_DECL:
doc += '\n\nC Declaration: ~\n>\n'
doc += fn['c_decl']
doc += '\n<'
func_doc = fn['signature'] + '\n'
func_doc += textwrap.indent(clean_lines(doc), ' ' * 16)
# Verbatim handling.
func_doc = re.sub(r'^\s+([<>])$', r'\1', func_doc, flags=re.M)
split_lines = func_doc.split('\n')
start = 0
while True:
try:
start = split_lines.index('>', start)
except ValueError:
break
try:
end = split_lines.index('<', start)
except ValueError:
break
split_lines[start + 1:end] = [
(' ' + x).rstrip()
for x in textwrap.dedent(
"\n".join(
split_lines[start+1:end]
)
).split("\n")
]
start = end
func_doc = "\n".join(split_lines)
if 'Deprecated' in xrefs:
deprecated_fns_txt[name] = func_doc
elif name.startswith(CONFIG[target]['fn_name_prefix']):
fns_txt[name] = func_doc
xrefs.clear()
fmt_vimhelp = False
return ('\n\n'.join(list(fns_txt.values())),
'\n\n'.join(list(deprecated_fns_txt.values())))
def delete_lines_below(filename, tokenstr):
"""Deletes all lines below the line containing `tokenstr`, the line itself,
and one line above it.
"""
lines = open(filename).readlines()
i = 0
found = False
for i, line in enumerate(lines, 1):
if tokenstr in line:
found = True
break
if not found:
raise RuntimeError(f'not found: "{tokenstr}"')
i = max(0, i - 2)
with open(filename, 'wt') as fp:
fp.writelines(lines[0:i])
def main(config, args):
"""Generates:
1. Vim :help docs
2. *.mpack files for use by API clients
Doxygen is called and configured through stdin.
"""
for target in CONFIG:
if args.target is not None and target != args.target:
continue
mpack_file = os.path.join(
base_dir, 'runtime', 'doc',
CONFIG[target]['filename'].replace('.txt', '.mpack'))
if os.path.exists(mpack_file):
os.remove(mpack_file)
output_dir = out_dir.format(target=target)
p = subprocess.Popen(
['doxygen', '-'],
stdin=subprocess.PIPE,
# silence warnings
# runtime/lua/vim/lsp.lua:209: warning: argument 'foo' not found
stderr=(subprocess.STDOUT if DEBUG else subprocess.DEVNULL))
p.communicate(
config.format(
input=CONFIG[target]['files'],
output=output_dir,
filter=filter_cmd,
file_patterns=CONFIG[target]['file_patterns'])
.encode('utf8')
)
if p.returncode:
sys.exit(p.returncode)
fn_map_full = {} # Collects all functions as each module is processed.
sections = {}
intros = {}
sep = '=' * text_width
base = os.path.join(output_dir, 'xml')
dom = minidom.parse(os.path.join(base, 'index.xml'))
# generate docs for section intros
for compound in dom.getElementsByTagName('compound'):
if compound.getAttribute('kind') != 'group':
continue
groupname = get_text(find_first(compound, 'name'))
groupxml = os.path.join(base, '%s.xml' %
compound.getAttribute('refid'))
group_parsed = minidom.parse(groupxml)
doc_list = []
brief_desc = find_first(group_parsed, 'briefdescription')
if brief_desc:
for child in brief_desc.childNodes:
doc_list.append(fmt_node_as_vimhelp(child))
desc = find_first(group_parsed, 'detaileddescription')
if desc:
doc = fmt_node_as_vimhelp(desc)
if doc:
doc_list.append(doc)
intros[groupname] = "\n".join(doc_list)
for compound in dom.getElementsByTagName('compound'):
if compound.getAttribute('kind') != 'file':
continue
filename = get_text(find_first(compound, 'name'))
if filename.endswith('.c') or filename.endswith('.lua'):
xmlfile = os.path.join(base,
'{}.xml'.format(compound.getAttribute('refid')))
# Extract unformatted (*.mpack).
fn_map, _ = extract_from_xml(xmlfile, target, width=9999)
# Extract formatted (:help).
functions_text, deprecated_text = fmt_doxygen_xml_as_vimhelp(
os.path.join(base, '{}.xml'.format(
compound.getAttribute('refid'))), target)
if not functions_text and not deprecated_text:
continue
else:
name = os.path.splitext(
os.path.basename(filename))[0].lower()
sectname = name.upper() if name == 'ui' else name.title()
doc = ''
intro = intros.get(f'api-{name}')
if intro:
doc += '\n\n' + intro
if functions_text:
doc += '\n\n' + functions_text
if INCLUDE_DEPRECATED and deprecated_text:
doc += f'\n\n\nDeprecated {sectname} Functions: ~\n\n'
doc += deprecated_text
if doc:
filename = os.path.basename(filename)
sectname = CONFIG[target]['section_name'].get(
filename, sectname)
title = CONFIG[target]['section_fmt'](sectname)
helptag = CONFIG[target]['helptag_fmt'](sectname)
sections[filename] = (title, helptag, doc)
fn_map_full.update(fn_map)
if len(sections) == 0:
fail(f'no sections for target: {target}')
if len(sections) > len(CONFIG[target]['section_order']):
raise RuntimeError(
'found new modules "{}"; update the "section_order" map'.format(
set(sections).difference(CONFIG[target]['section_order'])))
docs = ''
i = 0
for filename in CONFIG[target]['section_order']:
try:
title, helptag, section_doc = sections.pop(filename)
except KeyError:
msg(f'warning: empty docs, skipping (target={target}): {filename}')
continue
i += 1
if filename not in CONFIG[target]['append_only']:
docs += sep
docs += '\n%s%s' % (title,
helptag.rjust(text_width - len(title)))
docs += section_doc
docs += '\n\n\n'
docs = docs.rstrip() + '\n\n'
docs += ' vim:tw=78:ts=8:ft=help:norl:\n'
doc_file = os.path.join(base_dir, 'runtime', 'doc',
CONFIG[target]['filename'])
delete_lines_below(doc_file, CONFIG[target]['section_start_token'])
with open(doc_file, 'ab') as fp:
fp.write(docs.encode('utf8'))
fn_map_full = collections.OrderedDict(sorted(fn_map_full.items()))
with open(mpack_file, 'wb') as fp:
fp.write(msgpack.packb(fn_map_full, use_bin_type=True))
if not args.keep_tmpfiles:
shutil.rmtree(output_dir)
msg_report()
def filter_source(filename):
name, extension = os.path.splitext(filename)
if extension == '.lua':
p = subprocess.run([lua2dox_filter, filename], stdout=subprocess.PIPE)
op = ('?' if 0 != p.returncode else p.stdout.decode('utf-8'))
print(op)
else:
"""Filters the source to fix macros that confuse Doxygen."""
with open(filename, 'rt') as fp:
print(re.sub(r'^(ArrayOf|DictionaryOf)(\(.*?\))',
lambda m: m.group(1)+'_'.join(
re.split(r'[^\w]+', m.group(2))),
fp.read(), flags=re.M))
def parse_args():
targets = ', '.join(CONFIG.keys())
ap = argparse.ArgumentParser()
ap.add_argument('source_filter', nargs='*',
help="Filter source file(s)")
ap.add_argument('-k', '--keep-tmpfiles', action='store_true',
help="Keep temporary files")
ap.add_argument('-t', '--target',
help=f'One of ({targets}), defaults to "all"')
return ap.parse_args()
Doxyfile = textwrap.dedent('''
OUTPUT_DIRECTORY = {output}
INPUT = {input}
INPUT_ENCODING = UTF-8
FILE_PATTERNS = {file_patterns}
RECURSIVE = YES
INPUT_FILTER = "{filter}"
EXCLUDE =
EXCLUDE_SYMLINKS = NO
EXCLUDE_PATTERNS = */private/*
EXCLUDE_SYMBOLS =
EXTENSION_MAPPING = lua=C
EXTRACT_PRIVATE = NO
GENERATE_HTML = NO
GENERATE_DOCSET = NO
GENERATE_HTMLHELP = NO
GENERATE_QHP = NO
GENERATE_TREEVIEW = NO
GENERATE_LATEX = NO
GENERATE_RTF = NO
GENERATE_MAN = NO
GENERATE_DOCBOOK = NO
GENERATE_AUTOGEN_DEF = NO
GENERATE_XML = YES
XML_OUTPUT = xml
XML_PROGRAMLISTING = NO
ENABLE_PREPROCESSING = YES
MACRO_EXPANSION = YES
EXPAND_ONLY_PREDEF = NO
MARKDOWN_SUPPORT = YES
''')
if __name__ == "__main__":
args = parse_args()
if len(args.source_filter) > 0:
filter_source(args.source_filter[0])
else:
main(Doxyfile, args)
# vim: set ft=python ts=4 sw=4 tw=79 et :
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
vendor/github.com/mongodb/mongo-go-driver/mongo/transactions_test.go
|
// Copyright (C) MongoDB, Inc. 2017-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
package mongo
import (
"encoding/json"
"io/ioutil"
"testing"
"context"
"strings"
"bytes"
"os"
"path"
"github.com/mongodb/mongo-go-driver/bson"
"github.com/mongodb/mongo-go-driver/bson/bsontype"
"github.com/mongodb/mongo-go-driver/event"
"github.com/mongodb/mongo-go-driver/internal/testutil"
"github.com/mongodb/mongo-go-driver/internal/testutil/helpers"
"github.com/mongodb/mongo-go-driver/mongo/options"
"github.com/mongodb/mongo-go-driver/mongo/readconcern"
"github.com/mongodb/mongo-go-driver/mongo/readpref"
"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
"github.com/mongodb/mongo-go-driver/x/bsonx"
"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
"github.com/mongodb/mongo-go-driver/x/network/command"
"github.com/mongodb/mongo-go-driver/x/network/description"
"github.com/stretchr/testify/require"
)
const transactionTestsDir = "../data/transactions"
type transTestFile struct {
DatabaseName string `json:"database_name"`
CollectionName string `json:"collection_name"`
Data json.RawMessage `json:"data"`
Tests []*transTestCase `json:"tests"`
}
type transTestCase struct {
Description string `json:"description"`
FailPoint *failPoint `json:"failPoint"`
ClientOptions map[string]interface{} `json:"clientOptions"`
SessionOptions map[string]interface{} `json:"sessionOptions"`
Operations []*transOperation `json:"operations"`
Outcome *transOutcome `json:"outcome"`
Expectations []*transExpectation `json:"expectations"`
}
type failPoint struct {
ConfigureFailPoint string `json:"configureFailPoint"`
Mode json.RawMessage `json:"mode"`
Data *failPointData `json:"data"`
}
type failPointData struct {
FailCommands []string `json:"failCommands"`
CloseConnection bool `json:"closeConnection"`
ErrorCode int32 `json:"errorCode"`
FailBeforeCommitExceptionCode int32 `json:"failBeforeCommitExceptionCode"`
WriteConcernError *struct {
Code int32 `json:"code"`
Errmsg string `json:"errmsg"`
} `json:"writeConcernError"`
}
type transOperation struct {
Name string `json:"name"`
Object string `json:"object"`
CollectionOptions map[string]interface{} `json:"collectionOptions"`
Result json.RawMessage `json:"result"`
Arguments json.RawMessage `json:"arguments"`
ArgMap map[string]interface{}
}
type transOutcome struct {
Collection struct {
Data json.RawMessage `json:"data"`
} `json:"collection"`
}
type transExpectation struct {
CommandStartedEvent struct {
CommandName string `json:"command_name"`
DatabaseName string `json:"database_name"`
Command json.RawMessage `json:"command"`
} `json:"command_started_event"`
}
type transError struct {
ErrorContains string `bson:"errorContains"`
ErrorCodeName string `bson:"errorCodeName"`
ErrorLabelsContain []string `bson:"errorLabelsContain"`
ErrorLabelsOmit []string `bson:"errorLabelsOmit"`
}
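// For orientation, the structs above mirror the JSON layout of the spec files
// in ../data/transactions. An illustrative (not real) test file looks roughly
// like:
//
//	{
//	  "database_name": "transaction-tests",
//	  "collection_name": "test",
//	  "data": [{"_id": 1}],
//	  "tests": [{
//	    "description": "insertOne inside a transaction",
//	    "operations": [{
//	      "name": "insertOne", "object": "collection",
//	      "arguments": {"session": "session0", "document": {"_id": 2}}
//	    }],
//	    "outcome": {"collection": {"data": [{"_id": 1}, {"_id": 2}]}}
//	  }]
//	}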
var transStartedChan = make(chan *event.CommandStartedEvent, 100)
var transMonitor = &event.CommandMonitor{
Started: func(ctx context.Context, cse *event.CommandStartedEvent) {
//fmt.Printf("STARTED: %v\n", cse)
transStartedChan <- cse
},
}
// test case for all TransactionSpec tests
func TestTransactionSpec(t *testing.T) {
for _, file := range testhelpers.FindJSONFilesInDir(t, transactionTestsDir) {
runTransactionTestFile(t, path.Join(transactionTestsDir, file))
}
}
func runTransactionTestFile(t *testing.T, filepath string) {
content, err := ioutil.ReadFile(filepath)
require.NoError(t, err)
var testfile transTestFile
require.NoError(t, json.Unmarshal(content, &testfile))
dbName := "admin"
dbAdmin := createTestDatabase(t, &dbName)
version, err := getServerVersion(dbAdmin)
require.NoError(t, err)
if shouldSkipTransactionsTest(t, version) {
t.Skip()
}
for _, test := range testfile.Tests {
runTransactionsTestCase(t, test, testfile, dbAdmin)
}
}
func runTransactionsTestCase(t *testing.T, test *transTestCase, testfile transTestFile, dbAdmin *Database) {
t.Run(test.Description, func(t *testing.T) {
// kill sessions from previously failed tests
killSessions(t, dbAdmin.client)
// configure failpoint if specified
if test.FailPoint != nil {
doc := createFailPointDoc(t, test.FailPoint)
err := dbAdmin.RunCommand(ctx, doc).Err()
require.NoError(t, err)
defer func() {
// disable failpoint if specified
_ = dbAdmin.RunCommand(ctx, bsonx.Doc{
{"configureFailPoint", bsonx.String(test.FailPoint.ConfigureFailPoint)},
{"mode", bsonx.String("off")},
})
}()
}
client := createTransactionsMonitoredClient(t, transMonitor, test.ClientOptions)
addClientOptions(client, test.ClientOptions)
db := client.Database(testfile.DatabaseName)
collName := sanitizeCollectionName(testfile.DatabaseName, testfile.CollectionName)
err := db.Drop(ctx)
require.NoError(t, err)
err = db.RunCommand(
context.Background(),
bsonx.Doc{{"create", bsonx.String(collName)}},
).Err()
require.NoError(t, err)
// insert data if present
coll := db.Collection(collName)
docsToInsert := docSliceToInterfaceSlice(docSliceFromRaw(t, testfile.Data))
if len(docsToInsert) > 0 {
coll2, err := coll.Clone(options.Collection().SetWriteConcern(writeconcern.New(writeconcern.WMajority())))
require.NoError(t, err)
_, err = coll2.InsertMany(context.Background(), docsToInsert)
require.NoError(t, err)
}
var sess0Opts *options.SessionOptions
var sess1Opts *options.SessionOptions
if test.SessionOptions != nil {
if test.SessionOptions["session0"] != nil {
sess0Opts = getSessionOptions(test.SessionOptions["session0"].(map[string]interface{}))
} else if test.SessionOptions["session1"] != nil {
sess1Opts = getSessionOptions(test.SessionOptions["session1"].(map[string]interface{}))
}
}
session0, err := client.StartSession(sess0Opts)
require.NoError(t, err)
session1, err := client.StartSession(sess1Opts)
require.NoError(t, err)
sess0 := session0.(*sessionImpl)
sess1 := session1.(*sessionImpl)
lsid0 := sess0.SessionID
lsid1 := sess1.SessionID
defer func() {
sess0.EndSession(ctx)
sess1.EndSession(ctx)
}()
// Drain the channel so we only capture events for this test.
for len(transStartedChan) > 0 {
<-transStartedChan
}
for _, op := range test.Operations {
if op.Name == "count" {
t.Skip("count has been deprecated")
}
// create collection with default read preference Primary (needed to prevent server selection failures)
coll = db.Collection(collName, options.Collection().SetReadPreference(readpref.Primary()))
addCollectionOptions(coll, op.CollectionOptions)
// Arguments aren't marshaled directly into a map because runcommand
// needs to convert them into BSON docs. We convert them to a map here
// for getting the session and for all other collection operations
op.ArgMap = getArgMap(t, op.Arguments)
// Get the session if specified in arguments
var sess *sessionImpl
if sessStr, ok := op.ArgMap["session"]; ok {
switch sessStr.(string) {
case "session0":
sess = sess0
case "session1":
sess = sess1
}
}
// execute the command on given object
switch op.Object {
case "session0":
err = executeSessionOperation(op, sess0)
case "session1":
err = executeSessionOperation(op, sess1)
case "collection":
err = executeCollectionOperation(t, op, sess, coll)
case "database":
err = executeDatabaseOperation(t, op, sess, db)
}
// ensure error is what we expect
verifyError(t, err, op.Result)
}
// Needs to be done here (in spite of defer) because some tests
// require EndSession to be called before we check expectations.
sess0.EndSession(ctx)
sess1.EndSession(ctx)
checkExpectations(t, test.Expectations, lsid0, lsid1)
if test.Outcome != nil {
// Verify with primary read pref
coll2, err := coll.Clone(options.Collection().SetReadPreference(readpref.Primary()))
require.NoError(t, err)
verifyCollectionContents(t, coll2, test.Outcome.Collection.Data)
}
})
}
func killSessions(t *testing.T, client *Client) {
s, err := client.topology.SelectServer(ctx, description.WriteSelector())
require.NoError(t, err)
vals := make(bsonx.Arr, 0, 0)
cmd := command.Write{
DB: "admin",
Command: bsonx.Doc{{"killAllSessions", bsonx.Array(vals)}},
}
conn, err := s.Connection(ctx)
require.NoError(t, err)
defer testhelpers.RequireNoErrorOnClose(t, conn)
// ignore the error because command kills its own implicit session
_, _ = cmd.RoundTrip(context.Background(), s.SelectedDescription(), conn)
}
func createTransactionsMonitoredClient(t *testing.T, monitor *event.CommandMonitor, opts map[string]interface{}) *Client {
clock := &session.ClusterClock{}
c := &Client{
topology: createMonitoredTopology(t, clock, monitor),
connString: testutil.ConnString(t),
readPreference: readpref.Primary(),
clock: clock,
registry: bson.NewRegistryBuilder().Build(),
}
addClientOptions(c, opts)
subscription, err := c.topology.Subscribe()
testhelpers.RequireNil(t, err, "error subscribing to topology: %s", err)
c.topology.SessionPool = session.NewPool(subscription.C)
return c
}
func createFailPointDoc(t *testing.T, failPoint *failPoint) bsonx.Doc {
failDoc := bsonx.Doc{{"configureFailPoint", bsonx.String(failPoint.ConfigureFailPoint)}}
modeBytes, err := failPoint.Mode.MarshalJSON()
require.NoError(t, err)
var modeStruct struct {
Times int32 `json:"times"`
Skip int32 `json:"skip"`
}
err = json.Unmarshal(modeBytes, &modeStruct)
if err != nil {
failDoc = append(failDoc, bsonx.Elem{"mode", bsonx.String("alwaysOn")})
} else {
modeDoc := bsonx.Doc{}
if modeStruct.Times != 0 {
modeDoc = append(modeDoc, bsonx.Elem{"times", bsonx.Int32(modeStruct.Times)})
}
if modeStruct.Skip != 0 {
modeDoc = append(modeDoc, bsonx.Elem{"skip", bsonx.Int32(modeStruct.Skip)})
}
failDoc = append(failDoc, bsonx.Elem{"mode", bsonx.Document(modeDoc)})
}
if failPoint.Data != nil {
dataDoc := bsonx.Doc{}
if failPoint.Data.FailCommands != nil {
failCommandElems := make(bsonx.Arr, len(failPoint.Data.FailCommands))
for i, str := range failPoint.Data.FailCommands {
failCommandElems[i] = bsonx.String(str)
}
dataDoc = append(dataDoc, bsonx.Elem{"failCommands", bsonx.Array(failCommandElems)})
}
if failPoint.Data.CloseConnection {
dataDoc = append(dataDoc, bsonx.Elem{"closeConnection", bsonx.Boolean(failPoint.Data.CloseConnection)})
}
if failPoint.Data.ErrorCode != 0 {
dataDoc = append(dataDoc, bsonx.Elem{"errorCode", bsonx.Int32(failPoint.Data.ErrorCode)})
}
if failPoint.Data.WriteConcernError != nil {
dataDoc = append(dataDoc,
bsonx.Elem{"writeConcernError", bsonx.Document(bsonx.Doc{
{"code", bsonx.Int32(failPoint.Data.WriteConcernError.Code)},
{"errmsg", bsonx.String(failPoint.Data.WriteConcernError.Errmsg)},
})},
)
}
if failPoint.Data.FailBeforeCommitExceptionCode != 0 {
dataDoc = append(dataDoc, bsonx.Elem{"failBeforeCommitExceptionCode", bsonx.Int32(failPoint.Data.FailBeforeCommitExceptionCode)})
}
failDoc = append(failDoc, bsonx.Elem{"data", bsonx.Document(dataDoc)})
}
return failDoc
}
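// As a rough example (illustrative values only), a spec fail point such as
//
//	{"configureFailPoint": "failCommand", "mode": {"times": 1},
//	 "data": {"failCommands": ["commitTransaction"], "errorCode": 11600}}
//
// is converted by createFailPointDoc above into an equivalent bsonx.Doc that
// the test then runs against the admin database via RunCommand.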
func executeSessionOperation(op *transOperation, sess *sessionImpl) error {
switch op.Name {
case "startTransaction":
// options are only argument
var transOpts *options.TransactionOptions
if op.ArgMap["options"] != nil {
transOpts = getTransactionOptions(op.ArgMap["options"].(map[string]interface{}))
}
return sess.StartTransaction(transOpts)
case "commitTransaction":
return sess.CommitTransaction(ctx)
case "abortTransaction":
return sess.AbortTransaction(ctx)
}
return nil
}
func executeCollectionOperation(t *testing.T, op *transOperation, sess *sessionImpl, coll *Collection) error {
switch op.Name {
case "countDocuments":
_, err := executeCountDocuments(sess, coll, op.ArgMap)
// no results to verify with count
return err
case "distinct":
res, err := executeDistinct(sess, coll, op.ArgMap)
if !resultHasError(t, op.Result) {
verifyDistinctResult(t, res, op.Result)
}
return err
case "insertOne":
res, err := executeInsertOne(sess, coll, op.ArgMap)
if !resultHasError(t, op.Result) {
verifyInsertOneResult(t, res, op.Result)
}
return err
case "insertMany":
res, err := executeInsertMany(sess, coll, op.ArgMap)
if !resultHasError(t, op.Result) {
verifyInsertManyResult(t, res, op.Result)
}
return err
case "find":
res, err := executeFind(sess, coll, op.ArgMap)
if !resultHasError(t, op.Result) {
verifyCursorResult(t, res, op.Result)
}
return err
case "findOneAndDelete":
res := executeFindOneAndDelete(sess, coll, op.ArgMap)
if !resultHasError(t, op.Result) {
verifySingleResult(t, res, op.Result)
}
return res.err
case "findOneAndUpdate":
res := executeFindOneAndUpdate(sess, coll, op.ArgMap)
if !resultHasError(t, op.Result) {
verifySingleResult(t, res, op.Result)
}
return res.err
case "findOneAndReplace":
res := executeFindOneAndReplace(sess, coll, op.ArgMap)
if !resultHasError(t, op.Result) {
verifySingleResult(t, res, op.Result)
}
return res.err
case "deleteOne":
res, err := executeDeleteOne(sess, coll, op.ArgMap)
if !resultHasError(t, op.Result) {
verifyDeleteResult(t, res, op.Result)
}
return err
case "deleteMany":
res, err := executeDeleteMany(sess, coll, op.ArgMap)
if !resultHasError(t, op.Result) {
verifyDeleteResult(t, res, op.Result)
}
return err
case "updateOne":
res, err := executeUpdateOne(sess, coll, op.ArgMap)
if !resultHasError(t, op.Result) {
verifyUpdateResult(t, res, op.Result)
}
return err
case "updateMany":
res, err := executeUpdateMany(sess, coll, op.ArgMap)
if !resultHasError(t, op.Result) {
verifyUpdateResult(t, res, op.Result)
}
return err
case "replaceOne":
res, err := executeReplaceOne(sess, coll, op.ArgMap)
if !resultHasError(t, op.Result) {
verifyUpdateResult(t, res, op.Result)
}
return err
case "aggregate":
res, err := executeAggregate(sess, coll, op.ArgMap)
if !resultHasError(t, op.Result) {
verifyCursorResult2(t, res, op.Result)
}
return err
case "bulkWrite":
// TODO reenable when bulk writes implemented
t.Skip("Skipping until bulk writes implemented")
}
return nil
}
func executeDatabaseOperation(t *testing.T, op *transOperation, sess *sessionImpl, db *Database) error {
switch op.Name {
case "runCommand":
var result bsonx.Doc
err := executeRunCommand(sess, db, op.ArgMap, op.Arguments).Decode(&result)
if !resultHasError(t, op.Result) {
res, err := result.MarshalBSON()
if err != nil {
return err
}
verifyRunCommandResult(t, res, op.Result)
}
return err
}
return nil
}
func verifyError(t *testing.T, e error, result json.RawMessage) {
expected := getErrorFromResult(t, result)
if expected == nil {
return
}
if cerr, ok := e.(command.Error); ok {
if expected.ErrorCodeName != "" {
require.NotNil(t, cerr)
require.Equal(t, expected.ErrorCodeName, cerr.Name)
}
if expected.ErrorContains != "" {
require.NotNil(t, cerr, "Expected error %v", expected.ErrorContains)
require.Contains(t, strings.ToLower(cerr.Message), strings.ToLower(expected.ErrorContains))
}
if expected.ErrorLabelsContain != nil {
require.NotNil(t, cerr)
for _, l := range expected.ErrorLabelsContain {
require.True(t, cerr.HasErrorLabel(l), "Error missing error label %s", l)
}
}
if expected.ErrorLabelsOmit != nil {
require.NotNil(t, cerr)
for _, l := range expected.ErrorLabelsOmit {
require.False(t, cerr.HasErrorLabel(l))
}
}
} else {
require.Equal(t, expected.ErrorCodeName, "")
require.Equal(t, len(expected.ErrorLabelsContain), 0)
// ErrorLabelsOmit can contain anything, since all labels are omitted when e is not of type command.Error,
// so we do not check that here.
if expected.ErrorContains != "" {
require.NotNil(t, e, "Expected error %v", expected.ErrorContains)
require.Contains(t, strings.ToLower(e.Error()), strings.ToLower(expected.ErrorContains))
}
}
}
func resultHasError(t *testing.T, result json.RawMessage) bool {
if result == nil {
return false
}
res := getErrorFromResult(t, result)
if res == nil {
return false
}
return res.ErrorLabelsOmit != nil ||
res.ErrorLabelsContain != nil ||
res.ErrorCodeName != "" ||
res.ErrorContains != ""
}
func getErrorFromResult(t *testing.T, result json.RawMessage) *transError {
expectedBytes, err := result.MarshalJSON()
require.NoError(t, err)
var expected transError
err = json.NewDecoder(bytes.NewBuffer(expectedBytes)).Decode(&expected)
if err != nil {
return nil
}
return &expected
}
func checkExpectations(t *testing.T, expectations []*transExpectation, id0 bsonx.Doc, id1 bsonx.Doc) {
for _, expectation := range expectations {
var evt *event.CommandStartedEvent
select {
case evt = <-transStartedChan:
default:
require.Fail(t, "Expected command started event", expectation.CommandStartedEvent.CommandName)
}
require.Equal(t, expectation.CommandStartedEvent.CommandName, evt.CommandName)
require.Equal(t, expectation.CommandStartedEvent.DatabaseName, evt.DatabaseName)
jsonBytes, err := expectation.CommandStartedEvent.Command.MarshalJSON()
require.NoError(t, err)
expected := bsonx.Doc{}
err = bson.UnmarshalExtJSON(jsonBytes, true, &expected)
require.NoError(t, err)
actual := evt.Command
for _, elem := range expected {
key := elem.Key
val := elem.Value
actualVal := actual.Lookup(key)
// Keys that may be nil
if val.Type() == bson.TypeNull {
require.Equal(t, actual.Lookup(key), bson.RawValue{}, "Expected %s to be nil", key)
continue
} else if key == "ordered" {
// TODO: some tests specify that "ordered" must be a key in the event but ordered isn't a valid option for some of these cases (e.g. insertOne)
continue
}
// Keys that should not be nil
require.NotEqual(t, actualVal.Type, bsontype.Null, "Expected %v, got nil for key: %s", elem, key)
require.NoError(t, actualVal.Validate())
if key == "lsid" {
if val.StringValue() == "session0" {
doc, err := bsonx.ReadDoc(actualVal.Document())
require.NoError(t, err)
require.True(t, id0.Equal(doc), "Session ID mismatch")
}
if val.StringValue() == "session1" {
doc, err := bsonx.ReadDoc(actualVal.Document())
require.NoError(t, err)
require.True(t, id1.Equal(doc), "Session ID mismatch")
}
} else if key == "getMore" {
require.NotNil(t, actualVal, "Expected %v, got nil for key: %s", elem, key)
expectedCursorID := val.Int64()
// ignore if equal to 42
if expectedCursorID != 42 {
require.Equal(t, expectedCursorID, actualVal.Int64())
}
} else if key == "readConcern" {
rcExpectDoc := val.Document()
rcActualDoc := actualVal.Document()
clusterTime := rcExpectDoc.Lookup("afterClusterTime")
level := rcExpectDoc.Lookup("level")
if clusterTime.Type() != bsontype.Null {
require.NotNil(t, rcActualDoc.Lookup("afterClusterTime"))
}
if level.Type() != bsontype.Null {
doc, err := bsonx.ReadDoc(rcActualDoc)
require.NoError(t, err)
compareElements(t, rcExpectDoc.LookupElement("level"), doc.LookupElement("level"))
}
} else {
doc, err := bsonx.ReadDoc(actual)
require.NoError(t, err)
compareElements(t, elem, doc.LookupElement(key))
}
}
}
}
// convert operation arguments from raw message into map
func getArgMap(t *testing.T, args json.RawMessage) map[string]interface{} {
if args == nil {
return nil
}
var argmap map[string]interface{}
err := json.Unmarshal(args, &argmap)
require.NoError(t, err)
return argmap
}
func getSessionOptions(opts map[string]interface{}) *options.SessionOptions {
sessOpts := options.Session()
for name, opt := range opts {
switch name {
case "causalConsistency":
sessOpts = sessOpts.SetCausalConsistency(opt.(bool))
case "defaultTransactionOptions":
transOpts := opt.(map[string]interface{})
if transOpts["readConcern"] != nil {
sessOpts = sessOpts.SetDefaultReadConcern(getReadConcern(transOpts["readConcern"]))
}
if transOpts["writeConcern"] != nil {
sessOpts = sessOpts.SetDefaultWriteConcern(getWriteConcern(transOpts["writeConcern"]))
}
if transOpts["readPreference"] != nil {
sessOpts = sessOpts.SetDefaultReadPreference(getReadPref(transOpts["readPreference"]))
}
}
}
return sessOpts
}
func getTransactionOptions(opts map[string]interface{}) *options.TransactionOptions {
transOpts := options.Transaction()
for name, opt := range opts {
switch name {
case "writeConcern":
transOpts = transOpts.SetWriteConcern(getWriteConcern(opt))
case "readPreference":
transOpts = transOpts.SetReadPreference(getReadPref(opt))
case "readConcern":
transOpts = transOpts.SetReadConcern(getReadConcern(opt))
}
}
return transOpts
}
func getWriteConcern(opt interface{}) *writeconcern.WriteConcern {
if w, ok := opt.(map[string]interface{}); ok {
if conv, ok := w["w"].(string); ok && conv == "majority" {
return writeconcern.New(writeconcern.WMajority())
} else if conv, ok := w["w"].(float64); ok {
return writeconcern.New(writeconcern.W(int(conv)))
}
}
return nil
}
func getReadConcern(opt interface{}) *readconcern.ReadConcern {
return readconcern.New(readconcern.Level(opt.(map[string]interface{})["level"].(string)))
}
func getReadPref(opt interface{}) *readpref.ReadPref {
if conv, ok := opt.(map[string]interface{}); ok {
return readPrefFromString(conv["mode"].(string))
}
return nil
}
func readPrefFromString(s string) *readpref.ReadPref {
switch strings.ToLower(s) {
case "primary":
return readpref.Primary()
case "primarypreferred":
return readpref.PrimaryPreferred()
case "secondary":
return readpref.Secondary()
case "secondarypreferred":
return readpref.SecondaryPreferred()
case "nearest":
return readpref.Nearest()
}
return readpref.Primary()
}
// skip if server version less than 4.0 OR not a replica set.
func shouldSkipTransactionsTest(t *testing.T, serverVersion string) bool {
return compareVersions(t, serverVersion, "4.0") < 0 ||
os.Getenv("TOPOLOGY") != "replica_set"
}
|
[
"\"TOPOLOGY\""
] |
[] |
[
"TOPOLOGY"
] |
[]
|
["TOPOLOGY"]
|
go
| 1 | 0 | |
test/e2e/addon_update.go
|
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"bytes"
"fmt"
"io"
"os"
"strings"
"time"
"golang.org/x/crypto/ssh"
"k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// TODO: it would probably be slightly better to build up the objects
// in the code and then serialize to yaml.
var addon_controller_v1 = `
apiVersion: v1
kind: ReplicationController
metadata:
name: addon-test-v1
namespace: %s
labels:
k8s-app: addon-test
version: v1
kubernetes.io/cluster-service: "true"
spec:
replicas: 2
selector:
k8s-app: addon-test
version: v1
template:
metadata:
labels:
k8s-app: addon-test
version: v1
kubernetes.io/cluster-service: "true"
spec:
containers:
- image: gcr.io/google_containers/serve_hostname:v1.4
name: addon-test
ports:
- containerPort: 9376
protocol: TCP
`
var addon_controller_v2 = `
apiVersion: v1
kind: ReplicationController
metadata:
name: addon-test-v2
namespace: %s
labels:
k8s-app: addon-test
version: v2
kubernetes.io/cluster-service: "true"
spec:
replicas: 2
selector:
k8s-app: addon-test
version: v2
template:
metadata:
labels:
k8s-app: addon-test
version: v2
kubernetes.io/cluster-service: "true"
spec:
containers:
- image: gcr.io/google_containers/serve_hostname:v1.4
name: addon-test
ports:
- containerPort: 9376
protocol: TCP
`
var addon_service_v1 = `
apiVersion: v1
kind: Service
metadata:
name: addon-test
namespace: %s
labels:
k8s-app: addon-test
kubernetes.io/cluster-service: "true"
kubernetes.io/name: addon-test
spec:
ports:
- port: 9376
protocol: TCP
targetPort: 9376
selector:
k8s-app: addon-test
`
var addon_service_v2 = `
apiVersion: v1
kind: Service
metadata:
name: addon-test-updated
namespace: %s
labels:
k8s-app: addon-test
kubernetes.io/cluster-service: "true"
kubernetes.io/name: addon-test
newLabel: newValue
spec:
ports:
- port: 9376
protocol: TCP
targetPort: 9376
selector:
k8s-app: addon-test
`
var invalid_addon_controller_v1 = `
apiVersion: v1
kind: ReplicationController
metadata:
name: invalid-addon-test-v1
namespace: %s
labels:
k8s-app: invalid-addon-test
version: v1
spec:
replicas: 2
selector:
k8s-app: invalid-addon-test
version: v1
template:
metadata:
labels:
k8s-app: invalid-addon-test
version: v1
kubernetes.io/cluster-service: "true"
spec:
containers:
- image: gcr.io/google_containers/serve_hostname:v1.4
name: invalid-addon-test
ports:
- containerPort: 9376
protocol: TCP
`
var invalid_addon_service_v1 = `
apiVersion: v1
kind: Service
metadata:
name: ivalid-addon-test
namespace: %s
labels:
k8s-app: invalid-addon-test
kubernetes.io/name: invalid-addon-test
spec:
ports:
- port: 9377
protocol: TCP
targetPort: 9376
selector:
k8s-app: invalid-addon-test
`
var addonTestPollInterval = 3 * time.Second
var addonTestPollTimeout = 5 * time.Minute
var defaultNsName = api.NamespaceDefault
type stringPair struct {
data, fileName string
}
var _ = framework.KubeDescribe("Addon update", func() {
var dir string
var sshClient *ssh.Client
f := framework.NewDefaultFramework("addon-update-test")
BeforeEach(func() {
// This test requires:
// - SSH master access
// ... so the provider check should be identical to the intersection of
// providers that provide those capabilities.
if !framework.ProviderIs("gce") {
return
}
var err error
sshClient, err = getMasterSSHClient()
Expect(err).NotTo(HaveOccurred())
// Reduce the addon update intervals so that we have faster response
// to changes in the addon directory.
// do not use "service" command because it clears the environment variables
switch framework.TestContext.OSDistro {
case "debian":
sshExecAndVerify(sshClient, "sudo TEST_ADDON_CHECK_INTERVAL_SEC=1 /etc/init.d/kube-addons restart")
case "trusty":
sshExecAndVerify(sshClient, "sudo initctl restart kube-addons TEST_ADDON_CHECK_INTERVAL_SEC=1")
case "coreos":
sshExecAndVerify(sshClient, "sudo systemctl set-environment TEST_ADDON_CHECK_INTERVAL_SEC=1")
sshExecAndVerify(sshClient, "sudo systemctl restart kubernetes-addons")
default:
framework.Failf("Unsupported OS distro type %s", framework.TestContext.OSDistro)
}
})
AfterEach(func() {
if sshClient != nil {
// restart addon_update with the default options
switch framework.TestContext.OSDistro {
case "debian":
sshExec(sshClient, "sudo /etc/init.d/kube-addons restart")
case "trusty":
sshExec(sshClient, "sudo initctl restart kube-addons")
case "coreos":
sshExec(sshClient, "sudo systemctl unset-environment TEST_ADDON_CHECK_INTERVAL_SEC")
sshExec(sshClient, "sudo systemctl restart kubernetes-addons")
default:
framework.Failf("Unsupported OS distro type %s", framework.TestContext.OSDistro)
}
sshClient.Close()
}
})
// WARNING: the test is not parallel-friendly!
It("should propagate add-on file changes", func() {
// This test requires:
// - SSH
// - master access
// ... so the provider check should be identical to the intersection of
// providers that provide those capabilities.
framework.SkipUnlessProviderIs("gce")
// these tests are long, so several cases are squeezed into one scenario
Expect(sshClient).NotTo(BeNil())
dir = f.Namespace.Name // we use it only to give a unique string for each test execution
temporaryRemotePathPrefix := "addon-test-dir"
temporaryRemotePath := temporaryRemotePathPrefix + "/" + dir // in home directory on kubernetes-master
defer sshExec(sshClient, fmt.Sprintf("rm -rf %s", temporaryRemotePathPrefix)) // ignore the result in cleanup
sshExecAndVerify(sshClient, fmt.Sprintf("mkdir -p %s", temporaryRemotePath))
rcv1 := "addon-controller-v1.yaml"
rcv2 := "addon-controller-v2.yaml"
rcInvalid := "invalid-addon-controller-v1.yaml"
svcv1 := "addon-service-v1.yaml"
svcv2 := "addon-service-v2.yaml"
svcInvalid := "invalid-addon-service-v1.yaml"
var remoteFiles []stringPair = []stringPair{
{fmt.Sprintf(addon_controller_v1, defaultNsName), rcv1},
{fmt.Sprintf(addon_controller_v2, f.Namespace.Name), rcv2},
{fmt.Sprintf(addon_service_v1, f.Namespace.Name), svcv1},
{fmt.Sprintf(addon_service_v2, f.Namespace.Name), svcv2},
{fmt.Sprintf(invalid_addon_controller_v1, f.Namespace.Name), rcInvalid},
{fmt.Sprintf(invalid_addon_service_v1, defaultNsName), svcInvalid},
}
for _, p := range remoteFiles {
err := writeRemoteFile(sshClient, p.data, temporaryRemotePath, p.fileName, 0644)
Expect(err).NotTo(HaveOccurred())
}
// directory on kubernetes-master
destinationDirPrefix := "/etc/kubernetes/addons/addon-test-dir"
destinationDir := destinationDirPrefix + "/" + dir
// cleanup from previous tests
_, _, _, err := sshExec(sshClient, fmt.Sprintf("sudo rm -rf %s", destinationDirPrefix))
Expect(err).NotTo(HaveOccurred())
defer sshExec(sshClient, fmt.Sprintf("sudo rm -rf %s", destinationDirPrefix)) // ignore result in cleanup
sshExecAndVerify(sshClient, fmt.Sprintf("sudo mkdir -p %s", destinationDir))
By("copy invalid manifests to the destination dir (without kubernetes.io/cluster-service label)")
sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, rcInvalid, destinationDir, rcInvalid))
sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcInvalid, destinationDir, svcInvalid))
// we will verify at the end of the test that the objects weren't created from the invalid manifests
By("copy new manifests")
sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, rcv1, destinationDir, rcv1))
sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcv1, destinationDir, svcv1))
waitForServiceInAddonTest(f.Client, f.Namespace.Name, "addon-test", true)
waitForReplicationControllerInAddonTest(f.Client, defaultNsName, "addon-test-v1", true)
By("update manifests")
sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, rcv2, destinationDir, rcv2))
sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcv2, destinationDir, svcv2))
sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, rcv1))
sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, svcv1))
/**
* Note that we have a small race condition here - the kube-addon-updater
* may notice that a new rc/service file appeared while the old one is still there.
* But that is ok - as long as we don't have rolling updates, the result will be the same.
*/
waitForServiceInAddonTest(f.Client, f.Namespace.Name, "addon-test-updated", true)
waitForReplicationControllerInAddonTest(f.Client, f.Namespace.Name, "addon-test-v2", true)
waitForServiceInAddonTest(f.Client, f.Namespace.Name, "addon-test", false)
waitForReplicationControllerInAddonTest(f.Client, defaultNsName, "addon-test-v1", false)
By("remove manifests")
sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, rcv2))
sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, svcv2))
waitForServiceInAddonTest(f.Client, f.Namespace.Name, "addon-test-updated", false)
waitForReplicationControllerInAddonTest(f.Client, f.Namespace.Name, "addon-test-v2", false)
By("verify invalid API addons weren't created")
_, err = f.Client.ReplicationControllers(f.Namespace.Name).Get("invalid-addon-test-v1")
Expect(err).To(HaveOccurred())
_, err = f.Client.ReplicationControllers(defaultNsName).Get("invalid-addon-test-v1")
Expect(err).To(HaveOccurred())
_, err = f.Client.Services(f.Namespace.Name).Get("ivalid-addon-test")
Expect(err).To(HaveOccurred())
_, err = f.Client.Services(defaultNsName).Get("ivalid-addon-test")
Expect(err).To(HaveOccurred())
// invalid addons will be deleted by the deferred function
})
})
func waitForServiceInAddonTest(c *client.Client, addonNamespace, name string, exist bool) {
framework.ExpectNoError(framework.WaitForService(c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout))
}
func waitForReplicationControllerInAddonTest(c *client.Client, addonNamespace, name string, exist bool) {
framework.ExpectNoError(framework.WaitForReplicationController(c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout))
}
// TODO marekbiskup 2015-06-11: merge the ssh code into pkg/util/ssh.go after
// kubernetes v1.0 is released. In particular the code of sshExec.
func getMasterSSHClient() (*ssh.Client, error) {
// Get a signer for the provider.
signer, err := framework.GetSigner(framework.TestContext.Provider)
if err != nil {
return nil, fmt.Errorf("error getting signer for provider %s: '%v'", framework.TestContext.Provider, err)
}
config := &ssh.ClientConfig{
User: os.Getenv("USER"),
Auth: []ssh.AuthMethod{ssh.PublicKeys(signer)},
}
host := framework.GetMasterHost() + ":22"
client, err := ssh.Dial("tcp", host, config)
if err != nil {
return nil, fmt.Errorf("error getting SSH client to host %s: '%v'", host, err)
}
return client, err
}
func sshExecAndVerify(client *ssh.Client, cmd string) {
_, _, rc, err := sshExec(client, cmd)
Expect(err).NotTo(HaveOccurred())
Expect(rc).To(Equal(0), "error return code from executing command on the cluster: %s", cmd)
}
func sshExec(client *ssh.Client, cmd string) (string, string, int, error) {
framework.Logf("Executing '%s' on %v", cmd, client.RemoteAddr())
session, err := client.NewSession()
if err != nil {
return "", "", 0, fmt.Errorf("error creating session to host %s: '%v'", client.RemoteAddr(), err)
}
defer session.Close()
// Run the command.
code := 0
var bout, berr bytes.Buffer
session.Stdout, session.Stderr = &bout, &berr
err = session.Run(cmd)
if err != nil {
// Check whether the command failed to run or didn't complete.
if exiterr, ok := err.(*ssh.ExitError); ok {
// If we got an ExitError and the exit code is nonzero, we'll
// consider the SSH itself successful (just that the command run
// errored on the host).
if code = exiterr.ExitStatus(); code != 0 {
err = nil
}
} else {
// Some other kind of error happened (e.g. an IOError); consider the
// SSH unsuccessful.
err = fmt.Errorf("failed running `%s` on %s: '%v'", cmd, client.RemoteAddr(), err)
}
}
return bout.String(), berr.String(), code, err
}
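// Illustrative sketch (not part of the original test; the command string is only an
// example): sshExec above deliberately separates transport-level SSH failures from
// commands that ran but exited nonzero, so callers can branch on (err, code) like this.
func exampleClassifySSHResult(client *ssh.Client) {
	stdout, stderr, code, err := sshExec(client, "ls /etc/kubernetes/addons")
	switch {
	case err != nil:
		// The SSH session itself failed (e.g. the session could not be created).
		framework.Logf("ssh failed: %v", err)
	case code != 0:
		// The command ran on the host but returned a nonzero exit status.
		framework.Logf("command exited with %d, stderr: %s", code, stderr)
	default:
		framework.Logf("command succeeded, stdout: %s", stdout)
	}
}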
func writeRemoteFile(sshClient *ssh.Client, data, dir, fileName string, mode os.FileMode) error {
framework.Logf("Writing remote file '%s/%s' on %v", dir, fileName, sshClient.RemoteAddr())
session, err := sshClient.NewSession()
if err != nil {
return fmt.Errorf("error creating session to host %s: '%v'", sshClient.RemoteAddr(), err)
}
defer session.Close()
fileSize := len(data)
pipe, err := session.StdinPipe()
if err != nil {
return err
}
defer pipe.Close()
if err := session.Start(fmt.Sprintf("scp -t %s", dir)); err != nil {
return err
}
fmt.Fprintf(pipe, "C%#o %d %s\n", mode, fileSize, fileName)
io.Copy(pipe, strings.NewReader(data))
fmt.Fprint(pipe, "\x00")
pipe.Close()
return session.Wait()
}
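// Illustrative note (not part of the original test): writeRemoteFile speaks the
// scp "sink" protocol - a single 'C' record carrying the file mode, size and name,
// then the raw contents, then a terminating NUL byte. For example, writing a
// 5-byte file "x.txt" with mode 0644 sends roughly:
//
//	C0644 5 x.txt
//	<5 bytes of data>\x00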
|
[
"\"USER\""
] |
[] |
[
"USER"
] |
[]
|
["USER"]
|
go
| 1 | 0 | |
topdown/topdown_test.go
|
// Copyright 2016 The OPA Authors. All rights reserved.
// Use of this source code is governed by an Apache2
// license that can be found in the LICENSE file.
package topdown
import (
"context"
"errors"
"fmt"
"os"
"reflect"
"sort"
"strings"
"testing"
"time"
"github.com/open-policy-agent/opa/ast"
"github.com/open-policy-agent/opa/storage"
"github.com/open-policy-agent/opa/storage/inmem"
"github.com/open-policy-agent/opa/types"
"github.com/open-policy-agent/opa/util"
testutil "github.com/open-policy-agent/opa/util/test"
)
func TestTopDownCompleteDoc(t *testing.T) {
tests := []struct {
note string
rule string
expected interface{}
}{
{"undefined", `p = null { false }`, ""}, // "" will be converted to Undefined
{"null", `p = null { true }`, "null"},
{"bool: true", `p = true { true }`, "true"},
{"bool: false", `p = false { true }`, "false"},
{"number: 3", `p = 3 { true }`, "3"},
{"number: 3.0", `p = 3 { true }`, "3"},
{"number: 66.66667", `p = 66.66667 { true }`, "66.66667"},
{`string: "hello"`, `p = "hello" { true }`, `"hello"`},
{`string: ""`, `p = "" { true }`, `""`},
{"array: [1,2,3,4]", `p = [1, 2, 3, 4] { true }`, "[1,2,3,4]"},
{"array: []", `p = [] { true }`, "[]"},
{`object/nested composites: {"a": [1], "b": [2], "c": [3]}`,
`p = {"a": [1], "b": [2], "c": [3]} { true }`,
`{"a": [1], "b": [2], "c": [3]}`},
{"set/nested: {{1,2},{2,3}}", `p = {{1, 2}, {2, 3}} { true }`, "[[1,2], [2,3]]"},
{"vars", `p = {"a": [x, y]} { x = 1; y = 2 }`, `{"a": [1,2]}`},
{"vars conflict", `p = {"a": [x, y]} { xs = [1, 2]; ys = [1, 2]; x = xs[_]; y = ys[_] }`,
completeDocConflictErr(nil)},
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, []string{tc.rule}, tc.expected)
}
}
func TestTopDownQueryIDsUnique(t *testing.T) {
ctx := context.Background()
store := inmem.New()
inputTerm := &ast.Term{}
txn := storage.NewTransactionOrDie(ctx, store)
defer store.Abort(ctx, txn)
compiler := compileModules([]string{
`package x
p { 1 }
p { 2 }`})
tr := []*Event{}
query := NewQuery(ast.MustParseBody("data.x.p")).
WithCompiler(compiler).
WithStore(store).
WithTransaction(txn).
WithTracer((*BufferTracer)(&tr)).
WithInput(inputTerm)
_, err := query.Run(ctx)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
queryIDs := map[uint64]bool{} // set of seen queryIDs (in EnterOps)
for _, evt := range tr {
if evt.Op != EnterOp {
continue
}
if queryIDs[evt.QueryID] {
t.Errorf("duplicate queryID: %v", evt)
}
queryIDs[evt.QueryID] = true
}
}
func TestTopDownIndexExpr(t *testing.T) {
ctx := context.Background()
store := inmem.New()
txn := storage.NewTransactionOrDie(ctx, store)
defer store.Abort(ctx, txn)
compiler := compileModules([]string{
`package test
p = true {
1 > 0
q
}
q = true { true }`})
tr := []*Event{}
query := NewQuery(ast.MustParseBody("data.test.p")).
WithCompiler(compiler).
WithStore(store).
WithTransaction(txn).
WithTracer((*BufferTracer)(&tr))
_, err := query.Run(ctx)
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
exp := []*ast.Expr{
ast.MustParseExpr("data.test.p"),
ast.MustParseExpr("data.test.q"),
}
i := 0
for _, evt := range tr {
if evt.Op != IndexOp {
continue
}
expr, ok := evt.Node.(*ast.Expr)
if !ok {
t.Fatal("Expected expr node but got:", evt.Node)
}
exp[i].Index = i
if ast.Compare(expr, exp[i]) != 0 {
t.Fatalf("Expected %v but got: %v", exp[i], expr)
}
i++
}
}
func TestTopDownPartialSetDoc(t *testing.T) {
tests := []struct {
note string
rule string
expected string
}{
{"array values", `p[x] { a[i] = x }`, `[1, 2, 3, 4]`},
{"array indices", `p[x] { a[x] = _ }`, `[0, 1, 2, 3]`},
{"object keys", `p[x] { b[x] = _ }`, `["v1", "v2"]`},
{"object values", `p[x] { b[i] = x }`, `["hello", "goodbye"]`},
{"nested composites", `p[x] { f[i] = x }`, `[{"xs": [1.0], "ys": [2.0]}, {"xs": [2.0], "ys": [3.0]}]`},
{"deep ref/heterogeneous", `p[x] { c[i][j][k] = x }`, `[null, 3.14159, false, true, "foo"]`},
{"composite var value", `p[x] { x = [i, a[i]] }`, "[[0,1],[1,2],[2,3],[3,4]]"},
{"composite key", `p[[x, {"y": y}]] { x = 1; y = 2 }`, `[[1,{"y": 2}]]`},
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, []string{tc.rule}, tc.expected)
}
}
func TestTopDownPartialObjectDoc(t *testing.T) {
tests := []struct {
note string
rule string
expected interface{}
}{
{"identity", `p[k] = v { b[k] = v }`, `{"v1": "hello", "v2": "goodbye"}`},
{"composites", `p[k] = v { d[k] = v }`, `{"e": ["bar", "baz"]}`},
{"body/join var", `p[k] = v { a[i] = v; g[k][i] = v }`, `{"a": 1, "b": 2, "c": 4}`},
{"composite value", `p[k] = [v1, {"v2": v2}] { g[k] = x; x[v1] = v2; v2 != 0 }`, `{
"a": [0, {"v2": 1}],
"b": [1, {"v2": 2}],
"c": [3, {"v2": 4}]
}`},
{"same key/value pair", `p[k] = 1 { ks = ["a", "b", "c", "a"]; ks[_] = k }`, `{"a":1,"b":1,"c":1}`},
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, []string{tc.rule}, tc.expected)
}
}
func TestTopDownEvalTermExpr(t *testing.T) {
tests := []struct {
note string
rule string
expected string
}{
{"true", `p = true { true }`, "true"},
{"false", `p = true { false }`, ""},
{"number non-zero", `p = true { -3.14 }`, "true"},
{"number zero", `p = true { null }`, "true"},
{"null", `p = true { null }`, "true"},
{"string non-empty", `p = true { "abc" }`, "true"},
{"string empty", `p = true { "" }`, "true"},
{"array non-empty", `p = true { [1, 2, 3] }`, "true"},
{"array empty", `p = true { [] }`, "true"},
{"object non-empty", `p = true { {"a": 1} }`, "true"},
{"object empty", `p = true { {} }`, "true"},
{"set non-empty", `p = true { {1, 2, 3} }`, "true"},
{"set empty", `p = true { set() }`, "true"},
{"ref", `p = true { a[i] }`, "true"},
{"ref undefined", `p = true { data.deadbeef[i] }`, ""},
{"ref undefined (path)", `p = true { data.a[true] }`, ""},
{"ref false", `p = true { data.c[0].x[1] }`, ""},
{"array comprehension", `p = true { [x | x = 1] }`, "true"},
{"array comprehension empty", `p = true { [x | x = 1; x = 2] }`, "true"},
{"arbitrary position", `p = true { a[i] = x; x; i }`, "true"},
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, []string{tc.rule}, tc.expected)
}
}
func TestTopDownEqExpr(t *testing.T) {
tests := []struct {
note string
rule string
expected interface{}
}{
// undefined cases
{"undefined: same type", `p = true { true = false }`, ""},
{"undefined: array order", `p = true { [1, 2, 3] = [1, 3, 2] }`, ""},
{"undefined: ref value", `p = true { a[3] = 9999 }`, ""},
{"undefined: ref values", `p = true { a[i] = 9999 }`, ""},
{"undefined: ground var", `p = true { a[3] = x; x = 3 }`, ""},
{"undefined: array var 1", `p = true { [1, x, x] = [1, 2, 3] }`, ""},
{"undefined: array var 2", `p = true { [1, x, 3] = [1, 2, x] }`, ""},
{"undefined: object var 1", `p = true { {"a": 1, "b": 2} = {"a": a, "b": a} }`, ""},
{"undefined: array deep var 1", `p = true { [[1, x], [3, x]] = [[1, 2], [3, 4]] }`, ""},
{"undefined: array deep var 2", `p = true { [[1, x], [3, 4]] = [[1, 2], [x, 4]] }`, ""},
{"undefined: set", `p = true { {1, 2, 3} = {1, 2, 4} }`, ""},
// ground terms
{"ground: bool", `p = true { true = true }`, "true"},
{"ground: string", `p = true { "string" = "string" }`, "true"},
{"ground: number", `p = true { 17 = 17 }`, "true"},
{"ground: null", `p = true { null = null }`, "true"},
{"ground: array", `p = true { [1, 2, 3] = [1, 2, 3] }`, "true"},
{"ground: set", `p = true { {1, 2, 3} = {3, 2, 1} }`, "true"},
{"ground: object", `p = true { {"b": false, "a": [1, 2, 3]} = {"a": [1, 2, 3], "b": false} }`, "true"},
{"ground: ref 1", `p = true { a[2] = 3 }`, "true"},
{"ground: ref 2", `p = true { b.v2 = "goodbye" }`, "true"},
{"ground: ref 3", `p = true { d.e = ["bar", "baz"] }`, "true"},
{"ground: ref 4", `p = true { c[0].x[1] = c[0].z.q }`, "true"},
// variables
{"var: x=y=z", `p[x] { x = y; z = 42; y = z }`, "[42]"},
{"var: ref value", `p = true { a[3] = x; x = 4 }`, "true"},
{"var: ref values", `p = true { a[i] = x; x = 2 }`, "true"},
{"var: ref key", `p = true { a[i] = 4; x = 3 }`, "true"},
{"var: ref keys", `p = true { a[i] = x; i = 2 }`, "true"},
{"var: ref ground var", `p[x] { i = 2; a[i] = x }`, "[3]"},
{"var: ref ref", `p[x] { c[0].x[i] = c[0].z[j]; x = [i, j] }`, `[[0, "p"], [1, "q"]]`},
// arrays and variables
{"pattern: array", `p[x] { [1, x, 3] = [1, 2, 3] }`, "[2]"},
{"pattern: array 2", `p[x] { [[1, x], [3, 4]] = [[1, 2], [3, 4]] }`, "[2]"},
{"pattern: array same var", `p[x] { [2, x, 3] = [x, 2, 3] }`, "[2]"},
{"pattern: array multiple vars", `p[z] { [1, x, y] = [1, 2, 3]; z = [x, y] }`, "[[2, 3]]"},
{"pattern: array multiple vars 2", `p[z] { [1, x, 3] = [y, 2, 3]; z = [x, y] }`, "[[2, 1]]"},
{"pattern: array ref", `p[x] { [1, 2, 3, x] = [a[0], a[1], a[2], a[3]] }`, "[4]"},
{"pattern: array non-ground ref", `p[x] { [1, 2, 3, x] = [a[0], a[1], a[2], a[i]] }`, "[1,2,3,4]"},
{"pattern: array = ref", `p[x] { [true, false, x] = c[i][j] }`, `["foo"]`},
{"pattern: array = ref (reversed)", `p[x] { c[i][j] = [true, false, x] }`, `["foo"]`},
{"pattern: array = var", `p[y] { [1, 2, x] = y; x = 3 }`, "[[1,2,3]]"},
// objects and variables
{"pattern: object val", `p[y] { {"x": y} = {"x": "y"} }`, `["y"]`},
{"pattern: object same var", `p[x] { {"x": x, "y": x} = {"x": 1, "y": 1} }`, "[1]"},
{"pattern: object multiple vars", `p[z] { {"x": x, "y": y} = {"x": 1, "y": 2}; z = [x, y] }`, "[[1, 2]]"},
{"pattern: object multiple vars 2", `p[z] { {"x": x, "y": 2} = {"x": 1, "y": y}; z = [x, y] }`, "[[1, 2]]"},
{"pattern: object ref", `p[x] { {"p": c[0].x[0], "q": x} = c[i][j] }`, `[false]`},
{"pattern: object non-ground ref", `p[x] { {"a": 1, "b": x} = {"a": 1, "b": c[0].x[i]} }`, `[true, false, "foo"]`},
{"pattern: object = ref", `p[x] { {"p": y, "q": z} = c[i][j]; x = [i, j, y, z] }`, `[[0, "z", true, false]]`},
{"pattern: object = ref (reversed)", `p[x] { c[i][j] = {"p": y, "q": z}; x = [i, j, y, z] }`, `[[0, "z", true, false]]`},
{"pattern: object = var", `p[x] { {"a": 1, "b": y} = x; y = 2 }`, `[{"a": 1, "b": 2}]`},
{"pattern: object/array nested", `p[ys] { f[i] = {"xs": [2], "ys": ys} }`, `[[3.0]]`},
{"pattern: object/array nested 2", `p[v] { f[i] = {"xs": [x], "ys": [y]}; v = [x, y] }`, `[[1.0, 2.0], [2.0, 3.0]]`},
// unordered collections requiring plug
{"unordered: sets", `p[x] { x = 2; {1,x,3} = {1,2,3} }`, `[2]`},
{"unordered: object keys", `p[x] { x = "a"; {x: 1} = {"a": 1} }`, `["a"]`},
{"unordered: object keys (reverse)", `p[x] { x = "a"; {"a": 1} = {x: 1} }`, `["a"]`},
// indexing
{"indexing: intersection", `p = true { a[i] = g[i][j] }`, ""},
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, []string{tc.rule}, tc.expected)
}
}
func TestTopDownUndos(t *testing.T) {
tests := []struct {
note string
rule string
expected interface{}
}{
{
note: "array-type",
rule: "p[x] { arr = [[1, [2]], [1, null], [2, [2]]]; [x, [2]] = arr[_] }",
expected: "[1, 2]",
},
{
note: "arrays-element",
rule: "p[x] { arr = [[1, 2], [1, null], [2, 2]]; arr[_] = [x, 2] }",
expected: "[1, 2]",
},
{
note: "arrays-length",
rule: "p[x] { arr = [[1, [2]], [1, []], [2, [2]]]; arr[_] = [x, [2]] }",
expected: "[1, 2]",
},
{
note: "array-ref-element",
rule: "p[x] { arr = [[1, 2], data.arr_ref, [2, 2]]; arr[_] = [x, 2] }",
expected: "[1, 2]",
},
{
note: "object-type",
rule: `p[x] { obj = {"a": {"x": 1, "y": {"v": 2}}, "b": {"x": 1, "y": null}, "c": {"x": 2, "y": {"v": 2}}}; {"x": x, "y": {"v": 2}} = obj[_] }`,
expected: "[1, 2]",
},
{
note: "objects-element",
rule: `p[x] { obj = {"a": {"x": 1, "y": 2}, "b": {"x": 1, "y": null}, "c": {"x": 2, "y": 2}}; obj[_] = {"x": x, "y": 2}}`,
expected: "[1, 2]",
},
{
note: "objects-length",
rule: `p[x] { obj = {"a": {"x": 1, "y": {"v": 2}}, "b": {"x": 1, "y": {}}, "c": {"x": 2, "y": {"v": 2}}}; obj[_] = {"x": x, "y": {"v": 2}}}`,
expected: "[1, 2]",
},
{
note: "object-ref-element",
rule: `p[x] { obj = {"a": {"x": 1, "y": 2}, "b": obj_ref, "c": {"x": 2, "y": 2}}; obj[_] = {"x": x, "y": 2}}`,
expected: "[1, 2]",
},
{
note: "object-ref-missing-key",
rule: `p[x] { obj = {"a": {"x": 1, "y": 2}, "b": obj_ref_missing_key, "c": {"x": 2, "y": 2}}; obj[_] = {"x": x, "y": 2}}`,
expected: "[1, 2]",
},
}
data := util.MustUnmarshalJSON([]byte(`
{
"arr_ref": [1, null],
"obj_ref": {"x": 1, "y": null},
"obj_ref_missing_key": {"x": 3, "z": 2}
}
`)).(map[string]interface{})
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, []string{tc.rule}, tc.expected)
}
}
func TestTopDownComparisonExpr(t *testing.T) {
tests := []struct {
note string
rule string
expected interface{}
}{
{"equals", `p = true { 1 == 1; a[i] = x; x == 2 }`, "true"},
{"noteq", `p = true { 0 != 1; a[i] = x; x != 2 }`, "true"},
{"gt", `p = true { 1 > 0; a[i] = x; x > 2 }`, "true"},
{"gteq", `p = true { 1 >= 1; a[i] = x; x >= 4 }`, "true"},
{"lt", `p = true { -1 < 0; a[i] = x; x < 5 }`, "true"},
{"lteq", `p = true { -1 <= 0; a[i] = x; x <= 1 }`, "true"},
{"undefined: equals", `p = true { 0 == 1 }`, ""},
{"undefined: noteq", `p = true { 0 != 0 }`, ""},
{"undefined: gt", `p = true { 1 > 2 }`, ""},
{"undefined: gteq", `p = true { 1 >= 2 }`, ""},
{"undefined: lt", `p = true { 1 < -1 }`, ""},
{"undefined: lteq", `p = true { 1 < -1 }`, ""},
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, []string{tc.rule}, tc.expected)
}
}
func TestTopDownVirtualDocs(t *testing.T) {
tests := []struct {
note string
rules []string
expected interface{}
}{
// input to partial set and object docs
{"input: set 1", []string{`p = true { q[1] }`, `q[x] { a[i] = x }`}, "true"},
{"input: set 2", []string{`p[x] { q[1] = x }`, `q[x] { a[i] = x }`}, "[1]"},
{"input: set embedded", []string{`p[x] { x = {"b": [q[2]]} }`, `q[x] { a[i] = x }`}, `[{"b": [2]}]`},
{"input: set undefined", []string{`p = true { q[1000] }`, `q[x] { a[x] = y }`}, ""},
{"input: set dereference", []string{`p = y { x = [1]; q[x][0] = y }`, `q[[x]] { a[_] = x }`}, "1"},
{"input: set ground var", []string{`p[x] { x = 1; q[x] }`, `q[y] { a[y] = i }`}, "[1]"},
{"input: set ground composite (1)", []string{
`p = true { z = [[1, 2], 2]; q[z] }`,
`q[[x, y]] { x = [1, y]; y = 2 }`,
}, "true"},
{"input: set ground composite (2)", []string{
`p = true { y = 2; z = [[1, y], y]; q[z] }`,
`q[[x, y]] { x = [1, y]; y = 2 }`,
}, "true"},
{"input: set ground composite (3)", []string{
`p = true { y = 2; x = [1, y]; z = [x, y]; q[z] }`,
`q[[x, y]] { x = [1, y]; y = 2 }`,
}, "true"},
{"input: set partially ground composite", []string{
`p[u] { y = 2; x = [1, u]; z = [x, y]; q[z] }`, // "u" is not ground here
`q[[x, y]] { x = [1, y]; y = 2 }`,
}, "[2]"},
{"input: object 1", []string{`p = true { q[1] = 2 }`, `q[i] = x { a[i] = x }`}, "true"},
{"input: object 2", []string{`p = true { q[1] = 0 }`, `q[x] = i { a[i] = x }`}, "true"},
{"input: object embedded 1", []string{`p[x] { x = [1, q[3], q[2]] }`, `q[i] = x { a[i] = x }`}, "[[1,4,3]]"},
{"input: object embedded 2", []string{`p[x] { x = {"a": [q[3]], "b": [q[2]]} }`, `q[i] = x { a[i] = x }`}, `[{"a": [4], "b": [3]}]`},
{"input: object undefined val", []string{`p = true { q[1] = 9999 }`, `q[i] = x { a[i] = x }`}, ""},
{"input: object undefined key 1", []string{`p = true { q[9999] = 2 }`, `q[i] = x { a[i] = x }`}, ""},
{"input: object undefined key 2", []string{`p = true { q.foo = 2 }`, `q[i] = x { a[i] = x }`}, ""},
{"input: object dereference ground", []string{`p = true { q[0].x[1] = false }`, `q[i] = x { x = c[i] }`}, "true"},
{"input: object dereference ground 2", []string{`p[v] { x = "a"; q[x][y] = v }`, `q[k] = v { k = "a"; v = data.a }`}, "[1,2,3,4]"},
{"input: object defererence non-ground", []string{`p = true { q[0][x][y] = false }`, `q[i] = x { x = c[i] }`}, "true"},
{"input: object ground var key", []string{`p[y] { x = "b"; q[x] = y }`, `q[k] = v { x = {"a": 1, "b": 2}; x[k] = v }`}, "[2]"},
{"input: variable binding substitution", []string{
`p[x] = y { r[z] = y; q[x] = z }`,
`r[k] = v { x = {"a": 1, "b": 2, "c": 3, "d": 4}; x[k] = v }`,
`q[y] = x { z = {"a": "a", "b": "b", "d": "d"}; z[y] = x }`},
`{"a": 1, "b": 2, "d": 4}`},
// output from partial set and object docs
{"output: set", []string{`p[x] { q[x] }`, `q[y] { a[i] = y }`}, "[1,2,3,4]"},
{"output: set embedded", []string{`p[i] { {i: [i]} = {i: [q[i]]} }`, `q[x] { d.e[i] = x }`}, `["bar", "baz"]`},
{"output: set var binding", []string{`p[x] { q[x] }`, `q[y] { y = [i, j]; i = 1; j = 2 }`}, `[[1,2]]`},
{"output: set dereference", []string{`p[y] { q[x][0] = y }`, `q[[x]] { a[_] = x }`}, `[1,2,3,4]`},
{"output: set dereference deep", []string{`p[y] { q[i][j][k][x] = y }`, `q[{{[1], [2]}, {[3], [4]}}] { true }`}, "[1,2,3,4]"},
{"output: set falsy values", []string{`p[x] { q[x] }`, `q = {0, "", false, null, [], {}, set()} { true }`}, `[0, "", null, [], {}, []]`},
{"output: object key", []string{`p[x] { q[x] = 4 }`, `q[i] = x { a[i] = x }`}, "[3]"},
{"output: object value", []string{`p[x] = y { q[x] = y }`, `q[k] = v { b[k] = v }`}, `{"v1": "hello", "v2": "goodbye"}`},
{"output: object embedded", []string{`p[k] = v { {k: [q[k]]} = {k: [v]} }`, `q[x] = y { b[x] = y }`}, `{"v1": "hello", "v2": "goodbye"}`},
{"output: object dereference ground", []string{`p[i] { q[i].x[1] = false }`, `q[i] = x { x = c[i] }`}, "[0]"},
{"output: object defererence non-ground", []string{
`p[r] { q[x][y][z] = false; r = [x, y, z] }`,
`q[i] = x { x = c[i] }`},
`[[0, "x", 1], [0, "z", "q"]]`},
{"output: object dereference array of refs", []string{
`p[x] { q[_][0].c[_] = x }`,
`q[k] = v { d.e[_] = k; v = [r | r = l[_]] }`,
}, "[1,2,3,4]"},
{"output: object dereference array of refs within object", []string{
`p[x] { q[_].x[0].c[_] = x }`,
`q[k] = v { d.e[_] = k; v = {"x": [r | r = l[_]]} }`,
}, "[1,2,3,4]"},
{"output: object dereference object with key refs", []string{
`p = true { q.bar[1].alice[0] = 1 }`,
`q[k] = v { d.e[_] = k; v = [x | x = {l[_].a: [1]}] }`,
}, "true"},
{"output: object var binding", []string{
`p[z] { q[x] = y; z = [x, y] }`,
`q[k] = v { v = [x, y]; x = "a"; y = "b"; k = "foo" }`},
`[["foo", ["a", "b"]]]`},
{"output: object key var binding", []string{
`p[z] { q[x] = y; z = [x, y] }`,
`q[k] = v { k = y; y = x; x = "a"; v = "foo" }`},
`[["a", "foo"]]`},
{"object: self-join", []string{
`p[[x, y]] { q[x] = 1; q[y] = x }`,
`q[x] = i { a[i] = x }`},
"[[2,3]]"},
// input+output from partial set/object docs
{"i/o: objects", []string{
`p[x] { q[x] = r[x] }`,
`q[x] = y { z = {"a": 1, "b": 2, "d": 4}; z[x] = y }`,
`r[k] = v { x = {"a": 1, "b": 2, "c": 4, "d": 3}; x[k] = v }`},
`["a", "b"]`},
{"i/o: undefined keys", []string{
`p[y] { q[x]; r[x] = y }`,
`q[x] { z = ["a", "b", "c", "d"]; z[y] = x }`,
`r[k] = v { x = {"a": 1, "b": 2, "d": 4}; x[k] = v }`},
`[1, 2, 4]`},
// input/output to/from complete docs
{"input: complete array", []string{`p = true { q[1] = 2 }`, `q = [1, 2, 3, 4] { true }`}, "true"},
{"input: complete object", []string{`p = true { q.b = 2 }`, `q = {"a": 1, "b": 2} { true }`}, "true"},
{"input: complete set", []string{`p = true { q[3] }`, `q = {1, 2, 3, 4} { true }`}, "true"},
{"input: complete array dereference ground", []string{`p = true { q[1][1] = 3 }`, `q = [[0, 1], [2, 3]] { true }`}, "true"},
{"input: complete object dereference ground", []string{`p = true { q.b[1] = 4 }`, `q = {"a": [1, 2], "b": [3, 4]} { true }`}, "true"},
{"input: complete array ground index", []string{`p[x] { z = [1, 2]; z[i] = y; q[y] = x }`, `q = [1, 2, 3, 4] { true }`}, "[2,3]"},
{"input: complete object ground key", []string{`p[x] { z = ["b", "c"]; z[i] = y; q[y] = x }`, `q = {"a": 1, "b": 2, "c": 3, "d": 4} { true }`}, "[2,3]"},
{"input: complete vars", []string{
`p = true { q[1][1] = 2 }`,
`q = [{"x": x, "y": y}, z] { x = 1; y = 2; z = [1, 2, 3] }`,
}, `true`},
{"output: complete array", []string{`p[x] { q[i] = e; x = [i, e] }`, `q = [1, 2, 3, 4] { true }`}, "[[0,1],[1,2],[2,3],[3,4]]"},
{"output: complete object", []string{`p[x] { q[i] = e; x = [i, e] }`, `q = {"a": 1, "b": 2} { true }`}, `[["a", 1], ["b", 2]]`},
{"output: complete set", []string{`p[x] { q[x] }`, `q = {1, 2, 3, 4} { true }`}, "[1,2,3,4]"},
{"output: complete array dereference non-ground", []string{`p[r] { q[i][j] = 2; r = [i, j] }`, `q = [[1, 2], [3, 2]] { true }`}, "[[0, 1], [1, 1]]"},
{"output: complete object defererence non-ground", []string{`p[r] { q[x][y] = 2; r = [x, y] }`, `q = {"a": {"x": 1}, "b": {"y": 2}, "c": {"z": 2}} { true }`}, `[["b", "y"], ["c", "z"]]`},
{"output: complete vars", []string{
`p[x] { q[_][_] = x }`,
`q = [{"x": x, "y": y}, z] { x = 1; y = 2; z = [1, 2, 3] }`,
}, `[1,2,3]`},
// no dereferencing
{"no suffix: complete", []string{`p = true { q }`, `q = true { true }`}, "true"},
{"no suffix: complete vars", []string{
`p = true { q }`, `q = x { x = true }`,
}, "true"},
{"no suffix: complete incr (error)", []string{`p = true { q }`, `q = false { true }`, `q = true { true }`}, completeDocConflictErr(nil)},
{"no suffix: complete incr", []string{`p = true { not q }`, `q = true { false }`, `q = false { true }`}, "true"},
{"no suffix: object", []string{`p[x] = y { q = o; o[x] = y }`, `q[x] = y { b[x] = y }`}, `{"v1": "hello", "v2": "goodbye"}`},
{"no suffix: object incr", []string{
`p[x] = y { q = o; o[x] = y }`,
`q[x] = y { b[x] = y }`,
`q[x1] = y1 { d.e[y1] = x1 }`},
`{"v1": "hello", "v2": "goodbye", "bar": 0, "baz": 1}`},
{"no suffix: chained", []string{
`p = true { q = x; x[i] = 4 }`,
`q[k] = v { r = x; x[k] = v }`,
`r[k] = v { s = x; x[k] = v }`,
`r[k] = v { t = x; x[v] = k }`,
`s = {"a": 1, "b": 2, "c": 4} { true }`,
`t = ["d", "e", "g"] { true }`},
"true"},
{"no suffix: object var binding", []string{
`p[x] { q = x }`,
`q[k] = v { v = [i, j]; k = i; i = "a"; j = 1 }`},
`[{"a": ["a", 1]}]`},
{"no suffix: object composite value", []string{
`p[x] { q = x }`,
`q[k] = {"v": v} { v = [i, j]; k = i; i = "a"; j = 1 }`},
`[{"a": {"v": ["a", 1]}}]`},
// data.c[0].z.p is longer than data.q
{"no suffix: bound ref with long prefix (#238)", []string{
`p = true { q; q }`,
`q = x { x = data.c[0].z.p }`}, "true"},
{"no suffix: object conflict (error)", []string{
`p[x] = y { xs = ["a", "b", "c", "a"]; x = xs[i]; y = a[i] }`},
objectDocKeyConflictErr(nil)},
{"no suffix: set", []string{`p[x] { q = s; s[x] }`, `q[x] { a[i] = x }`}, "[1,2,3,4]"},
{"empty partial set", []string{"p[1] { a[0] = 100 }"}, "[]"},
{"empty partial object", []string{`p["x"] = 1 { a[0] = 100 }`}, "{}"},
{"input: non-ground object keys", []string{
`p = x { q.a.b = x }`,
`q = {x: {y: 1}} { x = "a"; y = "b" }`,
}, "1"},
{"input: non-ground set elements", []string{
`p { q["c"] }`,
`q = {x, "b", z} { x = "a"; z = "c" }`,
}, "true"},
{"output: non-ground object keys", []string{
`p[x] { q[i][j] = x }`,
`q = {x: {x1: 1}, y: {y1: 2}} { x = "a"; y = "b"; x1 = "a1"; y1 = "b1" }`,
}, "[1, 2]"},
{"output: non-ground set elements", []string{
`p[x] { q[x] }`,
`q = {x, "b", z} { x = "a"; z = "c" }`,
}, `["a", "b", "c"]`},
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)
}
}
func TestTopDownBaseAndVirtualDocs(t *testing.T) {
// Define base docs that will overlap with virtual docs.
var data map[string]interface{}
input := `
{
"topdown": {
"a": {
"b": {
"c": {
"x": [100,200],
"y": false,
"z": {
"a": "b"
}
}
}
},
"g": {
"h": {
"k": [1,2,3]
}
},
"set": {
"u": [1,2,3,4]
},
"conflicts": {
"k": "foo"
}
}
}
`
if err := util.UnmarshalJSON([]byte(input), &data); err != nil {
panic(err)
}
compiler := compileModules([]string{
// Define virtual docs that will overlap with base docs.
`package topdown.a.b.c
p = [1, 2] { true }
q = [3, 4] { true }
r["a"] = 1 { true }
r["b"] = 2 { true }`,
`package topdown.a.b.c.s
w = {"f": 10, "g": 9.9} { true }`,
`package topdown.set
v[data.topdown.set.u[_]] { true }`,
`package topdown.no.base.doc
p = true { true }`,
`package topdown.a.b.c.undefined1
p = true { false }
p = true { false }
q = true { false }`,
`package topdown.a.b.c.undefined2
p = true { input.foo }`,
`package topdown.a.b.c.empty`,
`package topdown.g.h
p = true { false }`,
`package topdown.virtual.constants
p = 1
q = 2
r = 1`,
`package topdown.missing.input.value
p = input.deadbeef`,
// Define virtual docs that we can query to obtain merged result.
`package topdown
p[[x1, x2, x3, x4]] { data.topdown.a.b[x1][x2][x3] = x4 }
q[[x1, x2, x3]] { data.topdown.a.b[x1][x2][0] = x3 }
r[[x1, x2]] { data.topdown.a.b[x1] = x2 }
s = data.topdown.no { true }
t = data.topdown.a.b.c.undefined1 { true }
u = data.topdown.missing.input.value { true }
v = data.topdown.g { true }
w = data.topdown.set { true }
iterate_ground[x] { data.topdown.virtual.constants[x] = 1 }
`,
`package topdown.conflicts
k = "bar"`,
})
store := inmem.NewFromObject(data)
assertTopDownWithPath(t, compiler, store, "base/virtual", []string{"topdown", "p"}, "{}", `[
["c", "p", 0, 1],
["c", "p", 1, 2],
["c", "q", 0, 3],
["c", "q", 1, 4],
["c", "r", "a", 1],
["c", "r", "b", 2],
["c", "x", 0, 100],
["c", "x", 1, 200],
["c", "z", "a", "b"],
["c", "s", "w", {"f":10, "g": 9.9}]
]`)
assertTopDownWithPath(t, compiler, store, "base/virtual: ground key", []string{"topdown", "q"}, "{}", `[
["c", "p", 1],
["c", "q", 3],
["c", "x", 100]
]`)
assertTopDownWithPath(t, compiler, store, "base/virtual: prefix", []string{"topdown", "r"}, "{}", `[
["c", {
"p": [1,2],
"q": [3,4],
"r": {"a": 1, "b": 2},
"s": {"w": {"f": 10, "g": 9.9}},
"x": [100,200],
"y": false,
"z": {"a": "b"},
"undefined1": {},
"undefined2": {},
"empty": {}
}]
]`)
assertTopDownWithPath(t, compiler, store, "base/virtual: set", []string{"topdown", "w"}, "{}", `{
"v": [1,2,3,4],
"u": [1,2,3,4]
}`)
assertTopDownWithPath(t, compiler, store, "base/virtual: no base", []string{"topdown", "s"}, "{}", `{"base": {"doc": {"p": true}}}`)
assertTopDownWithPath(t, compiler, store, "base/virtual: undefined", []string{"topdown", "t"}, "{}", "{}")
assertTopDownWithPath(t, compiler, store, "base/virtual: undefined-2", []string{"topdown", "v"}, "{}", `{"h": {"k": [1,2,3]}}`)
assertTopDownWithPath(t, compiler, store, "base/virtual: missing input value", []string{"topdown", "u"}, "{}", "{}")
assertTopDownWithPath(t, compiler, store, "iterate ground", []string{"topdown", "iterate_ground"}, "{}", `["p", "r"]`)
assertTopDownWithPath(t, compiler, store, "base/virtual: conflicts", []string{"topdown.conflicts"}, "{}", fmt.Errorf("base and virtual document keys must be disjoint"))
}
func TestTopDownNestedReferences(t *testing.T) {
tests := []struct {
note string
rules []string
expected interface{}
}{
// nested base document references
{"ground ref", []string{`p = true { a[h[0][0]] = 2 }`}, "true"},
{"non-ground ref", []string{`p[x] { x = a[h[i][j]] }`}, "[2,3,4]"},
{"two deep", []string{`p[x] { x = a[a[a[i]]] }`}, "[3,4]"},
{"two deep", []string{`p[x] { x = a[h[i][a[j]]] }`}, "[3,4]"},
{"two deep repeated var", []string{`p[x] { x = a[h[i][a[i]]] }`}, "[3]"},
{"no suffix", []string{`p = true { 4 = a[three] }`}, "true"},
{"var ref", []string{`p[y] { x = [1, 2, 3]; y = a[x[_]] }`}, "[2,3,4]"},
{"undefined", []string{`p = true { a[three.deadbeef] = x }`}, ""},
// nested virtual document references
{"vdoc ref: complete", []string{`p[x] { x = a[q[_]] }`, `q = [2, 3] { true }`}, "[3,4]"},
{"vdoc ref: complete: ground", []string{`p[x] { x = a[q[1]] }`, `q = [2, 3] { true }`}, "[4]"},
{"vdoc ref: complete: no suffix", []string{`p = true { 2 = a[q] }`, `q = 1 { true }`}, "true"},
{"vdoc ref: partial object", []string{
`p[x] { x = a[q[_]] }`,
`q[k] = v { o = {"a": 2, "b": 3, "c": 100}; o[k] = v }`},
"[3,4]"},
{"vdoc ref: partial object: ground", []string{
`p[x] { x = a[q.b] }`,
`q[k] = v { o = {"a": 2, "b": 3, "c": 100}; o[k] = v }`},
"[4]"},
// mixed cases
{"vdoc ref: complete: nested bdoc ref", []string{
`p[x] { x = a[q[b[_]]] }`,
`q = {"hello": 1, "goodbye": 3, "deadbeef": 1000} { true }`}, "[2,4]"},
{"vdoc ref: partial object: nested bdoc ref", []string{
`p[x] { x = a[q[b[_]]] }`,
// bind to value
`q[k] = v { o = {"hello": 1, "goodbye": 3, "deadbeef": 1000}; o[k] = v }`}, "[2,4]"},
{"vdoc ref: partial object: nested bdoc ref-2", []string{
`p[x] { x = a[q[d.e[_]]] }`,
// bind to reference
`q[k] = v { strings[k] = v }`}, "[3,4]"},
{"vdoc ref: multiple", []string{
`p[x] { x = q[a[_]].v[r[a[_]]] }`,
`q = [{"v": {}}, {"v": [0, 0, 1, 2]}, {"v": [0, 0, 3, 4]}, {"v": [0, 0]}, {}] { true }`,
`r = [1, 2, 3, 4] { true }`}, "[1,2,3,4]"},
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)
}
}
func TestTopDownVarReferences(t *testing.T) {
tests := []struct {
note string
rules []string
expected interface{}
}{
{"ground", []string{`p[x] { v = [[1, 2], [2, 3], [3, 4]]; x = v[2][1] }`}, "[4]"},
{"non-ground", []string{`p[x] { v = [[1, 2], [2, 3], [3, 4]]; x = v[i][j] }`}, "[1,2,3,4]"},
{"mixed", []string{`p[x] = y { v = [{"a": 1, "b": 2}, {"c": 3, "z": [4]}]; y = v[i][x][j] }`}, `{"z": 4}`},
{"ref binding", []string{`p[x] { v = c[i][j]; x = v[k]; x = true }`}, "[true]"},
{"existing ref binding", []string{`p = x { q = a; q[0] = x; q[0] }`}, `1`},
{"embedded", []string{`p[x] { v = [1, 2, 3]; x = [{"a": v[i]}] }`}, `[[{"a": 1}], [{"a": 2}], [{"a": 3}]]`},
{"embedded ref binding", []string{`p[x] { v = c[i][j]; w = [v[0], v[1]]; x = w[y] }`}, "[null, false, true, 3.14159]"},
{"array: ground var", []string{`p[x] { i = [1, 2, 3, 4]; j = [1, 2, 999]; j[k] = y; i[y] = x }`}, "[2,3]"},
{"array: ref", []string{`p[y] { i = [1,2,3,4]; x = data.a[_]; i[x] = y }`}, `[2, 3, 4]`},
{"object: ground var", []string{`p[x] { i = {"a": 1, "b": 2, "c": 3}; j = ["a", "c", "deadbeef"]; j[k] = y; i[y] = x }`}, "[1, 3]"},
{"object: ref", []string{`p[y] { i = {"1": 1, "2": 2, "4": 4}; x = data.numbers[_]; i[x] = y }`}, `[1, 2, 4]`},
{"set: ground var", []string{`p[x] { i = {1, 2, 3, 4}; j = {1, 2, 99}; j[x]; i[x] }`}, "[1,2]"},
{"set: ref", []string{`p[x] { i = {1, 2, 3, 4}; x = data.a[_]; i[x] }`}, `[1, 2, 3, 4]`},
{"set: lookup: base docs", []string{`p = true { v = {[1, 999], [3, 4]}; pair = [a[2], 4]; v[pair] }`}, "true"},
{"set: lookup: embedded", []string{`p = true { x = [{}, {[1, 2], [3, 4]}]; y = [3, 4]; x[i][y] }`}, "true"},
{"set: lookup: dereference", []string{`p[[i, z, r]] { x = [{}, {[1, 2], [3, 4]}]; y = [3, 4]; x[i][y][z] = r }`}, "[[1,0,3], [1,1,4]]"},
{"avoids indexer", []string{`p = true { somevar = [1, 2, 3]; somevar[i] = 2 }`}, "true"},
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)
}
}
func TestTopDownCompositeReferences(t *testing.T) {
tests := []struct {
note string
rule string
expected interface{}
}{
{"array", "p = fixture.r[[1, 2]]", "[1, 2]"},
{"object", `p = fixture.r[{"foo": "bar"}]`, `{"foo": "bar"}`},
{"set", `p = fixture.r[{1, 2}]`, "[1, 2]"},
{"unify array", `p = [x | fixture.r[[1, x]]]`, "[2, 3]"},
{"unify object", `p = [x | fixture.r[{"foo": x}]]`, `["bar"]`},
{"unify partial ground array", `p = [x | fixture.p1[[x,2]]]`, `[1,2]`},
{"complete doc unify", `p = [[x,y] | fixture.s[[x, y]]]`, `[[1, 2], [1, 3], [2, 7], [[1,1], 4]]`},
{"partial doc unify", `p = [[x,y] | fixture.r[[x, y]]]`, `[[1, 2], [1, 3], [2, 7], [[1,1], 4]]`},
{"empty set", `p { fixture.empty[set()]} `, "true"},
{"ref", `p = fixture.r[[fixture.foo.bar, 3]]`, "[1,3]"},
{"nested ref", `p = fixture.r[[fixture.foo[fixture.o.foo], 3]]`, "[1,3]"},
{"comprehension", `p = fixture.s[[[x | x = y[_]; y = [1, 1]], 4]]`, "[[1,1],4]"},
{"missing array", `p = fixture.r[[1, 4]]`, ``},
{"missing object value", `p = fixture.r[{"foo": "baz"}]`, ``},
{"missing set", `p = fixture.r[{1, 3}]`, ``},
}
fixture := `package fixture
empty = {set()}
s = {[1, 2], [1, 3], {"foo": "bar"}, {1, 2}, [2, 7], [[1,1], 4]}
r[x] { s[x] }
a = [1, 2]
o = {"foo": "bar"}
foo = {"bar": 1}
p1[[1,2]]
p1[[1,3]]
p1[[2,2]]
`
for _, tc := range tests {
module := "package test\nimport data.fixture\n" + tc.rule
compiler := compileModules([]string{fixture, module})
assertTopDownWithPath(t, compiler, inmem.New(), tc.note, []string{"test", "p"}, "", tc.expected)
}
}
func TestTopDownDisjunction(t *testing.T) {
tests := []struct {
note string
rules []string
expected interface{}
}{
{"incr: query set", []string{`p[x] { a[i] = x }`, `p[y] { b[j] = y }`}, `[1,2,3,4,"hello","goodbye"]`},
{"incr: query set constants", []string{
`p[100] { true }`,
`p[x] { a[x] }`},
"[0,1,2,3,100]"},
{"incr: query object", []string{
`p[k] = v { b[v] = k }`,
`p[k] = v { a[i] = v; g[k][j] = v }`},
`{"b": 2, "c": 4, "hello": "v1", "goodbye": "v2", "a": 1}`},
{"incr: query object constant key", []string{
`p["a"] = 1 { true }`,
`p["b"] = 2 { true }`},
`{"a": 1, "b": 2}`},
{"incr: iter set", []string{
`p[x] { q[x] }`,
`q[x] { a[i] = x }`,
`q[y] { b[j] = y }`},
`[1,2,3,4,"hello","goodbye"]`},
{"incr: eval set", []string{
`p[x] { q = s; s[x] }`, // make p a set so that test assertion orders result
`q[x] { a[_] = x }`,
`q[y] { b[_] = y }`},
`[1,2,3,4,"hello","goodbye"]`},
{"incr: eval object", []string{
`p[k] = v { q[k] = v }`,
`q[k] = v { b[v] = k }`,
`q[k] = v { a[i] = v; g[k][j] = v }`},
`{"b": 2, "c": 4, "hello": "v1", "goodbye": "v2", "a": 1}`},
{"incr: eval object constant key", []string{
`p[k] = v { q[k] = v }`,
`q["a"] = 1 { true }`,
`q["b"] = 2 { true }`},
`{"a": 1, "b": 2}`},
{"complete: undefined", []string{`p = true { false }`, `p = true { false }`}, ""},
{"complete: error", []string{`p = true { true }`, `p = false { false }`, `p = false { true }`}, completeDocConflictErr(nil)},
{"complete: valid", []string{`p = true { true }`, `p = true { true }`}, "true"},
{"complete: valid-2", []string{`p = true { true }`, `p = false { false }`}, "true"},
{"complete: reference error", []string{`p = true { q }`, `q = true { true }`, `q = false { true }`}, completeDocConflictErr(nil)},
{"complete: reference valid", []string{`p = true { q }`, `q = true { true }`, `q = true { true }`}, "true"},
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)
}
}
func TestTopDownNegation(t *testing.T) {
tests := []struct {
note string
rules []string
expected interface{}
}{
{"neg: constants", []string{`p = true { not true = false }`}, "true"},
{"neg: constants", []string{`p = true { not true = true }`}, ""},
{"neg: set contains", []string{`p = true { not q.v0 }`, `q[x] { b[x] = v }`}, "true"},
{"neg: set contains undefined", []string{`p = true { not q.v2 }`, `q[x] { b[x] = v }`}, ""},
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)
}
}
func TestTopDownComprehensions(t *testing.T) {
tests := []struct {
note string
rules []string
expected interface{}
}{
{"array simple", []string{`p[i] { xs = [x | x = a[_]]; xs[i] > 1 }`}, "[1,2,3]"},
{"array nested", []string{`p[i] { ys = [y | y = x[_]; x = [z | z = a[_]]]; ys[i] > 1 }`}, "[1,2,3]"},
{"array embedded array", []string{`p[i] { xs = [[x | x = a[_]]]; xs[0][i] > 1 }`}, "[1,2,3]"},
{"array embedded object", []string{`p[i] { xs = {"a": [x | x = a[_]]}; xs.a[i] > 1 }`}, "[1,2,3]"},
{"array embedded set", []string{`p = xs { xs = {[x | x = a[_]]} }`}, "[[1,2,3,4]]"},
{"array closure", []string{`p[x] { y = 1; x = [y | y = 1] }`}, "[[1]]"},
{"array dereference embedded", []string{
`p[x] { q.a[2][i] = x }`,
`q[k] = v { k = "a"; v = [y | i[_] = _; i = y; i = [z | z = a[_]]] }`,
}, "[1,2,3,4]"},
{"object simple", []string{`p[i] { xs = {s: x | x = a[_]; format_int(x, 10, s)}; y = xs[i]; y > 1 }`}, `["2","3","4"]`},
{"object nested", []string{`p = r { r = {x: y | z = {i: q | i = b[q]}; x = z[y]}}`}, `{"v1": "hello", "v2": "goodbye"}`},
{"object embedded array", []string{`p[i] { xs = [{s: x | x = a[_]; format_int(x, 10, s)}]; xs[0][i] > 1 }`}, `["2","3","4"]`},
{"object embedded object", []string{`p[i] { xs = {"a": {s: x | x = a[_]; format_int(x, 10, s)}}; xs.a[i] > 1 }`}, `["2","3","4"]`},
{"object embedded set", []string{`p = xs { xs = {{s: x | x = a[_]; format_int(x, 10, s)}} }`}, `[{"1":1,"2":2,"3":3,"4":4}]`},
{"object closure", []string{`p[x] { y = 1; x = {"foo":y | y = 1} }`}, `[{"foo": 1}]`},
{"object dereference embedded", []string{
`a = [4] { true }`,
`p[x] { q.a = x }`,
`q[k] = v { k = "a"; v = {"bar": y | i[_] = _; i = y; i = {"foo": z | z = a[_]}} }`,
}, `[{"bar": {"foo": 4}}]`},
{"object conflict", []string{
`p[x] { q.a = x }`,
`q[k] = v { k = "a"; v = {"bar": y | i[_] = _; i = y; i = {"foo": z | z = a[_]}} }`,
}, objectDocKeyConflictErr(nil)},
{"set simple", []string{`p = y {y = {x | x = a[_]; x > 1}}`}, "[2,3,4]"},
{"set nested", []string{`p[i] { ys = {y | y = x[_]; x = {z | z = a[_]}}; ys[i] > 1 }`}, "[2,3,4]"},
{"set embedded array", []string{`p[i] { xs = [{x | x = a[_]}]; xs[0][i] > 1 }`}, "[2,3,4]"},
{"set embedded object", []string{`p[i] { xs = {"a": {x | x = a[_]}}; xs.a[i] > 1 }`}, "[2,3,4]"},
{"set embedded set", []string{`p = xs { xs = {{x | x = a[_]}} }`}, "[[1,2,3,4]]"},
{"set closure", []string{`p[x] { y = 1; x = {y | y = 1} }`}, "[[1]]"},
{"set dereference embedded", []string{
`p[x] { q.a = x }`,
`q[k] = v { k = "a"; v = {y | i[_] = _; i = y; i = {z | z = a[_]}} }`,
}, "[[[1,2,3,4]]]"},
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)
}
}
func TestTopDownDefaultKeyword(t *testing.T) {
tests := []struct {
note string
rules []string
expected interface{}
}{
{"undefined", []string{`p = 1 { false }`, `default p = 0`, `p = 2 { false }`}, "0"},
{"defined", []string{`default p = 0`, `p = 1 { true }`, `p = 2 { false }`}, `1`},
{"defined-ooo", []string{`p = 1 { true }`, `default p = 0`, `p = 2 { false }`}, "1"},
{"array comprehension", []string{`p = 1 { false }`, `default p = [x | a[_] = x]`}, "[1,2,3,4]"},
{"object comprehension", []string{`p = 1 { false }`, `default p = {x: k | d[k][_] = x}`}, `{"bar": "e", "baz": "e"}`},
{"set comprehension", []string{`p = 1 { false }`, `default p = {x | a[_] = x}`}, `[1,2,3,4]`},
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)
}
}
func TestTopDownArithmetic(t *testing.T) {
tests := []struct {
note string
rules []string
expected interface{}
}{
{"plus", []string{`p[y] { a[i] = x; y = i + x }`}, "[1,3,5,7]"},
{"minus", []string{`p[y] { a[i] = x; y = i - x }`}, "[-1]"},
{"multiply", []string{`p[y] { a[i] = x; y = i * x }`}, "[0,2,6,12]"},
{"divide+round", []string{`p[z] { a[i] = x; y = i / x; round(y, z) }`}, "[0, 1]"},
{"divide+error", []string{`p[y] { a[i] = x; y = x / i }`}, fmt.Errorf("divide by zero")},
{"abs", []string{`p = true { abs(-10, x); x = 10 }`}, "true"},
{"remainder", []string{`p = x { x = 7 % 4 }`}, "3"},
{"remainder+error", []string{`p = x { x = 7 % 0 }`}, fmt.Errorf("modulo by zero")},
{"arity 1 ref dest", []string{`p = true { abs(-4, a[3]) }`}, "true"},
{"arity 1 ref dest (2)", []string{`p = true { not abs(-5, a[3]) }`}, "true"},
{"arity 2 ref dest", []string{`p = true { a[2] = 1 + 2 }`}, "true"},
{"arity 2 ref dest (2)", []string{`p = true { not a[2] = 2 + 3 }`}, "true"},
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)
}
}
func TestTopDownCasts(t *testing.T) {
tests := []struct {
note string
rules []string
expected interface{}
}{
{"to_number", []string{
`p = [x, y, z, i, j] { to_number("-42.0", x); to_number(false, y); to_number(100.1, z); to_number(null, i); to_number(true, j) }`,
},
"[-42.0, 0, 100.1, 0, 1]"},
{"to_number ref dest", []string{`p = true { to_number("3", a[2]) }`}, "true"},
{"to_number ref dest", []string{`p = true { not to_number("-1", a[2]) }`}, "true"},
{"to_number: bad input", []string{`p { to_number("broken", x) }`}, fmt.Errorf("invalid syntax")},
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)
}
}
func TestTopDownTypeBuiltin(t *testing.T) {
tests := []struct {
note string
rules []string
expected interface{}
}{
{"is_number", []string{
`p = [x, y, z] { is_number(-42.0, x); is_number(0, y); is_number(100.1, z) }`,
}, "[true, true, true]"},
{"is_number", []string{
`p = x { is_number(null, x) }`,
}, ""},
{"is_number", []string{
`p = x { is_number(false, x) }`,
}, ""},
{"is_number", []string{
`p[x] {arr = [true, 1]; arr[_] = x; is_number(x) }`,
}, "[1]"},
{"is_string", []string{
`p = [x, y, z] { is_string("Hello", x); is_string("There", y); is_string("OPA", z) }`,
}, "[true, true, true]"},
{"is_string", []string{
`p = x { is_string(null, x) }`,
}, ""},
{"is_string", []string{
`p = x { is_string(false, x) }`,
}, ""},
{"is_string", []string{
`p[x] {arr = [true, 1, "Hey"]; arr[_] = x; is_string(x) }`,
}, "[\"Hey\"]"},
{"is_boolean", []string{
`p = [x, y] { is_boolean(true, x); is_boolean(false, y) }`,
}, "[true, true]"},
{"is_boolean", []string{
`p = x { is_boolean(null, x) }`,
}, ""},
{"is_boolean", []string{
`p = x { is_boolean("Hello", x) }`,
}, ""},
{"is_boolean", []string{
`p[x] {arr = [false, 1, "Hey"]; arr[_] = x; is_boolean(x) }`,
}, "[false]"},
{"is_array", []string{
`p = [x, y] { is_array([1,2,3], x); is_array(["a", "b"], y) }`,
}, "[true, true]"},
{"is_array", []string{
`p = x { is_array({1,2,3}, x) }`,
}, ""},
{"is_set", []string{
`p = [x, y] { is_set({1,2,3}, x); is_set({"a", "b"}, y) }`,
}, "[true, true]"},
{"is_set", []string{
`p = x { is_set([1,2,3], x) }`,
}, ""},
{"is_object", []string{
`p = x { is_object({"foo": yy | yy = 1}, x) }`,
}, "true"},
{"is_object", []string{
`p = x { is_object("foo", x) }`,
}, ""},
{"is_null", []string{
`p = x { is_null(null, x) }`,
}, "true"},
{"is_null", []string{
`p = x { is_null(true, x) }`,
}, ""},
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)
}
}
func TestTopDownTypeNameBuiltin(t *testing.T) {
tests := []struct {
note string
rules []string
expected interface{}
}{
{"type_name", []string{
`p = x { type_name(null, x) }`}, ast.String("null")},
{"type_name", []string{
`p = x { type_name(true, x) }`}, ast.String("boolean")},
{"type_name", []string{
`p = x { type_name(100, x) }`}, ast.String("number")},
{"type_name", []string{
`p = x { type_name("Hello", x) }`}, ast.String("string")},
{"type_name", []string{
`p = x { type_name([1,2,3], x) }`}, ast.String("array")},
{"type_name", []string{
`p = x { type_name({1,2,3}, x) }`}, ast.String("set")},
{"type_name", []string{
`p = x { type_name({"foo": yy | yy = 1}, x) }`}, ast.String("object")},
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)
}
}
func TestTopDownRegexMatch(t *testing.T) {
tests := []struct {
note string
rules []string
expected interface{}
}{
{"re_match", []string{`p = true { re_match("^[a-z]+\\[[0-9]+\\]$", "foo[1]") }`}, "true"},
{"re_match: undefined", []string{`p = true { re_match("^[a-z]+\\[[0-9]+\\]$", "foo[\"bar\"]") }`}, ""},
{"re_match: bad pattern err", []string{`p = true { re_match("][", "foo[\"bar\"]") }`}, fmt.Errorf("re_match: error parsing regexp: missing closing ]: `[`")},
{"re_match: ref", []string{`p[x] { re_match("^b.*$", d.e[x]) }`}, "[0,1]"},
{"re_match: raw", []string{fmt.Sprintf(`p = true { re_match(%s, "foo[1]") }`, "`^[a-z]+\\[[0-9]+\\]$`")}, "true"},
{"re_match: raw: undefined", []string{fmt.Sprintf(`p = true { re_match(%s, "foo[\"bar\"]") }`, "`^[a-z]+\\[[0-9]+\\]$`")}, ""},
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)
}
}
func TestTopDownRegexSplit(t *testing.T) {
tests := []struct {
note string
rules []string
expected interface{}
}{
{"regex.split: empty string", []string{`p = x { regex.split("^[a-z]+\\[[0-9]+\\]$", "", [x]) }`}, `""`},
{"regex.split: non-repeat pattern", []string{`p = [v,w,x,y] { regex.split("a", "banana", [v,w,x,y]) }`}, `["b","n","n",""]`},
{"regex.split: repeat pattern", []string{`p = [v,w] { regex.split("z+", "pizza", [v,w]) }`}, `["pi","a"]`},
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)
}
}
func TestTopDownGlobsMatch(t *testing.T) {
tests := []struct {
note string
rules []string
expected interface{}
}{
{"regex.globs_match", []string{`p = true { regex.globs_match("a.a.[0-9]+z", ".b.b2359825792*594823z") }`}, "true"},
{"regex.globs_match", []string{`p = true { regex.globs_match("[a-z]+", "[0-9]*") }`}, ""},
{"regex.globs_match: bad pattern err", []string{`p = true { regex.globs_match("pqrs]", "[a-b]+") }`}, fmt.Errorf("input:pqrs], pos:5, set-close ']' with no preceding '[': the input provided is invalid")},
{"regex.globs_match: ref", []string{`p[x] { regex.globs_match("b.*", d.e[x]) }`}, "[0,1]"},
{"regex.globs_match: raw", []string{fmt.Sprintf(`p = true { regex.globs_match(%s, "foo\\[1\\]") }`, "`[a-z]+\\[[0-9]+\\]`")}, "true"},
{"regex.globs_match: raw: undefined", []string{fmt.Sprintf(`p = true { regex.globs_match(%s, "foo[\"bar\"]") }`, "`[a-z]+\\[[0-9]+\\]`")}, ""},
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)
}
}
func TestTopDownSets(t *testing.T) {
tests := []struct {
note string
rules []string
expected interface{}
}{
{"set_diff", []string{`p = x { s1 = {1, 2, 3, 4}; s2 = {1, 3}; x = s1 - s2 }`}, `[2,4]`},
{"set_diff: refs", []string{`p = x { s1 = {a[2], a[1], a[0]}; s2 = {a[0], 2}; set_diff(s1, s2, x) }`}, "[3]"},
{"set_diff: ground output", []string{`p = true { {1} = {1, 2, 3} - {2, 3} }`}, "true"},
{"set_diff: virt docs", []string{`p = x { x = s1 - s2 }`, `s1[1] { true }`, `s1[2] { true }`, `s1["c"] { true }`, `s2 = {"c", 1} { true }`}, "[2]"},
{"intersect", []string{`p = x { x = {a[1], a[2], 3} & {a[2], 4, 3} }`}, "[3]"},
{"union", []string{`p = true { {2, 3, 4} = {a[1], a[2], 3} | {a[2], 4, 3} }`}, "true"},
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)
}
}
func TestTopDownStrings(t *testing.T) {
tests := []struct {
note string
rules []string
expected interface{}
}{
{"format_int", []string{`p = x { format_int(15.5, 16, x) }`}, `"f"`},
{"format_int: undefined", []string{`p = true { format_int(15.5, 16, "10000") }`}, ""},
{"format_int: ref dest", []string{`p = true { format_int(3.1, 10, numbers[2]) }`}, "true"},
{"format_int: ref dest (2)", []string{`p = true { not format_int(4.1, 10, numbers[2]) }`}, "true"},
{"format_int: err: bad base", []string{`p = true { format_int(4.1, 199, x) }`}, fmt.Errorf("operand 2 must be one of {2, 8, 10, 16}")},
{"concat", []string{`p = x { concat("/", ["", "foo", "bar", "0", "baz"], x) }`}, `"/foo/bar/0/baz"`},
{"concat: set", []string{`p = x { concat(",", {"1", "2", "3"}, x) }`}, `"1,2,3"`},
{"concat: undefined", []string{`p = true { concat("/", ["a", "b"], "deadbeef") }`}, ""},
{"concat: ref dest", []string{`p = true { concat("", ["f", "o", "o"], c[0].x[2]) }`}, "true"},
{"concat: ref dest (2)", []string{`p = true { not concat("", ["b", "a", "r"], c[0].x[2]) }`}, "true"},
{"indexof", []string{`p = x { indexof("abcdefgh", "cde", x) }`}, "2"},
{"indexof: not found", []string{`p = x { indexof("abcdefgh", "xyz", x) }`}, "-1"},
{"substring", []string{`p = x { substring("abcdefgh", 2, 3, x) }`}, `"cde"`},
{"substring: remainder", []string{`p = x { substring("abcdefgh", 2, -1, x) }`}, `"cdefgh"`},
{"substring: too long", []string{`p = x { substring("abcdefgh", 2, 10000, x) }`}, `"cdefgh"`},
{"contains", []string{`p = true { contains("abcdefgh", "defg") }`}, "true"},
{"contains: undefined", []string{`p = true { contains("abcdefgh", "ac") }`}, ""},
{"startswith", []string{`p = true { startswith("abcdefgh", "abcd") }`}, "true"},
{"startswith: undefined", []string{`p = true { startswith("abcdefgh", "bcd") }`}, ""},
{"endswith", []string{`p = true { endswith("abcdefgh", "fgh") }`}, "true"},
{"endswith: undefined", []string{`p = true { endswith("abcdefgh", "fg") }`}, ""},
{"lower", []string{`p = x { lower("AbCdEf", x) }`}, `"abcdef"`},
{"upper", []string{`p = x { upper("AbCdEf", x) }`}, `"ABCDEF"`},
{"split: empty string", []string{`p = x { split("", ".", [x]) }`}, `""`},
{"split: one", []string{`p = x { split("foo", ".", [x]) }`}, `"foo"`},
{"split: many", []string{`p = [x,y] { split("foo.bar.baz", ".", [x,"bar",y]) }`}, `["foo","baz"]`},
{"replace: empty string", []string{`p = x { replace("", "hi", "bye", x) }`}, `""`},
{"replace: one", []string{`p = x { replace("foo.bar", ".", ",", x) }`}, `"foo,bar"`},
{"replace: many", []string{`p = x { replace("foo.bar.baz", ".", ",", x) }`}, `"foo,bar,baz"`},
{"replace: overlap", []string{`p = x { replace("foo...bar", "..", ",,", x) }`}, `"foo,,.bar"`},
{"trim: empty string", []string{`p = x { trim("", ".", x) }`}, `""`},
{"trim: end", []string{`p = x { trim("foo.bar...", ".", x) }`}, `"foo.bar"`},
{"trim: start", []string{`p = x { trim("...foo.bar", ".", x) }`}, `"foo.bar"`},
{"trim: both", []string{`p = x { trim("...foo.bar...", ".", x) }`}, `"foo.bar"`},
{"trim: multi-cutset", []string{`p = x { trim("...foo.bar...", ".fr", x) }`}, `"oo.ba"`},
{"trim: multi-cutset-none", []string{`p = x { trim("...foo.bar...", ".o", x) }`}, `"foo.bar"`},
{"sprintf: none", []string{`p = x { sprintf("hi", [], x) }`}, `"hi"`},
{"sprintf: string", []string{`p = x { sprintf("hi %s", ["there"], x) }`}, `"hi there"`},
{"sprintf: int", []string{`p = x { sprintf("hi %02d", [5], x) }`}, `"hi 05"`},
{"sprintf: hex", []string{`p = x { sprintf("hi %02X.%02X", [127, 1], x) }`}, `"hi 7F.01"`},
{"sprintf: float", []string{`p = x { sprintf("hi %.2f", [3.1415], x) }`}, `"hi 3.14"`},
{"sprintf: float too big", []string{`p = x { sprintf("hi %v", [2e308], x) }`}, `"hi 2e+308"`},
{"sprintf: bool", []string{`p = x { sprintf("hi %s", [true], x) }`}, `"hi true"`},
{"sprintf: composite", []string{`p = x { sprintf("hi %v", [["there", 5, 3.14]], x) }`}, `"hi [\"there\", 5, 3.14]"`},
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)
}
}
func TestTopDownJSONBuiltins(t *testing.T) {
tests := []struct {
note string
rules []string
expected interface{}
}{
{"marshal", []string{`p = x { json.marshal([{"foo": {1,2,3}}], x) }`}, `"[{\"foo\":[1,2,3]}]"`},
{"unmarshal", []string{`p = x { json.unmarshal("[{\"foo\":[1,2,3]}]", x) }`}, `[{"foo": [1,2,3]}]"`},
{"unmarshal-non-string", []string{`p = x { json.unmarshal(data.a[0], x) }`}, fmt.Errorf("operand 1 must be string but got number")},
{"yaml round-trip", []string{`p = y { yaml.marshal([{"foo": {1,2,3}}], x); yaml.unmarshal(x, y) }`}, `[{"foo": [1,2,3]}]`},
{"yaml unmarshal error", []string{`p { yaml.unmarshal("[1,2,3", _) } `}, fmt.Errorf("yaml: line 1: did not find")},
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)
}
}
func TestTopDownBase64Builtins(t *testing.T) {
tests := []struct {
note string
rules []string
expected interface{}
}{
{"encode-1", []string{`p = x { base64.encode("hello", x) }`}, `"aGVsbG8="`},
{"encode-2", []string{`p = x { base64.encode("there", x) }`}, `"dGhlcmU="`},
{"decode-1", []string{`p = x { base64.decode("aGVsbG8=", x) }`}, `"hello"`},
{"decode-2", []string{`p = x { base64.decode("dGhlcmU=", x) }`}, `"there"`},
{"encode-slash", []string{`p = x { base64.encode("subjects?_d", x) }`}, `"c3ViamVjdHM/X2Q="`},
{"decode-slash", []string{`p = x { base64.decode("c3ViamVjdHM/X2Q=", x) }`}, `"subjects?_d"`},
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)
}
}
func TestTopDownBase64UrlBuiltins(t *testing.T) {
tests := []struct {
note string
rules []string
expected interface{}
}{
{"encode-1", []string{`p = x { base64url.encode("hello", x) }`}, `"aGVsbG8="`},
{"encode-2", []string{`p = x { base64url.encode("there", x) }`}, `"dGhlcmU="`},
{"decode-1", []string{`p = x { base64url.decode("aGVsbG8=", x) }`}, `"hello"`},
{"decode-2", []string{`p = x { base64url.decode("dGhlcmU=", x) }`}, `"there"`},
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)
}
}
func TestTopDownURLBuiltins(t *testing.T) {
tests := []struct {
note string
rules []string
expected interface{}
}{
{"encode", []string{`p = x { urlquery.encode("a=b+1", x) }`}, `"a%3Db%2B1"`},
{"encode empty", []string{`p = x { urlquery.encode("", x) }`}, `""`},
{"decode", []string{`p = x { urlquery.decode("a%3Db%2B1", x) }`}, `"a=b+1"`},
{"encode_object empty", []string{`p = x { urlquery.encode_object({}, x) }`}, `""`},
{"encode_object strings", []string{`p = x { urlquery.encode_object({"a": "b", "c": "d"}, x) }`}, `"a=b&c=d"`},
{"encode_object escape", []string{`p = x { urlquery.encode_object({"a": "c=b+1"}, x) }`}, `"a=c%3Db%2B1"`},
{"encode_object array", []string{`p = x { urlquery.encode_object({"a": ["b+1","c+2"]}, x) }`}, `"a=b%2B1&a=c%2B2"`},
{"encode_object set", []string{`p = x { urlquery.encode_object({"a": {"b+1"}}, x) }`}, `"a=b%2B1"`},
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)
}
}
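// TestTopDownJWTBuiltins checks io.jwt.decode: each case lists a raw token and
// the expected header, payload, and hex-encoded signature, or the expected
// decoding error.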
func TestTopDownJWTBuiltins(t *testing.T) {
params := []struct {
note string
input string
header string
payload string
signature string
err string
}{
{
"simple",
`eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIwIiwiaXNzIjoib3BhIn0.XmVoLoHI3pxMtMO_WRONMSJzGUDP9pDjy8Jp0_tdRXY`,
`{ "alg": "HS256", "typ": "JWT" }`,
`{ "sub": "0", "iss": "opa" }`,
`5e65682e81c8de9c4cb4c3bf59138d3122731940cff690e3cbc269d3fb5d4576`,
"",
},
{
"simple-non-registered",
`eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJuZXciOiJJIGFtIGEgdXNlciBjcmVhdGVkIGZpZWxkIiwiaXNzIjoib3BhIn0.6UmjsclVDGD9jcmX_F8RJzVgHtUZuLu2pxkF_UEQCrE`,
`{ "alg": "HS256", "typ": "JWT" }`,
`{ "new": "I am a user created field", "iss": "opa" }`,
`e949a3b1c9550c60fd8dc997fc5f112735601ed519b8bbb6a71905fd41100ab1`,
"",
},
{
"no-support-jwe",
`eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCIsImVuYyI6ImJsYWgifQ.eyJuZXciOiJJIGFtIGEgdXNlciBjcmVhdGVkIGZpZWxkIiwiaXNzIjoib3BhIn0.McGUb1e-UviZKy6UyQErNNQzEUgeV25Buwk7OHOa8U8`,
``,
``,
``,
"JWT is a JWE object, which is not supported",
},
{
"no-periods",
`eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9eyJzdWIiOiIwIiwiaXNzIjoib3BhIn0XmVoLoHI3pxMtMO_WRONMSJzGUDP9pDjy8Jp0_tdRXY`,
``,
``,
``,
"encoded JWT had no period separators",
},
{
"wrong-period-count",
`eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXV.CJ9eyJzdWIiOiIwIiwiaXNzIjoib3BhIn0XmVoLoHI3pxMtMO_WRONMSJzGUDP9pDjy8Jp0_tdRXY`,
``,
``,
``,
"encoded JWT must have 3 sections, found 2",
},
{
"bad-header-encoding",
`eyJhbGciOiJIU^%zI1NiI+sInR5cCI6IkpXVCJ9.eyJzdWIiOiIwIiwiaXNzIjoib3BhIn0.XmVoLoHI3pxMtMO_WRONMSJzGUDP9pDjy8Jp0_tdRXY`,
``,
``,
``,
"JWT header had invalid encoding: illegal base64 data at input byte 13",
},
{
"bad-payload-encoding",
`eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIwIiwia/XNzIjoib3BhIn0.XmVoLoHI3pxMtMO_WRONMSJzGUDP9pDjy8Jp0_tdRXY`,
``,
``,
``,
"JWT payload had invalid encoding: illegal base64 data at input byte 17",
},
{
"bad-signature-encoding",
`eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIwIiwiaXNzIjoib3BhIn0.XmVoLoHI3pxMtMO(_WRONMSJzGUDP9pDjy8Jp0_tdRXY`,
``,
``,
``,
"JWT signature had invalid encoding: illegal base64 data at input byte 15",
},
{
"nested",
`eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCIsImN0eSI6IkpXVCJ9.ImV5SmhiR2NpT2lKSVV6STFOaUlzSW5SNWNDSTZJa3BYVkNKOS5leUp6ZFdJaU9pSXdJaXdpYVhOeklqb2liM0JoSW4wLlhtVm9Mb0hJM3B4TXRNT19XUk9OTVNKekdVRFA5cERqeThKcDBfdGRSWFki.8W0qx4mLxslmZl7wEMUWBxH7tST3XsEuWXxesXqFnRI`,
`{ "alg": "HS256", "typ": "JWT" }`,
`{ "sub": "0", "iss": "opa" }`,
`5e65682e81c8de9c4cb4c3bf59138d3122731940cff690e3cbc269d3fb5d4576`,
"",
},
{
"double-nested",
`eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCIsImN0eSI6IkpXVCJ9.ImV5SmhiR2NpT2lKSVV6STFOaUlzSW5SNWNDSTZJa3BYVkNJc0ltTjBlU0k2SWtwWFZDSjkuSW1WNVNtaGlSMk5wVDJsS1NWVjZTVEZPYVVselNXNVNOV05EU1RaSmEzQllWa05LT1M1bGVVcDZaRmRKYVU5cFNYZEphWGRwWVZoT2VrbHFiMmxpTTBKb1NXNHdMbGh0Vm05TWIwaEpNM0I0VFhSTlQxOVhVazlPVFZOS2VrZFZSRkE1Y0VScWVUaEtjREJmZEdSU1dGa2kuOFcwcXg0bUx4c2xtWmw3d0VNVVdCeEg3dFNUM1hzRXVXWHhlc1hxRm5SSSI.U8rwnGAJ-bJoGrAYKEzNtbJQWd3x1eW0Y25nLKHDCgo`,
`{ "alg": "HS256", "typ": "JWT" }`,
`{ "sub": "0", "iss": "opa" }`,
`5e65682e81c8de9c4cb4c3bf59138d3122731940cff690e3cbc269d3fb5d4576`,
"",
},
{
"complex-values",
`eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIwIiwiaXNzIjoib3BhIiwiZXh0Ijp7ImFiYyI6IjEyMyIsImNiYSI6WzEwLCIxMCJdfX0.IIxF-uJ6i4K5Dj71xNLnUeqB9jmujl6ujTInhii1PxE`,
`{ "alg": "HS256", "typ": "JWT" }`,
`{ "sub": "0", "iss": "opa", "ext": { "abc": "123", "cba": [10, "10"] } }`,
`208c45fae27a8b82b90e3ef5c4d2e751ea81f639ae8e5eae8d32278628b53f11`,
"",
},
// The test below checks that payloads with duplicate keys
// in their encoding produce a token object that binds the key
// to the last occurring value, as per RFC 7519 Section 4.
// It tests a payload encoding that has 3 duplicates of the
// "iss" key, with the values "not opa", "also not opa" and
// "opa", in that order.
// Go's json.Unmarshal exhibits this behavior, but it is not
// documented, so this test is meant to catch that behavior
// if it changes.
{
"duplicate-keys",
`eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiAiMCIsImlzcyI6ICJub3Qgb3BhIiwgImlzcyI6ICJhbHNvIG5vdCBvcGEiLCAiaXNzIjogIm9wYSJ9.XmVoLoHI3pxMtMO_WRONMSJzGUDP9pDjy8Jp0_tdRXY`,
`{ "alg": "HS256", "typ": "JWT" }`,
`{ "sub": "0", "iss": "opa" }`,
`5e65682e81c8de9c4cb4c3bf59138d3122731940cff690e3cbc269d3fb5d4576`,
"",
},
}
type test struct {
note string
rules []string
expected interface{}
}
tests := []test{}
for _, p := range params {
var exp interface{}
exp = fmt.Sprintf(`[%s, %s, "%s"]`, p.header, p.payload, p.signature)
if p.err != "" {
exp = errors.New(p.err)
}
tests = append(tests, test{
p.note,
[]string{fmt.Sprintf(`p = [x, y, z] { io.jwt.decode("%s", [x, y, z]) }`, p.input)},
exp,
})
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)
}
}
const certPem = `-----BEGIN CERTIFICATE-----\nMIIFiDCCA3ACCQCGV6XsfG/oRTANBgkqhkiG9w0BAQUFADCBhTELMAkGA1UEBhMC\nVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFTATBgNVBAcMDFJlZHdvb2QgQ2l0eTEO\nMAwGA1UECgwFU3R5cmExDDAKBgNVBAsMA0RldjESMBAGA1UEAwwJbG9jYWxob3N0\nMRgwFgYJKoZIhvcNAQkBFglhc2hAc3R5cmEwHhcNMTgwMzA2MDAxNTU5WhcNMTkw\nMzA2MDAxNTU5WjCBhTELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWEx\nFTATBgNVBAcMDFJlZHdvb2QgQ2l0eTEOMAwGA1UECgwFU3R5cmExDDAKBgNVBAsM\nA0RldjESMBAGA1UEAwwJbG9jYWxob3N0MRgwFgYJKoZIhvcNAQkBFglhc2hAc3R5\ncmEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDucnAwTRA0zqDQ671L\nKWOVwhjhycFyzyhZUd7vhsnslOBiYM6TYIDXhETfAk2RQoRE/9xF16woMD8FOglc\nlSuhi+GNfFRif6LfArm84ZFj1ZS1MX2logikhXhRJQ7AOHe5+ED0re3KH5lWyqfz\nR6bQuPYwTQSBJy6Tq7T9RiOM29yadCX64OaCEbzEFmHtNlbb5px4zCVvgskg/fpV\nGGCMpAYjGDatbxE5eAloVs1EJuI5RSqWr1JRm6EejxM04BFdfGn1HgWrsKXtlvBa\n00/AC0zXL5n6LK7+L3WbRguVTZcE4Yu70gDwhmM+VsKeT9LKClX003BNj0NJDRB9\ndw9MaWxsXDNHNOWEfbnASXeP7ZRv3D81ftij6P8SL14ZnxyrRty8TAN4ij3wd41l\nastRQCtrJFi+HzO606XOp6HDzBoWT0DGl8Sn2hZ6RLPyBnD04vvvcSGeCVjHGOQ8\nc3OTroK58u5MR/q4T00sTkeeVAxuKoEWKsjIBYYrJTe/a2mEq9yiDGbPNYDnWnQZ\njSUZm+Us23Y2sm/agZ5zKXcEuoecGL6sYCixr/xeB9BPxEiTthH+0M8OY99qpIhz\nSmj41wdgQfzZi/6B8pIr77V/KywYKxJEmzw8Uy48aC/rZ8WsT8QdKwclo1aiNJhx\n79OvGbZFoeHD/w7igpx+ttpF/wIDAQABMA0GCSqGSIb3DQEBBQUAA4ICAQC3wWUs\nfXz+aSfFVz+O3mLFkr65NIgazbGAySgMgMNVuadheIkPL4k21atyflfpx4pg9FGv\n40vWCLMajpvynfz4oqah0BACnpqzQ8Dx6HYkmlXK8fLB+WtPrZBeUEsGPKuJYt4M\nd5TeY3VpNgWOPXmnE4lvxHZqh/8OwmOpjBfC9E3e2eqgwiwOkXnMaZEPgKP6JiWk\nEFaQ9jgMQqJZnNcv6NmiqqsZeI0/NNjBpkmEWQl+wLegVusHiQ0FMBMQ0taEo21r\nzUwHoNJR3h3wgGQiKxKOH1FUKHBV7hEqObLraD/hfG5xYucJfvvAAP1iH0ycPs+9\nhSccrn5/HY1c9AZnW8Kh7atp/wFP+sHjtECWK/lUmXfhASS293hprCpJk2n9pkmR\nziXKJhjwkxlC8NcHuiVfaxdfDa4+1Qta2gK7GEypbvLoEmIt/dsYUsxUg84lwJJ9\nnyC/pfZ5a8wFSf186JeVH4kHd3bnkzlQz460HndOMSJ/Xi1wSfuZlOVupFf8TVKl\np4j28MTLH2Wqx50NssKThdaX6hoCiMqreYa+EVaN1f/cIGQxZSCzdzMCKqdB8lKB\n3Eax+5zsIa/UyPwGxZcyXBRHAlz5ZnkjuRxInyiMkBWWz3IZXjTe6Fq8BNd2UWNc\nw35+2nO5n1LKXgR2+nzhZUOk8TPsi9WUywRluQ==\n-----END CERTIFICATE-----`
const certPemPs = `-----BEGIN CERTIFICATE-----\nMIIC/DCCAeSgAwIBAgIJAJRvYDU3ei3EMA0GCSqGSIb3DQEBCwUAMBMxETAPBgNV\nBAMMCHdoYXRldmVyMB4XDTE4MDgxMDEwMzgxNloXDTE4MDkwOTEwMzgxNlowEzER\nMA8GA1UEAwwId2hhdGV2ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB\nAQC4kCmzLMW/5jzkzkmN7Me8wPD+ymBUIjsGqliGfMrfFfDV2eTPVtZcYD3IXoB4\nAOUT7XJzWjOsBRFOcVKKEiCPjXiLcwLb/QWQ1x0Budft32r3+N0KQd1rgcRHTPNc\nJoeWCfOgDPp51RTzTT6HQuV4ud+CDhRJP7QMVMIgal9Nuzs49LLZaBPW8/rFsHjk\nJQ4kDujSrpcT6F2FZY3SmWsOJgP7RjVKk5BheYeFKav5ZV4p6iHn/TN4RVpvpNBh\n5z/XoHITJ6lpkHSDpbIaQUTpobU2um8N3biz+HsEAmD9Laa27WUpYSpiM6DDMSXl\ndBDJdumerVRJvXYCtfXqtl17AgMBAAGjUzBRMB0GA1UdDgQWBBRz74MkVzT2K52/\nFJC4mTa9coM/DTAfBgNVHSMEGDAWgBRz74MkVzT2K52/FJC4mTa9coM/DTAPBgNV\nHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQAD1ZE4IaIAetqGG+vt9oz1\nIx0j4EPok0ONyhhmiSsF6rSv8zlNWweVf5y6Z+AoTNY1Fym0T7dbpbqIox0EdKV3\nFLzniWOjznupbnqfXwHX/g1UAZSyt3akSatVhvNpGlnd7efTIAiNinX/TkzIjhZ7\nihMIZCGykT1P0ys1OaeEf57wAzviatD4pEMTIW0OOqY8bdRGhuJR1kKUZ/2Nm8Ln\ny7E0y8uODVbH9cAwGyzWB/QFc+bffNgi9uJaPQQc5Zxwpu9utlqyzFvXgV7MBYUK\nEYSLyxp4g4e5aujtLugaC8H6n9vP1mEBr/+T8HGynBZHNTKlDhhL9qDbpkkNB6/w\n-----END CERTIFICATE-----`
const certPemEs256 = `-----BEGIN CERTIFICATE-----\nMIIBcDCCARagAwIBAgIJAMZmuGSIfvgzMAoGCCqGSM49BAMCMBMxETAPBgNVBAMM\nCHdoYXRldmVyMB4XDTE4MDgxMDE0Mjg1NFoXDTE4MDkwOTE0Mjg1NFowEzERMA8G\nA1UEAwwId2hhdGV2ZXIwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAATPwn3WCEXL\nmjp/bFniDwuwsfu7bASlPae2PyWhqGeWwe23Xlyx+tSqxlkXYe4pZ23BkAAscpGj\nyn5gXHExyDlKo1MwUTAdBgNVHQ4EFgQUElRjSoVgKjUqY5AXz2o74cLzzS8wHwYD\nVR0jBBgwFoAUElRjSoVgKjUqY5AXz2o74cLzzS8wDwYDVR0TAQH/BAUwAwEB/zAK\nBggqhkjOPQQDAgNIADBFAiEA4yQ/88ZrUX68c6kOe9G11u8NUaUzd8pLOtkKhniN\nOHoCIHmNX37JOqTcTzGn2u9+c8NlnvZ0uDvsd1BmKPaUmjmm\n-----END CERTIFICATE-----\n`
const certPemBad = `-----BEGIN CERT-----\nMIIFiDCCA3ACCQCGV6XsfG/oRTANBgkqhkiG9w0BAQUFADCBhTELMAkGA1UEBhMC\nVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFTATBgNVBAcMDFJlZHdvb2QgQ2l0eTEO\nMAwGA1UECgwFU3R5cmExDDAKBgNVBAsMA0RldjESMBAGA1UEAwwJbG9jYWxob3N0\nMRgwFgYJKoZIhvcNAQkBFglhc2hAc3R5cmEwHhcNMTgwMzA2MDAxNTU5WhcNMTkw\nMzA2MDAxNTU5WjCBhTELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWEx\nFTATBgNVBAcMDFJlZHdvb2QgQ2l0eTEOMAwGA1UECgwFU3R5cmExDDAKBgNVBAsM\nA0RldjESMBAGA1UEAwwJbG9jYWxob3N0MRgwFgYJKoZIhvcNAQkBFglhc2hAc3R5\ncmEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDucnAwTRA0zqDQ671L\nKWOVwhjhycFyzyhZUd7vhsnslOBiYM6TYIDXhETfAk2RQoRE/9xF16woMD8FOglc\nlSuhi+GNfFRif6LfArm84ZFj1ZS1MX2logikhXhRJQ7AOHe5+ED0re3KH5lWyqfz\nR6bQuPYwTQSBJy6Tq7T9RiOM29yadCX64OaCEbzEFmHtNlbb5px4zCVvgskg/fpV\nGGCMpAYjGDatbxE5eAloVs1EJuI5RSqWr1JRm6EejxM04BFdfGn1HgWrsKXtlvBa\n00/AC0zXL5n6LK7+L3WbRguVTZcE4Yu70gDwhmM+VsKeT9LKClX003BNj0NJDRB9\ndw9MaWxsXDNHNOWEfbnASXeP7ZRv3D81ftij6P8SL14ZnxyrRty8TAN4ij3wd41l\nastRQCtrJFi+HzO606XOp6HDzBoWT0DGl8Sn2hZ6RLPyBnD04vvvcSGeCVjHGOQ8\nc3OTroK58u5MR/q4T00sTkeeVAxuKoEWKsjIBYYrJTe/a2mEq9yiDGbPNYDnWnQZ\njSUZm+Us23Y2sm/agZ5zKXcEuoecGL6sYCixr/xeB9BPxEiTthH+0M8OY99qpIhz\nSmj41wdgQfzZi/6B8pIr77V/KywYKxJEmzw8Uy48aC/rZ8WsT8QdKwclo1aiNJhx\n79OvGbZFoeHD/w7igpx+ttpF/wIDAQABMA0GCSqGSIb3DQEBBQUAA4ICAQC3wWUs\nfXz+aSfFVz+O3mLFkr65NIgazbGAySgMgMNVuadheIkPL4k21atyflfpx4pg9FGv\n40vWCLMajpvynfz4oqah0BACnpqzQ8Dx6HYkmlXK8fLB+WtPrZBeUEsGPKuJYt4M\nd5TeY3VpNgWOPXmnE4lvxHZqh/8OwmOpjBfC9E3e2eqgwiwOkXnMaZEPgKP6JiWk\nEFaQ9jgMQqJZnNcv6NmiqqsZeI0/NNjBpkmEWQl+wLegVusHiQ0FMBMQ0taEo21r\nzUwHoNJR3h3wgGQiKxKOH1FUKHBV7hEqObLraD/hfG5xYucJfvvAAP1iH0ycPs+9\nhSccrn5/HY1c9AZnW8Kh7atp/wFP+sHjtECWK/lUmXfhASS293hprCpJk2n9pkmR\nziXKJhjwkxlC8NcHuiVfaxdfDa4+1Qta2gK7GEypbvLoEmIt/dsYUsxUg84lwJJ9\nnyC/pfZ5a8wFSf186JeVH4kHd3bnkzlQz460HndOMSJ/Xi1wSfuZlOVupFf8TVKl\np4j28MTLH2Wqx50NssKThdaX6hoCiMqreYa+EVaN1f/cIGQxZSCzdzMCKqdB8lKB\n3Eax+5zsIa/UyPwGxZcyXBRHAlz5ZnkjuRxInyiMkBWWz3IZXjTe6Fq8BNd2UWNc\nw35+2nO5n1LKXgR2+nzhZUOk8TPsi9WUywRluQ==\n-----END CERT-----`
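// TestTopDownJWTVerifyRSA checks io.jwt.verify_rs256, verify_ps256, and
// verify_es256 against the certificates above, covering wrong keys, wrong
// algorithms, malformed tokens, and a bad certificate.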
func TestTopDownJWTVerifyRSA(t *testing.T) {
params := []struct {
note string
alg string
input1 string
input2 string
result bool
err string
}{
{
"success",
"rs256",
`eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJuYmYiOjE0NDQ0Nzg0MDB9.N0-EVdv5pvUfZYFRzMGnsWpNLHgwMEgViPwpuLBEtt32682OgnOK-N4X-2gpQEjQIbUr0IFym8YsRQU9GZvqQP72Sd6yOQNGSNeE74DpUZCAjBa9SBIb1UlD2MxZB-e7YJiEyo7pZhimaqorXrgorlaXYGMvsCFWDYmBLzGaGYaGJyEpkZHzHb7ujsDrJJjdEtDV3kh13gTHzLPvqnoXuuxelXye_8LPIhvgDy52gT4shUEso71pJCMv_IqAR19ljVE17lJzoi6VhRn6ReNUE-yg4KfCO4Ypnuu-mcQr7XtmSYoWkX72L5UQ-EyWkoz-w0SYKoJTPzHkTL2thYStksVpeNkGuck25aUdtrQgmPbao0QOWBFlkg03e6mPCD2-aXOt1ofth9mZGjxWMHX-mUqHaNmaWM3WhRztJ73hWrmB1YOdYQtOEHejfvR_td5tqIw4W6ufRy2ScOypGQe7kNaUZxpgxZ1927ZGNiQgawIOAQwXOcFx1JNSEIeg55-cYJrHPxsXGOB9ZxW-qnswmFJp474iUVXjzGhLexJDXBwvKGs_O3JFjMsvyV9_hm7bnQU0vG_HgPYs5i9VOHRMujq1vFBcm52TFVOBGdWaGfb9RRdLLYvVkJLk0Poh19rsCWb7-Vc3mAaGGpvuk4Wv-PnGGNC-V-FQqIbijHDrn_g`,
certPem,
true,
"",
},
{
"success-ps256",
"ps256",
`eyJ0eXAiOiAiSldUIiwgImFsZyI6ICJQUzI1NiJ9.eyJuYmYiOiAxNDQ0NDc4NDAwLCAiZm9vIjogImJhciJ9.i0F3MHWzOsBNLqjQzK1UVeQid9xPMowCoUsoM-C2BDxUY-FMKmCeJ1NJ4TGnS9HzFK1ftEvRnPT7EOxOkHPoCk1rz3feTFgtHtNzQqLM1IBTnz6aHHOrda_bKPHH9ZIYCRQUPXhpC90ivW_IJR-f7Z1WLrMXaJ71i1XteruENHrJJJDn0HedHG6N0VHugBHrak5k57cbE31utAdx83TEd8v2Y8wAkCJXKrdmTa-8419LNxW_yjkvoDD53n3X5CHhYkSymU77p0v6yWO38qDWeKJ-Fm_PrMAo72_rizDBj_yPa5LA3bT_EnsgZtC-sp8_SCDIH41bjiCGpRHhqgZmyw`,
certPemPs,
true,
"",
},
{
"success-es256",
"es256",
`eyJ0eXAiOiAiSldUIiwgImFsZyI6ICJFUzI1NiJ9.eyJuYmYiOiAxNDQ0NDc4NDAwLCAiaXNzIjogInh4eCJ9.lArczfN-pIL8oUU-7PU83u-zfXougXBZj6drFeKFsPEoVhy9WAyiZlRshYqjTSXdaw8yw2L-ovt4zTUZb2PWMg`,
certPemEs256,
true,
"",
},
{
"failure-bad token",
"rs256",
`eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJuYmYiOjE0NDQ0Nzg0MDB9.Yt89BjaPCNgol478rYyH66-XgkHos02TsVwxLH3ZlvOoIVjbhYW8q1_MHehct1-yBf1UOX3g-lUrIjpoDtX1TfAESuaWTjYPixRvjfJ-Nn75JF8QuAl5PD27C6aJ4PjUPNfj0kwYBnNQ_oX-ZFb781xRi7qRDB6swE4eBUxzHqKUJBLaMM2r8k1-9iE3ERNeqTJUhV__p0aSyRj-i62rdZ4TC5nhxtWodiGP4e4GrYlXkdaKduK63cfdJF-kfZfTsoDs_xy84pZOkzlflxuNv9bNqd-3ISAdWe4gsEvWWJ8v70-QWkydnH8rhj95DaqoXrjfzbOgDpKtdxJC4daVPKvntykzrxKhZ9UtWzm3OvJSKeyWujFZlldiTfBLqNDgdi-Boj_VxO5Pdh-67lC3L-pBMm4BgUqf6rakBQvoH7AV6zD5CbFixh7DuqJ4eJHHItWzJwDctMrV3asm-uOE1E2B7GErGo3iX6S9Iun_kvRUp6kyvOaDq5VvXzQOKyLQIQyHGGs0aIV5cFI2IuO5Rt0uUj5mzPQrQWHgI4r6Mc5bzmq2QLxBQE8OJ1RFhRpsuoWQyDM8aRiMQIJe1g3x4dnxbJK4dYheYblKHFepScYqT1hllDp3oUNn89sIjQIhJTe8KFATu4K8ppluys7vhpE2a_tq8i5O0MFxWmsxN4Q`,
certPem,
false,
"",
},
{
"failure-wrong key",
"ps256",
`eyJ0eXAiOiAiSldUIiwgImFsZyI6ICJQUzI1NiJ9.eyJuYmYiOiAxNDQ0NDc4NDAwLCAiZm9vIjogImJhciJ9.i0F3MHWzOsBNLqjQzK1UVeQid9xPMowCoUsoM-C2BDxUY-FMKmCeJ1NJ4TGnS9HzFK1ftEvRnPT7EOxOkHPoCk1rz3feTFgtHtNzQqLM1IBTnz6aHHOrda_bKPHH9ZIYCRQUPXhpC90ivW_IJR-f7Z1WLrMXaJ71i1XteruENHrJJJDn0HedHG6N0VHugBHrak5k57cbE31utAdx83TEd8v2Y8wAkCJXKrdmTa-8419LNxW_yjkvoDD53n3X5CHhYkSymU77p0v6yWO38qDWeKJ-Fm_PrMAo72_rizDBj_yPa5LA3bT_EnsgZtC-sp8_SCDIH41bjiCGpRHhqgZmyw`,
certPem,
false,
"",
},
{
"failure-wrong alg",
"ps256",
`eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJuYmYiOjE0NDQ0Nzg0MDB9.N0-EVdv5pvUfZYFRzMGnsWpNLHgwMEgViPwpuLBEtt32682OgnOK-N4X-2gpQEjQIbUr0IFym8YsRQU9GZvqQP72Sd6yOQNGSNeE74DpUZCAjBa9SBIb1UlD2MxZB-e7YJiEyo7pZhimaqorXrgorlaXYGMvsCFWDYmBLzGaGYaGJyEpkZHzHb7ujsDrJJjdEtDV3kh13gTHzLPvqnoXuuxelXye_8LPIhvgDy52gT4shUEso71pJCMv_IqAR19ljVE17lJzoi6VhRn6ReNUE-yg4KfCO4Ypnuu-mcQr7XtmSYoWkX72L5UQ-EyWkoz-w0SYKoJTPzHkTL2thYStksVpeNkGuck25aUdtrQgmPbao0QOWBFlkg03e6mPCD2-aXOt1ofth9mZGjxWMHX-mUqHaNmaWM3WhRztJ73hWrmB1YOdYQtOEHejfvR_td5tqIw4W6ufRy2ScOypGQe7kNaUZxpgxZ1927ZGNiQgawIOAQwXOcFx1JNSEIeg55-cYJrHPxsXGOB9ZxW-qnswmFJp474iUVXjzGhLexJDXBwvKGs_O3JFjMsvyV9_hm7bnQU0vG_HgPYs5i9VOHRMujq1vFBcm52TFVOBGdWaGfb9RRdLLYvVkJLk0Poh19rsCWb7-Vc3mAaGGpvuk4Wv-PnGGNC-V-FQqIbijHDrn_g`,
certPem,
false,
"",
},
{
"failure-invalid token",
"rs256",
`eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJuYmYiOjE0NDQ0Nzg0MDB9`,
certPem,
false,
"encoded JWT must have 3 sections, found 2",
},
{
"failure-bad cert",
"rs256",
`eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJmb28iOiJiYXIiLCJuYmYiOjE0NDQ0Nzg0MDB9.N0-EVdv5pvUfZYFRzMGnsWpNLHgwMEgViPwpuLBEtt32682OgnOK-N4X-2gpQEjQIbUr0IFym8YsRQU9GZvqQP72Sd6yOQNGSNeE74DpUZCAjBa9SBIb1UlD2MxZB-e7YJiEyo7pZhimaqorXrgorlaXYGMvsCFWDYmBLzGaGYaGJyEpkZHzHb7ujsDrJJjdEtDV3kh13gTHzLPvqnoXuuxelXye_8LPIhvgDy52gT4shUEso71pJCMv_IqAR19ljVE17lJzoi6VhRn6ReNUE-yg4KfCO4Ypnuu-mcQr7XtmSYoWkX72L5UQ-EyWkoz-w0SYKoJTPzHkTL2thYStksVpeNkGuck25aUdtrQgmPbao0QOWBFlkg03e6mPCD2-aXOt1ofth9mZGjxWMHX-mUqHaNmaWM3WhRztJ73hWrmB1YOdYQtOEHejfvR_td5tqIw4W6ufRy2ScOypGQe7kNaUZxpgxZ1927ZGNiQgawIOAQwXOcFx1JNSEIeg55-cYJrHPxsXGOB9ZxW-qnswmFJp474iUVXjzGhLexJDXBwvKGs_O3JFjMsvyV9_hm7bnQU0vG_HgPYs5i9VOHRMujq1vFBcm52TFVOBGdWaGfb9RRdLLYvVkJLk0Poh19rsCWb7-Vc3mAaGGpvuk4Wv-PnGGNC-V-FQqIbijHDrn_g`,
certPemBad,
false,
"failed to decode PEM block containing certificate",
},
}
type test struct {
note string
rules []string
expected interface{}
}
tests := []test{}
for _, p := range params {
var exp interface{}
exp = fmt.Sprintf(`%t`, p.result)
if p.err != "" {
exp = errors.New(p.err)
}
tests = append(tests, test{
p.note,
[]string{fmt.Sprintf(`p = x { io.jwt.verify_%s("%s", "%s", x) }`, p.alg, p.input1, p.input2)},
exp,
})
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)
}
}
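// TestTopDownJWTVerifyHS256 checks io.jwt.verify_hs256 with a shared secret,
// including a token whose signature does not match and a structurally invalid
// token.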
func TestTopDownJWTVerifyHS256(t *testing.T) {
params := []struct {
note string
input1 string
input2 string
result bool
err string
}{
{
"success",
`eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyIjoiYWxpY2UiLCJhenAiOiJhbGljZSIsInN1Ym9yZGluYXRlcyI6W10sImhyIjpmYWxzZX0.rz3jTY033z-NrKfwrK89_dcLF7TN4gwCMj-fVBDyLoM`,
"secret",
true,
"",
},
{
"failure-bad token",
`eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyIjoiYWxpY2UiLCJhenAiOiJhbGljZSIsInN1Ym9yZGluYXRlcyI6W10sImhyIjpmYWxzZX0.R0NDxM1gHTucWQKwayMDre2PbMNR9K9efmOfygDZWcE`,
"secret",
false,
"",
},
{
"failure-invalid token",
`eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyIjoiYWxpY2UiLCJhenAiOiJhbGljZSIsInN1Ym9yZGluYXRlcyI6W10sImhyIjpmYWxzZX0`,
"secret",
false,
"encoded JWT must have 3 sections, found 2",
},
}
type test struct {
note string
rules []string
expected interface{}
}
tests := []test{}
for _, p := range params {
var exp interface{}
exp = fmt.Sprintf(`%t`, p.result)
if p.err != "" {
exp = errors.New(p.err)
}
tests = append(tests, test{
p.note,
[]string{fmt.Sprintf(`p = x { io.jwt.verify_hs256("%s", "%s", x) }`, p.input1, p.input2)},
exp,
})
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)
}
}
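// TestTopDownJWTDecodeVerify checks io.jwt.decode_verify. Each case supplies a
// token and a constraints object (cert or secret, plus optional alg, iss, aud,
// and time) and expects either [valid, header, payload] or an error.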
func TestTopDownJWTDecodeVerify(t *testing.T) {
params := []struct {
note string // test name
token string // JWT
constraints string // constraints argument
valid bool // expected validity value
header string // expected header
payload string // expected claims
err string // expected error or "" for success
}{
{
"ps256-unconstrained", // no constraints at all (apart from supplying a key)
"eyJhbGciOiAiUFMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4In0.iCePYnD1U13oBe_6ylhmojmkY_VZNYXqVszAej8RImMGv51OEqARmYFkRZYTiYCiVFober7vcDq_stOj1uAJCuttygGW_dpHiN-3EWsU2E2vCnXlygWe0ud38pOC-OVyEFbXxO9-m51vnS-3VmBjEO8G1UE8bLFXTeFOGkUIj9dqlefJSWh5wa8XA3g9mj0jqpuJi-7QgEIeVHk-JzhGpoFqI2f-Df_agVvc2x4V-6fJmj7wV2IsaFPRi36mVQmg8S-dkxu4AlaeCILhyNZl8ewjBHHBjJFRwzcy88L00mzdO51ZxEYsBdQav3ux2sc6vjT9PvvjAwzcthQxEoEaNA",
fmt.Sprintf(`{"cert": "%s"}`, certPemPs),
true,
`{"alg": "PS256", "typ": "JWT"}`,
`{"iss": "xxx"}`,
"",
},
{
"ps256-key-wrong", // wrong key for signature
"eyJhbGciOiAiUFMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4In0.iCePYnD1U13oBe_6ylhmojmkY_VZNYXqVszAej8RImMGv51OEqARmYFkRZYTiYCiVFober7vcDq_stOj1uAJCuttygGW_dpHiN-3EWsU2E2vCnXlygWe0ud38pOC-OVyEFbXxO9-m51vnS-3VmBjEO8G1UE8bLFXTeFOGkUIj9dqlefJSWh5wa8XA3g9mj0jqpuJi-7QgEIeVHk-JzhGpoFqI2f-Df_agVvc2x4V-6fJmj7wV2IsaFPRi36mVQmg8S-dkxu4AlaeCILhyNZl8ewjBHHBjJFRwzcy88L00mzdO51ZxEYsBdQav3ux2sc6vjT9PvvjAwzcthQxEoEaNA",
fmt.Sprintf(`{"cert": "%s"}`, certPem),
false,
`{}`,
`{}`,
"",
},
{
"rs256-key-wrong", // wrong key for signature
"eyJhbGciOiAiUlMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4IiwgImV4cCI6IDMwMDB9.hqDP3AzshNhUZMI02U3nLPrj93QFrgs-74XFrF1Vry2bplrz-NKpdVdfTu8iY_bhmkWf2Om5DdwRZj2ZgpGahtnshnHaRq0RyqF-m3Y7oNj6JL_YMwgxsFIIHtBlagBqDU-gZK99iqSOSGqVhvxqX6gCqFgE7vnEGHeeDedtRM53coAJuwzy8rQV9m3TewoofPdPasGv-dBLQZ3qgmnibkSgb7SmFpjXBy8zL3xJXOZhAHYlgcmcEoFVaWlBguIcWA87WZlpCLYcdYTJzSZweC3QLUhZ4RLJW84-LMKp6xWLLPrp3OgnsduB2G9PYMmYw_qCkuY1KGwfH4PvCQbAzQ",
fmt.Sprintf(`{"cert": "%s"}`, certPem),
false,
`{}`,
`{}`,
"",
},
{
"ps256-iss-ok", // enforce issuer
"eyJhbGciOiAiUFMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4In0.iCePYnD1U13oBe_6ylhmojmkY_VZNYXqVszAej8RImMGv51OEqARmYFkRZYTiYCiVFober7vcDq_stOj1uAJCuttygGW_dpHiN-3EWsU2E2vCnXlygWe0ud38pOC-OVyEFbXxO9-m51vnS-3VmBjEO8G1UE8bLFXTeFOGkUIj9dqlefJSWh5wa8XA3g9mj0jqpuJi-7QgEIeVHk-JzhGpoFqI2f-Df_agVvc2x4V-6fJmj7wV2IsaFPRi36mVQmg8S-dkxu4AlaeCILhyNZl8ewjBHHBjJFRwzcy88L00mzdO51ZxEYsBdQav3ux2sc6vjT9PvvjAwzcthQxEoEaNA",
fmt.Sprintf(`{"cert": "%s", "iss": "xxx"}`, certPemPs),
true,
`{"alg": "PS256", "typ": "JWT"}`,
`{"iss": "xxx"}`,
"",
},
{
"ps256-iss-wrong", // wrong issuer
"eyJhbGciOiAiUFMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4In0.iCePYnD1U13oBe_6ylhmojmkY_VZNYXqVszAej8RImMGv51OEqARmYFkRZYTiYCiVFober7vcDq_stOj1uAJCuttygGW_dpHiN-3EWsU2E2vCnXlygWe0ud38pOC-OVyEFbXxO9-m51vnS-3VmBjEO8G1UE8bLFXTeFOGkUIj9dqlefJSWh5wa8XA3g9mj0jqpuJi-7QgEIeVHk-JzhGpoFqI2f-Df_agVvc2x4V-6fJmj7wV2IsaFPRi36mVQmg8S-dkxu4AlaeCILhyNZl8ewjBHHBjJFRwzcy88L00mzdO51ZxEYsBdQav3ux2sc6vjT9PvvjAwzcthQxEoEaNA",
fmt.Sprintf(`{"cert": "%s", "iss": "yyy"}`, certPemPs),
false,
`{}`,
`{}`,
"",
},
{
"ps256-alg-ok", // constrained algorithm
"eyJhbGciOiAiUFMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4In0.iCePYnD1U13oBe_6ylhmojmkY_VZNYXqVszAej8RImMGv51OEqARmYFkRZYTiYCiVFober7vcDq_stOj1uAJCuttygGW_dpHiN-3EWsU2E2vCnXlygWe0ud38pOC-OVyEFbXxO9-m51vnS-3VmBjEO8G1UE8bLFXTeFOGkUIj9dqlefJSWh5wa8XA3g9mj0jqpuJi-7QgEIeVHk-JzhGpoFqI2f-Df_agVvc2x4V-6fJmj7wV2IsaFPRi36mVQmg8S-dkxu4AlaeCILhyNZl8ewjBHHBjJFRwzcy88L00mzdO51ZxEYsBdQav3ux2sc6vjT9PvvjAwzcthQxEoEaNA",
fmt.Sprintf(`{"cert": "%s", "alg": "PS256"}`, certPemPs),
true,
`{"alg": "PS256", "typ": "JWT"}`,
`{"iss": "xxx"}`,
"",
},
{
"ps256-alg-wrong", // constrained algorithm, and it's wrong
"eyJhbGciOiAiUFMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4In0.iCePYnD1U13oBe_6ylhmojmkY_VZNYXqVszAej8RImMGv51OEqARmYFkRZYTiYCiVFober7vcDq_stOj1uAJCuttygGW_dpHiN-3EWsU2E2vCnXlygWe0ud38pOC-OVyEFbXxO9-m51vnS-3VmBjEO8G1UE8bLFXTeFOGkUIj9dqlefJSWh5wa8XA3g9mj0jqpuJi-7QgEIeVHk-JzhGpoFqI2f-Df_agVvc2x4V-6fJmj7wV2IsaFPRi36mVQmg8S-dkxu4AlaeCILhyNZl8ewjBHHBjJFRwzcy88L00mzdO51ZxEYsBdQav3ux2sc6vjT9PvvjAwzcthQxEoEaNA",
fmt.Sprintf(`{"cert": "%s", "alg": "RS256"}`, certPemPs),
false,
`{}`,
`{}`,
"",
},
{
"rs256-exp-ok", // token expires, and it's still valid
"eyJhbGciOiAiUlMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4IiwgImV4cCI6IDMwMDB9.hqDP3AzshNhUZMI02U3nLPrj93QFrgs-74XFrF1Vry2bplrz-NKpdVdfTu8iY_bhmkWf2Om5DdwRZj2ZgpGahtnshnHaRq0RyqF-m3Y7oNj6JL_YMwgxsFIIHtBlagBqDU-gZK99iqSOSGqVhvxqX6gCqFgE7vnEGHeeDedtRM53coAJuwzy8rQV9m3TewoofPdPasGv-dBLQZ3qgmnibkSgb7SmFpjXBy8zL3xJXOZhAHYlgcmcEoFVaWlBguIcWA87WZlpCLYcdYTJzSZweC3QLUhZ4RLJW84-LMKp6xWLLPrp3OgnsduB2G9PYMmYw_qCkuY1KGwfH4PvCQbAzQ",
fmt.Sprintf(`{"cert": "%s", "time": 2000000000000}`, certPemPs),
true,
`{"alg": "RS256", "typ": "JWT"}`,
`{"iss": "xxx", "exp": 3000}`,
"",
},
{
"rs256-exp-expired", // token expires, and it's stale at a chosen time
"eyJhbGciOiAiUlMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4IiwgImV4cCI6IDMwMDB9.hqDP3AzshNhUZMI02U3nLPrj93QFrgs-74XFrF1Vry2bplrz-NKpdVdfTu8iY_bhmkWf2Om5DdwRZj2ZgpGahtnshnHaRq0RyqF-m3Y7oNj6JL_YMwgxsFIIHtBlagBqDU-gZK99iqSOSGqVhvxqX6gCqFgE7vnEGHeeDedtRM53coAJuwzy8rQV9m3TewoofPdPasGv-dBLQZ3qgmnibkSgb7SmFpjXBy8zL3xJXOZhAHYlgcmcEoFVaWlBguIcWA87WZlpCLYcdYTJzSZweC3QLUhZ4RLJW84-LMKp6xWLLPrp3OgnsduB2G9PYMmYw_qCkuY1KGwfH4PvCQbAzQ",
fmt.Sprintf(`{"cert": "%s", "time": 4000000000000}`, certPemPs),
false,
`{}`,
`{}`,
"",
},
{
"rs256-exp-now-expired", // token expires, and it's stale at the current implicitly specified real time
"eyJhbGciOiAiUlMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4IiwgImV4cCI6IDMwMDB9.hqDP3AzshNhUZMI02U3nLPrj93QFrgs-74XFrF1Vry2bplrz-NKpdVdfTu8iY_bhmkWf2Om5DdwRZj2ZgpGahtnshnHaRq0RyqF-m3Y7oNj6JL_YMwgxsFIIHtBlagBqDU-gZK99iqSOSGqVhvxqX6gCqFgE7vnEGHeeDedtRM53coAJuwzy8rQV9m3TewoofPdPasGv-dBLQZ3qgmnibkSgb7SmFpjXBy8zL3xJXOZhAHYlgcmcEoFVaWlBguIcWA87WZlpCLYcdYTJzSZweC3QLUhZ4RLJW84-LMKp6xWLLPrp3OgnsduB2G9PYMmYw_qCkuY1KGwfH4PvCQbAzQ",
fmt.Sprintf(`{"cert": "%s"}`, certPemPs),
false,
`{}`,
`{}`,
"",
},
{
"rs256-exp-now-explicit-expired", // token expires, and it's stale at the current explicitly specified real time
"eyJhbGciOiAiUlMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4IiwgImV4cCI6IDMwMDB9.hqDP3AzshNhUZMI02U3nLPrj93QFrgs-74XFrF1Vry2bplrz-NKpdVdfTu8iY_bhmkWf2Om5DdwRZj2ZgpGahtnshnHaRq0RyqF-m3Y7oNj6JL_YMwgxsFIIHtBlagBqDU-gZK99iqSOSGqVhvxqX6gCqFgE7vnEGHeeDedtRM53coAJuwzy8rQV9m3TewoofPdPasGv-dBLQZ3qgmnibkSgb7SmFpjXBy8zL3xJXOZhAHYlgcmcEoFVaWlBguIcWA87WZlpCLYcdYTJzSZweC3QLUhZ4RLJW84-LMKp6xWLLPrp3OgnsduB2G9PYMmYw_qCkuY1KGwfH4PvCQbAzQ",
fmt.Sprintf(`{"cert": "%s", "time": now}`, certPemPs),
false,
`{}`,
`{}`,
"",
},
{
"rs256-nbf-ok", // token has a commencement time, and it's commenced at a chosen time
"eyJhbGciOiAiUlMyNTYiLCAidHlwIjogIkpXVCJ9.eyJuYmYiOiAxMDAwLCAiaXNzIjogInh4eCJ9.cwwYDfJhU_ambPIpwBJwDek05miffoudprr41IAYsl0IKekb1ii2uEgwkNM-LJtVXHe9hsK3gANFyfqoJuCZIBvaNMx_3Z0BUdeBs4k1UwBiZCpuud0ofgHKURwvehNgqDvRfchq_-K_Agi2iRdl0oShgLjN-gVbBl8pRwUbQrvASlcsCpZIKUyOzXNtaIZEFh1z6ISDy8UHHOdoieKpN23swya7QAcEb0wXEEKMkkhiRd5QHgWLk37Lnw2K89mKcq4Om0CtV9nHrxxmpYGSMPojCy16Gjdg5-xKyJWvxCfb3YUBUVM4RWa7ICOPRJWPuHxu9pPYG63hb_qDU6NLsw",
fmt.Sprintf(`{"cert": "%s", "time": 2000000000000}`, certPemPs),
true,
`{"alg": "RS256", "typ": "JWT"}`,
`{"iss": "xxx", "nbf": 1000}`,
"",
},
{
"rs256-nbf-now-ok", // token has a commencement time, and it's commenced at the current implicitly specified time
"eyJhbGciOiAiUlMyNTYiLCAidHlwIjogIkpXVCJ9.eyJuYmYiOiAxMDAwLCAiaXNzIjogInh4eCJ9.cwwYDfJhU_ambPIpwBJwDek05miffoudprr41IAYsl0IKekb1ii2uEgwkNM-LJtVXHe9hsK3gANFyfqoJuCZIBvaNMx_3Z0BUdeBs4k1UwBiZCpuud0ofgHKURwvehNgqDvRfchq_-K_Agi2iRdl0oShgLjN-gVbBl8pRwUbQrvASlcsCpZIKUyOzXNtaIZEFh1z6ISDy8UHHOdoieKpN23swya7QAcEb0wXEEKMkkhiRd5QHgWLk37Lnw2K89mKcq4Om0CtV9nHrxxmpYGSMPojCy16Gjdg5-xKyJWvxCfb3YUBUVM4RWa7ICOPRJWPuHxu9pPYG63hb_qDU6NLsw",
fmt.Sprintf(`{"cert": "%s"}`, certPemPs),
true,
`{"alg": "RS256", "typ": "JWT"}`,
`{"iss": "xxx", "nbf": 1000}`,
"",
},
{
"rs256-nbf-toosoon", // token has a commencement time, and the chosen time is too early
"eyJhbGciOiAiUlMyNTYiLCAidHlwIjogIkpXVCJ9.eyJuYmYiOiAxMDAwLCAiaXNzIjogInh4eCJ9.cwwYDfJhU_ambPIpwBJwDek05miffoudprr41IAYsl0IKekb1ii2uEgwkNM-LJtVXHe9hsK3gANFyfqoJuCZIBvaNMx_3Z0BUdeBs4k1UwBiZCpuud0ofgHKURwvehNgqDvRfchq_-K_Agi2iRdl0oShgLjN-gVbBl8pRwUbQrvASlcsCpZIKUyOzXNtaIZEFh1z6ISDy8UHHOdoieKpN23swya7QAcEb0wXEEKMkkhiRd5QHgWLk37Lnw2K89mKcq4Om0CtV9nHrxxmpYGSMPojCy16Gjdg5-xKyJWvxCfb3YUBUVM4RWa7ICOPRJWPuHxu9pPYG63hb_qDU6NLsw",
fmt.Sprintf(`{"cert": "%s", "time": 500000000000}`, certPemPs),
false,
`{}`,
`{}`,
"",
},
{
"rs256-alg-missing", // alg is missing from the JOSE header
"eyJ0eXAiOiAiSldUIiwgImtpZCI6ICJrMSJ9.eyJpc3MiOiAieHh4IiwgInN1YiI6ICJmcmVkIn0.J4J4FgUD_P5fviVVjgvQWJDg-5XYTP_tHCwB3kSlYVKv8vmnZRNh4ke68OxfMP96iM-LZswG2fNqe-_piGIMepF5rCe1iIWAuz3qqkxfS9YVF3hvwoXhjJT0yIgrDMl1lfW5_XipNshZoxddWK3B7dnVW74MFazEEFuefiQm3PdMUX8jWGsmfgPnqBIZTizErNhoIMuRvYaVM1wA2nfrpVGONxMTaw8T0NRwYIuZwubbnNQ1yLhI0y3dsZvQ_lrh9Khtk9fS1V3SRh7aa9AvferJ4T-48qn_V1m3sINPgoA-uLGyyu3k_GkXRYW1yGNC-MH4T2cwhj89WITbIhusgQ",
fmt.Sprintf(`{"cert": "%s"}`, certPemPs),
false,
`{}`,
`{}`,
"",
},
{
"rs256-crit-junk", // the JOSE header contains an unrecognized critical parameter
"eyJjcml0IjogWyJqdW5rIl0sICJraWQiOiAiazEiLCAiYWxnIjogIlJTMjU2IiwgInR5cCI6ICJKV1QiLCAianVuayI6ICJ4eHgifQ.eyJpc3MiOiAieHh4IiwgInN1YiI6ICJmcmVkIn0.YfoUpW5CgDBtxtBuOix3cdYJGT8cX9Mq7wOhIbjDK7eRQUsAmMY_0EQPh7bd7Yi1gLI3e11BKzguf2EHqAa1kbkHWwFniBO-RIi8q42v2uxC4lpEpIjfaaXB5XmsLfAXtYRqh0AObvbSho6VDXBP_Kn81nhIiE2yFbH14_jhRMSxDBs5ToSkXV-XJHw5bONP8NxPqEk9KF3ZJGzN7J_KoD6LjqfYai5K0eLNEIZh4C1WjTdmCKMR4K6ieZRQWZiSsnhSqLSQERir4n22G3QsdY7dOnCp-SS4VYu3V-PfsOSFMvQ-TTAN1geqMZ9A7k1CCLW0wxKBs-KCiYzmRTzwxA",
fmt.Sprintf(`{"cert": "%s"}`, certPemPs),
false,
`{}`,
`{}`,
"",
},
{
"rsa256-nested", // one nesting level
"eyJhbGciOiAiUlMyNTYiLCAidHlwIjogIkpXVCIsICJjdHkiOiAiSldUIn0.ZXlKaGJHY2lPaUFpVWxNeU5UWWlMQ0FpZEhsd0lqb2dJa3BYVkNKOS5leUpwYzNNaU9pQWllSGg0SW4wLnJSUnJlUU9DYW9ZLW1Nazcyak5GZVk1YVlFUWhJZ0lFdFZkUTlYblltUUwyTHdfaDdNbkk0U0VPMVBwa0JIVEpyZnljbEplTHpfalJ2UGdJMlcxaDFCNGNaVDhDZ21pVXdxQXI5c0puZHlVQ1FtSWRrbm53WkI5cXAtX3BTdGRHWEo5WnAzeEo4NXotVEJpWlN0QUNUZFdlUklGSUU3VkxPa20tRmxZdzh5OTdnaUN4TmxUdWl3amxlTjMwZDhnWHUxNkZGQzJTSlhtRjZKbXYtNjJHbERhLW1CWFZ0bGJVSTVlWVUwaTdueTNyQjBYUVQxRkt4ZUZ3OF85N09FdV9jY3VLcl82ZHlHZVFHdnQ5Y3JJeEFBMWFZbDdmbVBrNkVhcjllTTNKaGVYMi00Wkx0d1FOY1RDT01YV0dIck1DaG5MWVc4WEFrTHJEbl9yRmxUaVMtZw.Xicc2sWCZ_Nithucsw9XD7YOKrirUdEnH3MyiPM-Ck3vEU2RsTBsfU2JPhfjp3phc0VOgsAXCzwU5PwyNyUo1490q8YSym-liMyO2Lk-hjH5fAxoizg9yD4II_lK6Wz_Tnpc0bBGDLdbuUhvgvO7yqo-leBQlsfRXOvw4VSPSEy8QPtbURtbnLpWY2jGBKz7vGI_o4qDJ3PicG0kyEiWZNh3wjeeCYRCWvXN8qh7Uk5EA-8J5vX651GqV-7gmaX1n-8DXamhaCQcE-p1cjSj04-X-_bJlQtmb-TT3bSyUPxgHVncvxNUby8jkUTzfi5MMbmIzWWkxI5YtJTdtmCkPQ",
fmt.Sprintf(`{"cert": "%s"}`, certPemPs),
true,
`{"alg": "RS256", "typ": "JWT"}`,
`{"iss": "xxx"}`,
"",
},
{
"rsa256-nested2", // two nesting levels
"eyJhbGciOiAiUlMyNTYiLCAidHlwIjogIkpXVCIsICJjdHkiOiAiSldUIn0.ZXlKaGJHY2lPaUFpVWxNeU5UWWlMQ0FpZEhsd0lqb2dJa3BYVkNJc0lDSmpkSGtpT2lBaVNsZFVJbjAuWlhsS2FHSkhZMmxQYVVGcFZXeE5lVTVVV1dsTVEwRnBaRWhzZDBscWIyZEphM0JZVmtOS09TNWxlVXB3WXpOTmFVOXBRV2xsU0dnMFNXNHdMbkpTVW5KbFVVOURZVzlaTFcxTmF6Y3lhazVHWlZrMVlWbEZVV2hKWjBsRmRGWmtVVGxZYmxsdFVVd3lUSGRmYURkTmJrazBVMFZQTVZCd2EwSklWRXB5Wm5samJFcGxUSHBmYWxKMlVHZEpNbGN4YURGQ05HTmFWRGhEWjIxcFZYZHhRWEk1YzBwdVpIbFZRMUZ0U1dScmJtNTNXa0k1Y1hBdFgzQlRkR1JIV0VvNVduQXplRW80TlhvdFZFSnBXbE4wUVVOVVpGZGxVa2xHU1VVM1ZreFBhMjB0Um14WmR6aDVPVGRuYVVONFRteFVkV2wzYW14bFRqTXdaRGhuV0hVeE5rWkdRekpUU2xodFJqWktiWFl0TmpKSGJFUmhMVzFDV0ZaMGJHSlZTVFZsV1ZVd2FUZHVlVE55UWpCWVVWUXhSa3Q0WlVaM09GODVOMDlGZFY5alkzVkxjbDgyWkhsSFpWRkhkblE1WTNKSmVFRkJNV0ZaYkRkbWJWQnJOa1ZoY2psbFRUTkthR1ZZTWkwMFdreDBkMUZPWTFSRFQwMVlWMGRJY2sxRGFHNU1XVmM0V0VGclRISkVibDl5Um14VWFWTXRady5YaWNjMnNXQ1pfTml0aHVjc3c5WEQ3WU9LcmlyVWRFbkgzTXlpUE0tQ2szdkVVMlJzVEJzZlUySlBoZmpwM3BoYzBWT2dzQVhDendVNVB3eU55VW8xNDkwcThZU3ltLWxpTXlPMkxrLWhqSDVmQXhvaXpnOXlENElJX2xLNld6X1RucGMwYkJHRExkYnVVaHZndk83eXFvLWxlQlFsc2ZSWE92dzRWU1BTRXk4UVB0YlVSdGJuTHBXWTJqR0JLejd2R0lfbzRxREozUGljRzBreUVpV1pOaDN3amVlQ1lSQ1d2WE44cWg3VWs1RUEtOEo1dlg2NTFHcVYtN2dtYVgxbi04RFhhbWhhQ1FjRS1wMWNqU2owNC1YLV9iSmxRdG1iLVRUM2JTeVVQeGdIVm5jdnhOVWJ5OGprVVR6Zmk1TU1ibUl6V1dreEk1WXRKVGR0bUNrUFE.ODBVH_gooCLJxtPVr1MjJC1syG4MnVUFP9LkI9pSaj0QABV4vpfqrBshHn8zOPgUTDeHwbc01Qy96cQlTMQQb94YANmZyL1nzwmdR4piiGXMGSlcCNfDg1o8DK4msMSR-X-j2IkxBDB8rfeFSfLRMgDCjAF0JolW7qWmMD9tBmFNYAjly4vMwToOXosDmFLl5eqyohXDf-3Ohljm5kIjtyMWkt5S9EVuwlIXh2owK5l59c4-TH29gkuaZ3uU4LFPjD7XKUrlOQnEMuu2QD8LAqTyxbnY4JyzUWEvyTM1dVmGnFpLKCg9QBly__y1u2ffhvDsHyuCmEKAbhPE98YvFA",
fmt.Sprintf(`{"cert": "%s"}`, certPemPs),
true,
`{"alg": "RS256", "typ": "JWT"}`,
`{"iss": "xxx"}`,
"",
},
{
"es256-unconstrained", // ECC key, no constraints
"eyJhbGciOiAiRVMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4In0.JvbTLBF06FR70gb7lCbx_ojhp4bk9--B_aULgNlYM0fYf9OSawaqBQp2lwW6FADFtRJ2WFUk5g0zwVOUlnrlzw",
fmt.Sprintf(`{"cert": "%s"}`, certPemEs256),
true,
`{"alg": "ES256", "typ": "JWT"}`,
`{"iss": "xxx"}`,
"",
},
{
"hs256-unconstrained", // HMAC key, no constraints
`eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyIjoiYWxpY2UiLCJhenAiOiJhbGljZSIsInN1Ym9yZGluYXRlcyI6W10sImhyIjpmYWxzZX0.rz3jTY033z-NrKfwrK89_dcLF7TN4gwCMj-fVBDyLoM`,
`{"secret": "secret"}`,
true,
`{"alg": "HS256", "typ": "JWT"}`,
`{"user": "alice", "azp": "alice", "subordinates": [], "hr": false}`,
"",
},
{
"hs256-key-wrong", // HMAC with wrong key
`eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJ1c2VyIjoiYWxpY2UiLCJhenAiOiJhbGljZSIsInN1Ym9yZGluYXRlcyI6W10sImhyIjpmYWxzZX0.rz3jTY033z-NrKfwrK89_dcLF7TN4gwCMj-fVBDyLoM`,
`{"secret": "the wrong key"}`,
false,
`{}`,
`{}`,
"",
},
{
"rs256-aud", // constraint requires an audience, found right one in JWT
"eyJhbGciOiAiUlMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4IiwgImF1ZCI6ICJmcmVkIn0.F-9m2Tx8r1tuQFirazsI4FK05bXX3uP4ut8M2FryJ07k3bQhy262fdwNDmuFcGx0NfL-c80agcwGoTzMWXkVEgZ2KTz0QSAdcdGk3ZWtUy-Mj2IilZ1dzkVvW8LsithYFTGcUtkelFDrJwtMQ0Kum7SXJpC_HCBk4PbftY0XD6jRgHLnQdeT9_J11L4sd19vCdpxxxm3_m_yvUV3ZynzB4vhQbS3CET4EClAVhi-m_gMh9mj85gY1ycIz6-FxWv8xM2Igm2SMeIdyJwAvEGnIauRS928P_OqVCZgCH2Pafnxtzy77Llpxy8XS0xu5PtPw3_azhg33GaXDCFsfz6GpA",
fmt.Sprintf(`{"cert": "%s", "aud": "fred"}`, certPemPs),
true,
`{"alg": "RS256", "typ": "JWT"}`,
`{"aud": "fred", "iss": "xxx"}`,
"",
},
{
"rs256-aud-list", // constraint requires an audience, found list including right one in JWT
"eyJhbGciOiAiUlMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4IiwgImF1ZCI6IFsiZnJlZCIsICJib2IiXX0.k8jW7PUiMkQCKCjnSFBFFKPDO0RXwZgVkLUwUfi8sMdrrcKi12LC8wd5fLBn0YraFtMXWKdMweKf9ZC-K33h5TK7kkTVKOXctF50mleMlUn0Up_XjtdP1v-2WOfivUXcexN1o-hu0kH7sSQnielXIjC2EAleG6A54YUOZFBdzvd1PKHlsxA7x2iiL73uGeFlyxoaMki8E5tx7FY6JGF1RdhWCoIV5A5J8QnwI5EetduJQ505U65Pk7UApWYWu4l2DT7KCCJa5dJaBvCBemVxWaBhCQWtJKU2ZgOEkpiK7b_HsdeRBmpG9Oi1o5mt5ybC09VxSD-lEda_iJO_7i042A",
fmt.Sprintf(`{"cert": "%s", "aud": "bob"}`, certPemPs),
true,
`{"alg": "RS256", "typ": "JWT"}`,
`{"aud": ["fred", "bob"], "iss": "xxx"}`,
"",
},
{
"ps256-no-aud", // constraint requires an audience, none in JWT
"eyJhbGciOiAiUFMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4In0.iCePYnD1U13oBe_6ylhmojmkY_VZNYXqVszAej8RImMGv51OEqARmYFkRZYTiYCiVFober7vcDq_stOj1uAJCuttygGW_dpHiN-3EWsU2E2vCnXlygWe0ud38pOC-OVyEFbXxO9-m51vnS-3VmBjEO8G1UE8bLFXTeFOGkUIj9dqlefJSWh5wa8XA3g9mj0jqpuJi-7QgEIeVHk-JzhGpoFqI2f-Df_agVvc2x4V-6fJmj7wV2IsaFPRi36mVQmg8S-dkxu4AlaeCILhyNZl8ewjBHHBjJFRwzcy88L00mzdO51ZxEYsBdQav3ux2sc6vjT9PvvjAwzcthQxEoEaNA",
fmt.Sprintf(`{"cert": "%s", "aud": "cath"}`, certPemPs),
false,
`{}`,
`{}`,
"",
},
{
"rs256-missing-aud", // constraint requires no audience, found one in JWT
"eyJhbGciOiAiUlMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4IiwgImF1ZCI6ICJmcmVkIn0.F-9m2Tx8r1tuQFirazsI4FK05bXX3uP4ut8M2FryJ07k3bQhy262fdwNDmuFcGx0NfL-c80agcwGoTzMWXkVEgZ2KTz0QSAdcdGk3ZWtUy-Mj2IilZ1dzkVvW8LsithYFTGcUtkelFDrJwtMQ0Kum7SXJpC_HCBk4PbftY0XD6jRgHLnQdeT9_J11L4sd19vCdpxxxm3_m_yvUV3ZynzB4vhQbS3CET4EClAVhi-m_gMh9mj85gY1ycIz6-FxWv8xM2Igm2SMeIdyJwAvEGnIauRS928P_OqVCZgCH2Pafnxtzy77Llpxy8XS0xu5PtPw3_azhg33GaXDCFsfz6GpA",
fmt.Sprintf(`{"cert": "%s"}`, certPemPs),
false,
`{}`,
`{}`,
"",
},
{
"rs256-wrong-aud", // constraint requires an audience, found wrong one in JWT
"eyJhbGciOiAiUlMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4IiwgImF1ZCI6ICJmcmVkIn0.F-9m2Tx8r1tuQFirazsI4FK05bXX3uP4ut8M2FryJ07k3bQhy262fdwNDmuFcGx0NfL-c80agcwGoTzMWXkVEgZ2KTz0QSAdcdGk3ZWtUy-Mj2IilZ1dzkVvW8LsithYFTGcUtkelFDrJwtMQ0Kum7SXJpC_HCBk4PbftY0XD6jRgHLnQdeT9_J11L4sd19vCdpxxxm3_m_yvUV3ZynzB4vhQbS3CET4EClAVhi-m_gMh9mj85gY1ycIz6-FxWv8xM2Igm2SMeIdyJwAvEGnIauRS928P_OqVCZgCH2Pafnxtzy77Llpxy8XS0xu5PtPw3_azhg33GaXDCFsfz6GpA",
fmt.Sprintf(`{"cert": "%s", "aud": "cath"}`, certPemPs),
false,
`{}`,
`{}`,
"",
},
{
"rs256-wrong-aud-list", // constraint requires an audience, found list of wrong ones in JWT
"eyJhbGciOiAiUlMyNTYiLCAidHlwIjogIkpXVCJ9.eyJpc3MiOiAieHh4IiwgImF1ZCI6IFsiZnJlZCIsICJib2IiXX0.k8jW7PUiMkQCKCjnSFBFFKPDO0RXwZgVkLUwUfi8sMdrrcKi12LC8wd5fLBn0YraFtMXWKdMweKf9ZC-K33h5TK7kkTVKOXctF50mleMlUn0Up_XjtdP1v-2WOfivUXcexN1o-hu0kH7sSQnielXIjC2EAleG6A54YUOZFBdzvd1PKHlsxA7x2iiL73uGeFlyxoaMki8E5tx7FY6JGF1RdhWCoIV5A5J8QnwI5EetduJQ505U65Pk7UApWYWu4l2DT7KCCJa5dJaBvCBemVxWaBhCQWtJKU2ZgOEkpiK7b_HsdeRBmpG9Oi1o5mt5ybC09VxSD-lEda_iJO_7i042A",
fmt.Sprintf(`{"cert": "%s", "aud": "cath"}`, certPemPs),
false,
`{}`,
`{}`,
"",
},
}
type test struct {
note string
rules []string
expected interface{}
}
tests := []test{}
for _, p := range params {
var exp interface{}
exp = fmt.Sprintf(`[%#v, %s, %s]`, p.valid, p.header, p.payload)
if p.err != "" {
exp = errors.New(p.err)
}
tests = append(tests, test{
p.note,
[]string{fmt.Sprintf(`p = [x, y, z] { time.now_ns(now); io.jwt.decode_verify("%s", %s, [x, y, z]) }`, p.token, p.constraints)},
exp,
})
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)
}
}
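// TestTopDownTime covers the time builtins: now_ns caching within a single
// query, parsing, date/clock/weekday extraction, and timestamp overflow errors.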
func TestTopDownTime(t *testing.T) {
data := loadSmallTestData()
runTopDownTestCase(t, data, "time caching", []string{`
p { time.now_ns(t0); test.sleep("10ms"); time.now_ns(t1); t0 = t1 }
`}, "true")
runTopDownTestCase(t, data, "parse nanos", []string{`
p = ns { time.parse_ns("2006-01-02T15:04:05Z07:00", "2017-06-02T19:00:00-07:00", ns) }
`}, "1496455200000000000")
runTopDownTestCase(t, data, "parse rfc3339 nanos", []string{`
p = ns { time.parse_rfc3339_ns("2017-06-02T19:00:00-07:00", ns) }
`}, "1496455200000000000")
runTopDownTestCase(t, data, "parse duration nanos", []string{`
p = ns { time.parse_duration_ns("100ms", ns) }
`}, "100000000")
runTopDownTestCase(t, data, "date", []string{`
p = [year, month, day] { [year, month, day] := time.date(1517832000*1000*1000*1000) }`}, "[2018, 2, 5]")
runTopDownTestCase(t, data, "date leap day", []string{`
p = [year, month, day] { [year, month, day] := time.date(1582977600*1000*1000*1000) }`}, "[2020, 2, 29]")
runTopDownTestCase(t, data, "date too big", []string{`
p = [year, month, day] { [year, month, day] := time.date(1582977600*1000*1000*1000*1000) }`}, fmt.Errorf("timestamp too big"))
runTopDownTestCase(t, data, "clock", []string{`
p = [hour, minute, second] { [hour, minute, second] := time.clock(1517832000*1000*1000*1000) }`}, "[12, 0, 0]")
runTopDownTestCase(t, data, "clock leap day", []string{`
p = [hour, minute, second] { [hour, minute, second] := time.clock(1582977600*1000*1000*1000) }`}, "[12, 0, 0]")
runTopDownTestCase(t, data, "clock too big", []string{`
p = [hour, minute, second] { [hour, minute, second] := time.clock(1582977600*1000*1000*1000*1000) }`}, fmt.Errorf("timestamp too big"))
for i, day := range []string{"Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"} {
ts := 1517832000*1000*1000*1000 + i*24*int(time.Hour)
runTopDownTestCase(t, data, "weekday", []string{fmt.Sprintf(`p = weekday { weekday := time.weekday(%d)}`, ts)},
fmt.Sprintf("%q", day))
}
runTopDownTestCase(t, data, "weekday too big", []string{`
p = weekday { weekday := time.weekday(1582977600*1000*1000*1000*1000) }`}, fmt.Errorf("timestamp too big"))
}
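// TestTopDownWalkBuiltin checks walk() over scalars, arrays, objects, and sets,
// including filtering on path components and matching partially ground paths.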
func TestTopDownWalkBuiltin(t *testing.T) {
tests := []struct {
note string
rules []string
expected interface{}
}{
{
note: "scalar",
rules: []string{
`p[x] { walk(data.a[0], x) }`,
},
expected: `[
[[], 1]
]`,
},
{
note: "arrays",
rules: []string{
`p[x] { walk(data.a, x) }`,
},
expected: `[
[[], [1,2,3,4]],
[[0], 1],
[[1], 2],
[[2], 3],
[[3], 4]
]`,
},
{
note: "objects",
rules: []string{
"p[x] { walk(data.b, x) }",
},
expected: `[
[[], {"v1": "hello", "v2": "goodbye"}],
[["v1"], "hello"],
[["v2"], "goodbye"]
]`,
},
{
note: "sets",
rules: []string{
"p[x] { walk(q, x) }",
`q = {{1,2,3}} { true }`,
},
expected: `[
[[], [[1,2,3]]],
[[[1,2,3]], [1,2,3]],
[[[1,2,3], 1], 1],
[[[1,2,3], 2], 2],
[[[1,2,3], 3], 3]
]`,
},
{
note: "match and filter",
rules: []string{
`p[[k,x]] { walk(q, [k, x]); contains(k[1], "oo") }`,
`q = [
{
"foo": 1,
"bar": 2,
"bazoo": 3,
}
] { true }`,
},
expected: `[[[0, "foo"], 1], [[0, "bazoo"], 3]]`,
},
{
note: "partially ground path",
rules: []string{
`p[[k1,k2,x]] {
walk(q, [["a", k1, "b", k2], x])
}`,
`q = {
"a": [
{
"b": {"foo": 1, "bar": 2},
},
{
"b": {"baz": 3, "qux": 4},
}
]
} { true }
`,
},
expected: `[[0, "foo", 1], [0, "bar", 2], [1, "baz", 3], [1, "qux", 4]]`,
},
}
data := loadSmallTestData()
for _, tc := range tests {
runTopDownTestCase(t, data, tc.note, tc.rules, tc.expected)
}
}
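// TestTopDownEmbeddedVirtualDoc checks evaluation of a virtual document nested
// several levels below the data root.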
func TestTopDownEmbeddedVirtualDoc(t *testing.T) {
compiler := compileModules([]string{
`package b.c.d
import data.a
import data.g
p[x] { a[i] = x; q[x] }
q[x] { g[j][k] = x }`})
store := inmem.NewFromObject(loadSmallTestData())
assertTopDownWithPath(t, compiler, store, "deep embedded vdoc", []string{"b", "c", "d", "p"}, "{}", "[1, 2, 4]")
}
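// TestTopDownInputValues checks evaluation against the input document,
// including namespaced imports, import aliases, and undefined input.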
func TestTopDownInputValues(t *testing.T) {
compiler := compileModules([]string{
`package z
import data.a
import input.req1
import input.req2 as req2as
import input.req3.a.b
import input.req4.a.b as req4as
p = true { a[i] = x; req1.foo = x; req2as.bar = x; q[x] }
q[x] { req1.foo = x; req2as.bar = x; r[x] }
r[x] { {"foo": req2as.bar, "bar": [x]} = {"foo": x, "bar": [req1.foo]} }
s = true { b.x[0] = 1 }
t = true { req4as.x[0] = 1 }
u[x] { b[_] = x; x > 1 }
w = [[1, 2], [3, 4]] { true }
gt1 = true { req1 > 1 }
keys[x] = y { data.numbers[_] = x; to_number(x, y) }
loopback = input { true }`})
store := inmem.NewFromObject(loadSmallTestData())
assertTopDownWithPath(t, compiler, store, "loopback", []string{"z", "loopback"}, `{"foo": 1}`, `{"foo": 1}`)
assertTopDownWithPath(t, compiler, store, "loopback undefined", []string{"z", "loopback"}, ``, ``)
assertTopDownWithPath(t, compiler, store, "simple", []string{"z", "p"}, `{
"req1": {"foo": 4},
"req2": {"bar": 4}
}`, "true")
assertTopDownWithPath(t, compiler, store, "missing", []string{"z", "p"}, `{
"req1": {"foo": 4}
}`, "")
assertTopDownWithPath(t, compiler, store, "namespaced", []string{"z", "s"}, `{
"req3": {
"a": {
"b": {
"x": [1,2,3,4]
}
}
}
}`, "true")
assertTopDownWithPath(t, compiler, store, "namespaced with alias", []string{"z", "t"}, `{
"req4": {
"a": {
"b": {
"x": [1,2,3,4]
}
}
}
}`, "true")
}
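// TestTopDownPartialDocConstants checks partial object and partial set rules
// whose keys and values are constants, with and without a rule body.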
func TestTopDownPartialDocConstants(t *testing.T) {
compiler := compileModules([]string{
`package ex
foo["bar"] = 0
foo["baz"] = 1
foo["*"] = [1, 2, 3] {
input.foo = 7
}
bar["x"]
bar["y"]
bar["*"] {
input.foo = 7
}
`})
store := inmem.NewFromObject(loadSmallTestData())
ctx := context.Background()
txn := storage.NewTransactionOrDie(ctx, store)
defer store.Abort(ctx, txn)
tests := []struct {
note string
path string
input string
expected string
}{
{
note: "obj-1",
path: "ex.foo.bar",
expected: "0",
},
{
note: "obj",
path: "ex.foo",
expected: `{"bar": 0, "baz": 1}`,
},
{
note: "obj-all",
path: "ex.foo",
input: `{"foo": 7}`,
expected: `{"bar": 0, "baz": 1, "*": [1,2,3]}`,
},
{
note: "set-1",
path: "ex.bar.x",
expected: `"x"`,
},
{
note: "set",
path: "ex.bar",
expected: `["x", "y"]`,
},
{
note: "set-all",
path: "ex.bar",
input: `{"foo": 7}`,
expected: `["x", "y", "*"]`,
},
}
for _, tc := range tests {
assertTopDownWithPath(t, compiler, store, tc.note, strings.Split(tc.path, "."), tc.input, tc.expected)
}
}
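// TestTopDownFunctions covers user-defined functions: chaining, cross-package
// calls, composite arguments, multiple clauses dispatching on argument values,
// false/undefined results, and exclusion of functions from document enumeration.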
func TestTopDownFunctions(t *testing.T) {
modules := []string{`package ex
foo(x) = y {
split(x, "i", y)
}
bar[x] = y {
data.l[_].a = x
foo(x, y)
}
chain0(x) = y {
foo(x, y)
}
chain1(a) = b {
chain0(a, b)
}
chain2 = d {
chain1("fooibar", d)
}
cross(x) = [a, b] {
split(x, "i", y)
foo(y[1], b)
data.test.foo(y[2], a)
}
falsy_func(x) = false
falsy_func_else(x) = true { x = 1 } else = false { true }
falsy_undefined {
falsy_func(1)
}
falsy_negation {
not falsy_func(1)
}
falsy_else_value = falsy_func_else(2)
falsy_else_undefined {
falsy_func_else(2)
}
falsy_else_negation {
not falsy_func_else(2)
}
arrays([x, y]) = [a, b] {
foo(x, a)
foo(y, b)
}
arraysrule = y {
arrays(["hih", "foo"], y)
}
objects({"foo": x, "bar": y}) = z {
foo(x, a)
data.test.foo(y, b)
z = [a, b]
}
objectsrule = y {
objects({"foo": "hih", "bar": "hi ho"}, y)
}
refoutput = y {
foo("hih", z)
y = z[1]
}
void(x) {
x = "foo"
}
voidGood {
not void("bar", true)
}
voidBad {
void("bar", true)
}
multi(1, x) = y {
y = x
}
multi(2, x) = y {
a = 2*x
y = a+1
}
multi(3, x) = y {
y = x*10
}
multi("foo", x) = y {
y = "bar"
}
multi1 = y {
multi(1, 2, y)
}
multi2 = y {
multi(2, 2, y)
}
multi3 = y {
multi(3, 2, y)
}
multi4 = y {
multi("foo", 2, y)
}
always_true_fn(x)
always_true {
always_true_fn(1)
}
`,
`
package test
import data.ex
foo(x) = y {
trim(x, "h o", y)
}
cross = y {
ex.cross("hi, my name is foo", y)
}
multi("foo", x) = y {
y = x
}
multi("bar", x) = y {
y = "baz"
}
multi_cross_pkg = [y, z] {
multi("foo", "bar", y)
ex.multi(2, 1, z)
}`,
`
package test
samepkg = y {
foo("how do you do?", y)
}`,
`
package test.l1.l3
g(x) = x`,
`
package test.l1.l2
p = true
f(x) = x`,
`
package test.omit_result
f(x) = x
p { f(1) }
`,
}
compiler := compileModules(modules)
store := inmem.NewFromObject(loadSmallTestData())
ctx := context.Background()
txn := storage.NewTransactionOrDie(ctx, store)
defer store.Abort(ctx, txn)
assertTopDownWithPath(t, compiler, store, "basic call", []string{"ex", "bar", "alice"}, "", `["al", "ce"]`)
assertTopDownWithPath(t, compiler, store, "false result", []string{"ex", "falsy_undefined"}, "", ``)
assertTopDownWithPath(t, compiler, store, "false result negation", []string{"ex", "falsy_negation"}, "", `true`)
assertTopDownWithPath(t, compiler, store, "false else value", []string{"ex", "falsy_else_value"}, "", `false`)
assertTopDownWithPath(t, compiler, store, "false else undefined", []string{"ex", "falsy_else_undefined"}, "", ``)
assertTopDownWithPath(t, compiler, store, "false else negation", []string{"ex", "falsy_else_negation"}, "", `true`)
assertTopDownWithPath(t, compiler, store, "chained", []string{"ex", "chain2"}, "", `["foo", "bar"]`)
assertTopDownWithPath(t, compiler, store, "cross package", []string{"test", "cross"}, "", `["s f", [", my name "]]`)
assertTopDownWithPath(t, compiler, store, "array params", []string{"ex", "arraysrule"}, "", `[["h", "h"], ["foo"]]`)
assertTopDownWithPath(t, compiler, store, "object params", []string{"ex", "objectsrule"}, "", `[["h", "h"], "i"]`)
assertTopDownWithPath(t, compiler, store, "ref func output", []string{"ex", "refoutput"}, "", `"h"`)
assertTopDownWithPath(t, compiler, store, "always_true", []string{"ex.always_true"}, ``, `true`)
assertTopDownWithPath(t, compiler, store, "same package call", []string{"test", "samepkg"}, "", `"w do you do?"`)
assertTopDownWithPath(t, compiler, store, "void good", []string{"ex", "voidGood"}, "", `true`)
assertTopDownWithPath(t, compiler, store, "void bad", []string{"ex", "voidBad"}, "", "")
assertTopDownWithPath(t, compiler, store, "multi1", []string{"ex", "multi1"}, "", `2`)
assertTopDownWithPath(t, compiler, store, "multi2", []string{"ex", "multi2"}, "", `5`)
assertTopDownWithPath(t, compiler, store, "multi3", []string{"ex", "multi3"}, "", `20`)
assertTopDownWithPath(t, compiler, store, "multi4", []string{"ex", "multi4"}, "", `"bar"`)
assertTopDownWithPath(t, compiler, store, "multi cross package", []string{"test", "multi_cross_pkg"}, "", `["bar", 3]`)
assertTopDownWithPath(t, compiler, store, "skip-functions", []string{"test.l1"}, ``, `{"l2": {"p": true}, "l3": {}}`)
assertTopDownWithPath(t, compiler, store, "omit result", []string{"test.omit_result.p"}, ``, `true`)
}
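// TestTopDownFunctionErrors checks that functions producing conflicting outputs
// raise errors and that unmatched function inputs evaluate to undefined.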
func TestTopDownFunctionErrors(t *testing.T) {
compiler := compileModules([]string{
`
package test1
p(x) = y {
y = x[_]
}
r = y {
p([1, 2, 3], y)
}`,
`
package test2
p(1, x) = y {
y = x
}
p(2, x) = y {
y = x+1
}
r = y {
p(3, 0, y)
}`,
`
package test3
p(1, x) = y {
y = x
}
p(2, x) = y {
y = x+1
}
p(x, y) = z {
z = x
}
r = y {
p(1, 0, y)
}`,
})
store := inmem.NewFromObject(loadSmallTestData())
ctx := context.Background()
txn := storage.NewTransactionOrDie(ctx, store)
defer store.Abort(ctx, txn)
assertTopDownWithPath(t, compiler, store, "function output conflict single", []string{"test1", "r"}, "", functionConflictErr(nil))
assertTopDownWithPath(t, compiler, store, "function input no match", []string{"test2", "r"}, "", "")
assertTopDownWithPath(t, compiler, store, "function output conflict multiple", []string{"test3", "r"}, "", functionConflictErr(nil))
}
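// TestTopDownWithKeyword checks the `with` keyword for replacing the input
// document, including nested paths, negation, and conflicting replacements.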
func TestTopDownWithKeyword(t *testing.T) {
compiler := compileModules([]string{
`package ex
loopback = input { true }
composite[x] { input.foo[_] = x; x > 2 }
vars = {"foo": input.foo, "bar": input.bar} { true }
input_eq { input.x = 1 }
`,
`package test
import data.ex
basic = true { ex.loopback = true with input as true; ex.loopback = false with input as false }
negation = true { not ex.loopback with input as false; ex.loopback with input as true }
composite[x] { ex.composite[x] with input.foo as [1, 2, 3, 4] }
vars = x { foo = "hello"; bar = "world"; x = ex.vars with input.foo as foo with input.bar as bar }
conflict = true { ex.loopback with input.foo as "x" with input.foo.bar as "y" }
negation_invalidate[x] { data.a[_] = x; not data.ex.input_eq with input.x as x }
`,
})
store := inmem.NewFromObject(loadSmallTestData())
assertTopDownWithPath(t, compiler, store, "with", []string{"test", "basic"}, "", "true")
assertTopDownWithPath(t, compiler, store, "with not", []string{"test", "negation"}, "", "true")
assertTopDownWithPath(t, compiler, store, "with composite", []string{"test", "composite"}, "", "[3,4]")
assertTopDownWithPath(t, compiler, store, "with vars", []string{"test", "vars"}, "", `{"foo": "hello", "bar": "world"}`)
assertTopDownWithPath(t, compiler, store, "with conflict", []string{"test", "conflict"}, "", fmt.Errorf("conflicting input documents"))
assertTopDownWithPath(t, compiler, store, "With invalidate", []string{"test", "negation_invalidate"}, "", "[2,3,4]")
}
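// TestTopDownElseKeyword checks `else` chains on rules and functions, default
// values, rules defined with multiple else roots, and conflict detection.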
func TestTopDownElseKeyword(t *testing.T) {
tests := []struct {
note string
path string
expected interface{}
}{
{"no-op", "ex.no_op", "true"},
{"trivial", "ex.bool", "true"},
{"trivial-non-bool", "ex.non_bool", "[100]"},
{"trivial-3", "ex.triple", `"hello"`},
{"var-head", "ex.vars", `["hello", "goodbye"]`},
{"ref-head", "ex.refs", `["hello", "goodbye"]`},
{"first-match", "ex.multiple_defined", `true`},
{"default-1", "ex.default_1", "2"},
{"default-2", "ex.default_2", "2"},
{"multiple-roots", "ex.multiple_roots", `2`},
{"indexed", "ex.indexed", "2"},
{"conflict-1", "ex.conflict_1", completeDocConflictErr(nil)},
{"conflict-2", "ex.conflict_2", completeDocConflictErr(nil)},
{"functions", "ex.fn_result", `["large", "small", "medium"]`},
}
for _, tc := range tests {
compiler := compileModules([]string{
`package ex
no_op { true } else = false { true }
bool { false } else { true }
non_bool = null { false } else = [100] { true }
triple { false } else { false } else = "hello" { true }
vars { false } else = ["hello", x] { data.b.v2 = x }
refs { false } else = ["hello", data.b.v2] { true }
multiple_defined = false { false } else = true { true } else = false { true }
default default_1 = 1
default_1 { false } default_1 = 2 { true }
default default_2 = 2
default_2 { false } default_2 = 1 { false }
multiple_roots {
false
} else = 1 {
false
} else = 2 {
true
} else = 3 {
true
}
multiple_roots = 2
multiple_roots = 3 {
false
} else = 2 {
true
}
indexed {
data.a[0] = 0
} else = 2 {
data.a[0] = 1
} else = 3 {
data.a[0] = 1
}
indexed {
data.a[0] = 1
data.a[2] = 2
} else {
false
} else = 2 {
data.a[0] = x
x = 1
data.a[2] = 3
}
conflict_1 { false } else { true }
conflict_1 = false { true }
conflict_2 { false } else = false { true }
conflict_2 { false } else = true { true }
fn_result = [x,y,z] { fn(101, true, x); fn(100, true, y); fn(100, false, z) }
fn(x, y) = "large" {
x > 100
} else = "small" {
y = true
} else = "medium" {
true
}
`,
})
store := inmem.NewFromObject(loadSmallTestData())
assertTopDownWithPath(t, compiler, store, tc.note, strings.Split(tc.path, "."), "", tc.expected)
}
}
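// TestTopDownSystemDocument checks that data.system is excluded from root
// queries while other paths that merely contain "system" are still returned.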
func TestTopDownSystemDocument(t *testing.T) {
compiler := compileModules([]string{`
package system.somepolicy
foo = "hello"
`, `
package topdown.system
bar = "goodbye"
`})
data := map[string]interface{}{
"system": map[string]interface{}{
"somedata": []interface{}{"a", "b", "c"},
},
"com": map[string]interface{}{
"system": "deadbeef",
},
}
store := inmem.NewFromObject(data)
assertTopDownWithPath(t, compiler, store, "root query", []string{}, `{}`, `{
"topdown": {
"system": {
"bar": "goodbye"
}
},
"com": {
"system": "deadbeef"
}
}`)
}
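// TestExample evaluates the public_servers/violations example policy against a
// small servers/networks/ports document.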
func TestExample(t *testing.T) {
bd := `
{
"servers": [
{"id": "s1", "name": "app", "protocols": ["https", "ssh"], "ports": ["p1", "p2", "p3"]},
{"id": "s2", "name": "db", "protocols": ["mysql"], "ports": ["p3"]},
{"id": "s3", "name": "cache", "protocols": ["memcache", "http"], "ports": ["p3"]},
{"id": "s4", "name": "dev", "protocols": ["http"], "ports": ["p1", "p2"]}
],
"networks": [
{"id": "n1", "public": false},
{"id": "n2", "public": false},
{"id": "n3", "public": true}
],
"ports": [
{"id": "p1", "networks": ["n1"]},
{"id": "p2", "networks": ["n3"]},
{"id": "p3", "networks": ["n2"]}
]
}
`
vd := `package opa.example
import data.servers
import data.networks
import data.ports
public_servers[server] { server = servers[_]; server.ports[_] = ports[i].id; ports[i].networks[_] = networks[j].id; networks[j].public = true }
violations[server] { server = servers[_]; server.protocols[_] = "http"; public_servers[server] }`
var doc map[string]interface{}
if err := util.UnmarshalJSON([]byte(bd), &doc); err != nil {
panic(err)
}
compiler := compileModules([]string{vd})
store := inmem.NewFromObject(doc)
assertTopDownWithPath(t, compiler, store, "public servers", []string{"opa", "example", "public_servers"}, "{}", `
[
{"id": "s1", "name": "app", "protocols": ["https", "ssh"], "ports": ["p1", "p2", "p3"]},
{"id": "s4", "name": "dev", "protocols": ["http"], "ports": ["p1", "p2"]}
]
`)
assertTopDownWithPath(t, compiler, store, "violations", []string{"opa", "example", "violations"}, "{}", `
[
{"id": "s4", "name": "dev", "protocols": ["http"], "ports": ["p1", "p2"]}
]
`)
assertTopDownWithPath(t, compiler, store, "both", []string{"opa", "example"}, "{}", `
{
"public_servers": [
{"id": "s1", "name": "app", "protocols": ["https", "ssh"], "ports": ["p1", "p2", "p3"]},
{"id": "s4", "name": "dev", "protocols": ["http"], "ports": ["p1", "p2"]}
],
"violations": [
{"id": "s4", "name": "dev", "protocols": ["http"], "ports": ["p1", "p2"]}
]
}
`)
}
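// TestTopDownUnsupportedBuiltin checks that evaluating a registered builtin
// with no topdown implementation returns an unsupported builtin error.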
func TestTopDownUnsupportedBuiltin(t *testing.T) {
ast.RegisterBuiltin(&ast.Builtin{
Name: "unsupported_builtin",
})
body := ast.MustParseBody(`unsupported_builtin()`)
ctx := context.Background()
compiler := ast.NewCompiler()
store := inmem.New()
txn := storage.NewTransactionOrDie(ctx, store)
q := NewQuery(body).WithCompiler(compiler).WithStore(store).WithTransaction(txn)
_, err := q.Run(ctx)
expected := unsupportedBuiltinErr(body[0].Location)
if !reflect.DeepEqual(err, expected) {
t.Fatalf("Expected %v but got: %v", expected, err)
}
}
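// TestTopDownQueryCancellation checks that cancelling a long-running query via
// the Cancel object aborts evaluation with a CancelErr.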
func TestTopDownQueryCancellation(t *testing.T) {
ctx := context.Background()
compiler := compileModules([]string{
`
package test
p { data.arr[_] = _; test.sleep("1ms") }
`,
})
data := map[string]interface{}{
"arr": make([]interface{}, 1000),
}
store := inmem.NewFromObject(data)
txn := storage.NewTransactionOrDie(ctx, store)
cancel := NewCancel()
query := NewQuery(ast.MustParseBody("data.test.p")).
WithCompiler(compiler).
WithStore(store).
WithTransaction(txn).
WithCancel(cancel)
go func() {
time.Sleep(time.Millisecond * 50)
cancel.Cancel()
}()
qrs, err := query.Run(ctx)
if err == nil || err.(*Error).Code != CancelErr {
t.Fatalf("Expected cancel error but got: %v (err: %v)", qrs, err)
}
}
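// contextPropagationMock is used as the context key for the value asserted by
// TestTopDownContextPropagation.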
type contextPropagationMock struct{}
// contextPropagationStore will accumulate values from the contexts provided to
// read calls so that the test can verify that contexts are being propagated as
// expected.
type contextPropagationStore struct {
storage.WritesNotSupported
storage.TriggersNotSupported
storage.PolicyNotSupported
storage.IndexingNotSupported
calls []interface{}
}
func (m *contextPropagationStore) NewTransaction(context.Context, ...storage.TransactionParams) (storage.Transaction, error) {
return nil, nil
}
func (m *contextPropagationStore) Commit(context.Context, storage.Transaction) error {
return nil
}
func (m *contextPropagationStore) Abort(context.Context, storage.Transaction) {
}
func (m *contextPropagationStore) Read(ctx context.Context, txn storage.Transaction, path storage.Path) (interface{}, error) {
val := ctx.Value(contextPropagationMock{})
m.calls = append(m.calls, val)
return nil, nil
}
func TestTopDownContextPropagation(t *testing.T) {
ctx := context.WithValue(context.Background(), contextPropagationMock{}, "bar")
compiler := ast.NewCompiler()
compiler.Compile(map[string]*ast.Module{
"mod1": ast.MustParseModule(`package ex
p[x] { data.a[i] = x }`,
),
})
mockStore := &contextPropagationStore{}
txn := storage.NewTransactionOrDie(ctx, mockStore)
query := NewQuery(ast.MustParseBody("data.ex.p")).
WithCompiler(compiler).
WithStore(mockStore).
WithTransaction(txn)
_, err := query.Run(ctx)
if err != nil {
t.Fatalf("Unexpected query error: %v", err)
}
expectedCalls := []interface{}{"bar"}
if !reflect.DeepEqual(expectedCalls, mockStore.calls) {
t.Fatalf("Expected %v but got: %v", expectedCalls, mockStore.calls)
}
}
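// compileModules parses and compiles the given module sources, panicking on
// compilation errors.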
func compileModules(input []string) *ast.Compiler {
mods := map[string]*ast.Module{}
for idx, i := range input {
id := fmt.Sprintf("testMod%d", idx)
mods[id] = ast.MustParseModule(i)
}
c := ast.NewCompiler()
if c.Compile(mods); c.Failed() {
panic(c.Errors)
}
return c
}
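// compileRules builds a single module in the default root (data) package from
// the given imports and rule strings and compiles it.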
func compileRules(imports []string, input []string) (*ast.Compiler, error) {
p := ast.Ref{ast.DefaultRootDocument}
is := []*ast.Import{}
for _, i := range imports {
is = append(is, &ast.Import{
Path: ast.MustParseTerm(i),
})
}
m := &ast.Module{
Package: &ast.Package{
Path: p,
},
Imports: is,
}
rules := []*ast.Rule{}
for i := range input {
rules = append(rules, ast.MustParseRule(input[i]))
rules[i].Module = m
}
m.Rules = rules
for i := range rules {
rules[i].Module = m
}
c := ast.NewCompiler()
if c.Compile(map[string]*ast.Module{"testMod": m}); c.Failed() {
return nil, c.Errors
}
return c, nil
}
// loadSmallTestData returns base documents that are referenced
// throughout the topdown test suite.
//
// Avoid the following top-level keys: i, j, k, p, q, r, v, x, y, z.
// These are used for rule names, local variables, etc.
//
func loadSmallTestData() map[string]interface{} {
var data map[string]interface{}
err := util.UnmarshalJSON([]byte(`{
"a": [1,2,3,4],
"b": {
"v1": "hello",
"v2": "goodbye"
},
"c": [{
"x": [true, false, "foo"],
"y": [null, 3.14159],
"z": {"p": true, "q": false}
}],
"d": {
"e": ["bar", "baz"]
},
"f": [
{"xs": [1.0], "ys": [2.0]},
{"xs": [2.0], "ys": [3.0]}
],
"g": {
"a": [1, 0, 0, 0],
"b": [0, 2, 0, 0],
"c": [0, 0, 0, 4]
},
"h": [
[1,2,3],
[2,3,4]
],
"l": [
{
"a": "bob",
"b": -1,
"c": [1,2,3,4]
},
{
"a": "alice",
"b": 1,
"c": [2,3,4,5],
"d": null
}
],
"strings": {
"foo": 1,
"bar": 2,
"baz": 3
},
"three": 3,
"m": [],
"numbers": [
"1",
"2",
"3",
"4"
]
}`), &data)
if err != nil {
panic(err)
}
return data
}
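// runTopDownTestCase compiles the given rules against the small test data set,
// importing every top-level key, and asserts the result of data.p.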
func runTopDownTestCase(t *testing.T, data map[string]interface{}, note string, rules []string, expected interface{}) {
imports := []string{}
for k := range data {
imports = append(imports, "data."+k)
}
compiler, err := compileRules(imports, rules)
if err != nil {
t.Errorf("%v: Compiler error: %v", note, err)
return
}
store := inmem.NewFromObject(data)
assertTopDownWithPath(t, compiler, store, note, []string{"p"}, "", expected)
}
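// assertTopDownWithPath evaluates data at the given path with an optional input
// document and compares the result to expected, which may be an error value, an
// expected JSON string, or an empty string for an undefined result.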
func assertTopDownWithPath(t *testing.T, compiler *ast.Compiler, store storage.Store, note string, path []string, input string, expected interface{}) {
var inputTerm *ast.Term
if len(input) > 0 {
inputTerm = ast.MustParseTerm(input)
}
ctx := context.Background()
txn := storage.NewTransactionOrDie(ctx, store)
defer store.Abort(ctx, txn)
var lhs *ast.Term
if len(path) == 0 {
lhs = ast.NewTerm(ast.DefaultRootRef)
} else {
lhs = ast.MustParseTerm("data." + strings.Join(path, "."))
}
rhs := ast.VarTerm(ast.WildcardPrefix + "result")
body := ast.NewBody(ast.Equality.Expr(lhs, rhs))
query := NewQuery(body).
WithCompiler(compiler).
WithStore(store).
WithTransaction(txn).
WithInput(inputTerm)
var tracer BufferTracer
if os.Getenv("OPA_TRACE_TEST") != "" {
query = query.WithTracer(&tracer)
}
testutil.Subtest(t, note, func(t *testing.T) {
switch e := expected.(type) {
case error:
result, err := query.Run(ctx)
if err == nil {
t.Errorf("Expected error but got: %v", result)
return
}
if !strings.Contains(err.Error(), e.Error()) {
t.Errorf("Expected error %v but got: %v", e, err)
}
case string:
qrs, err := query.Run(ctx)
if tracer != nil {
PrettyTrace(os.Stdout, tracer)
}
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
if len(e) == 0 {
if len(qrs) != 0 {
t.Fatalf("Expected undefined result but got: %v", qrs)
}
return
}
if len(qrs) == 0 {
t.Fatalf("Expected %v but got undefined", e)
}
result, err := ast.JSON(qrs[0][rhs.Value.(ast.Var)].Value)
if err != nil {
t.Fatal(err)
}
var requiresSort bool
if rules := compiler.GetRulesExact(lhs.Value.(ast.Ref)); len(rules) > 0 && rules[0].Head.DocKind() == ast.PartialSetDoc {
requiresSort = true
}
expected := util.MustUnmarshalJSON([]byte(e))
if requiresSort {
sort.Sort(resultSet(result.([]interface{})))
if sl, ok := expected.([]interface{}); ok {
sort.Sort(resultSet(sl))
}
}
if util.Compare(expected, result) != 0 {
t.Fatalf("Unexpected result:\nGot: %v\nExp:\n%v", result, expected)
}
// If the test case involved the input document, re-run it with partial
// evaluation enabled and input marked as unknown. Then replay the query and
// verify the partial evaluation result is the same. Note, we cannot evaluate
// the result of a query against `data` because the queries need to be
// converted into rules (which would result in recursion.)
if len(path) > 0 {
runTopDownPartialTestCase(ctx, t, compiler, store, txn, inputTerm, rhs, body, requiresSort, expected)
}
}
})
}
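// runTopDownPartialTestCase re-evaluates the query with input marked as unknown,
// compiles the partial evaluation output into rules, replays the query, and
// checks that the result matches the full-evaluation result.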
func runTopDownPartialTestCase(ctx context.Context, t *testing.T, compiler *ast.Compiler, store storage.Store, txn storage.Transaction, input *ast.Term, output *ast.Term, body ast.Body, requiresSort bool, expected interface{}) {
partialQuery := NewQuery(body).
WithCompiler(compiler).
WithStore(store).
WithUnknowns([]*ast.Term{ast.MustParseTerm("input")}).
WithTransaction(txn)
partials, support, err := partialQuery.PartialRun(ctx)
if err != nil {
t.Fatal("Unexpected error on partial evaluation comparison:", err)
}
module := ast.MustParseModule("package topdown_test_partial")
module.Rules = make([]*ast.Rule, len(partials))
for i, body := range partials {
module.Rules[i] = &ast.Rule{
Head: ast.NewHead(ast.Var("__result__"), nil, output),
Body: body,
Module: module,
}
}
compiler.Modules["topdown_test_partial"] = module
for i, module := range support {
compiler.Modules[fmt.Sprintf("topdown_test_support_%d", i)] = module
}
compiler.Compile(compiler.Modules)
if compiler.Failed() {
t.Fatal("Unexpected error on partial evaluation result compile:", compiler.Errors)
}
query := NewQuery(ast.MustParseBody("data.topdown_test_partial.__result__ = x")).
WithCompiler(compiler).
WithStore(store).
WithTransaction(txn).
WithInput(input)
qrs, err := query.Run(ctx)
if err != nil {
t.Fatal("Unexpected error on query after partial evaluation:", err)
}
if len(qrs) == 0 {
t.Fatalf("Expected %v but got undefined from query after partial evaluation", expected)
}
result, err := ast.JSON(qrs[0][ast.Var("x")].Value)
if err != nil {
t.Fatal(err)
}
if requiresSort {
sort.Sort(resultSet(result.([]interface{})))
if sl, ok := expected.([]interface{}); ok {
sort.Sort(resultSet(sl))
}
}
if util.Compare(expected, result) != 0 {
t.Fatalf("Unexpected result after partial evaluation:\nGot:\n%v\nExp:\n%v", result, expected)
}
}
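// resultSet implements sort.Interface so that unordered (partial set) results
// can be sorted before comparison.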
type resultSet []interface{}
func (rs resultSet) Less(i, j int) bool {
return util.Compare(rs[i], rs[j]) < 0
}
func (rs resultSet) Swap(i, j int) {
tmp := rs[i]
rs[i] = rs[j]
rs[j] = tmp
}
func (rs resultSet) Len() int {
return len(rs)
}
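// init registers a test-only "test.sleep" builtin that blocks for the given
// duration before returning null.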
func init() {
ast.RegisterBuiltin(&ast.Builtin{
Name: "test.sleep",
Decl: types.NewFunction(
types.Args(types.S),
types.NewNull(),
),
})
RegisterFunctionalBuiltin1("test.sleep", func(a ast.Value) (ast.Value, error) {
d, _ := time.ParseDuration(string(a.(ast.String)))
time.Sleep(d)
return ast.Null{}, nil
})
}
|
[
"\"OPA_TRACE_TEST\""
] |
[] |
[
"OPA_TRACE_TEST"
] |
[]
|
["OPA_TRACE_TEST"]
|
go
| 1 | 0 | |
modules/multi2vec-clip/module.go
|
// _ _
// __ _____ __ ___ ___ __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
// \ V V / __/ (_| |\ V /| | (_| | || __/
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
// Copyright © 2016 - 2022 SeMI Technologies B.V. All rights reserved.
//
// CONTACT: [email protected]
//
package modclip
import (
"context"
"net/http"
"os"
"time"
"github.com/pkg/errors"
"github.com/semi-technologies/weaviate/entities/models"
"github.com/semi-technologies/weaviate/entities/modulecapabilities"
"github.com/semi-technologies/weaviate/entities/moduletools"
"github.com/semi-technologies/weaviate/modules/multi2vec-clip/clients"
"github.com/semi-technologies/weaviate/modules/multi2vec-clip/vectorizer"
"github.com/sirupsen/logrus"
)
func New() *ClipModule {
return &ClipModule{}
}
type ClipModule struct {
imageVectorizer imageVectorizer
nearImageGraphqlProvider modulecapabilities.GraphQLArguments
nearImageSearcher modulecapabilities.Searcher
textVectorizer textVectorizer
nearTextGraphqlProvider modulecapabilities.GraphQLArguments
nearTextSearcher modulecapabilities.Searcher
nearTextTransformer modulecapabilities.TextTransform
metaClient metaClient
}
type metaClient interface {
MetaInfo() (map[string]interface{}, error)
}
type imageVectorizer interface {
Object(ctx context.Context, object *models.Object,
settings vectorizer.ClassSettings) error
VectorizeImage(ctx context.Context, image string) ([]float32, error)
}
type textVectorizer interface {
Texts(ctx context.Context, input []string,
settings vectorizer.ClassSettings) ([]float32, error)
MoveTo(source, target []float32, weight float32) ([]float32, error)
MoveAwayFrom(source, target []float32, weight float32) ([]float32, error)
CombineVectors(vectors [][]float32) []float32
}
func (m *ClipModule) Name() string {
return "multi2vec-clip"
}
func (m *ClipModule) Init(ctx context.Context,
params moduletools.ModuleInitParams) error {
if err := m.initVectorizer(ctx, params.GetLogger()); err != nil {
return errors.Wrap(err, "init vectorizer")
}
if err := m.initNearImage(); err != nil {
return errors.Wrap(err, "init near text")
}
return nil
}
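// InitExtension wires in the nearText transformer exposed by other modules (if
// any) and then initializes nearText support.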
func (m *ClipModule) InitExtension(modules []modulecapabilities.Module) error {
for _, module := range modules {
if module.Name() == m.Name() {
continue
}
if arg, ok := module.(modulecapabilities.TextTransformers); ok {
if arg != nil && arg.TextTransformers() != nil {
m.nearTextTransformer = arg.TextTransformers()["nearText"]
}
}
}
if err := m.initNearText(); err != nil {
return errors.Wrap(err, "init near text")
}
return nil
}
func (m *ClipModule) initVectorizer(ctx context.Context,
logger logrus.FieldLogger) error {
// TODO: proper config management
uri := os.Getenv("CLIP_INFERENCE_API")
if uri == "" {
return errors.Errorf("required variable CLIP_INFERENCE_API is not set")
}
client := clients.New(uri, logger)
if err := client.WaitForStartup(ctx, 1*time.Second); err != nil {
return errors.Wrap(err, "init remote vectorizer")
}
m.imageVectorizer = vectorizer.New(client)
m.textVectorizer = vectorizer.New(client)
m.metaClient = client
return nil
}
func (m *ClipModule) RootHandler() http.Handler {
// TODO: remove once this is a capability interface
return nil
}
func (m *ClipModule) VectorizeObject(ctx context.Context,
obj *models.Object, cfg moduletools.ClassConfig) error {
icheck := vectorizer.NewClassSettings(cfg)
return m.imageVectorizer.Object(ctx, obj, icheck)
}
func (m *ClipModule) MetaInfo() (map[string]interface{}, error) {
return m.metaClient.MetaInfo()
}
// verify we implement the modules.Module interface
var (
_ = modulecapabilities.Module(New())
_ = modulecapabilities.Vectorizer(New())
)
|
[
"\"CLIP_INFERENCE_API\""
] |
[] |
[
"CLIP_INFERENCE_API"
] |
[]
|
["CLIP_INFERENCE_API"]
|
go
| 1 | 0 | |
cmd/influx/cli/cli.go
|
// Package cli contains the logic of the influx command line client.
package cli // import "github.com/adinesh10/influxdb/cmd/influx/cli"
import (
"bytes"
"encoding/csv"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"os/signal"
"path/filepath"
"sort"
"strconv"
"strings"
"syscall"
"text/tabwriter"
"golang.org/x/crypto/ssh/terminal"
"github.com/adinesh10/influxdb/client"
"github.com/adinesh10/influxdb/importer/v8"
"github.com/adinesh10/influxdb/influxql"
"github.com/adinesh10/influxdb/models"
"github.com/peterh/liner"
)
// ErrBlankCommand is returned when a parsed command is empty.
var ErrBlankCommand = errors.New("empty input")
// CommandLine holds CLI configuration and state.
type CommandLine struct {
Line *liner.State
Host string
Port int
Database string
Ssl bool
RetentionPolicy string
ClientVersion string
ServerVersion string
Pretty bool // controls pretty print for json
Format string // controls the output format. Valid values are json, csv, or column
Execute string
ShowVersion bool
Import bool
Chunked bool
Quit chan struct{}
IgnoreSignals bool // Ignore signals normally caught by this process (used primarily for testing)
ForceTTY bool // Force the CLI to act as if it were connected to a TTY
osSignals chan os.Signal
historyFilePath string
Client *client.Client
ClientConfig client.Config // Client config options.
ImporterConfig v8.Config // Importer configuration options.
}
// New returns an instance of CommandLine with the specified client version.
func New(version string) *CommandLine {
return &CommandLine{
ClientVersion: version,
Quit: make(chan struct{}, 1),
osSignals: make(chan os.Signal, 1),
}
}
// Run executes the CLI.
func (c *CommandLine) Run() error {
hasTTY := c.ForceTTY || terminal.IsTerminal(int(os.Stdin.Fd()))
var promptForPassword bool
// determine if they set the password flag but provided no value
for _, v := range os.Args {
v = strings.ToLower(v)
if (strings.HasPrefix(v, "-password") || strings.HasPrefix(v, "--password")) && c.ClientConfig.Password == "" {
promptForPassword = true
break
}
}
// Check if we will be able to prompt for the password later.
if promptForPassword && !hasTTY {
return errors.New("Unable to prompt for a password with no TTY.")
}
// Read environment variables for username/password.
if c.ClientConfig.Username == "" {
c.ClientConfig.Username = os.Getenv("INFLUX_USERNAME")
}
// If we are going to be prompted for a password, always use the entered password.
if promptForPassword {
// Open the liner (temporarily) and prompt for the password.
p, e := func() (string, error) {
l := liner.NewLiner()
defer l.Close()
return l.PasswordPrompt("password: ")
}()
if e != nil {
return errors.New("Unable to parse password")
}
c.ClientConfig.Password = p
} else if c.ClientConfig.Password == "" {
c.ClientConfig.Password = os.Getenv("INFLUX_PASSWORD")
}
if err := c.Connect(""); err != nil {
msg := "Please check your connection settings and ensure 'influxd' is running."
if !c.Ssl && strings.Contains(err.Error(), "malformed HTTP response") {
// Attempt to connect with SSL and disable secure SSL for this test.
c.Ssl = true
unsafeSsl := c.ClientConfig.UnsafeSsl
c.ClientConfig.UnsafeSsl = true
if err := c.Connect(""); err == nil {
msg = "Please use the -ssl flag to connect using SSL."
}
c.Ssl = false
c.ClientConfig.UnsafeSsl = unsafeSsl
} else if c.Ssl && !c.ClientConfig.UnsafeSsl && strings.Contains(err.Error(), "certificate is valid for") {
// Attempt to connect with an insecure connection just to see if it works.
c.ClientConfig.UnsafeSsl = true
if err := c.Connect(""); err == nil {
msg = "You may use -unsafeSsl to connect anyway, but the SSL connection will not be secure."
}
c.ClientConfig.UnsafeSsl = false
}
return fmt.Errorf("Failed to connect to %s: %s\n%s", c.Client.Addr(), err.Error(), msg)
}
// Modify precision.
c.SetPrecision(c.ClientConfig.Precision)
if c.Execute != "" {
// Make the non-interactive mode send everything through the CLI's parser
// the same way the interactive mode works
lines := strings.Split(c.Execute, "\n")
for _, line := range lines {
if err := c.ParseCommand(line); err != nil {
return err
}
}
return nil
}
if c.Import {
addr := net.JoinHostPort(c.Host, strconv.Itoa(c.Port))
u, e := client.ParseConnectionString(addr, c.Ssl)
if e != nil {
return e
}
// Copy the latest importer config and inject the latest client config
// into it.
config := c.ImporterConfig
config.Config = c.ClientConfig
config.URL = u
i := v8.NewImporter(config)
if err := i.Import(); err != nil {
err = fmt.Errorf("ERROR: %s\n", err)
return err
}
return nil
}
if !hasTTY {
cmd, err := ioutil.ReadAll(os.Stdin)
if err != nil {
return err
}
return c.ExecuteQuery(string(cmd))
}
if !c.IgnoreSignals {
// register OS signals for graceful termination
signal.Notify(c.osSignals, syscall.SIGINT, syscall.SIGTERM)
}
c.Line = liner.NewLiner()
defer c.Line.Close()
c.Line.SetMultiLineMode(true)
fmt.Printf("Connected to %s version %s\n", c.Client.Addr(), c.ServerVersion)
c.Version()
// Only load/write history if HOME environment variable is set.
if homeDir := os.Getenv("HOME"); homeDir != "" {
// Attempt to load the history file.
c.historyFilePath = filepath.Join(homeDir, ".influx_history")
if historyFile, err := os.Open(c.historyFilePath); err == nil {
c.Line.ReadHistory(historyFile)
historyFile.Close()
}
}
// read from prompt until exit is run
return c.mainLoop()
}
// mainLoop runs the main prompt loop for the CLI.
func (c *CommandLine) mainLoop() error {
for {
select {
case <-c.osSignals:
c.exit()
return nil
case <-c.Quit:
c.exit()
return nil
default:
l, e := c.Line.Prompt("> ")
if e == io.EOF {
// Instead of dying, register that someone exited the program gracefully
l = "exit"
} else if e != nil {
c.exit()
return e
}
if err := c.ParseCommand(l); err != ErrBlankCommand && !strings.HasPrefix(strings.TrimSpace(l), "auth") {
c.Line.AppendHistory(l)
c.saveHistory()
}
}
}
}
// ParseCommand parses an instruction and calls the related method
// or executes the command as a query against InfluxDB.
func (c *CommandLine) ParseCommand(cmd string) error {
lcmd := strings.TrimSpace(strings.ToLower(cmd))
tokens := strings.Fields(lcmd)
if len(tokens) > 0 {
switch tokens[0] {
case "exit", "quit":
close(c.Quit)
case "gopher":
c.gopher()
case "connect":
return c.Connect(cmd)
case "auth":
c.SetAuth(cmd)
case "help":
c.help()
case "history":
c.history()
case "format":
c.SetFormat(cmd)
case "precision":
c.SetPrecision(cmd)
case "consistency":
c.SetWriteConsistency(cmd)
case "settings":
c.Settings()
case "pretty":
c.Pretty = !c.Pretty
if c.Pretty {
fmt.Println("Pretty print enabled")
} else {
fmt.Println("Pretty print disabled")
}
case "use":
c.use(cmd)
case "insert":
return c.Insert(cmd)
case "clear":
c.clear(cmd)
default:
return c.ExecuteQuery(cmd)
}
return nil
}
return ErrBlankCommand
}
// Connect connects to a server.
func (c *CommandLine) Connect(cmd string) error {
// Remove the "connect" keyword if it exists
addr := strings.TrimSpace(strings.Replace(cmd, "connect", "", -1))
if addr == "" {
// If they didn't provide a connection string, use the current settings
addr = net.JoinHostPort(c.Host, strconv.Itoa(c.Port))
}
URL, err := client.ParseConnectionString(addr, c.Ssl)
if err != nil {
return err
}
// Create copy of the current client config and create a new client.
ClientConfig := c.ClientConfig
ClientConfig.UserAgent = "InfluxDBShell/" + c.ClientVersion
ClientConfig.URL = URL
client, err := client.NewClient(ClientConfig)
if err != nil {
return fmt.Errorf("Could not create client %s", err)
}
c.Client = client
_, v, err := c.Client.Ping()
if err != nil {
return err
}
c.ServerVersion = v
// Update the command with the current connection information
if host, port, err := net.SplitHostPort(ClientConfig.URL.Host); err == nil {
c.Host = host
if i, err := strconv.Atoi(port); err == nil {
c.Port = i
}
}
return nil
}
// SetAuth sets client authentication credentials.
func (c *CommandLine) SetAuth(cmd string) {
// If they pass in the entire command, we should parse it
// auth <username> <password>
args := strings.Fields(cmd)
if len(args) == 3 {
args = args[1:]
} else {
args = []string{}
}
if len(args) == 2 {
c.ClientConfig.Username = args[0]
c.ClientConfig.Password = args[1]
} else {
u, e := c.Line.Prompt("username: ")
if e != nil {
fmt.Printf("Unable to process input: %s", e)
return
}
c.ClientConfig.Username = strings.TrimSpace(u)
p, e := c.Line.PasswordPrompt("password: ")
if e != nil {
fmt.Printf("Unable to process input: %s", e)
return
}
c.ClientConfig.Password = p
}
// Update the client as well
c.Client.SetAuth(c.ClientConfig.Username, c.ClientConfig.Password)
}
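// clear resets the current database or retention policy context.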
func (c *CommandLine) clear(cmd string) {
args := strings.Split(strings.TrimSuffix(strings.TrimSpace(cmd), ";"), " ")
v := strings.ToLower(strings.Join(args[1:], " "))
switch v {
case "database", "db":
c.Database = ""
fmt.Println("database context cleared")
return
case "retention policy", "rp":
c.RetentionPolicy = ""
fmt.Println("retention policy context cleared")
return
default:
if len(args) > 1 {
fmt.Printf("invalid command %q.\n", v)
}
fmt.Println(`Possible commands for 'clear' are:
# Clear the database context
clear database
clear db
# Clear the retention policy context
clear retention policy
clear rp
`)
}
}
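// use switches the current database (and optional retention policy) after
// verifying that they exist on the server.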
func (c *CommandLine) use(cmd string) {
args := strings.Split(strings.TrimSuffix(strings.TrimSpace(cmd), ";"), " ")
if len(args) != 2 {
fmt.Printf("Could not parse database name from %q.\n", cmd)
return
}
stmt := args[1]
db, rp, err := parseDatabaseAndRetentionPolicy([]byte(stmt))
if err != nil {
fmt.Printf("Unable to parse database or retention policy from %s", stmt)
return
}
if !c.databaseExists(db) {
return
}
c.Database = db
fmt.Printf("Using database %s\n", db)
if rp != "" {
if !c.retentionPolicyExists(db, rp) {
return
}
c.RetentionPolicy = rp
fmt.Printf("Using retention policy %s\n", rp)
}
}
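// databaseExists checks that the named database exists, warning instead of
// failing when SHOW DATABASES cannot be run by the current user.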
func (c *CommandLine) databaseExists(db string) bool {
// Validate if specified database exists
response, err := c.Client.Query(client.Query{Command: "SHOW DATABASES"})
if err != nil {
fmt.Printf("ERR: %s\n", err)
return false
} else if err := response.Error(); err != nil {
if c.ClientConfig.Username == "" {
fmt.Printf("ERR: %s\n", err)
return false
}
// TODO(jsternberg): Fix SHOW DATABASES to be user-aware #6397.
// If we are unable to run SHOW DATABASES, display a warning and use the
// database anyway in case the person doesn't have permission to run the
// command, but does have permission to use the database.
fmt.Printf("WARN: %s\n", err)
} else {
// Verify the provided database exists
if databaseExists := func() bool {
for _, result := range response.Results {
for _, row := range result.Series {
if row.Name == "databases" {
for _, values := range row.Values {
for _, database := range values {
if database == db {
return true
}
}
}
}
}
}
return false
}(); !databaseExists {
fmt.Printf("ERR: Database %s doesn't exist. Run SHOW DATABASES for a list of existing databases.\n", db)
return false
}
}
return true
}
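// retentionPolicyExists checks that the named retention policy exists on the
// given database, warning instead of failing when the query is not permitted.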
func (c *CommandLine) retentionPolicyExists(db, rp string) bool {
// Validate if specified retention policy exists
response, err := c.Client.Query(client.Query{Command: fmt.Sprintf("SHOW RETENTION POLICIES ON %q", db)})
if err != nil {
fmt.Printf("ERR: %s\n", err)
return false
} else if err := response.Error(); err != nil {
if c.ClientConfig.Username == "" {
fmt.Printf("ERR: %s\n", err)
return false
}
fmt.Printf("WARN: %s\n", err)
} else {
// Verify the provided retention policy exists
if retentionPolicyExists := func() bool {
for _, result := range response.Results {
for _, row := range result.Series {
for _, values := range row.Values {
for i, v := range values {
if i != 0 {
continue
}
if v == rp {
return true
}
}
}
}
}
return false
}(); !retentionPolicyExists {
fmt.Printf("ERR: RETENTION POLICY %s doesn't exist. Run SHOW RETENTION POLICIES ON %q for a list of existing retention polices.\n", rp, db)
return false
}
}
return true
}
// SetPrecision sets client precision.
func (c *CommandLine) SetPrecision(cmd string) {
// normalize cmd
cmd = strings.ToLower(cmd)
// Remove the "precision" keyword if it exists
cmd = strings.TrimSpace(strings.Replace(cmd, "precision", "", -1))
switch cmd {
case "h", "m", "s", "ms", "u", "ns":
c.ClientConfig.Precision = cmd
c.Client.SetPrecision(c.ClientConfig.Precision)
case "rfc3339":
c.ClientConfig.Precision = ""
c.Client.SetPrecision(c.ClientConfig.Precision)
default:
fmt.Printf("Unknown precision %q. Please use rfc3339, h, m, s, ms, u or ns.\n", cmd)
}
}
// SetFormat sets output format.
func (c *CommandLine) SetFormat(cmd string) {
// Remove the "format" keyword if it exists
cmd = strings.TrimSpace(strings.Replace(cmd, "format", "", -1))
// normalize cmd
cmd = strings.ToLower(cmd)
switch cmd {
case "json", "csv", "column":
c.Format = cmd
default:
fmt.Printf("Unknown format %q. Please use json, csv, or column.\n", cmd)
}
}
// SetWriteConsistency sets write consistency level.
func (c *CommandLine) SetWriteConsistency(cmd string) {
// Remove the "consistency" keyword if it exists
cmd = strings.TrimSpace(strings.Replace(cmd, "consistency", "", -1))
// normalize cmd
cmd = strings.ToLower(cmd)
_, err := models.ParseConsistencyLevel(cmd)
if err != nil {
fmt.Printf("Unknown consistency level %q. Please use any, one, quorum, or all.\n", cmd)
return
}
c.ClientConfig.WriteConsistency = cmd
}
// isWhitespace returns true if the rune is a space, tab, or newline.
func isWhitespace(ch rune) bool { return ch == ' ' || ch == '\t' || ch == '\n' }
// isLetter returns true if the rune is a letter.
func isLetter(ch rune) bool { return (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') }
// isDigit returns true if the rune is a digit.
func isDigit(ch rune) bool { return (ch >= '0' && ch <= '9') }
// isIdentFirstChar returns true if the rune can be used as the first char in an unquoted identifier.
func isIdentFirstChar(ch rune) bool { return isLetter(ch) || ch == '_' }
// isNotIdentChar returns true if the rune cannot be used in an unquoted identifier.
func isNotIdentChar(ch rune) bool { return !(isLetter(ch) || isDigit(ch) || ch == '_') }
func parseUnquotedIdentifier(stmt string) (string, string) {
if fields := strings.FieldsFunc(stmt, isNotIdentChar); len(fields) > 0 {
return fields[0], strings.TrimPrefix(stmt, fields[0])
}
return "", stmt
}
func parseDoubleQuotedIdentifier(stmt string) (string, string) {
escapeNext := false
fields := strings.FieldsFunc(stmt, func(ch rune) bool {
if ch == '\\' {
escapeNext = true
} else if ch == '"' {
if !escapeNext {
return true
}
escapeNext = false
}
return false
})
if len(fields) > 0 {
return fields[0], strings.TrimPrefix(stmt, "\""+fields[0]+"\"")
}
return "", stmt
}
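// parseNextIdentifier skips leading whitespace and returns the next unquoted or
// double-quoted identifier along with the remainder of the statement.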
func parseNextIdentifier(stmt string) (ident, remainder string) {
if len(stmt) > 0 {
switch {
case isWhitespace(rune(stmt[0])):
return parseNextIdentifier(stmt[1:])
case isIdentFirstChar(rune(stmt[0])):
return parseUnquotedIdentifier(stmt)
case stmt[0] == '"':
return parseDoubleQuotedIdentifier(stmt)
}
}
return "", stmt
}
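// parseInto parses the target of an INSERT INTO clause, overriding the current
// database and retention policy when they are specified inline.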
func (c *CommandLine) parseInto(stmt string) *client.BatchPoints {
ident, stmt := parseNextIdentifier(stmt)
db, rp := c.Database, c.RetentionPolicy
if strings.HasPrefix(stmt, ".") {
db = ident
ident, stmt = parseNextIdentifier(stmt[1:])
}
if strings.HasPrefix(stmt, " ") {
rp = ident
stmt = stmt[1:]
}
return &client.BatchPoints{
Points: []client.Point{
client.Point{Raw: stmt},
},
Database: db,
RetentionPolicy: rp,
Precision: c.ClientConfig.Precision,
WriteConsistency: c.ClientConfig.WriteConsistency,
}
}
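// parseInsert parses an INSERT [INTO <db>.<rp>] <point> statement into a batch
// of points using the current connection settings.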
func (c *CommandLine) parseInsert(stmt string) (*client.BatchPoints, error) {
i, point := parseNextIdentifier(stmt)
if !strings.EqualFold(i, "insert") {
return nil, fmt.Errorf("found %s, expected INSERT\n", i)
}
if i, r := parseNextIdentifier(point); strings.EqualFold(i, "into") {
bp := c.parseInto(r)
return bp, nil
}
return &client.BatchPoints{
Points: []client.Point{
client.Point{Raw: point},
},
Database: c.Database,
RetentionPolicy: c.RetentionPolicy,
Precision: c.ClientConfig.Precision,
WriteConsistency: c.ClientConfig.WriteConsistency,
}, nil
}
// Insert runs an INSERT statement.
func (c *CommandLine) Insert(stmt string) error {
bp, err := c.parseInsert(stmt)
if err != nil {
fmt.Printf("ERR: %s\n", err)
return nil
}
if _, err := c.Client.Write(*bp); err != nil {
fmt.Printf("ERR: %s\n", err)
if c.Database == "" {
fmt.Println("Note: error may be due to not setting a database or retention policy.")
fmt.Println(`Please set a database with the command "use <database>" or`)
fmt.Println("INSERT INTO <database>.<retention-policy> <point>")
}
}
return nil
}
// query creates a query struct to be used with the client.
func (c *CommandLine) query(query string) client.Query {
return client.Query{
Command: query,
Database: c.Database,
Chunked: true,
}
}
// ExecuteQuery runs any query statement.
func (c *CommandLine) ExecuteQuery(query string) error {
// If we have a retention policy, we need to rewrite the statement sources
if c.RetentionPolicy != "" {
pq, err := influxql.NewParser(strings.NewReader(query)).ParseQuery()
if err != nil {
fmt.Printf("ERR: %s\n", err)
return err
}
for _, stmt := range pq.Statements {
if selectStatement, ok := stmt.(*influxql.SelectStatement); ok {
influxql.WalkFunc(selectStatement.Sources, func(n influxql.Node) {
if t, ok := n.(*influxql.Measurement); ok {
if t.Database == "" && c.Database != "" {
t.Database = c.Database
}
if t.RetentionPolicy == "" && c.RetentionPolicy != "" {
t.RetentionPolicy = c.RetentionPolicy
}
}
})
}
}
query = pq.String()
}
response, err := c.Client.Query(c.query(query))
if err != nil {
fmt.Printf("ERR: %s\n", err)
return err
}
c.FormatResponse(response, os.Stdout)
if err := response.Error(); err != nil {
fmt.Printf("ERR: %s\n", response.Error())
if c.Database == "" {
fmt.Println("Warning: It is possible this error is due to not setting a database.")
fmt.Println(`Please set a database with the command "use <database>".`)
}
return err
}
return nil
}
// FormatResponse formats output to the previously chosen format.
func (c *CommandLine) FormatResponse(response *client.Response, w io.Writer) {
switch c.Format {
case "json":
c.writeJSON(response, w)
case "csv":
c.writeCSV(response, w)
case "column":
c.writeColumns(response, w)
default:
fmt.Fprintf(w, "Unknown output format %q.\n", c.Format)
}
}
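// writeJSON writes the response as (optionally pretty-printed) JSON.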
func (c *CommandLine) writeJSON(response *client.Response, w io.Writer) {
var data []byte
var err error
if c.Pretty {
data, err = json.MarshalIndent(response, "", " ")
} else {
data, err = json.Marshal(response)
}
if err != nil {
fmt.Fprintf(w, "Unable to parse json: %s\n", err)
return
}
fmt.Fprintln(w, string(data))
}
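// writeCSV writes each result in the response as CSV rows.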
func (c *CommandLine) writeCSV(response *client.Response, w io.Writer) {
csvw := csv.NewWriter(w)
for _, result := range response.Results {
// Create a tabbed writer for each result as they won't always line up
rows := c.formatResults(result, "\t")
for _, r := range rows {
csvw.Write(strings.Split(r, "\t"))
}
csvw.Flush()
}
}
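// writeColumns writes the response in aligned, human-readable columns.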
func (c *CommandLine) writeColumns(response *client.Response, w io.Writer) {
// Create a tabbed writer for each result as they won't always line up
writer := new(tabwriter.Writer)
writer.Init(w, 0, 8, 1, ' ', 0)
for _, result := range response.Results {
// Print out all messages first
for _, m := range result.Messages {
fmt.Fprintf(w, "%s: %s.\n", m.Level, m.Text)
}
csv := c.formatResults(result, "\t")
for _, r := range csv {
fmt.Fprintln(writer, r)
}
writer.Flush()
}
}
// formatResults will behave differently if you are formatting for columns or csv
func (c *CommandLine) formatResults(result client.Result, separator string) []string {
rows := []string{}
// Create a tabbed writer for each result as they won't always line up
for i, row := range result.Series {
// gather tags
tags := []string{}
for k, v := range row.Tags {
tags = append(tags, fmt.Sprintf("%s=%s", k, v))
sort.Strings(tags)
}
columnNames := []string{}
// Only put name/tags in a column if format is csv
if c.Format == "csv" {
if len(tags) > 0 {
columnNames = append([]string{"tags"}, columnNames...)
}
if row.Name != "" {
columnNames = append([]string{"name"}, columnNames...)
}
}
columnNames = append(columnNames, row.Columns...)
// Output a line separator if we have more than one set of results and format is column
if i > 0 && c.Format == "column" {
rows = append(rows, "")
}
// If we are column format, we break out the name/tag to separate lines
if c.Format == "column" {
if row.Name != "" {
n := fmt.Sprintf("name: %s", row.Name)
rows = append(rows, n)
}
if len(tags) > 0 {
t := fmt.Sprintf("tags: %s", (strings.Join(tags, ", ")))
rows = append(rows, t)
}
}
rows = append(rows, strings.Join(columnNames, separator))
// if format is column, write dashes under each column
if c.Format == "column" {
lines := []string{}
for _, columnName := range columnNames {
lines = append(lines, strings.Repeat("-", len(columnName)))
}
rows = append(rows, strings.Join(lines, separator))
}
for _, v := range row.Values {
var values []string
if c.Format == "csv" {
if row.Name != "" {
values = append(values, row.Name)
}
if len(tags) > 0 {
values = append(values, strings.Join(tags, ","))
}
}
for _, vv := range v {
values = append(values, interfaceToString(vv))
}
rows = append(rows, strings.Join(values, separator))
}
// Output a line separator if in column format
if c.Format == "column" {
rows = append(rows, "")
}
}
return rows
}
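// interfaceToString renders a single response value as a string for csv or
// column output.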
func interfaceToString(v interface{}) string {
switch t := v.(type) {
case nil:
return ""
case bool:
return fmt.Sprintf("%v", v)
case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64, uintptr:
return fmt.Sprintf("%d", t)
case float32, float64:
return fmt.Sprintf("%v", t)
default:
return fmt.Sprintf("%v", t)
}
}
// Settings prints current settings.
func (c *CommandLine) Settings() {
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 1, 1, ' ', 0)
fmt.Fprintln(w, "Setting\tValue")
fmt.Fprintln(w, "--------\t--------")
if c.Port > 0 {
fmt.Fprintf(w, "Host\t%s:%d\n", c.Host, c.Port)
} else {
fmt.Fprintf(w, "Host\t%s\n", c.Host)
}
fmt.Fprintf(w, "Username\t%s\n", c.ClientConfig.Username)
fmt.Fprintf(w, "Database\t%s\n", c.Database)
fmt.Fprintf(w, "RetentionPolicy\t%s\n", c.RetentionPolicy)
fmt.Fprintf(w, "Pretty\t%v\n", c.Pretty)
fmt.Fprintf(w, "Format\t%s\n", c.Format)
fmt.Fprintf(w, "Write Consistency\t%s\n", c.ClientConfig.WriteConsistency)
fmt.Fprintln(w)
w.Flush()
}
func (c *CommandLine) help() {
fmt.Println(`Usage:
connect <host:port> connects to another node specified by host:port
auth prompts for username and password
pretty toggles pretty print for the json format
use <db_name> sets current database
format <format> specifies the format of the server responses: json, csv, or column
precision <format> specifies the format of the timestamp: rfc3339, h, m, s, ms, u or ns
consistency <level> sets write consistency level: any, one, quorum, or all
history displays command history
settings outputs the current settings for the shell
clear clears settings such as database or retention policy. run 'clear' for help
exit/quit/ctrl+d quits the influx shell
show databases show database names
show series show series information
show measurements show measurement information
show tag keys show tag key information
show field keys show field key information
A full list of influxql commands can be found at:
https://docs.influxdata.com/influxdb/latest/query_language/spec/
`)
}
func (c *CommandLine) history() {
var buf bytes.Buffer
c.Line.WriteHistory(&buf)
fmt.Print(buf.String())
}
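// saveHistory persists the in-memory command history to the history file.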
func (c *CommandLine) saveHistory() {
if historyFile, err := os.Create(c.historyFilePath); err != nil {
fmt.Printf("There was an error writing history file: %s\n", err)
} else {
c.Line.WriteHistory(historyFile)
historyFile.Close()
}
}
func (c *CommandLine) gopher() {
fmt.Println(`
.-::-::://:-::- .:/++/'
'://:-''/oo+//++o+/.://o- ./+:
.:-. '++- .o/ '+yydhy' o-
.:/. .h: :osoys .smMN- :/
-/:.' s- /MMMymh. '/y/ s'
-+s:'''' d -mMMms// '-/o:
-/++/++/////:. o: '... s- :s.
:+-+s-' ':/' 's- /+ 'o:
'+-'o: /ydhsh. '//. '-o- o-
.y. o: .MMMdm+y ':+++:::/+:.' s:
.-h/ y- 'sdmds'h -+ydds:::-.' 'h.
.//-.d' o: '.' 'dsNMMMNh:.:++' :y
+y. 'd 's. .s:mddds: ++ o/
'N- odd 'o/. './o-s-' .---+++' o-
'N' yNd .://:/:::::. -s -+/s/./s' 'o/'
so' .h '''' ////s: '+. .s +y'
os/-.y' 's' 'y::+ +d'
'.:o/ -+:-:.' so.---.'
o' 'd-.''/s'
.s' :y.''.y
-s mo:::'
:: yh
// '''' /M'
o+ .s///:/. 'N:
:+ /: -s' ho
's- -/s/:+/.+h' +h
ys' ':' '-. -d
oh .h
/o .s
s. .h
-y .d
m/ -h
+d /o
'N- y:
h: m.
s- -d
o- s+
+- 'm'
s/ oo--.
y- /s ':+'
s' 'od--' .d:
-+ ':o: ':+-/+
y- .:+- '
//o- '.:+/.
.-:+/' ''-/+/.
./:' ''.:o+/-'
.+o:/:/+-' ''.-+ooo/-'
o: -h///++////-.
/: .o/
//+ 'y
./sooy.
`)
}
// Version prints the CLI version.
func (c *CommandLine) Version() {
fmt.Println("InfluxDB shell version:", c.ClientVersion)
}
func (c *CommandLine) exit() {
// write to history file
c.saveHistory()
// release line resources
c.Line.Close()
c.Line = nil
}
|
[
"\"INFLUX_USERNAME\"",
"\"INFLUX_PASSWORD\"",
"\"HOME\""
] |
[] |
[
"INFLUX_USERNAME",
"HOME",
"INFLUX_PASSWORD"
] |
[]
|
["INFLUX_USERNAME", "HOME", "INFLUX_PASSWORD"]
|
go
| 3 | 0 | |
agent-launcher/src/main/java/com/thoughtworks/go/agent/launcher/AgentLauncherImpl.java
|
/*
* Copyright 2019 ThoughtWorks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.thoughtworks.go.agent.launcher;
import com.thoughtworks.cruise.agent.common.launcher.AgentLaunchDescriptor;
import com.thoughtworks.cruise.agent.common.launcher.AgentLauncher;
import com.thoughtworks.go.CurrentGoCDVersion;
import com.thoughtworks.go.agent.ServerUrlGenerator;
import com.thoughtworks.go.agent.common.AgentBootstrapperArgs;
import com.thoughtworks.go.agent.common.UrlConstructor;
import com.thoughtworks.go.agent.common.launcher.AgentProcessParent;
import com.thoughtworks.go.agent.common.util.Downloader;
import com.thoughtworks.go.agent.common.util.JarUtil;
import com.thoughtworks.go.logging.LogConfigurator;
import com.thoughtworks.go.util.FileUtil;
import com.thoughtworks.go.util.SslVerificationMode;
import org.apache.commons.io.FileUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.IOException;
import java.math.BigInteger;
import java.net.URLClassLoader;
import java.security.SecureRandom;
import java.util.Map;
import java.util.function.Predicate;
import java.util.jar.JarEntry;
public class AgentLauncherImpl implements AgentLauncher {
public static final int UNKNOWN_EXCEPTION_OCCURRED = -273;
/* 50-60 for launcher error codes*/
public static final int LAUNCHER_NOT_UP_TO_DATE = 60;
public static final String GO_AGENT_BOOTSTRAP_CLASS = "Go-Agent-Bootstrap-Class";
public static final String AGENT_BOOTSTRAPPER_LOCK_FILE = ".agent-bootstrapper.running";
private Lockfile lockFile = new Lockfile(new File(AGENT_BOOTSTRAPPER_LOCK_FILE));
private static final Logger LOG = LoggerFactory.getLogger(AgentLauncherImpl.class);
private final AgentProcessParentRunner agentProcessParentRunner;
public AgentLauncherImpl() {
this(new AgentJarBasedAgentParentRunner());
}
public AgentLauncherImpl(AgentProcessParentRunner agentProcessParentCreator) {
this.agentProcessParentRunner = agentProcessParentCreator;
}
@Override
public int launch(AgentLaunchDescriptor descriptor) {
LogConfigurator logConfigurator = new LogConfigurator("agent-launcher-logback.xml");
return logConfigurator.runWithLogger(() -> doLaunch(descriptor));
}
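// doLaunch acquires the bootstrapper lock file, downloads the launcher and agent
// jars from the server when necessary, and delegates to the agent process parent.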
private Integer doLaunch(AgentLaunchDescriptor descriptor) {
Thread shutdownHook = null;
try {
int returnValue;
if (!lockFile.tryLock()) {
return IRRECOVERABLE_ERROR;
}
shutdownHook = registerShutdownHook();
Map context = descriptor.context();
AgentBootstrapperArgs bootstrapperArgs = AgentBootstrapperArgs.fromProperties(context);
ServerUrlGenerator urlGenerator = new UrlConstructor(bootstrapperArgs.getServerUrl().toExternalForm());
File rootCertFile = bootstrapperArgs.getRootCertFile();
SslVerificationMode sslVerificationMode = SslVerificationMode.valueOf(bootstrapperArgs.getSslMode().name());
ServerBinaryDownloader launcherDownloader = new ServerBinaryDownloader(urlGenerator, rootCertFile, sslVerificationMode);
if (launcherDownloader.downloadIfNecessary(DownloadableFile.LAUNCHER)) {
return LAUNCHER_NOT_UP_TO_DATE;
}
ServerBinaryDownloader agentDownloader = new ServerBinaryDownloader(urlGenerator, rootCertFile, sslVerificationMode);
agentDownloader.downloadIfNecessary(DownloadableFile.AGENT);
returnValue = agentProcessParentRunner.run(getLauncherVersion(), launcherDownloader.getMd5(), urlGenerator, System.getenv(), context);
try {
// Sleep a bit so that if there are problems we don't spin
Thread.sleep(1000);
} catch (InterruptedException e) {
return returnValue;
}
return returnValue;
} catch (Exception e) {
LOG.error("Launch encountered an unknown exception", e);
return UNKNOWN_EXCEPTION_OCCURRED;
} finally {
removeShutDownHook(shutdownHook);
lockFile.delete();
}
}
private void removeShutDownHook(Thread shutdownHook) {
if (shutdownHook != null) {
try {
Runtime.getRuntime().removeShutdownHook(shutdownHook);
} catch (Exception e) {
}
}
}
private Thread registerShutdownHook() {
Thread shutdownHook = new Thread(() -> lockFile.delete());
Runtime.getRuntime().addShutdownHook(shutdownHook);
return shutdownHook;
}
private String getLauncherVersion() {
return CurrentGoCDVersion.getInstance().fullVersion();
}
public static interface AgentProcessParentRunner {
int run(String launcherVersion, String launcherMd5, ServerUrlGenerator urlGenerator, Map<String, String> environmentVariables, Map context);
}
private static class AgentJarBasedAgentParentRunner implements AgentProcessParentRunner {
@Override
public int run(String launcherVersion, String launcherMd5, ServerUrlGenerator urlGenerator, Map<String, String> environmentVariables, Map context) {
String agentProcessParentClassName = JarUtil.getManifestKey(Downloader.AGENT_BINARY_JAR, GO_AGENT_BOOTSTRAP_CLASS);
String tempDirSuffix = new BigInteger(64, new SecureRandom()).toString(16) + "-" + Downloader.AGENT_BINARY_JAR;
File tempDir = new File(FileUtil.TMP_PARENT_DIR, "deps-" + tempDirSuffix);
try {
try (URLClassLoader urlClassLoader = JarUtil.getClassLoaderFromJar(Downloader.AGENT_BINARY_JAR, jarEntryFilter(), tempDir, this.getClass().getClassLoader())) {
Class<?> aClass = urlClassLoader.loadClass(agentProcessParentClassName);
AgentProcessParent agentProcessParent = (AgentProcessParent) aClass.getDeclaredConstructor().newInstance();
return agentProcessParent.run(launcherVersion, launcherMd5, urlGenerator, environmentVariables, context);
} catch (ReflectiveOperationException | IOException e) {
throw new RuntimeException(e);
}
} finally {
FileUtils.deleteQuietly(tempDir);
}
}
private Predicate<JarEntry> jarEntryFilter() {
return jarEntry -> jarEntry.getName().startsWith("lib/") && jarEntry.getName().endsWith(".jar");
}
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
orc8r/cloud/go/services/configurator/storage/sql.go
|
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
package storage
import (
"context"
"database/sql"
"fmt"
"os"
"sort"
"magma/orc8r/cloud/go/sqorc"
"magma/orc8r/cloud/go/storage"
sq "github.com/Masterminds/squirrel"
"github.com/pkg/errors"
"github.com/thoas/go-funk"
)
const (
networksTable = "cfg_networks"
networkConfigTable = "cfg_network_configs"
entityTable = "cfg_entities"
entityAssocTable = "cfg_assocs"
entityAclTable = "cfg_acls"
)
const (
nwIDCol = "id"
nwTypeCol = "type"
nwNameCol = "name"
nwDescCol = "description"
nwVerCol = "version"
nwcIDCol = "network_id"
nwcTypeCol = "type"
nwcValCol = "value"
entPkCol = "pk"
entNidCol = "network_id"
entTypeCol = "type"
entKeyCol = "\"key\""
entGidCol = "graph_id"
entNameCol = "name"
entDescCol = "description"
entPidCol = "physical_id"
entConfCol = "config"
entVerCol = "version"
aFrCol = "from_pk"
aToCol = "to_pk"
aclIdCol = "id"
aclEntCol = "entity_pk"
aclScopeCol = "scope"
aclPermCol = "permission"
aclTypeCol = "type"
aclIdFilterCol = "id_filter"
aclVerCol = "version"
)
// NewSQLConfiguratorStorageFactory returns a ConfiguratorStorageFactory
// implementation backed by a SQL database.
func NewSQLConfiguratorStorageFactory(db *sql.DB, generator storage.IDGenerator, sqlBuilder sqorc.StatementBuilder) ConfiguratorStorageFactory {
return &sqlConfiguratorStorageFactory{db: db, idGenerator: generator, builder: sqlBuilder}
}
type sqlConfiguratorStorageFactory struct {
db *sql.DB
idGenerator storage.IDGenerator
builder sqorc.StatementBuilder
}
func (fact *sqlConfiguratorStorageFactory) InitializeServiceStorage() (err error) {
tx, err := fact.db.BeginTx(context.Background(), &sql.TxOptions{
Isolation: sql.LevelSerializable,
})
if err != nil {
return
}
defer func() {
if err == nil {
err = tx.Commit()
} else {
rollbackErr := tx.Rollback()
if rollbackErr != nil {
err = fmt.Errorf("%s; rollback error: %s", err, rollbackErr)
}
}
}()
// Named return values below so we can automatically decide tx commit/
// rollback in deferred function
_, err = fact.builder.CreateTable(networksTable).
IfNotExists().
Column(nwIDCol).Type(sqorc.ColumnTypeText).PrimaryKey().EndColumn().
Column(nwTypeCol).Type(sqorc.ColumnTypeText).EndColumn().
Column(nwNameCol).Type(sqorc.ColumnTypeText).EndColumn().
Column(nwDescCol).Type(sqorc.ColumnTypeText).EndColumn().
Column(nwVerCol).Type(sqorc.ColumnTypeInt).NotNull().Default(0).EndColumn().
RunWith(tx).
Exec()
if err != nil {
err = errors.Wrap(err, "failed to create networks table")
return
}
// Adding a type column if it doesn't exist already. This will ensure network
// tables that are already created will also have the type column.
// TODO Remove after 1-2 months to ensure service isn't disrupted
_, err = tx.Exec(fmt.Sprintf("ALTER TABLE %s ADD COLUMN IF NOT EXISTS %s text", networksTable, nwTypeCol))
// special case sqlite3 because ADD COLUMN IF NOT EXISTS is not supported
// and we only run sqlite3 for unit tests
if err != nil && os.Getenv("SQL_DRIVER") != "sqlite3" {
err = errors.Wrap(err, "failed to add 'type' field to networks table")
}
_, err = fact.builder.CreateIndex("type_idx").
IfNotExists().
On(networksTable).
Columns(nwTypeCol).
RunWith(tx).
Exec()
if err != nil {
err = errors.Wrap(err, "failed to create network type index")
return
}
_, err = fact.builder.CreateTable(networkConfigTable).
IfNotExists().
Column(nwcIDCol).Type(sqorc.ColumnTypeText).EndColumn().
Column(nwcTypeCol).Type(sqorc.ColumnTypeText).NotNull().EndColumn().
Column(nwcValCol).Type(sqorc.ColumnTypeBytes).EndColumn().
PrimaryKey(nwcIDCol, nwcTypeCol).
ForeignKey(networksTable, map[string]string{nwcIDCol: nwIDCol}, sqorc.ColumnOnDeleteCascade).
RunWith(tx).
Exec()
if err != nil {
err = errors.Wrap(err, "failed to create network configs table")
return
}
// Create an internal-only primary key (UUID) for entities.
// This keeps index size in control and supporting table schemas simpler.
_, err = fact.builder.CreateTable(entityTable).
IfNotExists().
Column(entPkCol).Type(sqorc.ColumnTypeText).PrimaryKey().EndColumn().
Column(entNidCol).Type(sqorc.ColumnTypeText).EndColumn().
Column(entTypeCol).Type(sqorc.ColumnTypeText).NotNull().EndColumn().
Column(entKeyCol).Type(sqorc.ColumnTypeText).NotNull().EndColumn().
Column(entGidCol).Type(sqorc.ColumnTypeText).NotNull().EndColumn().
Column(entNameCol).Type(sqorc.ColumnTypeText).EndColumn().
Column(entDescCol).Type(sqorc.ColumnTypeText).EndColumn().
Column(entPidCol).Type(sqorc.ColumnTypeText).EndColumn().
Column(entConfCol).Type(sqorc.ColumnTypeBytes).EndColumn().
Column(entVerCol).Type(sqorc.ColumnTypeInt).NotNull().Default(0).EndColumn().
Unique(entNidCol, entKeyCol, entTypeCol).
Unique(entPidCol).
ForeignKey(networksTable, map[string]string{entNidCol: nwIDCol}, sqorc.ColumnOnDeleteCascade).
RunWith(tx).
Exec()
if err != nil {
err = errors.Wrap(err, "failed to create entities table")
return
}
_, err = fact.builder.CreateTable(entityAssocTable).
IfNotExists().
Column(aFrCol).Type(sqorc.ColumnTypeText).EndColumn().
Column(aToCol).Type(sqorc.ColumnTypeText).EndColumn().
PrimaryKey(aFrCol, aToCol).
ForeignKey(entityTable, map[string]string{aFrCol: entPkCol}, sqorc.ColumnOnDeleteCascade).
ForeignKey(entityTable, map[string]string{aToCol: entPkCol}, sqorc.ColumnOnDeleteCascade).
RunWith(tx).
Exec()
if err != nil {
err = errors.Wrap(err, "failed to create entity assoc table")
return
}
_, err = fact.builder.CreateTable(entityAclTable).
IfNotExists().
Column(aclIdCol).Type(sqorc.ColumnTypeText).PrimaryKey().EndColumn().
Column(aclEntCol).Type(sqorc.ColumnTypeText).EndColumn().
Column(aclScopeCol).Type(sqorc.ColumnTypeText).NotNull().EndColumn().
Column(aclPermCol).Type(sqorc.ColumnTypeInt).NotNull().EndColumn().
Column(aclTypeCol).Type(sqorc.ColumnTypeText).NotNull().EndColumn().
Column(aclIdFilterCol).Type(sqorc.ColumnTypeText).EndColumn().
Column(aclVerCol).Type(sqorc.ColumnTypeInt).NotNull().Default(0).EndColumn().
ForeignKey(entityTable, map[string]string{aclEntCol: entPkCol}, sqorc.ColumnOnDeleteCascade).
RunWith(tx).
Exec()
if err != nil {
err = errors.Wrap(err, "failed to create entity acl table")
return
}
// Create indexes (index is not implicitly created on a referencing FK)
_, err = fact.builder.CreateIndex("graph_id_idx").
IfNotExists().
On(entityTable).
Columns(entGidCol).
RunWith(tx).
Exec()
if err != nil {
err = errors.Wrap(err, "failed to create graph ID index")
return
}
_, err = fact.builder.CreateIndex("acl_ent_pk_idx").
IfNotExists().
On(entityAclTable).
Columns(aclEntCol).
RunWith(tx).
Exec()
if err != nil {
err = errors.Wrap(err, "failed to create acl ent PK index")
return
}
// Create internal network(s)
_, err = fact.builder.Insert(networksTable).
Columns(nwIDCol, nwTypeCol, nwNameCol, nwDescCol).
Values(InternalNetworkID, internalNetworkType, internalNetworkName, internalNetworkDescription).
OnConflict(nil, nwIDCol).
RunWith(tx).
Exec()
if err != nil {
err = errors.Wrap(err, "error creating internal networks")
return
}
return
}
func (fact *sqlConfiguratorStorageFactory) StartTransaction(ctx context.Context, opts *storage.TxOptions) (ConfiguratorStorage, error) {
tx, err := fact.db.BeginTx(ctx, getSqlOpts(opts))
if err != nil {
return nil, err
}
return &sqlConfiguratorStorage{tx: tx, idGenerator: fact.idGenerator, builder: fact.builder}, nil
}
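// getSqlOpts converts storage-level transaction options into database/sql
// transaction options, defaulting the isolation level when unset.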
func getSqlOpts(opts *storage.TxOptions) *sql.TxOptions {
if opts == nil {
return nil
}
if opts.Isolation == 0 {
return &sql.TxOptions{ReadOnly: opts.ReadOnly}
}
return &sql.TxOptions{ReadOnly: opts.ReadOnly, Isolation: sql.IsolationLevel(opts.Isolation)}
}
type sqlConfiguratorStorage struct {
tx *sql.Tx
idGenerator storage.IDGenerator
builder sqorc.StatementBuilder
}
func (store *sqlConfiguratorStorage) Commit() error {
return store.tx.Commit()
}
func (store *sqlConfiguratorStorage) Rollback() error {
return store.tx.Rollback()
}
func (store *sqlConfiguratorStorage) LoadNetworks(filter NetworkLoadFilter, loadCriteria NetworkLoadCriteria) (NetworkLoadResult, error) {
emptyRet := NetworkLoadResult{NetworkIDsNotFound: []string{}, Networks: []*Network{}}
if funk.IsEmpty(filter.Ids) && funk.IsEmpty(filter.TypeFilter) {
return emptyRet, nil
}
selectBuilder := store.getLoadNetworksSelectBuilder(filter, loadCriteria)
if loadCriteria.LoadConfigs {
selectBuilder = selectBuilder.LeftJoin(
fmt.Sprintf(
"%s ON %s.%s = %s.%s",
networkConfigTable, networkConfigTable, nwcIDCol, networksTable, nwIDCol,
),
)
}
rows, err := selectBuilder.RunWith(store.tx).Query()
if err != nil {
return emptyRet, fmt.Errorf("error querying for networks: %s", err)
}
defer sqorc.CloseRowsLogOnError(rows, "LoadNetworks")
loadedNetworksByID, loadedNetworkIDs, err := scanNetworkRows(rows, loadCriteria)
if err != nil {
return emptyRet, err
}
ret := NetworkLoadResult{
NetworkIDsNotFound: getNetworkIDsNotFound(loadedNetworksByID, filter.Ids),
Networks: make([]*Network, 0, len(loadedNetworksByID)),
}
for _, nid := range loadedNetworkIDs {
ret.Networks = append(ret.Networks, loadedNetworksByID[nid])
}
return ret, nil
}
func (store *sqlConfiguratorStorage) LoadAllNetworks(loadCriteria NetworkLoadCriteria) ([]Network, error) {
emptyNetworks := []Network{}
idsToExclude := []string{InternalNetworkID}
selectBuilder := store.builder.Select(getNetworkQueryColumns(loadCriteria)...).
From(networksTable).
Where(sq.NotEq{
fmt.Sprintf("%s.%s", networksTable, nwIDCol): idsToExclude,
})
if loadCriteria.LoadConfigs {
selectBuilder = selectBuilder.LeftJoin(
fmt.Sprintf(
"%s ON %s.%s = %s.%s",
networkConfigTable, networkConfigTable, nwcIDCol, networksTable, nwIDCol,
),
)
}
rows, err := selectBuilder.RunWith(store.tx).Query()
if err != nil {
return emptyNetworks, fmt.Errorf("error querying for networks: %s", err)
}
defer sqorc.CloseRowsLogOnError(rows, "LoadAllNetworks")
loadedNetworksByID, loadedNetworkIDs, err := scanNetworkRows(rows, loadCriteria)
if err != nil {
return emptyNetworks, err
}
networks := make([]Network, 0, len(loadedNetworksByID))
for _, nid := range loadedNetworkIDs {
networks = append(networks, *loadedNetworksByID[nid])
}
return networks, nil
}
func (store *sqlConfiguratorStorage) CreateNetwork(network Network) (Network, error) {
exists, err := store.doesNetworkExist(network.ID)
if err != nil {
return network, err
}
if exists {
return network, fmt.Errorf("a network with ID %s already exists", network.ID)
}
_, err = store.builder.Insert(networksTable).
Columns(nwIDCol, nwTypeCol, nwNameCol, nwDescCol).
Values(network.ID, network.Type, network.Name, network.Description).
RunWith(store.tx).
Exec()
if err != nil {
return network, fmt.Errorf("error inserting network: %s", err)
}
if funk.IsEmpty(network.Configs) {
return network, nil
}
// Sort config keys for deterministic behavior
configKeys := funk.Keys(network.Configs).([]string)
sort.Strings(configKeys)
insertBuilder := store.builder.Insert(networkConfigTable).
Columns(nwcIDCol, nwcTypeCol, nwcValCol)
for _, configKey := range configKeys {
insertBuilder = insertBuilder.Values(network.ID, configKey, network.Configs[configKey])
}
_, err = insertBuilder.RunWith(store.tx).Exec()
if err != nil {
return network, errors.Wrap(err, "error inserting network configs")
}
return network, nil
}
func (store *sqlConfiguratorStorage) UpdateNetworks(updates []NetworkUpdateCriteria) error {
if err := validateNetworkUpdates(updates); err != nil {
return err
}
networksToDelete := []string{}
networksToUpdate := []NetworkUpdateCriteria{}
for _, update := range updates {
if update.DeleteNetwork {
networksToDelete = append(networksToDelete, update.ID)
} else {
networksToUpdate = append(networksToUpdate, update)
}
}
stmtCache := sq.NewStmtCache(store.tx)
defer sqorc.ClearStatementCacheLogOnError(stmtCache, "UpdateNetworks")
// Update networks first
for _, update := range networksToUpdate {
err := store.updateNetwork(update, stmtCache)
if err != nil {
return errors.WithStack(err)
}
}
_, err := store.builder.Delete(networkConfigTable).Where(sq.Eq{nwcIDCol: networksToDelete}).
RunWith(store.tx).
Exec()
if err != nil {
return errors.Wrap(err, "failed to delete configs associated with networks")
}
_, err = store.builder.Delete(networksTable).Where(sq.Eq{nwIDCol: networksToDelete}).
RunWith(store.tx).
Exec()
if err != nil {
return errors.Wrap(err, "failed to delete networks")
}
return nil
}
func (store *sqlConfiguratorStorage) LoadEntities(networkID string, filter EntityLoadFilter, loadCriteria EntityLoadCriteria) (EntityLoadResult, error) {
ret := EntityLoadResult{Entities: []*NetworkEntity{}, EntitiesNotFound: []*EntityID{}}
// We load the requested entities in 3 steps:
// First, we load the entities and their ACLs
// Then, we load assocs if requested by the load criteria. Note that the
// load criteria can specify to load edges to and/or from the requested
// entities.
// For each loaded edge, we need to load the (type, key) corresponding to
// the PK pair that an edge is represented as. These may be already
// loaded as part of the first load from the entities table, so we can
// be smart here and only load (type, key) for PKs which we don't know.
// Finally, we will update the entity objects to return with their edges.
entsByPk, err := store.loadFromEntitiesTable(networkID, filter, loadCriteria)
if err != nil {
return ret, err
}
assocs, allAssocPks, err := store.loadFromAssocsTable(filter, loadCriteria, entsByPk)
if err != nil {
return ret, err
}
entTksByPk, err := store.loadEntityTypeAndKeys(allAssocPks, entsByPk)
if err != nil {
return ret, err
}
entsByPk, _, err = updateEntitiesWithAssocs(entsByPk, assocs, entTksByPk, loadCriteria)
if err != nil {
return ret, err
}
for _, ent := range entsByPk {
ret.Entities = append(ret.Entities, ent)
}
ret.EntitiesNotFound = calculateEntitiesNotFound(entsByPk, filter.IDs)
// Sort entities for deterministic returns
entComparator := func(a, b *NetworkEntity) bool {
return a.GetTypeAndKey().String() < b.GetTypeAndKey().String()
}
sort.Slice(ret.Entities, func(i, j int) bool { return entComparator(ret.Entities[i], ret.Entities[j]) })
return ret, nil
}
func (store *sqlConfiguratorStorage) CreateEntity(networkID string, entity NetworkEntity) (NetworkEntity, error) {
exists, err := store.doesEntExist(networkID, entity.GetTypeAndKey())
if err != nil {
return NetworkEntity{}, err
}
if exists {
return NetworkEntity{}, fmt.Errorf("an entity (%s) already exists", entity.GetTypeAndKey())
}
// First, we insert the entity and its ACLs. We do this first so we have a
// pk for the entity to reference in edge creation.
// Then we insert the associations as graph edges. This step involves a
// lookup of the associated entities to retrieve their PKs (since we don't
// expose PK to the world).
// Finally, if the created entity "bridges" 1 or more graphs, we merge
// those graphs into a single graph.
// For simplicity, we don't do any cycle detection at the moment. This
// shouldn't be a problem on the load side because we load graphs via
// graph ID, not by traversing edges.
createdEntWithPk, err := store.insertIntoEntityTable(networkID, entity)
if err != nil {
return NetworkEntity{}, err
}
err = store.createPermissions(networkID, createdEntWithPk.pk, createdEntWithPk.Permissions)
if err != nil {
return NetworkEntity{}, err
}
allAssociatedEntsByTk, err := store.createEdges(networkID, createdEntWithPk)
if err != nil {
return NetworkEntity{}, err
}
newGraphID, err := store.mergeGraphs(createdEntWithPk, allAssociatedEntsByTk)
if err != nil {
return NetworkEntity{}, err
}
createdEntWithPk.GraphID = newGraphID
// If we were given duplicate edges, get rid of those
if !funk.IsEmpty(createdEntWithPk.Associations) {
createdEntWithPk.Associations = funk.Chain(createdEntWithPk.Associations).
Map(func(id *EntityID) storage.TypeAndKey { return id.ToTypeAndKey() }).
Uniq().
Map(func(tk storage.TypeAndKey) *EntityID { return (&EntityID{}).FromTypeAndKey(tk) }).
Value().([]*EntityID)
}
createdEntWithPk.NetworkID = networkID
return createdEntWithPk.NetworkEntity, nil
}
func (store *sqlConfiguratorStorage) UpdateEntity(networkID string, update EntityUpdateCriteria) (NetworkEntity, error) {
emptyRet := NetworkEntity{Type: update.Type, Key: update.Key}
entToUpdate, err := store.loadEntToUpdate(networkID, update)
if err != nil && !update.DeleteEntity {
return emptyRet, errors.Wrap(err, "failed to load entity being updated")
}
if entToUpdate == nil {
return emptyRet, nil
}
if update.DeleteEntity {
// Cascading FK relations in the schema will handle the other tables
_, err := store.builder.Delete(entityTable).
Where(sq.And{
sq.Eq{entNidCol: networkID},
sq.Eq{entTypeCol: update.Type},
sq.Eq{entKeyCol: update.Key},
}).
RunWith(store.tx).
Exec()
if err != nil {
return emptyRet, errors.Wrapf(err, "failed to delete entity (%s, %s)", update.Type, update.Key)
}
// Deleting a node could partition its graph
err = store.fixGraph(networkID, entToUpdate.GraphID, entToUpdate)
if err != nil {
return emptyRet, errors.Wrap(err, "failed to fix entity graph after deletion")
}
return emptyRet, nil
}
// Then, update the fields on the entity table
entToUpdate.NetworkID = networkID
err = store.processEntityFieldsUpdate(entToUpdate.pk, update, &entToUpdate.NetworkEntity)
if err != nil {
return entToUpdate.NetworkEntity, errors.WithStack(err)
}
// Next, update permissions
err = store.processPermissionUpdates(networkID, entToUpdate.pk, update, &entToUpdate.NetworkEntity)
if err != nil {
return entToUpdate.NetworkEntity, errors.WithStack(err)
}
// Finally, process edge updates for the graph
err = store.processEdgeUpdates(networkID, update, entToUpdate)
if err != nil {
return entToUpdate.NetworkEntity, errors.WithStack(err)
}
return entToUpdate.NetworkEntity, nil
}
func (store *sqlConfiguratorStorage) LoadGraphForEntity(networkID string, entityID EntityID, loadCriteria EntityLoadCriteria) (EntityGraph, error) {
// Technically you could do this in one DB query with a subquery in the
// WHERE when selecting from the entity table.
// But until we hit some kind of scaling limit, let's keep the code simple
// and delegate to LoadGraph after loading the requested entity.
// We just care about getting the graph ID off this entity, so we use an
// empty load criteria.
loadResult, err := store.loadFromEntitiesTable(networkID, EntityLoadFilter{IDs: []*EntityID{&entityID}}, EntityLoadCriteria{})
if err != nil {
return EntityGraph{}, errors.Wrap(err, "failed to load entity for graph query")
}
var loadedEnt *NetworkEntity
for _, ent := range loadResult {
loadedEnt = ent
}
if loadedEnt == nil {
return EntityGraph{}, errors.Errorf("could not find requested entity (%s) for graph query", entityID.String())
}
internalGraph, err := store.loadGraphInternal(networkID, loadedEnt.GraphID, loadCriteria)
if err != nil {
return EntityGraph{}, errors.WithStack(err)
}
rootPks := findRootNodes(internalGraph)
if funk.IsEmpty(rootPks) {
return EntityGraph{}, errors.Errorf("graph does not have root nodes because it is a ring")
}
// Fill entities with assocs. We will always fill out both directions of
// associations so we'll alter the load criteria for the helper function.
entTksByPk := funk.Map(
internalGraph.entsByPk,
func(pk string, ent *NetworkEntity) (string, storage.TypeAndKey) { return pk, ent.GetTypeAndKey() },
).(map[string]storage.TypeAndKey)
loadCriteria.LoadAssocsToThis, loadCriteria.LoadAssocsFromThis = true, true
_, edges, err := updateEntitiesWithAssocs(internalGraph.entsByPk, internalGraph.edges, entTksByPk, loadCriteria)
if err != nil {
return EntityGraph{}, errors.Wrap(err, "failed to construct graph after loading")
}
// To make testing easier, we'll order the returned entities by TK
retEnts := funk.Map(internalGraph.entsByPk, func(_ string, ent *NetworkEntity) *NetworkEntity { return ent }).([]*NetworkEntity)
retRoots := funk.Map(rootPks, func(pk string) *EntityID { return &EntityID{Type: entTksByPk[pk].Type, Key: entTksByPk[pk].Key} }).([]*EntityID)
sort.Slice(retEnts, func(i, j int) bool {
return storage.IsTKLessThan(retEnts[i].GetTypeAndKey(), retEnts[j].GetTypeAndKey())
})
sort.Slice(retRoots, func(i, j int) bool {
return storage.IsTKLessThan(retRoots[i].ToTypeAndKey(), retRoots[j].ToTypeAndKey())
})
return EntityGraph{
Entities: retEnts,
RootEntities: retRoots,
Edges: edges,
}, nil
}
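// Illustrative sketch only (not part of the storage implementation): how a caller
// in this package might combine the methods above, creating an entity and then
// loading the graph it belongs to. The network ID, entity type and key are
// assumed values for the example.
func exampleCreateAndLoadGraph(store *sqlConfiguratorStorage) (EntityGraph, error) {
	// CreateEntity inserts the entity, its ACLs and its edges, and merges any
	// graphs that the new entity bridges.
	created, err := store.CreateEntity("example_network", NetworkEntity{Type: "gateway", Key: "gw1"})
	if err != nil {
		return EntityGraph{}, err
	}
	// Load the entire graph the new entity belongs to, using an empty load criteria.
	return store.LoadGraphForEntity(
		"example_network",
		EntityID{Type: created.Type, Key: created.Key},
		EntityLoadCriteria{},
	)
}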
|
[
"\"SQL_DRIVER\""
] |
[] |
[
"SQL_DRIVER"
] |
[]
|
["SQL_DRIVER"]
|
go
| 1 | 0 | |
CTFd/config.py
|
import os
''' GENERATE SECRET KEY '''
if not os.getenv('SECRET_KEY'):
# Attempt to read the secret from the secret file
# This will fail if the secret has not been written
try:
with open('.ctfd_secret_key', 'rb') as secret:
key = secret.read()
except (OSError, IOError):
key = None
if not key:
key = os.urandom(64)
# Attempt to write the secret file
# This will fail if the filesystem is read-only
try:
with open('.ctfd_secret_key', 'wb') as secret:
secret.write(key)
secret.flush()
except (OSError, IOError):
pass
''' SERVER SETTINGS '''
class Config(object):
"""
CTFd Configuration Object
"""
'''
=== REQUIRED SETTINGS ===
SECRET_KEY:
The secret value used to create sessions and sign strings. This should be set to a random string. In the
interest of ease, CTFd will automatically create a secret key file for you. If you wish to add this secret key
to your instance you should hard code this value to a random static value.
You can also remove .ctfd_secret_key from the .gitignore file and commit this file into whatever repository
you are using.
http://flask.pocoo.org/docs/latest/quickstart/#sessions
SQLALCHEMY_DATABASE_URI:
The URI that specifies the username, password, hostname, port, and database of the server
used to hold the CTFd database.
e.g. mysql+pymysql://root:<YOUR_PASSWORD_HERE>@localhost/ctfd
CACHE_TYPE:
Specifies how CTFd should cache configuration values. If CACHE_TYPE is set to 'redis', CTFd will make use
of the REDIS_URL specified in environment variables. You can also choose to hardcode the REDIS_URL here.
It is important that you specify some sort of cache as CTFd uses it to store values received from the database. If
no cache is specified, CTFd will default to a simple per-worker cache. The simple cache cannot be effectively used
with multiple workers.
REDIS_URL is the URL to connect to a Redis server.
e.g. redis://user:password@localhost:6379
http://pythonhosted.org/Flask-Caching/#configuring-flask-caching
'''
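# Illustrative only (the values below are assumptions, not CTFd defaults): these
# required settings are normally supplied as environment variables before the
# application starts, for example:
#
#   export SECRET_KEY="some-long-random-string"
#   export DATABASE_URL="mysql+pymysql://root:<YOUR_PASSWORD_HERE>@localhost/ctfd"
#   export REDIS_URL="redis://user:password@localhost:6379"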
SECRET_KEY = os.getenv('SECRET_KEY') or key
DATABASE_URL = os.getenv('DATABASE_URL') or 'sqlite:///{}/ctfd.db'.format(os.path.dirname(os.path.abspath(__file__)))
REDIS_URL = os.getenv('REDIS_URL')
SQLALCHEMY_DATABASE_URI = DATABASE_URL
CACHE_REDIS_URL = REDIS_URL
if CACHE_REDIS_URL:
CACHE_TYPE = 'redis'
else:
CACHE_TYPE = 'filesystem'
CACHE_DIR = os.path.join(os.path.dirname(__file__), os.pardir, '.data', 'filesystem_cache')
'''
=== SECURITY ===
SESSION_COOKIE_HTTPONLY:
Controls if cookies should be set with the HttpOnly flag.
PERMANENT_SESSION_LIFETIME:
The lifetime of a session. The default is 604800 seconds.
TRUSTED_PROXIES:
Defines a set of regular expressions used for finding a user's IP address if the CTFd instance
is behind a proxy. If you are running a CTF and users are on the same network as you, you may choose to remove
some proxies from the list.
CTFd only uses IP addresses for cursory tracking purposes. It is ill-advised to do anything complicated based
solely on IP addresses unless you know what you are doing.
'''
SESSION_COOKIE_HTTPONLY = (not os.getenv("SESSION_COOKIE_HTTPONLY")) # Defaults True
PERMANENT_SESSION_LIFETIME = int(os.getenv("PERMANENT_SESSION_LIFETIME") or 604800) # 7 days in seconds
TRUSTED_PROXIES = [
r'^127\.0\.0\.1$',
# Remove the following proxies if you do not trust the local network
# For example if you are running a CTF on your laptop and the teams are
# all on the same network
r'^::1$',
r'^fc00:',
r'^10\.',
r'^172\.(1[6-9]|2[0-9]|3[0-1])\.',
r'^192\.168\.'
]
'''
=== EMAIL ===
MAILFROM_ADDR:
The email address that emails are sent from if not overridden in the configuration panel.
MAIL_SERVER:
The mail server that emails are sent from if not overridden in the configuration panel.
MAIL_PORT:
The mail port that emails are sent from if not overridden in the configuration panel.
'''
MAILFROM_ADDR = os.getenv("MAILFROM_ADDR") or "[email protected]"
MAIL_SERVER = os.getenv("MAIL_SERVER") or None
MAIL_PORT = os.getenv("MAIL_PORT")
MAIL_USERNAME = os.getenv("MAIL_USERNAME")
MAIL_PASSWORD = os.getenv("MAIL_PASSWORD")
MAIL_TLS = os.getenv("MAIL_TLS") or False
MAIL_SSL = os.getenv("MAIL_SSL") or False
MAILGUN_API_KEY = os.getenv("MAILGUN_API_KEY")
MAILGUN_BASE_URL = os.getenv("MAILGUN_BASE_URL")
'''
=== LOGS ===
LOG_FOLDER:
The location where logs are written. These are the logs for CTFd key submissions, registrations, and logins.
The default location is the CTFd/logs folder.
'''
LOG_FOLDER = os.getenv('LOG_FOLDER') or os.path.join(os.path.dirname(os.path.abspath(__file__)), 'logs')
'''
=== UPLOADS ===
UPLOAD_PROVIDER:
Specifies the service that CTFd should use to store files.
UPLOAD_FOLDER:
The location where files are uploaded. The default destination is the CTFd/uploads folder.
AWS_ACCESS_KEY_ID:
AWS access token used to authenticate to the S3 bucket.
AWS_SECRET_ACCESS_KEY:
AWS secret token used to authenticate to the S3 bucket.
AWS_S3_BUCKET:
The unique identifier for your S3 bucket.
AWS_S3_ENDPOINT_URL:
A URL pointing to a custom S3 implementation.
'''
UPLOAD_PROVIDER = os.getenv('UPLOAD_PROVIDER') or 'filesystem'
UPLOAD_FOLDER = os.getenv('UPLOAD_FOLDER') or os.path.join(os.path.dirname(os.path.abspath(__file__)), 'uploads')
if UPLOAD_PROVIDER == 's3':
AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY')
AWS_S3_BUCKET = os.getenv('AWS_S3_BUCKET')
AWS_S3_ENDPOINT_URL = os.getenv('AWS_S3_ENDPOINT_URL')
'''
=== OPTIONAL ===
REVERSE_PROXY:
Specifies whether CTFd is behind a reverse proxy or not. Set to True if using a reverse proxy like nginx.
TEMPLATES_AUTO_RELOAD:
Specifies whether Flask should check for modifications to templates and reload them automatically.
SQLALCHEMY_TRACK_MODIFICATIONS:
Automatically disabled to suppress warnings and save memory. You should only enable this if you need it.
UPDATE_CHECK:
Specifies whether or not CTFd will check for a new version of CTFd
APPLICATION_ROOT:
Specifies what path CTFd is mounted under. It can be used to run CTFd in a subdirectory.
Example: /ctfd
'''
REVERSE_PROXY = os.getenv("REVERSE_PROXY") or False
TEMPLATES_AUTO_RELOAD = (not os.getenv("TEMPLATES_AUTO_RELOAD")) # Defaults True
SQLALCHEMY_TRACK_MODIFICATIONS = (not os.getenv("SQLALCHEMY_TRACK_MODIFICATIONS")) # Defaults True
UPDATE_CHECK = (not os.getenv("UPDATE_CHECK")) # Defaults True
APPLICATION_ROOT = os.getenv('APPLICATION_ROOT') or '/'
'''
=== OAUTH ===
MajorLeagueCyber Integration
Register an event at https://majorleaguecyber.org/ and use the Client ID and Client Secret here
'''
OAUTH_CLIENT_ID = os.getenv("OAUTH_CLIENT_ID")
OAUTH_CLIENT_SECRET = os.getenv("OAUTH_CLIENT_SECRET")
class TestingConfig(Config):
SECRET_KEY = 'AAAAAAAAAAAAAAAAAAAA'
PRESERVE_CONTEXT_ON_EXCEPTION = False
TESTING = True
DEBUG = True
SQLALCHEMY_DATABASE_URI = os.getenv('TESTING_DATABASE_URL') or 'sqlite://'
SERVER_NAME = 'localhost'
UPDATE_CHECK = False
REDIS_URL = None
CACHE_TYPE = 'simple'
SAFE_MODE = True
|
[] |
[] |
[
"MAILGUN_API_KEY",
"REVERSE_PROXY",
"LOG_FOLDER",
"UPLOAD_FOLDER",
"SECRET_KEY",
"OAUTH_CLIENT_ID",
"APPLICATION_ROOT",
"DATABASE_URL",
"UPLOAD_PROVIDER",
"SESSION_COOKIE_HTTPONLY",
"MAILGUN_BASE_URL",
"UPDATE_CHECK",
"AWS_S3_ENDPOINT_URL",
"MAIL_PORT",
"MAIL_TLS",
"PERMANENT_SESSION_LIFETIME",
"MAIL_USERNAME",
"OAUTH_CLIENT_SECRET",
"MAILFROM_ADDR",
"SQLALCHEMY_TRACK_MODIFICATIONS",
"MAIL_SERVER",
"AWS_SECRET_ACCESS_KEY",
"TEMPLATES_AUTO_RELOAD",
"MAIL_PASSWORD",
"TESTING_DATABASE_URL",
"MAIL_SSL",
"AWS_ACCESS_KEY_ID",
"REDIS_URL",
"AWS_S3_BUCKET"
] |
[]
|
["MAILGUN_API_KEY", "REVERSE_PROXY", "LOG_FOLDER", "UPLOAD_FOLDER", "SECRET_KEY", "OAUTH_CLIENT_ID", "APPLICATION_ROOT", "DATABASE_URL", "UPLOAD_PROVIDER", "SESSION_COOKIE_HTTPONLY", "MAILGUN_BASE_URL", "UPDATE_CHECK", "AWS_S3_ENDPOINT_URL", "MAIL_PORT", "MAIL_TLS", "PERMANENT_SESSION_LIFETIME", "MAIL_USERNAME", "OAUTH_CLIENT_SECRET", "MAILFROM_ADDR", "SQLALCHEMY_TRACK_MODIFICATIONS", "MAIL_SERVER", "AWS_SECRET_ACCESS_KEY", "TEMPLATES_AUTO_RELOAD", "MAIL_PASSWORD", "TESTING_DATABASE_URL", "MAIL_SSL", "AWS_ACCESS_KEY_ID", "REDIS_URL", "AWS_S3_BUCKET"]
|
python
| 29 | 0 | |
esmvaltool/cmorizers/obs/utilities.py
|
"""Utils module for Python cmorizers."""
from pathlib import Path
import datetime
import logging
import os
import re
from contextlib import contextmanager
import iris
import numpy as np
import yaml
from cf_units import Unit
from dask import array as da
from esmvalcore.cmor.table import CMOR_TABLES
from esmvaltool import __version__ as version, __file__ as esmvaltool_file
logger = logging.getLogger(__name__)
REFERENCES_PATH = Path(esmvaltool_file).absolute().parent / 'references'
def add_height2m(cube):
"""Add scalar coordinate 'height' with value of 2m."""
add_scalar_height_coord(cube, height=2.)
def add_scalar_height_coord(cube, height=2.):
"""Add scalar coordinate 'height' with value of `height`m."""
logger.debug("Adding height coordinate (%sm)", height)
height_coord = iris.coords.AuxCoord(
height,
var_name='height',
standard_name='height',
long_name='height',
units=Unit('m'),
attributes={'positive': 'up'})
cube.add_aux_coord(height_coord, ())
@contextmanager
def constant_metadata(cube):
"""Do cube math without modifying units etc."""
metadata = cube.metadata
yield metadata
cube.metadata = metadata
def convert_timeunits(cube, start_year):
"""Convert time axis from malformed Year 0."""
# TODO any more weird cases?
if cube.coord('time').units == 'months since 0000-01-01 00:00:00':
real_unit = 'months since {}-01-01 00:00:00'.format(str(start_year))
elif cube.coord('time').units == 'days since 0000-01-01 00:00:00':
real_unit = 'days since {}-01-01 00:00:00'.format(str(start_year))
elif cube.coord('time').units == 'days since 1950-1-1':
real_unit = 'days since 1950-1-1 00:00:00'
else:
real_unit = cube.coord('time').units
cube.coord('time').units = real_unit
return cube
def fix_coords(cube, overwrite_time_bounds=True, overwrite_lon_bounds=True,
overwrite_lat_bounds=True, overwrite_lev_bounds=True,
overwrite_airpres_bounds=True):
"""
Fix coordinates to CMOR standards.
Fixes coordinates, e.g. time, to have correct units, bounds etc.;
longitude to be CMOR-compliant 0-360 deg; fixes some attributes
and bounds - the user can avoid bounds fixing by using the supplied
arguments; if bounds are None they will be fixed regardless.
Parameters
----------
cube: iris.cube.Cube
data cube with coordinates to be fixed.
overwrite_time_bounds: bool (optional)
set to False not to overwrite time bounds.
overwrite_lon_bounds: bool (optional)
set to False not to overwrite longitude bounds.
overwrite_lat_bounds: bool (optional)
set to False not to overwrite latitude bounds.
overwrite_lev_bounds: bool (optional)
set to False not to overwrite depth bounds.
overwrite_airpres_bounds: bool (optional)
set to False not to overwrite air pressure bounds.
Returns
-------
cube: iris.cube.Cube
data cube with fixed coordinates.
"""
# first fix any completely missing coord var names
_fix_dim_coordnames(cube)
# fix individual coords
for cube_coord in cube.coords():
# fix time
if cube_coord.var_name == 'time':
logger.info("Fixing time...")
cube.coord('time').convert_units(
Unit('days since 1950-1-1 00:00:00', calendar='gregorian'))
if overwrite_time_bounds or not cube.coord('time').has_bounds():
_fix_bounds(cube, cube.coord('time'))
# fix longitude
if cube_coord.var_name == 'lon':
logger.info("Fixing longitude...")
if cube_coord.ndim == 1:
if cube_coord.points[0] < 0. and \
cube_coord.points[-1] < 181.:
cube_coord.points = \
cube_coord.points + 180.
if overwrite_lon_bounds or not cube_coord.has_bounds():
_fix_bounds(cube, cube_coord)
cube.attributes['geospatial_lon_min'] = 0.
cube.attributes['geospatial_lon_max'] = 360.
nlon = len(cube_coord.points)
_roll_cube_data(cube, nlon // 2, -1)
# fix latitude
if cube_coord.var_name == 'lat':
logger.info("Fixing latitude...")
if overwrite_lat_bounds or not cube.coord('latitude').has_bounds():
_fix_bounds(cube, cube.coord('latitude'))
# fix depth
if cube_coord.var_name == 'lev':
logger.info("Fixing depth...")
if overwrite_lev_bounds or not cube.coord('depth').has_bounds():
_fix_bounds(cube, cube.coord('depth'))
# fix air_pressure
if cube_coord.var_name == 'air_pressure':
logger.info("Fixing air pressure...")
if overwrite_airpres_bounds \
or not cube.coord('air_pressure').has_bounds():
_fix_bounds(cube, cube.coord('air_pressure'))
# remove CS
cube.coord('latitude').coord_system = None
cube.coord('longitude').coord_system = None
return cube
def fix_var_metadata(cube, var_info):
"""Fix var metadata from CMOR table."""
if var_info.standard_name == '':
cube.standard_name = None
else:
cube.standard_name = var_info.standard_name
cube.var_name = var_info.short_name
cube.long_name = var_info.long_name
_set_units(cube, var_info.units)
return cube
def flip_dim_coord(cube, coord_name):
"""Flip (reverse) dimensional coordinate of cube."""
logger.info("Flipping dimensional coordinate %s...", coord_name)
coord = cube.coord(coord_name, dim_coords=True)
coord_idx = cube.coord_dims(coord)[0]
coord.points = np.flip(coord.points)
if coord.bounds is not None:
coord.bounds = np.flip(coord.bounds, axis=0)
cube.data = da.flip(cube.core_data(), axis=coord_idx)
def read_cmor_config(dataset):
"""Read the associated dataset-specific config file."""
reg_path = os.path.join(os.path.dirname(__file__), 'cmor_config',
dataset + '.yml')
with open(reg_path, 'r') as file:
cfg = yaml.safe_load(file)
cfg['cmor_table'] = \
CMOR_TABLES[cfg['attributes']['project_id']]
if 'comment' not in cfg['attributes']:
cfg['attributes']['comment'] = ''
return cfg
def save_variable(cube, var, outdir, attrs, **kwargs):
"""Saver function."""
_fix_dtype(cube)
# CMOR standard
try:
time = cube.coord('time')
except iris.exceptions.CoordinateNotFoundError:
time_suffix = None
else:
if len(time.points) == 1 and "mon" not in cube.attributes.get('mip'):
year = str(time.cell(0).point.year)
time_suffix = '-'.join([year + '01', year + '12'])
else:
date1 = str(time.cell(0).point.year) + '%02d' % \
time.cell(0).point.month
date2 = str(time.cell(-1).point.year) + '%02d' % \
time.cell(-1).point.month
time_suffix = '-'.join([date1, date2])
name_elements = [
attrs['project_id'],
attrs['dataset_id'],
attrs['modeling_realm'],
attrs['version'],
attrs['mip'],
var,
]
if time_suffix:
name_elements.append(time_suffix)
file_name = '_'.join(name_elements) + '.nc'
file_path = os.path.join(outdir, file_name)
logger.info('Saving: %s', file_path)
status = 'lazy' if cube.has_lazy_data() else 'realized'
logger.info('Cube has %s data [lazy is preferred]', status)
iris.save(cube, file_path, fill_value=1e20, **kwargs)
def extract_doi_value(tags):
"""Extract doi(s) from a bibtex entry."""
reference_doi = []
pattern = r'doi\ = {(.*?)\},'
if not isinstance(tags, list):
tags = [tags]
for tag in tags:
bibtex_file = REFERENCES_PATH / f'{tag}.bibtex'
if bibtex_file.is_file():
reference_entry = bibtex_file.read_text()
if re.search("doi", reference_entry):
reference_doi.append(
f'doi:{re.search(pattern, reference_entry).group(1)}'
)
else:
reference_doi.append('doi not found')
logger.warning(
'The reference file %s does not have a doi.', bibtex_file
)
else:
reference_doi.append('doi not found')
logger.warning(
'The reference file %s does not exist.', bibtex_file
)
return ', '.join(reference_doi)
def set_global_atts(cube, attrs):
"""Complete the cmorized file with global metadata."""
logger.debug("Setting global metadata...")
attrs = dict(attrs)
cube.attributes.clear()
timestamp = datetime.datetime.utcnow()
timestamp_format = "%Y-%m-%d %H:%M:%S"
now_time = timestamp.strftime(timestamp_format)
# Necessary attributes
try:
glob_dict = {
'title': (f"{attrs.pop('dataset_id')} data reformatted for "
f"ESMValTool v{version}"),
'version':
attrs.pop('version'),
'tier':
str(attrs.pop('tier')),
'source':
attrs.pop('source'),
'reference':
extract_doi_value(attrs.pop('reference')),
'comment':
attrs.pop('comment'),
'user':
os.environ.get("USER", "unknown user"),
'host':
os.environ.get("HOSTNAME", "unknown host"),
'history':
f'Created on {now_time}',
'project_id':
attrs.pop('project_id'),
}
except KeyError:
raise KeyError(
"All CMORized datasets need the global attributes 'dataset_id', "
"'version', 'tier', 'source', 'reference', 'comment' and "
"'project_id' specified in the configuration file")
# Additional attributes
glob_dict.update(attrs)
cube.attributes = glob_dict
def var_name_constraint(var_name):
""":mod:`iris.Constraint` using `var_name` of an :mod:`iris.cube.Cube`."""
return iris.Constraint(cube_func=lambda c: c.var_name == var_name)
def _fix_bounds(cube, dim_coord):
"""Reset and fix all bounds."""
if len(cube.coord(dim_coord).points) > 1:
if cube.coord(dim_coord).has_bounds():
cube.coord(dim_coord).bounds = None
cube.coord(dim_coord).guess_bounds()
if cube.coord(dim_coord).has_bounds():
cube.coord(dim_coord).bounds = da.array(
cube.coord(dim_coord).core_bounds(), dtype='float64')
return cube
def _fix_dim_coordnames(cube):
"""Perform a check on dim coordinate names."""
# first check for CMOR standard coord;
for coord in cube.coords():
# guess the CMOR-standard x, y, z and t axes if not there
coord_type = iris.util.guess_coord_axis(coord)
try:
coord = cube.coord(axis=coord_type)
except iris.exceptions.CoordinateNotFoundError:
logger.warning(
'Multiple coordinates for axis %s. '
'This may be an error, especially for regular grids',
coord_type
)
continue
if coord_type == 'T':
coord.var_name = 'time'
coord.attributes = {}
if coord_type == 'X':
coord.var_name = 'lon'
coord.standard_name = 'longitude'
coord.long_name = 'longitude coordinate'
coord.units = Unit('degrees')
coord.attributes = {}
if coord_type == 'Y':
coord.var_name = 'lat'
coord.standard_name = 'latitude'
coord.long_name = 'latitude coordinate'
coord.units = Unit('degrees')
coord.attributes = {}
if coord_type == 'Z':
if coord.var_name == 'depth':
coord.standard_name = 'depth'
coord.long_name = \
'ocean depth coordinate'
coord.var_name = 'lev'
coord.attributes['positive'] = 'down'
if coord.var_name == 'pressure':
coord.standard_name = 'air_pressure'
coord.long_name = 'pressure'
coord.var_name = 'air_pressure'
coord.attributes['positive'] = 'up'
return cube
def _fix_dtype(cube):
"""Fix `dtype` of a cube and its coordinates."""
if cube.dtype != np.float32:
logger.info("Converting data type of data from '%s' to 'float32'",
cube.dtype)
cube.data = cube.core_data().astype(np.float32, casting='same_kind')
for coord in cube.coords():
if coord.dtype != np.float64:
logger.info(
"Converting data type of coordinate points of '%s' from '%s' "
"to 'float64'", coord.name(), coord.dtype)
coord.points = coord.core_points().astype(np.float64,
casting='same_kind')
if coord.has_bounds() and coord.bounds_dtype != np.float64:
logger.info(
"Converting data type of coordinate bounds of '%s' from '%s' "
"to 'float64'", coord.name(), coord.bounds_dtype)
coord.bounds = coord.core_bounds().astype(np.float64,
casting='same_kind')
def _roll_cube_data(cube, shift, axis):
"""Roll a cube data on specified axis."""
cube.data = da.roll(cube.core_data(), shift, axis=axis)
return cube
def _set_units(cube, units):
"""Set units in compliance with cf_unit."""
special = {'psu': 1.e-3, 'Sv': '1e6 m3 s-1'}
if units in list(special.keys()):
cube.units = special[units]
else:
cube.units = Unit(units)
return cube
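# The helpers above are typically chained by dataset-specific cmorizers. The
# following sketch is illustrative only: the argument names are assumptions and
# the function is not called anywhere in ESMValTool itself.
def _example_cmorization_flow(cube, var_info, short_name, out_dir, attrs):
    """Illustrative sketch of a typical cmorization flow using this module."""
    cube = fix_coords(cube)
    cube = fix_var_metadata(cube, var_info)
    set_global_atts(cube, attrs)
    save_variable(cube, short_name, out_dir, attrs)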
|
[] |
[] |
[
"USER",
"HOSTNAME"
] |
[]
|
["USER", "HOSTNAME"]
|
python
| 2 | 0 | |
internal/compose/mounts.go
|
// Copyright 2020 Bloomberg Finance L.P.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package compose
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"gopkg.in/yaml.v2"
)
func doSourceMounts(cfg cmpConfig, goList goList, goPath []string) (
args []string, cleanup func() error, err error,
) {
noop := func() error { return nil }
mountsCfg, err := newMountsCfg(cfg, goList, goPath)
if err != nil {
return nil, noop, err
}
if mountsCfg == nil {
return nil, noop, nil
}
mountsFile, err := ioutil.TempFile(".", "docket-source-mounts.*.yaml")
if err != nil {
return nil, noop, fmt.Errorf("failed to create source mounts yaml: %w", err)
}
cleanup = func() error {
if os.Getenv("DOCKET_KEEP_MOUNTS_FILE") != "" {
tracef("Leaving %s alone\n", mountsFile.Name())
return nil
}
return os.Remove(mountsFile.Name())
}
defer func() {
if closeErr := mountsFile.Close(); closeErr != nil {
args = nil
err = closeErr
}
}()
enc := yaml.NewEncoder(mountsFile)
defer func() {
if closeErr := enc.Close(); closeErr != nil {
args = nil
err = closeErr
}
}()
if err := enc.Encode(mountsCfg); err != nil {
return nil, noop, fmt.Errorf("failed to encode yaml: %w", err)
}
return []string{"--file", mountsFile.Name()}, cleanup, nil
}
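// Usage note (illustrative, not an additional API): doSourceMounts normally removes
// the generated docket-source-mounts.*.yaml during cleanup; exporting a non-empty
// DOCKET_KEEP_MOUNTS_FILE keeps the file around for inspection, e.g.
//
//	DOCKET_KEEP_MOUNTS_FILE=1 go test ./...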
var errMultipleGOPATHs = fmt.Errorf("docket doesn't support multipart GOPATHs")
type mountsFunc func(goList, []string) ([]cmpVolume, string, error)
// newMountsCfg makes a cmpConfig to bind mount Go sources for the services that need them.
func newMountsCfg(originalCfg cmpConfig, goList goList, goPath []string) (*cmpConfig, error) {
mountsCfg := cmpConfig{
Version: "3.2",
Services: map[string]cmpService{},
Networks: nil,
}
if len(goPath) != 1 {
return nil, errMultipleGOPATHs
}
var mountsFunc mountsFunc
if goList.Module == nil {
mountsFunc = mountsForGOPATHMode
} else {
mountsFunc = mountsForModuleMode
}
volumes, workingDir, err := mountsFunc(goList, goPath)
if err != nil {
return nil, err
}
for name, svc := range originalCfg.Services {
if _, mountGoSources, err := parseDocketLabel(svc); err != nil {
return nil, err
} else if mountGoSources {
mountsCfg.Services[name] = cmpService{
Command: nil,
Environment: nil,
Image: "",
Labels: nil,
Volumes: volumes,
WorkingDir: workingDir,
}
}
}
if len(mountsCfg.Services) == 0 {
return nil, nil
}
return &mountsCfg, nil
}
func mountsForGOPATHMode(goList goList, goPath []string) ([]cmpVolume, string, error) {
const goPathTarget = "/go"
pkgName, err := findPackageNameFromDirAndGOPATH(goList.Dir, goPath)
if err != nil {
return nil, "", err
}
volumes := []cmpVolume{
{
Type: "bind",
Source: goPath[0],
Target: goPathTarget,
},
}
workingDir := fmt.Sprintf("%s/src/%s", goPathTarget, pkgName)
return volumes, workingDir, nil
}
func mountsForModuleMode(goList goList, goPath []string) ([]cmpVolume, string, error) {
const goPathTarget = "/go"
const goModuleDirTarget = "/go-module-dir"
pathInsideModule, err := filepath.Rel(goList.Module.Dir, goList.Dir)
if err != nil {
return nil, "", fmt.Errorf("failed filepath.Rel: %w", err)
}
volumes := []cmpVolume{
{
Type: "bind",
Source: filepath.Join(goPath[0], "pkg", "mod"),
Target: fmt.Sprintf("%s/pkg/mod", goPathTarget),
},
{
Type: "bind",
Source: goList.Module.Dir,
Target: goModuleDirTarget,
},
}
workingDir := fmt.Sprintf("%s/%s", goModuleDirTarget, filepath.ToSlash(pathInsideModule))
return volumes, workingDir, nil
}
|
[
"\"DOCKET_KEEP_MOUNTS_FILE\""
] |
[] |
[
"DOCKET_KEEP_MOUNTS_FILE"
] |
[]
|
["DOCKET_KEEP_MOUNTS_FILE"]
|
go
| 1 | 0 | |
config/wsgi.py
|
"""
WSGI config for django_gumroad project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
import sys
from pathlib import Path
from django.core.wsgi import get_wsgi_application
# This allows easy placement of apps within the interior
# django_gumroad directory.
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent
sys.path.append(str(ROOT_DIR / "django_gumroad"))
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
[] |
[] |
[
"DJANGO_SETTINGS_MODULE"
] |
[]
|
["DJANGO_SETTINGS_MODULE"]
|
python
| 1 | 0 | |
tools/build_defs/repo/utils.bzl
|
# Copyright 2018 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for manipulating external repositories, once fetched.
### Setup
These utilities are intended to be used by other repository rules. They
can be loaded as follows.
```python
load(
"@bazel_tools//tools/build_defs/repo:utils.bzl",
"workspace_and_buildfile",
"patch",
"update_attrs",
)
```
"""
def workspace_and_buildfile(ctx):
"""Utility function for writing WORKSPACE and, if requested, a BUILD file.
This rule is intended to be used in the implementation function of a
repository rule.
It assumes the parameters `name`, `build_file`, `build_file_content`,
`workspace_file`, and `workspace_file_content` to be
present in `ctx.attr`, the latter four possibly with value None.
Args:
ctx: The repository context of the repository rule calling this utility
function.
"""
if ctx.attr.build_file and ctx.attr.build_file_content:
ctx.fail("Only one of build_file and build_file_content can be provided.")
if ctx.attr.workspace_file and ctx.attr.workspace_file_content:
ctx.fail("Only one of workspace_file and workspace_file_content can be provided.")
if ctx.attr.workspace_file:
ctx.delete("WORKSPACE")
ctx.symlink(ctx.attr.workspace_file, "WORKSPACE")
elif ctx.attr.workspace_file_content:
ctx.delete("WORKSPACE")
ctx.file("WORKSPACE", ctx.attr.workspace_file_content)
else:
ctx.file("WORKSPACE", "workspace(name = \"{name}\")\n".format(name = ctx.name))
if ctx.attr.build_file:
ctx.delete("BUILD.bazel")
ctx.symlink(ctx.attr.build_file, "BUILD.bazel")
elif ctx.attr.build_file_content:
ctx.delete("BUILD.bazel")
ctx.file("BUILD.bazel", ctx.attr.build_file_content)
def _is_windows(ctx):
return ctx.os.name.lower().find("windows") != -1
def _use_native_patch(ctx):
"""If patch_tool is empty and patch_args only contains -p<NUM> options, we use the native patch implementation."""
if ctx.attr.patch_tool:
return False
for arg in ctx.attr.patch_args:
if not arg.startswith("-p"):
return False
return True
def patch(ctx):
"""Implementation of patching an already extracted repository.
This rule is intended to be used in the implementation function of a
repository rule. It assumes that the parameters `patches`, `patch_tool`,
`patch_args`, `patch_cmds` and `patch_cmds_win` are present in `ctx.attr`.
Args:
ctx: The repository context of the repository rule calling this utility
function.
"""
bash_exe = ctx.os.environ["BAZEL_SH"] if "BAZEL_SH" in ctx.os.environ else "bash"
powershell_exe = ctx.os.environ["BAZEL_POWERSHELL"] if "BAZEL_POWERSHELL" in ctx.os.environ else "powershell.exe"
if len(ctx.attr.patches) > 0 or len(ctx.attr.patch_cmds) > 0:
ctx.report_progress("Patching repository")
if _use_native_patch(ctx):
if ctx.attr.patch_args:
strip = int(ctx.attr.patch_args[-1][2:])
else:
strip = 0
for patchfile in ctx.attr.patches:
ctx.patch(patchfile, strip)
else:
for patchfile in ctx.attr.patches:
patch_tool = ctx.attr.patch_tool
if not patch_tool:
patch_tool = "patch"
command = "{patchtool} {patch_args} < {patchfile}".format(
patchtool = patch_tool,
patchfile = ctx.path(patchfile),
patch_args = " ".join([
"'%s'" % arg
for arg in ctx.attr.patch_args
]),
)
st = ctx.execute([bash_exe, "-c", command])
if st.return_code:
fail("Error applying patch %s:\n%s%s" %
(str(patchfile), st.stderr, st.stdout))
if _is_windows(ctx) and hasattr(ctx.attr, "patch_cmds_win") and ctx.attr.patch_cmds_win:
for cmd in ctx.attr.patch_cmds_win:
st = ctx.execute([powershell_exe, "/c", cmd])
if st.return_code:
fail("Error applying patch command %s:\n%s%s" %
(cmd, st.stdout, st.stderr))
else:
for cmd in ctx.attr.patch_cmds:
st = ctx.execute([bash_exe, "-c", cmd])
if st.return_code:
fail("Error applying patch command %s:\n%s%s" %
(cmd, st.stdout, st.stderr))
def update_attrs(orig, keys, override):
"""Utility function for altering and adding the specified attributes to a particular repository rule invocation.
This is used to make a rule reproducible.
Args:
orig: dict of actually set attributes (either explicitly or implicitly)
by a particular rule invocation
keys: complete set of attributes defined on this rule
override: dict of attributes to override or add to orig
Returns:
dict of attributes with the keys from override inserted/updated
"""
result = {}
for key in keys:
if getattr(orig, key) != None:
result[key] = getattr(orig, key)
result["name"] = orig.name
result.update(override)
return result
def maybe(repo_rule, name, **kwargs):
"""Utility function for only adding a repository if it's not already present.
This is to implement safe repositories.bzl macro documented in
https://docs.bazel.build/versions/master/skylark/deploying.html#dependencies.
Args:
repo_rule: repository rule function.
name: name of the repository to create.
**kwargs: remaining arguments that are passed to the repo_rule function.
Returns:
Nothing, defines the repository when needed as a side-effect.
"""
if name not in native.existing_rules():
repo_rule(name = name, **kwargs)
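# Illustrative usage sketch (rule name and URL are assumptions, not part of this
# file): a repositories.bzl macro would typically wrap its rule invocations with
# maybe() so that callers can safely declare the same dependency themselves first:
#
# def my_repositories():
#     maybe(
#         http_archive,
#         name = "com_example_dep",
#         urls = ["https://example.com/dep.tar.gz"],
#         sha256 = "<sha256 of the archive>",
#     )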
def read_netrc(ctx, filename):
"""Utility function to parse at least a basic .netrc file.
Args:
ctx: The repository context of the repository rule calling this utility
function.
filename: the name of the .netrc file to read
Returns:
dict mapping a machine names to a dict with the information provided
about them
"""
# We have to first symlink into the current repository, as ctx.read only
# allows read from the output directory. Alternatively, we could use
# ctx.execute() to call cat(1).
ctx.symlink(filename, ".netrc")
contents = ctx.read(".netrc")
ctx.delete(".netrc")
# Parse the file. This is mainly a token-based update of a simple state
# machine, but we need to keep the line structure to correctly determine
# the end of a `macdef` command.
netrc = {}
currentmachinename = None
currentmachine = {}
macdef = None
currentmacro = ""
cmd = None
for line in contents.splitlines():
if macdef:
# as we're in a macro, just determine if we reached the end.
if line:
currentmacro += line + "\n"
else:
# reached end of macro, add it
currentmachine[macdef] = currentmacro
macdef = None
currentmacro = ""
else:
# Essentially line.split(None) which starlark does not support.
tokens = [
w.strip()
for w in line.split(" ")
if len(w.strip()) > 0
]
for token in tokens:
if cmd:
# we have a command that expects another argument
if cmd == "machine":
# a new machine definition was provided, so save the
# old one, if present
if not currentmachinename == None:
netrc[currentmachinename] = currentmachine
currentmachine = {}
currentmachinename = token
elif cmd == "macdef":
macdef = "macdef %s" % (token,)
# a new macro definition; the documentation says
# "its contents begin with the next .netrc line [...]",
# so should there really be tokens left in the current
# line, they're not part of the macro.
else:
currentmachine[cmd] = token
cmd = None
elif token in [
"machine",
"login",
"password",
"account",
"macdef",
]:
# command takes one argument
cmd = token
elif token == "default":
# defines the default machine; again, store old machine
if not currentmachinename == None:
netrc[currentmachinename] = currentmachine
# We use the empty string for the default machine, as that
# can never be a valid hostname ("default" could be, in the
# default search domain).
currentmachinename = ""
currentmachine = {}
else:
fail("Unexpected token '%s' while reading %s" %
(token, filename))
if not currentmachinename == None:
netrc[currentmachinename] = currentmachine
return netrc
def use_netrc(netrc, urls):
"""compute an auth dict from a parsed netrc file and a list of URLs
Args:
netrc: a netrc file already parsed to a dict, e.g., as obtained from
read_netrc
urls: a list of URLs.
Returns:
dict suitable as auth argument for ctx.download; more precisely, the dict
will map all URLs where the netrc file provides login and password to a
dict containing the corresponding login and password, as well as the
mapping of "type" to "basic"
"""
auth = {}
for url in urls:
schemerest = url.split("://", 1)
if len(schemerest) < 2:
continue
if not (schemerest[0] in ["http", "https"]):
# For other protocols, bazel currently does not support
# authentication. So ignore them.
continue
host = schemerest[1].split("/")[0].split(":")[0]
if not host in netrc:
continue
authforhost = netrc[host]
if "login" in authforhost and "password" in authforhost:
auth[url] = {
"type": "basic",
"login": authforhost["login"],
"password": authforhost["password"],
}
return auth
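# Illustrative example (hostname and credentials are made up): for a .netrc file
# containing
#
#     machine example.com
#     login myuser
#     password mypassword
#
# read_netrc(ctx, "/path/to/.netrc") returns
#     {"example.com": {"login": "myuser", "password": "mypassword"}}
# and use_netrc() with urls = ["https://example.com/archive.tar.gz"] returns
#     {"https://example.com/archive.tar.gz":
#          {"type": "basic", "login": "myuser", "password": "mypassword"}}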
|
[] |
[] |
[
"BAZEL_POWERSHELL",
"BAZEL_SH"
] |
[]
|
["BAZEL_POWERSHELL", "BAZEL_SH"]
|
python
| 2 | 0 | |
bytie/bot.py
|
# bot.py
from messagehandle import message_handlers
import os
import ast
import random
import textwrap
import discord
from dotenv import load_dotenv
load_dotenv()
TOKEN = os.getenv("DISCORD_TOKEN")
client = discord.Client()
@client.event
async def on_ready():
print(f"{client.user.name} has connected to Discord!")
@client.event
async def on_member_join(member):
await member.create_dm()
await member.dm_channel.send(f"Hi {member.name}, welcome to my Discord server!")
@client.event
async def on_group_join(channel, user):
await channel.send(f"{user} is here! :man_detective:")
@client.event
async def on_group_remove(channel, user):
await channel.send(f"{user} is out! :man_detective:")
#@client.event
#async def on_typing(channel: discord.abc.Messageable, user, when):
# if random.random() < 0.01:
# await channel.send(f"{user} is typing something :rolling_eyes:")
@client.event
async def on_message(message):
if message.author == client.user:
return
incoming = message.content
if incoming.startswith("bytie shutdown!"):
await message.channel.send("Goodbye cruel world!")
exit()
for handler in message_handlers:
msg = handler['handler'](incoming)
if not msg:
continue
for part in textwrap.wrap(
msg,
1300,
drop_whitespace=False,
replace_whitespace=False
):
await message.channel.send(part)
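# Illustrative sketch (the handler below is an assumption, not part of bytie): each
# entry in message_handlers is expected to be a dict with a "handler" callable that
# takes the incoming message text and returns a reply string, or a falsy value to skip.
#
# def ping_handler(incoming):
#     return "pong" if incoming.startswith("bytie ping") else None
#
# message_handlers.append({"handler": ping_handler})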
client.run(TOKEN)
|
[] |
[] |
[
"DISCORD_TOKEN"
] |
[]
|
["DISCORD_TOKEN"]
|
python
| 1 | 0 | |
manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "allfeed.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
conans/test/unittests/client/build/cmake_test.py
|
import os
import platform
import shutil
import stat
import sys
import unittest
import mock
import six
from parameterized.parameterized import parameterized
from conans.client import tools
from conans.client.build.cmake import CMake
from conans.client.build.cmake_flags import cmake_in_local_cache_var_name
from conans.client.conf import default_settings_yml
from conans.client.tools.oss import cpu_count
from conans.errors import ConanException
from conans.model.build_info import CppInfo, DepsCppInfo
from conans.model.ref import ConanFileReference
from conans.model.settings import Settings
from conans.test.utils.conanfile import ConanFileMock, MockSettings
from conans.test.utils.test_files import temp_folder
from conans.util.files import load, save
def _format_path_as_cmake(pathstr):
if platform.system() == "Windows":
drive, path = os.path.splitdrive(pathstr)
return drive.upper() + path.replace(os.path.sep, "/")
return pathstr
class CMakeTest(unittest.TestCase):
def setUp(self):
self.tempdir = temp_folder(path_with_spaces=False)
self.tempdir2 = temp_folder(path_with_spaces=False)
def tearDown(self):
shutil.rmtree(self.tempdir)
shutil.rmtree(self.tempdir2)
def config_patch_test(self):
conanfile = ConanFileMock()
conanfile.name = "MyPkg"
conanfile.settings = Settings()
conanfile.source_folder = os.path.join(self.tempdir, "src")
conanfile.build_folder = os.path.join(self.tempdir, "build")
conanfile.package_folder = os.path.join(self.tempdir, "pkg")
conanfile.deps_cpp_info = DepsCppInfo()
msg = "FOLDER: " + _format_path_as_cmake(conanfile.package_folder)
for folder in (conanfile.build_folder, conanfile.package_folder):
save(os.path.join(folder, "file1.cmake"), "Nothing")
save(os.path.join(folder, "file2"), msg)
save(os.path.join(folder, "file3.txt"), msg)
save(os.path.join(folder, "file3.cmake"), msg)
save(os.path.join(folder, "sub", "file3.cmake"), msg)
cmake = CMake(conanfile, generator="Unix Makefiles")
cmake.patch_config_paths()
for folder in (conanfile.build_folder, conanfile.package_folder):
self.assertEqual("Nothing", load(os.path.join(folder, "file1.cmake")))
self.assertEqual(msg, load(os.path.join(folder, "file2")))
self.assertEqual(msg, load(os.path.join(folder, "file3.txt")))
self.assertEqual("FOLDER: ${CONAN_MYPKG_ROOT}",
load(os.path.join(folder, "file3.cmake")))
self.assertEqual("FOLDER: ${CONAN_MYPKG_ROOT}",
load(os.path.join(folder, "sub", "file3.cmake")))
def config_patch_deps_test(self):
conanfile = ConanFileMock()
conanfile.name = "MyPkg"
conanfile.settings = Settings()
conanfile.source_folder = os.path.join(self.tempdir, "src")
conanfile.build_folder = os.path.join(self.tempdir, "build")
conanfile.package_folder = os.path.join(self.tempdir, "pkg")
conanfile.deps_cpp_info = DepsCppInfo()
ref = ConanFileReference.loads("MyPkg1/0.1@user/channel")
cpp_info = CppInfo(self.tempdir2)
conanfile.deps_cpp_info.update(cpp_info, ref.name)
self.tempdir = temp_folder(path_with_spaces=False)
self.assertEqual(list(conanfile.deps_cpp_info.deps), ['MyPkg1'])
self.assertEqual(conanfile.deps_cpp_info['MyPkg1'].rootpath,
self.tempdir2)
msg = "FOLDER: " + _format_path_as_cmake(self.tempdir2)
for folder in (conanfile.build_folder, conanfile.package_folder):
save(os.path.join(folder, "file1.cmake"), "Nothing")
save(os.path.join(folder, "file2"), msg)
save(os.path.join(folder, "file3.txt"), msg)
save(os.path.join(folder, "file3.cmake"), msg)
save(os.path.join(folder, "sub", "file3.cmake"), msg)
cmake = CMake(conanfile, generator="Unix Makefiles")
cmake.patch_config_paths()
for folder in (conanfile.build_folder, conanfile.package_folder):
self.assertEqual("Nothing", load(os.path.join(folder, "file1.cmake")))
self.assertEqual(msg, load(os.path.join(folder, "file2")))
self.assertEqual(msg, load(os.path.join(folder, "file3.txt")))
self.assertEqual("FOLDER: ${CONAN_MYPKG1_ROOT}",
load(os.path.join(folder, "file3.cmake")))
self.assertEqual("FOLDER: ${CONAN_MYPKG1_ROOT}",
load(os.path.join(folder, "sub", "file3.cmake")))
def partial_build_test(self):
conanfile = ConanFileMock()
conanfile.settings = Settings()
conanfile.should_configure = False
conanfile.should_build = False
conanfile.should_install = False
conanfile.should_test = False
cmake = CMake(conanfile, generator="Unix Makefiles")
cmake.configure()
self.assertIsNone(conanfile.command)
cmake.build()
self.assertIsNone(conanfile.command)
cmake.install()
self.assertIsNone(conanfile.command)
conanfile.name = None
cmake.patch_config_paths()
cmake.test()
self.assertIsNone(conanfile.command)
def should_flags_test(self):
conanfile = ConanFileMock()
conanfile.settings = Settings()
conanfile.should_configure = False
conanfile.should_build = True
conanfile.should_install = False
conanfile.should_test = True
conanfile.package_folder = temp_folder()
cmake = CMake(conanfile, generator="Unix Makefiles")
cmake.configure()
self.assertIsNone(conanfile.command)
cmake.build()
self.assertIn("cmake --build %s" %
CMakeTest.scape(". -- -j%i" % cpu_count(output=conanfile.output)),
conanfile.command)
cmake.install()
self.assertNotIn("cmake --build %s" %
CMakeTest.scape(". --target install -- -j%i" %
cpu_count(output=conanfile.output)), conanfile.command)
cmake.test()
self.assertIn("cmake --build %s" %
CMakeTest.scape(". --target test -- -j%i" %
cpu_count(output=conanfile.output)),
conanfile.command)
conanfile.should_build = False
cmake.configure()
self.assertNotIn("cd . && cmake", conanfile.command)
cmake.build()
self.assertNotIn("cmake --build %s" %
CMakeTest.scape(". -- -j%i" % cpu_count(output=conanfile.output)),
conanfile.command)
cmake.install()
self.assertNotIn("cmake --build %s" %
CMakeTest.scape(". --target install -- -j%i" %
cpu_count(output=conanfile.output)), conanfile.command)
cmake.test()
self.assertIn("cmake --build %s" %
CMakeTest.scape(". --target test -- -j%i" %
cpu_count(output=conanfile.output)),
conanfile.command)
conanfile.should_install = True
conanfile.should_test = False
cmake.configure()
self.assertNotIn("cd . && cmake", conanfile.command)
cmake.build()
self.assertNotIn("cmake --build %s" %
CMakeTest.scape(". -- -j%i" % cpu_count(output=conanfile.output)),
conanfile.command)
cmake.install()
self.assertIn("cmake --build %s" %
CMakeTest.scape(". --target install -- -j%i" %
cpu_count(output=conanfile.output)), conanfile.command)
cmake.test()
self.assertNotIn("cmake --build %s" %
CMakeTest.scape(". --target test -- -j%i" %
cpu_count(output=conanfile.output)), conanfile.command)
def cmake_generator_test(self):
conanfile = ConanFileMock()
conanfile.settings = Settings()
with tools.environment_append({"CONAN_CMAKE_GENERATOR": "My CMake Generator"}):
cmake = CMake(conanfile)
self.assertIn('-G "My CMake Generator"', cmake.command_line)
def cmake_generator_platform_test(self):
conanfile = ConanFileMock()
conanfile.settings = Settings()
with tools.environment_append({"CONAN_CMAKE_GENERATOR": "Green Hills MULTI",
"CONAN_CMAKE_GENERATOR_PLATFORM": "My CMake Platform"}):
cmake = CMake(conanfile)
self.assertIn('-G "Green Hills MULTI" -A "My CMake Platform"', cmake.command_line)
def cmake_generator_platform_override_test(self):
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "15"
settings.arch = "x86"
conanfile = ConanFileMock()
conanfile.settings = settings
with tools.environment_append({"CONAN_CMAKE_GENERATOR_PLATFORM": "Win64"}):
cmake = CMake(conanfile)
self.assertIn('-G "Visual Studio 15 2017" -A "Win64"', cmake.command_line)
def cmake_generator_platform_gcc_test(self):
settings = Settings.loads(default_settings_yml)
settings.os = "Linux"
settings.os_build = "Linux"
settings.compiler = "gcc"
settings.compiler.version = "8"
settings.compiler.libcxx = "libstdc++"
settings.arch = "x86"
conanfile = ConanFileMock()
conanfile.settings = settings
cmake = CMake(conanfile)
self.assertIn('-G "Unix Makefiles"', cmake.command_line)
self.assertNotIn('-A', cmake.command_line)
@parameterized.expand([('x86', 'Visual Studio 15 2017'),
('x86_64', 'Visual Studio 15 2017 Win64'),
('armv7', 'Visual Studio 15 2017 ARM')])
def cmake_generator_platform_vs2017_test(self, arch, generator):
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "15"
settings.arch = arch
conanfile = ConanFileMock()
conanfile.settings = settings
cmake = CMake(conanfile)
self.assertIn('-G "%s"' % generator, cmake.command_line)
self.assertNotIn('-A', cmake.command_line)
@parameterized.expand([('x86', 'Win32'),
('x86_64', 'x64'),
('armv7', 'ARM'),
('armv8', 'ARM64')])
def cmake_generator_platform_vs2019_test(self, arch, pf):
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "16"
settings.arch = arch
conanfile = ConanFileMock()
conanfile.settings = settings
cmake = CMake(conanfile)
self.assertIn('-G "Visual Studio 16 2019" -A "%s"' % pf, cmake.command_line)
def cmake_generator_platform_vs2019_with_ninja_test(self):
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "16"
settings.arch = "x86_64"
conanfile = ConanFileMock()
conanfile.settings = settings
cmake = CMake(conanfile, generator="Ninja")
self.assertIn('-G "Ninja"', cmake.command_line)
self.assertNotIn("-A", cmake.command_line)
@parameterized.expand([('arm',),
('ppc',),
('86',)])
def cmake_generator_platform_other_test(self, platform):
conanfile = ConanFileMock()
conanfile.settings = Settings()
with tools.environment_append({"CONAN_CMAKE_GENERATOR": "Green Hills MULTI",
"CONAN_CMAKE_GENERATOR_PLATFORM": platform}):
cmake = CMake(conanfile)
self.assertIn('-G "Green Hills MULTI" -A "%s"' % platform, cmake.command_line)
@parameterized.expand([('Ninja',),
('NMake Makefiles',),
('NMake Makefiles JOM',)
])
def test_generator_platform_with_unsupported_generator(self, generator):
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "15"
settings.arch = "x86"
settings.compiler.toolset = "v140"
conanfile = ConanFileMock()
conanfile.settings = settings
with self.assertRaises(ConanException):
cmake = CMake(conanfile, generator=generator, generator_platform="x64")
cmake.command_line
def cmake_fpic_test(self):
settings = Settings.loads(default_settings_yml)
settings.os = "Linux"
settings.compiler = "gcc"
settings.compiler.version = "6.3"
settings.arch = "x86"
def assert_fpic(the_settings, input_shared, input_fpic, expected_option):
options = []
values = {}
if input_shared is not None:
options.append('"shared": [True, False]')
values["shared"] = input_shared
if input_fpic is not None:
options.append('"fPIC": [True, False]')
values["fPIC"] = input_fpic
conanfile = ConanFileMock(options='{%s}' % ", ".join(options),
options_values=values)
conanfile.settings = the_settings
cmake = CMake(conanfile)
cmake.configure()
if expected_option is not None:
self.assertEqual(cmake.definitions["CONAN_CMAKE_POSITION_INDEPENDENT_CODE"],
expected_option)
else:
self.assertNotIn("CONAN_CMAKE_POSITION_INDEPENDENT_CODE", cmake.definitions)
# Test shared=False and fpic=False
assert_fpic(settings, input_shared=False, input_fpic=False, expected_option="OFF")
# Test shared=True and fpic=False
assert_fpic(settings, input_shared=True, input_fpic=False, expected_option="ON")
# Test shared=True and fpic=True
assert_fpic(settings, input_shared=True, input_fpic=True, expected_option="ON")
# Test shared not defined and fpic=True
assert_fpic(settings, input_shared=None, input_fpic=True, expected_option="ON")
# Test shared not defined and fpic not defined
assert_fpic(settings, input_shared=None, input_fpic=None, expected_option=None)
# Test shared True and fpic not defined
assert_fpic(settings, input_shared=True, input_fpic=None, expected_option=None)
# Test nothing in Windows
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "15"
settings.arch = "x86_64"
assert_fpic(settings, input_shared=True, input_fpic=True, expected_option=None)
def cmake_make_program_test(self):
settings = Settings.loads(default_settings_yml)
settings.os = "Linux"
settings.compiler = "gcc"
settings.compiler.version = "6.3"
settings.arch = "x86"
settings.build_type = "Release"
conanfile = ConanFileMock()
conanfile.settings = settings
conanfile.source_folder = os.path.join(self.tempdir, "my_cache_source_folder")
conanfile.build_folder = os.path.join(self.tempdir, "my_cache_build_folder")
# Existing make
make_path = os.path.join(self.tempdir, "make")
save(make_path, "")
st = os.stat(make_path)
os.chmod(make_path, st.st_mode | stat.S_IEXEC)
with tools.environment_append({"CONAN_MAKE_PROGRAM": make_path}):
cmake = CMake(conanfile)
self.assertEqual(cmake.definitions["CMAKE_MAKE_PROGRAM"], make_path)
# Not existing make
with tools.environment_append({"CONAN_MAKE_PROGRAM": "fake_path/make"}):
cmake = CMake(conanfile)
self.assertNotIn("CMAKE_MAKE_PROGRAM", cmake.definitions)
self.assertIn("The specified make program 'fake_path/make' cannot be found",
conanfile.output)
def folders_test(self):
def quote_var(var):
return "'%s'" % var if platform.system() != "Windows" else var
settings = Settings.loads(default_settings_yml)
settings.os = "Linux"
settings.compiler = "gcc"
settings.compiler.version = "6.3"
settings.arch = "x86"
settings.build_type = "Release"
conanfile = ConanFileMock()
conanfile.settings = settings
conanfile.source_folder = os.path.join(self.tempdir, "my_cache_source_folder")
conanfile.build_folder = os.path.join(self.tempdir, "my_cache_build_folder")
with tools.chdir(self.tempdir):
linux_stuff = '-DCMAKE_SYSTEM_NAME="Linux" ' \
'-DCMAKE_SYSROOT="/path/to/sysroot" ' \
if platform.system() != "Linux" else ""
generator = "MinGW Makefiles" if platform.system() == "Windows" else "Unix Makefiles"
flags = '-DCONAN_EXPORTED="1"{} -DCONAN_COMPILER="gcc" ' \
'-DCONAN_COMPILER_VERSION="6.3" ' \
'-DCONAN_CXX_FLAGS="-m32" -DCONAN_SHARED_LINKER_FLAGS="-m32" ' \
'-DCONAN_C_FLAGS="-m32" -DCMAKE_EXPORT_NO_PACKAGE_REGISTRY="ON"'
flags_in_local_cache = flags.format(' -D' + cmake_in_local_cache_var_name + '="ON"')
flags_no_local_cache = flags.format(' -D' + cmake_in_local_cache_var_name + '="OFF"')
base_cmd = 'cmake -G "{generator}" -DCMAKE_BUILD_TYPE="Release" {linux_stuff}' \
'{{flags}} -Wno-dev'
base_cmd = base_cmd.format(generator=generator, linux_stuff=linux_stuff)
full_cmd = "cd {build_expected} && {base_cmd} {source_expected}"
build_expected = quote_var("build")
source_expected = quote_var("../subdir")
cmake = CMake(conanfile)
cmake.configure(source_dir="../subdir", build_dir="build")
self.assertEqual(conanfile.command,
full_cmd.format(build_expected=build_expected,
source_expected=source_expected,
base_cmd=base_cmd.format(flags=flags_no_local_cache)))
cmake = CMake(conanfile)
cmake.configure(build_dir="build")
build_expected = quote_var("build")
source_expected = quote_var(os.path.join(self.tempdir, "my_cache_source_folder"))
self.assertEqual(conanfile.command,
full_cmd.format(build_expected=build_expected,
source_expected=source_expected,
base_cmd=base_cmd.format(flags=flags_no_local_cache)))
cmake = CMake(conanfile)
cmake.configure()
build_expected = quote_var(os.path.join(self.tempdir, "my_cache_build_folder"))
source_expected = quote_var(os.path.join(self.tempdir, "my_cache_source_folder"))
self.assertEqual(conanfile.command,
full_cmd.format(build_expected=build_expected,
source_expected=source_expected,
base_cmd=base_cmd.format(flags=flags_no_local_cache)))
cmake = CMake(conanfile)
cmake.configure(source_folder="source", build_folder="build")
build_expected = quote_var(os.path.join(os.path.join(self.tempdir,
"my_cache_build_folder", "build")))
source_expected = quote_var(os.path.join(os.path.join(self.tempdir,
"my_cache_source_folder",
"source")))
self.assertEqual(conanfile.command,
full_cmd.format(build_expected=build_expected,
source_expected=source_expected,
base_cmd=base_cmd.format(flags=flags_no_local_cache)))
conanfile.in_local_cache = True
cmake = CMake(conanfile)
cmake.configure(source_folder="source", build_folder="build",
cache_build_folder="rel_only_cache")
build_expected = quote_var(os.path.join(self.tempdir, "my_cache_build_folder",
"rel_only_cache"))
source_expected = quote_var(os.path.join(self.tempdir, "my_cache_source_folder",
"source"))
self.assertEqual(conanfile.command,
full_cmd.format(build_expected=build_expected,
source_expected=source_expected,
base_cmd=base_cmd.format(flags=flags_in_local_cache)))
conanfile.in_local_cache = False
cmake = CMake(conanfile)
cmake.configure(source_folder="source", build_folder="build",
cache_build_folder="rel_only_cache")
build_expected = quote_var(os.path.join(self.tempdir, "my_cache_build_folder", "build"))
source_expected = quote_var(os.path.join(self.tempdir, "my_cache_source_folder",
"source"))
self.assertEqual(conanfile.command,
full_cmd.format(build_expected=build_expected,
source_expected=source_expected,
base_cmd=base_cmd.format(flags=flags_no_local_cache)))
conanfile.in_local_cache = True
cmake = CMake(conanfile)
cmake.configure(build_dir="build", cache_build_folder="rel_only_cache")
build_expected = quote_var(os.path.join(self.tempdir, "my_cache_build_folder",
"rel_only_cache"))
source_expected = quote_var(os.path.join(self.tempdir, "my_cache_source_folder"))
self.assertEqual(conanfile.command, full_cmd.format(build_expected=build_expected,
source_expected=source_expected,
base_cmd=base_cmd.format(
flags=flags_in_local_cache)))
# Raise mixing
with six.assertRaisesRegex(self, ConanException, "Use 'build_folder'/'source_folder'"):
cmake = CMake(conanfile)
cmake.configure(source_folder="source", build_dir="build")
def build_type_force_test(self):
# 1: No multi-config generator
settings = Settings.loads(default_settings_yml)
settings.os = "Linux"
settings.compiler = "gcc"
settings.compiler.version = "6.3"
settings.arch = "x86"
settings.build_type = "Release"
conanfile = ConanFileMock()
conanfile.settings = settings
# 2: build_type from settings
cmake = CMake(conanfile)
self.assertNotIn('WARN: Forced CMake build type ', conanfile.output)
self.assertEqual(cmake.build_type, "Release")
# 2: build_type from attribute
cmake.build_type = "Debug"
expected_output = "WARN: Forced CMake build type ('Debug') different from the settings " \
"build type ('Release')"
self.assertIn(expected_output, conanfile.output)
self.assertEqual(cmake.build_type, "Debug")
self.assertIn('-DCMAKE_BUILD_TYPE="Debug"', cmake.command_line)
# 2: build_type from constructor
cmake = CMake(conanfile, build_type="Debug")
expected_output = "WARN: Forced CMake build type ('Debug') different from the settings " \
"build type ('Release')"
self.assertIn(expected_output, conanfile.output)
self.assertEqual(cmake.build_type, "Debug")
self.assertIn('-DCMAKE_BUILD_TYPE="Debug"', cmake.command_line)
# 1: Multi-config generator
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "15"
settings.arch = "x86"
settings.build_type = "Release"
conanfile = ConanFileMock()
conanfile.settings = settings
# 2: build_type from settings
cmake = CMake(conanfile)
self.assertNotIn('-DCMAKE_BUILD_TYPE="Release"', cmake.command_line)
self.assertIn("--config Release", cmake.build_config)
# 2: build_type from attribute
cmake.build_type = "Debug"
self.assertIn(expected_output, conanfile.output)
self.assertEqual(cmake.build_type, "Debug")
self.assertNotIn('-DCMAKE_BUILD_TYPE="Debug"', cmake.command_line)
self.assertIn("--config Debug", cmake.build_config)
# 2: build_type from constructor
cmake = CMake(conanfile, build_type="Debug")
self.assertIn(expected_output, conanfile.output)
self.assertEqual(cmake.build_type, "Debug")
self.assertNotIn('-DCMAKE_BUILD_TYPE="Debug"', cmake.command_line)
self.assertIn("--config Debug", cmake.build_config)
def loads_default_test(self):
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "12"
settings.arch = "x86"
conanfile = ConanFileMock()
conanfile.settings = settings
def check(text, build_config, generator=None, set_cmake_flags=False):
os = str(settings.os)
os_ver = str(settings.os.version) if settings.get_safe('os.version') else None
for cmake_system_name in (True, False):
cross_ver = ("-DCMAKE_SYSTEM_VERSION=\"%s\" " % os_ver) if os_ver else ""
cross = ("-DCMAKE_SYSTEM_NAME=\"%s\" %s-DCMAKE_SYSROOT=\"/path/to/sysroot\" "
% ({"Macos": "Darwin"}.get(os, os), cross_ver)
if (platform.system() != os and cmake_system_name) else "")
cmake = CMake(conanfile, generator=generator, cmake_system_name=cmake_system_name,
set_cmake_flags=set_cmake_flags)
new_text = text.replace("-DCONAN_EXPORTED", "%s-DCONAN_EXPORTED" % cross)
if "Visual Studio" in text:
cores = ('-DCONAN_CXX_FLAGS="/MP{0}" '
'-DCONAN_C_FLAGS="/MP{0}" '.format(tools.cpu_count(conanfile.output)))
new_text = new_text.replace('-DCMAKE_EXPORT_NO_PACKAGE_REGISTRY="ON"',
'%s-DCMAKE_EXPORT_NO_PACKAGE_REGISTRY="ON"' % cores)
self.assertEqual(new_text, cmake.command_line)
self.assertEqual(build_config, cmake.build_config)
check('-G "Visual Studio 12 2013" -DCONAN_EXPORTED="1" -DCONAN_IN_LOCAL_CACHE="OFF" '
'-DCONAN_COMPILER="Visual Studio" -DCONAN_COMPILER_VERSION="12" '
'-DCMAKE_EXPORT_NO_PACKAGE_REGISTRY="ON" -Wno-dev',
"")
check('-G "Custom Generator" -DCONAN_EXPORTED="1" -DCONAN_IN_LOCAL_CACHE="OFF" '
'-DCONAN_COMPILER="Visual Studio" -DCONAN_COMPILER_VERSION="12" '
'-DCMAKE_EXPORT_NO_PACKAGE_REGISTRY="ON" -Wno-dev',
'', generator="Custom Generator")
check('-G "Custom Generator" -DCONAN_EXPORTED="1" -DCONAN_IN_LOCAL_CACHE="OFF" '
'-DCONAN_COMPILER="Visual Studio" -DCONAN_COMPILER_VERSION="12" '
'-DCMAKE_EXPORT_NO_PACKAGE_REGISTRY="ON" -Wno-dev',
'', generator="Custom Generator", set_cmake_flags=True)
settings.build_type = "Debug"
check('-G "Visual Studio 12 2013" -DCONAN_EXPORTED="1" -DCONAN_IN_LOCAL_CACHE="OFF" '
'-DCONAN_COMPILER="Visual Studio" -DCONAN_COMPILER_VERSION="12" '
'-DCMAKE_EXPORT_NO_PACKAGE_REGISTRY="ON" -Wno-dev',
'--config Debug')
settings.arch = "x86_64"
check('-G "Visual Studio 12 2013 Win64" -DCONAN_EXPORTED="1" -DCONAN_IN_LOCAL_CACHE="OFF" '
'-DCONAN_COMPILER="Visual Studio" -DCONAN_COMPILER_VERSION="12" '
'-DCMAKE_EXPORT_NO_PACKAGE_REGISTRY="ON" -Wno-dev',
'--config Debug')
settings.compiler = "gcc"
settings.compiler.version = "4.8"
generator = "MinGW Makefiles" if platform.system() == "Windows" else "Unix Makefiles"
check('-G "%s" -DCMAKE_BUILD_TYPE="Debug" -DCONAN_EXPORTED="1" -DCONAN_IN_LOCAL_CACHE="OFF" '
'-DCONAN_COMPILER="gcc" -DCONAN_COMPILER_VERSION="4.8" -DCONAN_CXX_FLAGS="-m64" '
'-DCONAN_SHARED_LINKER_FLAGS="-m64" -DCONAN_C_FLAGS="-m64" '
'-DCMAKE_EXPORT_NO_PACKAGE_REGISTRY="ON" -Wno-dev' % generator, "")
settings.os = "Linux"
settings.arch = "x86"
check('-G "%s" -DCMAKE_BUILD_TYPE="Debug"'
' -DCONAN_EXPORTED="1" -DCONAN_IN_LOCAL_CACHE="OFF" -DCONAN_COMPILER="gcc" '
'-DCONAN_COMPILER_VERSION="4.8" -DCONAN_CXX_FLAGS="-m32" '
'-DCONAN_SHARED_LINKER_FLAGS="-m32" -DCONAN_C_FLAGS="-m32" '
'-DCMAKE_EXPORT_NO_PACKAGE_REGISTRY="ON" -Wno-dev' % generator,
"")
settings.arch = "x86_64"
check('-G "%s" -DCMAKE_BUILD_TYPE="Debug"'
' -DCONAN_EXPORTED="1" -DCONAN_IN_LOCAL_CACHE="OFF" -DCONAN_COMPILER="gcc" '
'-DCONAN_COMPILER_VERSION="4.8" -DCONAN_CXX_FLAGS="-m64" '
'-DCONAN_SHARED_LINKER_FLAGS="-m64" -DCONAN_C_FLAGS="-m64" '
'-DCMAKE_EXPORT_NO_PACKAGE_REGISTRY="ON" -Wno-dev' % generator,
"")
check('-G "%s" -DCMAKE_BUILD_TYPE="Debug"'
' -DCONAN_EXPORTED="1" -DCONAN_IN_LOCAL_CACHE="OFF" -DCONAN_COMPILER="gcc" '
'-DCONAN_COMPILER_VERSION="4.8" -DCONAN_CXX_FLAGS="-m64" '
'-DCONAN_SHARED_LINKER_FLAGS="-m64" -DCONAN_C_FLAGS="-m64" '
'-DCMAKE_CXX_FLAGS="-m64" -DCMAKE_SHARED_LINKER_FLAGS="-m64" -DCMAKE_C_FLAGS="-m64" '
'-DCMAKE_EXPORT_NO_PACKAGE_REGISTRY="ON" '
'-Wno-dev' % generator,
"", set_cmake_flags=True)
settings.os = "FreeBSD"
settings.compiler = "clang"
settings.compiler.version = "3.8"
settings.arch = "x86"
check('-G "%s" -DCMAKE_BUILD_TYPE="Debug"'
' -DCONAN_EXPORTED="1" -DCONAN_IN_LOCAL_CACHE="OFF" -DCONAN_COMPILER="clang" '
'-DCONAN_COMPILER_VERSION="3.8" -DCONAN_CXX_FLAGS="-m32" '
'-DCONAN_SHARED_LINKER_FLAGS="-m32" -DCONAN_C_FLAGS="-m32" '
'-DCMAKE_EXPORT_NO_PACKAGE_REGISTRY="ON" -Wno-dev' % generator,
"")
settings.arch = "x86_64"
check('-G "%s" -DCMAKE_BUILD_TYPE="Debug"'
' -DCONAN_EXPORTED="1" -DCONAN_IN_LOCAL_CACHE="OFF" -DCONAN_COMPILER="clang" '
'-DCONAN_COMPILER_VERSION="3.8" -DCONAN_CXX_FLAGS="-m64" '
'-DCONAN_SHARED_LINKER_FLAGS="-m64" -DCONAN_C_FLAGS="-m64" '
'-DCMAKE_EXPORT_NO_PACKAGE_REGISTRY="ON" -Wno-dev' % generator,
"")
settings.os = "SunOS"
settings.compiler = "sun-cc"
settings.compiler.version = "5.10"
settings.arch = "x86"
check('-G "%s" -DCMAKE_BUILD_TYPE="Debug"'
' -DCONAN_EXPORTED="1" -DCONAN_IN_LOCAL_CACHE="OFF" -DCONAN_COMPILER="sun-cc" '
'-DCONAN_COMPILER_VERSION="5.10" -DCONAN_CXX_FLAGS="-m32" '
'-DCONAN_SHARED_LINKER_FLAGS="-m32" -DCONAN_C_FLAGS="-m32" '
'-DCMAKE_EXPORT_NO_PACKAGE_REGISTRY="ON" -Wno-dev' % generator,
"")
settings.arch = "x86_64"
check('-G "%s" -DCMAKE_BUILD_TYPE="Debug"'
' -DCONAN_EXPORTED="1" -DCONAN_IN_LOCAL_CACHE="OFF" -DCONAN_COMPILER="sun-cc" '
'-DCONAN_COMPILER_VERSION="5.10" -DCONAN_CXX_FLAGS="-m64" '
'-DCONAN_SHARED_LINKER_FLAGS="-m64" -DCONAN_C_FLAGS="-m64" '
'-DCMAKE_EXPORT_NO_PACKAGE_REGISTRY="ON" -Wno-dev' % generator,
"")
settings.arch = "sparc"
check('-G "%s" -DCMAKE_BUILD_TYPE="Debug" -DCONAN_EXPORTED="1" -DCONAN_IN_LOCAL_CACHE="OFF" '
'-DCONAN_COMPILER="sun-cc" '
'-DCONAN_COMPILER_VERSION="5.10" -DCONAN_CXX_FLAGS="-m32" '
'-DCONAN_SHARED_LINKER_FLAGS="-m32" -DCONAN_C_FLAGS="-m32" '
'-DCMAKE_EXPORT_NO_PACKAGE_REGISTRY="ON" -Wno-dev' % generator,
"")
settings.arch = "sparcv9"
check('-G "%s" -DCMAKE_BUILD_TYPE="Debug" -DCONAN_EXPORTED="1" -DCONAN_IN_LOCAL_CACHE="OFF" '
'-DCONAN_COMPILER="sun-cc" '
'-DCONAN_COMPILER_VERSION="5.10" -DCONAN_CXX_FLAGS="-m64" '
'-DCONAN_SHARED_LINKER_FLAGS="-m64" -DCONAN_C_FLAGS="-m64" '
'-DCMAKE_EXPORT_NO_PACKAGE_REGISTRY="ON" -Wno-dev' % generator,
"")
settings.compiler = "Visual Studio"
settings.compiler.version = "12"
settings.os = "WindowsStore"
settings.os.version = "8.1"
settings.build_type = "Debug"
check('-G "Visual Studio 12 2013" -DCONAN_EXPORTED="1" -DCONAN_IN_LOCAL_CACHE="OFF" '
'-DCONAN_COMPILER="Visual Studio" -DCONAN_COMPILER_VERSION="12" '
'-DCMAKE_EXPORT_NO_PACKAGE_REGISTRY="ON" -Wno-dev',
"--config Debug")
settings.os.version = "10.0"
check('-G "Visual Studio 12 2013" -DCONAN_EXPORTED="1" -DCONAN_IN_LOCAL_CACHE="OFF" '
'-DCONAN_COMPILER="Visual Studio" -DCONAN_COMPILER_VERSION="12" '
'-DCMAKE_EXPORT_NO_PACKAGE_REGISTRY="ON" -Wno-dev',
"--config Debug")
settings.compiler.version = "15"
settings.arch = "armv8"
check('-G "Visual Studio 15 2017 ARM" -DCONAN_EXPORTED="1" -DCONAN_IN_LOCAL_CACHE="OFF" '
'-DCONAN_COMPILER="Visual Studio" -DCONAN_COMPILER_VERSION="15" '
'-DCMAKE_EXPORT_NO_PACKAGE_REGISTRY="ON" -Wno-dev',
"--config Debug")
settings.arch = "x86_64"
check('-G "Visual Studio 15 2017 Win64" -DCONAN_EXPORTED="1" -DCONAN_IN_LOCAL_CACHE="OFF" '
'-DCONAN_COMPILER="Visual Studio" -DCONAN_COMPILER_VERSION="15" '
'-DCMAKE_EXPORT_NO_PACKAGE_REGISTRY="ON" -Wno-dev',
"--config Debug")
settings.compiler = "Visual Studio"
settings.compiler.version = "9"
settings.os = "WindowsCE"
settings.os.platform = "Your platform name (ARMv4I)"
settings.os.version = "7.0"
settings.build_type = "Debug"
check('-G "Visual Studio 9 2008" '
'-A "Your platform name (ARMv4I)" '
'-DCONAN_EXPORTED="1" -DCONAN_IN_LOCAL_CACHE="OFF" '
'-DCONAN_COMPILER="Visual Studio" -DCONAN_COMPILER_VERSION="9" '
'-DCMAKE_EXPORT_NO_PACKAGE_REGISTRY="ON" -Wno-dev',
"--config Debug")
def deleted_os_test(self):
partial_settings = """
os: [Linux]
arch: [x86_64]
compiler:
gcc:
version: ["4.9"]
build_type: [ Release]
"""
settings = Settings.loads(partial_settings)
settings.os = "Linux"
settings.compiler = "gcc"
settings.compiler.version = "4.9"
settings.arch = "x86_64"
conanfile = ConanFileMock()
conanfile.settings = settings
cmake = CMake(conanfile)
generator = "Unix" if platform.system() != "Windows" else "MinGW"
cross = ("-DCMAKE_SYSTEM_NAME=\"Linux\" -DCMAKE_SYSROOT=\"/path/to/sysroot\" "
if platform.system() != "Linux" else "")
self.assertEqual('-G "%s Makefiles" %s-DCONAN_EXPORTED="1" -DCONAN_IN_LOCAL_CACHE="OFF" '
'-DCONAN_COMPILER="gcc" '
'-DCONAN_COMPILER_VERSION="4.9" -DCONAN_CXX_FLAGS="-m64" '
'-DCONAN_SHARED_LINKER_FLAGS="-m64" -DCONAN_C_FLAGS="-m64" '
'-DCMAKE_EXPORT_NO_PACKAGE_REGISTRY="ON" -Wno-dev' % (generator, cross),
cmake.command_line)
def test_sysroot(self):
settings = Settings.loads(default_settings_yml)
conanfile = ConanFileMock()
conanfile.settings = settings
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "12"
settings.arch = "x86"
settings.os = "Windows"
if platform.system() == "Windows":
cmake = CMake(conanfile)
self.assertNotIn("-DCMAKE_SYSROOT=", cmake.flags)
# Now activate cross build and check sysroot and system processor
with(tools.environment_append({"CONAN_CMAKE_SYSTEM_NAME": "Android",
"CONAN_CMAKE_SYSTEM_PROCESSOR": "somevalue"})):
cmake = CMake(conanfile)
self.assertEqual(cmake.definitions["CMAKE_SYSROOT"], "/path/to/sysroot")
self.assertEqual(cmake.definitions["CMAKE_SYSTEM_PROCESSOR"], "somevalue")
def test_deprecated_behaviour(self):
""""Remove when deprecate the old settings parameter to CMake and
conanfile to configure/build/test"""
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
conanfile = ConanFileMock()
conanfile.settings = settings
with self.assertRaises(ConanException):
CMake(settings)
def test_cores_ancient_visual(self):
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "9"
settings.compiler.runtime = "MDd"
settings.arch = "x86"
settings.build_type = None
conanfile = ConanFileMock()
conanfile.settings = settings
cmake = CMake(conanfile)
cmake.build()
self.assertNotIn("/m", conanfile.command)
settings.compiler.version = "10"
cmake = CMake(conanfile)
cmake.build()
self.assertIn("/m", conanfile.command)
def convenient_functions_test(self):
settings = Settings.loads(default_settings_yml)
settings.os = "Android"
settings.os.api_level = 16
settings.os_build = "Windows" # Here we are declaring we are cross building
settings.compiler = "gcc"
settings.compiler.version = "5.4"
settings.arch = "armv7"
settings.build_type = None
if platform.system() == 'Windows':
dot_dir = "."
tempdir = self.tempdir
else:
dot_dir = "'.'"
tempdir = "'" + self.tempdir + "'"
conanfile = ConanFileMock()
conanfile.settings = settings
cmake = CMake(conanfile)
cross = '-DCMAKE_SYSTEM_NAME="Android"' \
' -DCMAKE_SYSTEM_VERSION="{0}"' \
' -DCMAKE_SYSROOT="/path/to/sysroot"' \
' -DCMAKE_ANDROID_ARCH_ABI="armeabi-v7a"' \
' -DANDROID_ABI="armeabi-v7a"' \
' -DANDROID_PLATFORM="android-{0}"' \
' -DANDROID_TOOLCHAIN="{1}"' \
' -DANDROID_STL="none"'.format(settings.os.api_level, settings.compiler)
target_test = CMakeTest.scape('--target test')
cmake.configure()
self.assertEqual('cd {0} && cmake -G "MinGW Makefiles" '
'{1} -DCONAN_EXPORTED="1" -DCONAN_IN_LOCAL_CACHE="OFF"'
' -DCONAN_COMPILER="{2}" -DCONAN_COMPILER_VERSION="{3}"'
' -DCMAKE_EXPORT_NO_PACKAGE_REGISTRY="ON"'
' -Wno-dev {0}'.format(dot_dir, cross, settings.compiler,
settings.compiler.version),
conanfile.command)
cmake.build()
self.assertEqual('cmake --build %s %s' %
(dot_dir, (CMakeTest.scape('-- -j%i' % cpu_count(conanfile.output)))),
conanfile.command)
cmake.test()
self.assertEqual('cmake --build %s %s %s' %
(dot_dir, target_test,
(CMakeTest.scape('-- -j%i' % cpu_count(output=conanfile.output)))),
conanfile.command)
settings.build_type = "Debug"
cmake = CMake(conanfile)
cmake.build()
self.assertEqual('cmake --build %s %s' %
(dot_dir, (CMakeTest.scape('-- -j%i' %
cpu_count(output=conanfile.output)))),
conanfile.command)
cmake.test()
self.assertEqual('cmake --build %s %s %s' %
(dot_dir, target_test,
(CMakeTest.scape('-- -j%i' % cpu_count(output=conanfile.output)))),
conanfile.command)
cmake.configure(source_dir="/source", build_dir=self.tempdir,
args=['--foo "bar"'], defs={"SHARED": True})
if sys.platform == 'win32':
escaped_args = r'"--foo \"bar\"" -DSHARED="True" /source'
else:
escaped_args = "'--foo \"bar\"' -DSHARED=\"True\" '/source'"
self.assertEqual('cd {0} && cmake -G "MinGW Makefiles" -DCMAKE_BUILD_TYPE="Debug" '
'{1} -DCONAN_EXPORTED="1" -DCONAN_IN_LOCAL_CACHE="OFF" '
'-DCONAN_COMPILER="{2}" -DCONAN_COMPILER_VERSION="{3}" '
'-DCMAKE_EXPORT_NO_PACKAGE_REGISTRY="ON" '
'-Wno-dev {4}'.format(tempdir, cross, settings.compiler,
settings.compiler.version, escaped_args),
conanfile.command)
cmake.build(args=["--bar 'foo'"], target="install")
if platform.system() == 'Windows':
escaped_args = '--target install "--bar \'foo\'"'
else:
escaped_args = r"'--target' 'install' '--bar '\''foo'\'''"
self.assertEqual('cmake --build %s %s %s' %
(tempdir, escaped_args,
(CMakeTest.scape('-- -j%i' % cpu_count(output=conanfile.output)))),
conanfile.command)
cmake.test(args=["--bar 'foo'"])
if sys.platform == 'win32':
escaped_args = '%s "--bar \'foo\'"' % target_test
else:
escaped_args = r"%s '--bar '\''foo'\'''" % target_test
self.assertEqual('cmake --build %s %s %s' %
(tempdir, escaped_args,
(CMakeTest.scape('-- -j%i' % cpu_count(output=conanfile.output)))),
conanfile.command)
settings.build_type = "Release"
cmake = CMake(conanfile)
cmake.build()
self.assertEqual('cmake --build %s %s' %
(dot_dir, (CMakeTest.scape('-- -j%i' %
cpu_count(output=conanfile.output)))),
conanfile.command)
cmake.test()
self.assertEqual('cmake --build %s %s %s'
% (dot_dir, target_test,
(CMakeTest.scape('-- -j%i' % cpu_count(output=conanfile.output)))),
conanfile.command)
cmake.build(build_dir=self.tempdir)
self.assertEqual('cmake --build %s %s'
% (tempdir, (CMakeTest.scape('-- -j%i' %
cpu_count(output=conanfile.output)))),
conanfile.command)
cmake.test(build_dir=self.tempdir)
self.assertEqual('cmake --build %s %s %s' %
(tempdir, target_test,
(CMakeTest.scape('-- -j%i' % cpu_count(output=conanfile.output)))),
conanfile.command)
settings.compiler = "gcc"
settings.compiler.version = "5.4"
cmake = CMake(conanfile)
cmake.build()
self.assertEqual('cmake --build %s' %
(CMakeTest.scape('. -- -j%i' % cpu_count(output=conanfile.output))),
conanfile.command)
cmake.test()
self.assertEqual('cmake --build %s' %
(CMakeTest.scape('. --target test -- -j%i' %
cpu_count(output=conanfile.output))),
conanfile.command)
cmake.build(args=['foo', '--', 'bar'])
self.assertEqual('cmake --build %s' %
(CMakeTest.scape('. foo -- bar -j%i' %
cpu_count(output=conanfile.output))),
conanfile.command)
cmake.test(args=['foo', '--', 'bar'])
self.assertEqual('cmake --build %s' %
(CMakeTest.scape('. --target test foo -- bar -j%i' %
cpu_count(output=conanfile.output))),
conanfile.command)
cmake = CMake(conanfile, parallel=False)
cmake.build()
self.assertEqual('cmake --build %s' % CMakeTest.scape('.'), conanfile.command)
cmake.test()
self.assertEqual('cmake --build %s' % CMakeTest.scape('. --target test'),
conanfile.command)
def test_run_tests(self):
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "14"
settings.compiler.runtime = "MDd"
settings.arch = "x86"
settings.build_type = None
conanfile = ConanFileMock()
conanfile.settings = settings
cmake = CMake(conanfile)
cmake.test()
self.assertIn('cmake --build '
'%s' % CMakeTest.scape('. --target RUN_TESTS -- /m:%i' %
cpu_count(output=conanfile.output)),
conanfile.command)
cmake.generator = "Ninja Makefiles"
cmake.test()
self.assertEqual('cmake --build '
'%s' % CMakeTest.scape('. --target test -- -j%i' %
cpu_count(output=conanfile.output)),
conanfile.command)
cmake.generator = "Ninja"
cmake.test()
self.assertEqual('cmake --build '
'%s' % CMakeTest.scape('. --target test -- -j%i' %
cpu_count(output=conanfile.output)),
conanfile.command)
cmake.generator = "NMake Makefiles"
cmake.test()
self.assertEqual('cmake --build '
'%s' % CMakeTest.scape('. --target test'),
conanfile.command)
def test_clean_sh_path(self):
if platform.system() != "Windows":
return
os.environ["PATH"] = os.environ.get("PATH", "") + os.pathsep + self.tempdir
save(os.path.join(self.tempdir, "sh.exe"), "Fake sh")
conanfile = ConanFileMock()
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "12"
settings.arch = "x86"
conanfile.settings = settings
cmake = CMake(conanfile)
cmake.configure()
self.assertIn(self.tempdir, conanfile.path)
cmake.generator = "MinGW Makefiles"
cmake.configure()
self.assertNotIn(self.tempdir, conanfile.path)
# Automatic gcc
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "gcc"
settings.compiler.version = "5.4"
settings.arch = "x86"
conanfile.settings = settings
cmake = CMake(conanfile)
cmake.configure()
self.assertNotIn(self.tempdir, conanfile.path)
def test_pkg_config_path(self):
conanfile = ConanFileMock()
conanfile.generators = ["pkg_config"]
conanfile.install_folder = "/my_install/folder/"
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "12"
settings.arch = "x86"
conanfile.settings = settings
cmake = CMake(conanfile)
cmake.configure()
self.assertEqual(conanfile.captured_env["PKG_CONFIG_PATH"], "/my_install/folder/")
conanfile.generators = []
cmake = CMake(conanfile)
cmake.configure()
self.assertNotIn("PKG_CONFIG_PATH", conanfile.captured_env)
cmake = CMake(conanfile)
cmake.configure(pkg_config_paths=["reldir1", "/abspath2/to/other"])
self.assertEqual(conanfile.captured_env["PKG_CONFIG_PATH"],
os.path.pathsep.join(["/my_install/folder/reldir1",
"/abspath2/to/other"]))
# If there is already a PKG_CONFIG_PATH do not set it
conanfile.generators = ["pkg_config"]
cmake = CMake(conanfile)
with tools.environment_append({"PKG_CONFIG_PATH": "do_not_mess_with_this"}):
cmake.configure()
self.assertEqual(conanfile.captured_env["PKG_CONFIG_PATH"], "do_not_mess_with_this")
def test_shared(self):
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "12"
settings.arch = "x86"
settings.os = "Windows"
conanfile = ConanFileMock(shared=True)
conanfile.settings = settings
cmake = CMake(conanfile)
self.assertEqual(cmake.definitions["BUILD_SHARED_LIBS"], "ON")
conanfile = ConanFileMock(shared=False)
conanfile.settings = settings
cmake = CMake(conanfile)
self.assertEqual(cmake.definitions["BUILD_SHARED_LIBS"], "OFF")
conanfile = ConanFileMock(shared=None)
conanfile.settings = settings
cmake = CMake(conanfile)
self.assertNotIn("BUILD_SHARED_LIBS", cmake.definitions)
def test_verbose(self):
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "12"
settings.arch = "x86"
conanfile = ConanFileMock()
conanfile.settings = settings
cmake = CMake(conanfile)
self.assertNotIn("CMAKE_VERBOSE_MAKEFILE", cmake.definitions)
cmake.verbose = True
self.assertEqual(cmake.definitions["CMAKE_VERBOSE_MAKEFILE"], "ON")
cmake.verbose = False
self.assertEqual(cmake.definitions["CMAKE_VERBOSE_MAKEFILE"], "OFF")
cmake.definitions["CMAKE_VERBOSE_MAKEFILE"] = True
self.assertTrue(cmake.verbose)
cmake.definitions["CMAKE_VERBOSE_MAKEFILE"] = False
self.assertFalse(cmake.verbose)
del cmake.definitions["CMAKE_VERBOSE_MAKEFILE"]
self.assertFalse(cmake.verbose)
def set_toolset_test(self):
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "15"
settings.arch = "x86"
settings.compiler.toolset = "v140" # Will be overwritten by parameter
conanfile = ConanFileMock()
conanfile.settings = settings
cmake = CMake(conanfile, toolset="v141")
self.assertIn('-T "v141"', cmake.command_line)
# DEPRECATED VARIABLE, IT NO LONGER MODIFIES THE TOOLSET
with tools.environment_append({"CONAN_CMAKE_TOOLSET": "v141"}):
cmake = CMake(conanfile)
self.assertNotIn('-T "v141"', cmake.command_line)
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "15"
settings.arch = "x86"
settings.compiler.toolset = "v140"
conanfile = ConanFileMock()
conanfile.settings = settings
cmake = CMake(conanfile)
self.assertIn('-T "v140"', cmake.command_line)
@parameterized.expand([('Ninja',),
('NMake Makefiles',),
('NMake Makefiles JOM',)
])
def test_toolset_with_unsupported_generator(self, generator):
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "15"
settings.arch = "x86"
settings.compiler.toolset = "v140"
conanfile = ConanFileMock()
conanfile.settings = settings
with self.assertRaises(ConanException):
cmake = CMake(conanfile, generator=generator)
cmake.command_line
def test_missing_settings(self):
def instance_with_os_build(os_build):
settings = Settings.loads(default_settings_yml)
settings.os_build = os_build
conanfile = ConanFileMock()
conanfile.settings = settings
return CMake(conanfile)
cmake = instance_with_os_build("Linux")
self.assertEqual(cmake.generator, "Unix Makefiles")
cmake = instance_with_os_build("Macos")
self.assertEqual(cmake.generator, "Unix Makefiles")
cmake = instance_with_os_build("Windows")
self.assertEqual(cmake.generator, None)
with tools.environment_append({"CONAN_CMAKE_GENERATOR": "MyCoolGenerator"}):
cmake = instance_with_os_build("Windows")
self.assertEqual(cmake.generator, "MyCoolGenerator")
def test_cmake_system_version_android(self):
with tools.environment_append({"CONAN_CMAKE_SYSTEM_NAME": "SomeSystem",
"CONAN_CMAKE_GENERATOR": "SomeGenerator"}):
settings = Settings.loads(default_settings_yml)
settings.os = "WindowsStore"
settings.os.version = "8.1"
conanfile = ConanFileMock()
conanfile.settings = settings
cmake = CMake(conanfile)
self.assertEqual(cmake.definitions["CMAKE_SYSTEM_VERSION"], "8.1")
settings = Settings.loads(default_settings_yml)
settings.os = "Android"
settings.os.api_level = "32"
conanfile = ConanFileMock()
conanfile.settings = settings
cmake = CMake(conanfile)
self.assertEqual(cmake.definitions["CMAKE_SYSTEM_VERSION"], "32")
def install_definitions_test(self):
conanfile = ConanFileMock()
conanfile.package_folder = None
conanfile.settings = Settings.loads(default_settings_yml)
install_definitions = {"CMAKE_INSTALL_PREFIX": conanfile.package_folder,
"CMAKE_INSTALL_BINDIR": "bin",
"CMAKE_INSTALL_SBINDIR": "bin",
"CMAKE_INSTALL_SBINDIR": "bin",
"CMAKE_INSTALL_LIBEXECDIR": "bin",
"CMAKE_INSTALL_LIBDIR": "lib",
"CMAKE_INSTALL_INCLUDEDIR": "include",
"CMAKE_INSTALL_OLDINCLUDEDIR": "include",
"CMAKE_INSTALL_DATAROOTDIR": "share"}
# Without package_folder
cmake = CMake(conanfile)
for key, value in cmake.definitions.items():
self.assertNotIn(key, install_definitions.keys())
# With package_folder
conanfile.package_folder = "my_package_folder"
install_defintions["CMAKE_INSTALL_PREFIX"] = conanfile.package_folder
cmake = CMake(conanfile)
for key, value in install_definitions.items():
self.assertEqual(cmake.definitions[key], value)
@mock.patch('platform.system', return_value="Macos")
def test_cmake_system_version_osx(self, _):
settings = Settings.loads(default_settings_yml)
settings.os = "Macos"
# No version defined
conanfile = ConanFileMock()
conanfile.settings = settings
cmake = CMake(conanfile)
self.assertFalse("CMAKE_OSX_DEPLOYMENT_TARGET" in cmake.definitions)
self.assertFalse("CMAKE_SYSTEM_NAME" in cmake.definitions)
self.assertFalse("CMAKE_SYSTEM_VERSION" in cmake.definitions)
# Version defined using Conan env variable
with tools.environment_append({"CONAN_CMAKE_SYSTEM_VERSION": "23"}):
conanfile = ConanFileMock()
conanfile.settings = settings
cmake = CMake(conanfile)
self.assertEqual(cmake.definitions["CMAKE_SYSTEM_VERSION"], "23")
self.assertEqual(cmake.definitions["CMAKE_OSX_DEPLOYMENT_TARGET"], "23")
# Version defined in settings
settings.os.version = "10.9"
conanfile = ConanFileMock()
conanfile.settings = settings
cmake = CMake(conanfile)
self.assertEqual(cmake.definitions["CMAKE_SYSTEM_VERSION"], "10.9")
self.assertEqual(cmake.definitions["CMAKE_OSX_DEPLOYMENT_TARGET"], "10.9")
# Version defined in settings AND using Conan env variable
with tools.environment_append({"CONAN_CMAKE_SYSTEM_VERSION": "23"}):
conanfile = ConanFileMock()
conanfile.settings = settings
cmake = CMake(conanfile)
self.assertEqual(cmake.definitions["CMAKE_SYSTEM_VERSION"], "23")
self.assertEqual(cmake.definitions["CMAKE_OSX_DEPLOYMENT_TARGET"], "23")
@staticmethod
def scape(args):
pattern = "%s" if sys.platform == "win32" else r"'%s'"
return ' '.join(pattern % i for i in args.split())
@parameterized.expand([('Ninja', 'Visual Studio', 15),
('NMake Makefiles', 'Visual Studio', 15),
('NMake Makefiles JOM', 'Visual Studio', 15),
('Ninja', 'clang', 6.0),
('NMake Makefiles', 'clang', 6.0),
('NMake Makefiles JOM', 'clang', 6.0)
])
def test_vcvars_applied(self, generator, compiler, version):
conanfile = ConanFileMock()
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = compiler
settings.compiler.version = version
conanfile.settings = settings
cmake = CMake(conanfile, generator=generator)
with mock.patch("conans.client.tools.vcvars") as vcvars_mock:
vcvars_mock.__enter__ = mock.MagicMock(return_value=(mock.MagicMock(), None))
vcvars_mock.__exit__ = mock.MagicMock(return_value=None)
cmake.configure()
self.assertTrue(vcvars_mock.called, "vcvars weren't called")
with mock.patch("conans.client.tools.vcvars") as vcvars_mock:
vcvars_mock.__enter__ = mock.MagicMock(return_value=(mock.MagicMock(), None))
vcvars_mock.__exit__ = mock.MagicMock(return_value=None)
cmake.build()
self.assertTrue(vcvars_mock.called, "vcvars weren't called")
def test_cmake_program(self):
conanfile = ConanFileMock()
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "14"
conanfile.settings = settings
cmake = CMake(conanfile, parallel=False)
cmake.build()
self.assertEqual("cmake --build %s" % CMakeTest.scape("."), conanfile.command)
cmake = CMake(conanfile, cmake_program="use_another_cmake", parallel=False)
cmake.build()
self.assertEqual("use_another_cmake --build %s" % CMakeTest.scape("."), conanfile.command)
with tools.environment_append({"CONAN_CMAKE_PROGRAM": "my_custom_cmake"}):
cmake = CMake(conanfile, parallel=False)
cmake.build()
self.assertEqual("my_custom_cmake --build %s" % CMakeTest.scape("."), conanfile.command)
with tools.environment_append({
"CONAN_CMAKE_PROGRAM": "cmake_from_environment_has_priority"
}):
cmake = CMake(conanfile, cmake_program="but_not_cmake_from_the_ctor", parallel=False)
cmake.build()
self.assertEqual("cmake_from_environment_has_priority --build %s" % CMakeTest.scape("."),
conanfile.command)
def test_msbuild_verbosity(self):
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "10"
settings.compiler.runtime = "MDd"
settings.arch = "x86"
settings.build_type = None
conanfile = ConanFileMock()
conanfile.settings = settings
cmake = CMake(conanfile)
cmake.build()
self.assertIn("/verbosity:minimal", conanfile.command)
cmake = CMake(conanfile, msbuild_verbosity="quiet")
cmake.build()
self.assertIn("/verbosity:quiet", conanfile.command)
cmake = CMake(conanfile, msbuild_verbosity=None)
cmake.build()
self.assertNotIn("/verbosity", conanfile.command)
with tools.environment_append({"CONAN_MSBUILD_VERBOSITY": "detailed"}):
cmake = CMake(conanfile)
cmake.build()
self.assertIn("/verbosity:detailed", conanfile.command)
def test_ctest_variables(self):
conanfile = ConanFileMock()
settings = Settings.loads(default_settings_yml)
settings.os = "Windows"
settings.compiler = "Visual Studio"
settings.compiler.version = "14"
conanfile.settings = settings
cmake = CMake(conanfile, parallel=False, generator="NMake Makefiles")
cmake.test()
self.assertEqual(conanfile.captured_env["CTEST_OUTPUT_ON_FAILURE"], "0")
self.assertNotIn("CTEST_PARALLEL_LEVEL", conanfile.captured_env)
with tools.environment_append({"CONAN_CPU_COUNT": "666"}):
cmake = CMake(conanfile, parallel=True, generator="NMake Makefiles")
cmake.test(output_on_failure=True)
self.assertEqual(conanfile.captured_env["CTEST_OUTPUT_ON_FAILURE"], "1")
self.assertEqual(conanfile.captured_env["CTEST_PARALLEL_LEVEL"], "666")
def test_unknown_generator_does_not_raise(self):
# https://github.com/conan-io/conan/issues/4265
settings = MockSettings({"os_build": "Windows", "compiler": "random",
"compiler.version": "15", "build_type": "Release"})
conanfile = ConanFileMock()
conanfile.settings = settings
cmake = CMake(conanfile)
self.assertIsNone(cmake.generator)
self.assertIn("WARN: CMake generator could not be deduced from settings", conanfile.output)
cmake.configure()
cmake.build()
def test_cmake_system_version_windowsce(self):
settings = Settings.loads(default_settings_yml)
settings.os = "WindowsCE"
settings.os.version = "8.0"
conanfile = ConanFileMock()
conanfile.settings = settings
cmake = CMake(conanfile)
self.assertEqual(cmake.definitions["CMAKE_SYSTEM_VERSION"], "8.0")
|
[] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
python
| 1 | 0 | |
common/keras_utils.py
|
from __future__ import absolute_import
from tensorflow.keras import backend as K
from tensorflow.keras import optimizers
from tensorflow.keras import initializers
from tensorflow.keras.layers import Dropout
from tensorflow.keras.callbacks import Callback
from tensorflow.keras.utils import get_custom_objects
from tensorflow.keras.metrics import binary_crossentropy, mean_squared_error, mean_absolute_error
from scipy.stats.stats import pearsonr
from default_utils import set_seed as set_seed_defaultUtils
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
from sklearn.metrics import r2_score
import os
def set_parallelism_threads():
""" Set the number of parallel threads according to the number available on the hardware
"""
if K.backend() == 'tensorflow' and 'NUM_INTRA_THREADS' in os.environ and 'NUM_INTER_THREADS' in os.environ:
import tensorflow as tf
# print('Using Thread Parallelism: {} NUM_INTRA_THREADS, {} NUM_INTER_THREADS'.format(os.environ['NUM_INTRA_THREADS'], os.environ['NUM_INTER_THREADS']))
session_conf = tf.ConfigProto(inter_op_parallelism_threads=int(os.environ['NUM_INTER_THREADS']),
intra_op_parallelism_threads=int(os.environ['NUM_INTRA_THREADS']))
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
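# Illustrative usage (not part of the original module); the thread counts below are
# assumptions and should be chosen to match the available hardware:
#   os.environ['NUM_INTRA_THREADS'] = '8'
#   os.environ['NUM_INTER_THREADS'] = '2'
#   set_parallelism_threads()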
def set_seed(seed):
""" Set the random number seed to the desired value
Parameters
----------
seed : integer
Random number seed.
"""
set_seed_defaultUtils(seed)
if K.backend() == 'tensorflow':
import tensorflow as tf
if tf.__version__ < "2.0.0":
tf.compat.v1.set_random_seed(seed)
else:
tf.random.set_seed(seed)
def get_function(name):
mapping = {}
mapped = mapping.get(name)
if not mapped:
raise Exception('No keras function found for "{}"'.format(name))
return mapped
def build_optimizer(type, lr, kerasDefaults):
""" Set the optimizer to the appropriate Keras optimizer function
based on the input string and learning rate. Other required values
are set to the Keras default values
Parameters
----------
type : string
String to choose the optimizer
Options recognized: 'sgd', 'rmsprop', 'adagrad', 'adadelta', 'adam'
See the Keras documentation for a full description of the options
lr : float
Learning rate
kerasDefaults : dict
Dictionary of default parameter values to ensure consistency between frameworks
Returns
----------
The appropriate Keras optimizer function
"""
if type == 'sgd':
return optimizers.SGD(lr=lr, decay=kerasDefaults['decay_lr'],
momentum=kerasDefaults['momentum_sgd'],
nesterov=kerasDefaults['nesterov_sgd']) # ,
# clipnorm=kerasDefaults['clipnorm'],
# clipvalue=kerasDefaults['clipvalue'])
elif type == 'rmsprop':
return optimizers.RMSprop(lr=lr, rho=kerasDefaults['rho'],
epsilon=kerasDefaults['epsilon'],
decay=kerasDefaults['decay_lr']) # ,
# clipnorm=kerasDefaults['clipnorm'],
# clipvalue=kerasDefaults['clipvalue'])
elif type == 'adagrad':
return optimizers.Adagrad(lr=lr,
epsilon=kerasDefaults['epsilon'],
decay=kerasDefaults['decay_lr']) # ,
# clipnorm=kerasDefaults['clipnorm'],
# clipvalue=kerasDefaults['clipvalue'])
elif type == 'adadelta':
return optimizers.Adadelta(lr=lr, rho=kerasDefaults['rho'],
epsilon=kerasDefaults['epsilon'],
decay=kerasDefaults['decay_lr']) # ,
# clipnorm=kerasDefaults['clipnorm'],
# clipvalue=kerasDefaults['clipvalue'])
elif type == 'adam':
return optimizers.Adam(lr=lr, beta_1=kerasDefaults['beta_1'],
beta_2=kerasDefaults['beta_2'],
epsilon=kerasDefaults['epsilon'],
decay=kerasDefaults['decay_lr']) # ,
# clipnorm=kerasDefaults['clipnorm'],
# clipvalue=kerasDefaults['clipvalue'])
# Not generally available
# elif type == 'adamax':
# return optimizers.Adamax(lr=lr, beta_1=kerasDefaults['beta_1'],
# beta_2=kerasDefaults['beta_2'],
# epsilon=kerasDefaults['epsilon'],
# decay=kerasDefaults['decay_lr'])
# elif type == 'nadam':
# return optimizers.Nadam(lr=lr, beta_1=kerasDefaults['beta_1'],
# beta_2=kerasDefaults['beta_2'],
# epsilon=kerasDefaults['epsilon'],
# schedule_decay=kerasDefaults['decay_schedule_lr'])
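# Illustrative usage (not part of the original module); the kerasDefaults entries below are
# assumptions and normally come from the framework's default keras settings:
#   kerasDefaults = {'decay_lr': 0.0, 'momentum_sgd': 0.9, 'nesterov_sgd': False}
#   optimizer = build_optimizer('sgd', 0.01, kerasDefaults)
#   model.compile(optimizer=optimizer, loss='mse')  # 'model' is a hypothetical Keras model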
def build_initializer(type, kerasDefaults, seed=None, constant=0.):
""" Set the initializer to the appropriate Keras initializer function
based on the input string. Other required values
are set to the Keras default values
Parameters
----------
type : string
String to choose the initializer
Options recognized: 'constant', 'uniform', 'normal',
'glorot_uniform', 'lecun_uniform', 'he_normal'
See the Keras documentation for a full description of the options
kerasDefaults : dict
Dictionary of default parameter values to ensure consistency between frameworks
seed : integer
Random number seed
constant : float
Constant value (for the constant initializer only)
Returns
----------
The appropriate Keras initializer function
"""
if type == 'constant':
return initializers.Constant(value=constant)
elif type == 'uniform':
return initializers.RandomUniform(minval=kerasDefaults['minval_uniform'],
maxval=kerasDefaults['maxval_uniform'],
seed=seed)
elif type == 'normal':
return initializers.RandomNormal(mean=kerasDefaults['mean_normal'],
stddev=kerasDefaults['stddev_normal'],
seed=seed)
elif type == 'glorot_normal':
# aka Xavier normal initializer. keras default
return initializers.glorot_normal(seed=seed)
elif type == 'glorot_uniform':
return initializers.glorot_uniform(seed=seed)
elif type == 'lecun_uniform':
return initializers.lecun_uniform(seed=seed)
elif type == 'he_normal':
return initializers.he_normal(seed=seed)
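# Illustrative usage (not part of the original module); the kerasDefaults entries and the
# Dense layer are assumptions for the example only:
#   kerasDefaults = {'minval_uniform': -0.05, 'maxval_uniform': 0.05}
#   init = build_initializer('uniform', kerasDefaults, seed=2017)
#   layer = Dense(64, kernel_initializer=init)  # Dense from tensorflow.keras.layers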
def xent(y_true, y_pred):
return binary_crossentropy(y_true, y_pred)
def r2(y_true, y_pred):
SS_res = K.sum(K.square(y_true - y_pred))
SS_tot = K.sum(K.square(y_true - K.mean(y_true)))
return (1 - SS_res / (SS_tot + K.epsilon()))
def mae(y_true, y_pred):
return mean_absolute_error(y_true, y_pred)
def mse(y_true, y_pred):
return mean_squared_error(y_true, y_pred)
def covariance(x, y):
return K.mean(x * y) - K.mean(x) * K.mean(y)
def corr(y_true, y_pred):
cov = covariance(y_true, y_pred)
var1 = covariance(y_true, y_true)
var2 = covariance(y_pred, y_pred)
return cov / (K.sqrt(var1 * var2) + K.epsilon())
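# Note (added comment): covariance() and corr() above compute the Pearson correlation
# between y_true and y_pred with Keras backend ops; K.epsilon() guards against division
# by zero when either variance is zero.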
def evaluate_autoencoder(y_pred, y_test):
mse = mean_squared_error(y_pred, y_test)
r2 = r2_score(y_test, y_pred)
corr, _ = pearsonr(y_pred.flatten(), y_test.flatten())
# print('Mean squared error: {}%'.format(mse))
return {'mse': mse, 'r2_score': r2, 'correlation': corr}
class PermanentDropout(Dropout):
def __init__(self, rate, **kwargs):
super(PermanentDropout, self).__init__(rate, **kwargs)
self.uses_learning_phase = False
def call(self, x, mask=None):
if 0. < self.rate < 1.:
noise_shape = self._get_noise_shape(x)
x = K.dropout(x, self.rate, noise_shape)
return x
def register_permanent_dropout():
get_custom_objects()['PermanentDropout'] = PermanentDropout
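# Illustrative usage (not part of the original module): registering the layer lets models
# that contain PermanentDropout be reloaded by name, e.g.
#   register_permanent_dropout()
#   model = load_model('saved_model.h5')  # hypothetical path; load_model from tf.keras.models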
class LoggingCallback(Callback):
def __init__(self, print_fcn=print):
Callback.__init__(self)
self.print_fcn = print_fcn
def on_epoch_end(self, epoch, logs={}):
msg = "[Epoch: %i] %s" % (epoch, ", ".join("%s: %f" % (k, v) for k, v in sorted(logs.items())))
self.print_fcn(msg)
|
[] |
[] |
[
"NUM_INTRA_THREADS",
"NUM_INTER_THREADS"
] |
[]
|
["NUM_INTRA_THREADS", "NUM_INTER_THREADS"]
|
python
| 2 | 0 | |
cmd/root.go
|
// Copyright (c) 2020, Amazon.com, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package cmd ...
package cmd
import (
"context"
"fmt"
"os"
"github.com/awslabs/ssosync/internal"
"github.com/awslabs/ssosync/internal/config"
"github.com/aws/aws-lambda-go/lambda"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/secretsmanager"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
var (
version = "dev"
commit = "none"
date = "unknown"
builtBy = "unknown"
)
var cfg *config.Config
var rootCmd = &cobra.Command{
Version: "dev",
Use: "ssosync",
Short: "SSO Sync, making AWS SSO be populated automagically",
Long: `A command line tool to enable you to synchronise your Google
Apps (Google Workspace) users to AWS Single Sign-on (AWS SSO)
Complete documentation is available at https://github.com/awslabs/ssosync`,
RunE: func(cmd *cobra.Command, args []string) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
err := internal.DoSync(ctx, cfg)
if err != nil {
return err
}
return nil
},
}
// Execute is the entry point of the command. If we are
// running inside of AWS Lambda, we use the Lambda
// execution path.
func Execute() {
if cfg.IsLambda {
lambda.Start(rootCmd.Execute)
}
if err := rootCmd.Execute(); err != nil {
log.Fatal(err)
}
}
func init() {
// init config
cfg = config.New()
cfg.IsLambda = len(os.Getenv("AWS_EXECUTION_ENV")) > 0
// initialize cobra
cobra.OnInitialize(initConfig)
addFlags(rootCmd, cfg)
rootCmd.SetVersionTemplate(fmt.Sprintf("%s, commit %s, built at %s by %s\n", version, commit, date, builtBy))
// silence on the root cmd
rootCmd.SilenceUsage = true
rootCmd.SilenceErrors = true
}
// initConfig reads in ENV variables if set and unmarshals them into the config.
func initConfig() {
// allow reading from the environment
viper.SetEnvPrefix("ssosync")
viper.AutomaticEnv()
appEnvVars := []string{
"google_admin",
"google_customer_id",
"google_credentials",
"scim_access_token",
"scim_endpoint",
"log_level",
"log_format",
"ignore_users",
"ignore_groups",
"include_groups",
"user_match",
"group_match",
"sync_method",
}
for _, e := range appEnvVars {
if err := viper.BindEnv(e); err != nil {
log.Fatalf(errors.Wrap(err, "cannot bind environment variable").Error())
}
}
if err := viper.Unmarshal(&cfg); err != nil {
log.Fatalf(errors.Wrap(err, "cannot unmarshal config").Error())
}
// config logger
logConfig(cfg)
if cfg.IsLambda {
configLambda()
}
}
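// Note (illustrative): with the "ssosync" env prefix and the bindings above, every option
// can also be supplied through the environment, e.g. SSOSYNC_SCIM_ENDPOINT or SSOSYNC_LOG_LEVEL.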
func configLambda() {
s := session.Must(session.NewSession())
svc := secretsmanager.New(s)
secrets := config.NewSecrets(svc)
unwrap, err := secrets.GoogleAdminEmail()
if err != nil {
log.Fatalf(errors.Wrap(err, "cannot read config").Error())
}
cfg.GoogleAdmin = unwrap
unwrap, err = secrets.GoogleCredentials()
if err != nil {
log.Fatalf(errors.Wrap(err, "cannot read config").Error())
}
cfg.GoogleCredentials = unwrap
unwrap, err = secrets.SCIMAccessToken()
if err != nil {
log.Fatalf(errors.Wrap(err, "cannot read config").Error())
}
cfg.SCIMAccessToken = unwrap
unwrap, err = secrets.SCIMEndpointUrl()
if err != nil {
log.Fatalf(errors.Wrap(err, "cannot read config").Error())
}
cfg.SCIMEndpoint = unwrap
}
func addFlags(cmd *cobra.Command, cfg *config.Config) {
rootCmd.PersistentFlags().StringVarP(&cfg.GoogleCredentials, "google-admin", "a", config.DefaultGoogleCredentials, "path to find credentials file for Google Workspace")
rootCmd.PersistentFlags().BoolVarP(&cfg.Debug, "debug", "d", config.DefaultDebug, "enable verbose / debug logging")
rootCmd.PersistentFlags().StringVarP(&cfg.LogFormat, "log-format", "", config.DefaultLogFormat, "log format")
rootCmd.PersistentFlags().StringVarP(&cfg.LogLevel, "log-level", "", config.DefaultLogLevel, "log level")
rootCmd.Flags().StringVarP(&cfg.SCIMAccessToken, "access-token", "t", "", "AWS SSO SCIM API Access Token")
rootCmd.Flags().StringVarP(&cfg.SCIMEndpoint, "endpoint", "e", "", "AWS SSO SCIM API Endpoint")
rootCmd.Flags().StringVarP(&cfg.GoogleCredentials, "google-credentials", "c", config.DefaultGoogleCredentials, "path to Google Workspace credentials file")
rootCmd.Flags().StringVarP(&cfg.GoogleAdmin, "google-admin", "u", "", "Google Workspace admin user email")
rootCmd.Flags().StringVarP(&cfg.GoogleCustomerId, "google-customer-id", "", config.DefaultGoogleCustomerId, "Google Workspace customer id")
rootCmd.Flags().StringSliceVar(&cfg.IgnoreUsers, "ignore-users", []string{}, "ignores these Google Workspace users")
rootCmd.Flags().StringSliceVar(&cfg.IgnoreGroups, "ignore-groups", []string{}, "ignores these Google Workspace groups")
rootCmd.Flags().StringSliceVar(&cfg.IncludeGroups, "include-groups", []string{}, "include only these Google Workspace groups, NOTE: only works when --sync-method 'users_groups'")
rootCmd.Flags().StringSliceVarP(&cfg.UserMatch, "user-match", "m", []string{""}, "Google Workspace Users filter query parameter, example: 'name:John* email:admin*', see: https://developers.google.com/admin-sdk/directory/v1/guides/search-users (You can specify this flag multiple times for OR clause)")
rootCmd.Flags().StringSliceVarP(&cfg.GroupMatch, "group-match", "g", []string{""}, "Google Workspace Groups filter query parameter, example: 'name:Admin* email:aws-*', see: https://developers.google.com/admin-sdk/directory/v1/guides/search-groups (You can specify this flag multiple times for OR clause)")
rootCmd.Flags().StringVarP(&cfg.SyncMethod, "sync-method", "s", config.DefaultSyncMethod, "Sync method to use (users_groups|groups)")
}
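// Illustrative invocation (not part of the original source); the endpoint, token, credentials
// path and admin address below are placeholders only:
//   ssosync --endpoint https://scim.example.com/scim/v2 --access-token "<token>" \
//     --google-credentials credentials.json --google-admin admin@example.com --sync-method groups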
func logConfig(cfg *config.Config) {
// reset log format
if cfg.LogFormat == "json" {
log.SetFormatter(&log.JSONFormatter{})
}
if cfg.Debug {
cfg.LogLevel = "debug"
}
// set the configured log level
if level, err := log.ParseLevel(cfg.LogLevel); err == nil {
log.SetLevel(level)
}
}
|
[
"\"AWS_EXECUTION_ENV\""
] |
[] |
[
"AWS_EXECUTION_ENV"
] |
[]
|
["AWS_EXECUTION_ENV"]
|
go
| 1 | 0 | |
tests/common/impala_test_suite.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# The base class that should be used for almost all Impala tests
import grp
import json
import logging
import os
import pprint
import pwd
import pytest
import re
import requests
import shutil
import socket
import subprocess
import tempfile
import time
from functools import wraps
from getpass import getuser
from random import choice
from subprocess import check_call
from tests.common.base_test_suite import BaseTestSuite
from tests.common.errors import Timeout
from tests.common.impala_connection import create_connection
from tests.common.impala_service import ImpaladService
from tests.common.test_dimensions import (
ALL_BATCH_SIZES,
ALL_CLUSTER_SIZES,
ALL_DISABLE_CODEGEN_OPTIONS,
ALL_NODES_ONLY,
TableFormatInfo,
create_exec_option_dimension,
get_dataset_from_workload,
load_table_info_dimension)
from tests.common.test_result_verifier import (
try_compile_regex,
verify_raw_results,
verify_runtime_profile)
from tests.common.test_vector import ImpalaTestDimension
from tests.performance.query import Query
from tests.performance.query_exec_functions import execute_using_jdbc
from tests.performance.query_executor import JdbcQueryExecConfig
from tests.util.filesystem_utils import (
IS_S3,
IS_ABFS,
IS_ADLS,
S3_BUCKET_NAME,
ADLS_STORE_NAME,
FILESYSTEM_PREFIX,
FILESYSTEM_NAME)
from tests.util.hdfs_util import (
HdfsConfig,
get_hdfs_client,
get_hdfs_client_from_conf,
NAMENODE)
from tests.util.s3_util import S3Client
from tests.util.abfs_util import ABFSClient
from tests.util.test_file_parser import (
QueryTestSectionReader,
parse_query_test_file,
write_test_file)
from tests.util.thrift_util import create_transport
# Imports required for Hive Metastore Client
from hive_metastore import ThriftHiveMetastore
from thrift.protocol import TBinaryProtocol
# Initializing the logger before conditional imports, since we will need it
# for them.
LOG = logging.getLogger('impala_test_suite')
# The ADLS python client isn't downloaded when ADLS isn't the target FS, so do a
# conditional import.
if IS_ADLS:
try:
from tests.util.adls_util import ADLSClient
except ImportError:
LOG.error("Need the ADLSClient for testing with ADLS")
IMPALAD_HOST_PORT_LIST = pytest.config.option.impalad.split(',')
assert len(IMPALAD_HOST_PORT_LIST) > 0, 'Must specify at least 1 impalad to target'
IMPALAD = IMPALAD_HOST_PORT_LIST[0]
IMPALAD_BEESWAX_HOST_PORT = IMPALAD_HOST_PORT_LIST[0]
IMPALAD_HS2_HOST_PORT =\
IMPALAD.split(':')[0] + ":" + pytest.config.option.impalad_hs2_port
HIVE_HS2_HOST_PORT = pytest.config.option.hive_server2
WORKLOAD_DIR = os.environ['IMPALA_WORKLOAD_DIR']
HDFS_CONF = HdfsConfig(pytest.config.option.minicluster_xml_conf)
TARGET_FILESYSTEM = os.getenv("TARGET_FILESYSTEM") or "hdfs"
IMPALA_HOME = os.getenv("IMPALA_HOME")
INTERNAL_LISTEN_HOST = os.getenv("INTERNAL_LISTEN_HOST")
# Some tests use the IP instead of the host.
INTERNAL_LISTEN_IP = socket.gethostbyname_ex(INTERNAL_LISTEN_HOST)[2][0]
EE_TEST_LOGS_DIR = os.getenv("IMPALA_EE_TEST_LOGS_DIR")
# Match any SET statement. Assume that query options' names
# only contain alphabets, underscores and digits after position 1.
# The statement may include SQL line comments starting with --, which we need to
# strip out. The test file parser already strips out comments starting with #.
COMMENT_LINES_REGEX = r'(?:\s*--.*\n)*'
SET_PATTERN = re.compile(
COMMENT_LINES_REGEX + r'\s*set\s*([a-zA-Z_][a-zA-Z0-9_]*)\s*=*', re.I)
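# For example (illustrative), SET_PATTERN.match('-- a comment\nset mem_limit=1g').group(1)
# returns 'mem_limit'.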
METRICS_URL = 'http://localhost:25000/metrics?json'
# Base class for Impala tests. All impala test cases should inherit from this class
class ImpalaTestSuite(BaseTestSuite):
@classmethod
def add_test_dimensions(cls):
"""
A hook for adding additional dimensions.
By default load the table_info and exec_option dimensions, but if a test wants to
add more dimensions or different dimensions they can override this function.
"""
super(ImpalaTestSuite, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_dimension(
cls.create_table_info_dimension(cls.exploration_strategy()))
cls.ImpalaTestMatrix.add_dimension(cls.__create_exec_option_dimension())
# Execute tests through Beeswax by default. Individual tests that have been converted
# to work with the HS2 client can add HS2 in addition to or instead of beeswax.
cls.ImpalaTestMatrix.add_dimension(ImpalaTestDimension('protocol', 'beeswax'))
@classmethod
def setup_class(cls):
"""Setup section that runs before each test suite"""
cls.hive_client, cls.client, cls.hs2_client = [None, None, None]
# Create a Hive Metastore Client (used for executing some test SETUP steps)
metastore_host, metastore_port = pytest.config.option.metastore_server.split(':')
trans_type = 'buffered'
if pytest.config.option.use_kerberos:
trans_type = 'kerberos'
cls.hive_transport = create_transport(
host=metastore_host,
port=metastore_port,
service=pytest.config.option.hive_service_name,
transport_type=trans_type)
protocol = TBinaryProtocol.TBinaryProtocol(cls.hive_transport)
cls.hive_client = ThriftHiveMetastore.Client(protocol)
cls.hive_transport.open()
# Create a connection to Impala, self.client is Beeswax so that existing tests that
# assume beeswax do not need modification (yet).
cls.client = cls.create_impala_client(protocol='beeswax')
try:
cls.hs2_client = cls.create_impala_client(protocol='hs2')
except Exception, e:
# HS2 connection can fail for benign reasons, e.g. running with unsupported auth.
LOG.info("HS2 connection setup failed, continuing...: {0}", e)
# Default query options are populated on demand.
cls.default_query_options = {}
cls.impalad_test_service = cls.create_impala_service()
cls.hdfs_client = cls.create_hdfs_client()
cls.filesystem_client = cls.hdfs_client
if IS_S3:
cls.filesystem_client = S3Client(S3_BUCKET_NAME)
elif IS_ABFS:
cls.filesystem_client = ABFSClient()
elif IS_ADLS:
cls.filesystem_client = ADLSClient(ADLS_STORE_NAME)
# Override the shell history path so that commands run by any tests
# don't write any history into the developer's file.
os.environ['IMPALA_HISTFILE'] = '/dev/null'
@classmethod
def teardown_class(cls):
"""Setup section that runs after each test suite"""
# Cleanup the Impala and Hive Metastore client connections
if cls.hive_transport:
cls.hive_transport.close()
if cls.client:
cls.client.close()
if cls.hs2_client:
cls.hs2_client.close()
@classmethod
def create_impala_client(cls, host_port=None, protocol='beeswax'):
if host_port is None:
host_port = cls.__get_default_host_port(protocol)
client = create_connection(host_port=host_port,
use_kerberos=pytest.config.option.use_kerberos, protocol=protocol)
client.connect()
return client
@classmethod
def __get_default_host_port(cls, protocol):
if protocol == 'beeswax':
return IMPALAD
else:
assert protocol == 'hs2'
return IMPALAD_HS2_HOST_PORT
@classmethod
def __get_cluster_host_ports(cls, protocol):
"""Return a list of host/port combinations for all impalads in the cluster."""
if protocol == 'beeswax':
return IMPALAD_HOST_PORT_LIST
else:
assert protocol == 'hs2'
# TODO: support running tests against multiple coordinators for HS2. It should work,
# we just need to update all test runners to pass in all host/port combinations for
# the cluster and then handle it here.
raise NotImplementedError(
"Not yet implemented: only one HS2 host/port can be configured")
@classmethod
def create_impala_service(cls, host_port=IMPALAD, webserver_port=25000):
host, port = host_port.split(':')
return ImpaladService(host, beeswax_port=port, webserver_port=webserver_port)
@classmethod
def create_hdfs_client(cls):
if pytest.config.option.namenode_http_address is None:
hdfs_client = get_hdfs_client_from_conf(HDFS_CONF)
else:
host, port = pytest.config.option.namenode_http_address.split(":")
hdfs_client = get_hdfs_client(host, port)
return hdfs_client
@classmethod
def all_db_names(cls):
results = cls.client.execute("show databases").data
# Extract first column - database name
return [row.split("\t")[0] for row in results]
@classmethod
def cleanup_db(cls, db_name, sync_ddl=1):
cls.client.execute("use default")
cls.client.set_configuration({'sync_ddl': sync_ddl})
cls.client.execute("drop database if exists `" + db_name + "` cascade")
def __restore_query_options(self, query_options_changed, impalad_client):
"""
Restore the list of modified query options to their default values.
"""
# Populate the default query options if the cache is empty.
if not self.default_query_options:
query_options = impalad_client.get_default_configuration()
for key, value in query_options.iteritems():
self.default_query_options[key.upper()] = value
# Restore all the changed query options.
for query_option in query_options_changed:
query_option = query_option.upper()
if query_option not in self.default_query_options:
continue
default_val = self.default_query_options[query_option]
query_str = 'SET ' + query_option + '="' + default_val + '"'
try:
impalad_client.execute(query_str)
except Exception as e:
LOG.info('Unexpected exception when executing ' + query_str + ' : ' + str(e))
def get_impala_partition_info(self, table_name, *include_fields):
"""
Find information about partitions of a table, as returned by a SHOW PARTITION
statement. Return a list that contains one tuple for each partition.
If 'include_fields' is not specified, the tuples will contain all the fields returned
by SHOW PARTITION. Otherwise, return only those fields whose names are listed in
'include_fields'. Field names are compared case-insensitively.
"""
exec_result = self.client.execute('show partitions %s' % table_name)
fieldSchemas = exec_result.schema.fieldSchemas
fields_dict = {}
for idx, fs in enumerate(fieldSchemas):
fields_dict[fs.name.lower()] = idx
rows = exec_result.get_data().split('\n')
rows.pop()
fields_idx = []
for fn in include_fields:
fn = fn.lower()
assert fn in fields_dict, 'Invalid field: %s' % fn
fields_idx.append(fields_dict[fn])
result = []
for row in rows:
fields = row.split('\t')
if not fields_idx:
result_fields = fields
else:
result_fields = []
for i in fields_idx:
result_fields.append(fields[i])
result.append(tuple(result_fields))
return result
def get_debug_page(self, page_url):
"""Returns the content of the debug page 'page_url' as json."""
response = requests.get(page_url)
assert response.status_code == requests.codes.ok
return json.loads(response.text)
def get_metric(self, name):
"""Finds the metric with name 'name' and returns its value as an int."""
def iter_metrics(group):
for m in group['metrics']:
yield m
for c in group['child_groups']:
for m in iter_metrics(c):
yield m
metrics = self.get_debug_page(METRICS_URL)['metric_group']
for m in iter_metrics(metrics):
if m['name'] == name:
return int(m['value'])
assert False, "Could not find metric: %s" % name
def __verify_exceptions(self, expected_strs, actual_str, use_db):
"""
Verifies that at least one of the strings in 'expected_str' is either:
* A row_regex: line that matches the actual exception string 'actual_str'
* A substring of the actual exception string 'actual_str'.
"""
actual_str = actual_str.replace('\n', '')
for expected_str in expected_strs:
# In error messages, some paths are always qualified and some are not.
# So, allow both $NAMENODE and $FILESYSTEM_PREFIX to be used in CATCH.
expected_str = expected_str.strip() \
.replace('$FILESYSTEM_PREFIX', FILESYSTEM_PREFIX) \
.replace('$FILESYSTEM_NAME', FILESYSTEM_NAME) \
.replace('$NAMENODE', NAMENODE) \
.replace('$IMPALA_HOME', IMPALA_HOME) \
.replace('$INTERNAL_LISTEN_HOST', INTERNAL_LISTEN_HOST)\
.replace('$INTERNAL_LISTEN_IP', INTERNAL_LISTEN_IP)
if use_db: expected_str = expected_str.replace('$DATABASE', use_db)
# Strip newlines so we can split error message into multiple lines
expected_str = expected_str.replace('\n', '')
expected_regex = try_compile_regex(expected_str)
if expected_regex:
if expected_regex.match(actual_str): return
else:
# Not a regex - check if expected substring is present in actual.
if expected_str in actual_str: return
assert False, 'Unexpected exception string. Expected: %s\nNot found in actual: %s' % \
(expected_str, actual_str)
def __verify_results_and_errors(self, vector, test_section, result, use_db):
"""Verifies that both results and error sections are as expected. Rewrites both
by replacing $NAMENODE, $DATABASE and $IMPALA_HOME with their actual values, and
optionally rewriting filenames with __HDFS_FILENAME__, to ensure that expected and
actual values are easily compared.
"""
replace_filenames_with_placeholder = True
for section_name in ('RESULTS', 'DBAPI_RESULTS', 'ERRORS'):
if section_name in test_section:
if "$NAMENODE" in test_section[section_name]:
replace_filenames_with_placeholder = False
test_section[section_name] = test_section[section_name] \
.replace('$NAMENODE', NAMENODE) \
.replace('$IMPALA_HOME', IMPALA_HOME) \
.replace('$USER', getuser()) \
.replace('$FILESYSTEM_NAME', FILESYSTEM_NAME) \
.replace('$INTERNAL_LISTEN_HOST',
INTERNAL_LISTEN_HOST) \
.replace('$INTERNAL_LISTEN_IP', INTERNAL_LISTEN_IP)
if use_db:
test_section[section_name] = test_section[section_name].replace('$DATABASE', use_db)
result_section, type_section = 'RESULTS', 'TYPES'
if vector.get_value('protocol') == 'hs2':
if 'DBAPI_RESULTS' in test_section:
assert 'RESULTS' in test_section,\
"Base RESULTS section must always be included alongside DBAPI_RESULTS"
# In some cases Impyla (the HS2 dbapi client) is expected to return different
# results, so use the dbapi-specific section if present.
result_section = 'DBAPI_RESULTS'
if 'HS2_TYPES' in test_section:
assert 'TYPES' in test_section,\
"Base TYPES section must always be included alongside HS2_TYPES"
        # In some cases HS2 types are expected to differ from Beeswax types (e.g. see
# IMPALA-914), so use the HS2-specific section if present.
type_section = 'HS2_TYPES'
verify_raw_results(test_section, result, vector.get_value('table_format').file_format,
result_section, type_section, pytest.config.option.update_results,
replace_filenames_with_placeholder)
def run_test_case(self, test_file_name, vector, use_db=None, multiple_impalad=False,
encoding=None, test_file_vars=None):
"""
Runs the queries in the specified test based on the vector values
    Runs the query targeting the file format/compression specified in the test
vector and the exec options specified in the test vector. If multiple_impalad=True
a connection to a random impalad will be chosen to execute each test section.
Otherwise, the default impalad client will be used. If 'protocol' (either 'hs2' or
'beeswax') is set in the vector, a client for that protocol is used. Otherwise we
use the default: beeswax.
Additionally, the encoding for all test data can be specified using the 'encoding'
parameter. This is useful when data is ingested in a different encoding (ex.
latin). If not set, the default system encoding will be used.
If a dict 'test_file_vars' is provided, then all keys will be replaced with their
values in queries before they are executed. Callers need to avoid using reserved key
names, see 'reserved_keywords' below.
"""
table_format_info = vector.get_value('table_format')
exec_options = vector.get_value('exec_option')
protocol = vector.get_value('protocol')
# Resolve the current user's primary group name.
group_id = pwd.getpwnam(getuser()).pw_gid
group_name = grp.getgrgid(group_id).gr_name
target_impalad_clients = list()
if multiple_impalad:
target_impalad_clients =\
[ImpalaTestSuite.create_impala_client(host_port, protocol=protocol)
for host_port in self.__get_cluster_host_ports(protocol)]
else:
if protocol == 'beeswax':
target_impalad_clients = [self.client]
else:
assert protocol == 'hs2'
target_impalad_clients = [self.hs2_client]
# Change the database to reflect the file_format, compression codec etc, or the
# user specified database for all targeted impalad.
for impalad_client in target_impalad_clients:
ImpalaTestSuite.change_database(impalad_client,
table_format_info, use_db, pytest.config.option.scale_factor)
impalad_client.set_configuration(exec_options)
sections = self.load_query_test_file(self.get_workload(), test_file_name,
encoding=encoding)
for test_section in sections:
if 'SHELL' in test_section:
assert len(test_section) == 1, \
"SHELL test sections can't contain other sections"
cmd = test_section['SHELL']\
.replace('$FILESYSTEM_PREFIX', FILESYSTEM_PREFIX)\
.replace('$FILESYSTEM_NAME', FILESYSTEM_NAME)\
.replace('$IMPALA_HOME', IMPALA_HOME)
if use_db: cmd = cmd.replace('$DATABASE', use_db)
LOG.info("Shell command: " + cmd)
check_call(cmd, shell=True)
continue
if 'QUERY' not in test_section:
assert 0, 'Error in test file %s. Test cases require a -- QUERY section.\n%s' %\
(test_file_name, pprint.pformat(test_section))
if 'SETUP' in test_section:
self.execute_test_case_setup(test_section['SETUP'], table_format_info)
# TODO: support running query tests against different scale factors
query = QueryTestSectionReader.build_query(test_section['QUERY']
.replace('$GROUP_NAME', group_name)
.replace('$IMPALA_HOME', IMPALA_HOME)
.replace('$FILESYSTEM_PREFIX', FILESYSTEM_PREFIX)
.replace('$FILESYSTEM_NAME', FILESYSTEM_NAME)
.replace('$SECONDARY_FILESYSTEM', os.getenv("SECONDARY_FILESYSTEM") or str())
.replace('$USER', getuser())
.replace('$INTERNAL_LISTEN_HOST', INTERNAL_LISTEN_HOST)
.replace('$INTERNAL_LISTEN_IP', INTERNAL_LISTEN_IP))
if use_db: query = query.replace('$DATABASE', use_db)
reserved_keywords = ["$DATABASE", "$FILESYSTEM_PREFIX", "$FILESYSTEM_NAME",
"$GROUP_NAME", "$IMPALA_HOME", "$NAMENODE", "$QUERY",
"$SECONDARY_FILESYSTEM", "$USER"]
if test_file_vars:
for key, value in test_file_vars.iteritems():
if key in reserved_keywords:
raise RuntimeError("Key {0} is reserved".format(key))
query = query.replace(key, value)
if 'QUERY_NAME' in test_section:
LOG.info('Query Name: \n%s\n' % test_section['QUERY_NAME'])
# Support running multiple queries within the same test section, only verifying the
# result of the final query. The main use case is to allow for 'USE database'
# statements before a query executes, but it is not limited to that.
# TODO: consider supporting result verification of all queries in the future
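      # e.g. (illustrative) a QUERY section containing "use tpch; select count(*) from
      # lineitem" runs both statements, but only the count(*) result is verified.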
result = None
target_impalad_client = choice(target_impalad_clients)
query_options_changed = []
try:
user = None
if 'USER' in test_section:
# Create a new client so the session will use the new username.
user = test_section['USER'].strip()
target_impalad_client = self.create_impala_client(protocol=protocol)
for query in query.split(';'):
set_pattern_match = SET_PATTERN.match(query)
if set_pattern_match != None:
query_options_changed.append(set_pattern_match.groups()[0])
assert set_pattern_match.groups()[0] not in vector.get_value("exec_option"), \
"%s cannot be set in the '.test' file since it is in the test vector. " \
"Consider deepcopy()-ing the vector and removing this option in the " \
"python test." % set_pattern_match.groups()[0]
result = self.__execute_query(target_impalad_client, query, user=user)
except Exception as e:
if 'CATCH' in test_section:
self.__verify_exceptions(test_section['CATCH'], str(e), use_db)
continue
raise
finally:
if len(query_options_changed) > 0:
self.__restore_query_options(query_options_changed, target_impalad_client)
if 'CATCH' in test_section and '__NO_ERROR__' not in test_section['CATCH']:
expected_str = " or ".join(test_section['CATCH']).strip() \
.replace('$FILESYSTEM_PREFIX', FILESYSTEM_PREFIX) \
.replace('$FILESYSTEM_NAME', FILESYSTEM_NAME) \
.replace('$NAMENODE', NAMENODE) \
.replace('$IMPALA_HOME', IMPALA_HOME)
assert False, "Expected exception: %s" % expected_str
assert result is not None
assert result.success
# Decode the results read back if the data is stored with a specific encoding.
if encoding: result.data = [row.decode(encoding) for row in result.data]
# Replace $NAMENODE in the expected results with the actual namenode URI.
if 'RESULTS' in test_section:
# Combining 'RESULTS' with 'DML_RESULTS" is currently unsupported because
# __verify_results_and_errors calls verify_raw_results which always checks
# ERRORS, TYPES, LABELS, etc. which doesn't make sense if there are two
# different result sets to consider (IMPALA-4471).
assert 'DML_RESULTS' not in test_section
self.__verify_results_and_errors(vector, test_section, result, use_db)
else:
# TODO: Can't validate errors without expected results for now.
assert 'ERRORS' not in test_section,\
"'ERRORS' sections must have accompanying 'RESULTS' sections"
# If --update_results, then replace references to the namenode URI with $NAMENODE.
if pytest.config.option.update_results and 'RESULTS' in test_section:
test_section['RESULTS'] = test_section['RESULTS'] \
.replace(NAMENODE, '$NAMENODE') \
.replace('$IMPALA_HOME', IMPALA_HOME) \
.replace(INTERNAL_LISTEN_HOST, '$INTERNAL_LISTEN_HOST') \
.replace(INTERNAL_LISTEN_IP, '$INTERNAL_LISTEN_IP')
rt_profile_info = None
if 'RUNTIME_PROFILE_%s' % table_format_info.file_format in test_section:
# If this table format has a RUNTIME_PROFILE section specifically for it, evaluate
# that section and ignore any general RUNTIME_PROFILE sections.
rt_profile_info = 'RUNTIME_PROFILE_%s' % table_format_info.file_format
elif 'RUNTIME_PROFILE' in test_section:
rt_profile_info = 'RUNTIME_PROFILE'
if rt_profile_info is not None:
rt_profile = verify_runtime_profile(test_section[rt_profile_info],
result.runtime_profile,
update_section=pytest.config.option.update_results)
if pytest.config.option.update_results:
test_section[rt_profile_info] = "".join(rt_profile)
if 'DML_RESULTS' in test_section:
assert 'ERRORS' not in test_section
# The limit is specified to ensure the queries aren't unbounded. We shouldn't have
        # test files that are checking the contents of tables larger than that anyway.
dml_results_query = "select * from %s limit 1000" % \
test_section['DML_RESULTS_TABLE']
dml_result = self.__execute_query(target_impalad_client, dml_results_query)
verify_raw_results(test_section, dml_result,
vector.get_value('table_format').file_format, result_section='DML_RESULTS',
update_section=pytest.config.option.update_results)
if pytest.config.option.update_results:
output_file = os.path.join(EE_TEST_LOGS_DIR,
test_file_name.replace('/','_') + ".test")
write_test_file(output_file, sections, encoding=encoding)
def execute_test_case_setup(self, setup_section, table_format):
"""
Executes a test case 'SETUP' section
The test case 'SETUP' section is mainly used for insert tests. These tests need to
have some actions performed before each test case to ensure the target tables are
    empty. The currently supported setup actions are:
RESET <table name> - Drop and recreate the table
DROP PARTITIONS <table name> - Drop all partitions from the table
"""
setup_section = QueryTestSectionReader.build_query(setup_section)
for row in setup_section.split('\n'):
row = row.lstrip()
if row.startswith('RESET'):
db_name, table_name = QueryTestSectionReader.get_table_name_components(\
table_format, row.split('RESET')[1])
self.__reset_table(db_name, table_name)
self.client.execute("invalidate metadata " + db_name + "." + table_name)
elif row.startswith('DROP PARTITIONS'):
db_name, table_name = QueryTestSectionReader.get_table_name_components(\
table_format, row.split('DROP PARTITIONS')[1])
self.__drop_partitions(db_name, table_name)
self.client.execute("invalidate metadata " + db_name + "." + table_name)
else:
assert False, 'Unsupported setup command: %s' % row
@classmethod
def change_database(cls, impala_client, table_format=None,
db_name=None, scale_factor=None):
if db_name == None:
assert table_format != None
db_name = QueryTestSectionReader.get_db_name(table_format,
scale_factor if scale_factor else '')
query = 'use %s' % db_name
# Clear the exec_options before executing a USE statement.
# The USE statement should not fail for negative exec_option tests.
impala_client.clear_configuration()
impala_client.execute(query)
def execute_wrapper(function):
"""
Issues a use database query before executing queries.
    Database names depend on the table's input format, while the table names themselves
    remain the same. A 'use <database>' statement is therefore issued before query
    execution, so the database name needs to be built before the query runs. This method
    wraps the different execute methods and provides a common interface for issuing the
    proper USE command.
"""
@wraps(function)
def wrapper(*args, **kwargs):
table_format = None
if kwargs.get('table_format'):
table_format = kwargs.get('table_format')
del kwargs['table_format']
if kwargs.get('vector'):
table_format = kwargs.get('vector').get_value('table_format')
del kwargs['vector']
# self is the implicit first argument
if table_format is not None:
args[0].change_database(args[0].client, table_format)
return function(*args, **kwargs)
return wrapper
@classmethod
@execute_wrapper
def execute_query_expect_success(cls, impalad_client, query, query_options=None,
user=None):
"""Executes a query and asserts if the query fails"""
result = cls.__execute_query(impalad_client, query, query_options, user)
assert result.success
return result
@execute_wrapper
def execute_query_expect_failure(self, impalad_client, query, query_options=None,
user=None):
"""Executes a query and asserts if the query succeeds"""
result = None
try:
result = self.__execute_query(impalad_client, query, query_options, user)
    except Exception as e:
return e
assert not result.success, "No failure encountered for query %s" % query
return result
@execute_wrapper
def execute_query_unchecked(self, impalad_client, query, query_options=None, user=None):
return self.__execute_query(impalad_client, query, query_options, user)
@execute_wrapper
def execute_query(self, query, query_options=None):
return self.__execute_query(self.client, query, query_options)
def execute_query_using_client(self, client, query, vector):
self.change_database(client, vector.get_value('table_format'))
query_options = vector.get_value('exec_option')
if query_options is not None: client.set_configuration(query_options)
return client.execute(query)
def execute_query_async_using_client(self, client, query, vector):
self.change_database(client, vector.get_value('table_format'))
query_options = vector.get_value('exec_option')
if query_options is not None: client.set_configuration(query_options)
return client.execute_async(query)
def close_query_using_client(self, client, query):
return client.close_query(query)
@execute_wrapper
def execute_query_async(self, query, query_options=None):
if query_options is not None: self.client.set_configuration(query_options)
return self.client.execute_async(query)
@execute_wrapper
def close_query(self, query):
return self.client.close_query(query)
@execute_wrapper
def execute_scalar(self, query, query_options=None):
result = self.__execute_query(self.client, query, query_options)
assert len(result.data) <= 1, 'Multiple values returned from scalar'
return result.data[0] if len(result.data) == 1 else None
def exec_and_compare_hive_and_impala_hs2(self, stmt, compare = lambda x, y: x == y):
"""Compare Hive and Impala results when executing the same statment over HS2"""
# execute_using_jdbc expects a Query object. Convert the query string into a Query
# object
query = Query()
query.query_str = stmt
# Run the statement targeting Hive
exec_opts = JdbcQueryExecConfig(impalad=HIVE_HS2_HOST_PORT, transport='SASL')
hive_results = execute_using_jdbc(query, exec_opts).data
# Run the statement targeting Impala
exec_opts = JdbcQueryExecConfig(impalad=IMPALAD_HS2_HOST_PORT, transport='NOSASL')
impala_results = execute_using_jdbc(query, exec_opts).data
# Compare the results
assert (impala_results is not None) and (hive_results is not None)
assert compare(impala_results, hive_results)
def load_query_test_file(self, workload, file_name, valid_section_names=None,
encoding=None):
"""
Loads/Reads the specified query test file. Accepts the given section names as valid.
Uses a default list of valid section names if valid_section_names is None.
"""
test_file_path = os.path.join(WORKLOAD_DIR, workload, 'queries', file_name + '.test')
if not os.path.isfile(test_file_path):
assert False, 'Test file not found: %s' % file_name
return parse_query_test_file(test_file_path, valid_section_names, encoding=encoding)
def __drop_partitions(self, db_name, table_name):
"""Drops all partitions in the given table"""
for partition in self.hive_client.get_partition_names(db_name, table_name, -1):
assert self.hive_client.drop_partition_by_name(db_name, table_name, \
partition, True), 'Could not drop partition: %s' % partition
@classmethod
def __execute_query(cls, impalad_client, query, query_options=None, user=None):
"""Executes the given query against the specified Impalad"""
if query_options is not None: impalad_client.set_configuration(query_options)
return impalad_client.execute(query, user=user)
def __reset_table(self, db_name, table_name):
"""Resets a table (drops and recreates the table)"""
table = self.hive_client.get_table(db_name, table_name)
assert table is not None
self.hive_client.drop_table(db_name, table_name, True)
self.hive_client.create_table(table)
def clone_table(self, src_tbl, dst_tbl, recover_partitions, vector):
src_loc = self._get_table_location(src_tbl, vector)
self.client.execute("create external table {0} like {1} location '{2}'"\
.format(dst_tbl, src_tbl, src_loc))
if recover_partitions:
self.client.execute("alter table {0} recover partitions".format(dst_tbl))
def appx_equals(self, a, b, diff_perc):
"""Returns True if 'a' and 'b' are within 'diff_perc' percent of each other,
False otherwise. 'diff_perc' must be a float in [0,1]."""
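    # e.g. appx_equals(98.0, 100.0, 0.05) passes because |98 - 100| / 100 = 0.02 <= 0.05.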
if a == b: return True # Avoid division by 0
assert abs(a - b) / float(max(a,b)) <= diff_perc
def _get_table_location(self, table_name, vector):
""" Returns the HDFS location of the table """
result = self.execute_query_using_client(self.client,
"describe formatted %s" % table_name, vector)
for row in result.data:
if 'Location:' in row:
return row.split('\t')[1]
# This should never happen.
assert 0, 'Unable to get location for table: ' + table_name
# TODO(todd) make this use Thrift to connect to HS2 instead of shelling
# out to beeline for better performance
def run_stmt_in_hive(self, stmt, username=getuser()):
"""
Run a statement in Hive, returning stdout if successful and throwing
RuntimeError(stderr) if not.
"""
# When HiveServer2 is configured to use "local" mode (i.e., MR jobs are run
# in-process rather than on YARN), Hadoop's LocalDistributedCacheManager has a
    # race, wherein it tries to localize jars into
# /tmp/hadoop-$USER/mapred/local/<millis>. Two simultaneous Hive queries
# against HS2 can conflict here. Weirdly LocalJobRunner handles a similar issue
# (with the staging directory) by appending a random number. To overcome this,
# in the case that HS2 is on the local machine (which we conflate with also
# running MR jobs locally), we move the temporary directory into a unique
# directory via configuration. This workaround can be removed when
# https://issues.apache.org/jira/browse/MAPREDUCE-6441 is resolved.
# A similar workaround is used in bin/load-data.py.
tmpdir = None
beeline_opts = []
if pytest.config.option.hive_server2.startswith("localhost:"):
tmpdir = tempfile.mkdtemp(prefix="impala-tests-")
beeline_opts += ['--hiveconf', 'mapreduce.cluster.local.dir={0}'.format(tmpdir)]
try:
# Remove HADOOP_CLASSPATH from environment. Beeline doesn't need it,
# and doing so avoids Hadoop 3's classpath de-duplication code from
# placing $HADOOP_CONF_DIR too late in the classpath to get the right
# log4j configuration file picked up. Some log4j configuration files
# in Hadoop's jars send logging to stdout, confusing Impala's test
# framework.
env = os.environ.copy()
env.pop("HADOOP_CLASSPATH", None)
call = subprocess.Popen(
['beeline',
'--outputformat=csv2',
'-u', 'jdbc:hive2://' + pytest.config.option.hive_server2,
'-n', username,
'-e', stmt] + beeline_opts,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
# Beeline in Hive 2.1 will read from stdin even when "-e"
# is specified; explicitly make sure there's nothing to
# read to avoid hanging, especially when running interactively
# with py.test.
stdin=file("/dev/null"),
env=env)
(stdout, stderr) = call.communicate()
call.wait()
if call.returncode != 0:
raise RuntimeError(stderr)
return stdout
finally:
if tmpdir is not None: shutil.rmtree(tmpdir)
def hive_partition_names(self, table_name):
"""Find the names of the partitions of a table, as Hive sees them.
The return format is a list of strings. Each string represents a partition
value of a given column in a format like 'column1=7/column2=8'.
"""
return self.run_stmt_in_hive(
'show partitions %s' % table_name).split('\n')[1:-1]
@classmethod
def create_table_info_dimension(cls, exploration_strategy):
# If the user has specified a specific set of table formats to run against, then
# use those. Otherwise, load from the workload test vectors.
if pytest.config.option.table_formats:
table_formats = list()
for tf in pytest.config.option.table_formats.split(','):
dataset = get_dataset_from_workload(cls.get_workload())
table_formats.append(TableFormatInfo.create_from_string(dataset, tf))
tf_dimensions = ImpalaTestDimension('table_format', *table_formats)
else:
tf_dimensions = load_table_info_dimension(cls.get_workload(), exploration_strategy)
    # If 'skip_hbase' is specified or the filesystem is isilon, s3, abfs, adls or local,
    # we don't need the hbase dimension.
if pytest.config.option.skip_hbase or TARGET_FILESYSTEM.lower() \
in ['s3', 'isilon', 'local', 'abfs', 'adls']:
for tf_dimension in tf_dimensions:
if tf_dimension.value.file_format == "hbase":
tf_dimensions.remove(tf_dimension)
break
return tf_dimensions
@classmethod
def __create_exec_option_dimension(cls):
cluster_sizes = ALL_CLUSTER_SIZES
disable_codegen_options = ALL_DISABLE_CODEGEN_OPTIONS
batch_sizes = ALL_BATCH_SIZES
exec_single_node_option = [0]
if cls.exploration_strategy() == 'core':
disable_codegen_options = [False]
cluster_sizes = ALL_NODES_ONLY
return create_exec_option_dimension(cluster_sizes, disable_codegen_options,
batch_sizes,
exec_single_node_option=exec_single_node_option,
disable_codegen_rows_threshold_options=[0])
@classmethod
def exploration_strategy(cls):
default_strategy = pytest.config.option.exploration_strategy
if pytest.config.option.workload_exploration_strategy:
workload_strategies = pytest.config.option.workload_exploration_strategy.split(',')
for workload_strategy in workload_strategies:
workload_strategy = workload_strategy.split(':')
if len(workload_strategy) != 2:
          raise ValueError('Invalid workload:strategy format: %s' % workload_strategy)
if cls.get_workload() == workload_strategy[0]:
return workload_strategy[1]
return default_strategy
def wait_for_state(self, handle, expected_state, timeout):
"""Waits for the given 'query_handle' to reach the 'expected_state'. If it does not
reach the given state within 'timeout' seconds, the method throws an AssertionError.
"""
self.wait_for_any_state(handle, [expected_state], timeout)
def wait_for_any_state(self, handle, expected_states, timeout):
"""Waits for the given 'query_handle' to reach one of 'expected_states'. If it does
not reach one of the given states within 'timeout' seconds, the method throws an
AssertionError. Returns the final state.
"""
start_time = time.time()
actual_state = self.client.get_state(handle)
while actual_state not in expected_states and time.time() - start_time < timeout:
actual_state = self.client.get_state(handle)
time.sleep(0.5)
if actual_state not in expected_states:
raise Timeout("query {0} did not reach one of the expected states {1}, "
"last known state {2}".format(handle.get_handle().id, expected_states,
actual_state))
return actual_state
def wait_for_db_to_appear(self, db_name, timeout_s):
"""Wait until the database with 'db_name' is present in the impalad's local catalog.
    Fail after timeout_s if the database doesn't appear."""
start_time = time.time()
while time.time() - start_time < timeout_s:
try:
# This will throw an exception if the database is not present.
self.client.execute("describe database `{db_name}`".format(db_name=db_name))
return
except Exception:
time.sleep(0.2)
continue
raise Exception("DB {0} didn't show up after {1}s", db_name, timeout_s)
def wait_for_table_to_appear(self, db_name, table_name, timeout_s):
"""Wait until the table with 'table_name' in 'db_name' is present in the
    impalad's local catalog. Fail after timeout_s if the table doesn't appear."""
start_time = time.time()
while time.time() - start_time < timeout_s:
try:
# This will throw an exception if the table is not present.
self.client.execute("describe `{db_name}`.`{table_name}`".format(
db_name=db_name, table_name=table_name))
return
      except Exception as ex:
print str(ex)
time.sleep(0.2)
continue
raise Exception("Table {0}.{1} didn't show up after {2}s", db_name, table_name,
timeout_s)
def assert_impalad_log_contains(self, level, line_regex, expected_count=1):
"""
Convenience wrapper around assert_log_contains for impalad logs.
"""
self.assert_log_contains("impalad", level, line_regex, expected_count)
def assert_catalogd_log_contains(self, level, line_regex, expected_count=1):
"""
Convenience wrapper around assert_log_contains for catalogd logs.
"""
self.assert_log_contains("catalogd", level, line_regex, expected_count)
def assert_log_contains(self, daemon, level, line_regex, expected_count=1):
"""
Assert that the daemon log with specified level (e.g. ERROR, WARNING, INFO) contains
expected_count lines with a substring matching the regex. When expected_count is -1,
at least one match is expected.
When using this method to check log files of running processes, the caller should
make sure that log buffering has been disabled, for example by adding
'-logbuflevel=-1' to the daemon startup options.
"""
pattern = re.compile(line_regex)
found = 0
if hasattr(self, "impala_log_dir"):
log_dir = self.impala_log_dir
else:
log_dir = EE_TEST_LOGS_DIR
log_file_path = os.path.join(log_dir, daemon + "." + level)
# Resolve symlinks to make finding the file easier.
log_file_path = os.path.realpath(log_file_path)
with open(log_file_path) as log_file:
for line in log_file:
if pattern.search(line):
found += 1
if expected_count == -1:
assert found > 0, "Expected at least one line in file %s matching regex '%s'"\
", but found none." % (log_file_path, line_regex)
else:
assert found == expected_count, "Expected %d lines in file %s matching regex '%s'"\
", but found %d lines. Last line was: \n%s" %\
(expected_count, log_file_path, line_regex, found, line)
|
[] |
[] |
[
"IMPALA_HISTFILE",
"IMPALA_HOME",
"IMPALA_WORKLOAD_DIR",
"TARGET_FILESYSTEM",
"SECONDARY_FILESYSTEM",
"INTERNAL_LISTEN_HOST",
"IMPALA_EE_TEST_LOGS_DIR"
] |
[]
|
["IMPALA_HISTFILE", "IMPALA_HOME", "IMPALA_WORKLOAD_DIR", "TARGET_FILESYSTEM", "SECONDARY_FILESYSTEM", "INTERNAL_LISTEN_HOST", "IMPALA_EE_TEST_LOGS_DIR"]
|
python
| 7 | 0 | |
Sheets/sheet2.py
|
# -*- coding: utf-8 -*-
"""
Authors: Gonzalo Espinoza
UNESCO-IHE 2016
Contact: [email protected]
Repository: https://github.com/wateraccounting/wa
Module: Sheets/sheet2
"""
import os
import pandas as pd
import subprocess
import time
import xml.etree.ElementTree as ET
def create_sheet2(basin, period, units, data, output, template=False,
tolerance=0.2):
"""
Keyword arguments:
basin -- The name of the basin
period -- The period of analysis
units -- The units of the data
data -- A csv file that contains the water data. The csv file has to
            follow a specific format. A sample csv is available at the link:
https://github.com/wateraccounting/wa/tree/master/Sheets/csv
output -- The output path of the jpg file for the sheet.
template -- A svg file of the sheet. Use False (default) to use the
standard svg file.
tolerance -- Tolerance (in km3/year) of the difference in total ET
measured from (1) evaporation and transpiration and
(2) beneficial and non-beneficial ET.
Example:
from watools.Sheets import *
create_sheet2(basin='Nile Basin', period='2010', units='km3/year',
data=r'C:\Sheets\csv\Sample_sheet2.csv',
output=r'C:\Sheets\sheet_2.jpg')
"""
# Read table
df = pd.read_csv(data, sep=';')
# Data frames
df_Pr = df.loc[df.LAND_USE == "PROTECTED"]
df_Ut = df.loc[df.LAND_USE == "UTILIZED"]
df_Mo = df.loc[df.LAND_USE == "MODIFIED"]
df_Mc = df.loc[df.LAND_USE == "MANAGED CONVENTIONAL"]
df_Mn = df.loc[df.LAND_USE == "MANAGED NON_CONVENTIONAL"]
# Column 1: Transpiration
c1r1_t1 = float(df_Pr.loc[df_Pr.CLASS == "Forest"].TRANSPIRATION)
c1r2_t1 = float(df_Pr.loc[df_Pr.CLASS == "Shrubland"].TRANSPIRATION)
c1r3_t1 = float(df_Pr.loc[df_Pr.CLASS == "Natural grasslands"].TRANSPIRATION)
c1r4_t1 = float(df_Pr.loc[df_Pr.CLASS == "Natural water bodies"].TRANSPIRATION)
c1r5_t1 = float(df_Pr.loc[df_Pr.CLASS == "Wetlands"].TRANSPIRATION)
c1r6_t1 = float(df_Pr.loc[df_Pr.CLASS == "Glaciers"].TRANSPIRATION)
c1r7_t1 = float(df_Pr.loc[df_Pr.CLASS == "Others"].TRANSPIRATION)
c1_t1_total = c1r1_t1 + c1r2_t1 + c1r3_t1 + c1r4_t1 + c1r5_t1 + \
c1r6_t1 + c1r7_t1
c1r1_t2 = float(df_Ut.loc[df_Ut.CLASS == "Forest"].TRANSPIRATION)
c1r2_t2 = float(df_Ut.loc[df_Ut.CLASS == "Shrubland"].TRANSPIRATION)
c1r3_t2 = float(df_Ut.loc[df_Ut.CLASS == "Natural grasslands"].TRANSPIRATION)
c1r4_t2 = float(df_Ut.loc[df_Ut.CLASS == "Natural water bodies"].TRANSPIRATION)
c1r5_t2 = float(df_Ut.loc[df_Ut.CLASS == "Wetlands"].TRANSPIRATION)
c1r6_t2 = float(df_Ut.loc[df_Ut.CLASS == "Others"].TRANSPIRATION)
c1_t2_total = c1r1_t2 + c1r2_t2 + c1r3_t2 + c1r4_t2 + c1r5_t2 + c1r6_t2
c1r1_t3 = float(df_Mo.loc[df_Mo.CLASS == "Rainfed crops"].TRANSPIRATION)
c1r2_t3 = float(df_Mo.loc[df_Mo.CLASS == "Forest plantations"].TRANSPIRATION)
c1r3_t3 = float(df_Mo.loc[df_Mo.CLASS == "Settlements"].TRANSPIRATION)
c1r4_t3 = float(df_Mo.loc[df_Mo.CLASS == "Others"].TRANSPIRATION)
c1_t3_total = c1r1_t3 + c1r2_t3 + c1r3_t3 + c1r4_t3
c1r1_t4 = float(df_Mc.loc[df_Mc.CLASS == "Irrigated crops"].TRANSPIRATION)
c1r2_t4 = float(df_Mc.loc[df_Mc.CLASS == "Managed water bodies"].TRANSPIRATION)
c1r3_t4 = float(df_Mc.loc[df_Mc.CLASS == "Residential"].TRANSPIRATION)
c1r4_t4 = float(df_Mc.loc[df_Mc.CLASS == "Industry"].TRANSPIRATION)
c1r5_t4 = float(df_Mc.loc[df_Mc.CLASS == "Others"].TRANSPIRATION)
c1_t4_total = c1r1_t4 + c1r2_t4 + c1r3_t4 + c1r4_t4 + c1r5_t4
c1r1_t5 = float(df_Mn.loc[df_Mn.CLASS == "Indoor domestic"].TRANSPIRATION)
c1r2_t5 = float(df_Mn.loc[df_Mn.CLASS == "Indoor industry"].TRANSPIRATION)
c1r3_t5 = float(df_Mn.loc[df_Mn.CLASS == "Greenhouses"].TRANSPIRATION)
c1r4_t5 = float(df_Mn.loc[df_Mn.CLASS == "Livestock and husbandry"].TRANSPIRATION)
c1r5_t5 = float(df_Mn.loc[df_Mn.CLASS == "Power and energy"].TRANSPIRATION)
c1r6_t5 = float(df_Mn.loc[df_Mn.CLASS == "Others"].TRANSPIRATION)
c1_t5_total = c1r1_t5 + c1r2_t5 + c1r3_t5 + c1r4_t5 + c1r5_t5 + c1r6_t5
# Column 2: Water
c2r1_t1 = float(df_Pr.loc[df_Pr.CLASS == "Forest"].WATER)
c2r2_t1 = float(df_Pr.loc[df_Pr.CLASS == "Shrubland"].WATER)
c2r3_t1 = float(df_Pr.loc[df_Pr.CLASS == "Natural grasslands"].WATER)
c2r4_t1 = float(df_Pr.loc[df_Pr.CLASS == "Natural water bodies"].WATER)
c2r5_t1 = float(df_Pr.loc[df_Pr.CLASS == "Wetlands"].WATER)
c2r6_t1 = float(df_Pr.loc[df_Pr.CLASS == "Glaciers"].WATER)
c2r7_t1 = float(df_Pr.loc[df_Pr.CLASS == "Others"].WATER)
c2_t1_total = c2r1_t1 + c2r2_t1 + c2r3_t1 + c2r4_t1 + c2r5_t1 + \
c2r6_t1 + c2r7_t1
c2r1_t2 = float(df_Ut.loc[df_Ut.CLASS == "Forest"].WATER)
c2r2_t2 = float(df_Ut.loc[df_Ut.CLASS == "Shrubland"].WATER)
c2r3_t2 = float(df_Ut.loc[df_Ut.CLASS == "Natural grasslands"].WATER)
c2r4_t2 = float(df_Ut.loc[df_Ut.CLASS == "Natural water bodies"].WATER)
c2r5_t2 = float(df_Ut.loc[df_Ut.CLASS == "Wetlands"].WATER)
c2r6_t2 = float(df_Ut.loc[df_Ut.CLASS == "Others"].WATER)
c2_t2_total = c2r1_t2 + c2r2_t2 + c2r3_t2 + c2r4_t2 + c2r5_t2 + c2r6_t2
c2r1_t3 = float(df_Mo.loc[df_Mo.CLASS == "Rainfed crops"].WATER)
c2r2_t3 = float(df_Mo.loc[df_Mo.CLASS == "Forest plantations"].WATER)
c2r3_t3 = float(df_Mo.loc[df_Mo.CLASS == "Settlements"].WATER)
c2r4_t3 = float(df_Mo.loc[df_Mo.CLASS == "Others"].WATER)
c2_t3_total = c2r1_t3 + c2r2_t3 + c2r3_t3 + c2r4_t3
c2r1_t4 = float(df_Mc.loc[df_Mc.CLASS == "Irrigated crops"].WATER)
c2r2_t4 = float(df_Mc.loc[df_Mc.CLASS == "Managed water bodies"].WATER)
c2r3_t4 = float(df_Mc.loc[df_Mc.CLASS == "Residential"].WATER)
c2r4_t4 = float(df_Mc.loc[df_Mc.CLASS == "Industry"].WATER)
c2r5_t4 = float(df_Mc.loc[df_Mc.CLASS == "Others"].WATER)
c2_t4_total = c2r1_t4 + c2r2_t4 + c2r3_t4 + c2r4_t4 + c2r5_t4
c2r1_t5 = float(df_Mn.loc[df_Mn.CLASS == "Indoor domestic"].WATER)
c2r2_t5 = float(df_Mn.loc[df_Mn.CLASS == "Indoor industry"].WATER)
c2r3_t5 = float(df_Mn.loc[df_Mn.CLASS == "Greenhouses"].WATER)
c2r4_t5 = float(df_Mn.loc[df_Mn.CLASS == "Livestock and husbandry"].WATER)
c2r5_t5 = float(df_Mn.loc[df_Mn.CLASS == "Power and energy"].WATER)
c2r6_t5 = float(df_Mn.loc[df_Mn.CLASS == "Others"].WATER)
c2_t5_total = c2r1_t5 + c2r2_t5 + c2r3_t5 + c2r4_t5 + c2r5_t5 + c2r6_t5
# Column 3: Soil
c3r1_t1 = float(df_Pr.loc[df_Pr.CLASS == "Forest"].SOIL)
c3r2_t1 = float(df_Pr.loc[df_Pr.CLASS == "Shrubland"].SOIL)
c3r3_t1 = float(df_Pr.loc[df_Pr.CLASS == "Natural grasslands"].SOIL)
c3r4_t1 = float(df_Pr.loc[df_Pr.CLASS == "Natural water bodies"].SOIL)
c3r5_t1 = float(df_Pr.loc[df_Pr.CLASS == "Wetlands"].SOIL)
c3r6_t1 = float(df_Pr.loc[df_Pr.CLASS == "Glaciers"].SOIL)
c3r7_t1 = float(df_Pr.loc[df_Pr.CLASS == "Others"].SOIL)
c3_t1_total = c3r1_t1 + c3r2_t1 + c3r3_t1 + c3r4_t1 + c3r5_t1 + \
c3r6_t1 + c3r7_t1
c3r1_t2 = float(df_Ut.loc[df_Ut.CLASS == "Forest"].SOIL)
c3r2_t2 = float(df_Ut.loc[df_Ut.CLASS == "Shrubland"].SOIL)
c3r3_t2 = float(df_Ut.loc[df_Ut.CLASS == "Natural grasslands"].SOIL)
c3r4_t2 = float(df_Ut.loc[df_Ut.CLASS == "Natural water bodies"].SOIL)
c3r5_t2 = float(df_Ut.loc[df_Ut.CLASS == "Wetlands"].SOIL)
c3r6_t2 = float(df_Ut.loc[df_Ut.CLASS == "Others"].SOIL)
c3_t2_total = c3r1_t2 + c3r2_t2 + c3r3_t2 + c3r4_t2 + c3r5_t2 + c3r6_t2
c3r1_t3 = float(df_Mo.loc[df_Mo.CLASS == "Rainfed crops"].SOIL)
c3r2_t3 = float(df_Mo.loc[df_Mo.CLASS == "Forest plantations"].SOIL)
c3r3_t3 = float(df_Mo.loc[df_Mo.CLASS == "Settlements"].SOIL)
c3r4_t3 = float(df_Mo.loc[df_Mo.CLASS == "Others"].SOIL)
c3_t3_total = c3r1_t3 + c3r2_t3 + c3r3_t3 + c3r4_t3
c3r1_t4 = float(df_Mc.loc[df_Mc.CLASS == "Irrigated crops"].SOIL)
c3r2_t4 = float(df_Mc.loc[df_Mc.CLASS == "Managed water bodies"].SOIL)
c3r3_t4 = float(df_Mc.loc[df_Mc.CLASS == "Residential"].SOIL)
c3r4_t4 = float(df_Mc.loc[df_Mc.CLASS == "Industry"].SOIL)
c3r5_t4 = float(df_Mc.loc[df_Mc.CLASS == "Others"].SOIL)
c3_t4_total = c3r1_t4 + c3r2_t4 + c3r3_t4 + c3r4_t4 + c3r5_t4
c3r1_t5 = float(df_Mn.loc[df_Mn.CLASS == "Indoor domestic"].SOIL)
c3r2_t5 = float(df_Mn.loc[df_Mn.CLASS == "Indoor industry"].SOIL)
c3r3_t5 = float(df_Mn.loc[df_Mn.CLASS == "Greenhouses"].SOIL)
c3r4_t5 = float(df_Mn.loc[df_Mn.CLASS == "Livestock and husbandry"].SOIL)
c3r5_t5 = float(df_Mn.loc[df_Mn.CLASS == "Power and energy"].SOIL)
c3r6_t5 = float(df_Mn.loc[df_Mn.CLASS == "Others"].SOIL)
c3_t5_total = c3r1_t5 + c3r2_t5 + c3r3_t5 + c3r4_t5 + c3r5_t5 + c3r6_t5
# Column 4: INTERCEPTION
c4r1_t1 = float(df_Pr.loc[df_Pr.CLASS == "Forest"].INTERCEPTION)
c4r2_t1 = float(df_Pr.loc[df_Pr.CLASS == "Shrubland"].INTERCEPTION)
c4r3_t1 = float(df_Pr.loc[df_Pr.CLASS == "Natural grasslands"].INTERCEPTION)
c4r4_t1 = float(df_Pr.loc[df_Pr.CLASS == "Natural water bodies"].INTERCEPTION)
c4r5_t1 = float(df_Pr.loc[df_Pr.CLASS == "Wetlands"].INTERCEPTION)
c4r6_t1 = float(df_Pr.loc[df_Pr.CLASS == "Glaciers"].INTERCEPTION)
c4r7_t1 = float(df_Pr.loc[df_Pr.CLASS == "Others"].INTERCEPTION)
c4_t1_total = c4r1_t1 + c4r2_t1 + c4r3_t1 + c4r4_t1 + c4r5_t1 + \
c4r6_t1 + c4r7_t1
c4r1_t2 = float(df_Ut.loc[df_Ut.CLASS == "Forest"].INTERCEPTION)
c4r2_t2 = float(df_Ut.loc[df_Ut.CLASS == "Shrubland"].INTERCEPTION)
c4r3_t2 = float(df_Ut.loc[df_Ut.CLASS == "Natural grasslands"].INTERCEPTION)
c4r4_t2 = float(df_Ut.loc[df_Ut.CLASS == "Natural water bodies"].INTERCEPTION)
c4r5_t2 = float(df_Ut.loc[df_Ut.CLASS == "Wetlands"].INTERCEPTION)
c4r6_t2 = float(df_Ut.loc[df_Ut.CLASS == "Others"].INTERCEPTION)
c4_t2_total = c4r1_t2 + c4r2_t2 + c4r3_t2 + c4r4_t2 + c4r5_t2 + c4r6_t2
c4r1_t3 = float(df_Mo.loc[df_Mo.CLASS == "Rainfed crops"].INTERCEPTION)
c4r2_t3 = float(df_Mo.loc[df_Mo.CLASS == "Forest plantations"].INTERCEPTION)
c4r3_t3 = float(df_Mo.loc[df_Mo.CLASS == "Settlements"].INTERCEPTION)
c4r4_t3 = float(df_Mo.loc[df_Mo.CLASS == "Others"].INTERCEPTION)
c4_t3_total = c4r1_t3 + c4r2_t3 + c4r3_t3 + c4r4_t3
c4r1_t4 = float(df_Mc.loc[df_Mc.CLASS == "Irrigated crops"].INTERCEPTION)
c4r2_t4 = float(df_Mc.loc[df_Mc.CLASS == "Managed water bodies"].INTERCEPTION)
c4r3_t4 = float(df_Mc.loc[df_Mc.CLASS == "Residential"].INTERCEPTION)
c4r4_t4 = float(df_Mc.loc[df_Mc.CLASS == "Industry"].INTERCEPTION)
c4r5_t4 = float(df_Mc.loc[df_Mc.CLASS == "Others"].INTERCEPTION)
c4_t4_total = c4r1_t4 + c4r2_t4 + c4r3_t4 + c4r4_t4 + c4r5_t4
c4r1_t5 = float(df_Mn.loc[df_Mn.CLASS == "Indoor domestic"].INTERCEPTION)
c4r2_t5 = float(df_Mn.loc[df_Mn.CLASS == "Indoor industry"].INTERCEPTION)
c4r3_t5 = float(df_Mn.loc[df_Mn.CLASS == "Greenhouses"].INTERCEPTION)
c4r4_t5 = float(df_Mn.loc[df_Mn.CLASS == "Livestock and husbandry"].INTERCEPTION)
c4r5_t5 = float(df_Mn.loc[df_Mn.CLASS == "Power and energy"].INTERCEPTION)
c4r6_t5 = float(df_Mn.loc[df_Mn.CLASS == "Others"].INTERCEPTION)
c4_t5_total = c4r1_t5 + c4r2_t5 + c4r3_t5 + c4r4_t5 + c4r5_t5 + c4r6_t5
# Column 6: AGRICULTURE
c6r1_t1 = float(df_Pr.loc[df_Pr.CLASS == "Forest"].AGRICULTURE)
c6r2_t1 = float(df_Pr.loc[df_Pr.CLASS == "Shrubland"].AGRICULTURE)
c6r3_t1 = float(df_Pr.loc[df_Pr.CLASS == "Natural grasslands"].AGRICULTURE)
c6r4_t1 = float(df_Pr.loc[df_Pr.CLASS == "Natural water bodies"].AGRICULTURE)
c6r5_t1 = float(df_Pr.loc[df_Pr.CLASS == "Wetlands"].AGRICULTURE)
c6r6_t1 = float(df_Pr.loc[df_Pr.CLASS == "Glaciers"].AGRICULTURE)
c6r7_t1 = float(df_Pr.loc[df_Pr.CLASS == "Others"].AGRICULTURE)
c6_t1_total = c6r1_t1 + c6r2_t1 + c6r3_t1 + c6r4_t1 + c6r5_t1 + \
c6r6_t1 + c6r7_t1
c6r1_t2 = float(df_Ut.loc[df_Ut.CLASS == "Forest"].AGRICULTURE)
c6r2_t2 = float(df_Ut.loc[df_Ut.CLASS == "Shrubland"].AGRICULTURE)
c6r3_t2 = float(df_Ut.loc[df_Ut.CLASS == "Natural grasslands"].AGRICULTURE)
c6r4_t2 = float(df_Ut.loc[df_Ut.CLASS == "Natural water bodies"].AGRICULTURE)
c6r5_t2 = float(df_Ut.loc[df_Ut.CLASS == "Wetlands"].AGRICULTURE)
c6r6_t2 = float(df_Ut.loc[df_Ut.CLASS == "Others"].AGRICULTURE)
c6_t2_total = c6r1_t2 + c6r2_t2 + c6r3_t2 + c6r4_t2 + c6r5_t2 + c6r6_t2
c6r1_t3 = float(df_Mo.loc[df_Mo.CLASS == "Rainfed crops"].AGRICULTURE)
c6r2_t3 = float(df_Mo.loc[df_Mo.CLASS == "Forest plantations"].AGRICULTURE)
c6r3_t3 = float(df_Mo.loc[df_Mo.CLASS == "Settlements"].AGRICULTURE)
c6r4_t3 = float(df_Mo.loc[df_Mo.CLASS == "Others"].AGRICULTURE)
c6_t3_total = c6r1_t3 + c6r2_t3 + c6r3_t3 + c6r4_t3
c6r1_t4 = float(df_Mc.loc[df_Mc.CLASS == "Irrigated crops"].AGRICULTURE)
c6r2_t4 = float(df_Mc.loc[df_Mc.CLASS == "Managed water bodies"].AGRICULTURE)
c6r3_t4 = float(df_Mc.loc[df_Mc.CLASS == "Residential"].AGRICULTURE)
c6r4_t4 = float(df_Mc.loc[df_Mc.CLASS == "Industry"].AGRICULTURE)
c6r5_t4 = float(df_Mc.loc[df_Mc.CLASS == "Others"].AGRICULTURE)
c6_t4_total = c6r1_t4 + c6r2_t4 + c6r3_t4 + c6r4_t4 + c6r5_t4
c6r1_t5 = float(df_Mn.loc[df_Mn.CLASS == "Indoor domestic"].AGRICULTURE)
c6r2_t5 = float(df_Mn.loc[df_Mn.CLASS == "Indoor industry"].AGRICULTURE)
c6r3_t5 = float(df_Mn.loc[df_Mn.CLASS == "Greenhouses"].AGRICULTURE)
c6r4_t5 = float(df_Mn.loc[df_Mn.CLASS == "Livestock and husbandry"].AGRICULTURE)
c6r5_t5 = float(df_Mn.loc[df_Mn.CLASS == "Power and energy"].AGRICULTURE)
c6r6_t5 = float(df_Mn.loc[df_Mn.CLASS == "Others"].AGRICULTURE)
c6_t5_total = c6r1_t5 + c6r2_t5 + c6r3_t5 + c6r4_t5 + c6r5_t5 + c6r6_t5
# Column 7: ENVIRONMENT
c7r1_t1 = float(df_Pr.loc[df_Pr.CLASS == "Forest"].ENVIRONMENT)
c7r2_t1 = float(df_Pr.loc[df_Pr.CLASS == "Shrubland"].ENVIRONMENT)
c7r3_t1 = float(df_Pr.loc[df_Pr.CLASS == "Natural grasslands"].ENVIRONMENT)
c7r4_t1 = float(df_Pr.loc[df_Pr.CLASS == "Natural water bodies"].ENVIRONMENT)
c7r5_t1 = float(df_Pr.loc[df_Pr.CLASS == "Wetlands"].ENVIRONMENT)
c7r6_t1 = float(df_Pr.loc[df_Pr.CLASS == "Glaciers"].ENVIRONMENT)
c7r7_t1 = float(df_Pr.loc[df_Pr.CLASS == "Others"].ENVIRONMENT)
c7_t1_total = c7r1_t1 + c7r2_t1 + c7r3_t1 + c7r4_t1 + c7r5_t1 + \
c7r6_t1 + c7r7_t1
c7r1_t2 = float(df_Ut.loc[df_Ut.CLASS == "Forest"].ENVIRONMENT)
c7r2_t2 = float(df_Ut.loc[df_Ut.CLASS == "Shrubland"].ENVIRONMENT)
c7r3_t2 = float(df_Ut.loc[df_Ut.CLASS == "Natural grasslands"].ENVIRONMENT)
c7r4_t2 = float(df_Ut.loc[df_Ut.CLASS == "Natural water bodies"].ENVIRONMENT)
c7r5_t2 = float(df_Ut.loc[df_Ut.CLASS == "Wetlands"].ENVIRONMENT)
c7r6_t2 = float(df_Ut.loc[df_Ut.CLASS == "Others"].ENVIRONMENT)
c7_t2_total = c7r1_t2 + c7r2_t2 + c7r3_t2 + c7r4_t2 + c7r5_t2 + c7r6_t2
c7r1_t3 = float(df_Mo.loc[df_Mo.CLASS == "Rainfed crops"].ENVIRONMENT)
c7r2_t3 = float(df_Mo.loc[df_Mo.CLASS == "Forest plantations"].ENVIRONMENT)
c7r3_t3 = float(df_Mo.loc[df_Mo.CLASS == "Settlements"].ENVIRONMENT)
c7r4_t3 = float(df_Mo.loc[df_Mo.CLASS == "Others"].ENVIRONMENT)
c7_t3_total = c7r1_t3 + c7r2_t3 + c7r3_t3 + c7r4_t3
c7r1_t4 = float(df_Mc.loc[df_Mc.CLASS == "Irrigated crops"].ENVIRONMENT)
c7r2_t4 = float(df_Mc.loc[df_Mc.CLASS == "Managed water bodies"].ENVIRONMENT)
c7r3_t4 = float(df_Mc.loc[df_Mc.CLASS == "Residential"].ENVIRONMENT)
c7r4_t4 = float(df_Mc.loc[df_Mc.CLASS == "Industry"].ENVIRONMENT)
c7r5_t4 = float(df_Mc.loc[df_Mc.CLASS == "Others"].ENVIRONMENT)
c7_t4_total = c7r1_t4 + c7r2_t4 + c7r3_t4 + c7r4_t4 + c7r5_t4
c7r1_t5 = float(df_Mn.loc[df_Mn.CLASS == "Indoor domestic"].ENVIRONMENT)
c7r2_t5 = float(df_Mn.loc[df_Mn.CLASS == "Indoor industry"].ENVIRONMENT)
c7r3_t5 = float(df_Mn.loc[df_Mn.CLASS == "Greenhouses"].ENVIRONMENT)
c7r4_t5 = float(df_Mn.loc[df_Mn.CLASS == "Livestock and husbandry"].ENVIRONMENT)
c7r5_t5 = float(df_Mn.loc[df_Mn.CLASS == "Power and energy"].ENVIRONMENT)
c7r6_t5 = float(df_Mn.loc[df_Mn.CLASS == "Others"].ENVIRONMENT)
c7_t5_total = c7r1_t5 + c7r2_t5 + c7r3_t5 + c7r4_t5 + c7r5_t5 + c7r6_t5
# Column 8: ECONOMY
c8r1_t1 = float(df_Pr.loc[df_Pr.CLASS == "Forest"].ECONOMY)
c8r2_t1 = float(df_Pr.loc[df_Pr.CLASS == "Shrubland"].ECONOMY)
c8r3_t1 = float(df_Pr.loc[df_Pr.CLASS == "Natural grasslands"].ECONOMY)
c8r4_t1 = float(df_Pr.loc[df_Pr.CLASS == "Natural water bodies"].ECONOMY)
c8r5_t1 = float(df_Pr.loc[df_Pr.CLASS == "Wetlands"].ECONOMY)
c8r6_t1 = float(df_Pr.loc[df_Pr.CLASS == "Glaciers"].ECONOMY)
c8r7_t1 = float(df_Pr.loc[df_Pr.CLASS == "Others"].ECONOMY)
c8_t1_total = c8r1_t1 + c8r2_t1 + c8r3_t1 + c8r4_t1 + c8r5_t1 + \
c8r6_t1 + c8r7_t1
c8r1_t2 = float(df_Ut.loc[df_Ut.CLASS == "Forest"].ECONOMY)
c8r2_t2 = float(df_Ut.loc[df_Ut.CLASS == "Shrubland"].ECONOMY)
c8r3_t2 = float(df_Ut.loc[df_Ut.CLASS == "Natural grasslands"].ECONOMY)
c8r4_t2 = float(df_Ut.loc[df_Ut.CLASS == "Natural water bodies"].ECONOMY)
c8r5_t2 = float(df_Ut.loc[df_Ut.CLASS == "Wetlands"].ECONOMY)
c8r6_t2 = float(df_Ut.loc[df_Ut.CLASS == "Others"].ECONOMY)
c8_t2_total = c8r1_t2 + c8r2_t2 + c8r3_t2 + c8r4_t2 + c8r5_t2 + c8r6_t2
c8r1_t3 = float(df_Mo.loc[df_Mo.CLASS == "Rainfed crops"].ECONOMY)
c8r2_t3 = float(df_Mo.loc[df_Mo.CLASS == "Forest plantations"].ECONOMY)
c8r3_t3 = float(df_Mo.loc[df_Mo.CLASS == "Settlements"].ECONOMY)
c8r4_t3 = float(df_Mo.loc[df_Mo.CLASS == "Others"].ECONOMY)
c8_t3_total = c8r1_t3 + c8r2_t3 + c8r3_t3 + c8r4_t3
c8r1_t4 = float(df_Mc.loc[df_Mc.CLASS == "Irrigated crops"].ECONOMY)
c8r2_t4 = float(df_Mc.loc[df_Mc.CLASS == "Managed water bodies"].ECONOMY)
c8r3_t4 = float(df_Mc.loc[df_Mc.CLASS == "Residential"].ECONOMY)
c8r4_t4 = float(df_Mc.loc[df_Mc.CLASS == "Industry"].ECONOMY)
c8r5_t4 = float(df_Mc.loc[df_Mc.CLASS == "Others"].ECONOMY)
c8_t4_total = c8r1_t4 + c8r2_t4 + c8r3_t4 + c8r4_t4 + c8r5_t4
c8r1_t5 = float(df_Mn.loc[df_Mn.CLASS == "Indoor domestic"].ECONOMY)
c8r2_t5 = float(df_Mn.loc[df_Mn.CLASS == "Indoor industry"].ECONOMY)
c8r3_t5 = float(df_Mn.loc[df_Mn.CLASS == "Greenhouses"].ECONOMY)
c8r4_t5 = float(df_Mn.loc[df_Mn.CLASS == "Livestock and husbandry"].ECONOMY)
c8r5_t5 = float(df_Mn.loc[df_Mn.CLASS == "Power and energy"].ECONOMY)
c8r6_t5 = float(df_Mn.loc[df_Mn.CLASS == "Others"].ECONOMY)
c8_t5_total = c8r1_t5 + c8r2_t5 + c8r3_t5 + c8r4_t5 + c8r5_t5 + c8r6_t5
# Column 9: ENERGY
c9r1_t1 = float(df_Pr.loc[df_Pr.CLASS == "Forest"].ENERGY)
c9r2_t1 = float(df_Pr.loc[df_Pr.CLASS == "Shrubland"].ENERGY)
c9r3_t1 = float(df_Pr.loc[df_Pr.CLASS == "Natural grasslands"].ENERGY)
c9r4_t1 = float(df_Pr.loc[df_Pr.CLASS == "Natural water bodies"].ENERGY)
c9r5_t1 = float(df_Pr.loc[df_Pr.CLASS == "Wetlands"].ENERGY)
c9r6_t1 = float(df_Pr.loc[df_Pr.CLASS == "Glaciers"].ENERGY)
c9r7_t1 = float(df_Pr.loc[df_Pr.CLASS == "Others"].ENERGY)
c9_t1_total = c9r1_t1 + c9r2_t1 + c9r3_t1 + c9r4_t1 + c9r5_t1 + \
c9r6_t1 + c9r7_t1
c9r1_t2 = float(df_Ut.loc[df_Ut.CLASS == "Forest"].ENERGY)
c9r2_t2 = float(df_Ut.loc[df_Ut.CLASS == "Shrubland"].ENERGY)
c9r3_t2 = float(df_Ut.loc[df_Ut.CLASS == "Natural grasslands"].ENERGY)
c9r4_t2 = float(df_Ut.loc[df_Ut.CLASS == "Natural water bodies"].ENERGY)
c9r5_t2 = float(df_Ut.loc[df_Ut.CLASS == "Wetlands"].ENERGY)
c9r6_t2 = float(df_Ut.loc[df_Ut.CLASS == "Others"].ENERGY)
c9_t2_total = c9r1_t2 + c9r2_t2 + c9r3_t2 + c9r4_t2 + c9r5_t2 + c9r6_t2
c9r1_t3 = float(df_Mo.loc[df_Mo.CLASS == "Rainfed crops"].ENERGY)
c9r2_t3 = float(df_Mo.loc[df_Mo.CLASS == "Forest plantations"].ENERGY)
c9r3_t3 = float(df_Mo.loc[df_Mo.CLASS == "Settlements"].ENERGY)
c9r4_t3 = float(df_Mo.loc[df_Mo.CLASS == "Others"].ENERGY)
c9_t3_total = c9r1_t3 + c9r2_t3 + c9r3_t3 + c9r4_t3
c9r1_t4 = float(df_Mc.loc[df_Mc.CLASS == "Irrigated crops"].ENERGY)
c9r2_t4 = float(df_Mc.loc[df_Mc.CLASS == "Managed water bodies"].ENERGY)
c9r3_t4 = float(df_Mc.loc[df_Mc.CLASS == "Residential"].ENERGY)
c9r4_t4 = float(df_Mc.loc[df_Mc.CLASS == "Industry"].ENERGY)
c9r5_t4 = float(df_Mc.loc[df_Mc.CLASS == "Others"].ENERGY)
c9_t4_total = c9r1_t4 + c9r2_t4 + c9r3_t4 + c9r4_t4 + c9r5_t4
c9r1_t5 = float(df_Mn.loc[df_Mn.CLASS == "Indoor domestic"].ENERGY)
c9r2_t5 = float(df_Mn.loc[df_Mn.CLASS == "Indoor industry"].ENERGY)
c9r3_t5 = float(df_Mn.loc[df_Mn.CLASS == "Greenhouses"].ENERGY)
c9r4_t5 = float(df_Mn.loc[df_Mn.CLASS == "Livestock and husbandry"].ENERGY)
c9r5_t5 = float(df_Mn.loc[df_Mn.CLASS == "Power and energy"].ENERGY)
c9r6_t5 = float(df_Mn.loc[df_Mn.CLASS == "Others"].ENERGY)
c9_t5_total = c9r1_t5 + c9r2_t5 + c9r3_t5 + c9r4_t5 + c9r5_t5 + c9r6_t5
# Column 10: LEISURE
c10r1_t1 = float(df_Pr.loc[df_Pr.CLASS == "Forest"].LEISURE)
c10r2_t1 = float(df_Pr.loc[df_Pr.CLASS == "Shrubland"].LEISURE)
c10r3_t1 = float(df_Pr.loc[df_Pr.CLASS == "Natural grasslands"].LEISURE)
c10r4_t1 = float(df_Pr.loc[df_Pr.CLASS == "Natural water bodies"].LEISURE)
c10r5_t1 = float(df_Pr.loc[df_Pr.CLASS == "Wetlands"].LEISURE)
c10r6_t1 = float(df_Pr.loc[df_Pr.CLASS == "Glaciers"].LEISURE)
c10r7_t1 = float(df_Pr.loc[df_Pr.CLASS == "Others"].LEISURE)
c10_t1_total = c10r1_t1 + c10r2_t1 + c10r3_t1 + c10r4_t1 + c10r5_t1 + \
c10r6_t1 + c10r7_t1
c10r1_t2 = float(df_Ut.loc[df_Ut.CLASS == "Forest"].LEISURE)
c10r2_t2 = float(df_Ut.loc[df_Ut.CLASS == "Shrubland"].LEISURE)
c10r3_t2 = float(df_Ut.loc[df_Ut.CLASS == "Natural grasslands"].LEISURE)
c10r4_t2 = float(df_Ut.loc[df_Ut.CLASS == "Natural water bodies"].LEISURE)
c10r5_t2 = float(df_Ut.loc[df_Ut.CLASS == "Wetlands"].LEISURE)
c10r6_t2 = float(df_Ut.loc[df_Ut.CLASS == "Others"].LEISURE)
c10_t2_total = c10r1_t2 + c10r2_t2 + c10r3_t2 + c10r4_t2 + \
c10r5_t2 + c10r6_t2
c10r1_t3 = float(df_Mo.loc[df_Mo.CLASS == "Rainfed crops"].LEISURE)
c10r2_t3 = float(df_Mo.loc[df_Mo.CLASS == "Forest plantations"].LEISURE)
c10r3_t3 = float(df_Mo.loc[df_Mo.CLASS == "Settlements"].LEISURE)
c10r4_t3 = float(df_Mo.loc[df_Mo.CLASS == "Others"].LEISURE)
c10_t3_total = c10r1_t3 + c10r2_t3 + c10r3_t3 + c10r4_t3
c10r1_t4 = float(df_Mc.loc[df_Mc.CLASS == "Irrigated crops"].LEISURE)
c10r2_t4 = float(df_Mc.loc[df_Mc.CLASS == "Managed water bodies"].LEISURE)
c10r3_t4 = float(df_Mc.loc[df_Mc.CLASS == "Residential"].LEISURE)
c10r4_t4 = float(df_Mc.loc[df_Mc.CLASS == "Industry"].LEISURE)
c10r5_t4 = float(df_Mc.loc[df_Mc.CLASS == "Others"].LEISURE)
c10_t4_total = c10r1_t4 + c10r2_t4 + c10r3_t4 + c10r4_t4 + c10r5_t4
c10r1_t5 = float(df_Mn.loc[df_Mn.CLASS == "Indoor domestic"].LEISURE)
c10r2_t5 = float(df_Mn.loc[df_Mn.CLASS == "Indoor industry"].LEISURE)
c10r3_t5 = float(df_Mn.loc[df_Mn.CLASS == "Greenhouses"].LEISURE)
c10r4_t5 = float(df_Mn.loc[df_Mn.CLASS == "Livestock and husbandry"].LEISURE)
c10r5_t5 = float(df_Mn.loc[df_Mn.CLASS == "Power and energy"].LEISURE)
c10r6_t5 = float(df_Mn.loc[df_Mn.CLASS == "Others"].LEISURE)
c10_t5_total = c10r1_t5 + c10r2_t5 + c10r3_t5 + c10r4_t5 + \
c10r5_t5 + c10r6_t5
# Column 11: NON_BENEFICIAL
c11r1_t1 = float(df_Pr.loc[df_Pr.CLASS == "Forest"].NON_BENEFICIAL)
c11r2_t1 = float(df_Pr.loc[df_Pr.CLASS == "Shrubland"].NON_BENEFICIAL)
c11r3_t1 = float(df_Pr.loc[df_Pr.CLASS == "Natural grasslands"].NON_BENEFICIAL)
c11r4_t1 = float(df_Pr.loc[df_Pr.CLASS == "Natural water bodies"].NON_BENEFICIAL)
c11r5_t1 = float(df_Pr.loc[df_Pr.CLASS == "Wetlands"].NON_BENEFICIAL)
c11r6_t1 = float(df_Pr.loc[df_Pr.CLASS == "Glaciers"].NON_BENEFICIAL)
c11r7_t1 = float(df_Pr.loc[df_Pr.CLASS == "Others"].NON_BENEFICIAL)
c11_t1_total = c11r1_t1 + c11r2_t1 + c11r3_t1 + c11r4_t1 + c11r5_t1 + \
c11r6_t1 + c11r7_t1
c11r1_t2 = float(df_Ut.loc[df_Ut.CLASS == "Forest"].NON_BENEFICIAL)
c11r2_t2 = float(df_Ut.loc[df_Ut.CLASS == "Shrubland"].NON_BENEFICIAL)
c11r3_t2 = float(df_Ut.loc[df_Ut.CLASS == "Natural grasslands"].NON_BENEFICIAL)
c11r4_t2 = float(df_Ut.loc[df_Ut.CLASS == "Natural water bodies"].NON_BENEFICIAL)
c11r5_t2 = float(df_Ut.loc[df_Ut.CLASS == "Wetlands"].NON_BENEFICIAL)
c11r6_t2 = float(df_Ut.loc[df_Ut.CLASS == "Others"].NON_BENEFICIAL)
c11_t2_total = c11r1_t2 + c11r2_t2 + c11r3_t2 + c11r4_t2 + \
c11r5_t2 + c11r6_t2
c11r1_t3 = float(df_Mo.loc[df_Mo.CLASS == "Rainfed crops"].NON_BENEFICIAL)
c11r2_t3 = float(df_Mo.loc[df_Mo.CLASS == "Forest plantations"].NON_BENEFICIAL)
c11r3_t3 = float(df_Mo.loc[df_Mo.CLASS == "Settlements"].NON_BENEFICIAL)
c11r4_t3 = float(df_Mo.loc[df_Mo.CLASS == "Others"].NON_BENEFICIAL)
c11_t3_total = c11r1_t3 + c11r2_t3 + c11r3_t3 + c11r4_t3
c11r1_t4 = float(df_Mc.loc[df_Mc.CLASS == "Irrigated crops"].NON_BENEFICIAL)
c11r2_t4 = float(df_Mc.loc[df_Mc.CLASS == "Managed water bodies"].NON_BENEFICIAL)
c11r3_t4 = float(df_Mc.loc[df_Mc.CLASS == "Residential"].NON_BENEFICIAL)
c11r4_t4 = float(df_Mc.loc[df_Mc.CLASS == "Industry"].NON_BENEFICIAL)
c11r5_t4 = float(df_Mc.loc[df_Mc.CLASS == "Others"].NON_BENEFICIAL)
c11_t4_total = c11r1_t4 + c11r2_t4 + c11r3_t4 + c11r4_t4 + c11r5_t4
c11r1_t5 = float(df_Mn.loc[df_Mn.CLASS == "Indoor domestic"].NON_BENEFICIAL)
c11r2_t5 = float(df_Mn.loc[df_Mn.CLASS == "Indoor industry"].NON_BENEFICIAL)
c11r3_t5 = float(df_Mn.loc[df_Mn.CLASS == "Greenhouses"].NON_BENEFICIAL)
c11r4_t5 = float(df_Mn.loc[df_Mn.CLASS == "Livestock and husbandry"].NON_BENEFICIAL)
c11r5_t5 = float(df_Mn.loc[df_Mn.CLASS == "Power and energy"].NON_BENEFICIAL)
c11r6_t5 = float(df_Mn.loc[df_Mn.CLASS == "Others"].NON_BENEFICIAL)
c11_t5_total = c11r1_t5 + c11r2_t5 + c11r3_t5 + c11r4_t5 + \
c11r5_t5 + c11r6_t5
# Check if left and right side agree
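    # Per class (row) the left-hand total (transpiration + water + soil +
    # interception) must equal the right-hand total (agriculture + environment +
    # economy + energy + leisure + non-beneficial ET) within 'tolerance' km3/year,
    # e.g. |c5r1_t1_left - c5r1_t1_right| <= tolerance for the PROTECTED Forest row.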
# Table 1
r1_t1_bene = c6r1_t1 + c7r1_t1 + c8r1_t1 + c9r1_t1 + c10r1_t1
r2_t1_bene = c6r2_t1 + c7r2_t1 + c8r2_t1 + c9r2_t1 + c10r2_t1
r3_t1_bene = c6r3_t1 + c7r3_t1 + c8r3_t1 + c9r3_t1 + c10r3_t1
r4_t1_bene = c6r4_t1 + c7r4_t1 + c8r4_t1 + c9r4_t1 + c10r4_t1
r5_t1_bene = c6r5_t1 + c7r5_t1 + c8r5_t1 + c9r5_t1 + c10r5_t1
r6_t1_bene = c6r6_t1 + c7r6_t1 + c8r6_t1 + c9r6_t1 + c10r6_t1
r7_t1_bene = c6r7_t1 + c7r7_t1 + c8r7_t1 + c9r7_t1 + c10r7_t1
c5r1_t1_left = c1r1_t1 + c2r1_t1 + c3r1_t1 + c4r1_t1
c5r2_t1_left = c1r2_t1 + c2r2_t1 + c3r2_t1 + c4r2_t1
c5r3_t1_left = c1r3_t1 + c2r3_t1 + c3r3_t1 + c4r3_t1
c5r4_t1_left = c1r4_t1 + c2r4_t1 + c3r4_t1 + c4r4_t1
c5r5_t1_left = c1r5_t1 + c2r5_t1 + c3r5_t1 + c4r5_t1
c5r6_t1_left = c1r6_t1 + c2r6_t1 + c3r6_t1 + c4r6_t1
c5r7_t1_left = c1r7_t1 + c2r7_t1 + c3r7_t1 + c4r7_t1
c5r1_t1_right = r1_t1_bene + c11r1_t1
c5r2_t1_right = r2_t1_bene + c11r2_t1
c5r3_t1_right = r3_t1_bene + c11r3_t1
c5r4_t1_right = r4_t1_bene + c11r4_t1
c5r5_t1_right = r5_t1_bene + c11r5_t1
c5r6_t1_right = r6_t1_bene + c11r6_t1
c5r7_t1_right = r7_t1_bene + c11r7_t1
# Table 2
r1_t2_bene = c6r1_t2 + c7r1_t2 + c8r1_t2 + c9r1_t2 + c10r1_t2
r2_t2_bene = c6r2_t2 + c7r2_t2 + c8r2_t2 + c9r2_t2 + c10r2_t2
r3_t2_bene = c6r3_t2 + c7r3_t2 + c8r3_t2 + c9r3_t2 + c10r3_t2
r4_t2_bene = c6r4_t2 + c7r4_t2 + c8r4_t2 + c9r4_t2 + c10r4_t2
r5_t2_bene = c6r5_t2 + c7r5_t2 + c8r5_t2 + c9r5_t2 + c10r5_t2
r6_t2_bene = c6r6_t2 + c7r6_t2 + c8r6_t2 + c9r6_t2 + c10r6_t2
c5r1_t2_left = c1r1_t2 + c2r1_t2 + c3r1_t2 + c4r1_t2
c5r2_t2_left = c1r2_t2 + c2r2_t2 + c3r2_t2 + c4r2_t2
c5r3_t2_left = c1r3_t2 + c2r3_t2 + c3r3_t2 + c4r3_t2
c5r4_t2_left = c1r4_t2 + c2r4_t2 + c3r4_t2 + c4r4_t2
c5r5_t2_left = c1r5_t2 + c2r5_t2 + c3r5_t2 + c4r5_t2
c5r6_t2_left = c1r6_t2 + c2r6_t2 + c3r6_t2 + c4r6_t2
c5r1_t2_right = r1_t2_bene + c11r1_t2
c5r2_t2_right = r2_t2_bene + c11r2_t2
c5r3_t2_right = r3_t2_bene + c11r3_t2
c5r4_t2_right = r4_t2_bene + c11r4_t2
c5r5_t2_right = r5_t2_bene + c11r5_t2
c5r6_t2_right = r6_t2_bene + c11r6_t2
# Table 3
r1_t3_bene = c6r1_t3 + c7r1_t3 + c8r1_t3 + c9r1_t3 + c10r1_t3
r2_t3_bene = c6r2_t3 + c7r2_t3 + c8r2_t3 + c9r2_t3 + c10r2_t3
r3_t3_bene = c6r3_t3 + c7r3_t3 + c8r3_t3 + c9r3_t3 + c10r3_t3
r4_t3_bene = c6r4_t3 + c7r4_t3 + c8r4_t3 + c9r4_t3 + c10r4_t3
c5r1_t3_left = c1r1_t3 + c2r1_t3 + c3r1_t3 + c4r1_t3
c5r2_t3_left = c1r2_t3 + c2r2_t3 + c3r2_t3 + c4r2_t3
c5r3_t3_left = c1r3_t3 + c2r3_t3 + c3r3_t3 + c4r3_t3
c5r4_t3_left = c1r4_t3 + c2r4_t3 + c3r4_t3 + c4r4_t3
c5r1_t3_right = r1_t3_bene + c11r1_t3
c5r2_t3_right = r2_t3_bene + c11r2_t3
c5r3_t3_right = r3_t3_bene + c11r3_t3
c5r4_t3_right = r4_t3_bene + c11r4_t3
# Table 4
r1_t4_bene = c6r1_t4 + c7r1_t4 + c8r1_t4 + c9r1_t4 + c10r1_t4
r2_t4_bene = c6r2_t4 + c7r2_t4 + c8r2_t4 + c9r2_t4 + c10r2_t4
r3_t4_bene = c6r3_t4 + c7r3_t4 + c8r3_t4 + c9r3_t4 + c10r3_t4
r4_t4_bene = c6r4_t4 + c7r4_t4 + c8r4_t4 + c9r4_t4 + c10r4_t4
r5_t4_bene = c6r5_t4 + c7r5_t4 + c8r5_t4 + c9r5_t4 + c10r5_t4
c5r1_t4_left = c1r1_t4 + c2r1_t4 + c3r1_t4 + c4r1_t4
c5r2_t4_left = c1r2_t4 + c2r2_t4 + c3r2_t4 + c4r2_t4
c5r3_t4_left = c1r3_t4 + c2r3_t4 + c3r3_t4 + c4r3_t4
c5r4_t4_left = c1r4_t4 + c2r4_t4 + c3r4_t4 + c4r4_t4
c5r5_t4_left = c1r5_t4 + c2r5_t4 + c3r5_t4 + c4r5_t4
c5r1_t4_right = r1_t4_bene + c11r1_t4
c5r2_t4_right = r2_t4_bene + c11r2_t4
c5r3_t4_right = r3_t4_bene + c11r3_t4
c5r4_t4_right = r4_t4_bene + c11r4_t4
c5r5_t4_right = r5_t4_bene + c11r5_t4
# Table 5
r1_t5_bene = c6r1_t5 + c7r1_t5 + c8r1_t5 + c9r1_t5 + c10r1_t5
r2_t5_bene = c6r2_t5 + c7r2_t5 + c8r2_t5 + c9r2_t5 + c10r2_t5
r3_t5_bene = c6r3_t5 + c7r3_t5 + c8r3_t5 + c9r3_t5 + c10r3_t5
r4_t5_bene = c6r4_t5 + c7r4_t5 + c8r4_t5 + c9r4_t5 + c10r4_t5
r5_t5_bene = c6r5_t5 + c7r5_t5 + c8r5_t5 + c9r5_t5 + c10r5_t5
r6_t5_bene = c6r6_t5 + c7r6_t5 + c8r6_t5 + c9r6_t5 + c10r6_t5
c5r1_t5_left = c1r1_t5 + c2r1_t5 + c3r1_t5 + c4r1_t5
c5r2_t5_left = c1r2_t5 + c2r2_t5 + c3r2_t5 + c4r2_t5
c5r3_t5_left = c1r3_t5 + c2r3_t5 + c3r3_t5 + c4r3_t5
c5r4_t5_left = c1r4_t5 + c2r4_t5 + c3r4_t5 + c4r4_t5
c5r5_t5_left = c1r5_t5 + c2r5_t5 + c3r5_t5 + c4r5_t5
c5r6_t5_left = c1r6_t5 + c2r6_t5 + c3r6_t5 + c4r6_t5
c5r1_t5_right = r1_t5_bene + c11r1_t5
c5r2_t5_right = r2_t5_bene + c11r2_t5
c5r3_t5_right = r3_t5_bene + c11r3_t5
c5r4_t5_right = r4_t5_bene + c11r4_t5
c5r5_t5_right = r5_t5_bene + c11r5_t5
c5r6_t5_right = r6_t5_bene + c11r6_t5
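# Consistency check: for every land-use class the left-hand ET total
# (transpiration + water + soil + interception evaporation, columns 1-4)
# must equal the right-hand total (beneficial + non-beneficial ET, columns 6-11)
# within the given tolerance.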
# t1
if abs(c5r1_t1_left - c5r1_t1_right) > tolerance:
raise ValueError('The left and right sides \
do not add up ({0} table \
and {1} row)'.format('PROTECTED', 'Forest'))
elif abs(c5r2_t1_left - c5r2_t1_right) > tolerance:
raise ValueError('The left and right sides \
do not add up ({0} table \
and {1} row)'.format('PROTECTED', 'Shrubland'))
elif abs(c5r3_t1_left - c5r3_t1_right) > tolerance:
raise ValueError('The left and right sides \
do not add up ({0} table \
and {1} row)'.format('PROTECTED',
'Natural grasslands'))
elif abs(c5r4_t1_left - c5r4_t1_right) > tolerance:
raise ValueError('The left and right sides \
do not add up ({0} table \
and {1} row)'.format('PROTECTED',
'Natural water bodies'))
elif abs(c5r5_t1_left - c5r5_t1_right) > tolerance:
raise ValueError('The left and right sides \
do not add up ({0} table \
and {1} row)'.format('PROTECTED', 'Wetlands'))
elif abs(c5r6_t1_left - c5r6_t1_right) > tolerance:
raise ValueError('The left and right sides \
do not add up ({0} table \
and {1} row)'.format('PROTECTED', 'Glaciers'))
elif abs(c5r7_t1_left - c5r7_t1_right) > tolerance:
raise ValueError('The left and right sides \
do not add up ({0} table \
and {1} row)'.format('PROTECTED', 'Others'))
# t2
elif abs(c5r1_t2_left - c5r1_t2_right) > tolerance:
raise ValueError('The left and right sides \
do not add up ({0} table \
and {1} row)'.format('UTILIZED', 'Forest'))
elif abs(c5r2_t2_left - c5r2_t2_right) > tolerance:
raise ValueError('The left and right sides \
do not add up ({0} table \
and {1} row)'.format('UTILIZED', 'Shrubland'))
elif abs(c5r3_t2_left - c5r3_t2_right) > tolerance:
raise ValueError('The left and right sides \
do not add up ({0} table \
and {1} row)'.format('UTILIZED',
'Natural grasslands'))
elif abs(c5r4_t2_left - c5r4_t2_right) > tolerance:
raise ValueError('The left and right sides \
do not add up ({0} table \
and {1} row)'.format('UTILIZED',
'Natural water bodies'))
elif abs(c5r5_t2_left - c5r5_t2_right) > tolerance:
raise ValueError('The left and right sides \
do not add up ({0} table \
and {1} row)'.format('UTILIZED', 'Wetlands'))
elif abs(c5r6_t2_left - c5r6_t2_right) > tolerance:
raise ValueError('The left and right sides \
do not add up ({0} table \
and {1} row)'.format('UTILIZED', 'Others'))
# t3
elif abs(c5r1_t3_left - c5r1_t3_right) > tolerance:
raise ValueError('The left and right sides \
do not add up ({0} table \
and {1} row)'.format('MODIFIED', 'Rainfed crops'))
elif abs(c5r2_t3_left - c5r2_t3_right) > tolerance:
raise ValueError('The left and right sides \
do not add up ({0} table \
and {1} row)'.format('MODIFIED',
'Forest plantations'))
elif abs(c5r3_t3_left - c5r3_t3_right) > tolerance:
raise ValueError('The left and right sides \
do not add up ({0} table \
and {1} row)'.format('MODIFIED', 'Settlements'))
elif abs(c5r4_t3_left - c5r4_t3_right) > tolerance:
raise ValueError('The left and right sides \
do not add up ({0} table \
and {1} row)'.format('MODIFIED', 'Others'))
# t4
elif abs(c5r1_t4_left - c5r1_t4_right) > tolerance:
raise ValueError('The left and right sides \
do not add up ({0} table \
and {1} row)'.format('MANAGED CONVENTIONAL',
'Irrigated crops'))
elif abs(c5r2_t4_left - c5r2_t4_right) > tolerance:
raise ValueError('The left and right sides \
do not add up ({0} table \
and {1} row)'.format('MANAGED CONVENTIONAL',
'Managed water bodies'))
elif abs(c5r3_t4_left - c5r3_t4_right) > tolerance:
raise ValueError('The left and right sides \
do not add up ({0} table \
and {1} row)'.format('MANAGED CONVENTIONAL',
'Residential'))
elif abs(c5r4_t4_left - c5r4_t4_right) > tolerance:
raise ValueError('The left and right sides \
do not add up ({0} table \
and {1} row)'.format('MANAGED CONVENTIONAL',
'Industry'))
elif abs(c5r5_t4_left - c5r5_t4_right) > tolerance:
raise ValueError('The left and right sides \
do not add up ({0} table \
and {1} row)'.format('MANAGED CONVENTIONAL',
'Others'))
# t5
elif abs(c5r1_t5_left - c5r1_t5_right) > tolerance:
raise ValueError('The left and right sides \
do not add up ({0} table \
and {1} row)'.format('MANAGED NON_CONVENTIONAL',
'Indoor domestic'))
elif abs(c5r2_t5_left - c5r2_t5_right) > tolerance:
raise ValueError('The left and right sides \
do not add up ({0} table \
and {1} row)'.format('MANAGED NON_CONVENTIONAL',
'Indoor industrial'))
elif abs(c5r3_t5_left - c5r3_t5_right) > tolerance:
raise ValueError('The left and right sides \
do not add up ({0} table \
and {1} row)'.format('MANAGED NON_CONVENTIONAL',
'Greenhouses'))
elif abs(c5r4_t5_left - c5r4_t5_right) > tolerance:
raise ValueError('The left and right sides \
do not add up ({0} table \
and {1} row)'.format('MANAGED NON_CONVENTIONAL',
'Livestock and husbandry'))
elif abs(c5r5_t5_left - c5r5_t5_right) > tolerance:
raise ValueError('The left and right sides \
do not add up ({0} table \
and {1} row)'.format('MANAGED NON_CONVENTIONAL',
'Power and energy'))
elif abs(c5r6_t5_left - c5r6_t5_right) > tolerance:
raise ValueError('The left and right sides \
do not add up ({0} table \
and {1} row)'.format('MANAGED NON_CONVENTIONAL',
'Others'))
# Calculations & modify svg
if not template:
path = os.path.dirname(os.path.abspath(__file__))
svg_template_path = os.path.join(path, 'svg', 'sheet_2.svg')
else:
svg_template_path = os.path.abspath(template)
tree = ET.parse(svg_template_path)
# Titles
xml_txt_box = tree.findall('''.//*[@id='basin']''')[0]
xml_txt_box.getchildren()[0].text = 'Basin: ' + basin
xml_txt_box = tree.findall('''.//*[@id='period']''')[0]
xml_txt_box.getchildren()[0].text = 'Period: ' + period
xml_txt_box = tree.findall('''.//*[@id='units']''')[0]
xml_txt_box.getchildren()[0].text = 'Sheet 2: Evapotranspiration (' + units + ')'
# Total ET
total_et_t1 = c5r1_t1_left + c5r2_t1_left + c5r3_t1_left + c5r4_t1_left + \
c5r5_t1_left + c5r6_t1_left + c5r7_t1_left
total_et_t2 = c5r1_t2_left + c5r2_t2_left + c5r3_t2_left + c5r4_t2_left + \
c5r5_t2_left + c5r6_t2_left
total_et_t3 = c5r1_t3_left + c5r2_t3_left + c5r3_t3_left + c5r4_t3_left
total_et_t4 = c5r1_t4_left + c5r2_t4_left + c5r3_t4_left + c5r4_t4_left + \
c5r5_t4_left
total_et_t5 = c5r1_t5_left + c5r2_t5_left + c5r3_t5_left + c5r4_t5_left + \
c5r5_t5_left + c5r6_t5_left
total_et = total_et_t1 + total_et_t2 + total_et_t3 + \
total_et_t4 + total_et_t5
et_total_managed_lu = total_et_t4 + total_et_t5
et_total_managed = total_et_t3 + et_total_managed_lu
t_total_managed_lu = c1_t4_total + c1_t5_total
xml_txt_box = tree.findall('''.//*[@id='total_et']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % total_et
xml_txt_box = tree.findall('''.//*[@id='non-manageble']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % total_et_t1
xml_txt_box = tree.findall('''.//*[@id='manageble']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % total_et_t2
xml_txt_box = tree.findall('''.//*[@id='managed']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % et_total_managed
# Totals land use
xml_txt_box = tree.findall('''.//*[@id='protected_lu_et']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % total_et_t1
xml_txt_box = tree.findall('''.//*[@id='protected_lu_t']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c1_t1_total
xml_txt_box = tree.findall('''.//*[@id='utilized_lu_et']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % total_et_t2
xml_txt_box = tree.findall('''.//*[@id='utilized_lu_t']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c1_t2_total
xml_txt_box = tree.findall('''.//*[@id='modified_lu_et']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % total_et_t3
xml_txt_box = tree.findall('''.//*[@id='modified_lu_t']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c1_t3_total
xml_txt_box = tree.findall('''.//*[@id='managed_lu_et']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % et_total_managed_lu
xml_txt_box = tree.findall('''.//*[@id='managed_lu_t']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % t_total_managed_lu
# Table 1
xml_txt_box = tree.findall('''.//*[@id='plu_et_forest']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c5r1_t1_left
xml_txt_box = tree.findall('''.//*[@id='plu_t_forest']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c1r1_t1
xml_txt_box = tree.findall('''.//*[@id='plu_et_shrubland']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c5r2_t1_left
xml_txt_box = tree.findall('''.//*[@id='plu_t_shrubland']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c1r2_t1
xml_txt_box = tree.findall('''.//*[@id='plu_et_grasslands']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c5r3_t1_left
xml_txt_box = tree.findall('''.//*[@id='plu_t_grasslands']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c1r3_t1
xml_txt_box = tree.findall('''.//*[@id='plu_et_waterbodies']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c5r4_t1_left
xml_txt_box = tree.findall('''.//*[@id='plu_t_waterbodies']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c1r4_t1
xml_txt_box = tree.findall('''.//*[@id='plu_et_wetlands']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c5r5_t1_left
xml_txt_box = tree.findall('''.//*[@id='plu_t_wetlands']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c1r5_t1
xml_txt_box = tree.findall('''.//*[@id='plu_et_glaciers']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c5r6_t1_left
xml_txt_box = tree.findall('''.//*[@id='plu_t_glaciers']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c1r6_t1
xml_txt_box = tree.findall('''.//*[@id='plu_et_others']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c5r7_t1_left
xml_txt_box = tree.findall('''.//*[@id='plu_t_others']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c1r7_t1
# Table 2
xml_txt_box = tree.findall('''.//*[@id='ulu_et_forest']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c5r1_t2_left
xml_txt_box = tree.findall('''.//*[@id='ulu_t_forest']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c1r1_t2
xml_txt_box = tree.findall('''.//*[@id='ulu_et_shrubland']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c5r2_t2_left
xml_txt_box = tree.findall('''.//*[@id='ulu_t_shrubland']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c1r2_t2
xml_txt_box = tree.findall('''.//*[@id='ulu_et_grasslands']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c5r3_t2_left
xml_txt_box = tree.findall('''.//*[@id='ulu_t_grasslands']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c1r3_t2
xml_txt_box = tree.findall('''.//*[@id='ulu_et_waterbodies']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c5r4_t2_left
xml_txt_box = tree.findall('''.//*[@id='ulu_t_waterbodies']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c1r4_t2
xml_txt_box = tree.findall('''.//*[@id='ulu_et_wetlands']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c5r5_t2_left
xml_txt_box = tree.findall('''.//*[@id='ulu_t_wetlands']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c1r5_t2
xml_txt_box = tree.findall('''.//*[@id='ulu_et_others']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c5r6_t2_left
xml_txt_box = tree.findall('''.//*[@id='ulu_t_others']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c1r6_t2
# Table 3
xml_txt_box = tree.findall('''.//*[@id='molu_et_rainfed']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c5r1_t3_left
xml_txt_box = tree.findall('''.//*[@id='molu_t_rainfed']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c1r1_t3
xml_txt_box = tree.findall('''.//*[@id='molu_et_forest']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c5r2_t3_left
xml_txt_box = tree.findall('''.//*[@id='molu_t_forest']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c1r2_t3
xml_txt_box = tree.findall('''.//*[@id='molu_et_settlements']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c5r3_t3_left
xml_txt_box = tree.findall('''.//*[@id='molu_t_settlements']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c1r3_t3
xml_txt_box = tree.findall('''.//*[@id='molu_et_others']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c5r4_t3_left
xml_txt_box = tree.findall('''.//*[@id='molu_t_others']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c1r4_t3
# Table 4
xml_txt_box = tree.findall('''.//*[@id='malu_et_crops']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c5r1_t4_left
xml_txt_box = tree.findall('''.//*[@id='malu_t_crops']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c1r1_t4
xml_txt_box = tree.findall('''.//*[@id='malu_et_waterbodies']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c5r2_t4_left
xml_txt_box = tree.findall('''.//*[@id='malu_t_waterbodies']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c1r2_t4
xml_txt_box = tree.findall('''.//*[@id='malu_et_residential']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c5r3_t4_left
xml_txt_box = tree.findall('''.//*[@id='malu_t_residential']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c1r3_t4
xml_txt_box = tree.findall('''.//*[@id='malu_et_industry']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c5r4_t4_left
xml_txt_box = tree.findall('''.//*[@id='malu_t_industry']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c1r4_t4
xml_txt_box = tree.findall('''.//*[@id='malu_et_others1']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c5r5_t4_left
xml_txt_box = tree.findall('''.//*[@id='malu_t_others1']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c1r5_t4
# Table 5
xml_txt_box = tree.findall('''.//*[@id='malu_et_idomestic']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c5r1_t5_left
xml_txt_box = tree.findall('''.//*[@id='malu_t_idomestic']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c1r1_t5
xml_txt_box = tree.findall('''.//*[@id='malu_et_iindustry']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c5r2_t5_left
xml_txt_box = tree.findall('''.//*[@id='malu_t_iindustry']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c1r2_t5
xml_txt_box = tree.findall('''.//*[@id='malu_et_greenhouses']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c5r3_t5_left
xml_txt_box = tree.findall('''.//*[@id='malu_t_greenhouses']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c1r3_t5
xml_txt_box = tree.findall('''.//*[@id='malu_et_livestock']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c5r4_t5_left
xml_txt_box = tree.findall('''.//*[@id='malu_t_livestock']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c1r4_t5
xml_txt_box = tree.findall('''.//*[@id='malu_et_powerandenergy']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c5r5_t5_left
xml_txt_box = tree.findall('''.//*[@id='malu_t_powerandenergy']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c1r5_t5
xml_txt_box = tree.findall('''.//*[@id='malu_et_others2']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c5r6_t5_left
xml_txt_box = tree.findall('''.//*[@id='malu_t_others2']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % c1r6_t5
# Right box
total_t = c1_t1_total + c1_t2_total + c1_t3_total + \
c1_t4_total + c1_t5_total
total_e = total_et - total_t
total_water = c2_t1_total + c2_t2_total + c2_t3_total + \
c2_t4_total + c2_t5_total
total_soil = c3_t1_total + c3_t2_total + c3_t3_total + \
c3_t4_total + c3_t5_total
total_interception = c4_t1_total + c4_t2_total + c4_t3_total + \
c4_t4_total + c4_t5_total
xml_txt_box = tree.findall('''.//*[@id='evaporation']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % total_e
xml_txt_box = tree.findall('''.//*[@id='transpiration']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % total_t
xml_txt_box = tree.findall('''.//*[@id='water']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % total_water
xml_txt_box = tree.findall('''.//*[@id='soil']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % total_soil
xml_txt_box = tree.findall('''.//*[@id='interception']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % total_interception
total_agr = c6_t1_total + c6_t2_total + c6_t3_total + \
c6_t4_total + c6_t5_total
total_env = c7_t1_total + c7_t2_total + c7_t3_total + \
c7_t4_total + c7_t5_total
total_eco = c8_t1_total + c8_t2_total + c8_t3_total + \
c8_t4_total + c8_t5_total
total_ene = c9_t1_total + c9_t2_total + c9_t3_total + \
c9_t4_total + c9_t5_total
total_lei = c10_t1_total + c10_t2_total + c10_t3_total + \
c10_t4_total + c10_t5_total
total_bene = total_agr + total_env + total_eco + total_ene + total_lei
total_non_bene = c11_t1_total + c11_t2_total + c11_t3_total + \
c11_t4_total + c11_t5_total
xml_txt_box = tree.findall('''.//*[@id='non-beneficial']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % total_non_bene
xml_txt_box = tree.findall('''.//*[@id='beneficial']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % total_bene
xml_txt_box = tree.findall('''.//*[@id='agriculture']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % total_agr
xml_txt_box = tree.findall('''.//*[@id='environment']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % total_env
xml_txt_box = tree.findall('''.//*[@id='economy']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % total_eco
xml_txt_box = tree.findall('''.//*[@id='energy']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % total_ene
xml_txt_box = tree.findall('''.//*[@id='leisure']''')[0]
xml_txt_box.getchildren()[0].text = '%.1f' % total_lei
# svg to string
ET.register_namespace("", "http://www.w3.org/2000/svg")
# Get the paths based on the environment variable
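# On Windows, WA_PATHS is expected to be a ';'-separated list of tool folders
# whose second entry contains inkscape.exe; on posix the 'inkscape' binary on
# the system PATH is used instead.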
if os.name == 'posix':
Path_Inkscape = 'inkscape'
else:
WA_env_paths = os.environ["WA_PATHS"].split(';')
Inkscape_env_path = WA_env_paths[1]
Path_Inkscape = os.path.join(Inkscape_env_path, 'inkscape.exe')
# Export svg to pdf
tempout_path = output.replace('.pdf', '_temporary.svg')
tree.write(tempout_path)
subprocess.call([Path_Inkscape, tempout_path, '--export-pdf=' + output, '-d 300'])
time.sleep(10)
os.remove(tempout_path)
# Return
return output
|
[] |
[] |
[
"WA_PATHS"
] |
[]
|
["WA_PATHS"]
|
python
| 1 | 0 | |
main.py
|
from unityagents import UnityEnvironment
import numpy as np
from collections import deque
import os
from maddpg import MADDPGAgent
import matplotlib.pyplot as plt
import datetime
import torch
#device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device = torch.device("cpu")
def plot(scores=[], ylabels=["Scores"], xlabel="Episode #", title="", text=""):
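"""Plot each score series against the episode index, save a timestamped PNG and show the figure."""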
fig, ax = plt.subplots()
for score, label in zip(scores, ylabels):
ax.plot(np.arange(len(score)), score, label=label)
ax.grid()
ax.legend(loc='upper left', shadow=False, fontsize='x-large')
fig.tight_layout()
fig.savefig(f"plot_{datetime.datetime.now().isoformat().replace(':', '')}.png")
plt.show()
def experiment(n_episodes=20000, ou_noise = 2.0, ou_noise_decay_rate = 0.998, train_mode=True,
threshold=0.5, buffer_size=1000000, batch_size=512, update_every=2, tau=0.01,
lr_actor=0.001, lr_critic=0.001):
"""
Multi-Agent Deep Deterministic Policy Gradient (MADDPG)
:param n_episodes: maximum number of training episodes
:param train_mode: when 'True' set environment to training mode
:param threshold: score after which the environment is solved
:return scores_all, moving_average: List of all scores and moving average.
"""
env = UnityEnvironment(file_name="Tennis/Tennis", base_port=64738)
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
env_info = env.reset(train_mode=True)[brain_name]
action_size = brain.vector_action_space_size
num_agents = len(env_info.agents)
states = env_info.vector_observations
state_size = states.shape[1]
maddpgagent = MADDPGAgent(state_size=state_size, action_size=action_size, num_agents=num_agents,
random_seed=0, buffer_size=buffer_size, device=device,
batch_size=batch_size, update_every=update_every, tau=tau, lr_actor=lr_actor,
lr_critic=lr_critic)
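# Keep the best per-episode score; the task counts as solved once the
# 100-episode moving average of these scores exceeds the threshold.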
scores_window = deque(maxlen=100)
scores_all = []
moving_average = []
for episode in range(1, n_episodes):
env_info = env.reset(train_mode=train_mode)[brain_name]
states = env_info.vector_observations
maddpgagent.reset()
scores = np.zeros(num_agents)
while True:
actions = maddpgagent.act(states, noise = ou_noise)
env_info = env.step(actions)[brain_name]
next_states = env_info.vector_observations
rewards = np.asarray(env_info.rewards)
dones = np.asarray(env_info.local_done).astype(np.uint8)
maddpgagent.step(states, actions, rewards, next_states, dones)
scores += rewards
states = next_states
if np.any(dones):
break
best_score = np.max(scores)
scores_window.append(best_score)
scores_all.append(best_score)
moving_average.append(np.mean(scores_window))
ou_noise *= ou_noise_decay_rate #decaying noise speeds up training.
print('\rEpisode {}\tAverage Training Score: {:.3f}\tMin:{:.3f}\tMax:{:.3f}'
.format(episode, np.mean(scores_window), np.min(scores_window), np.max(scores_window)), end='')
if episode % 100 == 0:
print('\rEpisode {}\tAverage Training Score: {:.3f}\tMin:{:.3f}\tMax:{:.3f}\tMoving Average: {:.3f}'
.format(episode, np.mean(scores_window), np.min(scores_window), np.max(scores_window),moving_average[-1]))
if moving_average[-1] > threshold:
print(' Environment solved after {:d} episodes! \n Moving Average: {:.3f}'.format(episode, moving_average[-1]))
maddpgagent.save_models()
break
return scores_all, moving_average
def main():
os.environ['NO_PROXY'] = 'localhost,127.0.0.*'
scores_all, moving_average = experiment(n_episodes=20000, ou_noise=2.0, ou_noise_decay_rate=0.998, train_mode=True,
threshold=0.5, buffer_size=1000000, batch_size=512, update_every=2, tau=0.01,
lr_actor=0.001, lr_critic=0.001)
plot(scores=[scores_all, moving_average], ylabels=["Scores", "Average Score"], xlabel="Episode #", title="", text="")
if __name__ == "__main__":
main()
|
[] |
[] |
[
"NO_PROXY"
] |
[]
|
["NO_PROXY"]
|
python
| 1 | 0 | |
client.go
|
package main
import (
"net/http"
"net/url"
"os"
)
const MTBA_API = "https://api-v3.mbta.com"
func newClient() *http.Client {
// TLS certs should be added here
return &http.Client{}
}
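// newRequest builds a GET request for the given base URL and path, adding the
// MBTA_API_KEY environment variable (when set) as the api_key query parameter.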
func newRequest(base, path string, params url.Values) (*http.Request, error) {
uri, err := url.Parse(base)
if err != nil {
return nil, err
}
// if an api key is provided, use it
// TODO: use something better than env var
if k := os.Getenv("MBTA_API_KEY"); k != "" {
params.Add("api_key", k)
}
uri.Path += path
uri.RawQuery = params.Encode()
return http.NewRequest("GET", uri.String(), nil)
}
|
[
"\"MBTA_API_KEY\""
] |
[] |
[
"MBTA_API_KEY"
] |
[]
|
["MBTA_API_KEY"]
|
go
| 1 | 0 | |
fuzzers/005-tilegrid/bram/top.py
|
import os
import random
random.seed(int(os.getenv("SEED"), 16))
from prjxray import util
from prjxray.db import Database
def gen_sites():
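"""Yield (tile_name, site_name) pairs for every FIFO18E1 site in the part's grid."""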
db = Database(util.get_db_root(), util.get_part())
grid = db.grid()
for tile_name in sorted(grid.tiles()):
loc = grid.loc_of_tilename(tile_name)
gridinfo = grid.gridinfo_at_loc(loc)
for site_name, site_type in gridinfo.sites.items():
if site_type in ['FIFO18E1']:
yield tile_name, site_name
def write_params(params):
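"""Write one 'tile,val,site' CSV row per fuzzed site to params.csv."""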
pinstr = 'tile,val,site\n'
for tile, (site, val) in sorted(params.items()):
pinstr += '%s,%s,%s\n' % (tile, val, site)
open('params.csv', 'w').write(pinstr)
def run():
print(
'''
module top(input clk, stb, di, output do);
localparam integer DIN_N = 8;
localparam integer DOUT_N = 8;
reg [DIN_N-1:0] din;
wire [DOUT_N-1:0] dout;
reg [DIN_N-1:0] din_shr;
reg [DOUT_N-1:0] dout_shr;
always @(posedge clk) begin
din_shr <= {din_shr, di};
dout_shr <= {dout_shr, din_shr[DIN_N-1]};
if (stb) begin
din <= din_shr;
dout_shr <= dout;
end
end
assign do = dout_shr[DOUT_N-1];
''')
params = {}
sites = list(gen_sites())
for (tile_name, site_name), isone in zip(sites,
util.gen_fuzz_states(len(sites))):
params[tile_name] = (site_name, isone)
print(
'''
(* KEEP, DONT_TOUCH, LOC = "%s" *)
RAMB18E1 #(
.DOA_REG(%u)
) bram_%s (
.CLKARDCLK(),
.CLKBWRCLK(),
.ENARDEN(),
.ENBWREN(),
.REGCEAREGCE(),
.REGCEB(),
.RSTRAMARSTRAM(),
.RSTRAMB(),
.RSTREGARSTREG(),
.RSTREGB(),
.ADDRARDADDR(),
.ADDRBWRADDR(),
.DIADI(),
.DIBDI(),
.DIPADIP(),
.DIPBDIP(),
.WEA(),
.WEBWE(),
.DOADO(),
.DOBDO(),
.DOPADOP(),
.DOPBDOP());
''' % (site_name, isone, site_name))
print("endmodule")
write_params(params)
if __name__ == '__main__':
run()
|
[] |
[] |
[
"SEED"
] |
[]
|
["SEED"]
|
python
| 1 | 0 | |
src/main/resources/dags/comet_watch.py
|
import os
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from datetime import datetime, timedelta
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': datetime(2018, 11, 2),
'email': ['[email protected]'],
'email_on_failure': False,
'email_on_retry': False,
'retries': 0,
'retry_delay': timedelta(minutes=5),
# 'queue': 'bash_queue',
# 'pool': 'backfill',
# 'priority_weight': 10,
# 'end_date': datetime(2016, 1, 1),
}
dag = DAG('comet_watcher', max_active_runs=1, catchup=False, default_args=default_args, schedule_interval="*/1 * * * *")
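# The Spark launch command and the domain to watch come from environment
# variables; both fall back to an empty string when unset.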
COMET_SPARK_CMD = os.environ.get('COMET_SPARK_CMD', '')
COMET_DOMAIN = os.environ.get('COMET_DOMAIN', '')
# t1, t2 and t3 are examples of tasks created by instantiating operators
t1 = BashOperator(
task_id='comet_watcher',
bash_command=COMET_SPARK_CMD + ' watch ' + COMET_DOMAIN,
dag=dag)
|
[] |
[] |
[
"COMET_SPARK_CMD",
"COMET_DOMAIN"
] |
[]
|
["COMET_SPARK_CMD", "COMET_DOMAIN"]
|
python
| 2 | 0 | |
setup.py
|
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="degiroapi",
version="0.9.7",
author="Lorenz Kraus",
author_email="[email protected]",
description="An unofficial API for the trading platform Degiro written in Python",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/lolokraus/DegiroAPI",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
install_requires=[
'requests'
]
)
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
mgr/runner_test.go
|
package mgr
import (
"encoding/json"
"fmt"
"io/ioutil"
"log/syslog"
"math/rand"
"os"
"path/filepath"
"strings"
"sync"
"testing"
"time"
"github.com/json-iterator/go"
"github.com/stretchr/testify/assert"
"github.com/qiniu/log"
"github.com/qiniu/logkit/cleaner"
"github.com/qiniu/logkit/conf"
"github.com/qiniu/logkit/parser"
parserConf "github.com/qiniu/logkit/parser/config"
"github.com/qiniu/logkit/parser/qiniu"
"github.com/qiniu/logkit/reader"
readerConf "github.com/qiniu/logkit/reader/config"
"github.com/qiniu/logkit/router"
"github.com/qiniu/logkit/sender"
_ "github.com/qiniu/logkit/sender/builtin"
senderConf "github.com/qiniu/logkit/sender/config"
"github.com/qiniu/logkit/sender/discard"
"github.com/qiniu/logkit/sender/mock"
"github.com/qiniu/logkit/sender/pandora"
"github.com/qiniu/logkit/transforms"
_ "github.com/qiniu/logkit/transforms/builtin"
"github.com/qiniu/logkit/transforms/ip"
"github.com/qiniu/logkit/transforms/mutate"
"github.com/qiniu/logkit/utils/equeue"
. "github.com/qiniu/logkit/utils/models"
)
func cleanMetaFolder(path string) {
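// cleanMetaFolder removes the buffer and meta files a runner leaves behind in its meta folder.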
err := os.Remove(path + "/buf.dat")
if err != nil {
log.Println(err)
}
err = os.Remove(path + "/buf.meta")
if err != nil {
log.Println(err)
}
err = os.Remove(path + "/file.meta")
if err != nil {
log.Println(err)
}
}
func Test_Run(t *testing.T) {
dir := "Test_RunForErrData"
if err := os.Mkdir(dir, DefaultDirPerm); err != nil {
log.Fatalf("Test_Run error mkdir %v %v", dir, err)
}
defer os.RemoveAll(dir)
logpath := dir + "/logdir"
logpathLink := dir + "/logdirlink"
metapath := dir + "/meta_mock_csv"
if err := os.Mkdir(logpath, DefaultDirPerm); err != nil {
log.Fatalf("Test_Run error mkdir %v %v", logpath, err)
}
absLogpath, err := filepath.Abs(logpath)
if err != nil {
t.Fatalf("filepath.Abs %v, %v", logpath, err)
}
absLogpathLink, err := filepath.Abs(logpathLink)
if err != nil {
t.Fatalf("filepath.Abs %v, %v", logpathLink, err)
}
if err := os.Symlink(absLogpath, absLogpathLink); err != nil {
log.Fatalf("Test_Run error symbol link %v to %v: %v", absLogpathLink, logpath, err)
}
if err := os.Mkdir(metapath, DefaultDirPerm); err != nil {
log.Fatalf("Test_Run error mkdir %v %v", metapath, err)
}
log1 := `hello 123
xx 1
`
log2 := `
`
log3 := `h 456
x 789`
if err := ioutil.WriteFile(filepath.Join(logpath, "log1"), []byte(log1), 0666); err != nil {
log.Fatalf("write log1 fail %v", err)
}
time.Sleep(time.Second)
if err := ioutil.WriteFile(filepath.Join(logpath, "log2"), []byte(log2), 0666); err != nil {
log.Fatalf("write log3 fail %v", err)
}
time.Sleep(time.Second)
if err := ioutil.WriteFile(filepath.Join(logpath, "log3"), []byte(log3), 0666); err != nil {
log.Fatalf("write log2 fail %v", err)
}
exppath1 := filepath.Join(absLogpath, "log1")
exppath3 := filepath.Join(absLogpath, "log3")
exppaths := []string{exppath1, exppath1, exppath3, exppath3}
rinfo := RunnerInfo{
RunnerName: "test_runner",
MaxBatchLen: 1,
MaxBatchSize: 2048,
}
readerConfig := conf.MapConf{
"log_path": logpathLink,
"meta_path": metapath,
"mode": "dir",
"read_from": "oldest",
"datasource_tag": "testtag",
"reader_buf_size": "16",
}
meta, err := reader.NewMetaWithConf(readerConfig)
if err != nil {
t.Error(err)
}
isFromWeb := false
r, err := reader.NewFileBufReader(readerConfig, isFromWeb)
if err != nil {
t.Error(err)
}
cleanChan := make(chan cleaner.CleanSignal)
cleanerConfig := conf.MapConf{
"delete_enable": "true",
}
c, err := cleaner.NewCleaner(cleanerConfig, meta, cleanChan, meta.LogPath())
if err != nil {
t.Error(err)
}
parseConf := conf.MapConf{
"name": "req_csv",
"type": parserConf.TypeCSV,
"csv_schema": "logtype string, xx long",
"csv_splitter": " ",
"disable_record_errdata": "true",
}
ps := parser.NewRegistry()
pparser, err := ps.NewLogParser(parseConf)
if err != nil {
t.Error(err)
}
senderConfigs := []conf.MapConf{
conf.MapConf{
"name": "mock_sender",
"sender_type": "mock",
},
}
var senders []sender.Sender
raws, err := mock.NewSender(senderConfigs[0])
s, succ := raws.(*mock.Sender)
if !succ {
t.Error("sender should be mock sender")
}
if err != nil {
t.Error(err)
}
senders = append(senders, s)
runner, err := NewLogExportRunnerWithService(rinfo, r, c, pparser, nil, senders, nil, meta)
if err != nil {
t.Error(err)
}
cleanInfo := CleanInfo{
enable: true,
logdir: absLogpath,
}
assert.Equal(t, cleanInfo, runner.Cleaner())
go runner.Run()
timer := time.NewTimer(20 * time.Second).C
for {
if s.SendCount() >= 4 {
break
}
select {
case <-timer:
t.Error("runner didn't stop within ticker time")
return
default:
time.Sleep(time.Second)
}
}
var dts []Data
rawData := runner.senders[0].Name()[len("mock_sender "):]
err = jsoniter.Unmarshal([]byte(rawData), &dts)
if err != nil {
t.Error(err)
}
if len(dts) != 4 {
t.Errorf("got sender data not match error,expect 4 but %v", len(dts))
}
for idx, dt := range dts {
assert.Equal(t, exppaths[idx], dt["testtag"])
}
}
func Test_RunForEnvTag(t *testing.T) {
dir := "Test_RunForEnvTag"
if err := os.Mkdir(dir, DefaultDirPerm); err != nil {
log.Fatalf("Test_RunForEnvTag error mkdir %v %v", dir, err)
}
defer os.RemoveAll(dir)
originEnv := os.Getenv("Test_RunForEnvTag")
defer func() {
os.Setenv("Test_RunForEnvTag", originEnv)
}()
if err := os.Setenv("Test_RunForEnvTag", "{\"Test_RunForEnvTag\":\"env_value\"}"); err != nil {
t.Fatalf("set env %v to %v error %v", "Test_RunForEnvTag", "env_value", err)
}
logpath := dir + "/logdir"
logpathLink := dir + "/logdirlink"
metapath := dir + "/meta_mock_csv"
if err := os.Mkdir(logpath, DefaultDirPerm); err != nil {
log.Fatalf("Test_RunForEnvTag error mkdir %v %v", logpath, err)
}
absLogpath, err := filepath.Abs(logpath)
if err != nil {
t.Fatalf("filepath.Abs %v, %v", logpath, err)
}
absLogpathLink, err := filepath.Abs(logpathLink)
if err != nil {
t.Fatalf("filepath.Abs %v, %v", logpathLink, err)
}
if err := os.Symlink(absLogpath, absLogpathLink); err != nil {
log.Fatalf("Test_Run error symbol link %v to %v: %v", absLogpathLink, logpath, err)
}
if err := os.Mkdir(metapath, DefaultDirPerm); err != nil {
log.Fatalf("Test_Run error mkdir %v %v", metapath, err)
}
log1 := `hello 123
xx 1
`
log2 := `
`
log3 := `h 456
x 789`
if err := ioutil.WriteFile(filepath.Join(logpath, "log1"), []byte(log1), 0666); err != nil {
log.Fatalf("write log1 fail %v", err)
}
time.Sleep(time.Second)
if err := ioutil.WriteFile(filepath.Join(logpath, "log2"), []byte(log2), 0666); err != nil {
log.Fatalf("write log3 fail %v", err)
}
time.Sleep(time.Second)
if err := ioutil.WriteFile(filepath.Join(logpath, "log3"), []byte(log3), 0666); err != nil {
log.Fatalf("write log2 fail %v", err)
}
rinfo := RunnerInfo{
RunnerName: "test_runner",
MaxBatchLen: 1,
MaxBatchSize: 2048,
ExtraInfo: true,
EnvTag: "Test_RunForEnvTag",
}
readerConfig := conf.MapConf{
"log_path": logpathLink,
"meta_path": metapath,
"mode": "dir",
"read_from": "oldest",
"reader_buf_size": "16",
}
meta, err := reader.NewMetaWithConf(readerConfig)
if err != nil {
t.Error(err)
}
isFromWeb := false
reader, err := reader.NewFileBufReader(readerConfig, isFromWeb)
if err != nil {
t.Error(err)
}
cleanChan := make(chan cleaner.CleanSignal)
cleanerConfig := conf.MapConf{
"delete_enable": "true",
}
cleaner, err := cleaner.NewCleaner(cleanerConfig, meta, cleanChan, meta.LogPath())
if err != nil {
t.Error(err)
}
parseConf := conf.MapConf{
"name": "req_csv",
"type": "csv",
"csv_schema": "logtype string, xx long",
"csv_splitter": " ",
"disable_record_errdata": "true",
}
ps := parser.NewRegistry()
pparser, err := ps.NewLogParser(parseConf)
if err != nil {
t.Error(err)
}
senderConfigs := []conf.MapConf{
{
"name": "mock_sender",
"sender_type": "mock",
},
}
var senders []sender.Sender
raws, err := mock.NewSender(senderConfigs[0])
s, succ := raws.(*mock.Sender)
if !succ {
t.Error("sender should be mock sender")
}
if err != nil {
t.Error(err)
}
senders = append(senders, s)
r, err := NewLogExportRunnerWithService(rinfo, reader, cleaner, pparser, nil, senders, nil, meta)
if err != nil {
t.Error(err)
}
cleanInfo := CleanInfo{
enable: true,
logdir: absLogpath,
}
assert.Equal(t, cleanInfo, r.Cleaner())
go r.Run()
timer := time.NewTimer(20 * time.Second).C
for {
if s.SendCount() >= 4 {
break
}
select {
case <-timer:
t.Error("runner didn't stop within ticker time")
return
default:
time.Sleep(time.Second)
}
}
var dts []Data
rawData := r.senders[0].Name()[len("mock_sender "):]
err = jsoniter.Unmarshal([]byte(rawData), &dts)
if err != nil {
t.Error(err)
}
if len(dts) != 4 {
t.Errorf("got sender data not match error,expect 4 but %v", len(dts))
}
for _, d := range dts {
if v, ok := d["Test_RunForEnvTag"]; !ok {
t.Fatalf("Test_RunForEnvTag error, exp got Test_RunForEnvTag:env_value, but not found")
} else {
assert.Equal(t, "env_value", v)
}
}
}
func Test_RunForErrData(t *testing.T) {
dir := "Test_Run"
if err := os.Mkdir(dir, DefaultDirPerm); err != nil {
log.Fatalf("Test_Run error mkdir %v %v", dir, err)
}
defer os.RemoveAll(dir)
logpath := dir + "/logdir"
logpathLink := dir + "/logdirlink"
metapath := dir + "/meta_mock_csv"
if err := os.Mkdir(logpath, DefaultDirPerm); err != nil {
log.Fatalf("Test_Run error mkdir %v %v", logpath, err)
}
absLogpath, err := filepath.Abs(logpath)
if err != nil {
t.Fatalf("filepath.Abs %v, %v", logpath, err)
}
absLogpathLink, err := filepath.Abs(logpathLink)
if err != nil {
t.Fatalf("filepath.Abs %v, %v", logpathLink, err)
}
if err := os.Symlink(absLogpath, absLogpathLink); err != nil {
log.Fatalf("Test_Run error symbol link %v to %v: %v", absLogpathLink, logpath, err)
}
if err := os.Mkdir(metapath, DefaultDirPerm); err != nil {
log.Fatalf("Test_Run error mkdir %v %v", metapath, err)
}
log1 := `hello 123
xx 1
`
log2 := `
`
log3 := `h 456
x 789`
if err := ioutil.WriteFile(filepath.Join(logpath, "log1"), []byte(log1), 0666); err != nil {
log.Fatalf("write log1 fail %v", err)
}
time.Sleep(time.Second)
if err := ioutil.WriteFile(filepath.Join(logpath, "log2"), []byte(log2), 0666); err != nil {
log.Fatalf("write log2 fail %v", err)
}
time.Sleep(time.Second)
if err := ioutil.WriteFile(filepath.Join(logpath, "log3"), []byte(log3), 0666); err != nil {
log.Fatalf("write log3 fail %v", err)
}
exppath1 := filepath.Join(absLogpath, "log1")
exppath3 := filepath.Join(absLogpath, "log3")
exppaths := []string{exppath1, exppath1, exppath3, exppath3}
rinfo := RunnerInfo{
RunnerName: "test_runner",
MaxBatchLen: 1,
MaxBatchSize: 2048,
}
readerConfig := conf.MapConf{
"log_path": logpathLink,
"meta_path": metapath,
"mode": "dir",
"read_from": "oldest",
"datasource_tag": "testtag",
"reader_buf_size": "16",
}
meta, err := reader.NewMetaWithConf(readerConfig)
if err != nil {
t.Error(err)
}
isFromWeb := false
reader, err := reader.NewFileBufReader(readerConfig, isFromWeb)
if err != nil {
t.Error(err)
}
cleanChan := make(chan cleaner.CleanSignal)
cleanerConfig := conf.MapConf{
"delete_enable": "true",
}
cleaner, err := cleaner.NewCleaner(cleanerConfig, meta, cleanChan, meta.LogPath())
if err != nil {
t.Error(err)
}
parseConf := conf.MapConf{
"name": "req_csv",
"type": "csv",
"csv_schema": "logtype string, xx long",
"csv_splitter": " ",
"disable_record_errdata": "false",
}
ps := parser.NewRegistry()
pparser, err := ps.NewLogParser(parseConf)
if err != nil {
t.Error(err)
}
senderConfigs := []conf.MapConf{
{
"name": "mock_sender",
"sender_type": "mock",
},
}
var senders []sender.Sender
raws, err := mock.NewSender(senderConfigs[0])
s, succ := raws.(*mock.Sender)
if !succ {
t.Error("sender should be mock sender")
}
if err != nil {
t.Error(err)
}
senders = append(senders, s)
r, err := NewLogExportRunnerWithService(rinfo, reader, cleaner, pparser, nil, senders, nil, meta)
if err != nil {
t.Error(err)
}
cleanInfo := CleanInfo{
enable: true,
logdir: absLogpath,
}
assert.Equal(t, cleanInfo, r.Cleaner())
go r.Run()
timer := time.NewTimer(20 * time.Second).C
for {
if s.SendCount() >= 4 {
break
}
select {
case <-timer:
t.Error("runner didn't stop within ticker time")
return
default:
time.Sleep(time.Second)
}
}
var dts []Data
rawData := r.senders[0].Name()[len("mock_sender "):]
err = jsoniter.Unmarshal([]byte(rawData), &dts)
if err != nil {
t.Error(err)
}
assert.Equal(t, 4, len(dts), "got sender data not match")
for idx, dt := range dts {
if _, ok := dt[KeyPandoraStash]; ok {
if dt["testtag"] == nil {
t.Errorf("data source should be added")
}
} else {
assert.Equal(t, exppaths[idx], dt["testtag"])
}
}
}
func Test_Compatible(t *testing.T) {
rc := RunnerConfig{
ReaderConfig: conf.MapConf{
"log_path": "/path1",
"meta_path": "meta",
"mode": "dir",
"read_from": "oldest",
"datasource_tag": "testtag",
},
ParserConf: conf.MapConf{
"type": "qiniulog",
},
}
exprc := RunnerConfig{
ReaderConfig: conf.MapConf{
"log_path": "/path1",
"meta_path": "meta",
"mode": "dir",
"read_from": "oldest",
"datasource_tag": "testtag",
"head_pattern": "^" + qiniu.HeadPatthern,
},
ParserConf: conf.MapConf{
"type": "qiniulog",
},
}
rc = Compatible(rc)
assert.Equal(t, exprc, rc)
rc2 := RunnerConfig{
ReaderConfig: conf.MapConf{
"log_path": "/path1",
"meta_path": "meta",
"mode": "dir",
"read_from": "oldest",
"datasource_tag": "testtag",
},
ParserConf: conf.MapConf{
"type": "qiniulog",
"qiniulog_prefix": "PREX",
},
}
exprc2 := RunnerConfig{
ReaderConfig: conf.MapConf{
"log_path": "/path1",
"meta_path": "meta",
"mode": "dir",
"read_from": "oldest",
"datasource_tag": "testtag",
"head_pattern": "^PREX " + qiniu.HeadPatthern,
},
ParserConf: conf.MapConf{
"type": "qiniulog",
"qiniulog_prefix": "PREX",
},
}
rc2 = Compatible(rc2)
assert.Equal(t, exprc2, rc2)
}
func Test_QiniulogRun(t *testing.T) {
dir := "Test_QiniulogRun"
//clean dir first
os.RemoveAll(dir)
if err := os.Mkdir(dir, DefaultDirPerm); err != nil {
log.Errorf("Test_QiniulogRun error mkdir %v %v", dir, err)
}
defer os.RemoveAll(dir)
logpath := dir + "/logdir"
logpathLink := dir + "/logdirlink"
metapath := dir + "/meta_mock_csv"
if err := os.Mkdir(logpath, DefaultDirPerm); err != nil {
log.Errorf("Test_Run error mkdir %v %v", logpath, err)
}
absLogpath, err := filepath.Abs(logpath)
if err != nil {
t.Fatalf("filepath.Abs %v, %v", logpath, err)
}
absLogpathLink, err := filepath.Abs(logpathLink)
if err != nil {
t.Fatalf("filepath.Abs %v, %v", logpathLink, err)
}
if err := os.Symlink(absLogpath, absLogpathLink); err != nil {
log.Fatalf("Test_Run error symbol link %v to %v: %v", absLogpathLink, logpath, err)
}
if err := os.Mkdir(metapath, DefaultDirPerm); err != nil {
log.Fatalf("Test_Run error mkdir %v %v", metapath, err)
}
log1 := `2017/01/22 11:16:08.885550 [X-ZsU][INFO] disk.go:123: [REQ_END] 200 0.010k 3.792ms
[WARN][SLdoIrCDZj7pmZsU] disk.go <job.freezeDeamon> pop() failed: not found
2017/01/22 11:15:54.947217 [2pyKMukqvwSd-ZsU][INFO] disk.go:124: Service: POST 10.200.20.25:9100/user/info, Code: 200, Xlog: AC, Time: 1ms
`
log2 := `2016/10/20 17:20:30.642666 [ERROR] disk.go:125: github.com/qiniu/logkit/queue/disk.go:241
1234 3243xsaxs
2016/10/20 17:20:30.642662 [123][WARN] disk.go:241: github.com/qiniu/logkit/queue/disk.go 1
`
log3 := `2016/10/20 17:20:30.642662 [124][WARN] disk.go:456: xxxxxx`
expfiles := []string{`[REQ_END] 200 0.010k 3.792ms
[WARN][SLdoIrCDZj7pmZsU] disk.go <job.freezeDeamon> pop() failed: not found`,
`Service: POST 10.200.20.25:9100/user/info, Code: 200, Xlog: AC, Time: 1ms`,
`github.com/qiniu/logkit/queue/disk.go:241
1234 3243xsaxs`, `github.com/qiniu/logkit/queue/disk.go 1`,
`xxxxxx`}
expreqid := []string{"X-ZsU", "2pyKMukqvwSd-ZsU", "", "123", "124"}
if err := ioutil.WriteFile(filepath.Join(logpath, "log1"), []byte(log1), 0666); err != nil {
log.Fatalf("write log1 fail %v", err)
}
time.Sleep(time.Second)
if err := ioutil.WriteFile(filepath.Join(logpath, "log2"), []byte(log2), 0666); err != nil {
log.Fatalf("write log2 fail %v", err)
}
rinfo := RunnerInfo{
RunnerName: "test_runner",
MaxBatchLen: 1,
MaxBatchSize: 2048,
}
readerConfig := conf.MapConf{
"log_path": logpathLink,
"meta_path": metapath,
"mode": "dir",
"read_from": "oldest",
"datasource_tag": "testtag",
}
parseConf := conf.MapConf{
"name": "qiniu",
"type": parserConf.TypeLogv1,
}
senderConfigs := []conf.MapConf{
{
"name": "mock_sender",
"sender_type": "mock",
},
}
rc := RunnerConfig{
RunnerInfo: rinfo,
ReaderConfig: readerConfig,
ParserConf: parseConf,
SendersConfig: senderConfigs,
IsInWebFolder: false,
}
rc = Compatible(rc)
meta, err := reader.NewMetaWithConf(rc.ReaderConfig)
if err != nil {
t.Error(err)
}
r, err := reader.NewFileBufReader(rc.ReaderConfig, rc.IsInWebFolder)
if err != nil {
t.Error(err)
}
ps := parser.NewRegistry()
pparser, err := ps.NewLogParser(parseConf)
if err != nil {
t.Error(err)
}
var senders []sender.Sender
raws, err := mock.NewSender(senderConfigs[0])
s, succ := raws.(*mock.Sender)
if !succ {
t.Error("sender should be mock sender")
}
if err != nil {
t.Error(err)
}
senders = append(senders, s)
runner, err := NewLogExportRunnerWithService(rinfo, r, nil, pparser, nil, senders, nil, meta)
if err != nil {
t.Error(err)
}
go runner.Run()
time.Sleep(time.Second)
if err := ioutil.WriteFile(filepath.Join(logpath, "log3"), []byte(log3), 0666); err != nil {
log.Fatalf("write log3 fail %v", err)
}
time.Sleep(time.Second)
timer := time.NewTimer(20 * time.Second).C
for {
if s.SendCount() >= 4 {
break
}
select {
case <-timer:
t.Error("runner didn't stop within ticker time")
return
default:
time.Sleep(time.Second)
}
}
var dts []Data
rawData := runner.senders[0].Name()[len("mock_sender "):]
err = jsoniter.Unmarshal([]byte(rawData), &dts)
if err != nil {
t.Error(err)
}
if len(dts) != 5 {
t.Errorf("got sender data not match error,expect 5 but %v", len(dts))
}
for idx, dt := range dts {
assert.Equal(t, expfiles[idx], dt["log"], "equl log test")
if expreqid[idx] == "" {
assert.Nil(t, dt["reqid"])
} else {
assert.Equal(t, expreqid[idx], dt["reqid"], "equal reqid test")
}
}
ls, err := runner.LagStats()
assert.NoError(t, err)
assert.Equal(t, &LagInfo{0, "bytes", 0, 0}, ls)
}
func TestCreateTransforms(t *testing.T) {
config1 := `{
"name":"test2.csv",
"reader":{
"log_path":"./tests/logdir",
"mode":"dir"
},
"parser":{
"name":"test2_csv_parser",
"type":"csv",
"csv_schema":"t1 string"
},
"transforms":[{
"type":"IP",
"key": "ip",
"data_path": "../transforms/ip/test_data/17monipdb.dat"
}],
"senders":[{
"name":"file_sender",
"sender_type":"file",
"file_send_path":"./test2/test2_csv_file.txt"
}]
}`
rc := RunnerConfig{}
err := jsoniter.Unmarshal([]byte(config1), &rc)
assert.NoError(t, err)
transformers, _ := createTransformers(rc)
datas := []Data{{"ip": "111.2.3.4"}}
exp := []Data{{
"ip": "111.2.3.4",
"Region": "浙江",
"City": "宁波",
"Country": "中国",
"Isp": "N/A"}}
for k := range transformers {
datas, err = transformers[k].Transform(datas)
assert.NoError(t, err)
}
assert.Equal(t, exp, datas)
}
func TestReplaceTransforms(t *testing.T) {
config1 := `{
"name":"test2.csv",
"reader":{
"log_path":"./tests/logdir",
"mode":"dir"
},
"parser":{
"name":"jsonps",
"type":"json"
},
"transforms":[{
"type":"replace",
"stage":"before_parser",
"old":"\\x",
"new":"\\\\x"
}],
"senders":[{
"name":"file_sender",
"sender_type":"file",
"file_send_path":"./test2/test2_csv_file.txt"
}]
}`
newData := make([]Data, 0)
rc := RunnerConfig{}
err := jsoniter.Unmarshal([]byte(config1), &rc)
assert.NoError(t, err)
transformers, _ := createTransformers(rc)
datas := []string{`{"status":"200","request_method":"POST","request_body":"<xml>\x0A","content_type":"text/xml"}`, `{"status":"200","request_method":"POST","request_body":"<xml>x0A","content_type":"text/xml"}`}
for k := range transformers {
datas, err = transformers[k].RawTransform(datas)
assert.NoError(t, err)
for i := range datas {
var da Data
err = jsoniter.Unmarshal([]byte(datas[i]), &da)
assert.NoError(t, err)
newData = append(newData, da)
}
}
exp := []Data{
{
"status": "200",
"request_method": "POST",
"request_body": "<xml>\\x0A",
"content_type": "text/xml",
},
{
"status": "200",
"request_method": "POST",
"request_body": "<xml>x0A",
"content_type": "text/xml",
},
}
assert.Equal(t, exp, newData)
}
func TestDateTransforms(t *testing.T) {
config1 := `{
"name":"test2.csv",
"reader":{
"log_path":"./tests/logdir",
"mode":"dir"
},
"parser":{
"name":"jsonps",
"type":"json"
},
"transforms":[{
"type":"date",
"key":"status",
"offset":1,
"time_layout_before":"",
"time_layout_after":"2006-01-02T15:04:05"
}],
"senders":[{
"name":"file_sender",
"sender_type":"file",
"file_send_path":"./test2/test2_csv_file.txt"
}]
}`
rc := RunnerConfig{}
err := jsoniter.Unmarshal([]byte(config1), &rc)
assert.NoError(t, err)
transformers, _ := createTransformers(rc)
datas := []Data{{"status": "02/01/2016--15:04:05"}, {"status": "2006-01-02 15:04:15"}}
for k := range transformers {
datas, err = transformers[k].Transform(datas)
}
exp := []Data{
{
"status": "2016-01-02T16:04:05",
},
{
"status": "2006-01-02T16:04:15",
},
}
assert.Equal(t, exp, datas)
}
func TestSplitAndConvertTransforms(t *testing.T) {
config1 := `{
"name":"test2.csv",
"reader":{
"log_path":"./tests/logdir",
"mode":"dir"
},
"parser":{
"name":"jsonps",
"type":"json"
},
"transforms":[{
"type":"split",
"key":"status",
"sep":",",
"newfield":"newarray"
},{
"type":"convert",
"dsl":"newarray array(long)"
}],
"senders":[{
"name":"file_sender",
"sender_type":"file",
"file_send_path":"./test2/test2_csv_file.txt"
}]
}`
rc := RunnerConfig{}
err := jsoniter.Unmarshal([]byte(config1), &rc)
assert.NoError(t, err)
transformers, _ := createTransformers(rc)
datas := []Data{{"status": "1,2,3"}, {"status": "4,5,6"}}
for k := range transformers {
datas, err = transformers[k].Transform(datas)
}
exp := []Data{
{
"status": "1,2,3",
"newarray": []interface{}{int64(1), int64(2), int64(3)},
},
{
"status": "4,5,6",
"newarray": []interface{}{int64(4), int64(5), int64(6)},
},
}
assert.Equal(t, exp, datas)
}
func TestGetTrend(t *testing.T) {
assert.Equal(t, SpeedUp, getTrend(0, 1))
assert.Equal(t, SpeedDown, getTrend(1, 0))
assert.Equal(t, SpeedStable, getTrend(1, 1))
}
func TestSpeedTrend(t *testing.T) {
tests := []struct {
olds StatsInfo
news StatsInfo
etime float64
exp StatsInfo
}{
{
olds: StatsInfo{
Success: 1,
Speed: 1.0,
},
news: StatsInfo{
Success: 2,
},
etime: 1.0,
exp: StatsInfo{
Success: 2,
Speed: 1.0,
Trend: SpeedStable,
},
},
{
olds: StatsInfo{},
news: StatsInfo{},
etime: 0,
exp: StatsInfo{
Success: 0,
Speed: 0,
Trend: SpeedStable,
},
},
{
olds: StatsInfo{
Success: 1,
Speed: 1.0,
},
news: StatsInfo{
Success: 10,
},
etime: 1.0,
exp: StatsInfo{
Success: 10,
Speed: 9.0,
Trend: SpeedUp,
},
},
{
olds: StatsInfo{
Success: 10,
Speed: 10.0,
},
news: StatsInfo{
Success: 11,
},
etime: 1.0,
exp: StatsInfo{
Success: 11,
Speed: 1.0,
Trend: SpeedDown,
},
},
}
for _, ti := range tests {
ti.news.Speed, ti.news.Trend = calcSpeedTrend(ti.olds, ti.news, ti.etime)
assert.Equal(t, ti.exp, ti.news)
}
}
func TestCopyStats(t *testing.T) {
tests := []struct {
src RunnerStatus
dst RunnerStatus
exp RunnerStatus
}{
{
src: RunnerStatus{
ReadDataSize: 10,
ReadDataCount: 10,
SenderStats: map[string]StatsInfo{
"a": {
Success: 11,
Speed: 1.0,
Trend: SpeedDown,
},
"c": {
Success: 12,
Speed: 1.0,
Trend: SpeedDown,
},
},
TransformStats: map[string]StatsInfo{
"x": {
Success: 2,
Speed: 5.0,
Trend: SpeedDown,
},
},
ReadSpeedKB: 10,
ReadSpeed: 10,
},
exp: RunnerStatus{
ReadDataSize: 10,
ReadDataCount: 10,
SenderStats: map[string]StatsInfo{
"a": {
Success: 11,
Speed: 1.0,
Trend: SpeedDown,
},
"c": {
Success: 12,
Speed: 1.0,
Trend: SpeedDown,
},
},
TransformStats: map[string]StatsInfo{
"x": {
Success: 2,
Speed: 5.0,
Trend: SpeedDown,
},
},
ReadSpeedKB: 10,
ReadSpeed: 10,
},
dst: RunnerStatus{
ReadDataSize: 5,
ReadDataCount: 0,
SenderStats: map[string]StatsInfo{
"x": {
Success: 0,
Speed: 2.0,
Trend: SpeedDown,
},
"b": {
Success: 5,
Speed: 1.0,
Trend: SpeedDown,
},
},
TransformStats: map[string]StatsInfo{
"s": {
Success: 21,
Speed: 50.0,
Trend: SpeedUp,
},
},
ReadSpeedKB: 11,
ReadSpeed: 2,
},
},
}
for _, ti := range tests {
ti.dst = (&ti.src).Clone()
for i, v := range ti.src.SenderStats {
v.Speed = 0
v.Success = 0
ti.src.SenderStats[i] = v
}
assert.Equal(t, ti.exp, ti.dst)
}
}
func TestSyslogRunnerX(t *testing.T) {
metaDir := "TestSyslogRunner"
os.Mkdir(metaDir, DefaultDirPerm)
defer os.RemoveAll(metaDir)
config := `{
"name":"TestSyslogRunner",
"batch_len":1,
"reader":{
"mode":"socket",
"meta_path":"TestSyslogRunner",
"socket_service_address":"tcp://:5142"
},
"parser":{
"name":"syslog",
"type":"raw"
},
"senders":[{
"name":"file_sender",
"sender_type":"file",
"file_send_path":"./TestSyslogRunner/syslog.txt"
}]
}`
rc := RunnerConfig{}
err := jsoniter.Unmarshal([]byte(config), &rc)
assert.NoError(t, err)
rr, err := NewCustomRunner(rc, make(chan cleaner.CleanSignal), reader.NewRegistry(), parser.NewRegistry(), sender.NewRegistry())
assert.NoError(t, err)
go rr.Run()
time.Sleep(1 * time.Second)
sysLog, err := syslog.Dial("tcp", "127.0.0.1:5142",
syslog.LOG_WARNING|syslog.LOG_DAEMON, "demotag")
if err != nil {
log.Fatal(err)
}
err = sysLog.Emerg("And this is a daemon emergency with demotag.")
assert.NoError(t, err)
err = sysLog.Emerg("this is OK")
assert.NoError(t, err)
time.Sleep(2 * time.Second)
data, err := ioutil.ReadFile("./TestSyslogRunner/syslog.txt")
assert.NoError(t, err)
if !strings.Contains(string(data), "this is OK") || !strings.Contains(string(data), "And this is a daemon emergency with demotag.") {
t.Error("syslog parse error")
}
}
func TestAddDatasource(t *testing.T) {
sourceFroms := []string{"a", "b", "c", "d", "e", "f"}
se := &StatsError{
DatasourceSkipIndex: []int{0, 3, 5},
}
datas := []Data{
{
"f1": "2",
},
{
"f2": "1",
},
{
"f3": "3",
},
}
datasourceTagName := "source"
runnername := "runner1"
exp := []Data{
{
"f1": "2",
"source": "b",
},
{
"f2": "1",
"source": "c",
},
{
"f3": "3",
"source": "e",
},
}
gots := addSourceToData(sourceFroms, se, datas, datasourceTagName, runnername)
assert.Equal(t, exp, gots)
se = nil
exp = []Data{
{
"f1": "2",
"source": "a",
},
{
"f2": "1",
"source": "b",
},
{
"f3": "3",
"source": "c",
},
}
datas = []Data{
{
"f1": "2",
},
{
"f2": "1",
},
{
"f3": "3",
},
}
gots = addSourceToData(sourceFroms, se, datas, datasourceTagName, runnername)
assert.Equal(t, exp, gots)
}
func TestAddDatasourceForErrData(t *testing.T) {
sourceFroms := []string{"a", "b", "c", "d", "e", "f"}
se := &StatsError{
DatasourceSkipIndex: []int{0, 3, 5},
}
datas := []Data{
{
"pandora_stash": "rawdata1",
},
{
"f1": "2",
},
{
"f2": "1",
},
{
"pandora_stash": "rawdata2",
},
{
"f3": "3",
},
{
"pandora_stash": "rawdata3",
},
}
datasourceTagName := "source"
runnername := "runner1"
exp := []Data{
{
"pandora_stash": "rawdata1",
"source": "a",
},
{
"f1": "2",
"source": "b",
},
{
"f2": "1",
"source": "c",
},
{
"pandora_stash": "rawdata2",
"source": "d",
},
{
"f3": "3",
"source": "e",
},
{
"pandora_stash": "rawdata3",
"source": "f",
},
}
gots := addSourceToData(sourceFroms, se, datas, datasourceTagName, runnername)
assert.Equal(t, exp, gots)
}
func TestAddDatasourceForRawData(t *testing.T) {
dir := "TestAddDatasource"
metaDir := filepath.Join(dir, "meta")
if err := os.Mkdir(dir, DefaultDirPerm); err != nil {
log.Fatalf("TestAddDatasource error mkdir %v %v", dir, err)
}
filename := []string{"test1.log", "test2.log", "test3.log", "test4.log"}
content := []string{"1 fufu 3.14\n", "3 fufu 3.16\n", "hfdjsak,dadiajd,dsaud\n", "4 fufu 3.17\n"}
var realPaths []string
for i := range filename {
logPath := filepath.Join(dir, filename[i])
readPath, err := filepath.Abs(logPath)
assert.NoError(t, err)
realPaths = append(realPaths, readPath)
err = ioutil.WriteFile(logPath, []byte(content[i]), DefaultDirPerm)
assert.NoError(t, err)
}
defer os.RemoveAll(dir)
defer os.RemoveAll(metaDir)
config1 := `{
"name":"TestAddDatasource",
"batch_len":4,
"reader":{
"mode":"dir",
"log_path":"./TestAddDatasource/",
"datasource_tag":"datasource"
},
"parser":{
"name":"testcsv",
"type":"csv",
"csv_schema":"a long,b string,c float",
"csv_splitter":" ",
"disable_record_errdata":"true",
"keep_raw_data":"true"
},
"senders":[{
"name":"file_sender",
"sender_type":"file",
"file_send_path":"./TestAddDatasource/filesend.csv"
}]
}`
rc := RunnerConfig{}
err := jsoniter.Unmarshal([]byte(config1), &rc)
assert.NoError(t, err)
rr, err := NewCustomRunner(rc, make(chan cleaner.CleanSignal), reader.NewRegistry(), parser.NewRegistry(), sender.NewRegistry())
assert.NoError(t, err)
go rr.Run()
time.Sleep(2 * time.Second)
data, err := ioutil.ReadFile("./TestAddDatasource/filesend.csv")
var res []Data
err = jsoniter.Unmarshal(data, &res)
if err != nil {
t.Error(err)
}
exp := []Data{
{
"c": float64(3.14),
"raw_data": content[0],
"datasource": realPaths[0],
"a": float64(1),
"b": "fufu",
},
{
"a": float64(3),
"b": "fufu",
"c": float64(3.16),
"raw_data": content[1],
"datasource": realPaths[1],
},
{
"raw_data": content[2],
"datasource": realPaths[2],
},
{
"b": "fufu",
"c": float64(3.17),
"raw_data": content[3],
"datasource": realPaths[3],
"a": float64(4),
},
}
assert.Equal(t, len(exp), len(res))
// res contains an extra "lst" key-value pair
for idx := range exp {
for expKey, expVal := range exp[idx] {
assert.Equal(t, expVal, res[idx][expKey])
}
}
}
func TestAddDatatags(t *testing.T) {
dir := "TestAddDatatags"
metaDir := filepath.Join(dir, "meta")
if err := os.Mkdir(dir, DefaultDirPerm); err != nil {
log.Fatalf("TestAddDatatags error mkdir %v %v", dir, err)
}
tagFile := filepath.Join(dir, "tagFile.json")
err := ioutil.WriteFile(tagFile, []byte(`{
"Title":"tags",
"Author":["john","ada","alice"],
"IsTrue":true,
"Host":99
}`), DefaultDirPerm)
assert.NoError(t, err)
logPath := filepath.Join(dir, "test.log")
err = ioutil.WriteFile(logPath, []byte(`{"f1": "2","f2": "1","f3": "3"}`), DefaultDirPerm)
assert.NoError(t, err)
defer os.RemoveAll(dir)
defer os.RemoveAll(metaDir)
config1 := `{
"name":"TestAddDatatags",
"batch_len":1,
"reader":{
"mode":"file",
"meta_path":"./TestAddDatatags/meta",
"file_done":"./TestAddDatatags/meta",
"log_path":"./TestAddDatatags/test.log",
"tag_file":"./TestAddDatatags/tagFile.json"
},
"parser":{
"name":"testjson",
"type":"json"
},
"senders":[{
"name":"file_sender",
"sender_type":"file",
"file_send_path":"./TestAddDatatags/filesend.json"
}]
}`
rc := RunnerConfig{}
err = jsoniter.Unmarshal([]byte(config1), &rc)
assert.NoError(t, err)
rr, err := NewCustomRunner(rc, make(chan cleaner.CleanSignal), reader.NewRegistry(), parser.NewRegistry(), sender.NewRegistry())
assert.NoError(t, err)
go rr.Run()
time.Sleep(2 * time.Second)
data, err := ioutil.ReadFile("./TestAddDatatags/filesend.json")
var res []Data
err = jsoniter.Unmarshal(data, &res)
if err != nil {
t.Error(err)
}
exp := []Data{
{
"f1": "2",
"f2": "1",
"f3": "3",
"Title": "tags",
"Author": []interface{}{"john", "ada", "alice"},
"IsTrue": bool(true),
"Host": float64(99),
},
}
assert.Equal(t, len(exp), len(res))
// res contains an extra "lst" key-value pair
for idx := range exp {
for expKey, expVal := range exp[idx] {
assert.Equal(t, expVal, res[idx][expKey])
}
}
}
func TestRunWithExtra(t *testing.T) {
dir := "TestRunWithExtra"
metaDir := filepath.Join(dir, "meta")
if err := os.Mkdir(dir, DefaultDirPerm); err != nil {
log.Fatalf("TestRunWithExtra error mkdir %v %v", dir, err)
}
logPath := filepath.Join(dir, "test.log")
err := ioutil.WriteFile(logPath, []byte(`{"f1": "2","f2": "1","f3": "3"}`), DefaultDirPerm)
assert.NoError(t, err)
defer os.RemoveAll(dir)
defer os.RemoveAll(metaDir)
config1 := `{
"name":"TestRunWithExtra",
"batch_len":1,
"extra_info":true,
"reader":{
"mode":"file",
"meta_path":"./TestRunWithExtra/meta",
"log_path":"./TestRunWithExtra/test.log"
},
"parser":{
"name":"testjson",
"type":"json"
},
"senders":[{
"name":"file_sender",
"sender_type":"file",
"file_send_path":"./TestRunWithExtra/filesend.json"
}]
}`
rc := RunnerConfig{}
err = jsoniter.Unmarshal([]byte(config1), &rc)
assert.NoError(t, err)
rr, err := NewCustomRunner(rc, make(chan cleaner.CleanSignal), reader.NewRegistry(), parser.NewRegistry(), sender.NewRegistry())
assert.NoError(t, err)
go rr.Run()
time.Sleep(2 * time.Second)
data, err := ioutil.ReadFile("./TestRunWithExtra/filesend.json")
var res []Data
err = jsoniter.Unmarshal(data, &res)
if err != nil {
t.Error(err)
}
// res has an extra lst key-value pair
assert.Equal(t, 8, len(res[0]))
}
func TestRunWithDataSource(t *testing.T) {
cur, err := os.Getwd()
assert.NoError(t, err)
dir := filepath.Join(cur, "TestRunWithDataSource")
os.RemoveAll(dir)
metaDir := filepath.Join(dir, "meta")
if err := os.Mkdir(dir, DefaultDirPerm); err != nil {
log.Fatalf("TestRunWithDataSource error mkdir %v %v", dir, err)
}
logPath := filepath.Join(dir, "test.log")
err = ioutil.WriteFile(logPath, []byte("a\nb\n\n\nc\n"), DefaultDirPerm)
assert.NoError(t, err)
defer os.RemoveAll(dir)
defer os.RemoveAll(metaDir)
config1 := `{
"name":"TestRunWithDataSource",
"batch_len":3,
"reader":{
"mode":"file",
"meta_path":"./TestRunWithDataSource/meta",
"log_path":"` + logPath + `",
"datasource_tag":"datasource"
},
"parser":{
"name":"testjson",
"type":"raw",
"timestamp":"false"
},
"senders":[{
"name":"file_sender",
"sender_type":"file",
"file_send_path":"./TestRunWithDataSource/filesend.json"
}]
}`
rc := RunnerConfig{}
err = jsoniter.Unmarshal([]byte(config1), &rc)
assert.NoError(t, err)
rr, err := NewCustomRunner(rc, make(chan cleaner.CleanSignal), reader.NewRegistry(), parser.NewRegistry(), sender.NewRegistry())
assert.NoError(t, err)
assert.NotNil(t, rr)
go rr.Run()
time.Sleep(2 * time.Second)
data, err := ioutil.ReadFile("./TestRunWithDataSource/filesend.json")
var res []Data
err = json.Unmarshal(data, &res)
if err != nil {
t.Error(err, string(data))
}
exp := []Data{
{
"raw": "a\n",
"datasource": logPath,
},
{
"raw": "b\n",
"datasource": logPath,
},
{
"raw": "c\n",
"datasource": logPath,
},
}
assert.Equal(t, len(exp), len(res))
// res has an extra lst key-value pair
for idx := range exp {
for expKey, expVal := range exp[idx] {
assert.Equal(t, expVal, res[idx][expKey])
}
}
}
func TestRunWithDataSourceFial(t *testing.T) {
cur, err := os.Getwd()
assert.NoError(t, err)
dir := filepath.Join(cur, "TestRunWithDataSourceFial")
metaDir := filepath.Join(dir, "meta")
os.RemoveAll(dir)
if err := os.Mkdir(dir, DefaultDirPerm); err != nil {
log.Fatalf("TestRunWithDataSource error mkdir %v %v", dir, err)
}
logPath := filepath.Join(dir, "test.log")
err = ioutil.WriteFile(logPath, []byte("a\n"), DefaultDirPerm)
assert.NoError(t, err)
defer os.RemoveAll(dir)
defer os.RemoveAll(metaDir)
config1 := `{
"name":"TestRunWithDataSourceFial",
"batch_len":1,
"reader":{
"mode":"file",
"log_path":"` + logPath + `",
"meta_path":"./TestRunWithDataSourceFial/meta",
"datasource_tag":"datasource"
},
"parser":{
"name":"testjson",
"type":"json"
},
"senders":[{
"name":"file_sender",
"sender_type":"file",
"file_send_path":"./TestRunWithDataSourceFial/filesend.json"
}]
}`
rc := RunnerConfig{}
err = jsoniter.Unmarshal([]byte(config1), &rc)
assert.NoError(t, err)
rr, err := NewCustomRunner(rc, make(chan cleaner.CleanSignal), reader.NewRegistry(), parser.NewRegistry(), sender.NewRegistry())
assert.NoError(t, err)
assert.NotNil(t, rr)
go rr.Run()
time.Sleep(2 * time.Second)
data, err := ioutil.ReadFile("./TestRunWithDataSourceFial/filesend.json")
var res []Data
err = json.Unmarshal(data, &res)
if err != nil {
t.Error(err, string(data))
}
exp := []Data{
{
"pandora_stash": "a",
"datasource": logPath,
},
}
assert.Equal(t, len(exp), len(res))
// res has an extra lst key-value pair
for idx := range exp {
for expKey, expVal := range exp[idx] {
assert.Equal(t, expVal, res[idx][expKey])
}
}
}
func TestClassifySenderData(t *testing.T) {
{
senders := []sender.Sender{&mock.Sender{}, &mock.Sender{}, &mock.Sender{}}
numSenders := len(senders)
datas := []Data{
{
"a": "a",
"b": "b",
"c": "c",
"d": "d",
},
{
"a": "A",
"b": "b",
"c": "c",
"d": "d",
},
{
"a": "B",
"b": "b",
"c": "c",
"d": "d",
},
{
"a": "C",
"b": "b",
"c": "c",
"d": "d",
},
}
routerConf := router.RouterConfig{
KeyName: "a",
MatchType: "equal",
DefaultIndex: 0,
Routes: map[string]int{
"a": 2,
"A": 1,
},
}
r, err := router.NewSenderRouter(routerConf, numSenders)
senderDataList := classifySenderData(senders, datas, r)
assert.Equal(t, numSenders, len(senderDataList))
assert.Equal(t, 2, len(senderDataList[0]))
assert.Equal(t, 1, len(senderDataList[1]))
assert.Equal(t, 1, len(senderDataList[2]))
// test the case where no router is configured
routerConf.KeyName = ""
r, err = router.NewSenderRouter(routerConf, numSenders)
assert.Nil(t, r)
assert.NoError(t, err)
senderDataList = classifySenderData(senders, datas, r)
assert.Equal(t, numSenders, len(senderDataList))
assert.Equal(t, 4, len(senderDataList[0]))
assert.Equal(t, 4, len(senderDataList[1]))
assert.Equal(t, 4, len(senderDataList[2]))
}
// --> test whether the SkipDeepCopySender check takes effect <--
// a sender that modifies data is followed by other senders
{
senders := []sender.Sender{&mock.Sender{}, &pandora.Sender{}, &mock.Sender{}}
datas := []Data{
{
"a": "a",
"b": "b",
"c": "c",
"d": "d",
},
}
senderDataList := classifySenderData(senders, datas, nil)
assert.Len(t, senderDataList, len(senders))
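// comparing the %p of each slice tells whether a sender got the original backing array (no deep copy) or its own copy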
assert.True(t, fmt.Sprintf("%p", datas) == fmt.Sprintf("%p", senderDataList[0]))
assert.False(t, fmt.Sprintf("%p", datas) == fmt.Sprintf("%p", senderDataList[1]))
assert.True(t, fmt.Sprintf("%p", datas) == fmt.Sprintf("%p", senderDataList[2]))
}
// the sender that modifies data is the last one
{
senders := []sender.Sender{&mock.Sender{}, &pandora.Sender{}}
datas := []Data{
{
"a": "a",
"b": "b",
"c": "c",
"d": "d",
},
}
senderDataList := classifySenderData(senders, datas, nil)
assert.Len(t, senderDataList, len(senders))
assert.True(t, fmt.Sprintf("%p", datas) == fmt.Sprintf("%p", senderDataList[0]))
assert.True(t, fmt.Sprintf("%p", datas) == fmt.Sprintf("%p", senderDataList[1]))
}
// only a sender that modifies data is present
{
senders := []sender.Sender{&pandora.Sender{}}
datas := []Data{
{
"a": "a",
"b": "b",
"c": "c",
"d": "d",
},
}
senderDataList := classifySenderData(senders, datas, nil)
assert.Len(t, senderDataList, len(senders))
assert.True(t, fmt.Sprintf("%p", datas) == fmt.Sprintf("%p", senderDataList[0]))
}
}
// Response from the Clearbit API. Size: 2.4kb
var mediumFixture []byte = []byte(`{
"person": {
"id": "d50887ca-a6ce-4e59-b89f-14f0b5d03b03",
"name": {
"fullName": "Leonid Bugaev",
"givenName": "Leonid",
"familyName": "Bugaev"
},
"email": "[email protected]",
"gender": "male",
"location": "Saint Petersburg, Saint Petersburg, RU",
"geo": {
"city": "Saint Petersburg",
"state": "Saint Petersburg",
"country": "Russia",
"lat": 59.9342802,
"lng": 30.3350986
},
"bio": "Senior engineer at Granify.com",
"site": "http://flickfaver.com",
"avatar": "https://d1ts43dypk8bqh.cloudfront.net/v1/avatars/d50887ca-a6ce-4e59-b89f-14f0b5d03b03",
"employment": {
"name": "www.latera.ru",
"title": "Software Engineer",
"domain": "gmail.com"
},
"facebook": {
"handle": "leonid.bugaev"
},
"github": {
"handle": "buger",
"id": 14009,
"avatar": "https://avatars.githubusercontent.com/u/14009?v=3",
"company": "Granify",
"blog": "http://leonsbox.com",
"followers": 95,
"following": 10
},
"twitter": {
"handle": "flickfaver",
"id": 77004410,
"bio": null,
"followers": 2,
"following": 1,
"statuses": 5,
"favorites": 0,
"location": "",
"site": "http://flickfaver.com",
"avatar": null
},
"linkedin": {
"handle": "in/leonidbugaev"
},
"googleplus": {
"handle": null
},
"angellist": {
"handle": "leonid-bugaev",
"id": 61541,
"bio": "Senior engineer at Granify.com",
"blog": "http://buger.github.com",
"site": "http://buger.github.com",
"followers": 41,
"avatar": "https://d1qb2nb5cznatu.cloudfront.net/users/61541-medium_jpg?1405474390"
},
"klout": {
"handle": null,
"score": null
},
"foursquare": {
"handle": null
},
"aboutme": {
"handle": "leonid.bugaev",
"bio": null,
"avatar": null
},
"gravatar": {
"handle": "buger",
"urls": [
],
"avatar": "http://1.gravatar.com/avatar/f7c8edd577d13b8930d5522f28123510",
"avatars": [
{
"url": "http://1.gravatar.com/avatar/f7c8edd577d13b8930d5522f28123510",
"type": "thumbnail"
}
]
},
"fuzzy": false
},
"company": null
}`)
type CBAvatar struct {
Url string `json:"url"`
}
type CBGravatar struct {
Avatars []*CBAvatar `json:"avatars"`
}
type CBGithub struct {
Followers int `json:"followers"`
}
type CBName struct {
FullName string `json:"fullName"`
}
type CBPerson struct {
Name *CBName `json:"name"`
Github *CBGithub `json:"github"`
Gravatar *CBGravatar `json:"gravatar"`
}
type MediumPayload struct {
Person *CBPerson `json:"person"`
Company string `json:"company"`
}
func BenchmarkDecodeStdStructMedium(b *testing.B) {
b.ReportAllocs()
var data MediumPayload
for i := 0; i < b.N; i++ {
json.Unmarshal(mediumFixture, &data)
}
}
func BenchmarkEncodeStdStructMedium(b *testing.B) {
var data MediumPayload
jsoniter.Unmarshal(mediumFixture, &data)
b.ReportAllocs()
for i := 0; i < b.N; i++ {
json.Marshal(data)
}
}
func BenchmarkDecodeJsoniterStructMedium(b *testing.B) {
b.ReportAllocs()
var data MediumPayload
for i := 0; i < b.N; i++ {
jsoniter.Unmarshal(mediumFixture, &data)
}
}
func BenchmarkEncodeJsoniterStructMedium(b *testing.B) {
var data MediumPayload
jsoniter.Unmarshal(mediumFixture, &data)
b.ReportAllocs()
for i := 0; i < b.N; i++ {
jsoniter.Marshal(data)
}
}
func BenchmarkEncodeJsoniterCompatibleStructMedium(b *testing.B) {
var data MediumPayload
jsoniter.Unmarshal(mediumFixture, &data)
b.ReportAllocs()
jsonc := jsoniter.ConfigCompatibleWithStandardLibrary
for i := 0; i < b.N; i++ {
jsonc.Marshal(data)
}
}
/*
BenchmarkDecodeStdStructMedium-4 50000 39162 ns/op 1960 B/op 99 allocs/op
BenchmarkEncodeStdStructMedium-4 1000000 2106 ns/op 712 B/op 5 allocs/op
BenchmarkDecodeJsoniterStructMedium-4 200000 7676 ns/op 320 B/op 36 allocs/op
BenchmarkEncodeJsoniterStructMedium-4 1000000 1046 ns/op 240 B/op 3 allocs/op
BenchmarkEncodeJsoniterCompatibleStructMedium-4 1000000 1023 ns/op 240 B/op 3 allocs/op
PASS
Performance improves significantly
*/
func TestMergeEnvTags(t *testing.T) {
key := "TestMergeEnvTags"
os.Setenv(key, `{"a":"hello"}`)
defer os.Unsetenv(key)
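// MergeEnvTags parses the JSON object stored in the named env var and merges it into the existing tag map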
tags := MergeEnvTags(key, nil)
assert.Equal(t, map[string]interface{}{"a": "hello"}, tags)
os.Setenv(key, `{"b":"123","c":"nihao"}`)
tags = MergeEnvTags(key, tags)
assert.Equal(t, map[string]interface{}{"a": "hello", "b": "123", "c": "nihao"}, tags)
}
func TestMergeExtraInfoTags(t *testing.T) {
meta, err := reader.NewMetaWithConf(conf.MapConf{
ExtraInfo: "true",
readerConf.KeyMode: readerConf.ModeMySQL,
})
assert.NoError(t, err)
tags := MergeExtraInfoTags(meta, nil)
assert.Equal(t, 4, len(tags))
// writing again should not change anything.
tags = MergeExtraInfoTags(meta, tags)
assert.Equal(t, 4, len(tags))
}
func TestTailxCleaner(t *testing.T) {
cur, err := os.Getwd()
assert.NoError(t, err)
dir := filepath.Join(cur, "TestTailxCleaner")
metaDir := filepath.Join(dir, "meta")
os.RemoveAll(dir)
if err := os.Mkdir(dir, DefaultDirPerm); err != nil {
log.Fatalf("TestTailxCleaner error mkdir %v %v", dir, err)
}
defer os.RemoveAll(dir)
dira := filepath.Join(dir, "a")
os.MkdirAll(dira, DefaultDirPerm)
logPatha := filepath.Join(dira, "a.log")
assert.NoError(t, ioutil.WriteFile(logPatha, []byte("a\n"), 0666))
dirb := filepath.Join(dir, "b")
os.MkdirAll(dirb, DefaultDirPerm)
logPathb := filepath.Join(dirb, "b.log")
assert.NoError(t, ioutil.WriteFile(logPathb, []byte("b\n"), 0666))
readfile := filepath.Join(dir, "*", "*.log")
config := `
{
"name": "TestTailxCleaner",
"batch_size": 2097152,
"batch_interval": 1,
"reader": {
"expire": "24h",
"log_path": "` + readfile + `",
"meta_path":"` + metaDir + `",
"mode": "tailx",
"read_from": "oldest",
"stat_interval": "1s"
},
"cleaner": {
"delete_enable": "true",
"delete_interval": "1",
"reserve_file_number": "1",
"reserve_file_size": "2048"
},
"parser": {
"disable_record_errdata": "false",
"timestamp": "true",
"type": "raw"
},
"senders": [
{
"sender_type": "discard"
}
]
}`
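// with reserve_file_number=1, rotating a.log twice below should make the oldest rotated file (a.log.1) eligible for cleaning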
rc := RunnerConfig{}
assert.NoError(t, jsoniter.Unmarshal([]byte(config), &rc))
cleanChan := make(chan cleaner.CleanSignal)
rr, err := NewLogExportRunner(rc, cleanChan, reader.NewRegistry(), parser.NewRegistry(), sender.NewRegistry())
assert.NoError(t, err)
assert.NotNil(t, rr)
go rr.Run()
time.Sleep(2 * time.Second)
logPatha1 := filepath.Join(dira, "a.log.1")
assert.NoError(t, os.Rename(logPatha, logPatha1))
assert.NoError(t, ioutil.WriteFile(logPatha, []byte("bbbb\n"), 0666))
time.Sleep(5 * time.Second)
logPatha2 := filepath.Join(dira, "a.log.2")
assert.NoError(t, os.Rename(logPatha, logPatha2))
assert.NoError(t, ioutil.WriteFile(logPatha, []byte("cccc\n"), 0666))
time.Sleep(2 * time.Second)
assert.NotNil(t, rr.Cleaner())
var ret, dft int
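// poll the clean signal channel for up to ~10s (200 * 50ms) waiting for the rotated file to be reported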
DONE:
for {
select {
case sig := <-cleanChan:
ret++
assert.Equal(t, "a.log.1", sig.Filename)
assert.NoError(t, os.Remove(filepath.Join(sig.Logdir, sig.Filename)))
assert.Equal(t, readerConf.ModeTailx, sig.ReadMode)
break DONE
default:
dft++
}
time.Sleep(50 * time.Millisecond)
if dft > 200 {
break
}
}
assert.Equal(t, 1, ret)
}
func Test_setSenderConfig(t *testing.T) {
senderConfig := conf.MapConf{
senderConf.KeySenderType: senderConf.TypePandora,
}
serverConfigs := []map[string]interface{}{
{
transforms.KeyType: ip.Name,
transforms.TransformAt: ip.Server,
},
}
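// a server-side ip transform is expected to append "<key> ip" to the pandora auto-create schema; keys containing '.' are rejected for server-side transforms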
actualConfig, err := setPandoraServerConfig(senderConfig, serverConfigs)
assert.NoError(t, err)
assert.Equal(t, "", actualConfig[senderConf.KeyPandoraAutoCreate])
serverConfigs = []map[string]interface{}{
{
transforms.KeyType: ip.Name,
transforms.TransformAt: ip.Server,
"key": "ip",
},
}
actualConfig, err = setPandoraServerConfig(senderConfig, serverConfigs)
assert.NoError(t, err)
assert.Equal(t, "ip ip", actualConfig[senderConf.KeyPandoraAutoCreate])
senderConfig = conf.MapConf{
senderConf.KeySenderType: senderConf.TypePandora,
}
serverConfigs = []map[string]interface{}{
{
transforms.KeyType: ip.Name,
transforms.TransformAt: ip.Local,
"key": "a.b",
},
}
actualConfig, err = setPandoraServerConfig(senderConfig, serverConfigs)
assert.NoError(t, err)
assert.Equal(t, "", actualConfig[senderConf.KeyPandoraAutoCreate])
serverConfigs = []map[string]interface{}{
{
transforms.KeyType: "other",
},
}
actualConfig, err = setPandoraServerConfig(senderConfig, serverConfigs)
assert.NoError(t, err)
assert.Equal(t, "", actualConfig[senderConf.KeyPandoraAutoCreate])
serverConfigs = []map[string]interface{}{
{
transforms.KeyType: ip.Name,
transforms.TransformAt: ip.Server,
"key": "ip.ip",
},
}
actualConfig, err = setPandoraServerConfig(senderConfig, serverConfigs)
assert.Error(t, err)
}
func Test_removeServerIPSchema(t *testing.T) {
tests := []struct {
autoCreate string
key string
expect string
}{
{
autoCreate: "a ip,a ip",
key: "a",
expect: "",
},
{
autoCreate: "pandora_stash string,a ip,b string",
key: "a",
expect: "pandora_stash string,b string",
},
{
autoCreate: "",
key: "a",
expect: "",
},
{
autoCreate: "a ip,b string",
key: "a",
expect: "b string",
},
{
autoCreate: "a ip",
key: "a",
expect: "",
},
}
for _, test := range tests {
res := removeServerIPSchema(test.autoCreate, test.key)
assert.Equal(t, test.expect, res)
}
}
// before: 5000  242788 ns/op  126474 B/op  758 allocs/op
// now:    5000  266301 ns/op  145645 B/op  1572 allocs/op
// needs optimization
func BenchmarkStatusRestore(b *testing.B) {
logkitConf := conf.MapConf{
readerConf.KeyMetaPath: "BenchmarkStatusRestore",
readerConf.KeyMode: readerConf.ModeMongo,
}
meta, err := reader.NewMetaWithConf(logkitConf)
if err != nil {
b.Fatal(err)
}
defer os.RemoveAll("BenchmarkStatusRestore")
r1 := &LogExportRunner{
meta: meta,
rs: &RunnerStatus{},
lastRs: &RunnerStatus{},
historyError: &ErrorsList{},
}
r2 := &LogExportRunner{
meta: meta,
rs: &RunnerStatus{},
lastRs: &RunnerStatus{},
historyError: &ErrorsList{},
}
b.ReportAllocs()
for i := 0; i < b.N; i++ {
r1.StatusRestore()
r2.StatusRestore()
}
}
func randinsert(l *equeue.ErrorQueue, num int) {
for i := 0; i < num; i++ {
l.Put(equeue.ErrorInfo{
Error: fmt.Sprintf("err %v", rand.Intn(100)),
Count: int64(rand.Intn(100) + 1),
})
}
}
func TestBackupRestoreHistory(t *testing.T) {
logkitConf := conf.MapConf{
readerConf.KeyMetaPath: "meta",
readerConf.KeyMode: readerConf.ModeMongo,
}
meta, err := reader.NewMetaWithConf(logkitConf)
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll("meta")
rq := equeue.New(10)
randinsert(rq, 12)
pq := equeue.New(10)
randinsert(pq, 12)
tq := equeue.New(10)
randinsert(tq, 12)
sq := equeue.New(10)
randinsert(sq, 12)
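// each queue holds 10 entries but receives 12 random errors, so backup/restore must preserve exactly what survived the overflow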
s1, _ := discard.NewSender(conf.MapConf{"name": "s1"})
r1 := &LogExportRunner{
meta: meta,
rsMutex: new(sync.RWMutex),
rs: &RunnerStatus{
TransformStats: map[string]StatsInfo{"pick-0": {Success: 1}},
SenderStats: map[string]StatsInfo{"s1": {Success: 1}},
},
historyError: &ErrorsList{
ReadErrors: rq,
ParseErrors: pq,
TransformErrors: map[string]*equeue.ErrorQueue{
"pick-0": tq,
},
SendErrors: map[string]*equeue.ErrorQueue{
"s1": sq,
},
},
lastRs: &RunnerStatus{},
transformers: []transforms.Transformer{&mutate.Pick{}},
senders: []sender.Sender{s1},
}
r1.StatusBackup()
r2 := &LogExportRunner{
meta: meta,
rs: &RunnerStatus{
TransformStats: map[string]StatsInfo{},
SenderStats: map[string]StatsInfo{},
},
historyError: &ErrorsList{},
lastRs: &RunnerStatus{},
transformers: []transforms.Transformer{&mutate.Pick{}},
senders: []sender.Sender{s1},
}
r2.StatusRestore()
// ensure the restored state matches what was backed up
assert.Equal(t, r1.historyError.ReadErrors.List(), r2.historyError.ReadErrors.List())
assert.Equal(t, r1.historyError.ParseErrors.List(), r2.historyError.ParseErrors.List())
for k, v := range r1.historyError.TransformErrors {
assert.Equal(t, v.List(), r2.historyError.TransformErrors[k].List())
}
for k, v := range r1.historyError.SendErrors {
assert.Equal(t, v.List(), r2.historyError.SendErrors[k].List())
}
}
|
[
"\"Test_RunForEnvTag\""
] |
[] |
[
"Test_RunForEnvTag"
] |
[]
|
["Test_RunForEnvTag"]
|
go
| 1 | 0 | |
train.py
|
import os
import warnings
import sys
import pandas as pd
import numpy as np
from itertools import cycle
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.linear_model import ElasticNet
from sklearn.linear_model import lasso_path, enet_path
filePath = "PD_Commercial_Train.csv"
os.environ['MLFLOW_TRACKING_URI'] = 'http://localhost:5000'
os.environ['GIT_PYTHON_REFRESH'] = 'quiet'
df_credit = pd.read_csv(filePath)
cols = df_credit.columns
data = df_credit[cols].apply(pd.to_numeric, errors='coerce')
data = data.fillna(0)
X = data.drop(["PD"], axis=1)
y = data[["PD"]]
# Import mlflow
import mlflow
import mlflow.sklearn
# Evaluate metrics
def eval_metrics(actual, pred):
rmse = np.sqrt(mean_squared_error(actual, pred))
mae = mean_absolute_error(actual, pred)
r2 = r2_score(actual, pred)
return rmse, mae, r2
#new
if __name__ == "__main__":
warnings.filterwarnings("ignore")
np.random.seed(40)
mlflow.set_experiment('riskAnalysis')
# Split the data into training and test sets. (0.75, 0.25) split.
train, test = train_test_split(data)
#Predict PD
train_x = train.drop(["PD"], axis=1)
test_x = test.drop(["PD"], axis=1)
train_y = train[["PD"]]
test_y = test[["PD"]]
alpha = float(sys.argv[1]) if len(sys.argv) > 1 else 0.05
l1_ratio = float(sys.argv[2]) if len(sys.argv) > 2 else 0.05
# Run ElasticNet
lr = ElasticNet(alpha=alpha, l1_ratio=l1_ratio, random_state=42)
lr.fit(train_x, train_y)
predicted_qualities = lr.predict(test_x)
(rmse, mae, r2) = eval_metrics(test_y, predicted_qualities)
# Print out ElasticNet model metrics
print("Elasticnet model (alpha=%f, l1_ratio=%f):" % (alpha, l1_ratio))
print(" RMSE: %s" % rmse)
print(" MAE: %s" % mae)
print(" R2: %s" % r2)
# Log mlflow attributes for mlflow UI
mlflow.log_param("alpha", alpha)
mlflow.log_param("l1_ratio", l1_ratio)
mlflow.log_metric("rmse", rmse)
mlflow.log_metric("r2", r2)
mlflow.log_metric("mae", mae)
mlflow.sklearn.log_model(lr, "model")
|
[] |
[] |
[
"GIT_PYTHON_REFRESH",
"MLFLOW_TRACKING_URI"
] |
[]
|
["GIT_PYTHON_REFRESH", "MLFLOW_TRACKING_URI"]
|
python
| 2 | 0 | |
code/script_experiment.py
|
import numpy as np
import tensorflow as tf
import random
# The below is necessary in Python 3.2.3 onwards to
# have reproducible behavior for certain hash-based operations.
# See these references for further details:
# https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED
# https://github.com/keras-team/keras/issues/2280#issuecomment-306959926
import os
os.environ['PYTHONHASHSEED'] = '0'
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(1234)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
random.seed(12345)
# Force TensorFlow to use single thread.
# Multiple threads are a potential source of
# non-reproducible results.
# For further details, see:
# https://stackoverflow.com/questions/42022950/which-seeds-have-to-be-set-where-to-realize-100-reproducibility-of-training-res
session_conf = tf.ConfigProto(
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1
)
session_conf.gpu_options.allow_growth = True
from keras import backend as K
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
# For further details, see:
# https://www.tensorflow.org/api_docs/python/tf/set_random_seed
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
##############################################################################
import sys
import preprocessing
import json
import errno
import scipy
import keras
import tensorflow as tf
from keras import optimizers, initializers, regularizers, constraints
from utils import *
from generator import DataGenerator
from models import getNetwork
from sklearn import metrics
print('Keras:', keras.__version__)
print('Tensorflow:', tf.__version__)
# 1. Logging
if len(sys.argv) == 4:
CONFIG_FILE = str(sys.argv[1])
SUBJECT = int(sys.argv[2])
TIMESTAMP = int(sys.argv[3])
else:
print('Expected different number of arguments. {} were given'.format(len(sys.argv) - 1))
sys.exit()
with open(CONFIG_FILE) as json_file:
config_params = json.load(json_file)
LOGGING_FILE_PREFIX = config_params['logging']['log_file'] + '_' + str(TIMESTAMP)
if config_params['logging']['enable']:
LOGGING_FILE = '../results/L_' + LOGGING_FILE_PREFIX + '.log'
LOGGING_TENSORBOARD_FILE = '../results/tblogs/L_' + LOGGING_FILE_PREFIX
if config_params['model']['save']:
MODEL_SAVE_FILE = '../results/models/O1_' + LOGGING_FILE_PREFIX + '_{}.json'
MODEL_WEIGHTS_SAVE_FILE = '../results/models/O2_' + LOGGING_FILE_PREFIX + '_{}.h5'
METRICS_SAVE_FILE = '../results/metrics/O3_' + LOGGING_FILE_PREFIX + '_{}.mat'
if not os.path.exists(os.path.dirname(METRICS_SAVE_FILE)):
try:
os.makedirs(os.path.dirname(METRICS_SAVE_FILE))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
if not os.path.exists(os.path.dirname(MODEL_SAVE_FILE)):
try:
os.makedirs(os.path.dirname(MODEL_SAVE_FILE))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
if not os.path.exists(os.path.dirname(LOGGING_TENSORBOARD_FILE)):
try:
os.makedirs(os.path.dirname(LOGGING_TENSORBOARD_FILE))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
print('Logging file: {}'.format(LOGGING_FILE))
print('Tensorboard file: {}'.format(LOGGING_TENSORBOARD_FILE))
print('Model JSON file: {}'.format(MODEL_SAVE_FILE))
print('Model H5 file: {}'.format(MODEL_WEIGHTS_SAVE_FILE))
print('Metrics file: {}'.format(METRICS_SAVE_FILE))
# 2. Config params generator
PARAMS_TRAIN_GENERATOR = DEFAULT_GENERATOR_PARAMS.copy()
params_gen = config_params['dataset'].get('train_generator', {}).copy()
for key in params_gen.keys():
PARAMS_TRAIN_GENERATOR[key] = params_gen[key]
PARAMS_VALID_GENERATOR = DEFAULT_GENERATOR_PARAMS.copy()
params_gen = config_params['dataset'].get('valid_generator', {}).copy()
for key in params_gen.keys():
PARAMS_VALID_GENERATOR[key] = params_gen[key]
# 3. Initialization
INPUT_DIRECTORY = '../dataset/Ninapro-DB1-Proc'
PARAMS_TRAIN_GENERATOR['preprocess_function_1'] = [preprocessing.lpf]
PARAMS_TRAIN_GENERATOR['preprocess_function_1_extra'] = [{'fs': 100}]
PARAMS_TRAIN_GENERATOR['data_type'] = 'rms'
PARAMS_TRAIN_GENERATOR['classes'] = [i for i in range(53)]
PARAMS_VALID_GENERATOR['preprocess_function_1'] = [preprocessing.lpf]
PARAMS_VALID_GENERATOR['preprocess_function_1_extra'] = [{'fs': 100}]
PARAMS_VALID_GENERATOR['data_type'] = 'rms'
PARAMS_VALID_GENERATOR['classes'] = [i for i in range(53)]
SUBJECTS = config_params['dataset'].get('subjects', [i for i in range(1, 28)])
if np.min(SUBJECTS) <= 0 or np.max(SUBJECTS) >= 28:
raise AssertionError('Subject IDs should be between 1 and 27 inclusive for DB1. Were given {}\n'.format(SUBJECTS))
PARAMS_TRAIN_GENERATOR.pop('input_directory', '')
PARAMS_VALID_GENERATOR.pop('input_directory', '')
MODEL = getNetwork(config_params['model']['name'])
mean_train, mean_test, mean_test_3, mean_test_5 = [], [], [], []
mean_cm = []
mean_train_loss, mean_test_loss = [], []
if config_params['logging']['enable']:
if os.path.isfile(LOGGING_FILE) == False:
with open(LOGGING_FILE, 'w') as f:
f.write(
'TIMESTAMP: {}\n'
'KERAS: {}\n'
'TENSORFLOW: {}\n'
'DATASET: {}\n'
'TRAIN_GENERATOR: {}\n'
'VALID_GENERATOR: {}\n'
'MODEL: {}\n'
'MODEL_PARAMS: {}\n'
'TRAIN_PARAMS: {}\n'.format(
TIMESTAMP,
keras.__version__, tf.__version__,
config_params['dataset']['name'], PARAMS_TRAIN_GENERATOR,
PARAMS_VALID_GENERATOR,
config_params['model']['name'], config_params['model']['extra'],
config_params['training']
)
)
f.write(
'SUBJECT,TRAIN_SHAPE,TEST_SHAPE,TRAIN_LOSS,TRAIN_ACC,TEST_LOSS,TEST_ACC,TEST_TOP_3_ACC,TEST_TOP_5_ACC\n')
print('Subject: {}'.format(SUBJECT))
input_dir = '{}/subject-{:02d}'.format(INPUT_DIRECTORY, SUBJECT)
train_generator = DataGenerator(input_directory=input_dir, **PARAMS_TRAIN_GENERATOR)
valid_generator = DataGenerator(input_directory=input_dir, **PARAMS_VALID_GENERATOR)
X_test, Y_test, test_reps = valid_generator.get_data()
# print('Train generator:')
# print(train_generator)
# print('Test generator:')
# print(valid_generator)
model = MODEL(
input_shape=(None, 10),
classes=train_generator.n_classes,
**config_params['model']['extra'])
#model.summary()
if config_params['training']['optimizer'] == 'adam':
optimizer = optimizers.Adam(lr=config_params['training']['l_rate'], epsilon=0.001)
elif config_params['training']['optimizer'] == 'sgd':
optimizer = optimizers.SGD(lr=config_params['training']['l_rate'], momentum=0.9)
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy', top_3_accuracy, top_5_accuracy])
train_callbacks = []
if config_params['logging']['enable']:
tensorboardCallback = MyTensorboard(log_dir=LOGGING_TENSORBOARD_FILE + "/{}".format(SUBJECT),
batch_size=100,
histogram_freq=10)
train_callbacks.append(tensorboardCallback)
lrScheduler = MyLRScheduler(**config_params['training']['l_rate_schedule'])
train_callbacks.append(lrScheduler)
history = model.fit_generator(train_generator, epochs=config_params['training']['epochs'],
validation_data=(X_test,Y_test), callbacks=train_callbacks, verbose=2)
Y_pred = model.predict(X_test)
y_pred = np.argmax(Y_pred, axis=1)
y_test = np.argmax(Y_test, axis=1)
if config_params['model']['save']:
# serialize model to JSON
model_json = model.to_json()
with open(MODEL_SAVE_FILE.format(SUBJECT), "w") as json_file:
json_file.write(model_json)
# serialize weights to HDF5
model.save_weights(MODEL_WEIGHTS_SAVE_FILE.format(SUBJECT))
print("Saved model to disk")
# Confusion Matrix
# C_{i, j} is equal to the number of observations known to be in group i but predicted to be in group j.
cnf_matrix_frame = metrics.confusion_matrix(y_test, y_pred)
if np.array(mean_cm).shape != cnf_matrix_frame.shape:
mean_cm = cnf_matrix_frame
else:
mean_cm += cnf_matrix_frame
mean_train.append(history.history['acc'][-1])
mean_test.append(history.history['val_acc'][-1])
mean_train_loss.append(history.history['loss'][-1])
mean_test_loss.append(history.history['val_loss'][-1])
mean_test_3.append(history.history['val_top_3_accuracy'][-1])
mean_test_5.append(history.history['val_top_5_accuracy'][-1])
if config_params['logging']['enable']:
with open(LOGGING_FILE, 'a') as f:
f.write('{},{},{},{},{},{},{},{},{}\n'.format(SUBJECT, train_generator.__len__() * PARAMS_TRAIN_GENERATOR['batch_size'], valid_generator.__len__(),
mean_train_loss[-1], mean_train[-1], mean_test_loss[-1], mean_test[-1], mean_test_3[-1], mean_test_5[-1]))
metrics_dict = {
'mean_cm': mean_cm,
'mean_test': mean_test,
'mean_test_3': mean_test_3,
'mean_test_5': mean_test_5,
'mean_train': mean_train,
'mean_train_loss': mean_train_loss,
'mean_test_loss': mean_test_loss
}
scipy.io.savemat(METRICS_SAVE_FILE.format(SUBJECT), metrics_dict)
|
[] |
[] |
[
"PYTHONHASHSEED"
] |
[]
|
["PYTHONHASHSEED"]
|
python
| 1 | 0 | |
misc/decidermid.py
|
from __future__ import division
import os
import pandas as pd
import numpy as np
import pickle
import warnings
import argparse
import nibabel as nib
import sys
import shutil
sys.path.append(os.path.join(os.environ.get("CODEDIR"),'04_connectome','01_timeseries_cleaning'))
from utils import nodes
outdir = os.environ.get("CONGROUPDIR")
prepdir = os.environ.get('PREPDIR')
# read in patients data
patients = pd.read_csv("/scratch/PI/russpold/data/psychosis/09_tables/REDCAP_clean.csv")
subjects = patients['scan_alt'].tolist()
# check BidsSLURM
subsincon = set([x[4:] for x in os.listdir(os.environ.get("CONDIR")) if x[0]=='s'])
subsindb = set(patients.scan_alt)
tobedeleted = list(subsincon - subsindb)
for subject in tobedeleted:
#bidsdir = os.path.join(os.environ.get("BIDSDIR"),'sub-'+subject)
#prepdir = os.path.join(os.environ.get("PREPDIR"),'sub-'+subject)
condir = os.path.join(os.environ.get("CONDIR"),'sub-'+subject)
#print(condir)
shutil.rmtree(condir)
# make list of correlation files
noprep = []
LR1 = []
RL1 = []
LR2 = []
RL2 = []
for subject in np.sort(subjects):
# if subject in ['S6463DJD','S3040SHP','S2451MWP','S8555KAD']:
# continue
subprep = os.path.join(prepdir,"sub-"+subject,"MNINonLinear/Results")
if not os.path.isdir(subprep):
print("not preprocessed: %s"%subject)
noprep.append(subject)
else:
funcprep = os.listdir(subprep)
keys = [x for x in funcprep if 'rest' in x]
for run in keys:
longmovementfile = os.path.join(subprep,run,"Movement_Regressors.txt")
movementfile = os.path.join(subprep,run,"Movement_Regressors_removed_first10.txt")
movement = pd.read_csv(longmovementfile,delim_whitespace=True,header=None,engine='python')
movement = movement.drop(range(10))
movement = movement.reset_index()
movement = movement.drop('index',1)
movement.to_csv(movementfile,index=False,header=None)
# compute FD
FD = nodes.ComputeFD(movementfile)
rmid = np.where(FD > 0.5)[0]
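# also censor the frame immediately before and after each high-motion frame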
rmid = np.unique(np.concatenate((rmid,rmid+1,rmid-1)))
short = np.append(False,np.logical_and(np.diff(rmid)>1,np.diff(rmid)<5))
# gives a Bool marking indices that are closer than 5 frames apart (but more than 1)
allrmid = [range(rmid[i-1],rmid[i])[1:] for i,val in enumerate(short) if val==True]
allrmid = np.sort([item for sublist in allrmid for item in sublist]+rmid.tolist())
percrem = len(allrmid)/len(FD)
if percrem > 0.2:
if run == 'task-rest_acq-LR_run-1_bold':
LR1.append(subject)
elif run == 'task-rest_acq-LR_run-2_bold':
LR2.append(subject)
elif run == 'task-rest_acq-RL_run-1_bold':
RL1.append(subject)
elif run == 'task-rest_acq-RL_run-2_bold':
RL2.append(subject)
else:
print("subject %s - run %s discarded due to motion"%(subject,run))
|
[] |
[] |
[
"PREPDIR",
"CONDIR",
"BIDSDIR",
"CONGROUPDIR",
"CODEDIR"
] |
[]
|
["PREPDIR", "CONDIR", "BIDSDIR", "CONGROUPDIR", "CODEDIR"]
|
python
| 5 | 0 | |
manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'finalPython.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
integration/credential_test.go
|
package integration
import (
"bufio"
"bytes"
"crypto/aes"
"crypto/cipher"
"strings"
"github.com/CRORCR/alibaba-cloud-sdk-go/sdk"
"github.com/CRORCR/alibaba-cloud-sdk-go/sdk/auth/credentials/provider"
"github.com/CRORCR/alibaba-cloud-sdk-go/sdk/requests"
"github.com/CRORCR/alibaba-cloud-sdk-go/services/airec"
"github.com/CRORCR/alibaba-cloud-sdk-go/services/ecs"
"github.com/stretchr/testify/assert"
"os"
"testing"
)
func Test_DescribeRegionsWithRPCrequestWithAK(t *testing.T) {
client, err := ecs.NewClientWithAccessKey(os.Getenv("REGION_ID"), os.Getenv("ACCESS_KEY_ID"), os.Getenv("ACCESS_KEY_SECRET"))
assert.Nil(t, err)
assert.NotNil(t, client)
request := ecs.CreateDescribeRegionsRequest()
request.Scheme = "https"
response, err := client.DescribeRegions(request)
assert.Nil(t, err)
assert.NotNil(t, response)
assert.Equal(t, 36, len(response.RequestId))
assert.True(t, len(response.Regions.Region) > 0)
}
func Test_DescribeRegionsWithRPCrequestWithSTStoken(t *testing.T) {
assumeresponse, err := createAssumeRole()
assert.Nil(t, err)
credential := assumeresponse.Credentials
client, err := ecs.NewClientWithStsToken(os.Getenv("REGION_ID"), credential.AccessKeyId, credential.AccessKeySecret, credential.SecurityToken)
assert.Nil(t, err)
assert.NotNil(t, client)
request := ecs.CreateDescribeRegionsRequest()
request.Scheme = "https"
response, err := client.DescribeRegions(request)
assert.Nil(t, err)
assert.NotNil(t, response)
assert.Equal(t, 36, len(response.RequestId))
assert.True(t, len(response.Regions.Region) > 0)
}
func Test_CreateDiversifyWithROArequestWithAK(t *testing.T) {
client, err := airec.NewClientWithAccessKey(os.Getenv("REGION_ID"), os.Getenv("ACCESS_KEY_ID"), os.Getenv("ACCESS_KEY_SECRET"))
assert.Nil(t, err)
request := airec.CreateCreateDiversifyRequest()
request.SetDomain("airec.cn-hangzhou.aliyuncs.com")
request.QueryParams["RegionId"] = os.Getenv("REGION_ID")
request.Method = "GET"
response, err := client.CreateDiversify(request)
assert.NotNil(t, err)
assert.Equal(t, 400, response.GetHttpStatus())
assert.Contains(t, err.Error(), "Request url is invalid")
}
func Test_DescribeRegionsWithRPCrequestWithArn(t *testing.T) {
subaccesskeyid, subaccesskeysecret, err := createAccessKey()
assert.Nil(t, err)
client, err := ecs.NewClientWithRamRoleArn(os.Getenv("REGION_ID"), subaccesskeyid, subaccesskeysecret, rolearn, "alice_test")
assert.Nil(t, err)
request := ecs.CreateDescribeRegionsRequest()
request.Scheme = "https"
request.Domain = "ecs.aliyuncs.com"
response, err := client.DescribeRegions(request)
assert.Nil(t, err)
assert.Equal(t, 36, len(response.RequestId))
}
func TestDescribeRegionsWithProviderAndAk(t *testing.T) {
os.Setenv(provider.ENVAccessKeyID, os.Getenv("ACCESS_KEY_ID"))
os.Setenv(provider.ENVAccessKeySecret, os.Getenv("ACCESS_KEY_SECRET"))
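// the provider-based client below is expected to pick these credentials up from the environment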
request := requests.NewCommonRequest()
request.Version = "2014-05-26"
request.Product = "Ecs"
request.ApiName = "DescribeRegions"
request.SetDomain("ecs.aliyuncs.com")
request.TransToAcsRequest()
client, err := sdk.NewClientWithProvider(os.Getenv("REGION_ID"))
assert.Nil(t, err)
response, err := client.ProcessCommonRequest(request)
assert.Nil(t, err)
assert.True(t, response.IsSuccess())
}
func TestDescribeRegionsWithProviderAndRsaKeyPair(t *testing.T) {
request := requests.NewCommonRequest()
request.Version = "2014-05-26"
request.Product = "Ecs"
request.ApiName = "DescribeRegions"
request.SetDomain("ecs.ap-northeast-1.aliyuncs.com")
request.TransToAcsRequest()
key := os.Getenv("RSA_FILE_AES_KEY")
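// the key file on disk is AES-CBC encrypted; decrypt it with the key from the environment and strip the PEM marker lines before use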
srcfile, err := os.Open("./encyptfile")
assert.Nil(t, err)
defer srcfile.Close()
buf := new(bytes.Buffer)
read := bufio.NewReader(srcfile)
read.WriteTo(buf)
block, err := aes.NewCipher([]byte(key))
assert.Nil(t, err)
origData := buf.Bytes()
blockdec := cipher.NewCBCDecrypter(block, []byte(key)[:block.BlockSize()])
orig := make([]byte, len(origData))
blockdec.CryptBlocks(orig, origData)
orig = PKCS7UnPadding(orig)
cyphbuf := bytes.NewBuffer(orig)
scan := bufio.NewScanner(cyphbuf)
var data string
for scan.Scan() {
if strings.HasPrefix(scan.Text(), "----") {
continue
}
data += scan.Text() + "\n"
}
client, err := sdk.NewClientWithRsaKeyPair("ap-northeast-1", os.Getenv("PUBLIC_KEY_ID"), data, 3600)
assert.Nil(t, err)
response, err := client.ProcessCommonRequest(request)
assert.Nil(t, err)
assert.True(t, response.IsSuccess())
}
func PKCS7UnPadding(origData []byte) []byte {
length := len(origData)
unpadding := int(origData[length-1])
return origData[:(length - unpadding)]
}
func TestDescribeRegionsWithBearToken(t *testing.T) {
request := requests.NewCommonRequest()
request.Version = "2017-07-05"
request.Product = "CCC"
request.ApiName = "ListRoles "
request.SetDomain("ccc.cn-shanghai.aliyuncs.com")
request.TransToAcsRequest()
client, err := sdk.NewClientWithBearerToken("cn-shanghai", "eyJhbGciOiJSUzI1NiIsImsyaWQiOiJlNE92NnVOUDhsMEY2RmVUMVhvek5wb1NBcVZLblNGRyIsImtpZCI6IkpDOXd4enJocUowZ3RhQ0V0MlFMVWZldkVVSXdsdEZodWk0TzFiaDY3dFUifQ.TjU2UldwZzFzRE1oVEN5UStjYlZLV1dzNW45cFBOSWdNRDhzQmVXYmVpLytWY012MEJqYjdTdnB3SE9LcHBiZkorUGdvclAxRy9GTjdHeldmaWZFVndoa05ueUNTem80dU0rUVFKdDFSY2V0bmFQcml5WFljTDhmNUZ2c1pFd3BhTDFOajVvRW9QVG83S1NVU3JpTFdKQmNnVHB1U094cUd4cGpCeFdXS0pDVnN0L3lzRkp4RTVlSFNzUm1Qa1FBVTVwS1lmaXE0QVFSd3lPQjdYSk1uUGFKU1BiSWhyWVFVS21WOVd5K2d3PT0.jxdCiNimyes3swDRBSxdsgaL4IlOD2Kz49Gf5w0VZ0Xap9ozUyxvSSywGzMrKvCTIoeh9QMCMjCpnt9A-nQxENj3YGAeBk8Wy19uHiT-4OVo-CiCKmKxILpzxcpOptNO-LER1swVLbt0NiTuTH4KB5CUaRwJKIFJuUwa57HcsWbvWQyZa1ms0NNOccNfGJl4177eY2LTUyyXWi4wYNA_L0YMTkZz4sOFM_Mdzks8bHXiSbGkkjfWQy0QblkLz6Bboh1OYlg3_RCLSWby_FMNoxU_eG2lGAsDnYxZDmCAq2jedY0x1RzZodo9HYRQN7DujlBhfzqm4hOBNvA3LiJfzw")
assert.Nil(t, err)
response, err := client.ProcessCommonRequest(request)
assert.True(t, strings.Contains(err.Error(), "Bearertoken has expired"))
assert.False(t, response.IsSuccess())
}
|
[
"\"REGION_ID\"",
"\"ACCESS_KEY_ID\"",
"\"ACCESS_KEY_SECRET\"",
"\"REGION_ID\"",
"\"REGION_ID\"",
"\"ACCESS_KEY_ID\"",
"\"ACCESS_KEY_SECRET\"",
"\"REGION_ID\"",
"\"REGION_ID\"",
"\"ACCESS_KEY_ID\"",
"\"ACCESS_KEY_SECRET\"",
"\"REGION_ID\"",
"\"RSA_FILE_AES_KEY\"",
"\"PUBLIC_KEY_ID\""
] |
[] |
[
"PUBLIC_KEY_ID",
"RSA_FILE_AES_KEY",
"REGION_ID",
"ACCESS_KEY_SECRET",
"ACCESS_KEY_ID"
] |
[]
|
["PUBLIC_KEY_ID", "RSA_FILE_AES_KEY", "REGION_ID", "ACCESS_KEY_SECRET", "ACCESS_KEY_ID"]
|
go
| 5 | 0 | |
main.go
|
package main
import (
"flag"
"math"
"math/rand"
"os"
"strconv"
"time"
"github.com/elastic/hey-apm/benchmark"
"github.com/elastic/hey-apm/models"
"github.com/elastic/hey-apm/worker"
)
func main() {
var err error
input := parseFlags()
if input.IsBenchmark {
err = benchmark.Run(input)
} else {
_, err = worker.Run(input)
}
if err != nil {
os.Exit(1)
}
}
func parseFlags() models.Input {
// run options
runTimeout := flag.Duration("run", 30*time.Second, "stop run after this duration")
flushTimeout := flag.Duration("flush", 10*time.Second, "wait timeout for agent flush")
seed := flag.Int64("seed", time.Now().Unix(), "random seed")
// convenience for https://www.elastic.co/guide/en/apm/agent/go/current/configuration.html
serviceName := os.Getenv("ELASTIC_APM_SERVICE_NAME")
if serviceName == "" {
serviceName = *flag.String("service-name", "hey-service", "service name") // ELASTIC_APM_SERVICE_NAME
}
// apm-server options
apmServerSecret := flag.String("apm-secret", "", "apm server secret token") // ELASTIC_APM_SECRET_TOKEN
apmServerUrl := flag.String("apm-url", "http://localhost:8200", "apm server url") // ELASTIC_APM_SERVER_URL
elasticsearchUrl := flag.String("es-url", "http://localhost:9200", "elasticsearch url for reporting")
elasticsearchAuth := flag.String("es-auth", "", "elasticsearch username:password reporting")
apmElasticsearchUrl := flag.String("apm-es-url", "http://localhost:9200", "elasticsearch output host for apm-server under load")
apmElasticsearchAuth := flag.String("apm-es-auth", "", "elasticsearch output username:password for apm-server under load")
isBench := flag.Bool("bench", false, "execute a benchmark with fixed parameters")
regressionMargin := flag.Float64("rm", 1.1, "margin of acceptable performance decrease to not consider a regression (only in combination with -bench)")
regressionDays := flag.String("rd", "7", "number of days back to check for regressions (only in combination with -bench)")
// payload options
errorLimit := flag.Int("e", math.MaxInt64, "max errors to generate (only if -bench is not passed)")
errorFrequency := flag.Duration("ef", 1*time.Nanosecond, "error frequency. "+
"generate errors up to once in this duration (only if -bench is not passed)")
errorFrameMaxLimit := flag.Int("ex", 10, "max error frames to per error (only if -bench is not passed)")
errorFrameMinLimit := flag.Int("em", 0, "max error frames to per error (only if -bench is not passed)")
spanMaxLimit := flag.Int("sx", 10, "max spans to per transaction (only if -bench is not passed)")
spanMinLimit := flag.Int("sm", 1, "min spans to per transaction (only if -bench is not passed)")
transactionLimit := flag.Int("t", math.MaxInt64, "max transactions to generate (only if -bench is not passed)")
transactionFrequency := flag.Duration("tf", 1*time.Nanosecond, "transaction frequency. "+
"generate transactions up to once in this duration (only if -bench is not passed)")
flag.Parse()
// resolve the service name only after flag.Parse(); dereferencing the flag earlier would always yield the default
serviceName := os.Getenv("ELASTIC_APM_SERVICE_NAME")
if serviceName == "" {
serviceName = *serviceNameFlag
}
if *spanMaxLimit < *spanMinLimit {
spanMaxLimit = spanMinLimit
}
rand.Seed(*seed)
input := models.Input{
IsBenchmark: *isBench,
ApmServerUrl: *apmServerUrl,
ApmServerSecret: *apmServerSecret,
ElasticsearchUrl: *elasticsearchUrl,
ElasticsearchAuth: *elasticsearchAuth,
ApmElasticsearchUrl: *apmElasticsearchUrl,
ApmElasticsearchAuth: *apmElasticsearchAuth,
ServiceName: serviceName,
RunTimeout: *runTimeout,
FlushTimeout: *flushTimeout,
}
if *isBench {
if _, err := strconv.Atoi(*regressionDays); err != nil {
panic(err)
}
input.RegressionDays = *regressionDays
input.RegressionMargin = *regressionMargin
return input
}
input.TransactionFrequency = *transactionFrequency
input.TransactionLimit = *transactionLimit
input.SpanMaxLimit = *spanMaxLimit
input.SpanMinLimit = *spanMinLimit
input.ErrorFrequency = *errorFrequency
input.ErrorLimit = *errorLimit
input.ErrorFrameMaxLimit = *errorFrameMaxLimit
input.ErrorFrameMinLimit = *errorFrameMinLimit
return input
}
|
[
"\"ELASTIC_APM_SERVICE_NAME\""
] |
[] |
[
"ELASTIC_APM_SERVICE_NAME"
] |
[]
|
["ELASTIC_APM_SERVICE_NAME"]
|
go
| 1 | 0 | |
main.go
|
package main
import (
"log"
"net/http"
"os"
"github.com/gorilla/mux"
"github.com/joho/godotenv"
)
var (
// list of actions supported
actionsFlag = map[string]func() error{
"init": SetupEnv,
"delete-cache": deleteCache,
"serve": serve,
}
)
func main() {
var commName string
if len(os.Args) < 2 {
commName = "serve"
} else {
commName = os.Args[1]
}
comm, ok := actionsFlag[commName]
if !ok {
log.Fatalln(ErrInvalidFlag)
return
}
if commName != "init" {
loadEnv()
}
err := comm()
if err != nil {
log.Fatalln(err)
}
}
// loadEnv loads the server.env file and puts the values
// in the env vars
func loadEnv() {
err := godotenv.Load("server.env")
if err != nil {
log.Fatal("Error loading .env file")
}
}
// serve starts the webapp
func serve() error {
// setup default routes
router := mux.NewRouter()
router.HandleFunc("/", HandleNotAllowedMethod).Methods("GET")
router.HandleFunc("/", HandleUpload).Methods("POST")
router.HandleFunc("/{image}.{extension}", HandleDownload).Methods("GET")
// serve
serverPort := os.Getenv("SERVER_PORT")
return http.ListenAndServe(":"+serverPort, router)
}
|
[
"\"SERVER_PORT\""
] |
[] |
[
"SERVER_PORT"
] |
[]
|
["SERVER_PORT"]
|
go
| 1 | 0 | |
tests/contrib/utils/gcp_authenticator.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
import subprocess
from airflow import settings, AirflowException
from airflow.models import Connection
from tests.contrib.utils.logging_command_executor import LoggingCommandExecutor
# Please keep these variables in alphabetical order.
GCP_AI_KEY = 'gcp_ai.json'
GCP_BIGTABLE_KEY = 'gcp_bigtable.json'
GCP_CLOUD_BUILD_KEY = 'gcp_cloud_build.json'
GCP_CLOUDSQL_KEY = 'gcp_cloudsql.json'
GCP_COMPUTE_KEY = 'gcp_compute.json'
GCP_DATAPROC_KEY = 'gcp_dataproc.json'
GCP_DLP_KEY = 'gcp_dlp.json'
GCP_FUNCTION_KEY = 'gcp_function.json'
GCP_GCS_KEY = 'gcp_gcs.json'
GCP_GCS_TRANSFER_KEY = 'gcp_gcs_transfer.json'
GCP_SPANNER_KEY = 'gcp_spanner.json'
KEYPATH_EXTRA = 'extra__google_cloud_platform__key_path'
KEYFILE_DICT_EXTRA = 'extra__google_cloud_platform__keyfile_dict'
SCOPE_EXTRA = 'extra__google_cloud_platform__scope'
PROJECT_EXTRA = 'extra__google_cloud_platform__project'
AIRFLOW_MAIN_FOLDER = os.path.realpath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, os.pardir, os.pardir)
)
class GcpAuthenticator(LoggingCommandExecutor):
"""
Manages authentication to Google Cloud Platform. It helps to manage the
connection - it can authenticate with the specified GCP key name.
"""
original_account = None
def __init__(self, gcp_key, project_extra=None):
"""
Initialises the authenticator.
:param gcp_key: name of the key to use for authentication (see GCP_*_KEY values)
:param project_extra: optional extra project parameter passed to google cloud
connection
"""
super().__init__()
self.gcp_key = gcp_key
self.project_extra = project_extra
self.project_id = self.get_project_id()
self.full_key_path = None
self._set_key_path()
@staticmethod
def get_project_id():
return os.environ.get('GCP_PROJECT_ID')
def set_key_path_in_airflow_connection(self):
"""
Set key path in 'google_cloud_default' connection to point to the full
key path
:return: None
"""
session = settings.Session()
try:
conn = session.query(Connection).filter(Connection.conn_id == 'google_cloud_default')[0]
extras = conn.extra_dejson
extras[KEYPATH_EXTRA] = self.full_key_path
if extras.get(KEYFILE_DICT_EXTRA):
del extras[KEYFILE_DICT_EXTRA]
extras[SCOPE_EXTRA] = 'https://www.googleapis.com/auth/cloud-platform'
extras[PROJECT_EXTRA] = self.project_extra if self.project_extra else self.project_id
conn.extra = json.dumps(extras)
session.commit()
except BaseException as ex:
self.log.info('Airflow DB Session error:' + str(ex))
session.rollback()
raise
finally:
session.close()
def set_dictionary_in_airflow_connection(self):
"""
Set dictionary in 'google_cloud_default' connection to contain content
of the json service account file.
:return: None
"""
session = settings.Session()
try:
conn = session.query(Connection).filter(Connection.conn_id == 'google_cloud_default')[0]
extras = conn.extra_dejson
with open(self.full_key_path, "r") as path_file:
content = json.load(path_file)
extras[KEYFILE_DICT_EXTRA] = json.dumps(content)
if extras.get(KEYPATH_EXTRA):
del extras[KEYPATH_EXTRA]
extras[SCOPE_EXTRA] = 'https://www.googleapis.com/auth/cloud-platform'
extras[PROJECT_EXTRA] = self.project_extra
conn.extra = json.dumps(extras)
session.commit()
except BaseException as ex:
self.log.info('Airflow DB Session error:' + str(ex))
session.rollback()
raise
finally:
session.close()
def _set_key_path(self):
"""
Sets full key path - if GCP_CONFIG_DIR points to absolute
directory, it tries to find the key in this directory. Otherwise it assumes
that Airflow is run from directory where configuration i checked out
next to airflow directory in config directory
it tries to find the key folder in the workspace's config
directory.
:param : name of the key file to find.
"""
if "GCP_CONFIG_DIR" in os.environ:
gcp_config_dir = os.environ["GCP_CONFIG_DIR"]
else:
gcp_config_dir = os.path.join(AIRFLOW_MAIN_FOLDER, os.pardir, "config")
if not os.path.isdir(gcp_config_dir):
self.log.info("The {} is not a directory".format(gcp_config_dir))
key_dir = os.path.join(gcp_config_dir, "keys")
if not os.path.isdir(key_dir):
self.log.info("The {} is not a directory".format(key_dir))
return
key_path = os.path.join(key_dir, self.gcp_key)
if not os.path.isfile(key_path):
self.log.info("The {} is missing".format(key_path))
self.full_key_path = key_path
def _validate_key_set(self):
if self.full_key_path is None:
raise AirflowException("The gcp_key is not set!")
if not os.path.isfile(self.full_key_path):
raise AirflowException(
"The key {} could not be found. Please copy it to the {} path.".format(
self.gcp_key, self.full_key_path
)
)
def gcp_authenticate(self):
"""
Authenticate with service account specified via key name.
"""
self._validate_key_set()
self.log.info("Setting the GCP key to {}".format(self.full_key_path))
# Checking if we can authenticate using service account credentials provided
self.execute_cmd(
[
'gcloud',
'auth',
'activate-service-account',
'--key-file={}'.format(self.full_key_path),
'--project={}'.format(self.project_id),
]
)
self.set_key_path_in_airflow_connection()
def gcp_revoke_authentication(self):
"""
Change default authentication to none - which is not existing one.
"""
self._validate_key_set()
self.log.info("Revoking authentication - setting it to none")
self.execute_cmd(['gcloud', 'config', 'get-value', 'account', '--project={}'.format(self.project_id)])
self.execute_cmd(
['gcloud', 'config', 'set', 'account', 'none', '--project={}'.format(self.project_id)]
)
def gcp_store_authentication(self):
"""
Store authentication as it was originally so it can be restored and revoke
authentication.
"""
self._validate_key_set()
if not GcpAuthenticator.original_account:
GcpAuthenticator.original_account = self.check_output(
['gcloud', 'config', 'get-value', 'account', '--project={}'.format(self.project_id)]
).decode('utf-8')
self.log.info("Storing account: to restore it later {}".format(GcpAuthenticator.original_account))
def gcp_restore_authentication(self):
"""
Restore authentication to the original one.
"""
self._validate_key_set()
if GcpAuthenticator.original_account:
self.log.info("Restoring original account stored: {}".format(GcpAuthenticator.original_account))
subprocess.call(
[
'gcloud',
'config',
'set',
'account',
GcpAuthenticator.original_account,
'--project={}'.format(self.project_id),
]
)
else:
self.log.info("Not restoring the original GCP account: it is not set")
|
[] |
[] |
[
"GCP_PROJECT_ID",
"GCP_CONFIG_DIR"
] |
[]
|
["GCP_PROJECT_ID", "GCP_CONFIG_DIR"]
|
python
| 2 | 0 | |
src/github.com/toggl/go-basecamp/examples/main.go
|
package main
import (
"../"
"log"
"os"
)
func main() {
var (
err error
accountId int
accessToken string
accounts []*basecamp.Account
projects []*basecamp.Project
people []*basecamp.Person
todoLists []*basecamp.TodoList
)
if accessToken = os.Getenv("BASECAMP_ACCESS_TOKEN"); accessToken == "" {
log.Println("ERROR: Unable to retrieve BASECAMP_ACCESS_TOKEN environment variable!")
return
}
log.Println("Fetching accounts, projects and users...")
c := basecamp.Client{AccessToken: accessToken}
if accounts, err = c.GetAccounts(); err != nil {
log.Printf("ERROR %q", err)
return
}
accountId = accounts[1].Id
if projects, err = c.GetProjects(accountId); err != nil {
log.Printf("ERROR %q", err)
return
}
if people, err = c.GetPeople(accountId); err != nil {
log.Printf("ERROR %q", err)
return
}
if todoLists, err = c.GetTodoLists(accountId); err != nil {
log.Printf("ERROR %q", err)
return
}
for _, account := range accounts {
log.Printf("Account: %+v", account)
}
for _, person := range people {
log.Printf("Person: %+v", person)
}
for _, project := range projects {
log.Printf("Project: %+v", *project)
}
for _, todoList := range todoLists {
log.Printf("Todolist: %+v", *todoList)
}
}
|
[
"\"BASECAMP_ACCESS_TOKEN\""
] |
[] |
[
"BASECAMP_ACCESS_TOKEN"
] |
[]
|
["BASECAMP_ACCESS_TOKEN"]
|
go
| 1 | 0 | |
main.go
|
package main
import (
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
"github.com/fedragon/cuttlefish/git"
"github.com/fedragon/cuttlefish/globals"
"github.com/fedragon/cuttlefish/paths"
"github.com/fedragon/cuttlefish/ssh"
"github.com/knadh/koanf"
"github.com/knadh/koanf/parsers/yaml"
"github.com/knadh/koanf/providers/file"
)
var k = koanf.New(".")
// ConfigFileName is the name of the config file used by this script
const ConfigFileName = ".cuttlefish.yaml"
var sysTmpDir = os.Getenv("TMPDIR")
// TmpFile is the tmp file where the script stores the path to the previous config, if any
var TmpFile = filepath.Join(sysTmpDir, "cuttlefish-lastvisited")
func main() {
config, err := find(os.Getenv("PWD"))
if err != nil {
// signal that there's nothing to source
os.Exit(1)
}
previousConfig, err := ioutil.ReadFile(TmpFile)
if err != nil {
previousConfig = []byte("")
}
if string(previousConfig) == config {
// nothing to do
os.Exit(1)
}
if err := k.Load(file.Provider(config), yaml.Parser()); err != nil {
log.Fatal(err)
}
var cmds []string
if strings.TrimSpace(string(previousConfig)) == "" {
cmds = withoutPreviousConfig()
} else {
cmds, err = withPreviousConfig(string(previousConfig))
if err != nil {
log.Fatal(err)
}
}
if err = ioutil.WriteFile(TmpFile, []byte(config), 0644); err != nil {
log.Fatal(err)
}
fmt.Println(strings.Join(cmds, "\n"))
}
func find(pwd string) (string, error) {
absPwd, err := filepath.Abs(pwd)
if err != nil {
return "", err
}
if !strings.HasPrefix(absPwd, os.Getenv("HOME")) {
return "", errors.New("only checking in home dir")
}
fullPath := filepath.Join(absPwd, ConfigFileName)
_, err = os.Stat(fullPath)
if os.IsNotExist(err) {
if os.Getenv("HOME") == absPwd {
return "", fmt.Errorf("no %v found in any parent (stopped at %v)", ConfigFileName, os.Getenv("HOME"))
}
return find(filepath.Dir(absPwd))
}
return fullPath, nil
}
func withoutPreviousConfig() []string {
cmds := paths.Set(k.Strings("user_paths"), nil)
cmds = append(cmds, ssh.Set(k.Strings("ssh_identities"), nil)...)
cmds = append(cmds, globals.Set(k.StringMap("global_variables"), nil)...)
return append(cmds, git.SetIdentity(k.String("git_config.email")))
}
func withPreviousConfig(previous string) ([]string, error) {
var pk = koanf.New(".")
if err := pk.Load(file.Provider(previous), yaml.Parser()); err != nil {
return nil, err
}
cmds := paths.Set(k.Strings("user_paths"), pk.Strings("user_paths"))
cmds = append(cmds, git.SetIdentity(k.String("git_config.email")))
cmds = append(cmds, ssh.Set(k.Strings("ssh_identities"), pk.Strings("ssh_identities"))...)
vars := pk.StringMap("global_variables")
keys := make([]string, 0, len(vars))
for key := range vars {
keys = append(keys, key)
}
return append(cmds, globals.Set(k.StringMap("global_variables"), keys)...), nil
}
|
[
"\"TMPDIR\"",
"\"PWD\"",
"\"HOME\"",
"\"HOME\"",
"\"HOME\""
] |
[] |
[
"PWD",
"HOME",
"TMPDIR"
] |
[]
|
["PWD", "HOME", "TMPDIR"]
|
go
| 3 | 0 | |
soracom/generated/cmd/subscribers_put_bundles.go
|
// Code generated by soracom-cli generate-cmd. DO NOT EDIT.
package cmd
import (
"fmt"
"io/ioutil"
"net/url"
"os"
"strings"
"github.com/spf13/cobra"
)
// SubscribersPutBundlesCmdImsi holds value of 'imsi' option
var SubscribersPutBundlesCmdImsi string
// SubscribersPutBundlesCmdBody holds contents of request body to be sent
var SubscribersPutBundlesCmdBody string
func init() {
SubscribersPutBundlesCmd.Flags().StringVar(&SubscribersPutBundlesCmdImsi, "imsi", "", TRAPI("IMSI of the target subscriber."))
SubscribersPutBundlesCmd.Flags().StringVar(&SubscribersPutBundlesCmdBody, "body", "", TRCLI("cli.common_params.body.short_help"))
SubscribersCmd.AddCommand(SubscribersPutBundlesCmd)
}
// SubscribersPutBundlesCmd defines 'put-bundles' subcommand
var SubscribersPutBundlesCmd = &cobra.Command{
Use: "put-bundles",
Short: TRAPI("/subscribers/{imsi}/bundles:put:summary"),
Long: TRAPI(`/subscribers/{imsi}/bundles:put:description`),
RunE: func(cmd *cobra.Command, args []string) error {
opt := &apiClientOptions{
BasePath: "/v1",
Language: getSelectedLanguage(),
}
ac := newAPIClient(opt)
if v := os.Getenv("SORACOM_VERBOSE"); v != "" {
ac.SetVerbose(true)
}
err := authHelper(ac, cmd, args)
if err != nil {
cmd.SilenceUsage = true
return err
}
param, err := collectSubscribersPutBundlesCmdParams(ac)
if err != nil {
return err
}
body, err := ac.callAPI(param)
if err != nil {
cmd.SilenceUsage = true
return err
}
if body == "" {
return nil
}
if rawOutput {
_, err = os.Stdout.Write([]byte(body))
} else {
return prettyPrintStringAsJSON(body)
}
return err
},
}
func collectSubscribersPutBundlesCmdParams(ac *apiClient) (*apiParams, error) {
body, err := buildBodyForSubscribersPutBundlesCmd()
if err != nil {
return nil, err
}
contentType := "application/json"
if SubscribersPutBundlesCmdImsi == "" {
if body == "" {
return nil, fmt.Errorf("required parameter '%s' is not specified", "imsi")
}
}
return &apiParams{
method: "PUT",
path: buildPathForSubscribersPutBundlesCmd("/subscribers/{imsi}/bundles"),
query: buildQueryForSubscribersPutBundlesCmd(),
contentType: contentType,
body: body,
noRetryOnError: noRetryOnError,
}, nil
}
func buildPathForSubscribersPutBundlesCmd(path string) string {
escapedImsi := url.PathEscape(SubscribersPutBundlesCmdImsi)
path = strReplace(path, "{"+"imsi"+"}", escapedImsi, -1)
return path
}
func buildQueryForSubscribersPutBundlesCmd() url.Values {
result := url.Values{}
return result
}
func buildBodyForSubscribersPutBundlesCmd() (string, error) {
var b []byte
var err error
if SubscribersPutBundlesCmdBody != "" {
if strings.HasPrefix(SubscribersPutBundlesCmdBody, "@") {
fname := strings.TrimPrefix(SubscribersPutBundlesCmdBody, "@")
// #nosec
b, err = ioutil.ReadFile(fname)
} else if SubscribersPutBundlesCmdBody == "-" {
b, err = ioutil.ReadAll(os.Stdin)
} else {
b = []byte(SubscribersPutBundlesCmdBody)
}
if err != nil {
return "", err
}
}
if b == nil {
b = []byte{}
}
return string(b), nil
}
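// Illustrative invocations (not part of the generated file; the IMSI and body values are
// placeholders), mirroring the three branches above:
//   soracom subscribers put-bundles --imsi 440100000000000 --body @bundles.json    # body read from a file
//   cat bundles.json | soracom subscribers put-bundles --imsi 440100000000000 --body -    # body read from stdin
//   soracom subscribers put-bundles --imsi 440100000000000 --body '["plan01s"]'    # inline body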
|
[
"\"SORACOM_VERBOSE\""
] |
[] |
[
"SORACOM_VERBOSE"
] |
[]
|
["SORACOM_VERBOSE"]
|
go
| 1 | 0 | |
pkg/operator/ceph/controller/spec.go
|
/*
Copyright 2018 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package controller provides Kubernetes controller/pod/container spec items used for many Ceph daemons
package controller
import (
"fmt"
"os"
"path"
"strings"
"github.com/coreos/pkg/capnslog"
"github.com/pkg/errors"
cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
cephclient "github.com/rook/rook/pkg/daemon/ceph/client"
"github.com/rook/rook/pkg/operator/ceph/config"
"github.com/rook/rook/pkg/operator/ceph/config/keyring"
"github.com/rook/rook/pkg/operator/k8sutil"
"github.com/rook/rook/pkg/util/display"
v1 "k8s.io/api/core/v1"
)
const (
// ConfigInitContainerName is the name which is given to the config initialization container
// in all Ceph pods.
ConfigInitContainerName = "config-init"
logVolumeName = "rook-ceph-log"
volumeMountSubPath = "data"
crashVolumeName = "rook-ceph-crash"
daemonSocketDir = "/run/ceph"
initialDelaySecondsNonOSDDaemon int32 = 10
initialDelaySecondsOSDDaemon int32 = 45
logCollector = "log-collector"
)
type daemonConfig struct {
daemonType string
daemonID string
}
var logger = capnslog.NewPackageLogger("github.com/rook/rook", "ceph-spec")
var (
cronLogRotate = `
set -xe
CEPH_CLIENT_ID=%s
PERIODICITY=%s
LOG_ROTATE_CEPH_FILE=/etc/logrotate.d/ceph
if [ -z "$PERIODICITY" ]; then
PERIODICITY=24h
fi
# edit the logrotate file to only rotate a specific daemon log
# otherwise we will logrotate log files without reloading certain daemons
# this might happen when multiple daemons run on the same machine
sed -i "s|*.log|$CEPH_CLIENT_ID.log|" "$LOG_ROTATE_CEPH_FILE"
while true; do
sleep "$PERIODICITY"
echo "starting log rotation"
logrotate --verbose --force "$LOG_ROTATE_CEPH_FILE"
echo "I am going to sleep now, see you in $PERIODICITY"
done
`
)
// return the volume and matching volume mount for mounting the config override ConfigMap into
// containers as "/etc/ceph/ceph.conf".
func configOverrideConfigMapVolumeAndMount() (v1.Volume, v1.VolumeMount) {
secretAndConfigMapVolumeProjections := []v1.VolumeProjection{}
name := k8sutil.ConfigOverrideName // configmap name and name of volume
dir := config.EtcCephDir
file := "ceph.conf"
// TL;DR: mount the configmap's "config" to a file called "ceph.conf" with 0444 permissions
// security: allow it to be read by everyone since ceph processes now run as the 'ceph' user and not 'root'
// Further investigation needs to be done to copy the ceph.conf and change its ownership
// since configuring an owner of a ConfigMap secret is currently impossible
// This also works around the following issue: https://tracker.ceph.com/issues/38606
//
// This design choice avoids the crash/restart situation in Rook
// If we don't set 0444 to the ceph.conf configuration file during its respawn (with exec) the ceph-mgr
// won't be able to read the ceph.conf and the container will die, the "restart" count will increase in k8s
// This will mislead users into thinking something went wrong when it is actually a false positive
mode := int32(0444)
projectionConfigMap := &v1.ConfigMapProjection{Items: []v1.KeyToPath{{Key: k8sutil.ConfigOverrideVal, Path: file, Mode: &mode}}}
projectionConfigMap.Name = name
configMapProjection := v1.VolumeProjection{
ConfigMap: projectionConfigMap,
}
secretAndConfigMapVolumeProjections = append(secretAndConfigMapVolumeProjections, configMapProjection)
v := v1.Volume{
Name: name,
VolumeSource: v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: secretAndConfigMapVolumeProjections,
},
},
}
// configmap's "config" to "/etc/ceph/ceph.conf"
m := v1.VolumeMount{
Name: name,
ReadOnly: true, // should be no reason to write to the config in pods, so enforce this
MountPath: dir,
}
return v, m
}
// ConfGeneratedInPodVolumeAndMount generate an empty dir of /etc/ceph
func ConfGeneratedInPodVolumeAndMount() (v1.Volume, v1.VolumeMount) {
name := "ceph-conf-emptydir"
dir := config.EtcCephDir
v := v1.Volume{Name: name, VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{}}}
// configmap's "config" to "/etc/ceph/ceph.conf"
m := v1.VolumeMount{
Name: name,
MountPath: dir,
}
return v, m
}
// PodVolumes fills in the volumes parameter with the common list of Kubernetes volumes for use in Ceph pods.
// This function is only used for OSDs.
func PodVolumes(dataPaths *config.DataPathMap, dataDirHostPath string, confGeneratedInPod bool) []v1.Volume {
dataDirSource := v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}
if dataDirHostPath != "" {
dataDirSource = v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: dataDirHostPath}}
}
configVolume, _ := configOverrideConfigMapVolumeAndMount()
if confGeneratedInPod {
configVolume, _ = ConfGeneratedInPodVolumeAndMount()
}
v := []v1.Volume{
{Name: k8sutil.DataDirVolume, VolumeSource: dataDirSource},
configVolume,
}
v = append(v, StoredLogAndCrashVolume(dataPaths.HostLogDir(), dataPaths.HostCrashDir())...)
return v
}
// CephVolumeMounts returns the common list of Kubernetes volume mounts for Ceph containers.
// This function is only used for OSDs.
func CephVolumeMounts(dataPaths *config.DataPathMap, confGeneratedInPod bool) []v1.VolumeMount {
_, configMount := configOverrideConfigMapVolumeAndMount()
if confGeneratedInPod {
_, configMount = ConfGeneratedInPodVolumeAndMount()
}
v := []v1.VolumeMount{
{Name: k8sutil.DataDirVolume, MountPath: k8sutil.DataDir},
configMount,
// Rook doesn't run in ceph containers, so it doesn't need the config override mounted
}
v = append(v, StoredLogAndCrashVolumeMount(dataPaths.ContainerLogDir(), dataPaths.ContainerCrashDir())...)
return v
}
// RookVolumeMounts returns the common list of Kubernetes volume mounts for Rook containers.
// This function is only used by OSDs.
func RookVolumeMounts(dataPaths *config.DataPathMap, confGeneratedInPod bool) []v1.VolumeMount {
return CephVolumeMounts(dataPaths, confGeneratedInPod)
}
// DaemonVolumesBase returns the common / static set of volumes.
func DaemonVolumesBase(dataPaths *config.DataPathMap, keyringResourceName string) []v1.Volume {
configOverrideVolume, _ := configOverrideConfigMapVolumeAndMount()
vols := []v1.Volume{
configOverrideVolume,
}
if keyringResourceName != "" {
vols = append(vols, keyring.Volume().Resource(keyringResourceName))
}
if dataPaths.HostLogAndCrashDir != "" {
// logs are persisted to host, so add the host log and crash volumes
vols = append(vols, StoredLogAndCrashVolume(dataPaths.HostLogDir(), dataPaths.HostCrashDir())...)
}
return vols
}
// DaemonVolumesDataPVC returns a PVC volume source for daemon container data.
func DaemonVolumesDataPVC(pvcName string) v1.Volume {
return v1.Volume{
Name: "ceph-daemon-data",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvcName,
},
},
}
}
// DaemonVolumesDataHostPath returns HostPath volume source for daemon container
// data.
func DaemonVolumesDataHostPath(dataPaths *config.DataPathMap) []v1.Volume {
vols := []v1.Volume{}
if dataPaths.ContainerDataDir == "" {
// no data is stored in container, and therefore no data can be persisted to host
return vols
}
// when data is not persisted to host, the data may still be shared between init/run containers
src := v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}
if dataPaths.HostDataDir != "" {
// data is persisted to host
src = v1.VolumeSource{HostPath: &v1.HostPathVolumeSource{Path: dataPaths.HostDataDir}}
}
return append(vols, v1.Volume{Name: "ceph-daemon-data", VolumeSource: src})
}
// DaemonVolumesContainsPVC returns true if a volume exists with a volume source
// configured with a persistent volume claim.
func DaemonVolumesContainsPVC(volumes []v1.Volume) bool {
for _, volume := range volumes {
if volume.VolumeSource.PersistentVolumeClaim != nil {
return true
}
}
return false
}
// DaemonVolumes returns the pod volumes used by all Ceph daemons. If keyring resource name is
// empty, there will be no keyring volume created from a secret.
func DaemonVolumes(dataPaths *config.DataPathMap, keyringResourceName string) []v1.Volume {
vols := DaemonVolumesBase(dataPaths, keyringResourceName)
vols = append(vols, DaemonVolumesDataHostPath(dataPaths)...)
return vols
}
// DaemonVolumeMounts returns volume mounts which correspond to the DaemonVolumes. These
// volume mounts are shared by most all Ceph daemon containers, both init and standard. If keyring
// resource name is empty, there will be no keyring mounted in the container.
func DaemonVolumeMounts(dataPaths *config.DataPathMap, keyringResourceName string) []v1.VolumeMount {
_, configOverrideMount := configOverrideConfigMapVolumeAndMount()
mounts := []v1.VolumeMount{
configOverrideMount,
}
if keyringResourceName != "" {
mounts = append(mounts, keyring.VolumeMount().Resource(keyringResourceName))
}
if dataPaths.HostLogAndCrashDir != "" {
// logs are persisted to host, so mount the log and crash directories into the container
mounts = append(mounts, StoredLogAndCrashVolumeMount(dataPaths.ContainerLogDir(), dataPaths.ContainerCrashDir())...)
}
if dataPaths.ContainerDataDir == "" {
// no data is stored in container, so there are no more mounts
return mounts
}
return append(mounts,
v1.VolumeMount{Name: "ceph-daemon-data", MountPath: dataPaths.ContainerDataDir},
)
}
// see AddVolumeMountSubPath
func addVolumeMountSubPathContainer(c *v1.Container, volumeMountName string) {
for i := range c.VolumeMounts {
v := &c.VolumeMounts[i]
if v.Name == volumeMountName {
v.SubPath = volumeMountSubPath
}
}
}
// AddVolumeMountSubPath updates each init and regular container of the podspec
// such that each volume mount attached to a container is mounted under a
// subpath in the source volume. This is important because some daemons may not
// start if the volume mount directory is non-empty. When the volume is the root
// of an ext4 file system, one may find a "lost+found" directory.
func AddVolumeMountSubPath(podSpec *v1.PodSpec, volumeMountName string) {
for i := range podSpec.InitContainers {
c := &podSpec.InitContainers[i]
addVolumeMountSubPathContainer(c, volumeMountName)
}
for i := range podSpec.Containers {
c := &podSpec.Containers[i]
addVolumeMountSubPathContainer(c, volumeMountName)
}
}
// DaemonFlags returns the command line flags used by all Ceph daemons.
func DaemonFlags(cluster *cephclient.ClusterInfo, spec *cephv1.ClusterSpec, daemonID string) []string {
flags := append(
config.DefaultFlags(cluster.FSID, keyring.VolumeMount().KeyringFilePath()),
config.NewFlag("id", daemonID),
// Ceph daemons in Rook will run as 'ceph' instead of 'root'
// If we run on a version of Ceph that does not support these flags, it will simply ignore them
// run ceph daemon process under the 'ceph' user
config.NewFlag("setuser", "ceph"),
// run ceph daemon process under the 'ceph' group
config.NewFlag("setgroup", "ceph"),
)
if spec.Network.IPFamily == cephv1.IPv6 {
flags = append(flags, config.NewFlag("ms-bind-ipv6", "true"))
}
return flags
}
// AdminFlags returns the command line flags used for Ceph commands requiring admin authentication.
func AdminFlags(cluster *cephclient.ClusterInfo) []string {
return append(
config.DefaultFlags(cluster.FSID, keyring.VolumeMount().AdminKeyringFilePath()),
config.NewFlag("setuser", "ceph"),
config.NewFlag("setgroup", "ceph"),
)
}
// ContainerEnvVarReference returns a reference to a Kubernetes container env var of the given name
// which can be used in command or argument fields.
func ContainerEnvVarReference(envVarName string) string {
return fmt.Sprintf("$(%s)", envVarName)
}
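// For example, ContainerEnvVarReference("POD_NAME") returns "$(POD_NAME)", which Kubernetes
// expands from the container's environment when it appears in command or args fields.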
// DaemonEnvVars returns the container environment variables used by all Ceph daemons.
func DaemonEnvVars(image string) []v1.EnvVar {
return append(
k8sutil.ClusterDaemonEnvVars(image),
config.StoredMonHostEnvVars()...,
)
}
// AppLabels returns labels common for all Rook-Ceph applications which may be useful for admins.
// App name is the name of the application: e.g., 'rook-ceph-mon', 'rook-ceph-mgr', etc.
func AppLabels(appName, namespace string) map[string]string {
return map[string]string{
k8sutil.AppAttr: appName,
k8sutil.ClusterAttr: namespace,
}
}
// CephDaemonAppLabels returns pod labels common to all Rook-Ceph pods which may be useful for admins.
// App name is the name of the application: e.g., 'rook-ceph-mon', 'rook-ceph-mgr', etc.
// Daemon type is the Ceph daemon type: "mon", "mgr", "osd", "mds", "rgw"
// Daemon ID is the ID portion of the Ceph daemon name: "a" for "mon.a"; "c" for "mds.c"
func CephDaemonAppLabels(appName, namespace, daemonType, daemonID string, includeNewLabels bool) map[string]string {
labels := AppLabels(appName, namespace)
// New labels cannot be applied to match selectors during upgrade
if includeNewLabels {
labels["ceph_daemon_type"] = daemonType
}
labels["ceph_daemon_id"] = daemonID
// Also report the daemon id keyed by its daemon type: "mon: a", "mds: c", etc.
labels[daemonType] = daemonID
return labels
}
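// Sketch of the result (illustrative; the literal "app" and "rook_cluster" keys come from
// k8sutil.AppAttr and k8sutil.ClusterAttr): CephDaemonAppLabels("rook-ceph-mon", "rook-ceph", "mon", "a", true)
// would yield roughly {"app": "rook-ceph-mon", "rook_cluster": "rook-ceph", "ceph_daemon_type": "mon",
// "ceph_daemon_id": "a", "mon": "a"}.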
// CheckPodMemory verify pod's memory limit is valid
func CheckPodMemory(name string, resources v1.ResourceRequirements, cephPodMinimumMemory uint64) error {
// Ceph related PR: https://github.com/ceph/ceph/pull/26856
podMemoryLimit := resources.Limits.Memory()
podMemoryRequest := resources.Requests.Memory()
// If nothing was provided let's just return
// This means no restrictions on pod's resources
if podMemoryLimit.IsZero() && podMemoryRequest.IsZero() {
return nil
}
if !podMemoryLimit.IsZero() {
// This means LIMIT and REQUEST are either identical or different but still we use LIMIT as a reference
if uint64(podMemoryLimit.Value()) < display.MbTob(cephPodMinimumMemory) {
// allow the configuration if less than the min, but print a warning
logger.Warningf("running the %q daemon(s) with %dmb of ram, but at least %dmb is recommended", name, display.BToMb(uint64(podMemoryLimit.Value())), cephPodMinimumMemory)
}
// This means LIMIT < REQUEST
// Kubernetes will refuse to schedule that pod; however, it's still valuable to indicate that the user's input was incorrect
if uint64(podMemoryLimit.Value()) < uint64(podMemoryRequest.Value()) {
extraErrorLine := `\n
User has specified a pod memory limit %dmb below the pod memory request %dmb in the cluster CR.\n
Rook will create pods that are expected to fail to serve as a more apparent error indicator to the user.`
return errors.Errorf(extraErrorLine, display.BToMb(uint64(podMemoryLimit.Value())), display.BToMb(uint64(podMemoryRequest.Value())))
}
}
return nil
}
// ChownCephDataDirsInitContainer returns an init container which `chown`s the given data
// directories as the `ceph:ceph` user in the container. It also `chown`s the Ceph log dir in the
// container automatically.
// Doing a chown in a post start lifecycle hook does not reliably complete before the OSD
// process starts, which can cause the pod to fail without the lifecycle hook's chown command
// completing. It can take an arbitrarily long time for a pod restart to successfully chown the
// directory. This is a race condition for all daemons; therefore, do this in an init container.
// See more discussion here: https://github.com/rook/rook/pull/3594#discussion_r312279176
func ChownCephDataDirsInitContainer(
dpm config.DataPathMap,
containerImage string,
volumeMounts []v1.VolumeMount,
resources v1.ResourceRequirements,
securityContext *v1.SecurityContext,
) v1.Container {
args := make([]string, 0, 5)
args = append(args,
"--verbose",
"--recursive",
"ceph:ceph",
config.VarLogCephDir,
config.VarLibCephCrashDir,
)
if dpm.ContainerDataDir != "" {
args = append(args, dpm.ContainerDataDir)
}
return v1.Container{
Name: "chown-container-data-dir",
Command: []string{"chown"},
Args: args,
Image: containerImage,
VolumeMounts: volumeMounts,
Resources: resources,
SecurityContext: securityContext,
}
}
// GenerateMinimalCephConfInitContainer returns an init container that will generate the most basic
// Ceph config for connecting non-Ceph daemons to a Ceph cluster (e.g., nfs-ganesha). Effectively
// what this means is that it generates '/etc/ceph/ceph.conf' with 'mon_host' populated and a
// keyring path associated with the user given. 'mon_host' is determined by the 'ROOK_CEPH_MON_HOST'
// env var present in other Ceph daemon pods, and the keyring is expected to be mounted into the
// container with a Kubernetes pod volume+mount.
func GenerateMinimalCephConfInitContainer(
username, keyringPath string,
containerImage string,
volumeMounts []v1.VolumeMount,
resources v1.ResourceRequirements,
securityContext *v1.SecurityContext,
) v1.Container {
cfgPath := cephclient.DefaultConfigFilePath()
// Note that parameters like $(PARAM) will be replaced by Kubernetes with env var content before
// container creation.
confScript := `
set -xEeuo pipefail
cat << EOF > ` + cfgPath + `
[global]
mon_host = $(ROOK_CEPH_MON_HOST)
[` + username + `]
keyring = ` + keyringPath + `
EOF
chmod 444 ` + cfgPath + `
cat ` + cfgPath + `
`
return v1.Container{
Name: "generate-minimal-ceph-conf",
Command: []string{"/bin/bash", "-c", confScript},
Args: []string{},
Image: containerImage,
VolumeMounts: volumeMounts,
Env: config.StoredMonHostEnvVars(),
Resources: resources,
SecurityContext: securityContext,
}
}
// StoredLogAndCrashVolume returns a pod volume sourced from the stored log and crashes files.
func StoredLogAndCrashVolume(hostLogDir, hostCrashDir string) []v1.Volume {
return []v1.Volume{
{
Name: logVolumeName,
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: hostLogDir},
},
},
{
Name: crashVolumeName,
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: hostCrashDir},
},
},
}
}
// StoredLogAndCrashVolumeMount returns a pod volume sourced from the stored log and crashes files.
func StoredLogAndCrashVolumeMount(varLogCephDir, varLibCephCrashDir string) []v1.VolumeMount {
return []v1.VolumeMount{
{
Name: logVolumeName,
ReadOnly: false,
MountPath: varLogCephDir,
},
{
Name: crashVolumeName,
ReadOnly: false,
MountPath: varLibCephCrashDir,
},
}
}
// GenerateLivenessProbeExecDaemon makes sure a daemon has a socket and that it can be called and returns 0
func GenerateLivenessProbeExecDaemon(daemonType, daemonID string) *v1.Probe {
confDaemon := getDaemonConfig(daemonType, daemonID)
initialDelaySeconds := initialDelaySecondsNonOSDDaemon
if daemonType == config.OsdType {
initialDelaySeconds = initialDelaySecondsOSDDaemon
}
return &v1.Probe{
Handler: v1.Handler{
Exec: &v1.ExecAction{
// Run with env -i to clean env variables in the exec context
// This avoids conflict with the CEPH_ARGS env
//
// Example:
// env -i sh -c "ceph --admin-daemon /run/ceph/ceph-osd.0.asok status"
Command: []string{
"env",
"-i",
"sh",
"-c",
fmt.Sprintf("ceph --admin-daemon %s %s", confDaemon.buildSocketPath(), confDaemon.buildAdminSocketCommand()),
},
},
},
InitialDelaySeconds: initialDelaySeconds,
}
}
func getDaemonConfig(daemonType, daemonID string) *daemonConfig {
return &daemonConfig{
daemonType: string(daemonType),
daemonID: daemonID,
}
}
func (c *daemonConfig) buildSocketName() string {
return fmt.Sprintf("ceph-%s.%s.asok", c.daemonType, c.daemonID)
}
func (c *daemonConfig) buildSocketPath() string {
return path.Join(daemonSocketDir, c.buildSocketName())
}
func (c *daemonConfig) buildAdminSocketCommand() string {
command := "status"
if c.daemonType == config.MonType {
command = "mon_status"
}
return command
}
// PodSecurityContext detects if the pod needs privileges to run
func PodSecurityContext() *v1.SecurityContext {
privileged := false
if os.Getenv("ROOK_HOSTPATH_REQUIRES_PRIVILEGED") == "true" {
privileged = true
}
return &v1.SecurityContext{
Privileged: &privileged,
}
}
// LogCollectorContainer runs a cron job to rotate logs
func LogCollectorContainer(daemonID, ns string, c cephv1.ClusterSpec) *v1.Container {
return &v1.Container{
Name: logCollectorContainerName(daemonID),
Command: []string{
"/bin/bash",
"-c",
fmt.Sprintf(cronLogRotate, daemonID, c.LogCollector.Periodicity),
},
Image: c.CephVersion.Image,
VolumeMounts: DaemonVolumeMounts(config.NewDatalessDaemonDataPathMap(ns, c.DataDirHostPath), ""),
SecurityContext: PodSecurityContext(),
Resources: cephv1.GetLogCollectorResources(c.Resources),
}
}
func logCollectorContainerName(daemon string) string {
return fmt.Sprintf("%s-%s", strings.Replace(daemon, ".", "-", -1), logCollector)
}
|
[
"\"ROOK_HOSTPATH_REQUIRES_PRIVILEGED\""
] |
[] |
[
"ROOK_HOSTPATH_REQUIRES_PRIVILEGED"
] |
[]
|
["ROOK_HOSTPATH_REQUIRES_PRIVILEGED"]
|
go
| 1 | 0 | |
internal/provider/resource_signalwire_sip_endpoint_test.go
|
package provider
import (
"errors"
"fmt"
"os"
"testing"
"github.com/hashicorp/terraform-plugin-sdk/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/terraform"
)
func TestAccSignalwireSipEndpoint_basic(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckSignalwireSipEndpointDestroy,
Steps: []resource.TestStep{
{
Config: testAccSignalwireSipEndpointConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckSignalwireSipEndpointExists("signalwire_sip_endpoint.test_endpoint"),
),
},
},
})
}
func testAccCheckSignalwireSipEndpointDestroy(s *terraform.State) error {
client := testAccProvider.Meta().(*Client)
for _, rs := range s.RootModule().Resources {
if rs.Type != "signalwire_sip_endpoint" {
continue
}
resp, err := client.Req("GET", os.Getenv("SIGNALWIRE_SPACE"), "endpoints/sip/", nil)
if err != nil {
return err
}
endpoints := resp["data"].([]interface{})
if len(endpoints) > 0 {
return errors.New("Endpoints still exists")
}
return nil
}
return nil
}
func testAccCheckSignalwireSipEndpointExists(n string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return errors.New("No key ID is set")
}
client := testAccProvider.Meta().(*Client)
resp, err := client.Req("GET", os.Getenv("SIGNALWIRE_SPACE"), "endpoints/sip/"+rs.Primary.ID, nil)
if err != nil {
return err
}
if resp["id"] != rs.Primary.ID {
return errors.New("SIP Endpoint does not match")
}
return nil
}
}
var testAccSignalwireSipEndpointConfig = fmt.Sprintf(`
resource "signalwire_sip_endpoint" "test_endpoint" {
space = "%[1]s"
username = "c3p0"
password = "password"
caller_id = "C-3P0"
ciphers = [
"AEAD_AES_256_GCM_8",
"AES_256_CM_HMAC_SHA1_80",
"AES_CM_128_HMAC_SHA1_80",
"AES_256_CM_HMAC_SHA1_32",
"AES_CM_128_HMAC_SHA1_32"
]
codecs = [
"OPUS",
"G722",
"PCMU",
"PCMA",
"VP8",
"H264"
]
encryption = "optional"
}
`, testSignalwireSpace)
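// Note (not part of the generated test): like other Terraform acceptance tests, these steps hit
// the live API, so SIGNALWIRE_SPACE (plus whatever credentials the provider client expects) must
// be exported and the suite run with TF_ACC set, e.g. `TF_ACC=1 go test ./internal/provider/`.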
|
[
"\"SIGNALWIRE_SPACE\"",
"\"SIGNALWIRE_SPACE\""
] |
[] |
[
"SIGNALWIRE_SPACE"
] |
[]
|
["SIGNALWIRE_SPACE"]
|
go
| 1 | 0 | |
hbase-it/src/test/java/org/apache/hadoop/hbase/HBaseClusterManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import java.io.File;
import java.io.IOException;
import java.util.Locale;
import java.util.Map;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.hbase.HBaseClusterManager.CommandProvider.Operation;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.RetryCounter.RetryConfig;
import org.apache.hadoop.hbase.util.RetryCounterFactory;
import org.apache.hadoop.util.Shell;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A default cluster manager for HBase. Uses SSH, and hbase shell scripts
* to manage the cluster. Assumes Unix-like commands are available like 'ps',
* 'kill', etc. Also assumes the user running the test has enough "power" to start & stop
* servers on the remote machines (for example, the test user could be the same user as the
* user the daemon is running as)
*/
@InterfaceAudience.Private
public class HBaseClusterManager extends Configured implements ClusterManager {
private static final String SIGKILL = "SIGKILL";
private static final String SIGSTOP = "SIGSTOP";
private static final String SIGCONT = "SIGCONT";
protected static final Logger LOG = LoggerFactory.getLogger(HBaseClusterManager.class);
private String sshUserName;
private String sshOptions;
/**
* The command format that is used to execute the remote command. Arguments:
* 1 SSH options, 2 user name, 3 "@" if username is set, 4 host,
* 5 original command, 6 service user.
*/
private static final String DEFAULT_TUNNEL_CMD =
"timeout 30 /usr/bin/ssh %1$s %2$s%3$s%4$s \"sudo -u %6$s %5$s\"";
private String tunnelCmd;
private static final String RETRY_ATTEMPTS_KEY = "hbase.it.clustermanager.retry.attempts";
private static final int DEFAULT_RETRY_ATTEMPTS = 5;
private static final String RETRY_SLEEP_INTERVAL_KEY = "hbase.it.clustermanager.retry.sleep.interval";
private static final int DEFAULT_RETRY_SLEEP_INTERVAL = 1000;
protected RetryCounterFactory retryCounterFactory;
@Override
public void setConf(Configuration conf) {
super.setConf(conf);
if (conf == null) {
// Configured gets passed null before real conf. Why? I don't know.
return;
}
sshUserName = conf.get("hbase.it.clustermanager.ssh.user", "");
String extraSshOptions = conf.get("hbase.it.clustermanager.ssh.opts", "");
sshOptions = System.getenv("HBASE_SSH_OPTS");
if (!extraSshOptions.isEmpty()) {
sshOptions = StringUtils.join(new Object[] { sshOptions, extraSshOptions }, " ");
}
sshOptions = (sshOptions == null) ? "" : sshOptions;
sshUserName = (sshUserName == null) ? "" : sshUserName;
tunnelCmd = conf.get("hbase.it.clustermanager.ssh.cmd", DEFAULT_TUNNEL_CMD);
// Print out ssh special config if any.
if ((sshUserName != null && sshUserName.length() > 0) ||
(sshOptions != null && sshOptions.length() > 0)) {
LOG.info("Running with SSH user [" + sshUserName + "] and options [" + sshOptions + "]");
}
this.retryCounterFactory = new RetryCounterFactory(new RetryConfig()
.setMaxAttempts(conf.getInt(RETRY_ATTEMPTS_KEY, DEFAULT_RETRY_ATTEMPTS))
.setSleepInterval(conf.getLong(RETRY_SLEEP_INTERVAL_KEY, DEFAULT_RETRY_SLEEP_INTERVAL)));
}
private String getServiceUser(ServiceType service) {
Configuration conf = getConf();
switch (service) {
case HADOOP_DATANODE:
case HADOOP_NAMENODE:
return conf.get("hbase.it.clustermanager.hadoop.hdfs.user", "hdfs");
case ZOOKEEPER_SERVER:
return conf.get("hbase.it.clustermanager.zookeeper.user", "zookeeper");
default:
return conf.get("hbase.it.clustermanager.hbase.user", "hbase");
}
}
/**
* Executes commands over SSH
*/
protected class RemoteShell extends Shell.ShellCommandExecutor {
private String hostname;
private String user;
public RemoteShell(String hostname, String[] execString, File dir, Map<String, String> env,
long timeout) {
super(execString, dir, env, timeout);
this.hostname = hostname;
}
public RemoteShell(String hostname, String[] execString, File dir, Map<String, String> env) {
super(execString, dir, env);
this.hostname = hostname;
}
public RemoteShell(String hostname, String[] execString, File dir) {
super(execString, dir);
this.hostname = hostname;
}
public RemoteShell(String hostname, String[] execString) {
super(execString);
this.hostname = hostname;
}
public RemoteShell(String hostname, String user, String[] execString) {
super(execString);
this.hostname = hostname;
this.user = user;
}
@Override
public String[] getExecString() {
String at = sshUserName.isEmpty() ? "" : "@";
String remoteCmd = StringUtils.join(super.getExecString(), " ");
String cmd = String.format(tunnelCmd, sshOptions, sshUserName, at, hostname, remoteCmd, user);
LOG.info("Executing full command [" + cmd + "]");
return new String[] { "/usr/bin/env", "bash", "-c", cmd };
}
@Override
public void execute() throws IOException {
super.execute();
}
}
/**
* Provides command strings for services to be executed by Shell. CommandProviders are
* pluggable, and different deployments(windows, bigtop, etc) can be managed by
* plugging-in custom CommandProvider's or ClusterManager's.
*/
static abstract class CommandProvider {
enum Operation {
START, STOP, RESTART
}
public abstract String getCommand(ServiceType service, Operation op);
public String isRunningCommand(ServiceType service) {
return findPidCommand(service);
}
protected String findPidCommand(ServiceType service) {
return String.format("ps ux | grep proc_%s | grep -v grep | tr -s ' ' | cut -d ' ' -f2",
service);
}
public String signalCommand(ServiceType service, String signal) {
return String.format("%s | xargs kill -s %s", findPidCommand(service), signal);
}
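// For instance (illustrative, assuming the service renders as "regionserver"),
// signalCommand(service, "SIGKILL") produces roughly:
//   ps ux | grep proc_regionserver | grep -v grep | tr -s ' ' | cut -d ' ' -f2 | xargs kill -s SIGKILL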
}
/**
* CommandProvider to manage the service using bin/hbase-* scripts
*/
static class HBaseShellCommandProvider extends CommandProvider {
private final String hbaseHome;
private final String confDir;
HBaseShellCommandProvider(Configuration conf) {
hbaseHome = conf.get("hbase.it.clustermanager.hbase.home",
System.getenv("HBASE_HOME"));
String tmp = conf.get("hbase.it.clustermanager.hbase.conf.dir",
System.getenv("HBASE_CONF_DIR"));
if (tmp != null) {
confDir = String.format("--config %s", tmp);
} else {
confDir = "";
}
}
@Override
public String getCommand(ServiceType service, Operation op) {
return String.format("%s/bin/hbase-daemon.sh %s %s %s", hbaseHome, confDir,
op.toString().toLowerCase(Locale.ROOT), service);
}
}
/**
* CommandProvider to manage the service using sbin/hadoop-* scripts.
*/
static class HadoopShellCommandProvider extends CommandProvider {
private final String hadoopHome;
private final String confDir;
HadoopShellCommandProvider(Configuration conf) throws IOException {
hadoopHome = conf.get("hbase.it.clustermanager.hadoop.home",
System.getenv("HADOOP_HOME"));
String tmp = conf.get("hbase.it.clustermanager.hadoop.conf.dir",
System.getenv("HADOOP_CONF_DIR"));
if (hadoopHome == null) {
throw new IOException("Hadoop home configuration parameter i.e. " +
"'hbase.it.clustermanager.hadoop.home' is not configured properly.");
}
if (tmp != null) {
confDir = String.format("--config %s", tmp);
} else {
confDir = "";
}
}
@Override
public String getCommand(ServiceType service, Operation op) {
return String.format("%s/sbin/hadoop-daemon.sh %s %s %s", hadoopHome, confDir,
op.toString().toLowerCase(Locale.ROOT), service);
}
}
/**
* CommandProvider to manage the service using bin/zk* scripts.
*/
static class ZookeeperShellCommandProvider extends CommandProvider {
private final String zookeeperHome;
private final String confDir;
ZookeeperShellCommandProvider(Configuration conf) throws IOException {
zookeeperHome = conf.get("hbase.it.clustermanager.zookeeper.home",
System.getenv("ZOOBINDIR"));
String tmp = conf.get("hbase.it.clustermanager.zookeeper.conf.dir",
System.getenv("ZOOCFGDIR"));
if (zookeeperHome == null) {
throw new IOException("ZooKeeper home configuration parameter i.e. " +
"'hbase.it.clustermanager.zookeeper.home' is not configured properly.");
}
if (tmp != null) {
confDir = String.format("--config %s", tmp);
} else {
confDir = "";
}
}
@Override
public String getCommand(ServiceType service, Operation op) {
return String.format("%s/bin/zkServer.sh %s", zookeeperHome, op.toString().toLowerCase(Locale.ROOT));
}
@Override
protected String findPidCommand(ServiceType service) {
return String.format("ps ux | grep %s | grep -v grep | tr -s ' ' | cut -d ' ' -f2",
service);
}
}
public HBaseClusterManager() {
}
protected CommandProvider getCommandProvider(ServiceType service) throws IOException {
switch (service) {
case HADOOP_DATANODE:
case HADOOP_NAMENODE:
return new HadoopShellCommandProvider(getConf());
case ZOOKEEPER_SERVER:
return new ZookeeperShellCommandProvider(getConf());
default:
return new HBaseShellCommandProvider(getConf());
}
}
/**
* Execute the given command on the host using SSH
* @return pair of exit code and command output
* @throws IOException if something goes wrong.
*/
private Pair<Integer, String> exec(String hostname, ServiceType service, String... cmd)
throws IOException {
LOG.info("Executing remote command: " + StringUtils.join(cmd, " ") + " , hostname:" + hostname);
RemoteShell shell = new RemoteShell(hostname, getServiceUser(service), cmd);
try {
shell.execute();
} catch (Shell.ExitCodeException ex) {
// capture the stdout of the process as well.
String output = shell.getOutput();
// add output for the ExitCodeException.
throw new Shell.ExitCodeException(ex.getExitCode(), "stderr: " + ex.getMessage()
+ ", stdout: " + output);
}
LOG.info("Executed remote command, exit code:" + shell.getExitCode()
+ " , output:" + shell.getOutput());
return new Pair<>(shell.getExitCode(), shell.getOutput());
}
private Pair<Integer, String> execWithRetries(String hostname, ServiceType service, String... cmd)
throws IOException {
RetryCounter retryCounter = retryCounterFactory.create();
while (true) {
try {
return exec(hostname, service, cmd);
} catch (IOException e) {
retryOrThrow(retryCounter, e, hostname, cmd);
}
try {
retryCounter.sleepUntilNextRetry();
} catch (InterruptedException ex) {
// ignore
LOG.warn("Sleep Interrupted:" + ex);
}
}
}
private <E extends Exception> void retryOrThrow(RetryCounter retryCounter, E ex,
String hostname, String[] cmd) throws E {
if (retryCounter.shouldRetry()) {
LOG.warn("Remote command: " + StringUtils.join(cmd, " ") + " , hostname:" + hostname
+ " failed at attempt " + retryCounter.getAttemptTimes() + ". Retrying until maxAttempts: "
+ retryCounter.getMaxAttempts() + ". Exception: " + ex.getMessage());
return;
}
throw ex;
}
private void exec(String hostname, ServiceType service, Operation op) throws IOException {
execWithRetries(hostname, service, getCommandProvider(service).getCommand(service, op));
}
@Override
public void start(ServiceType service, String hostname, int port) throws IOException {
exec(hostname, service, Operation.START);
}
@Override
public void stop(ServiceType service, String hostname, int port) throws IOException {
exec(hostname, service, Operation.STOP);
}
@Override
public void restart(ServiceType service, String hostname, int port) throws IOException {
exec(hostname, service, Operation.RESTART);
}
public void signal(ServiceType service, String signal, String hostname) throws IOException {
execWithRetries(hostname, service, getCommandProvider(service).signalCommand(service, signal));
}
@Override
public boolean isRunning(ServiceType service, String hostname, int port) throws IOException {
String ret = execWithRetries(hostname, service,
getCommandProvider(service).isRunningCommand(service)).getSecond();
return ret.length() > 0;
}
@Override
public void kill(ServiceType service, String hostname, int port) throws IOException {
signal(service, SIGKILL, hostname);
}
@Override
public void suspend(ServiceType service, String hostname, int port) throws IOException {
signal(service, SIGSTOP, hostname);
}
@Override
public void resume(ServiceType service, String hostname, int port) throws IOException {
signal(service, SIGCONT, hostname);
}
}
|
[
"\"HBASE_SSH_OPTS\"",
"\"HBASE_HOME\"",
"\"HBASE_CONF_DIR\"",
"\"HADOOP_HOME\"",
"\"HADOOP_CONF_DIR\"",
"\"ZOOBINDIR\"",
"\"ZOOCFGDIR\""
] |
[] |
[
"ZOOCFGDIR",
"HBASE_CONF_DIR",
"HADOOP_CONF_DIR",
"HBASE_SSH_OPTS",
"HADOOP_HOME",
"HBASE_HOME",
"ZOOBINDIR"
] |
[]
|
["ZOOCFGDIR", "HBASE_CONF_DIR", "HADOOP_CONF_DIR", "HBASE_SSH_OPTS", "HADOOP_HOME", "HBASE_HOME", "ZOOBINDIR"]
|
java
| 7 | 0 | |
models/AI-Model-Zoo/VAI-1.3-Model-Zoo-Code/PyTorch/pt_facereid-large_96_96_515M_1.3/code/test.py
|
# Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import logging
import os
import sys
if os.environ["W_QUANT"]=='1':
import pytorch_nndct
from pytorch_nndct.apis import torch_quantizer, dump_xmodel
import torch
from torch import nn
import network
from core.config import opt, update_config
from core.loader import get_data_provider
from core.solver import Solver
# from ipdb import set_trace  # debugging helper; unused, so commented out to avoid a hard ipdb dependency
FORMAT = '[%(levelname)s]: %(message)s'
logging.basicConfig(
level=logging.INFO,
format=FORMAT,
stream=sys.stdout
)
def test(args):
logging.info('======= args ======')
logging.info(args)
logging.info('======= end ======')
_, test_data, num_query, num_class = get_data_provider(opt, args.dataset, args.dataset_root)
net = getattr(network, opt.network.name)(num_class, opt.network.last_stride)
checkpoint = torch.load(args.load_model, map_location=opt.device)
if args.load_model[-3:] == 'tar':
checkpoint = torch.load(args.load_model)['state_dict']
for i in checkpoint:
if 'classifier' in i or 'fc' in i:
continue
net.state_dict()[i].copy_(checkpoint[i])
else:
net.load_state_dict(torch.load(args.load_model))
logging.info('Load model checkpoint: {}'.format(args.load_model))
if args.device == 'gpu' and args.quant_mode=='float':
net = nn.DataParallel(net).to(opt.device)
net = net.to(opt.device)
resize_wh = opt.aug.resize_size
# dummy input matching the configured resize size, created on the selected device
x = torch.randn(1, 3, resize_wh[0], resize_wh[1]).to(opt.device)
if args.quant_mode == 'float':
quant_model = net
else:
quantizer = torch_quantizer(args.quant_mode, net, (x), output_dir=args.output_path, device=opt.device)
quant_model = quantizer.quant_model.to(opt.device)
quant_model.eval()
mod = Solver(opt, quant_model)
mod.test_func(test_data, num_query)
if args.quant_mode == 'calib':
quantizer.export_quant_config()
if args.quant_mode == 'test' and args.dump_xmodel:
dump_xmodel(output_dir=args.output_path, deploy_check=True)
def main():
parser = argparse.ArgumentParser(description='reid model testing')
parser.add_argument('--dataset', type=str, default = 'facereid',
help = 'set the dataset for test')
parser.add_argument('--dataset_root', type=str, default = '../data/face_reid',
help = 'dataset path')
parser.add_argument('--config_file', type=str, required=True,
help='Optional config file for params')
parser.add_argument('--load_model', type=str, required=True,
help='load trained model for testing')
parser.add_argument('--device', type=str, default='gpu', choices=['gpu','cpu'],
help='set running device')
parser.add_argument('--quant_mode', default='calib', choices=['float', 'calib', 'test'],
help='quantization mode. 0: no quantization, evaluate float model, calib: quantize, test: evaluate quantized model')
parser.add_argument('--dump_xmodel', dest='dump_xmodel', action='store_true',
help='dump xmodel after test')
parser.add_argument('--batch_size',default=32, type=int)
parser.add_argument('--output_path', default='quantize_result')
args = parser.parse_args()
update_config(args.config_file)
if args.dump_xmodel:
args.batch_size=1
args.device='cpu'
opt.test.batch_size = args.batch_size
if args.device=='gpu':
opt.device = torch.device('cuda')
else:
opt.device = torch.device('cpu')
test(args)
if __name__ == '__main__':
main()
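# Illustrative invocation (paths are placeholders, not part of the original script):
#   W_QUANT=1 python test.py --config_file configs/facereid.yml --load_model weights/model.pth.tar \
#       --quant_mode calib --device gpu
# W_QUANT must be set to something (e.g. 0 or 1) because the import guard at the top reads
# os.environ["W_QUANT"] unconditionally and would raise KeyError otherwise.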
|
[] |
[] |
[
"W_QUANT"
] |
[]
|
["W_QUANT"]
|
python
| 1 | 0 | |
rulemgt/src/main/java/org/onap/holmes/rulemgt/RuleActiveApp.java
|
/**
* Copyright 2017-2020 ZTE Corporation.
* <p>
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.onap.holmes.rulemgt;
import io.dropwizard.setup.Environment;
import org.onap.holmes.common.config.MicroServiceConfig;
import org.onap.holmes.common.dropwizard.ioc.bundle.IOCApplication;
import org.onap.holmes.common.utils.CommonUtils;
import org.onap.holmes.common.utils.transactionid.TransactionIdFilter;
import org.onap.holmes.rulemgt.dcae.DcaeConfigurationPolling;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import javax.servlet.DispatcherType;
import java.util.EnumSet;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
public class RuleActiveApp extends IOCApplication<RuleAppConfig> {
public static void main(String[] args) throws Exception {
new RuleActiveApp().run(args);
}
@Override
public void run(RuleAppConfig configuration, Environment environment) throws Exception {
super.run(configuration, environment);
if (!"1".equals(System.getenv("TESTING"))) {
ScheduledExecutorService service = Executors.newSingleThreadScheduledExecutor();
service.scheduleAtFixedRate(
new DcaeConfigurationPolling(CommonUtils.getEnv(MicroServiceConfig.HOSTNAME)), 0,
DcaeConfigurationPolling.POLLING_PERIOD, TimeUnit.MILLISECONDS);
}
environment.servlets().addFilter("customFilter", new TransactionIdFilter()).addMappingForUrlPatterns(EnumSet
.allOf(DispatcherType.class), true, "/*");
}
}
|
[
"\"TESTING\""
] |
[] |
[
"TESTING"
] |
[]
|
["TESTING"]
|
java
| 1 | 0 | |
pbx_gs_python_utils/utils/Assert.py
|
import re
from pbx_gs_python_utils.utils.Misc import Misc
class Assert:
def __init__(self ,target):
self.target = target
def is_class(self, name):
assert Misc.class_name(self.target) in name
def contains(self, text):
assert text in self.target
def field_is_equal(self, field_name, expected_value=None):
field_value = self.target.get(field_name)
assert field_value == expected_value , "{0} != {1}".format(field_value, expected_value)
return self
def is_bigger_than(self, value):
if type(self.target) is list:
list_len = len(self.target)
assert list_len > value , "array with len {0} was not bigger than {1}".format(list_len, value)
else:
assert self.target > value , "value {0} was not bigger than {1}".format(self.target, value)
return self
def is_smaller_than(self, value):
if type(self.target) is list:
list_len = len(self.target)
assert list_len < value , "array with len {0} was not smaller than {1}".format(list_len, value)
else:
assert self.target < value , "value {0} was not smaller than {1}".format(self.target, value)
return self
def is_equal(self, to):
assert self.target == to
def match_regex(self, regex):
assert re.compile(regex).match(self.target) is not None
def size_is(self, to):
assert len(self.target) == to
def regex_not_match(self,regex):
assert re.compile(regex).match(self.target) is None
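# Illustrative usage (values are made up): the helpers chain where they return self, e.g.
#   Assert({"status": "ok"}).field_is_equal("status", "ok")
#   Assert([1, 2, 3]).is_bigger_than(2).is_smaller_than(5)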
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
termine-be/config/config.py
|
import json
import os
import pytz
def _bool_convert(value):
truthy = {"t", "true", "on", "y", "yes", "1", 1, 1.0, True}
falsy = {"f", "false", "off", "n", "no", "0", 0, 0.0, False}
if isinstance(value, str):
value = value.lower()
if value in truthy:
return True
if value in falsy:
return False
return bool(value)
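# Example behaviour (illustrative): _bool_convert("Yes") -> True, _bool_convert("0") -> False,
# and an unrecognised string such as "maybe" falls through to bool("maybe"), i.e. True.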
class Db:
if os.environ.get('DB_USERNAME') is not None:
db_username = os.environ.get('DB_USERNAME')
db_password = os.environ.get('DB_PASSWORD')
db_port = os.environ.get('DB_PORT', '5432')
db_host = os.environ.get('DB_HOST', 'localhost')
db_database = os.environ.get('DB_DATABASE', 'termine')
url = f"postgresql://{db_username}:{db_password}@{db_host}:{db_port}/{db_database}"
else:
url = os.environ.get(
"DB_URL", 'postgresql://postgres:example@localhost:5432/termine')
class Settings:
claim_timeout_min = int(os.environ.get("CLAIM_TIMEOUT_MIN", 5))
num_display_slots = int(os.environ.get("DISPLAY_SLOTS_COUNT", 150))
tz = pytz.timezone(os.environ.get("TERMINE_TIME_ZONE", 'Europe/Berlin'))
disable_auth_for_booking = _bool_convert(
os.environ.get("DISABLE_AUTH", False))
use_ldap = _bool_convert(os.environ.get("USE_LDAP", False))
jwt_key = os.environ.get("JWT_SECRET_KEY", "")
class Ldap:
url = os.environ.get("LDAP_URL", "")
user_dn = os.environ.get("LDAP_SYSTEM_DN", "")
user_pw = os.environ.get("LDAP_SYSTEM_USER_PW", "")
user_coupon_number = int(os.environ.get("LDAP_USER_COUPONS", 3))
search_base = os.environ.get("LDAP_SEARCH_BASE", "")
search_filter = os.environ.get("LDAP_SEARCH_FILTER", "")
search_attribute = os.environ.get("LDAP_ATTRIBUTE", "")
use_tls = _bool_convert(os.environ.get("LDAP_USE_TLS", False))
port = int(os.environ.get("LDAP_PORT", 389))
tls_port = int(os.environ.get("LDAP_TLS_PORT", 636))
class FrontendSettings:
_inst = None
@classmethod
def by_env(cls):
env_name = os.environ.get("ENVIRONMENT", "local")
with open(os.path.join("config", 'by_env', f'{env_name}.json')) as file:
frontend_conf = json.load(file)
return frontend_conf
@classmethod
def instance_by_env(cls):
if not cls._inst:
cls._inst = cls.by_env()
return cls._inst
@classmethod
def json_by_env(cls):
return json.dumps(cls.instance_by_env())
seed = os.environ.get("PASSWORD_HASH_SEED_DO_NOT_CHANGE", 'Wir sind SEEED')
|
[] |
[] |
[
"DB_HOST",
"DB_USERNAME",
"DB_PORT",
"LDAP_SEARCH_BASE",
"DISPLAY_SLOTS_COUNT",
"LDAP_ATTRIBUTE",
"DB_URL",
"JWT_SECRET_KEY",
"LDAP_USER_COUPONS",
"PASSWORD_HASH_SEED_DO_NOT_CHANGE",
"CLAIM_TIMEOUT_MIN",
"LDAP_TLS_PORT",
"ENVIRONMENT",
"LDAP_SYSTEM_USER_PW",
"LDAP_SEARCH_FILTER",
"LDAP_PORT",
"USE_LDAP",
"LDAP_URL",
"DB_PASSWORD",
"DB_DATABASE",
"LDAP_SYSTEM_DN",
"LDAP_USE_TLS",
"DISABLE_AUTH",
"TERMINE_TIME_ZONE"
] |
[]
|
["DB_HOST", "DB_USERNAME", "DB_PORT", "LDAP_SEARCH_BASE", "DISPLAY_SLOTS_COUNT", "LDAP_ATTRIBUTE", "DB_URL", "JWT_SECRET_KEY", "LDAP_USER_COUPONS", "PASSWORD_HASH_SEED_DO_NOT_CHANGE", "CLAIM_TIMEOUT_MIN", "LDAP_TLS_PORT", "ENVIRONMENT", "LDAP_SYSTEM_USER_PW", "LDAP_SEARCH_FILTER", "LDAP_PORT", "USE_LDAP", "LDAP_URL", "DB_PASSWORD", "DB_DATABASE", "LDAP_SYSTEM_DN", "LDAP_USE_TLS", "DISABLE_AUTH", "TERMINE_TIME_ZONE"]
|
python
| 24 | 0 |