filename (stringlengths 4-198) | content (stringlengths 25-939k) | environment (list) | variablearg (list) | constarg (list) | variableargjson (stringclasses 1 value) | constargjson (stringlengths 2-3.9k) | lang (stringclasses 3 values) | constargcount (float64, 0-129, ⌀) | variableargcount (float64, 0-0, ⌀) | sentence (stringclasses 1 value) |
---|---|---|---|---|---|---|---|---|---|---|
db/sql/sql.go | // Copyright (C) 2015 NTT Innovation Institute, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sql
import (
"context"
"database/sql"
"encoding/json"
"fmt"
"os"
"strconv"
"strings"
"time"
"github.com/cloudwan/gohan/db/options"
"github.com/cloudwan/gohan/db/pagination"
"github.com/cloudwan/gohan/db/transaction"
"github.com/cloudwan/gohan/extension/goext"
"github.com/cloudwan/gohan/metrics"
"github.com/cloudwan/gohan/schema"
"github.com/cloudwan/gohan/util"
"github.com/jmoiron/sqlx"
sq "github.com/lann/squirrel"
// Import mysql lib
_ "github.com/go-sql-driver/mysql"
// Import go-sqlite3 lib
_ "github.com/mattn/go-sqlite3"
// Import go-fakedb lib
_ "github.com/nati/go-fakedb"
"github.com/pkg/errors"
)
const retryDB = 50
const retryDBWait = 10
const (
configVersionColumnName = "config_version"
stateVersionColumnName = "state_version"
stateErrorColumnName = "state_error"
stateColumnName = "state"
stateMonitoringColumnName = "state_monitoring"
)
//DB is the SQL implementation of DB
type DB struct {
sqlType, connectionString string
handlers map[string]propertyHandler
DB *sqlx.DB
// options
options options.Options
}
//Transaction is the SQL implementation of Transaction
type Transaction struct {
transaction *sqlx.Tx
db *DB
closed bool
isolationLevel transaction.Type
}
type TxInterface transaction.Transaction
func mapTxOptions(options *transaction.TxOptions) (*sql.TxOptions, error) {
sqlOptions := &sql.TxOptions{}
switch options.IsolationLevel {
case transaction.ReadCommited:
sqlOptions.Isolation = sql.LevelReadCommitted
case transaction.ReadUncommitted:
sqlOptions.Isolation = sql.LevelReadUncommitted
case transaction.RepeatableRead:
sqlOptions.Isolation = sql.LevelRepeatableRead
case transaction.Serializable:
sqlOptions.Isolation = sql.LevelSerializable
default:
msg := fmt.Sprintf("Unknown transaction isolation level: %s", options.IsolationLevel)
log.Error(msg)
return nil, fmt.Errorf(msg)
}
return sqlOptions, nil
}
//NewDB constructor
func NewDB(options options.Options) *DB {
handlers := make(map[string]propertyHandler)
//TODO(nati) dynamic configuration
handlers["string"] = &stringHandler{}
handlers["number"] = &numberHandler{}
handlers["integer"] = &integerHandler{}
handlers["object"] = &jsonHandler{}
handlers["array"] = &jsonHandler{}
handlers["boolean"] = &boolHandler{}
return &DB{handlers: handlers, options: options}
}
//Options returns DB options
func (db *DB) Options() options.Options {
return db.options
}
//propertyHandler handles encoding, decoding and SQL data types for each property type
type propertyHandler interface {
encode(*schema.Property, interface{}) (interface{}, error)
decode(*schema.Property, interface{}) (interface{}, error)
dataType(*schema.Property) string
}
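// Illustrative sketch only (not part of the original handler set): a hypothetical
// handler showing how propertyHandler can be extended for another property type.
// The name and the "datetime" column type are assumptions for illustration; it
// could be registered in NewDB via handlers["datetime"] = &exampleDatetimeHandler{}.
type exampleDatetimeHandler struct {
defaultHandler
}
func (handler *exampleDatetimeHandler) dataType(property *schema.Property) string {
// use a dedicated column type instead of the generic varchar/text fallback
return "datetime"
}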
type defaultHandler struct {
}
func (handler *defaultHandler) encode(property *schema.Property, data interface{}) (interface{}, error) {
return data, nil
}
func (handler *defaultHandler) decode(property *schema.Property, data interface{}) (interface{}, error) {
return data, nil
}
func (handler *defaultHandler) dataType(property *schema.Property) (res string) {
// TODO(marcin) extend types for schema. Here is pretty ugly guessing
if property.ID == "id" || property.Relation != "" || property.Unique {
res = "varchar(255)"
} else {
res = "text"
}
return
}
type stringHandler struct {
defaultHandler
}
func (handler *stringHandler) encode(property *schema.Property, data interface{}) (interface{}, error) {
switch t := data.(type) {
case goext.MaybeString:
if t.HasValue() {
return t.Value, nil
}
return nil, nil
}
return data, nil
}
func (handler *stringHandler) decode(property *schema.Property, data interface{}) (interface{}, error) {
if bytes, ok := data.([]byte); ok {
return string(bytes), nil
}
return data, nil
}
type boolHandler struct{}
func (handler *boolHandler) encode(property *schema.Property, data interface{}) (interface{}, error) {
switch t := data.(type) {
case goext.MaybeBool:
if t.HasValue() {
return t.Value, nil
}
return nil, nil
}
return data, nil
}
func (handler *boolHandler) decode(property *schema.Property, data interface{}) (res interface{}, err error) {
// different SQL drivers encode result with different type
// so we need to do manual checks
if data == nil {
return nil, nil
}
switch t := data.(type) {
default:
err = fmt.Errorf("unknown type %T", t)
return
case []uint8: // mysql
res, err = strconv.ParseUint(string(t), 10, 64)
res = res.(uint64) != 0
case int64: //apparently also mysql
res = data.(int64) != 0
case bool: // sqlite3
res = data
}
return
}
func (handler *boolHandler) dataType(property *schema.Property) string {
return "boolean"
}
type numberHandler struct{}
func (handler *numberHandler) encode(property *schema.Property, data interface{}) (interface{}, error) {
return data, nil
}
func (handler *numberHandler) decode(property *schema.Property, data interface{}) (res interface{}, err error) {
if data == nil {
return nil, nil
}
switch t := data.(type) {
default:
return nil, fmt.Errorf("number: unknown type %T", t)
case []uint8: // mysql
res, _ = strconv.ParseFloat(string(t), 64)
case float64: // sqlite3
res = float64(t)
case uint64: // sqlite3
res = float64(t)
case goext.MaybeFloat:
if t.HasValue() {
res = t.Value
} else {
res = nil
}
}
return
}
func (handler *numberHandler) dataType(property *schema.Property) string {
return "real"
}
type integerHandler struct{}
func (handler *integerHandler) encode(property *schema.Property, data interface{}) (interface{}, error) {
switch t := data.(type) {
case goext.MaybeInt:
if t.HasValue() {
return t.Value, nil
}
return nil, nil
}
return data, nil
}
func (handler *integerHandler) decode(property *schema.Property, data interface{}) (res interface{}, err error) {
// different SQL drivers encode result with different type
// so we need to do manual checks
if data == nil {
return nil, nil
}
switch t := data.(type) {
default:
return data, nil
case []uint8: // mysql
res, _ = strconv.ParseInt(string(t), 10, 64)
res = int(res.(int64))
case int64: // sqlite3
res = int(t)
}
return
}
func (handler *integerHandler) dataType(property *schema.Property) string {
return "numeric"
}
type jsonHandler struct {
}
func (handler *jsonHandler) encode(property *schema.Property, data interface{}) (interface{}, error) {
bytes, err := json.Marshal(data)
//TODO(nati) should handle encoding err
if err != nil {
return nil, err
}
return string(bytes), nil
}
func (handler *jsonHandler) decode(property *schema.Property, data interface{}) (interface{}, error) {
if bytes, ok := data.([]byte); ok {
var ret interface{}
err := json.Unmarshal(bytes, &ret)
return ret, err
}
return data, nil
}
func (handler *jsonHandler) dataType(property *schema.Property) string {
return "text"
}
func quote(str string) string {
return fmt.Sprintf("`%s`", str)
}
func foreignKeyName(fromTable, fromProperty, toTable, toProperty string) string {
name := fmt.Sprintf("%s_%s_%s_%s", fromTable, fromProperty, toTable, toProperty)
if len(name) > 64 {
diff := len(name) - 64
return name[diff:]
}
return name
}
func (db *DB) measureTime(timeStarted time.Time, action string) {
metrics.UpdateTimer(timeStarted, "db.%s", action)
}
func (db *DB) updateCounter(delta int64, counter string) {
metrics.UpdateCounter(delta, "db.%s", counter)
}
//Connect connects to the db
func (db *DB) Connect(sqlType, conn string, maxOpenConn int) (err error) {
defer db.measureTime(time.Now(), "connect")
db.sqlType = sqlType
db.connectionString = conn
rawDB, err := sql.Open(db.sqlType, db.connectionString)
if err != nil {
return err
}
rawDB.SetMaxOpenConns(maxOpenConn)
rawDB.SetMaxIdleConns(maxOpenConn)
db.DB = sqlx.NewDb(rawDB, db.sqlType)
if db.sqlType == "sqlite3" {
db.DB.Exec("PRAGMA foreign_keys = ON;")
}
for i := 0; i < retryDB; i++ {
err = db.DB.Ping()
if err == nil {
return nil
}
time.Sleep(retryDBWait * time.Second)
log.Info("Retrying db connection... (%s)", err)
}
return fmt.Errorf("Failed to connect db")
}
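// Usage sketch (illustrative only; the driver name, DSN and option value are
// placeholders, not defaults taken from this project):
//
//	db := NewDB(opts) // opts is whatever options.Options the caller already has
//	if err := db.Connect("mysql", "user:pass@tcp(127.0.0.1:3306)/gohan", 100); err != nil {
//		// handle the connection error
//	}
//	defer db.Close()
//	tx, err := db.Begin() // remember to Commit() or Close() the transaction
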
// Close closes db connection
func (db *DB) Close() {
defer db.measureTime(time.Now(), "close")
db.DB.Close()
}
//Begin starts new transaction
func (db *DB) Begin() (tx transaction.Transaction, err error) {
defer db.measureTime(time.Now(), "begin")
db.updateCounter(1, "begin.waiting")
defer db.updateCounter(-1, "begin.waiting")
var transx Transaction
rawTx, err := db.DB.Beginx()
if err != nil {
db.updateCounter(1, "begin.failed")
return nil, err
}
db.updateCounter(1, "active")
if db.sqlType == "sqlite3" {
rawTx.Exec("PRAGMA foreign_keys = ON;")
}
transx = Transaction{
db: db,
transaction: rawTx,
closed: false,
isolationLevel: transaction.RepeatableRead,
}
if os.Getenv("FUZZY_DB_TX") == "true" {
log.Notice("FUZZY_DB_TX is enabled")
tx = &transaction.FuzzyTransaction{Tx: transaction.Transaction(&transx)}
} else {
tx = MakeCachedTransaction(&transx)
}
log.Debug("[%p] Created transaction %#v, isolation level: %s", rawTx, rawTx, transx.GetIsolationLevel())
return
}
//BeginTx starts new transaction with given transaction options
func (db *DB) BeginTx(ctx context.Context, options *transaction.TxOptions) (tx transaction.Transaction, err error) {
defer db.measureTime(time.Now(), "begin_tx")
db.updateCounter(1, "begin.waiting")
defer db.updateCounter(-1, "begin.waiting")
var transx Transaction
sqlOptions, err := mapTxOptions(options)
if err != nil {
return nil, err
}
rawTx, err := db.DB.BeginTxx(ctx, sqlOptions)
if err != nil {
db.updateCounter(1, "begin.failed")
return nil, err
}
db.updateCounter(1, "active")
if db.sqlType == "sqlite3" {
rawTx.Exec("PRAGMA foreign_keys = ON;")
}
transx = Transaction{
db: db,
transaction: rawTx,
closed: false,
isolationLevel: options.IsolationLevel,
}
if transx.isolationLevel == transaction.RepeatableRead || transx.isolationLevel == transaction.Serializable {
tx = MakeCachedTransaction(&transx)
} else {
tx = &transx
}
log.Debug("[%p] Created transaction %#v, isolation level: %s", rawTx, rawTx, transx.GetIsolationLevel())
return
}
func (db *DB) genTableCols(s *schema.Schema, cascade bool, exclude []string) ([]string, []string, []string) {
var cols []string
var relations []string
var indices []string
schemaManager := schema.GetManager()
for _, property := range s.Properties {
if util.ContainsString(exclude, property.ID) {
continue
}
handler := db.handlers[property.Type]
sqlDataType := property.SQLType
sqlDataProperties := ""
if db.sqlType == "sqlite3" {
sqlDataType = strings.Replace(sqlDataType, "auto_increment", "autoincrement", 1)
}
if sqlDataType == "" {
sqlDataType = handler.dataType(&property)
if property.ID == "id" {
sqlDataProperties = " primary key"
}
}
if property.ID != "id" {
if property.Nullable {
sqlDataProperties = " null"
} else {
sqlDataProperties = " not null"
}
if property.Unique {
sqlDataProperties = " unique"
}
}
query := "`" + property.ID + "` " + sqlDataType + sqlDataProperties
cols = append(cols, query)
if property.Relation != "" {
foreignSchema, _ := schemaManager.Schema(property.Relation)
if foreignSchema != nil {
cascadeString := ""
if cascade ||
property.OnDeleteCascade ||
(property.Relation == s.Parent && s.OnParentDeleteCascade) {
cascadeString = "on delete cascade"
}
relationColumn := "id"
if property.RelationColumn != "" {
relationColumn = property.RelationColumn
}
relations = append(relations, fmt.Sprintf("constraint %s foreign key(`%s`) REFERENCES `%s`(%s) %s",
quote(foreignKeyName(s.GetDbTableName(), property.ID, foreignSchema.GetDbTableName(), relationColumn)),
property.ID, foreignSchema.GetDbTableName(), relationColumn, cascadeString))
}
}
if property.Indexed {
prefix := ""
// mysql cannot index TEXT without prefix spec, while SQLite3 doesn't allow specifying key size
if sqlDataType == "text" && db.sqlType == "mysql" {
prefix = "(255)"
}
indices = append(indices, fmt.Sprintf("CREATE INDEX %s_%s_idx ON `%s`(`%s`%s);", s.Plural, property.ID,
s.Plural, property.ID, prefix))
}
}
for _, index := range s.Indexes {
quotedColumns := make([]string, len(index.Columns))
for i, column := range index.Columns {
quotedColumns[i] = quote(column)
}
if db.sqlType == "sqlite3" && (index.Type == schema.Spatial || index.Type == schema.FullText) {
log.Error("index %s won't be created since sqlite doesn't support spatial and fulltext index types", index.Name)
continue
}
createIndexQuery := fmt.Sprintf(
"CREATE %s INDEX %s ON %s(%s);",
index.Type, index.Name, quote(s.GetDbTableName()), strings.Join(quotedColumns, ","))
indices = append(indices, createIndexQuery)
}
return cols, relations, indices
}
//AlterTableDef generates alter table sql
func (db *DB) AlterTableDef(s *schema.Schema, cascade bool) (string, []string, error) {
var existing []string
rows, err := db.DB.Query(fmt.Sprintf("select * from `%s` limit 1;", s.GetDbTableName()))
if err == nil {
defer rows.Close()
existing, err = rows.Columns()
}
if err != nil {
return "", nil, err
}
cols, relations, indices := db.genTableCols(s, cascade, existing)
cols = append(cols, relations...)
if len(cols) == 0 {
return "", nil, nil
}
alterTable := fmt.Sprintf("alter table`%s` add (%s);\n", s.GetDbTableName(), strings.Join(cols, ","))
log.Debug("Altering table: " + alterTable)
log.Debug("Altering indices: " + strings.Join(indices, ""))
return alterTable, indices, nil
}
//GenTableDef generates create table sql
func (db *DB) GenTableDef(s *schema.Schema, cascade bool) (string, []string) {
cols, relations, indices := db.genTableCols(s, cascade, nil)
if s.StateVersioning() {
cols = append(cols, quote(configVersionColumnName)+"int not null default 1")
cols = append(cols, quote(stateVersionColumnName)+"int not null default 0")
cols = append(cols, quote(stateErrorColumnName)+"text not null default ''")
cols = append(cols, quote(stateColumnName)+"text not null default ''")
cols = append(cols, quote(stateMonitoringColumnName)+"text not null default ''")
}
cols = append(cols, relations...)
tableSQL := fmt.Sprintf("create table `%s` (%s);\n", s.GetDbTableName(), strings.Join(cols, ","))
log.Debug("Creating table: " + tableSQL)
log.Debug("Creating indices: " + strings.Join(indices, ""))
return tableSQL, indices
}
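// For example (illustrative; schema and property names are placeholders): for a
// schema with table "pets", a string "id" property and a non-nullable string
// "description" property, GenTableDef produces roughly
//
//	create table `pets` (`id` varchar(255) primary key,`description` text not null);
//
// plus any CREATE INDEX statements collected for indexed properties.
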
//RegisterTable creates table in the db
func (db *DB) RegisterTable(s *schema.Schema, cascade, migrate bool) error {
if s.IsAbstract() {
return nil
}
tableDef, indices, err := db.AlterTableDef(s, cascade)
if !migrate {
if tableDef != "" || (indices != nil && len(indices) > 0) {
return fmt.Errorf("needs migration, run \"gohan migrate\"")
}
}
if err != nil {
tableDef, indices = db.GenTableDef(s, cascade)
}
if tableDef != "" {
if _, err = db.DB.Exec(tableDef); err != nil {
return errors.Errorf("error when exec table stmt: '%s': %s", tableDef, err)
}
}
for _, indexSQL := range indices {
if _, err = db.DB.Exec(indexSQL); err != nil {
return errors.Errorf("error when exec index stmt: '%s': %s", indexSQL, err)
}
}
return err
}
//DropTable drops the table for the given schema
func (db *DB) DropTable(s *schema.Schema) error {
if s.IsAbstract() {
return nil
}
sql := fmt.Sprintf("drop table if exists %s\n", quote(s.GetDbTableName()))
_, err := db.DB.Exec(sql)
return err
}
func escapeID(ID string) string {
return strings.Replace(ID, "-", "_escape_", -1)
}
func (tx *Transaction) logQuery(sql string, args ...interface{}) {
sqlFormat := strings.Replace(sql, "?", "%s", -1)
query := fmt.Sprintf(sqlFormat, args...)
log.Debug("[%p] Executing SQL query '%s'", tx.transaction, query)
}
func (tx *Transaction) measureTime(timeStarted time.Time, schemaId, action string) {
metrics.UpdateTimer(timeStarted, "tx.%s.%s", schemaId, action)
}
func (tx *Transaction) Exec(sql string, args ...interface{}) error {
return tx.ExecContext(context.Background(), sql, args...)
}
// Exec executes sql in transaction
func (tx *Transaction) ExecContext(ctx context.Context, sql string, args ...interface{}) error {
defer tx.measureTime(time.Now(), "unknown_schema", "exec")
return tx.exec(ctx, sql, args...)
}
func (tx *Transaction) exec(ctx context.Context, sql string, args ...interface{}) error {
tx.logQuery(sql, args...)
_, err := tx.transaction.ExecContext(ctx, sql, args...)
return err
}
func (tx *Transaction) Create(resource *schema.Resource) error {
return tx.CreateContext(context.Background(), resource)
}
//CreateContext creates a resource in the db
func (tx *Transaction) CreateContext(ctx context.Context, resource *schema.Resource) error {
defer tx.measureTime(time.Now(), resource.Schema().ID, "create")
var cols []string
var values []interface{}
db := tx.db
s := resource.Schema()
data := resource.Data()
q := sq.Insert(quote(s.GetDbTableName()))
for _, attr := range s.Properties {
//TODO(nati) support optional value
if _, ok := data[attr.ID]; ok {
handler := db.handler(&attr)
cols = append(cols, quote(attr.ID))
encoded, err := handler.encode(&attr, data[attr.ID])
if err != nil {
return fmt.Errorf("SQL Create encoding error: %s", err)
}
values = append(values, encoded)
}
}
q = q.Columns(cols...).Values(values...)
sql, args, err := q.ToSql()
if err != nil {
return err
}
return tx.exec(ctx, sql, args...)
}
func (tx *Transaction) updateQuery(resource *schema.Resource) (sq.UpdateBuilder, error) {
s := resource.Schema()
db := tx.db
data := resource.Data()
q := sq.Update(quote(s.GetDbTableName()))
for _, attr := range s.Properties {
//TODO(nati) support optional value
if _, ok := data[attr.ID]; ok {
handler := db.handler(&attr)
encoded, err := handler.encode(&attr, data[attr.ID])
if err != nil {
return q, fmt.Errorf("SQL Update encoding error: %s", err)
}
q = q.Set(quote(attr.ID), encoded)
}
}
if s.Parent != "" {
q = q.Set(s.ParentSchemaPropertyID(), resource.ParentID())
}
return q, nil
}
func (tx *Transaction) Update(resource *schema.Resource) error {
return tx.UpdateContext(context.Background(), resource)
}
//UpdateContext updates a resource in the db
func (tx *Transaction) UpdateContext(ctx context.Context, resource *schema.Resource) error {
defer tx.measureTime(time.Now(), resource.Schema().ID, "update")
q, err := tx.updateQuery(resource)
if err != nil {
return err
}
sql, args, err := q.ToSql()
if err != nil {
return err
}
if resource.Schema().StateVersioning() {
sql += ", `" + configVersionColumnName + "` = `" + configVersionColumnName + "` + 1"
}
sql += " WHERE id = ?"
args = append(args, resource.ID())
return tx.exec(ctx, sql, args...)
}
func (tx *Transaction) StateUpdate(resource *schema.Resource, state *transaction.ResourceState) error {
return tx.StateUpdateContext(context.Background(), resource, state)
}
//StateUpdateContext updates the resource state
func (tx *Transaction) StateUpdateContext(ctx context.Context, resource *schema.Resource, state *transaction.ResourceState) error {
defer tx.measureTime(time.Now(), resource.Schema().ID, "state_update")
q, err := tx.updateQuery(resource)
if err != nil {
return err
}
if resource.Schema().StateVersioning() && state != nil {
q = q.Set(quote(stateVersionColumnName), state.StateVersion)
q = q.Set(quote(stateErrorColumnName), state.Error)
q = q.Set(quote(stateColumnName), state.State)
q = q.Set(quote(stateMonitoringColumnName), state.Monitoring)
}
q = q.Where(sq.Eq{"id": resource.ID()})
sql, args, err := q.ToSql()
if err != nil {
return err
}
return tx.exec(ctx, sql, args...)
}
func (tx *Transaction) Delete(s *schema.Schema, resourceID interface{}) error {
return tx.DeleteContext(context.Background(), s, resourceID)
}
//DeleteContext deletes a resource from the db
func (tx *Transaction) DeleteContext(ctx context.Context, s *schema.Schema, resourceID interface{}) error {
defer tx.measureTime(time.Now(), s.ID, "delete")
sql, args, err := sq.Delete(quote(s.GetDbTableName())).Where(sq.Eq{"id": resourceID}).ToSql()
if err != nil {
return err
}
return tx.exec(ctx, sql, args...)
}
func (db *DB) handler(property *schema.Property) propertyHandler {
handler, ok := db.handlers[property.Type]
if ok {
return handler
}
return &defaultHandler{}
}
func makeColumnID(tableName string, property schema.Property) string {
return fmt.Sprintf("%s__%s", tableName, property.ID)
}
func makeColumn(tableName string, property schema.Property) string {
return fmt.Sprintf("%s.%s", tableName, quote(property.ID))
}
func makeAliasTableName(tableName string, property schema.Property) string {
return fmt.Sprintf("%s__%s", tableName, property.RelationProperty)
}
// MakeColumns generates an array that has Gohan style column names
func MakeColumns(s *schema.Schema, tableName string, fields []string, join bool) []string {
manager := schema.GetManager()
var include map[string]bool
if fields != nil {
include = make(map[string]bool)
for _, f := range fields {
include[f] = true
}
}
var cols []string
for _, property := range s.Properties {
if property.RelationProperty != "" && join {
relatedSchema, ok := manager.Schema(property.Relation)
if !ok {
panic(fmt.Sprintf("missing schema %s", property.Relation))
}
aliasTableName := makeAliasTableName(tableName, property)
cols = append(cols, MakeColumns(relatedSchema, aliasTableName, fields, true)...)
}
if include != nil && !include[normField(property.ID, s.ID)] {
continue
}
cols = append(cols, makeColumn(tableName, property)+" as "+quote(makeColumnID(tableName, property)))
}
return cols
}
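// For example (illustrative; table and property names are placeholders): for
// table "pets" and property "name", MakeColumns emits the aliased column
//
//	pets.`name` as `pets__name`
//
// and, when join is true, it recurses into related schemas using the alias
// table name "<table>__<relationProperty>" built by makeAliasTableName.
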
func makeStateColumns(s *schema.Schema) (cols []string) {
dbTableName := s.GetDbTableName()
cols = append(cols, dbTableName+"."+configVersionColumnName+" as "+quote(configVersionColumnName))
cols = append(cols, dbTableName+"."+stateVersionColumnName+" as "+quote(stateVersionColumnName))
cols = append(cols, dbTableName+"."+stateErrorColumnName+" as "+quote(stateErrorColumnName))
cols = append(cols, dbTableName+"."+stateColumnName+" as "+quote(stateColumnName))
cols = append(cols, dbTableName+"."+stateMonitoringColumnName+" as "+quote(stateMonitoringColumnName))
return cols
}
func makeJoin(s *schema.Schema, tableName string, q sq.SelectBuilder) sq.SelectBuilder {
manager := schema.GetManager()
for _, property := range s.Properties {
if property.RelationProperty == "" {
continue
}
relatedSchema, _ := manager.Schema(property.Relation)
aliasTableName := makeAliasTableName(tableName, property)
q = q.LeftJoin(
fmt.Sprintf("%s as %s on %s.%s = %s.id", quote(relatedSchema.GetDbTableName()), quote(aliasTableName),
quote(tableName), quote(property.ID), quote(aliasTableName)))
q = makeJoin(relatedSchema, aliasTableName, q)
}
return q
}
func decodeState(data map[string]interface{}, state *transaction.ResourceState) error {
var ok bool
state.ConfigVersion, ok = data[configVersionColumnName].(int64)
if !ok {
return fmt.Errorf("Wrong state column %s returned from query", configVersionColumnName)
}
state.StateVersion, ok = data[stateVersionColumnName].(int64)
if !ok {
return fmt.Errorf("Wrong state column %s returned from query", stateVersionColumnName)
}
stateError, ok := data[stateErrorColumnName].([]byte)
if !ok {
return fmt.Errorf("Wrong state column %s returned from query", stateErrorColumnName)
}
state.Error = string(stateError)
stateState, ok := data[stateColumnName].([]byte)
if !ok {
return fmt.Errorf("Wrong state column %s returned from query", stateColumnName)
}
state.State = string(stateState)
stateMonitoring, ok := data[stateMonitoringColumnName].([]byte)
if !ok {
return fmt.Errorf("Wrong state column %s returned from query", stateMonitoringColumnName)
}
state.Monitoring = string(stateMonitoring)
return nil
}
//normFields applies normField to all the fields.
func normFields(fields []string, s *schema.Schema) []string {
if fields != nil {
for i, f := range fields {
fields[i] = normField(f, s.ID)
}
}
return fields
}
//normField returns field prefixed with schema ID.
func normField(field, schemaID string) string {
if strings.Contains(field, ".") {
return field
}
return fmt.Sprintf("%s.%s", schemaID, field)
}
type selectContext struct {
schema *schema.Schema
filter transaction.Filter
fields []string
join bool
paginator *pagination.Paginator
}
func buildSelect(sc *selectContext) (string, []interface{}, error) {
t := sc.schema.GetDbTableName()
cols := MakeColumns(sc.schema, t, sc.fields, sc.join)
q := sq.Select(cols...).From(quote(t))
q, err := AddFilterToQuery(sc.schema, q, sc.filter, sc.join)
if err != nil {
return "", nil, err
}
if sc.paginator != nil {
if sc.paginator.Key != "" {
property, err := sc.schema.GetPropertyByID(sc.paginator.Key)
if err == nil {
q = q.OrderBy(makeColumn(t, *property) + " " + sc.paginator.Order)
}
}
if sc.paginator.Limit > 0 {
q = q.Limit(sc.paginator.Limit)
}
if sc.paginator.Offset > 0 {
q = q.Offset(sc.paginator.Offset)
}
}
if sc.join {
q = makeJoin(sc.schema, t, q)
}
return q.ToSql()
}
func (tx *Transaction) executeSelect(ctx context.Context, sc *selectContext, sql string, args []interface{}) (list []*schema.Resource, total uint64, err error) {
tx.logQuery(sql, args...)
rows, err := tx.transaction.QueryxContext(ctx, sql, args...)
if err != nil {
return
}
defer rows.Close()
list, err = tx.decodeRows(sc.schema, rows, list, sc.fields != nil, sc.join)
if err != nil {
return nil, 0, err
}
total, err = tx.CountContext(ctx, sc.schema, sc.filter)
return
}
func (tx *Transaction) List(s *schema.Schema, filter transaction.Filter, options *transaction.ViewOptions, pg *pagination.Paginator) (list []*schema.Resource, total uint64, err error) {
return tx.ListContext(context.Background(), s, filter, options, pg)
}
//List resources in the db
func (tx *Transaction) ListContext(ctx context.Context, s *schema.Schema, filter transaction.Filter, options *transaction.ViewOptions, pg *pagination.Paginator) (list []*schema.Resource, total uint64, err error) {
defer tx.measureTime(time.Now(), s.ID, "list")
sc := listContextHelper(s, filter, options, pg)
sql, args, err := buildSelect(sc)
if err != nil {
return nil, 0, err
}
return tx.executeSelect(ctx, sc, sql, args)
}
func listContextHelper(s *schema.Schema, filter transaction.Filter, options *transaction.ViewOptions, pg *pagination.Paginator) *selectContext {
sc := &selectContext{
schema: s,
filter: filter,
join: true,
paginator: pg,
}
if options != nil {
sc.fields = normFields(options.Fields, s)
sc.join = options.Details
}
return sc
}
func shouldJoin(policy schema.LockPolicy) bool {
switch policy {
case schema.LockRelatedResources:
return true
case schema.SkipRelatedResources:
return false
default:
log.Fatalf("Unknown lock policy %+v", policy)
panic("Unexpected locking policy")
}
}
func (tx *Transaction) LockList(s *schema.Schema, filter transaction.Filter, options *transaction.ViewOptions, pg *pagination.Paginator, lockPolicy schema.LockPolicy) (list []*schema.Resource, total uint64, err error) {
return tx.LockListContext(context.Background(), s, filter, options, pg, lockPolicy)
}
// LockList locks resources in the db
func (tx *Transaction) LockListContext(ctx context.Context, s *schema.Schema, filter transaction.Filter, options *transaction.ViewOptions, pg *pagination.Paginator, lockPolicy schema.LockPolicy) (list []*schema.Resource, total uint64, err error) {
defer tx.measureTime(time.Now(), s.ID, "lock_list")
sc := lockListContextHelper(s, filter, options, pg, lockPolicy)
sql, args, err := buildSelect(sc)
if err != nil {
return nil, 0, err
}
if tx.db.sqlType == "mysql" {
sql += " FOR UPDATE"
}
// update join for recursive
if options != nil {
sc.join = options.Details
} else {
sc.join = true
}
return tx.executeSelect(ctx, sc, sql, args)
}
func lockListContextHelper(s *schema.Schema, filter transaction.Filter, options *transaction.ViewOptions, pg *pagination.Paginator, lockPolicy schema.LockPolicy) *selectContext {
policyJoin := shouldJoin(lockPolicy)
sc := &selectContext{
schema: s,
filter: filter,
join: policyJoin,
paginator: pg,
}
if options != nil {
sc.fields = normFields(options.Fields, s)
sc.join = policyJoin && options.Details
}
return sc
}
func (tx *Transaction) Query(s *schema.Schema, query string, arguments []interface{}) (list []*schema.Resource, err error) {
return tx.QueryContext(context.Background(), s, query, arguments)
}
// Query with raw sql string
func (tx *Transaction) QueryContext(ctx context.Context, s *schema.Schema, query string, arguments []interface{}) (list []*schema.Resource, err error) {
defer tx.measureTime(time.Now(), s.ID, "query")
tx.logQuery(query, arguments...)
rows, err := tx.transaction.QueryxContext(ctx, query, arguments...)
if err != nil {
return nil, fmt.Errorf("Failed to run query: %s", query)
}
defer rows.Close()
list, err = tx.decodeRows(s, rows, list, false, false)
if err != nil {
return nil, err
}
return
}
func (tx *Transaction) decodeRows(s *schema.Schema, rows *sqlx.Rows, list []*schema.Resource, skipNil, recursive bool) ([]*schema.Resource, error) {
for rows.Next() {
data := map[string]interface{}{}
rows.MapScan(data)
var resource *schema.Resource
resourceData := tx.decode(s, s.GetDbTableName(), skipNil, recursive, data)
resource, err := schema.NewResource(s, resourceData)
if err != nil {
return nil, fmt.Errorf("Failed to decode rows")
}
list = append(list, resource)
}
return list, nil
}
func (tx *Transaction) decode(s *schema.Schema, tableName string, skipNil, recursive bool, data map[string]interface{}) map[string]interface{} {
resourceData := map[string]interface{}{}
manager := schema.GetManager()
db := tx.db
for _, property := range s.Properties {
handler := db.handler(&property)
value := data[makeColumnID(tableName, property)]
if value != nil || (property.Nullable && !skipNil) {
decoded, err := handler.decode(&property, value)
if err != nil {
log.Error(fmt.Sprintf("SQL List decoding error: %s", err))
}
resourceData[property.ID] = decoded
}
if property.RelationProperty != "" && recursive {
relatedSchema, _ := manager.Schema(property.Relation)
aliasTableName := makeAliasTableName(tableName, property)
relatedResourceData := tx.decode(relatedSchema, aliasTableName, skipNil, recursive, data)
if len(relatedResourceData) > 0 || !skipNil {
resourceData[property.RelationProperty] = relatedResourceData
}
}
}
return resourceData
}
//CountContext counts all matching resources in the db
func (tx *Transaction) CountContext(ctx context.Context, s *schema.Schema, filter transaction.Filter) (res uint64, err error) {
defer tx.measureTime(time.Now(), s.ID, "count")
q := sq.Select("Count(id) as count").From(quote(s.GetDbTableName()))
//Filter get already tested
q, _ = AddFilterToQuery(s, q, filter, false)
sql, args, err := q.ToSql()
if err != nil {
return
}
result := map[string]interface{}{}
err = tx.transaction.QueryRowxContext(ctx, sql, args...).MapScan(result)
if err != nil {
return
}
count, _ := result["count"]
decoder := &integerHandler{}
decoded, decodeErr := decoder.decode(nil, count)
if decodeErr != nil {
err = fmt.Errorf("SQL List decoding error: %s", decodeErr)
return
}
res = uint64(decoded.(int))
return
}
func (tx *Transaction) Fetch(s *schema.Schema, filter transaction.Filter, options *transaction.ViewOptions) (*schema.Resource, error) {
return tx.FetchContext(context.Background(), s, filter, options)
}
//Fetch resources by ID in the db
func (tx *Transaction) FetchContext(ctx context.Context, s *schema.Schema, filter transaction.Filter, options *transaction.ViewOptions) (*schema.Resource, error) {
defer tx.measureTime(time.Now(), s.ID, "fetch")
list, _, err := tx.ListContext(ctx, s, filter, options, nil)
return fetchContextHelper(list, err, filter)
}
func fetchContextHelper(list []*schema.Resource, err error, filter transaction.Filter) (*schema.Resource, error) {
if err != nil {
return nil, fmt.Errorf("Failed to fetch %s: %s", filter, err)
}
if len(list) < 1 {
return nil, transaction.ErrResourceNotFound
}
return list[0], nil
}
func (tx *Transaction) LockFetch(s *schema.Schema, filter transaction.Filter, lockPolicy schema.LockPolicy, options *transaction.ViewOptions) (*schema.Resource, error) {
return tx.LockFetchContext(context.Background(), s, filter, lockPolicy, options)
}
// LockFetch fetches & locks a resource
func (tx *Transaction) LockFetchContext(ctx context.Context, s *schema.Schema, filter transaction.Filter, lockPolicy schema.LockPolicy, options *transaction.ViewOptions) (*schema.Resource, error) {
defer tx.measureTime(time.Now(), s.ID, "lock_fetch")
list, _, err := tx.LockListContext(ctx, s, filter, nil, nil, lockPolicy)
return lockFetchContextHelper(err, list, filter)
}
func lockFetchContextHelper(err error, list []*schema.Resource, filter transaction.Filter) (*schema.Resource, error) {
if err != nil {
return nil, fmt.Errorf("Failed to fetch and lock %s: %s", filter, err)
}
if len(list) < 1 {
return nil, transaction.ErrResourceNotFound
}
return list[0], nil
}
func (tx *Transaction) StateFetch(s *schema.Schema, filter transaction.Filter) (state transaction.ResourceState, err error) {
return tx.StateFetchContext(context.Background(), s, filter)
}
//StateFetch fetches the state of the specified resource
func (tx *Transaction) StateFetchContext(ctx context.Context, s *schema.Schema, filter transaction.Filter) (state transaction.ResourceState, err error) {
defer tx.measureTime(time.Now(), s.ID, "state_fetch")
if !s.StateVersioning() {
err = fmt.Errorf("Schema %s does not support state versioning", s.ID)
return
}
cols := makeStateColumns(s)
q := sq.Select(cols...).From(quote(s.GetDbTableName()))
q, _ = AddFilterToQuery(s, q, filter, true)
sql, args, err := q.ToSql()
if err != nil {
return
}
tx.logQuery(sql, args...)
rows, err := tx.transaction.QueryxContext(ctx, sql, args...)
if err != nil {
return
}
defer rows.Close()
if !rows.Next() {
err = transaction.ErrResourceNotFound
return
}
data := map[string]interface{}{}
rows.MapScan(data)
err = decodeState(data, &state)
return
}
//RawTransaction returns raw transaction
func (tx *Transaction) RawTransaction() *sqlx.Tx {
return tx.transaction
}
//Commit commits transaction
func (tx *Transaction) Commit() error {
defer tx.db.measureTime(time.Now(), "commit")
defer tx.db.updateCounter(-1, "active")
log.Debug("[%p] Committing transaction %#v", tx.transaction, tx)
err := tx.transaction.Commit()
if err != nil {
log.Error("[%p] Commit %#v failed: %s", tx.transaction, tx, err)
tx.db.updateCounter(1, "commit.failed")
return err
}
tx.closed = true
return nil
}
//Close closes connection
func (tx *Transaction) Close() error {
defer tx.db.measureTime(time.Now(), "rollback")
//Rollback if it isn't committed yet
log.Debug("[%p] Closing transaction %#v", tx.transaction, tx)
var err error
if !tx.closed {
defer tx.db.updateCounter(-1, "active")
log.Debug("[%p] Rolling back %#v", tx.transaction, tx)
err = tx.transaction.Rollback()
if err != nil {
log.Error("[%p] Rolling back %#v failed: %s", tx.transaction, tx, err)
tx.db.updateCounter(1, "rollback.failed")
return err
}
tx.closed = true
}
return nil
}
//Closed returns whether the transaction is closed
func (tx *Transaction) Closed() bool {
return tx.closed
}
// GetIsolationLevel returns tx isolation level
func (tx *Transaction) GetIsolationLevel() transaction.Type {
return tx.isolationLevel
}
const (
OrCondition = "__or__"
AndCondition = "__and__"
)
func AddFilterToQuery(s *schema.Schema, q sq.SelectBuilder, filter map[string]interface{}, join bool) (sq.SelectBuilder, error) {
if filter == nil {
return q, nil
}
for key, value := range filter {
if key == OrCondition {
orFilter, err := addOrToQuery(s, q, value, join)
if err != nil {
return q, err
}
q = q.Where(orFilter)
continue
} else if key == AndCondition {
andFilter, err := addAndToQuery(s, q, value, join)
if err != nil {
return q, err
}
q = q.Where(andFilter)
continue
}
property, err := s.GetPropertyByID(key)
if err != nil {
return q, err
}
var column string
if join {
column = makeColumn(s.GetDbTableName(), *property)
} else {
column = quote(key)
}
queryValues, ok := value.([]string)
if ok && property.Type == "boolean" {
v := make([]bool, len(queryValues))
for i, j := range queryValues {
v[i], _ = strconv.ParseBool(j)
}
q = q.Where(sq.Eq{column: v})
} else {
q = q.Where(sq.Eq{column: value})
}
}
return q, nil
}
func addOrToQuery(s *schema.Schema, q sq.SelectBuilder, filter interface{}, join bool) (sq.Or, error) {
return addToFilter(s, q, filter, join, sq.Or{})
}
func addAndToQuery(s *schema.Schema, q sq.SelectBuilder, filter interface{}, join bool) (sq.And, error) {
return addToFilter(s, q, filter, join, sq.And{})
}
func addToFilter(s *schema.Schema, q sq.SelectBuilder, filter interface{}, join bool, sqlizer []sq.Sqlizer) ([]sq.Sqlizer, error) {
filters := filter.([]map[string]interface{})
for _, filter := range filters {
if match, ok := filter[OrCondition]; ok {
res, err := addOrToQuery(s, q, match, join)
if err != nil {
return nil, err
}
sqlizer = append(sqlizer, res)
} else if match, ok := filter[AndCondition]; ok {
res, err := addAndToQuery(s, q, match, join)
if err != nil {
return nil, err
}
sqlizer = append(sqlizer, res)
} else {
key := filter["property"].(string)
property, err := s.GetPropertyByID(key)
if err != nil {
return nil, err
}
var column string
if join {
column = makeColumn(s.GetDbTableName(), *property)
} else {
column = quote(key)
}
// TODO: add other operators
value := filter["value"]
switch filter["type"] {
case "eq":
sqlizer = append(sqlizer, sq.Eq{column: value})
case "neq":
sqlizer = append(sqlizer, sq.NotEq{column: value})
default:
panic("type has to be one of [eq, neq]")
}
}
}
return sqlizer, nil
}
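// Illustrative filter shape (derived from the type assertions above; the
// property names and values are placeholders):
//
//	filter := transaction.Filter{
//		OrCondition: []map[string]interface{}{
//			{"property": "name", "type": "eq", "value": "alice"},
//			{"property": "name", "type": "neq", "value": "bob"},
//		},
//	}
//	q, err = AddFilterToQuery(s, sq.Select("*").From(quote(s.GetDbTableName())), filter, false)
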
//SetMaxOpenConns limits the maximum number of open connections
func (db *DB) SetMaxOpenConns(maxIdleConns int) {
// db.DB.SetMaxOpenConns(maxIdleConns)
// db.DB.SetMaxIdleConns(maxIdleConns)
}
| [
"\"FUZZY_DB_TX\""
]
| []
| [
"FUZZY_DB_TX"
]
| [] | ["FUZZY_DB_TX"] | go | 1 | 0 | |
rest/request-lev-options_test.go | package rest_test
import (
"fmt"
"os"
"strings"
"testing"
"time"
"github.com/bwparker/go-ftx/auth"
"github.com/bwparker/go-ftx/rest"
"github.com/bwparker/go-ftx/rest/private/leveraged"
"github.com/bwparker/go-ftx/rest/private/options"
"github.com/bwparker/go-ftx/types"
"github.com/stretchr/testify/assert"
)
/*
# Leveraged tokens
*/
func TestLevTokens(t *testing.T) {
c := rest.New(auth.New(os.Getenv("FTXKEY"), os.Getenv("FTXSECRET")))
res, err := c.LvTokens(&leveraged.RequestForLvTokens{})
assert.NoError(t, err)
list := res.Products()
fmt.Printf("%+v\n", strings.Join(list, "\n"))
}
func TestLvToken(t *testing.T) {
c := rest.New(auth.New(os.Getenv("FTXKEY"), os.Getenv("FTXSECRET")))
res, err := c.LvToken(&leveraged.RequestForLvToken{
ProductCode: "BULL",
})
assert.NoError(t, err)
fmt.Printf("%+v\n", res)
}
func TestCreatedLvTokens(t *testing.T) {
c := rest.New(auth.New(os.Getenv("FTXKEY"), os.Getenv("FTXSECRET")))
res, err := c.CreatedLvTokens(&leveraged.RequestForCreatedLvTokens{})
assert.NoError(t, err)
fmt.Printf("%+v\n", res)
}
func TestCreatedLvToken(t *testing.T) {
c := rest.New(auth.New(os.Getenv("FTXKEY"), os.Getenv("FTXSECRET")))
res, err := c.CreatedLvToken(&leveraged.RequestForCreatedLvToken{
ProductCode: "BULL",
})
assert.NoError(t, err)
fmt.Printf("%+v\n", res)
}
func TestRedemptionLvTokens(t *testing.T) {
c := rest.New(auth.New(os.Getenv("FTXKEY"), os.Getenv("FTXSECRET")))
res, err := c.RedemptionLvTokens(&leveraged.RequestForRedemptionLvTokens{})
assert.NoError(t, err)
fmt.Printf("%+v\n", res)
}
func TestRedemptionLvToken(t *testing.T) {
c := rest.New(auth.New(os.Getenv("FTXKEY"), os.Getenv("FTXSECRET")))
res, err := c.RedemptionLvToken(&leveraged.RequestForRedemptionLvToken{
ProductCode: "BULL",
})
assert.NoError(t, err)
fmt.Printf("%+v\n", res)
}
func TestLvBalances(t *testing.T) {
c := rest.New(auth.New(os.Getenv("FTXKEY"), os.Getenv("FTXSECRET")))
res, err := c.LvBalances(&leveraged.RequestForLvBalances{})
assert.NoError(t, err)
fmt.Printf("%+v\n", res)
}
/*
# Options
*/
func TestOpQuoteRequests(t *testing.T) {
c := rest.New(auth.New(os.Getenv("FTXKEY"), os.Getenv("FTXSECRET")))
res, err := c.OpQuoteRequests(&options.RequestForOpQuoteRequests{})
assert.NoError(t, err)
fmt.Printf("%+v\n", res)
}
func TestMyOpQuoteRequests(t *testing.T) {
c := rest.New(auth.New(os.Getenv("FTXKEY"), os.Getenv("FTXSECRET")))
res, err := c.MyOpQuoteRequests(&options.RequestForMyOpQuoteRequests{})
assert.NoError(t, err)
fmt.Printf("%+v\n", res)
}
func TestMyOpQuoteRequest(t *testing.T) {
c := rest.New(auth.New(os.Getenv("FTXKEY"), os.Getenv("FTXSECRET")))
res, err := c.MyOpQuoteRequest(&options.RequestForMyOpQuoteRequest{
RequestID: 1,
})
assert.NoError(t, err)
fmt.Printf("%+v\n", res)
}
func TestCreateOpQuoteRequest(t *testing.T) {
c := rest.New(auth.New(os.Getenv("FTXKEY"), os.Getenv("FTXSECRET")))
res, err := c.CreateOpQuoteRequest(&options.RequestForCreateOpQuoteRequest{
Underlying: "BTC",
Type: "call",
Strike: 6200,
Expiry: time.Now().Add(10 * time.Hour).Unix(),
Side: types.BUY,
Size: 1,
// Optionals
// LimitPrice: 6800,
// HideLimitPrice: true,
// RequestExpiry: time.Now().Add(10 * time.Hour).Unix(),
// CounterpartyID: 1,
})
assert.NoError(t, err)
fmt.Printf("%+v\n", res)
}
func TestModifyOpQuoteRequest(t *testing.T) {
c := rest.New(auth.New(os.Getenv("FTXKEY"), os.Getenv("FTXSECRET")))
res, err := c.ModifyOpQuoteRequest(&options.RequestForModifyOpQuoteRequest{
RequestID: 1,
})
assert.NoError(t, err)
fmt.Printf("%+v\n", res)
}
func TestCancelOpQuoteRequest(t *testing.T) {
c := rest.New(auth.New(os.Getenv("FTXKEY"), os.Getenv("FTXSECRET")))
res, err := c.CancelOpQuoteRequest(&options.RequestForCancelOpQuoteRequest{
RequestID: 1,
})
assert.NoError(t, err)
fmt.Printf("%+v\n", res)
}
func TestMyOpQuotes(t *testing.T) {
c := rest.New(auth.New(os.Getenv("FTXKEY"), os.Getenv("FTXSECRET")))
res, err := c.MyOpQuotes(&options.RequestForMyOpQuotes{})
assert.NoError(t, err)
fmt.Printf("%+v\n", res)
}
// func TestCreateOpQuote(t *testing.T) {
// c := rest.New(auth.New(os.Getenv("FTXKEY"), os.Getenv("FTXSECRET")))
// res, err := c.CreateOpQuote(&options.RequestForCreateOpQuote{})
// assert.NoError(t, err)
// fmt.Printf("%+v\n", res)
// }
func TestCancelOpQuote(t *testing.T) {
c := rest.New(auth.New(os.Getenv("FTXKEY"), os.Getenv("FTXSECRET")))
res, err := c.CancelOpQuote(&options.RequestForCancelOpQuote{})
assert.NoError(t, err)
fmt.Printf("%+v\n", res)
}
func TestAcceptOpQuote(t *testing.T) {
c := rest.New(auth.New(os.Getenv("FTXKEY"), os.Getenv("FTXSECRET")))
res, err := c.AcceptOpQuote(&options.RequestForAcceptOpQuote{
QuoteID: 1,
})
assert.NoError(t, err)
fmt.Printf("%+v\n", res)
}
func TestOpPositions(t *testing.T) {
c := rest.New(auth.New(os.Getenv("FTXKEY"), os.Getenv("FTXSECRET")))
res, err := c.OpPositions(&options.RequestForOpPositions{})
assert.NoError(t, err)
fmt.Printf("%+v\n", res)
}
func TestOpTrades(t *testing.T) {
c := rest.New(auth.New(os.Getenv("FTXKEY"), os.Getenv("FTXSECRET")))
res, err := c.OpTrades(&options.RequestForOpTrades{})
assert.NoError(t, err)
fmt.Printf("%+v\n", res)
}
func TestOpFills(t *testing.T) {
c := rest.New(auth.New(os.Getenv("FTXKEY"), os.Getenv("FTXSECRET")))
res, err := c.OpFills(&options.RequestForOpFills{})
assert.NoError(t, err)
fmt.Printf("%+v\n", res)
}
| [
"\"FTXKEY\"",
"\"FTXSECRET\"",
"\"FTXKEY\"",
"\"FTXSECRET\"",
"\"FTXKEY\"",
"\"FTXSECRET\"",
"\"FTXKEY\"",
"\"FTXSECRET\"",
"\"FTXKEY\"",
"\"FTXSECRET\"",
"\"FTXKEY\"",
"\"FTXSECRET\"",
"\"FTXKEY\"",
"\"FTXSECRET\"",
"\"FTXKEY\"",
"\"FTXSECRET\"",
"\"FTXKEY\"",
"\"FTXSECRET\"",
"\"FTXKEY\"",
"\"FTXSECRET\"",
"\"FTXKEY\"",
"\"FTXSECRET\"",
"\"FTXKEY\"",
"\"FTXSECRET\"",
"\"FTXKEY\"",
"\"FTXSECRET\"",
"\"FTXKEY\"",
"\"FTXSECRET\"",
"\"FTXKEY\"",
"\"FTXSECRET\"",
"\"FTXKEY\"",
"\"FTXSECRET\"",
"\"FTXKEY\"",
"\"FTXSECRET\"",
"\"FTXKEY\"",
"\"FTXSECRET\"",
"\"FTXKEY\"",
"\"FTXSECRET\"",
"\"FTXKEY\"",
"\"FTXSECRET\""
]
| []
| [
"FTXKEY",
"FTXSECRET"
]
| [] | ["FTXKEY", "FTXSECRET"] | go | 2 | 0 | |
tests/e2e/e2e_test.go | // Copyright (C) 2019-2022, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
// e2e implements the e2e tests.
package e2e_test
import (
"context"
"flag"
"io/fs"
"io/ioutil"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/ava-labs/avalanche-network-runner/client"
"github.com/ava-labs/avalanche-network-runner/pkg/color"
"github.com/ava-labs/avalanche-network-runner/pkg/logutil"
"github.com/ava-labs/avalanche-network-runner/server"
"github.com/ava-labs/avalanche-network-runner/utils"
"github.com/ava-labs/avalanchego/api/admin"
"github.com/ava-labs/avalanchego/ids"
"github.com/ava-labs/avalanchego/message"
"github.com/ava-labs/avalanchego/utils/constants"
ginkgo "github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
"github.com/prometheus/client_golang/prometheus"
)
func TestE2e(t *testing.T) {
if os.Getenv("RUN_E2E") == "" {
t.Skip("Environment variable RUN_E2E not set; skipping E2E tests")
}
gomega.RegisterFailHandler(ginkgo.Fail)
ginkgo.RunSpecs(t, "network-runner-example e2e test suites")
}
var (
logLevel string
gRPCEp string
gRPCGatewayEp string
execPath1 string
execPath2 string
newNodeName = "test-add-node"
customNodeConfigs = map[string]string{
"node1": `{"api-admin-enabled":true}`,
"node2": `{"api-admin-enabled":true}`,
"node3": `{"api-admin-enabled":true}`,
"node4": `{"api-admin-enabled":false}`,
"node5": `{"api-admin-enabled":false}`,
"node6": `{"api-admin-enabled":false}`,
"node7": `{"api-admin-enabled":false}`,
}
numNodes = uint32(5)
)
func init() {
flag.StringVar(
&logLevel,
"log-level",
logutil.DefaultLogLevel,
"log level",
)
flag.StringVar(
&gRPCEp,
"grpc-endpoint",
"0.0.0.0:8080",
"gRPC server endpoint",
)
flag.StringVar(
&gRPCGatewayEp,
"grpc-gateway-endpoint",
"0.0.0.0:8081",
"gRPC gateway endpoint",
)
flag.StringVar(
&execPath1,
"avalanchego-path-1",
"",
"avalanchego executable path (to upgrade from)",
)
flag.StringVar(
&execPath2,
"avalanchego-path-2",
"",
"avalanchego executable path (to upgrade to)",
)
}
var cli client.Client
var _ = ginkgo.BeforeSuite(func() {
var err error
cli, err = client.New(client.Config{
LogLevel: logLevel,
Endpoint: gRPCEp,
DialTimeout: 10 * time.Second,
})
gomega.Ω(err).Should(gomega.BeNil())
})
var _ = ginkgo.AfterSuite(func() {
color.Outf("{{red}}shutting down cluster{{/}}\n")
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
_, err := cli.Stop(ctx)
cancel()
gomega.Ω(err).Should(gomega.BeNil())
color.Outf("{{red}}shutting down client{{/}}\n")
err = cli.Close()
gomega.Ω(err).Should(gomega.BeNil())
})
var _ = ginkgo.Describe("[Start/Remove/Restart/Add/Stop]", func() {
ginkgo.It("can start", func() {
ginkgo.By("start request with invalid exec path should fail", func() {
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
_, err := cli.Start(ctx, "")
cancel()
gomega.Ω(err.Error()).Should(gomega.ContainSubstring(utils.ErrInvalidExecPath.Error()))
})
ginkgo.By("start request with invalid exec path should fail", func() {
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
_, err := cli.Start(ctx, "invalid")
cancel()
gomega.Ω(err.Error()).Should(gomega.ContainSubstring(utils.ErrNotExists.Error()))
})
ginkgo.By("start request with invalid custom VM path should fail", func() {
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
_, err := cli.Start(ctx, execPath1,
client.WithPluginDir(os.TempDir()),
client.WithCustomVMs(map[string]string{"invalid": "{0}"}),
)
cancel()
gomega.Ω(err.Error()).Should(gomega.ContainSubstring(utils.ErrNotExistsPlugin.Error()))
})
ginkgo.By("start request with invalid custom VM name format should fail", func() {
f, err := os.CreateTemp(os.TempDir(), strings.Repeat("a", 33))
gomega.Ω(err).Should(gomega.BeNil())
filePath := f.Name()
gomega.Ω(f.Close()).Should(gomega.BeNil())
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
_, err = cli.Start(ctx, execPath1,
client.WithPluginDir(filepath.Dir(filePath)),
client.WithCustomVMs(map[string]string{filepath.Base(filePath): "{0}"}),
)
cancel()
gomega.Ω(err.Error()).Should(gomega.ContainSubstring(server.ErrInvalidVMName.Error()))
os.RemoveAll(filePath)
})
ginkgo.By("start request with missing plugin dir should fail", func() {
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
_, err := cli.Start(ctx, execPath1,
client.WithCustomVMs(map[string]string{"test": "{0}"}),
)
cancel()
gomega.Ω(err.Error()).Should(gomega.ContainSubstring(server.ErrPluginDirEmptyButCustomVMsNotEmpty.Error()))
})
ginkgo.By("start request with missing custom VMs should fail", func() {
f, err := os.CreateTemp(os.TempDir(), strings.Repeat("a", 33))
gomega.Ω(err).Should(gomega.BeNil())
filePath := f.Name()
gomega.Ω(f.Close()).Should(gomega.BeNil())
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
_, err = cli.Start(ctx, execPath1,
client.WithPluginDir(filepath.Dir(filePath)),
)
cancel()
gomega.Ω(err.Error()).Should(gomega.ContainSubstring(server.ErrPluginDirNonEmptyButCustomVMsEmpty.Error()))
os.RemoveAll(filePath)
})
ginkgo.By("start request with invalid custom VM genesis path should fail", func() {
vmID, err := utils.VMID("hello")
gomega.Ω(err).Should(gomega.BeNil())
filePath := filepath.Join(os.TempDir(), vmID.String())
gomega.Ω(ioutil.WriteFile(filePath, []byte{0}, fs.ModePerm)).Should(gomega.BeNil())
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
_, err = cli.Start(ctx, execPath1,
client.WithPluginDir(filepath.Dir(filePath)),
client.WithCustomVMs(map[string]string{"hello": "invalid"}),
)
cancel()
gomega.Ω(err.Error()).Should(gomega.ContainSubstring(utils.ErrNotExistsPluginGenesis.Error()))
os.RemoveAll(filePath)
})
ginkgo.By("calling start API with the valid binary path", func() {
color.Outf("{{green}}sending 'start' with the valid binary path:{{/}} %q\n", execPath1)
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
resp, err := cli.Start(ctx, execPath1)
cancel()
gomega.Ω(err).Should(gomega.BeNil())
color.Outf("{{green}}successfully started:{{/}} %+v\n", resp.ClusterInfo.NodeNames)
})
})
ginkgo.It("can wait for health", func() {
// start is async, so wait some time for cluster health
// TODO: Don't sleep. Use polling or other mechanism. Apply to all Sleeps in the test.
time.Sleep(30 * time.Second)
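// One possible polling approach for the TODO above (illustrative sketch, kept
// as a comment so the test behaviour is unchanged):
//
//	gomega.Eventually(func() error {
//		hctx, hcancel := context.WithTimeout(context.Background(), 10*time.Second)
//		defer hcancel()
//		_, herr := cli.Health(hctx)
//		return herr
//	}, 2*time.Minute, 5*time.Second).Should(gomega.BeNil())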
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
_, err := cli.Health(ctx)
cancel()
gomega.Ω(err).Should(gomega.BeNil())
})
ginkgo.It("can get URIs", func() {
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
uris, err := cli.URIs(ctx)
cancel()
gomega.Ω(err).Should(gomega.BeNil())
color.Outf("{{blue}}URIs:{{/}} %q\n", uris)
})
ginkgo.It("can fetch status", func() {
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
_, err := cli.Status(ctx)
cancel()
gomega.Ω(err).Should(gomega.BeNil())
})
ginkgo.It("can poll status", func() {
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
defer cancel()
ch, err := cli.StreamStatus(ctx, 5*time.Second)
gomega.Ω(err).Should(gomega.BeNil())
for info := range ch {
color.Outf("{{green}}fetched info:{{/}} %+v\n", info.NodeNames)
if info.Healthy {
break
}
}
})
time.Sleep(10 * time.Second)
ginkgo.It("can remove", func() {
ginkgo.By("calling remove API with the first binary", func() {
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
resp, err := cli.RemoveNode(ctx, "node5")
cancel()
gomega.Ω(err).Should(gomega.BeNil())
color.Outf("{{green}}successfully removed:{{/}} %+v\n", resp.ClusterInfo.NodeNames)
})
})
time.Sleep(10 * time.Second)
ginkgo.It("can restart", func() {
ginkgo.By("calling restart API with the second binary", func() {
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
resp, err := cli.RestartNode(ctx, "node4", client.WithExecPath(execPath2))
cancel()
gomega.Ω(err).Should(gomega.BeNil())
color.Outf("{{green}}successfully restarted:{{/}} %+v\n", resp.ClusterInfo.NodeNames)
})
})
time.Sleep(10 * time.Second)
ginkgo.It("can attach a peer", func() {
ginkgo.By("calling attach peer API", func() {
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
resp, err := cli.AttachPeer(ctx, "node1")
cancel()
gomega.Ω(err).Should(gomega.BeNil())
v, ok := resp.ClusterInfo.AttachedPeerInfos["node1"]
gomega.Ω(ok).Should(gomega.BeTrue())
color.Outf("{{green}}successfully attached peer:{{/}} %+v\n", v.Peers)
mc, err := message.NewCreator(
prometheus.NewRegistry(),
true,
"",
10*time.Second,
)
gomega.Ω(err).Should(gomega.BeNil())
containerIDs := []ids.ID{
ids.GenerateTestID(),
ids.GenerateTestID(),
ids.GenerateTestID(),
}
requestID := uint32(42)
chainID := constants.PlatformChainID
msg, err := mc.Chits(chainID, requestID, containerIDs)
gomega.Ω(err).Should(gomega.BeNil())
ctx, cancel = context.WithTimeout(context.Background(), 2*time.Minute)
sresp, err := cli.SendOutboundMessage(ctx, "node1", v.Peers[0].Id, uint32(msg.Op()), msg.Bytes())
cancel()
gomega.Ω(err).Should(gomega.BeNil())
gomega.Ω(sresp.Sent).Should(gomega.BeTrue())
})
})
time.Sleep(10 * time.Second)
ginkgo.It("can add a node", func() {
ginkgo.By("calling AddNode", func() {
color.Outf("{{green}}calling 'add-node' with the valid binary path:{{/}} %q\n", execPath1)
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
resp, err := cli.AddNode(ctx, newNodeName, execPath1)
cancel()
gomega.Ω(err).Should(gomega.BeNil())
color.Outf("{{green}}successfully started:{{/}} %+v\n", resp.ClusterInfo.NodeNames)
})
ginkgo.By("calling AddNode with existing node name, should fail", func() {
color.Outf("{{green}}calling 'add-node' with the valid binary path:{{/}} %q\n", execPath1)
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
resp, err := cli.AddNode(ctx, newNodeName, execPath1)
cancel()
gomega.Ω(err.Error()).Should(gomega.ContainSubstring("already exists"))
gomega.Ω(resp).Should(gomega.BeNil())
color.Outf("{{green}}add-node failed as expected")
})
})
ginkgo.It("can start with custom config", func() {
ginkgo.By("stopping network first", func() {
color.Outf("{{red}}shutting down cluster{{/}}\n")
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
_, err := cli.Stop(ctx)
cancel()
gomega.Ω(err).Should(gomega.BeNil())
color.Outf("{{red}}shutting down client{{/}}\n")
gomega.Ω(err).Should(gomega.BeNil())
})
ginkgo.By("calling start API with custom config", func() {
color.Outf("{{green}}sending 'start' with the valid binary path:{{/}} %q\n", execPath1)
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
opts := []client.OpOption{
client.WithNumNodes(numNodes),
client.WithCustomNodeConfigs(customNodeConfigs),
}
resp, err := cli.Start(ctx, execPath1, opts...)
cancel()
gomega.Ω(err).Should(gomega.BeNil())
color.Outf("{{green}}successfully started:{{/}} %+v\n", resp.ClusterInfo.NodeNames)
})
ginkgo.By("can wait for health", func() {
// start is async, so wait some time for cluster health
time.Sleep(30 * time.Second)
ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
_, err := cli.Health(ctx)
cancel()
gomega.Ω(err).Should(gomega.BeNil())
})
ginkgo.By("overrides num-nodes", func() {
color.Outf("{{green}}checking that given num-nodes %d have been overriden by custom configs with %d:\n", numNodes, len(customNodeConfigs))
ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
uris, err := cli.URIs(ctx)
cancel()
gomega.Ω(err).Should(gomega.BeNil())
gomega.Ω(uris).Should(gomega.HaveLen(len(customNodeConfigs)))
color.Outf("{{green}}expected number of nodes up:{{/}} %q\n", len(customNodeConfigs))
color.Outf("{{green}}checking correct admin APIs are enabled resp. disabled")
// we have 7 nodes, 3 have the admin API enabled, the other 4 disabled
// therefore we expect exactly 4 calls to fail and exactly 3 to succeed.
ctx, cancel = context.WithTimeout(context.Background(), 15*time.Second)
errCnt := 0
for i := 0; i < len(uris); i++ {
cli := admin.NewClient(uris[i])
_, err := cli.LockProfile(ctx)
if err != nil {
errCnt++
}
}
cancel()
gomega.Ω(errCnt).Should(gomega.Equal(4))
})
})
})
| [
"\"RUN_E2E\""
]
| []
| [
"RUN_E2E"
]
| [] | ["RUN_E2E"] | go | 1 | 0 | |
meiduo_mall/celery_tasks/main.py | from celery import Celery
# Configure Celery to use the Django settings module
import os
if not os.getenv('DJANGO_SETTINGS_MODULE'):
os.environ['DJANGO_SETTINGS_MODULE'] = 'meiduo_mall.settings.dev'
# Create the Celery application
app = Celery('meiduo')
# Load the Celery configuration
app.config_from_object('celery_tasks.config')
# Auto-register Celery tasks; this picks up the tasks.py files under the listed packages
app.autodiscover_tasks(['celery_tasks.sms_code', 'celery_tasks.email', 'celery_tasks.static_html'])
# Start the Celery worker: -A points at the task module path, -l sets the log detail level
# celery -A celery_tasks.main worker -l info
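# Illustrative sketch (an assumption about the surrounding project layout, not part
# of this file): a task module registered above, e.g. celery_tasks/email/tasks.py,
# would typically define its tasks against the app created here:
#
#   from celery_tasks.main import app
#
#   @app.task(name='send_verify_email')
#   def send_verify_email(to_email, verify_url):
#       """Send the account verification email asynchronously."""
#       ...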
| []
| []
| [
"DJANGO_SETTINGS_MODULE"
]
| [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
couchdb_test.go | package main
import (
"context"
"os"
"sync"
"testing"
"time"
"github.com/go-kivik/couchdb" // The CouchDB driver
"github.com/go-kivik/kivik" // Development version of Kivik
"github.com/google/uuid"
"github.com/stretchr/testify/assert"
)
var (
couchdbSetupLock sync.Mutex
couchdbState = "unknown"
)
func setupCouchDB(tb testing.TB) {
couchdbSetupLock.Lock()
defer couchdbSetupLock.Unlock()
if couchdbState == "ready" {
tb.Log("Skipping setup as couchdb benchmark ready")
return
}
client, err := kivik.New("couch", os.Getenv("COUCHDB_URL"))
assert.Nil(tb, err, "Failed to connect to couchdb")
assert.NotNil(tb, client, "Failed to connect to couchdb")
err = client.Authenticate(context.TODO(), couchdb.BasicAuth("admin", "password"))
assert.Nil(tb, err, "Failed to auth to couchdb")
exist, err := client.DBExists(context.TODO(), "tweets", nil)
assert.Nil(tb, err, "Failed to check db on couchdb")
if exist {
err = client.DestroyDB(context.TODO(), "tweets", nil)
assert.Nil(tb, err, "Failed to create db on couchdb")
}
err = client.CreateDB(context.TODO(), "tweets", nil)
assert.Nil(tb, err, "Failed to create db on couchdb")
tb.Log("Setting up couchdb benchmark")
//TODO setup database tweets;
time.Sleep(5 * time.Second)
couchdbState = "ready"
client.Close(context.TODO())
}
func BenchmarkCouchDB(b *testing.B) {
b.StopTimer()
if os.Getenv("COUCHDB_URL") == "" {
b.Skip("Env. variable COUCHDB_URL not set -> Skipping couchdb tests")
}
setupCouchDB(b)
client, err := kivik.New("couch", os.Getenv("COUCHDB_URL"))
assert.Nil(b, err, "Failed to connect to couchdb")
assert.NotNil(b, client, "Failed to connect to couchdb")
err = client.Authenticate(context.TODO(), couchdb.BasicAuth("admin", "password"))
assert.Nil(b, err, "Failed to auth to couchdb")
db := client.DB(context.TODO(), "tweets", nil)
defer client.Close(context.TODO())
b.StartTimer()
b.Run("AddTweet", func(b *testing.B) {
for n := 0; n < b.N; n++ {
// insert a tweet
_, err := db.Put(context.TODO(), uuid.New().String(), map[string]interface{}{
"timeline": "me",
"text": "hello world",
})
assert.Nil(b, err, "Failed to add data to cluster")
}
})
}
| [
"\"COUCHDB_URL\"",
"\"COUCHDB_URL\"",
"\"COUCHDB_URL\""
]
| []
| [
"COUCHDB_URL"
]
| [] | ["COUCHDB_URL"] | go | 1 | 0 | |
docs/source/conf.py | # -*- coding: utf-8 -*-
#
# MolVS documentation build configuration file, created by sphinx-quickstart on Thu Apr 24 14:35:38 2014.
# This file is execfile()d with the current directory set to its containing dir.
# Note that not all possible configuration values are present in this autogenerated file.
# All configuration values have a default; values that are commented out serve to show the default.
import sys
import os
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here.
# If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx
# (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.extlinks',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'MolVS'
copyright = u'2016, Matt Swain'
# The version info for the project you're documenting, acts as replacement for |version| and |release|, also used in
# various other places throughout the built documents.
# The short X.Y version.
version = '0.1.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.1'
# The language for content autogenerated by Sphinx. Refer to documentation for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and directories to ignore when looking for source
# files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes.
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
#html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme further. For a list of options available
# for each theme, see the documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the docs. This file should be a Windows icon
# file (.ico) being 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here, relative to this directory. They are
# copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or .htaccess) here, relative to this directory.
# These files are copied directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will contain a <link> tag referring to it. The
# value of this option must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MolVSdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
'pointsize': '12pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'MolVS.tex', u'MolVS Documentation', u'Matt Swain', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts, not chapters.
latex_use_parts = False
# If true, show page references after internal links.
latex_show_pagerefs = True
# If true, show URL addresses after external links.
latex_show_urls = True
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples (source start file, name, description, authors, manual section).
man_pages = [
('index', 'molvs', u'MolVS Documentation', [u'Matt Swain'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author, dir menu entry, description, category)
texinfo_documents = [
('index', 'MolVS', u'MolVS Documentation', u'Matt Swain', 'MolVS',
'One line description of project.', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('http://docs.python.org/', None)}
# Sort autodoc members by the order they appear in the source code
autodoc_member_order = 'bysource'
# Concatenate the class and __init__ docstrings together
autoclass_content = 'both'
# Define an rdkit shortcut for external links to the RDKit docs
extlinks = {'rdkit': ('http://www.rdkit.org/Python_Docs/rdkit.%s', '')}
def process_docstring(app, what, name, obj, options, lines):
"""Filter out meta fields from module docstrings when used by autodoc."""
if not what == 'module':
return
for l in reversed(lines):
if l.startswith(':copyright:') or l.startswith(':license:'):
lines.remove(l)
def setup(app):
app.connect('autodoc-process-docstring', process_docstring)
class Mock(object):
"""Mock."""
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
if args and callable(args[0]):
return args[0]
return Mock()
def __getattribute__(self, name):
return Mock()
# Mock rdkit imports so autodoc works even when rdkit isn't installed
for mod_name in ['rdkit', 'rdkit.Chem', 'rdkit.Chem.rdchem']:
sys.modules[mod_name] = Mock()
| []
| []
| [
"READTHEDOCS"
]
| [] | ["READTHEDOCS"] | python | 1 | 0 | |
setup.py | #
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import binascii
import os
import os.path
import pathlib
import platform
import shutil
import subprocess
import textwrap
import setuptools
from setuptools import extension as setuptools_extension
from setuptools.command import build_ext as setuptools_build_ext
from setuptools.command import develop as setuptools_develop
import distutils
from distutils.command import build as distutils_build
try:
import setuptools_rust
except ImportError:
setuptools_rust = None
RUNTIME_DEPS = [
'edgedb>=0.18.0a1',
'asyncpg~=0.24.0',
'httptools>=0.3.0',
'immutables>=0.16',
'uvloop~=0.16.0',
'click~=7.1',
'cryptography~=3.4',
'graphql-core~=3.1.5',
'parsing~=2.0',
'psutil~=5.8',
'setproctitle~=1.2',
'wcwidth~=0.2',
]
CYTHON_DEPENDENCY = 'Cython(>=0.29.24,<0.30.0)'
DOCS_DEPS = [
'docutils~=0.17.0',
'lxml~=4.6.3',
'Pygments~=2.10.0',
'Sphinx~=4.1.2',
'sphinxcontrib-asyncio~=0.3.0',
]
TEST_DEPS = [
# Code QA
'black~=21.7b0',
'coverage~=5.5',
'flake8~=3.9.2',
'flake8-bugbear~=21.4.3',
'pycodestyle~=2.7.0',
'pyflakes~=2.3.1',
# Needed for test_docs_sphinx_ext
'requests-xml~=0.2.3',
# For rebuilding GHA workflows
'Jinja2~=2.11',
'MarkupSafe~=1.1',
'PyYAML~=5.4',
'mypy==0.910',
# mypy stub packages; when updating, you can use mypy --install-types
# to install stub packages and then pip freeze to read out the specifier
'types-click~=7.1',
'types-docutils~=0.17.0',
'types-Jinja2~=2.11',
'types-MarkupSafe~=1.1',
'types-pkg-resources~=0.1.3',
'types-typed-ast~=1.4.2',
] + DOCS_DEPS
BUILD_DEPS = [
CYTHON_DEPENDENCY,
'packaging>=21.0',
'setuptools-rust~=0.12.1',
]
RUST_VERSION = '1.53.0' # Also update docs/internal/dev.rst
EDGEDBCLI_REPO = 'https://github.com/edgedb/edgedb-cli'
EXTRA_DEPS = {
'test': TEST_DEPS,
'docs': DOCS_DEPS,
}
EXT_CFLAGS = ['-O2']
EXT_LDFLAGS = []
ROOT_PATH = pathlib.Path(__file__).parent.resolve()
if platform.uname().system != 'Windows':
EXT_CFLAGS.extend([
'-std=c99', '-fsigned-char', '-Wall', '-Wsign-compare', '-Wconversion'
])
def _compile_parsers(build_lib, inplace=False):
import parsing
import edb.edgeql.parser.grammar.single as edgeql_spec
import edb.edgeql.parser.grammar.block as edgeql_spec2
import edb.edgeql.parser.grammar.sdldocument as schema_spec
for spec in (edgeql_spec, edgeql_spec2, schema_spec):
spec_path = pathlib.Path(spec.__file__).parent
subpath = pathlib.Path(str(spec_path)[len(str(ROOT_PATH)) + 1:])
pickle_name = spec.__name__.rpartition('.')[2] + '.pickle'
pickle_path = subpath / pickle_name
cache = build_lib / pickle_path
cache.parent.mkdir(parents=True, exist_ok=True)
parsing.Spec(spec, pickleFile=str(cache), verbose=True)
if inplace:
shutil.copy2(cache, ROOT_PATH / pickle_path)
def _compile_build_meta(build_lib, version, pg_config, runstatedir,
shared_dir, version_suffix):
from edb.common import verutils
parsed_version = verutils.parse_version(version)
vertuple = list(parsed_version._asdict().values())
vertuple[2] = int(vertuple[2])
if version_suffix:
vertuple[4] = tuple(version_suffix.split('.'))
vertuple = tuple(vertuple)
content = textwrap.dedent('''\
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
#
# THIS FILE HAS BEEN AUTOMATICALLY GENERATED.
#
PG_CONFIG_PATH = {pg_config!r}
RUNSTATE_DIR = {runstatedir!r}
SHARED_DATA_DIR = {shared_dir!r}
VERSION = {version!r}
''').format(
version=vertuple,
pg_config=pg_config,
runstatedir=runstatedir,
shared_dir=shared_dir,
)
directory = build_lib / 'edb'
if not directory.exists():
directory.mkdir(parents=True)
with open(directory / '_buildmeta.py', 'w+t') as f:
f.write(content)
def _compile_postgres(build_base, *,
force_build=False, fresh_build=True,
run_configure=True, build_contrib=True):
proc = subprocess.run(
['git', 'submodule', 'status', 'postgres'],
stdout=subprocess.PIPE, universal_newlines=True, check=True)
status = proc.stdout
if status[0] == '-':
print('postgres submodule not initialized, '
'run `git submodule init; git submodule update`')
exit(1)
source_stamp = _get_pg_source_stamp()
postgres_build = (build_base / 'postgres').resolve()
postgres_src = ROOT_PATH / 'postgres'
postgres_build_stamp = postgres_build / 'stamp'
if postgres_build_stamp.exists():
with open(postgres_build_stamp, 'r') as f:
build_stamp = f.read()
else:
build_stamp = None
is_outdated = source_stamp != build_stamp
if is_outdated or force_build:
system = platform.system()
if system == 'Darwin':
uuidlib = 'e2fs'
elif system == 'Linux':
uuidlib = 'e2fs'
else:
raise NotImplementedError('unsupported system: {}'.format(system))
if fresh_build and postgres_build.exists():
shutil.rmtree(postgres_build)
build_dir = postgres_build / 'build'
if not build_dir.exists():
build_dir.mkdir(parents=True)
if run_configure or fresh_build or is_outdated:
subprocess.run([
str(postgres_src / 'configure'),
'--prefix=' + str(postgres_build / 'install'),
'--with-uuid=' + uuidlib,
], check=True, cwd=str(build_dir))
subprocess.run(
['make', 'MAKELEVEL=0', '-j', str(max(os.cpu_count() - 1, 1))],
cwd=str(build_dir), check=True)
if build_contrib or fresh_build or is_outdated:
subprocess.run(
[
'make', '-C', 'contrib', 'MAKELEVEL=0', '-j',
str(max(os.cpu_count() - 1, 1))
],
cwd=str(build_dir), check=True)
subprocess.run(
['make', 'MAKELEVEL=0', 'install'],
cwd=str(build_dir), check=True)
if build_contrib or fresh_build or is_outdated:
subprocess.run(
['make', '-C', 'contrib', 'MAKELEVEL=0', 'install'],
cwd=str(build_dir), check=True)
with open(postgres_build_stamp, 'w') as f:
f.write(source_stamp)
def _check_rust():
import packaging.version
try:
rustc_ver = (
subprocess.check_output(["rustc", '-V'], text=True).split()[1]
.rstrip("-nightly")
)
if (
packaging.version.parse(rustc_ver)
< packaging.version.parse(RUST_VERSION)
):
raise RuntimeError(
f'please upgrade Rust to {RUST_VERSION} to compile '
f'edgedb from source')
except FileNotFoundError:
raise RuntimeError(
f'please install rustc >= {RUST_VERSION} to compile '
f'edgedb from source (see https://rustup.rs/)')
def _get_edgedbcli_rev():
output = subprocess.check_output(
['git', 'ls-remote', EDGEDBCLI_REPO, 'master'],
universal_newlines=True,
)
rev, _ = output.split()
return rev
def _get_pg_source_stamp():
output = subprocess.check_output(
['git', 'submodule', 'status', 'postgres'], universal_newlines=True,
)
revision, _, _ = output[1:].partition(' ')
# I don't know why we needed the first empty char, but we don't want to
# force everyone to rebuild postgres either
source_stamp = output[0] + revision
return source_stamp
def _compile_cli(build_base, build_temp):
_check_rust()
rust_root = build_base / 'cli'
env = dict(os.environ)
env['CARGO_TARGET_DIR'] = str(build_temp / 'rust' / 'cli')
env['PSQL_DEFAULT_PATH'] = build_base / 'postgres' / 'install' / 'bin'
git_rev = env.get("EDGEDBCLI_GIT_REV")
if not git_rev:
git_rev = _get_edgedbcli_rev()
subprocess.run(
[
'cargo', 'install',
'--verbose', '--verbose',
'--git', EDGEDBCLI_REPO,
'--rev', git_rev,
'--bin', 'edgedb',
'--root', rust_root,
'--features=dev_mode',
'--locked',
'--debug',
],
env=env,
check=True,
)
shutil.copy(
rust_root / 'bin' / 'edgedb',
ROOT_PATH / 'edb' / 'cli' / 'edgedb',
)
class build(distutils_build.build):
user_options = distutils_build.build.user_options + [
('pg-config=', None, 'path to pg_config to use with this build'),
('runstatedir=', None, 'directory to use for the runtime state'),
('shared-dir=', None, 'directory to use for shared data'),
('version-suffix=', None, 'dot-separated local version suffix'),
]
def initialize_options(self):
super().initialize_options()
self.pg_config = None
self.runstatedir = None
self.shared_dir = None
self.version_suffix = None
def finalize_options(self):
super().finalize_options()
if self.pg_config is None:
self.pg_config = os.environ.get("EDGEDB_BUILD_PG_CONFIG")
if self.runstatedir is None:
self.runstatedir = os.environ.get("EDGEDB_BUILD_RUNSTATEDIR")
if self.shared_dir is None:
self.shared_dir = os.environ.get("EDGEDB_BUILD_SHARED_DIR")
if self.version_suffix is None:
self.version_suffix = os.environ.get("EDGEDB_BUILD_VERSION_SUFFIX")
def run(self, *args, **kwargs):
super().run(*args, **kwargs)
build_lib = pathlib.Path(self.build_lib)
_compile_parsers(build_lib)
if (
self.pg_config
or self.runstatedir
or self.shared_dir
or self.version_suffix
):
_compile_build_meta(
build_lib,
self.distribution.metadata.version,
self.pg_config,
self.runstatedir,
self.shared_dir,
self.version_suffix,
)
class develop(setuptools_develop.develop):
def run(self, *args, **kwargs):
build = self.get_finalized_command('build')
build_temp = pathlib.Path(build.build_temp).resolve()
build_base = pathlib.Path(build.build_base).resolve()
_compile_cli(build_base, build_temp)
scripts = self.distribution.entry_points['console_scripts']
patched_scripts = []
for s in scripts:
if 'rustcli' not in s:
s = f'{s}_dev'
patched_scripts.append(s)
patched_scripts.append('edb = edb.tools.edb:edbcommands')
self.distribution.entry_points['console_scripts'] = patched_scripts
super().run(*args, **kwargs)
_compile_parsers(build_base / 'lib', inplace=True)
_compile_postgres(build_base)
class ci_helper(setuptools.Command):
description = "echo specified hash or build info for CI"
user_options = [
('type=', None,
'one of: cli, rust, ext, parsers, postgres, bootstrap, '
'build_temp, build_lib'),
]
def run(self):
import edb as _edb
from edb.buildmeta import hash_dirs, get_cache_src_dirs
build = self.get_finalized_command('build')
pkg_dir = pathlib.Path(_edb.__path__[0])
if self.type == 'parsers':
parser_hash = hash_dirs(
[(pkg_dir / 'edgeql/parser/grammar', '.py')],
extra_files=[pkg_dir / 'edgeql-parser/src/keywords.rs'],
)
print(binascii.hexlify(parser_hash).decode())
elif self.type == 'postgres':
print(_get_pg_source_stamp().strip())
elif self.type == 'bootstrap':
bootstrap_hash = hash_dirs(
get_cache_src_dirs(),
extra_files=[pkg_dir / 'server/bootstrap.py'],
)
print(binascii.hexlify(bootstrap_hash).decode())
elif self.type == 'rust':
rust_hash = hash_dirs([
(pkg_dir / 'edgeql-parser', '.rs'),
(pkg_dir / 'edgeql-rust', '.rs'),
(pkg_dir / 'graphql-rewrite', '.rs'),
], extra_files=[
pkg_dir / 'edgeql-parser/Cargo.toml',
pkg_dir / 'edgeql-rust/Cargo.toml',
pkg_dir / 'graphql-rewrite/Cargo.toml',
])
print(binascii.hexlify(rust_hash).decode())
elif self.type == 'ext':
ext_hash = hash_dirs([
(pkg_dir, '.pyx'),
(pkg_dir, '.pyi'),
(pkg_dir, '.pxd'),
(pkg_dir, '.pxi'),
])
print(binascii.hexlify(ext_hash).decode())
elif self.type == 'cli':
print(_get_edgedbcli_rev())
elif self.type == 'build_temp':
print(pathlib.Path(build.build_temp).resolve())
elif self.type == 'build_lib':
print(pathlib.Path(build.build_lib).resolve())
else:
raise RuntimeError(
f'Illegal --type={self.type}; can only be: '
'cli, rust, ext, postgres, bootstrap, parsers,'
'build_temp or build_lib'
)
def initialize_options(self):
self.type = None
def finalize_options(self):
pass
class build_postgres(setuptools.Command):
description = "build postgres"
user_options = [
('configure', None, 'run ./configure'),
('build-contrib', None, 'build contrib'),
('fresh-build', None, 'rebuild from scratch'),
]
def initialize_options(self):
self.configure = False
self.build_contrib = False
self.fresh_build = False
def finalize_options(self):
pass
def run(self, *args, **kwargs):
build = self.get_finalized_command('build')
_compile_postgres(
pathlib.Path(build.build_base).resolve(),
force_build=True,
fresh_build=self.fresh_build,
run_configure=self.configure,
build_contrib=self.build_contrib)
class build_ext(setuptools_build_ext.build_ext):
user_options = setuptools_build_ext.build_ext.user_options + [
('cython-annotate', None,
'Produce a colorized HTML version of the Cython source.'),
('cython-directives=', None,
'Cython compiler directives'),
]
def initialize_options(self):
# initialize_options() may be called multiple times on the
# same command object, so make sure not to override previously
# set options.
if getattr(self, '_initialized', False):
return
super(build_ext, self).initialize_options()
if os.environ.get('EDGEDB_DEBUG'):
self.cython_always = True
self.cython_annotate = True
self.cython_directives = "linetrace=True"
self.define = 'PG_DEBUG,CYTHON_TRACE,CYTHON_TRACE_NOGIL'
self.debug = True
else:
self.cython_always = False
self.cython_annotate = None
self.cython_directives = None
self.debug = False
self.build_mode = os.environ.get('BUILD_EXT_MODE', 'both')
def finalize_options(self):
# finalize_options() may be called multiple times on the
# same command object, so make sure not to override previously
# set options.
if getattr(self, '_initialized', False):
return
if self.build_mode not in {'both', 'py-only', 'skip'}:
raise RuntimeError(f'Illegal BUILD_EXT_MODE={self.build_mode}; '
f'can only be "both", "py-only" or "skip".')
if self.build_mode == 'skip':
super(build_ext, self).finalize_options()
return
import pkg_resources
# Double check Cython presence in case setup_requires
# didn't go into effect (most likely because someone
# imported Cython before setup_requires injected the
        # correct egg into sys.path).
try:
import Cython
except ImportError:
raise RuntimeError(
'please install {} to compile edgedb from source'.format(
CYTHON_DEPENDENCY))
cython_dep = pkg_resources.Requirement.parse(CYTHON_DEPENDENCY)
if Cython.__version__ not in cython_dep:
raise RuntimeError(
'edgedb requires {}, got Cython=={}'.format(
CYTHON_DEPENDENCY, Cython.__version__
))
from Cython.Build import cythonize
directives = {
'language_level': '3'
}
if self.cython_directives:
for directive in self.cython_directives.split(','):
k, _, v = directive.partition('=')
if v.lower() == 'false':
v = False
if v.lower() == 'true':
v = True
directives[k] = v
self.distribution.ext_modules[:] = cythonize(
self.distribution.ext_modules,
compiler_directives=directives,
annotate=self.cython_annotate,
include_path=["edb/server/pgproto/"])
super(build_ext, self).finalize_options()
def run(self):
if self.build_mode == 'both' and self.distribution.rust_extensions:
distutils.log.info("running build_rust")
_check_rust()
build_rust = self.get_finalized_command("build_rust")
build_rust.inplace = self.inplace
build_rust.plat_name = self.plat_name
build_rust.debug = self.debug
build_rust.run()
if self.build_mode != 'skip':
super().run()
class build_cli(setuptools.Command):
description = "build the EdgeDB CLI"
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self, *args, **kwargs):
build = self.get_finalized_command('build')
_compile_cli(
pathlib.Path(build.build_base).resolve(),
pathlib.Path(build.build_temp).resolve(),
)
class build_parsers(setuptools.Command):
description = "build the parsers"
user_options = [
('inplace', None,
'ignore build-lib and put compiled parsers into the source directory '
'alongside your pure Python modules')]
def initialize_options(self):
self.inplace = None
def finalize_options(self):
pass
def run(self, *args, **kwargs):
build = self.get_finalized_command('build')
if self.inplace:
build_base = pathlib.Path(build.build_base).resolve()
_compile_parsers(build_base / 'lib', inplace=True)
else:
build_lib = pathlib.Path(build.build_lib)
_compile_parsers(build_lib)
COMMAND_CLASSES = {
'build': build,
'build_ext': build_ext,
'develop': develop,
'build_postgres': build_postgres,
'build_cli': build_cli,
'build_parsers': build_parsers,
'ci_helper': ci_helper,
}
if setuptools_rust is not None:
rust_extensions = [
setuptools_rust.RustExtension(
"edb._edgeql_rust",
path="edb/edgeql-rust/Cargo.toml",
binding=setuptools_rust.Binding.RustCPython,
),
setuptools_rust.RustExtension(
"edb._graphql_rewrite",
path="edb/graphql-rewrite/Cargo.toml",
binding=setuptools_rust.Binding.RustCPython,
),
]
class build_rust(setuptools_rust.build.build_rust):
def run(self):
_check_rust()
build_ext = self.get_finalized_command("build_ext")
copy_list = []
if not build_ext.inplace:
for ext in self.distribution.rust_extensions:
# Always build in-place because later stages of the build
# may depend on the modules having been built
dylib_path = pathlib.Path(
build_ext.get_ext_fullpath(ext.name))
build_ext.inplace = True
target_path = pathlib.Path(
build_ext.get_ext_fullpath(ext.name))
build_ext.inplace = False
copy_list.append((dylib_path, target_path))
# Workaround a bug in setuptools-rust: it uses
# shutil.copyfile(), which is not safe w.r.t mmap,
# so if the target module has been previously loaded
# bad things will happen.
if target_path.exists():
target_path.unlink()
target_path.parent.mkdir(parents=True, exist_ok=True)
os.environ['CARGO_TARGET_DIR'] = str(
pathlib.Path(build_ext.build_temp) / 'rust' / 'extensions',
)
super().run()
for src, dst in copy_list:
shutil.copyfile(src, dst)
COMMAND_CLASSES['build_rust'] = build_rust
else:
rust_extensions = []
def _version():
from edb import buildmeta
return buildmeta.get_version_from_scm(ROOT_PATH)
setuptools.setup(
version=_version(),
setup_requires=RUNTIME_DEPS + BUILD_DEPS,
python_requires='>=3.9.0',
name='edgedb-server',
description='EdgeDB Server',
author='MagicStack Inc.',
author_email='[email protected]',
packages=['edb'],
include_package_data=True,
cmdclass=COMMAND_CLASSES,
entry_points={
'console_scripts': [
'edgedb-server = edb.server.main:main',
'edgedb = edb.cli:rustcli',
],
},
ext_modules=[
setuptools_extension.Extension(
"edb.server.cache.stmt_cache",
["edb/server/cache/stmt_cache.pyx"],
extra_compile_args=EXT_CFLAGS,
extra_link_args=EXT_LDFLAGS),
setuptools_extension.Extension(
"edb.protocol.protocol",
["edb/protocol/protocol.pyx"],
extra_compile_args=EXT_CFLAGS,
extra_link_args=EXT_LDFLAGS),
setuptools_extension.Extension(
"edb.server.pgproto.pgproto",
["edb/server/pgproto/pgproto.pyx"],
extra_compile_args=EXT_CFLAGS,
extra_link_args=EXT_LDFLAGS),
setuptools_extension.Extension(
"edb.server.dbview.dbview",
["edb/server/dbview/dbview.pyx"],
extra_compile_args=EXT_CFLAGS,
extra_link_args=EXT_LDFLAGS),
setuptools_extension.Extension(
"edb.server.protocol.binary",
["edb/server/protocol/binary.pyx"],
extra_compile_args=EXT_CFLAGS,
extra_link_args=EXT_LDFLAGS),
setuptools_extension.Extension(
"edb.server.protocol.notebook_ext",
["edb/server/protocol/notebook_ext.pyx"],
extra_compile_args=EXT_CFLAGS,
extra_link_args=EXT_LDFLAGS),
setuptools_extension.Extension(
"edb.server.protocol.edgeql_ext",
["edb/server/protocol/edgeql_ext.pyx"],
extra_compile_args=EXT_CFLAGS,
extra_link_args=EXT_LDFLAGS),
setuptools_extension.Extension(
"edb.server.protocol.protocol",
["edb/server/protocol/protocol.pyx"],
extra_compile_args=EXT_CFLAGS,
extra_link_args=EXT_LDFLAGS),
setuptools_extension.Extension(
"edb.server.pgcon.pgcon",
["edb/server/pgcon/pgcon.pyx"],
extra_compile_args=EXT_CFLAGS,
extra_link_args=EXT_LDFLAGS),
setuptools_extension.Extension(
"edb.graphql.extension",
["edb/graphql/extension.pyx"],
extra_compile_args=EXT_CFLAGS,
extra_link_args=EXT_LDFLAGS),
],
rust_extensions=rust_extensions,
install_requires=RUNTIME_DEPS,
extras_require=EXTRA_DEPS,
)
| []
| []
| [
"EDGEDB_DEBUG",
"EDGEDB_BUILD_PG_CONFIG",
"BUILD_EXT_MODE",
"EDGEDB_BUILD_SHARED_DIR",
"CARGO_TARGET_DIR",
"EDGEDB_BUILD_VERSION_SUFFIX",
"EDGEDB_BUILD_RUNSTATEDIR"
]
| [] | ["EDGEDB_DEBUG", "EDGEDB_BUILD_PG_CONFIG", "BUILD_EXT_MODE", "EDGEDB_BUILD_SHARED_DIR", "CARGO_TARGET_DIR", "EDGEDB_BUILD_VERSION_SUFFIX", "EDGEDB_BUILD_RUNSTATEDIR"] | python | 7 | 0 | |
manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jarvis.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| []
| []
| []
| [] | [] | python | 0 | 0 | |
integration-tests/integration_test.go | package integration_tests
import (
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"testing"
"text/template"
"time"
"github.com/goguardian/goguardian-go-kcl/runner"
)
const defaultTimeout = 30 * time.Second
type propertiesVar struct {
StreamName string
AppName string
}
func TestRecordsReceived(t *testing.T) {
now := time.Now()
testStreamName := fmt.Sprintf("stream_%d", now.Unix())
testAppName := fmt.Sprintf("app_%d", now.Unix())
fmt.Println("Getting local kinesis client")
tClient, err := GetLocalKinesisClient()
if err != nil {
t.Fatal(err)
}
fmt.Println("deleting kinesis stream if present")
err = tClient.DeleteStream(testStreamName, defaultTimeout)
if err != nil {
t.Fatal(err)
}
fmt.Println("creating new kinesis stream")
err = tClient.CreateStream(testStreamName, 4, defaultTimeout)
if err != nil {
t.Fatal(err)
}
fmt.Println("putting records in kinesis stream")
err = tClient.PutRecords(testStreamName, []string{"alice", "bob", "charlie"})
if err != nil {
t.Fatal(err)
}
// Create properties file for this test consumer
tmpl, err := template.ParseFiles("test-app/test_app_properties.tmpl")
if err != nil {
t.Fatal("failed to parse properties template file")
}
propertiesFile, err := ioutil.TempFile("", "test_app_properties")
if err != nil {
t.Fatal("failed to create properties file")
}
err = tmpl.Execute(propertiesFile, propertiesVar{
AppName: testAppName,
StreamName: testStreamName,
})
if err != nil {
t.Fatal("failed to populate properties file")
}
propertiesFile.Close()
javaHome := os.Getenv("JAVA_HOME")
if javaHome == "" {
t.Fatal("JAVA_HOME environment variable not specified")
}
r, err := runner.GetRunner(
runner.WithPathToJarFolder("../jar"),
runner.WithPathToPropertiesFile(propertiesFile.Name()),
runner.WithPathToJavaBinary(javaHome+"/bin/java"),
runner.WithLogger(log.New(os.Stdout, "CUSTOM PREFIX:", 0)),
)
if err != nil {
t.Fatal("failed to get runner")
}
testPassed := make(chan bool)
receiver := GetMessageReceiver()
go func() {
receivedRecords := map[string]bool{}
for {
req := <-receiver.processRecordsChan
for _, record := range req.Records {
receivedRecords[string(record.Data)] = true
}
if receivedRecords["alice"] && receivedRecords["bob"] && receivedRecords["charlie"] {
fmt.Println("found all the records")
testPassed <- true
}
}
}()
var cmd *exec.Cmd
go func() {
var err error
cmd, err = r.RunJavaDaemon("-Daws.accessKeyId=some_key", "-Daws.secretKey=some_secret_key")
if err != nil {
			// t.Fatal must only be called from the test goroutine; report the error instead
			t.Error(err)
}
}()
<-testPassed
cmd.Process.Kill()
}
| [
"\"JAVA_HOME\""
]
| []
| [
"JAVA_HOME"
]
| [] | ["JAVA_HOME"] | go | 1 | 0 | |
cmd/do/config.go | // Copyright (C) 2017 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// The do command wraps CMake, simplifying the building GAPID in common
// configurations.
package main
import (
"bufio"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"os/user"
"reflect"
"regexp"
"runtime"
"strings"
"github.com/google/gapid/core/os/file"
)
type enum interface {
Options() []string
String() string
Set(string) bool
}
type Flavor string
func (Flavor) Options() []string { return []string{"release", "debug"} }
func (f Flavor) String() string { return string(f) }
func (f *Flavor) Set(v string) bool {
for _, o := range f.Options() {
if o == v {
*f = Flavor(o)
return true
}
}
return false
}
// Config is the structure that holds all the configuration settings.
type Config struct {
Flavor Flavor `desc:"Build flavor"`
OutRoot file.Path `desc:"Build output directory"`
JavaHome file.Path `desc:"Path to JDK root" type:"dir"`
AndroidSDKRoot file.Path `desc:"Path to Android SDK" type:"dir"`
AndroidNDKRoot file.Path `desc:"Path to Android NDK r15" type:"dir"`
CMakePath file.Path `desc:"Path to CMake executable" type:"file"`
NinjaPath file.Path `desc:"Path to ninja executable" type:"file"`
PythonPath file.Path `desc:"Path to python executable" type:"file"`
MSYS2Path file.Path `desc:"Path to msys2 root" type:"dir" os:"windows"`
ArmLinuxGapii bool `desc:"Build additional gapii for armlinux"`
}
func defaults() Config {
u, _ := user.Current()
cfg := Config{}
cfg.Flavor = "release"
cfg.OutRoot = file.Abs(u.HomeDir).Join("gapid")
cfg.JavaHome = file.Abs(os.Getenv("JAVA_HOME"))
cfg.AndroidSDKRoot = file.Abs(os.Getenv("ANDROID_HOME"))
cfg.AndroidNDKRoot = file.Abs(os.Getenv("ANDROID_NDK_ROOT"))
cfg.CMakePath, _ = file.FindExecutable("cmake")
cfg.NinjaPath, _ = file.FindExecutable("ninja")
cfg.PythonPath, _ = file.FindExecutable("python")
return cfg
}
func (cfg Config) out() file.Path { return cfg.OutRoot.Join(cfg.Flavor.String()) }
func (cfg Config) bin() file.Path { return cfg.out().Join("bin") }
func (cfg Config) pkg() file.Path { return cfg.out().Join("pkg") }
func (cfg Config) versionFile() file.Path { return cfg.out().Join("do-version.txt") }
func (cfg Config) cacheFile() file.Path { return cfg.out().Join("CMakeCache.txt") }
func (cfg Config) loadBuildVersion() (int, int) {
data, err := ioutil.ReadFile(cfg.versionFile().System())
if err != nil {
return 0, 0
}
var major, minor int
fmt.Sscanf(string(data), "%d.%d", &major, &minor)
return major, minor
}
func (cfg Config) storeBuildVersion() {
str := fmt.Sprintf("%d.%d", versionMajor, versionMinor)
ioutil.WriteFile(cfg.versionFile().System(), []byte(str), 0666)
}
func readConfig() (Config, bool) {
def := defaults()
data, err := ioutil.ReadFile(cfgPath)
if err != nil {
return def, false
}
cfg := def
if err := json.Unmarshal(data, &cfg); err != nil {
return def, false
}
return cfg, true
}
func writeConfig(cfg Config) {
data, err := json.MarshalIndent(cfg, "", " ")
if err != nil {
panic(err)
}
if err := ioutil.WriteFile(cfgPath, data, 0666); err != nil {
panic(err)
}
}
func fetchValidConfig(ctx context.Context, options ConfigOptions) Config {
cfg, found := readConfig()
if options.Reset {
cfg = defaults()
}
askForAll := !found || options.Interactive
v := reflect.ValueOf(&cfg).Elem()
t := v.Type()
for i, c := 0, t.NumField(); i < c; i++ {
f, t := v.Field(i), t.Field(i)
if os := t.Tag.Get("os"); os != "" && os != runtime.GOOS {
continue
}
v := f.Addr().Interface()
if !askForAll {
			err := validateField(v, t)
if err != nil {
fmt.Println(err)
} else {
continue
}
}
		retries := 0
for {
			if retries == 10 {
fmt.Println("Aborting after 10 failed attempts")
os.Exit(1)
}
			retries++
if err := inputField(v, t); err != nil {
fmt.Println(err)
continue
}
			if err := validateField(v, t); err != nil {
fmt.Println(err)
continue
}
break
}
}
writeConfig(cfg)
return cfg
}
func inputField(v interface{}, t reflect.StructField) error {
desc := t.Tag.Get("desc")
if desc == "" {
desc = t.Name
}
switch v := v.(type) {
case enum:
options := v.Options()
fmt.Printf(" • %s. One of: %v [Default: %v]\n", desc, strings.Join(options, ", "), v)
if in := readLine(); in != "" {
if !v.Set(in) {
return fmt.Errorf("Must be one of: %v", strings.Join(options, ", "))
}
}
case *string:
fmt.Printf(" • %s [Default: %q]\n", desc, *v)
if in := readLine(); in != "" {
*v = in
}
case *bool:
fmt.Printf(" • %s [Default: %v]\n", desc, *v)
if in := readLine(); in != "" {
val, ok := parseBool(in)
if !ok {
return fmt.Errorf("Must be yes/true or no/false")
}
*v = val
}
case *file.Path:
fmt.Printf(" • %s [Default: %v]\n", desc, v.System())
if in := readLine(); in != "" {
*v = file.Abs(in)
}
default:
panic(fmt.Sprintf("Unknown type %T in config struct", v))
}
return nil
}
type validator struct{}
// ValidateAndroidNDKRoot checks the AndroidNDKRoot field.
func (validator) ValidateAndroidNDKRoot(path file.Path) error {
text, err := ioutil.ReadFile(path.Join("source.properties").System())
if err == nil {
re := regexp.MustCompile(`Pkg\.Revision = ([0-9]+).([0-9]+).([0-9]+)`)
for _, line := range strings.Split(string(text), "\n") {
groups := re.FindStringSubmatch(line)
if len(groups) < 4 {
continue
}
major, minor := groups[1], groups[2]
if major != "15" && major != "16" {
return fmt.Errorf("Found NDK %v.%v. Must be r15 or r16", major, minor)
}
return nil
}
}
return fmt.Errorf("Couldn't determine version of the NDK. Must be r15")
}
func validateField(v interface{}, t reflect.StructField) error {
m, ok := reflect.TypeOf(validator{}).MethodByName("Validate" + t.Name)
if ok {
err := m.Func.Call([]reflect.Value{
reflect.ValueOf(validator{}),
reflect.ValueOf(v).Elem()},
)[0].Interface()
if err != nil {
return err.(error)
}
}
switch v := v.(type) {
case *file.Path:
switch t.Tag.Get("type") {
case "file":
if !v.Exists() {
return fmt.Errorf("Path does not exist")
}
if v.IsDir() {
return fmt.Errorf("The provided path is a directory, please provide the path to the executable")
}
case "dir":
if !v.Exists() {
return fmt.Errorf("Path does not exist")
}
if !v.IsDir() {
return fmt.Errorf("The provided path is not a directory")
}
}
}
return nil
}
func readLine() string {
r := bufio.NewReader(os.Stdin)
l, _ := r.ReadString('\n')
return strings.Trim(l, "\n\r")
}
func parseBool(str string) (val, ok bool) {
switch strings.ToLower(str) {
case "yes", "y", "true":
return true, true
case "no", "n", "false":
return false, true
}
return false, false
}
| [
"\"JAVA_HOME\"",
"\"ANDROID_HOME\"",
"\"ANDROID_NDK_ROOT\""
]
| []
| [
"JAVA_HOME",
"ANDROID_HOME",
"ANDROID_NDK_ROOT"
]
| [] | ["JAVA_HOME", "ANDROID_HOME", "ANDROID_NDK_ROOT"] | go | 3 | 0 | |
ipb/wsgi.py | """
WSGI config for ipb project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ipb.settings')
application = get_wsgi_application()
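# Illustrative sketch (an addition for clarity, not part of the original project):
# any WSGI server consumes the ``application`` callable above. Running this module
# directly with the stdlib reference server is one quick way to exercise it; in
# production a server such as gunicorn would be pointed at ``ipb.wsgi:application``.
if __name__ == '__main__':
    from wsgiref.simple_server import make_server
    # Serve on localhost:8000 for a quick manual check.
    make_server('127.0.0.1', 8000, application).serve_forever()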
| []
| []
| []
| [] | [] | python | 0 | 0 | |
pkg/cli/common.go | package cli
// the cli package contains urfave/cli related structs that help make up
// the command line for buildah commands. it resides here so other projects
// that vendor in this code can use them too.
import (
"fmt"
"os"
"runtime"
"strings"
"github.com/containers/buildah/define"
"github.com/containers/buildah/pkg/completion"
"github.com/containers/buildah/pkg/parse"
commonComp "github.com/containers/common/pkg/completion"
"github.com/containers/common/pkg/config"
"github.com/containers/storage/pkg/unshare"
"github.com/opencontainers/runtime-spec/specs-go"
"github.com/pkg/errors"
"github.com/spf13/pflag"
)
// LayerResults represents the results of the layer flags
type LayerResults struct {
ForceRm bool
Layers bool
}
// UserNSResults represents the results for the UserNS flags
type UserNSResults struct {
UserNS string
UserNSUIDMap []string
UserNSGIDMap []string
UserNSUIDMapUser string
UserNSGIDMapGroup string
}
// NameSpaceResults represents the results for Namespace flags
type NameSpaceResults struct {
Cgroup string
IPC string
Network string
CNIConfigDir string
CNIPlugInPath string
PID string
UTS string
}
// BudResults represents the results for Build flags
type BudResults struct {
AllPlatforms bool
Annotation []string
Authfile string
BuildArg []string
BuildContext []string
CacheFrom string
CertDir string
Compress bool
Creds string
CPPFlags []string
DisableCompression bool
DisableContentTrust bool
IgnoreFile string
File []string
Format string
From string
Iidfile string
Label []string
Logfile string
Manifest string
NoHosts bool
NoCache bool
Timestamp int64
OmitHistory bool
Pull string
PullAlways bool
PullNever bool
Quiet bool
IdentityLabel bool
Rm bool
Runtime string
RuntimeFlags []string
Secrets []string
SSH []string
SignaturePolicy string
SignBy string
Squash bool
Stdin bool
Tag []string
BuildOutput string
Target string
TLSVerify bool
Jobs int
LogRusage bool
RusageLogFile string
UnsetEnvs []string
Envs []string
OSFeatures []string
OSVersion string
}
// FromAndBudResults represents the results for common flags
// in build and from
type FromAndBudResults struct {
AddHost []string
BlobCache string
CapAdd []string
CapDrop []string
CgroupParent string
CPUPeriod uint64
CPUQuota int64
CPUSetCPUs string
CPUSetMems string
CPUShares uint64
DecryptionKeys []string
Devices []string
DNSSearch []string
DNSServers []string
DNSOptions []string
HTTPProxy bool
Isolation string
Memory string
MemorySwap string
SecurityOpt []string
ShmSize string
Ulimit []string
Volumes []string
}
// GetUserNSFlags returns the common flags for usernamespace
func GetUserNSFlags(flags *UserNSResults) pflag.FlagSet {
usernsFlags := pflag.FlagSet{}
usernsFlags.StringVar(&flags.UserNS, "userns", "", "'container', `path` of user namespace to join, or 'host'")
usernsFlags.StringSliceVar(&flags.UserNSUIDMap, "userns-uid-map", []string{}, "`containerUID:hostUID:length` UID mapping to use in user namespace")
usernsFlags.StringSliceVar(&flags.UserNSGIDMap, "userns-gid-map", []string{}, "`containerGID:hostGID:length` GID mapping to use in user namespace")
usernsFlags.StringVar(&flags.UserNSUIDMapUser, "userns-uid-map-user", "", "`name` of entries from /etc/subuid to use to set user namespace UID mapping")
usernsFlags.StringVar(&flags.UserNSGIDMapGroup, "userns-gid-map-group", "", "`name` of entries from /etc/subgid to use to set user namespace GID mapping")
return usernsFlags
}
// GetUserNSFlagsCompletions returns the FlagCompletions for the userns flags
func GetUserNSFlagsCompletions() commonComp.FlagCompletions {
flagCompletion := commonComp.FlagCompletions{}
flagCompletion["userns"] = completion.AutocompleteNamespaceFlag
flagCompletion["userns-uid-map"] = commonComp.AutocompleteNone
flagCompletion["userns-gid-map"] = commonComp.AutocompleteNone
flagCompletion["userns-uid-map-user"] = commonComp.AutocompleteSubuidName
flagCompletion["userns-gid-map-group"] = commonComp.AutocompleteSubgidName
return flagCompletion
}
// GetNameSpaceFlags returns the common flags for a namespace menu
func GetNameSpaceFlags(flags *NameSpaceResults) pflag.FlagSet {
fs := pflag.FlagSet{}
fs.StringVar(&flags.Cgroup, "cgroupns", "", "'private', or 'host'")
fs.StringVar(&flags.IPC, string(specs.IPCNamespace), "", "'private', `path` of IPC namespace to join, or 'host'")
fs.StringVar(&flags.Network, string(specs.NetworkNamespace), "", "'private', 'none', 'ns:path' of network namespace to join, or 'host'")
fs.StringVar(&flags.CNIConfigDir, "cni-config-dir", "", "`directory` of CNI configuration files")
_ = fs.MarkHidden("cni-config-dir")
fs.StringVar(&flags.CNIPlugInPath, "cni-plugin-path", "", "`path` of CNI network plugins")
_ = fs.MarkHidden("cni-plugin-path")
fs.StringVar(&flags.PID, string(specs.PIDNamespace), "", "private, `path` of PID namespace to join, or 'host'")
fs.StringVar(&flags.UTS, string(specs.UTSNamespace), "", "private, :`path` of UTS namespace to join, or 'host'")
return fs
}
// GetNameSpaceFlagsCompletions returns the FlagCompletions for the namespace flags
func GetNameSpaceFlagsCompletions() commonComp.FlagCompletions {
flagCompletion := commonComp.FlagCompletions{}
flagCompletion["cgroupns"] = completion.AutocompleteNamespaceFlag
flagCompletion[string(specs.IPCNamespace)] = completion.AutocompleteNamespaceFlag
flagCompletion[string(specs.NetworkNamespace)] = completion.AutocompleteNamespaceFlag
flagCompletion[string(specs.PIDNamespace)] = completion.AutocompleteNamespaceFlag
flagCompletion[string(specs.UTSNamespace)] = completion.AutocompleteNamespaceFlag
return flagCompletion
}
// GetLayerFlags returns the common flags for layers
func GetLayerFlags(flags *LayerResults) pflag.FlagSet {
fs := pflag.FlagSet{}
fs.BoolVar(&flags.ForceRm, "force-rm", false, "always remove intermediate containers after a build, even if the build is unsuccessful.")
fs.BoolVar(&flags.Layers, "layers", UseLayers(), "cache intermediate layers during build. Use BUILDAH_LAYERS environment variable to override.")
return fs
}
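// Illustrative sketch only (an assumption, not buildah's actual implementation):
// the UseLayers() default referenced above is documented as overridable via the
// BUILDAH_LAYERS environment variable, so a minimal version of such a helper
// could look like this.
func useLayersSketch() bool {
	// Treat common truthy spellings of BUILDAH_LAYERS as enabling layer caching.
	switch strings.ToLower(os.Getenv("BUILDAH_LAYERS")) {
	case "1", "true", "yes":
		return true
	}
	return false
}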
// Note: GetLayerFlagsCompletion is not needed since GetLayerFlags only contains bool flags
// GetBudFlags returns common build flags
func GetBudFlags(flags *BudResults) pflag.FlagSet {
fs := pflag.FlagSet{}
fs.BoolVar(&flags.AllPlatforms, "all-platforms", false, "attempt to build for all base image platforms")
fs.String("arch", runtime.GOARCH, "set the ARCH of the image to the provided value instead of the architecture of the host")
fs.StringArrayVar(&flags.Annotation, "annotation", []string{}, "set metadata for an image (default [])")
fs.StringVar(&flags.Authfile, "authfile", "", "path of the authentication file.")
fs.StringArrayVar(&flags.BuildArg, "build-arg", []string{}, "`argument=value` to supply to the builder")
fs.StringArrayVar(&flags.BuildContext, "build-context", []string{}, "`argument=value` to supply additional build context to the builder")
fs.StringVar(&flags.CacheFrom, "cache-from", "", "images to utilise as potential cache sources. The build process does not currently support caching so this is a NOOP.")
fs.StringVar(&flags.CertDir, "cert-dir", "", "use certificates at the specified path to access the registry")
fs.BoolVar(&flags.Compress, "compress", false, "this is a legacy option, which has no effect on the image")
fs.StringArrayVar(&flags.CPPFlags, "cpp-flag", []string{}, "set additional flag to pass to C preprocessor (cpp)")
fs.StringVar(&flags.Creds, "creds", "", "use `[username[:password]]` for accessing the registry")
fs.BoolVarP(&flags.DisableCompression, "disable-compression", "D", true, "don't compress layers by default")
fs.BoolVar(&flags.DisableContentTrust, "disable-content-trust", false, "this is a Docker specific option and is a NOOP")
fs.StringArrayVar(&flags.Envs, "env", []string{}, "set environment variable for the image")
fs.StringVar(&flags.From, "from", "", "image name used to replace the value in the first FROM instruction in the Containerfile")
fs.StringVar(&flags.IgnoreFile, "ignorefile", "", "path to an alternate .dockerignore file")
fs.StringSliceVarP(&flags.File, "file", "f", []string{}, "`pathname or URL` of a Dockerfile")
fs.StringVar(&flags.Format, "format", DefaultFormat(), "`format` of the built image's manifest and metadata. Use BUILDAH_FORMAT environment variable to override.")
fs.StringVar(&flags.Iidfile, "iidfile", "", "`file` to write the image ID to")
fs.IntVar(&flags.Jobs, "jobs", 1, "how many stages to run in parallel")
fs.StringArrayVar(&flags.Label, "label", []string{}, "set metadata for an image (default [])")
fs.StringVar(&flags.Logfile, "logfile", "", "log to `file` instead of stdout/stderr")
fs.Int("loglevel", 0, "NO LONGER USED, flag ignored, and hidden")
if err := fs.MarkHidden("loglevel"); err != nil {
panic(fmt.Sprintf("error marking the loglevel flag as hidden: %v", err))
}
fs.BoolVar(&flags.LogRusage, "log-rusage", false, "log resource usage at each build step")
if err := fs.MarkHidden("log-rusage"); err != nil {
panic(fmt.Sprintf("error marking the log-rusage flag as hidden: %v", err))
}
fs.StringVar(&flags.RusageLogFile, "rusage-logfile", "", "destination file to which rusage should be logged to instead of stdout (= the default).")
if err := fs.MarkHidden("rusage-logfile"); err != nil {
panic(fmt.Sprintf("error marking the rusage-logfile flag as hidden: %v", err))
}
fs.StringVar(&flags.Manifest, "manifest", "", "add the image to the specified manifest list. Creates manifest list if it does not exist")
fs.BoolVar(&flags.NoHosts, "no-hosts", false, "do not create new /etc/hosts files for RUN instructions, use the one from the base image.")
fs.BoolVar(&flags.NoCache, "no-cache", false, "do not use existing cached images for the container build. Build from the start with a new set of cached layers.")
fs.String("os", runtime.GOOS, "set the OS to the provided value instead of the current operating system of the host")
fs.StringArrayVar(&flags.OSFeatures, "os-feature", []string{}, "set required OS `feature` for the target image in addition to values from the base image")
fs.StringVar(&flags.OSVersion, "os-version", "", "set required OS `version` for the target image instead of the value from the base image")
fs.StringVar(&flags.Pull, "pull", "true", "pull the image from the registry if newer or not present in store, if false, only pull the image if not present, if always, pull the image even if the named image is present in store, if never, only use the image present in store if available")
fs.Lookup("pull").NoOptDefVal = "true" //allow `--pull ` to be set to `true` as expected.
fs.BoolVar(&flags.PullAlways, "pull-always", false, "pull the image even if the named image is present in store")
if err := fs.MarkHidden("pull-always"); err != nil {
panic(fmt.Sprintf("error marking the pull-always flag as hidden: %v", err))
}
fs.BoolVar(&flags.PullNever, "pull-never", false, "do not pull the image, use the image present in store if available")
if err := fs.MarkHidden("pull-never"); err != nil {
panic(fmt.Sprintf("error marking the pull-never flag as hidden: %v", err))
}
fs.BoolVarP(&flags.Quiet, "quiet", "q", false, "refrain from announcing build instructions and image read/write progress")
fs.BoolVar(&flags.OmitHistory, "omit-history", false, "omit build history information from built image")
fs.BoolVar(&flags.IdentityLabel, "identity-label", true, "add default identity label")
fs.BoolVar(&flags.Rm, "rm", true, "remove intermediate containers after a successful build")
// "runtime" definition moved to avoid name collision in podman build. Defined in cmd/buildah/build.go.
fs.StringSliceVar(&flags.RuntimeFlags, "runtime-flag", []string{}, "add global flags for the container runtime")
fs.StringArrayVar(&flags.Secrets, "secret", []string{}, "secret file to expose to the build")
fs.StringVar(&flags.SignBy, "sign-by", "", "sign the image using a GPG key with the specified `FINGERPRINT`")
fs.StringVar(&flags.SignaturePolicy, "signature-policy", "", "`pathname` of signature policy file (not usually used)")
if err := fs.MarkHidden("signature-policy"); err != nil {
panic(fmt.Sprintf("error marking the signature-policy flag as hidden: %v", err))
}
fs.BoolVar(&flags.Squash, "squash", false, "squash newly built layers into a single new layer")
fs.StringArrayVar(&flags.SSH, "ssh", []string{}, "SSH agent socket or keys to expose to the build. (format: default|<id>[=<socket>|<key>[,<key>]])")
fs.BoolVar(&flags.Stdin, "stdin", false, "pass stdin into containers")
fs.StringArrayVarP(&flags.Tag, "tag", "t", []string{}, "tagged `name` to apply to the built image")
fs.StringVarP(&flags.BuildOutput, "output", "o", "", "output destination (format: type=local,dest=path)")
fs.StringVar(&flags.Target, "target", "", "set the target build stage to build")
fs.Int64Var(&flags.Timestamp, "timestamp", 0, "set created timestamp to the specified epoch seconds to allow for deterministic builds, defaults to current time")
fs.BoolVar(&flags.TLSVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry")
fs.String("variant", "", "override the `variant` of the specified image")
fs.StringSliceVar(&flags.UnsetEnvs, "unsetenv", nil, "unset environment variable from final image")
return fs
}
// GetBudFlagsCompletions returns the FlagCompletions for the common build flags
func GetBudFlagsCompletions() commonComp.FlagCompletions {
flagCompletion := commonComp.FlagCompletions{}
flagCompletion["annotation"] = commonComp.AutocompleteNone
flagCompletion["arch"] = commonComp.AutocompleteNone
flagCompletion["authfile"] = commonComp.AutocompleteDefault
flagCompletion["build-arg"] = commonComp.AutocompleteNone
flagCompletion["build-context"] = commonComp.AutocompleteNone
flagCompletion["cache-from"] = commonComp.AutocompleteNone
flagCompletion["cert-dir"] = commonComp.AutocompleteDefault
flagCompletion["cpp-flag"] = commonComp.AutocompleteNone
flagCompletion["creds"] = commonComp.AutocompleteNone
flagCompletion["env"] = commonComp.AutocompleteNone
flagCompletion["file"] = commonComp.AutocompleteDefault
flagCompletion["format"] = commonComp.AutocompleteNone
flagCompletion["from"] = commonComp.AutocompleteDefault
flagCompletion["ignorefile"] = commonComp.AutocompleteDefault
flagCompletion["iidfile"] = commonComp.AutocompleteDefault
flagCompletion["jobs"] = commonComp.AutocompleteNone
flagCompletion["label"] = commonComp.AutocompleteNone
flagCompletion["logfile"] = commonComp.AutocompleteDefault
flagCompletion["manifest"] = commonComp.AutocompleteDefault
flagCompletion["os"] = commonComp.AutocompleteNone
flagCompletion["os-feature"] = commonComp.AutocompleteNone
flagCompletion["os-version"] = commonComp.AutocompleteNone
flagCompletion["output"] = commonComp.AutocompleteNone
flagCompletion["pull"] = commonComp.AutocompleteDefault
flagCompletion["runtime-flag"] = commonComp.AutocompleteNone
flagCompletion["secret"] = commonComp.AutocompleteNone
flagCompletion["sign-by"] = commonComp.AutocompleteNone
flagCompletion["signature-policy"] = commonComp.AutocompleteNone
flagCompletion["ssh"] = commonComp.AutocompleteNone
flagCompletion["tag"] = commonComp.AutocompleteNone
flagCompletion["target"] = commonComp.AutocompleteNone
flagCompletion["timestamp"] = commonComp.AutocompleteNone
flagCompletion["unsetenv"] = commonComp.AutocompleteNone
flagCompletion["variant"] = commonComp.AutocompleteNone
return flagCompletion
}
// GetFromAndBudFlags returns from and build flags
func GetFromAndBudFlags(flags *FromAndBudResults, usernsResults *UserNSResults, namespaceResults *NameSpaceResults) (pflag.FlagSet, error) {
fs := pflag.FlagSet{}
defaultContainerConfig, err := config.Default()
if err != nil {
return fs, errors.Wrapf(err, "failed to get container config")
}
fs.StringSliceVar(&flags.AddHost, "add-host", []string{}, "add a custom host-to-IP mapping (`host:ip`) (default [])")
fs.StringVar(&flags.BlobCache, "blob-cache", "", "assume image blobs in the specified directory will be available for pushing")
if err := fs.MarkHidden("blob-cache"); err != nil {
panic(fmt.Sprintf("error marking net flag as hidden: %v", err))
}
fs.StringSliceVar(&flags.CapAdd, "cap-add", []string{}, "add the specified capability when running (default [])")
fs.StringSliceVar(&flags.CapDrop, "cap-drop", []string{}, "drop the specified capability when running (default [])")
fs.StringVar(&flags.CgroupParent, "cgroup-parent", "", "optional parent cgroup for the container")
fs.Uint64Var(&flags.CPUPeriod, "cpu-period", 0, "limit the CPU CFS (Completely Fair Scheduler) period")
fs.Int64Var(&flags.CPUQuota, "cpu-quota", 0, "limit the CPU CFS (Completely Fair Scheduler) quota")
fs.Uint64VarP(&flags.CPUShares, "cpu-shares", "c", 0, "CPU shares (relative weight)")
fs.StringVar(&flags.CPUSetCPUs, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)")
fs.StringVar(&flags.CPUSetMems, "cpuset-mems", "", "memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.")
fs.StringSliceVar(&flags.DecryptionKeys, "decryption-key", nil, "key needed to decrypt the image")
fs.StringArrayVar(&flags.Devices, "device", defaultContainerConfig.Containers.Devices, "additional devices to be used within containers (default [])")
fs.StringSliceVar(&flags.DNSSearch, "dns-search", defaultContainerConfig.Containers.DNSSearches, "set custom DNS search domains")
fs.StringSliceVar(&flags.DNSServers, "dns", defaultContainerConfig.Containers.DNSServers, "set custom DNS servers or disable it completely by setting it to 'none', which prevents the automatic creation of `/etc/resolv.conf`.")
fs.StringSliceVar(&flags.DNSOptions, "dns-option", defaultContainerConfig.Containers.DNSOptions, "set custom DNS options")
fs.BoolVar(&flags.HTTPProxy, "http-proxy", true, "pass through HTTP Proxy environment variables")
fs.StringVar(&flags.Isolation, "isolation", DefaultIsolation(), "`type` of process isolation to use. Use BUILDAH_ISOLATION environment variable to override.")
fs.StringVarP(&flags.Memory, "memory", "m", "", "memory limit (format: <number>[<unit>], where unit = b, k, m or g)")
fs.StringVar(&flags.MemorySwap, "memory-swap", "", "swap limit equal to memory plus swap: '-1' to enable unlimited swap")
fs.String("arch", runtime.GOARCH, "set the ARCH of the image to the provided value instead of the architecture of the host")
fs.String("os", runtime.GOOS, "prefer `OS` instead of the running OS when pulling images")
fs.StringSlice("platform", []string{parse.DefaultPlatform()}, "set the OS/ARCH/VARIANT of the image to the provided value instead of the current operating system and architecture of the host (for example `linux/arm`)")
fs.String("variant", "", "override the `variant` of the specified image")
fs.StringArrayVar(&flags.SecurityOpt, "security-opt", []string{}, "security options (default [])")
fs.StringVar(&flags.ShmSize, "shm-size", defaultContainerConfig.Containers.ShmSize, "size of '/dev/shm'. The format is `<number><unit>`.")
fs.StringSliceVar(&flags.Ulimit, "ulimit", defaultContainerConfig.Containers.DefaultUlimits, "ulimit options")
fs.StringArrayVarP(&flags.Volumes, "volume", "v", defaultContainerConfig.Containers.Volumes, "bind mount a volume into the container")
// Add in the user namespace and namespace flags
usernsFlags := GetUserNSFlags(usernsResults)
namespaceFlags := GetNameSpaceFlags(namespaceResults)
fs.AddFlagSet(&usernsFlags)
fs.AddFlagSet(&namespaceFlags)
return fs, nil
}
// GetFromAndBudFlagsCompletions returns the FlagCompletions for the from and build flags
func GetFromAndBudFlagsCompletions() commonComp.FlagCompletions {
flagCompletion := commonComp.FlagCompletions{}
flagCompletion["arch"] = commonComp.AutocompleteNone
flagCompletion["add-host"] = commonComp.AutocompleteNone
flagCompletion["blob-cache"] = commonComp.AutocompleteNone
flagCompletion["cap-add"] = commonComp.AutocompleteCapabilities
flagCompletion["cap-drop"] = commonComp.AutocompleteCapabilities
flagCompletion["cgroup-parent"] = commonComp.AutocompleteDefault // FIXME: This would be a path right?!
flagCompletion["cpu-period"] = commonComp.AutocompleteNone
flagCompletion["cpu-quota"] = commonComp.AutocompleteNone
flagCompletion["cpu-shares"] = commonComp.AutocompleteNone
flagCompletion["cpuset-cpus"] = commonComp.AutocompleteNone
flagCompletion["cpuset-mems"] = commonComp.AutocompleteNone
flagCompletion["decryption-key"] = commonComp.AutocompleteNone
flagCompletion["device"] = commonComp.AutocompleteDefault
flagCompletion["dns-search"] = commonComp.AutocompleteNone
flagCompletion["dns"] = commonComp.AutocompleteNone
flagCompletion["dns-option"] = commonComp.AutocompleteNone
flagCompletion["isolation"] = commonComp.AutocompleteNone
flagCompletion["memory"] = commonComp.AutocompleteNone
flagCompletion["memory-swap"] = commonComp.AutocompleteNone
flagCompletion["os"] = commonComp.AutocompleteNone
flagCompletion["platform"] = commonComp.AutocompleteNone
flagCompletion["security-opt"] = commonComp.AutocompleteNone
flagCompletion["shm-size"] = commonComp.AutocompleteNone
flagCompletion["ulimit"] = commonComp.AutocompleteNone
flagCompletion["volume"] = commonComp.AutocompleteDefault
flagCompletion["variant"] = commonComp.AutocompleteNone
// Add in the user namespace and namespace flag completions
userNsComp := GetUserNSFlagsCompletions()
for name, comp := range userNsComp {
flagCompletion[name] = comp
}
namespaceComp := GetNameSpaceFlagsCompletions()
for name, comp := range namespaceComp {
flagCompletion[name] = comp
}
return flagCompletion
}
// UseLayers returns true if BUILDAH_LAYERS is set to "1" or "true"
// otherwise it returns false
func UseLayers() bool {
layers := os.Getenv("BUILDAH_LAYERS")
if strings.ToLower(layers) == "true" || layers == "1" {
return true
}
return false
}
// DefaultFormat returns the default image format
func DefaultFormat() string {
format := os.Getenv("BUILDAH_FORMAT")
if format != "" {
return format
}
return define.OCI
}
// DefaultIsolation returns the default process isolation type
func DefaultIsolation() string {
isolation := os.Getenv("BUILDAH_ISOLATION")
if isolation != "" {
return isolation
}
if unshare.IsRootless() {
return "rootless"
}
return define.OCI
}
// DefaultHistory returns the default add-history setting
func DefaultHistory() bool {
history := os.Getenv("BUILDAH_HISTORY")
if strings.ToLower(history) == "true" || history == "1" {
return true
}
return false
}
func VerifyFlagsArgsOrder(args []string) error {
for _, arg := range args {
if strings.HasPrefix(arg, "-") {
return errors.Errorf("No options (%s) can be specified after the image or container name", arg)
}
}
return nil
}
// AliasFlags normalizes deprecated flag names to their current equivalents for backwards compatibility
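// A typical wiring (shown here only as a sketch; the exact call site may differ) registers it
// as the flag-name normalizer on a pflag.FlagSet so that, for example, "--net" is
// accepted as "--network":
//
//	fs.SetNormalizeFunc(AliasFlags)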
func AliasFlags(f *pflag.FlagSet, name string) pflag.NormalizedName {
switch name {
case "net":
name = "network"
case "override-arch":
name = "arch"
case "override-os":
name = "os"
case "purge":
name = "rm"
case "tty":
name = "terminal"
}
return pflag.NormalizedName(name)
}
| [
"\"BUILDAH_LAYERS\"",
"\"BUILDAH_FORMAT\"",
"\"BUILDAH_ISOLATION\"",
"\"BUILDAH_HISTORY\""
]
| []
| [
"BUILDAH_ISOLATION",
"BUILDAH_HISTORY",
"BUILDAH_FORMAT",
"BUILDAH_LAYERS"
]
| [] | ["BUILDAH_ISOLATION", "BUILDAH_HISTORY", "BUILDAH_FORMAT", "BUILDAH_LAYERS"] | go | 4 | 0 | |
main.go | package main
import (
"fmt"
"net/http"
"os"
"os/signal"
"syscall"
"github.com/gomodule/redigo/redis"
"github.com/gorilla/mux"
"github.com/viveknathani/kkrh/cache"
"github.com/viveknathani/kkrh/database"
"github.com/viveknathani/kkrh/server"
"github.com/viveknathani/kkrh/service"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// Hold environment variables
var (
port string = ""
databaseServer string = ""
redisServer string = ""
redisUsername string = ""
redisPassword string = ""
jwtSecret string = ""
)
// Setup environment variables
func init() {
mode := os.Getenv("MODE")
if mode == "dev" {
port = os.Getenv("DEV_PORT")
databaseServer = os.Getenv("DEV_DATABASE_URL")
redisServer = os.Getenv("DEV_REDIS_URL")
jwtSecret = os.Getenv("DEV_JWT_SECRET")
fmt.Println("here")
}
if mode == "prod" {
port = os.Getenv("PORT")
databaseServer = os.Getenv("DATABASE_URL")
redisServer = os.Getenv("REDIS_URL")
redisUsername = os.Getenv("REDIS_USERNAME")
redisPassword = os.Getenv("REDIS_PASSWORD")
jwtSecret = os.Getenv("JWT_SECRET")
}
}
// getLogger configures and returns an uber/zap logger
func getLogger() *zap.Logger {
cfg := zap.Config{
Encoding: "json",
Level: zap.NewAtomicLevel(),
OutputPaths: []string{"stdout"},
ErrorOutputPaths: []string{"stderr"},
EncoderConfig: zapcore.EncoderConfig{
MessageKey: "message",
LevelKey: "level",
EncodeLevel: zapcore.CapitalLevelEncoder,
TimeKey: "ts",
EncodeTime: zapcore.EpochMillisTimeEncoder,
},
}
logger, err := cfg.Build()
if err != nil {
fmt.Print(err)
os.Exit(1)
}
return logger
}
// getDatabase initializes and returns a database connection
func getDatabase() *database.Database {
db := &database.Database{}
err := db.Initialize(databaseServer)
if err != nil {
fmt.Print(err)
os.Exit(1)
}
return db
}
// getCache initializes the Redis cache and returns it along with a connection from the pool
func getCache() (*cache.Cache, redis.Conn) {
memory := &cache.Cache{}
memory.Initialize(redisServer, redisUsername, redisPassword)
memoryConn := memory.Pool.Get()
return memory, memoryConn
}
func main() {
logger := getLogger()
db := getDatabase()
memory, memoryConn := getCache()
// Setup the web server
srv := &server.Server{
Service: &service.Service{
Repo: db,
Conn: memoryConn,
JwtSecret: []byte(jwtSecret),
Logger: logger,
},
Router: mux.NewRouter(),
}
srv.SetupRoutes()
var secureHandler *server.SecurityHandler = nil
// middleware for better HTTP headers
if os.Getenv("MODE") == "prod" {
secureHandler = server.NewSecurityHandler(srv)
}
done := make(chan os.Signal, 1)
signal.Notify(done, os.Interrupt, syscall.SIGINT, syscall.SIGTERM)
// Listen
go func() {
if secureHandler != nil {
err := http.ListenAndServe(":"+port, secureHandler)
if err != nil {
fmt.Print(err)
os.Exit(1)
}
} else {
err := http.ListenAndServe(":"+port, srv)
if err != nil {
fmt.Print(err)
os.Exit(1)
}
}
}()
fmt.Println("Server started!")
<-done
shutdown(srv, db, memory)
}
func shutdown(srv *server.Server, db *database.Database, memory *cache.Cache) {
err := srv.Service.Conn.Close()
if err != nil {
fmt.Print(err)
}
err = memory.Close()
if err != nil {
fmt.Print(err)
}
err = db.Close()
if err != nil {
fmt.Print(err)
}
fmt.Println("goodbye!")
}
| [
"\"MODE\"",
"\"DEV_PORT\"",
"\"DEV_DATABASE_URL\"",
"\"DEV_REDIS_URL\"",
"\"DEV_JWT_SECRET\"",
"\"PORT\"",
"\"DATABASE_URL\"",
"\"REDIS_URL\"",
"\"REDIS_USERNAME\"",
"\"REDIS_PASSWORD\"",
"\"JWT_SECRET\"",
"\"MODE\""
]
| []
| [
"PORT",
"JWT_SECRET",
"DEV_DATABASE_URL",
"DATABASE_URL",
"REDIS_PASSWORD",
"MODE",
"DEV_PORT",
"DEV_REDIS_URL",
"DEV_JWT_SECRET",
"REDIS_USERNAME",
"REDIS_URL"
]
| [] | ["PORT", "JWT_SECRET", "DEV_DATABASE_URL", "DATABASE_URL", "REDIS_PASSWORD", "MODE", "DEV_PORT", "DEV_REDIS_URL", "DEV_JWT_SECRET", "REDIS_USERNAME", "REDIS_URL"] | go | 11 | 0 | |
examples/FasterRCNN/train.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: train.py
import argparse
import itertools
import numpy as np
import os
import shutil
import cv2
import six
assert six.PY3, "FasterRCNN requires Python 3!"
import tensorflow as tf
import tqdm
import tensorpack.utils.viz as tpviz
from tensorpack import *
from tensorpack.tfutils import optimizer, collect_env_info
from tensorpack.tfutils.common import get_tf_version_tuple
from tensorpack.tfutils.summary import add_moving_summary
import model_frcnn
import model_mrcnn
from basemodel import image_preprocess, resnet_c4_backbone, resnet_conv5, resnet_fpn_backbone
from dataset import DetectionDataset
from config import finalize_configs, config as cfg
from data import get_all_anchors, get_all_anchors_fpn, get_eval_dataflow, get_train_dataflow
from eval import DetectionResult, predict_image, multithread_predict_dataflow, EvalCallback
from model_box import RPNAnchors, clip_boxes, crop_and_resize, roi_align
from model_cascade import CascadeRCNNHead
from model_fpn import fpn_model, generate_fpn_proposals, multilevel_roi_align, multilevel_rpn_losses
from model_frcnn import BoxProposals, FastRCNNHead, fastrcnn_outputs, fastrcnn_predictions, sample_fast_rcnn_targets
from model_mrcnn import maskrcnn_loss, maskrcnn_upXconv_head
from model_rpn import generate_rpn_proposals, rpn_head, rpn_losses
from viz import draw_annotation, draw_final_outputs, draw_predictions, draw_proposal_recall
try:
import horovod.tensorflow as hvd
except ImportError:
pass
class DetectionModel(ModelDesc):
def preprocess(self, image):
image = tf.expand_dims(image, 0)
image = image_preprocess(image, bgr=True)
return tf.transpose(image, [0, 3, 1, 2])
@property
def training(self):
return get_current_tower_context().is_training
def optimizer(self):
lr = tf.get_variable('learning_rate', initializer=0.003, trainable=False)
tf.summary.scalar('learning_rate-summary', lr)
# The learning rate in the config is set for 8 GPUs, and we use trainers with average=False.
lr = lr / 8.
opt = tf.train.MomentumOptimizer(lr, 0.9)
if cfg.TRAIN.NUM_GPUS < 8:
opt = optimizer.AccumGradOptimizer(opt, 8 // cfg.TRAIN.NUM_GPUS)
return opt
def get_inference_tensor_names(self):
"""
Returns two lists of tensor names to be used to create an inference callable.
Returns:
[str]: input names
[str]: output names
"""
out = ['output/boxes', 'output/scores', 'output/labels']
if cfg.MODE_MASK:
out.append('output/masks')
return ['image'], out
def build_graph(self, *inputs):
inputs = dict(zip(self.input_names, inputs))
image = self.preprocess(inputs['image']) # 1CHW
features = self.backbone(image)
anchor_inputs = {k: v for k, v in inputs.items() if k.startswith('anchor_')}
proposals, rpn_losses = self.rpn(image, features, anchor_inputs)
targets = [inputs[k] for k in ['gt_boxes', 'gt_labels', 'gt_masks'] if k in inputs]
head_losses = self.roi_heads(image, features, proposals, targets)
if self.training:
wd_cost = regularize_cost(
'.*/W', l2_regularizer(cfg.TRAIN.WEIGHT_DECAY), name='wd_cost')
total_cost = tf.add_n(
rpn_losses + head_losses + [wd_cost], 'total_cost')
add_moving_summary(total_cost, wd_cost)
return total_cost
class ResNetC4Model(DetectionModel):
def inputs(self):
ret = [
tf.TensorSpec((None, None, 3), tf.float32, 'image'),
tf.TensorSpec((None, None, cfg.RPN.NUM_ANCHOR), tf.int32, 'anchor_labels'),
tf.TensorSpec((None, None, cfg.RPN.NUM_ANCHOR, 4), tf.float32, 'anchor_boxes'),
tf.TensorSpec((None, 4), tf.float32, 'gt_boxes'),
tf.TensorSpec((None,), tf.int64, 'gt_labels')] # all > 0
if cfg.MODE_MASK:
ret.append(
tf.TensorSpec((None, None, None), tf.uint8, 'gt_masks')
) # NR_GT x height x width
return ret
def backbone(self, image):
return [resnet_c4_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCKS[:3])]
def rpn(self, image, features, inputs):
featuremap = features[0]
rpn_label_logits, rpn_box_logits = rpn_head('rpn', featuremap, cfg.RPN.HEAD_DIM, cfg.RPN.NUM_ANCHOR)
anchors = RPNAnchors(get_all_anchors(), inputs['anchor_labels'], inputs['anchor_boxes'])
anchors = anchors.narrow_to(featuremap)
image_shape2d = tf.shape(image)[2:] # h,w
pred_boxes_decoded = anchors.decode_logits(rpn_box_logits) # fHxfWxNAx4, floatbox
proposal_boxes, proposal_scores = generate_rpn_proposals(
tf.reshape(pred_boxes_decoded, [-1, 4]),
tf.reshape(rpn_label_logits, [-1]),
image_shape2d,
cfg.RPN.TRAIN_PRE_NMS_TOPK if self.training else cfg.RPN.TEST_PRE_NMS_TOPK,
cfg.RPN.TRAIN_POST_NMS_TOPK if self.training else cfg.RPN.TEST_POST_NMS_TOPK)
if self.training:
losses = rpn_losses(
anchors.gt_labels, anchors.encoded_gt_boxes(), rpn_label_logits, rpn_box_logits)
else:
losses = []
return BoxProposals(proposal_boxes), losses
def roi_heads(self, image, features, proposals, targets):
image_shape2d = tf.shape(image)[2:] # h,w
featuremap = features[0]
gt_boxes, gt_labels, *_ = targets
if self.training:
# sample proposal boxes in training
proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)
# The boxes to be used to crop RoIs.
# Use all proposal boxes in inference
boxes_on_featuremap = proposals.boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE)
roi_resized = roi_align(featuremap, boxes_on_featuremap, 14)
feature_fastrcnn = resnet_conv5(roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCKS[-1]) # nxcx7x7
# Keep C5 feature to be shared with mask branch
feature_gap = GlobalAvgPooling('gap', feature_fastrcnn, data_format='channels_first')
fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs('fastrcnn', feature_gap, cfg.DATA.NUM_CLASS)
fastrcnn_head = FastRCNNHead(proposals, fastrcnn_box_logits, fastrcnn_label_logits, gt_boxes,
tf.constant(cfg.FRCNN.BBOX_REG_WEIGHTS, dtype=tf.float32))
if self.training:
all_losses = fastrcnn_head.losses()
if cfg.MODE_MASK:
gt_masks = targets[2]
# maskrcnn loss
# In training, mask branch shares the same C5 feature.
fg_feature = tf.gather(feature_fastrcnn, proposals.fg_inds())
mask_logits = maskrcnn_upXconv_head(
'maskrcnn', fg_feature, cfg.DATA.NUM_CATEGORY, num_convs=0) # #fg x #cat x 14x14
target_masks_for_fg = crop_and_resize(
tf.expand_dims(gt_masks, 1),
proposals.fg_boxes(),
proposals.fg_inds_wrt_gt, 14,
pad_border=False) # nfg x 1x14x14
target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')
all_losses.append(maskrcnn_loss(mask_logits, proposals.fg_labels(), target_masks_for_fg))
return all_losses
else:
decoded_boxes = fastrcnn_head.decoded_output_boxes()
decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')
label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')
final_boxes, final_scores, final_labels = fastrcnn_predictions(
decoded_boxes, label_scores, name_scope='output')
if cfg.MODE_MASK:
roi_resized = roi_align(featuremap, final_boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE), 14)
feature_maskrcnn = resnet_conv5(roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCKS[-1])
mask_logits = maskrcnn_upXconv_head(
'maskrcnn', feature_maskrcnn, cfg.DATA.NUM_CATEGORY, 0) # #result x #cat x 14x14
indices = tf.stack([tf.range(tf.size(final_labels)), tf.cast(final_labels, tf.int32) - 1], axis=1)
final_mask_logits = tf.gather_nd(mask_logits, indices) # #resultx14x14
tf.sigmoid(final_mask_logits, name='output/masks')
return []
class ResNetFPNModel(DetectionModel):
def inputs(self):
ret = [
tf.TensorSpec((None, None, 3), tf.float32, 'image')]
num_anchors = len(cfg.RPN.ANCHOR_RATIOS)
for k in range(len(cfg.FPN.ANCHOR_STRIDES)):
ret.extend([
tf.TensorSpec((None, None, num_anchors), tf.int32,
'anchor_labels_lvl{}'.format(k + 2)),
tf.TensorSpec((None, None, num_anchors, 4), tf.float32,
'anchor_boxes_lvl{}'.format(k + 2))])
ret.extend([
tf.TensorSpec((None, 4), tf.float32, 'gt_boxes'),
tf.TensorSpec((None,), tf.int64, 'gt_labels')]) # all > 0
if cfg.MODE_MASK:
ret.append(
tf.TensorSpec((None, None, None), tf.uint8, 'gt_masks')
) # NR_GT x height x width
return ret
def slice_feature_and_anchors(self, p23456, anchors):
for i, stride in enumerate(cfg.FPN.ANCHOR_STRIDES):
with tf.name_scope('FPN_slice_lvl{}'.format(i)):
anchors[i] = anchors[i].narrow_to(p23456[i])
def backbone(self, image):
c2345 = resnet_fpn_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCKS)
p23456 = fpn_model('fpn', c2345)
return p23456
def rpn(self, image, features, inputs):
assert len(cfg.RPN.ANCHOR_SIZES) == len(cfg.FPN.ANCHOR_STRIDES)
image_shape2d = tf.shape(image)[2:] # h,w
all_anchors_fpn = get_all_anchors_fpn()
multilevel_anchors = [RPNAnchors(
all_anchors_fpn[i],
inputs['anchor_labels_lvl{}'.format(i + 2)],
inputs['anchor_boxes_lvl{}'.format(i + 2)]) for i in range(len(all_anchors_fpn))]
self.slice_feature_and_anchors(features, multilevel_anchors)
# Multi-Level RPN Proposals
rpn_outputs = [rpn_head('rpn', pi, cfg.FPN.NUM_CHANNEL, len(cfg.RPN.ANCHOR_RATIOS))
for pi in features]
multilevel_label_logits = [k[0] for k in rpn_outputs]
multilevel_box_logits = [k[1] for k in rpn_outputs]
multilevel_pred_boxes = [anchor.decode_logits(logits)
for anchor, logits in zip(multilevel_anchors, multilevel_box_logits)]
proposal_boxes, proposal_scores = generate_fpn_proposals(
multilevel_pred_boxes, multilevel_label_logits, image_shape2d)
if self.training:
losses = multilevel_rpn_losses(
multilevel_anchors, multilevel_label_logits, multilevel_box_logits)
else:
losses = []
return BoxProposals(proposal_boxes), losses
def roi_heads(self, image, features, proposals, targets):
image_shape2d = tf.shape(image)[2:] # h,w
assert len(features) == 5, "Features have to be P23456!"
gt_boxes, gt_labels, *_ = targets
if self.training:
proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)
fastrcnn_head_func = getattr(model_frcnn, cfg.FPN.FRCNN_HEAD_FUNC)
if not cfg.FPN.CASCADE:
roi_feature_fastrcnn = multilevel_roi_align(features[:4], proposals.boxes, 7)
head_feature = fastrcnn_head_func('fastrcnn', roi_feature_fastrcnn)
fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs(
'fastrcnn/outputs', head_feature, cfg.DATA.NUM_CLASS)
fastrcnn_head = FastRCNNHead(proposals, fastrcnn_box_logits, fastrcnn_label_logits,
gt_boxes, tf.constant(cfg.FRCNN.BBOX_REG_WEIGHTS, dtype=tf.float32))
else:
def roi_func(boxes):
return multilevel_roi_align(features[:4], boxes, 7)
fastrcnn_head = CascadeRCNNHead(
proposals, roi_func, fastrcnn_head_func,
(gt_boxes, gt_labels), image_shape2d, cfg.DATA.NUM_CLASS)
if self.training:
all_losses = fastrcnn_head.losses()
if cfg.MODE_MASK:
gt_masks = targets[2]
# maskrcnn loss
roi_feature_maskrcnn = multilevel_roi_align(
features[:4], proposals.fg_boxes(), 14,
name_scope='multilevel_roi_align_mask')
maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)
mask_logits = maskrcnn_head_func(
'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY) # #fg x #cat x 28 x 28
target_masks_for_fg = crop_and_resize(
tf.expand_dims(gt_masks, 1),
proposals.fg_boxes(),
proposals.fg_inds_wrt_gt, 28,
pad_border=False) # fg x 1x28x28
target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')
all_losses.append(maskrcnn_loss(mask_logits, proposals.fg_labels(), target_masks_for_fg))
return all_losses
else:
decoded_boxes = fastrcnn_head.decoded_output_boxes()
decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')
label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')
final_boxes, final_scores, final_labels = fastrcnn_predictions(
decoded_boxes, label_scores, name_scope='output')
if cfg.MODE_MASK:
# Cascade inference needs roi transform with refined boxes.
roi_feature_maskrcnn = multilevel_roi_align(features[:4], final_boxes, 14)
maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)
mask_logits = maskrcnn_head_func(
'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY) # #fg x #cat x 28 x 28
indices = tf.stack([tf.range(tf.size(final_labels)), tf.cast(final_labels, tf.int32) - 1], axis=1)
final_mask_logits = tf.gather_nd(mask_logits, indices) # #resultx28x28
tf.sigmoid(final_mask_logits, name='output/masks')
return []
def do_visualize(model, model_path, nr_visualize=100, output_dir='output'):
"""
Visualize some intermediate results (proposals, raw predictions) inside the pipeline.
"""
df = get_train_dataflow() # we don't visualize mask stuff
df.reset_state()
pred = OfflinePredictor(PredictConfig(
model=model,
session_init=get_model_loader(model_path),
input_names=['image', 'gt_boxes', 'gt_labels'],
output_names=[
'generate_{}_proposals/boxes'.format('fpn' if cfg.MODE_FPN else 'rpn'),
'generate_{}_proposals/scores'.format('fpn' if cfg.MODE_FPN else 'rpn'),
'fastrcnn_all_scores',
'output/boxes',
'output/scores',
'output/labels',
]))
if os.path.isdir(output_dir):
shutil.rmtree(output_dir)
utils.fs.mkdir_p(output_dir)
with tqdm.tqdm(total=nr_visualize) as pbar:
for idx, dp in itertools.islice(enumerate(df), nr_visualize):
img, gt_boxes, gt_labels = dp['image'], dp['gt_boxes'], dp['gt_labels']
rpn_boxes, rpn_scores, all_scores, \
final_boxes, final_scores, final_labels = pred(img, gt_boxes, gt_labels)
# draw groundtruth boxes
gt_viz = draw_annotation(img, gt_boxes, gt_labels)
# draw best proposals for each groundtruth, to show recall
proposal_viz, good_proposals_ind = draw_proposal_recall(img, rpn_boxes, rpn_scores, gt_boxes)
# draw the scores for the above proposals
score_viz = draw_predictions(img, rpn_boxes[good_proposals_ind], all_scores[good_proposals_ind])
results = [DetectionResult(*args) for args in
zip(final_boxes, final_scores, final_labels,
[None] * len(final_labels))]
final_viz = draw_final_outputs(img, results)
viz = tpviz.stack_patches([
gt_viz, proposal_viz,
score_viz, final_viz], 2, 2)
if os.environ.get('DISPLAY', None):
tpviz.interactive_imshow(viz)
cv2.imwrite("{}/{:03d}.png".format(output_dir, idx), viz)
pbar.update()
def do_evaluate(pred_config, output_file):
num_gpu = cfg.TRAIN.NUM_GPUS
graph_funcs = MultiTowerOfflinePredictor(
pred_config, list(range(num_gpu))).get_predictors()
for dataset in cfg.DATA.VAL:
logger.info("Evaluating {} ...".format(dataset))
dataflows = [
get_eval_dataflow(dataset, shard=k, num_shards=num_gpu)
for k in range(num_gpu)]
all_results = multithread_predict_dataflow(dataflows, graph_funcs)
output = output_file + '-' + dataset
DetectionDataset().eval_or_save_inference_results(all_results, dataset, output)
def do_predict(pred_func, input_file):
img = cv2.imread(input_file, cv2.IMREAD_COLOR)
results = predict_image(img, pred_func)
final = draw_final_outputs(img, results)
viz = np.concatenate((img, final), axis=1)
cv2.imwrite("output.png", viz)
logger.info("Inference output for {} written to output.png".format(input_file))
tpviz.interactive_imshow(viz)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--load', help='load a model for evaluation or training. Can overwrite BACKBONE.WEIGHTS')
parser.add_argument('--logdir', help='log directory', default='train_log/maskrcnn')
parser.add_argument('--visualize', action='store_true', help='visualize intermediate results')
parser.add_argument('--evaluate', help="Run evaluation. "
"This argument is the path to the output json evaluation file")
parser.add_argument('--predict', help="Run prediction on a given image. "
"This argument is the path to the input image file", nargs='+')
parser.add_argument('--config', help="A list of KEY=VALUE to overwrite those defined in config.py",
nargs='+')
if get_tf_version_tuple() < (1, 6):
# https://github.com/tensorflow/tensorflow/issues/14657
logger.warn("TF<1.6 has a bug which may lead to crash in FasterRCNN if you're unlucky.")
args = parser.parse_args()
if args.config:
cfg.update_args(args.config)
MODEL = ResNetFPNModel() if cfg.MODE_FPN else ResNetC4Model()
DetectionDataset() # initialize the config with information from our dataset
if args.visualize or args.evaluate or args.predict:
if not tf.test.is_gpu_available():
from tensorflow.python.framework import test_util
assert get_tf_version_tuple() >= (1, 7) and test_util.IsMklEnabled(), \
"Inference requires either GPU support or MKL support!"
assert args.load
finalize_configs(is_training=False)
if args.predict or args.visualize:
cfg.TEST.RESULT_SCORE_THRESH = cfg.TEST.RESULT_SCORE_THRESH_VIS
if args.visualize:
do_visualize(MODEL, args.load)
else:
predcfg = PredictConfig(
model=MODEL,
session_init=get_model_loader(args.load),
input_names=MODEL.get_inference_tensor_names()[0],
output_names=MODEL.get_inference_tensor_names()[1])
if args.predict:
predictor = OfflinePredictor(predcfg)
for image_file in args.predict:
do_predict(predictor, image_file)
elif args.evaluate:
assert args.evaluate.endswith('.json'), args.evaluate
do_evaluate(predcfg, args.evaluate)
else:
is_horovod = cfg.TRAINER == 'horovod'
if is_horovod:
hvd.init()
logger.info("Horovod Rank={}, Size={}".format(hvd.rank(), hvd.size()))
if not is_horovod or hvd.rank() == 0:
logger.set_logger_dir(args.logdir, 'd')
logger.info("Environment Information:\n" + collect_env_info())
finalize_configs(is_training=True)
stepnum = cfg.TRAIN.STEPS_PER_EPOCH
# warmup is step based, lr is epoch based
init_lr = cfg.TRAIN.WARMUP_INIT_LR * min(8. / cfg.TRAIN.NUM_GPUS, 1.)
warmup_schedule = [(0, init_lr), (cfg.TRAIN.WARMUP, cfg.TRAIN.BASE_LR)]
warmup_end_epoch = cfg.TRAIN.WARMUP * 1. / stepnum
lr_schedule = [(int(warmup_end_epoch + 0.5), cfg.TRAIN.BASE_LR)]
factor = 8. / cfg.TRAIN.NUM_GPUS
for idx, steps in enumerate(cfg.TRAIN.LR_SCHEDULE[:-1]):
mult = 0.1 ** (idx + 1)
lr_schedule.append(
(steps * factor // stepnum, cfg.TRAIN.BASE_LR * mult))
logger.info("Warm Up Schedule (steps, value): " + str(warmup_schedule))
logger.info("LR Schedule (epochs, value): " + str(lr_schedule))
train_dataflow = get_train_dataflow()
# This is what's commonly referred to as "epochs"
total_passes = cfg.TRAIN.LR_SCHEDULE[-1] * 8 / train_dataflow.size()
logger.info("Total passes of the training set is: {:.5g}".format(total_passes))
callbacks = [
PeriodicCallback(
ModelSaver(max_to_keep=10, keep_checkpoint_every_n_hours=1),
every_k_epochs=20),
# linear warmup
ScheduledHyperParamSetter(
'learning_rate', warmup_schedule, interp='linear', step_based=True),
ScheduledHyperParamSetter('learning_rate', lr_schedule),
PeakMemoryTracker(),
EstimatedTimeLeft(median=True),
SessionRunTimeout(60000).set_chief_only(True), # 1 minute timeout
]
if cfg.TRAIN.EVAL_PERIOD > 0:
callbacks.extend([
EvalCallback(dataset, *MODEL.get_inference_tensor_names(), args.logdir)
for dataset in cfg.DATA.VAL
])
if not is_horovod:
callbacks.append(GPUUtilizationTracker())
if is_horovod and hvd.rank() > 0:
session_init = None
else:
if args.load:
session_init = get_model_loader(args.load)
else:
session_init = get_model_loader(cfg.BACKBONE.WEIGHTS) if cfg.BACKBONE.WEIGHTS else None
traincfg = TrainConfig(
model=MODEL,
data=QueueInput(train_dataflow),
callbacks=callbacks,
steps_per_epoch=stepnum,
max_epoch=cfg.TRAIN.LR_SCHEDULE[-1] * factor // stepnum,
session_init=session_init,
starting_epoch=cfg.TRAIN.STARTING_EPOCH
)
if is_horovod:
trainer = HorovodTrainer(average=False)
else:
# nccl mode appears faster than cpu mode
trainer = SyncMultiGPUTrainerReplicated(cfg.TRAIN.NUM_GPUS, average=False, mode='nccl')
launch_train_with_config(traincfg, trainer)
| []
| []
| [
"DISPLAY"
]
| [] | ["DISPLAY"] | python | 1 | 0 | |
go/src/infra/tools/relnotes/relnotes.go | // Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
/*
CLI tool to generate release notes based on git logs in the current directory.
Usage examples:
go run relnotes.go -since-hash 7bb5fff0fcb57b467a8f907aeee9117e09106d06
or
go run relnotes.go -since-date 2016-02-04
*/
package main
import (
"bufio"
"flag"
"fmt"
"io/ioutil"
"os"
"os/exec"
"regexp"
"strings"
"text/template"
"time"
"golang.org/x/net/context"
"golang.org/x/oauth2/google"
"gopkg.in/yaml.v2"
appengine "google.golang.org/api/appengine/v1"
"github.com/luci/luci-go/common/data/stringset"
)
const monorailURL = "https://bugs.chromium.org/p/%s/issues/detail?id=%s"
var (
appName = flag.String("app", "", "Name of the application")
date = flag.String("date", "", "YYYY-MM-DD. Release date.")
sinceDate = flag.String("since-date", "", "YYYY-MM-DD. All changes since this date.")
sinceHash = flag.String("since-hash", "", "All changes since this long hash.")
bugRE = regexp.MustCompile(`\n BUG[=:][\s]*([0-9]+)`)
monorailRE = regexp.MustCompile(`\n BUG[=:][\s]*([a-z]+):([0-9]+)`)
authorRE = regexp.MustCompile("\nAuthor:.+<(.+)>")
hashRE = regexp.MustCompile("commit (.*)\n")
reviewRE = regexp.MustCompile("\n (Review-Url|Reviewed-on): (.*)\n")
extraPaths = flag.String("extra-paths", "", "Comma-separated list of extra paths to check.")
markdownTxt = `
# Release Notes {{.AppName}} {{.Date}}
- {{len .Commits}} commits, {{.NumBugs}} bugs affected since {{.Since}}
- {{len .Authors}} Authors:
{{- range .Authors}}
- {{ . }}
{{- end}}
## Changes in this release
{{range .Commits -}}
- [{{.Summary}}]({{.ReviewURL}}) ({{.Author}})
{{end}}
## Bugs updated, by author
{{range $author, $bugs := .Bugs -}}
- {{$author}}:
{{range $bug, $unused := $bugs -}}
- [{{$bug}}]({{$bug}})
{{end}}
{{end}}
`
markdownTmpl = template.Must(template.New("markdown").Parse(markdownTxt))
)
type tmplData struct {
AppName string
Date string
NumBugs int
Since string
Authors []string
Commits []*commit
Bugs map[string]stringset.Set
}
type commit struct {
hash string
Author string
committer string
Summary string
ReviewURL string
bugs []string
}
func parsecommit(s string) *commit {
c := &commit{}
bugs := bugRE.FindAllStringSubmatch(strings.ToUpper(s), -1)
for _, b := range bugs {
c.bugs = append(c.bugs, fmt.Sprintf("https://crbug.com/%s", b[1]))
}
monorailBugs := monorailRE.FindAllStringSubmatch(strings.ToUpper(s), -1)
for _, b := range monorailBugs {
c.bugs = append(c.bugs, fmt.Sprintf(monorailURL, b[1], b[2]))
}
authors := authorRE.FindAllStringSubmatch(s, -1)
for _, a := range authors {
c.Author = a[1]
}
hashes := hashRE.FindAllStringSubmatch(s, -1)
for _, h := range hashes {
c.hash = h[1]
}
c.Summary = strings.Trim(strings.Split(s, "\n")[4], " \t")
reviewURL := reviewRE.FindAllStringSubmatch(s, -1)
if len(reviewURL) > 0 && len(reviewURL[0]) > 2 {
c.ReviewURL = reviewURL[0][2]
}
if strings.Trim(c.Author, "\n\t ") == "" {
fmt.Print(s)
}
return c
}
func usage() {
fmt.Fprintf(os.Stderr, "Usage of %s <flags> [relative path]:\n", os.Args[0])
flag.PrintDefaults()
}
func gaeService() (*appengine.APIService, error) {
creds := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS")
if creds == "" {
fmt.Printf("Warning: you do not have the GOOGLE_APPLICATION_CREDENTIALS environment variable set. Cloud API calls may not work properly.\n")
} else {
fmt.Printf("Using GOOGLE_APPLICATION_CREDENTIALS: %s\n", creds)
}
ctx := context.Background()
client, err := google.DefaultClient(ctx, appengine.CloudPlatformScope)
if err != nil {
return nil, err
}
appengineService, err := appengine.New(client)
return appengineService, err
}
// getDeployedApp returns the hash and date string, or an error.
func getDeployedApp(service, module string) (string, string, error) {
gaeSvc, err := gaeService()
if err != nil {
return "", "", err
}
appsSvc := appengine.NewAppsService(gaeSvc)
versionsListCall := appsSvc.Services.Versions.List(*appName, "default")
versionsList, err := versionsListCall.Do()
if err != nil {
return "", "", err
}
var deployedVers *appengine.Version
// This is a heuristic to determine which version is "deployed" - use
// the latest version (by creation timestamp) that is "SERVING". More
// accurate would be to look at traffic splits and pick the one that
// has the most (or all) traffic going to it. Unfortunately the API
// doesn't appear to expose that information(!).
for _, vers := range versionsList.Versions {
if vers.ServingStatus == "SERVING" && (deployedVers == nil ||
deployedVers.CreateTime < vers.CreateTime) {
deployedVers = vers
}
}
if deployedVers == nil {
return "", "", fmt.Errorf("could not determine currently deployed version")
}
versRE := regexp.MustCompile("([0-9]+)-([0-9a-f]+)")
matches := versRE.FindAllStringSubmatch(deployedVers.Id, -1)
return matches[0][2], deployedVers.CreateTime, nil
}
func getAppNameFromYAML() (string, error) {
type appStruct struct {
Application string
}
in, err := os.Open("app.yaml")
if err != nil {
return "", err
}
b, err := ioutil.ReadAll(in)
if err != nil {
return "", err
}
app := &appStruct{}
if err := yaml.Unmarshal(b, app); err != nil {
return "", err
}
return app.Application, nil
}
func getUpdates(path string) (stringset.Set, []*commit, stringset.Set, map[string]stringset.Set) {
var cmd *exec.Cmd
switch {
case *sinceHash != "":
cmd = exec.Command("git", "log", fmt.Sprintf("%s..", *sinceHash), path)
case *sinceDate != "":
cmd = exec.Command("git", "log", "--since", *sinceDate, path)
default:
fmt.Printf("Please specify either --since-hash or --since-date\n")
os.Exit(1)
}
cmd.Stderr = os.Stderr
stdout, err := cmd.StdoutPipe()
if err != nil {
fmt.Printf("Error getting stdout: %v", err)
os.Exit(1)
}
cmd.Start()
r := bufio.NewReader(stdout)
bytes, err := ioutil.ReadAll(r)
if err != nil {
fmt.Printf("Error reading stdout: %v", err)
os.Exit(1)
}
text := string(bytes)
re := regexp.MustCompile("(^|\n)commit ")
commitMsgs := re.Split(text, -1)[1:]
commitsByBug := map[string][]*commit{}
commitsByAuthor := map[string][]*commit{}
authors := stringset.New(5)
bugs := stringset.New(5)
bugsByAuthor := map[string]stringset.Set{}
summaries := []string{}
commits := []*commit{}
for _, cstr := range commitMsgs {
c := parsecommit(cstr)
if c.ReviewURL == "" {
continue
}
commits = append(commits, c)
summaries = append(summaries, c.Summary)
for _, b := range c.bugs {
commitsByBug[b] = append(commitsByBug[b], c)
bugs.Add(b)
if _, ok := bugsByAuthor[c.Author]; !ok {
bugsByAuthor[c.Author] = stringset.New(5)
}
bugsByAuthor[c.Author].Add(b)
}
commitsByAuthor[c.Author] = append(commitsByAuthor[c.Author], c)
authors.Add(c.Author)
}
return authors, commits, bugs, bugsByAuthor
}
func main() {
flag.Usage = usage
flag.Parse()
paths := flag.Args()
if len(paths) == 0 {
paths = []string{"."}
}
if *appName == "" {
s, err := getAppNameFromYAML()
if err != nil {
fmt.Printf("Error getting app name from app.yaml: %v", err)
os.Exit(1)
}
appName = &s
fmt.Printf("Got app name from app.yaml: %s\n", *appName)
}
if *sinceHash == "" && *sinceDate == "" {
hash, date, err := getDeployedApp(*appName, "default")
if err != nil {
fmt.Printf("Error trying to get currently deployed app hash: %v\n", err)
fmt.Printf("Please specify either --since-hash or --since-date\n")
os.Exit(1)
}
sinceHash = &hash
sinceDate = &date
}
authors, commits, bugs, bugsByAuthor := stringset.New(5), []*commit{}, stringset.New(5), map[string]stringset.Set{}
for _, path := range paths {
a, c, b, bba := getUpdates(path)
authors = authors.Union(a)
commits = append(commits, c...)
bugs = bugs.Union(b)
for author, bugs := range bba {
if _, ok := bugsByAuthor[author]; !ok {
bugsByAuthor[author] = stringset.New(5)
}
bugsByAuthor[author] = bugsByAuthor[author].Union(bugs)
}
}
if *date == "" {
today := time.Now().Format("2006-01-02")
date = &today
}
data := tmplData{
AppName: *appName,
Date: *date,
NumBugs: bugs.Len(),
Since: fmt.Sprintf("%s (%s)", *sinceHash, *sinceDate),
Authors: authors.ToSlice(),
Commits: commits,
Bugs: bugsByAuthor,
}
f := bufio.NewWriter(os.Stdout)
markdownTmpl.Execute(f, data)
f.Flush()
}
| [
"\"GOOGLE_APPLICATION_CREDENTIALS\""
]
| []
| [
"GOOGLE_APPLICATION_CREDENTIALS"
]
| [] | ["GOOGLE_APPLICATION_CREDENTIALS"] | go | 1 | 0 | |
DJANGO/TrainingApp2/TrainingApp/asgi.py | """
ASGI config for TrainingApp project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'TrainingApp.settings')
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
mysite/wsgi.py | """
WSGI config for mysite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings")
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
tests/commands/test_init.py | # Copyright 2020-2021 Canonical Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For further info, check https://github.com/canonical/charmcraft
import os
import pwd
import subprocess
import sys
from argparse import Namespace
from unittest.mock import patch
import pytest
from charmcraft.cmdbase import CommandError
from charmcraft.commands.init import InitCommand
from charmcraft.utils import S_IXALL
from tests.test_infra import pep8_test, get_python_filepaths, pep257_test
def test_init_pep257(tmp_path, config):
cmd = InitCommand("group", config)
cmd.run(Namespace(name="my-charm", author="J Doe", series="k8s", force=False))
paths = get_python_filepaths(roots=[str(tmp_path / "src")], python_paths=[])
pep257_test(paths)
def test_init_pep8(tmp_path, config, *, author="J Doe"):
cmd = InitCommand("group", config)
cmd.run(Namespace(name="my-charm", author=author, series="k8s", force=False))
paths = get_python_filepaths(
roots=[str(tmp_path / "src"), str(tmp_path / "tests")], python_paths=[]
)
pep8_test(paths)
def test_init_non_ascii_author(tmp_path, config):
test_init_pep8(tmp_path, config, author="فلانة الفلانية")
def test_all_the_files(tmp_path, config):
cmd = InitCommand("group", config)
cmd.run(Namespace(name="my-charm", author="ಅಪರಿಚಿತ ವ್ಯಕ್ತಿ", series="k8s", force=False))
assert sorted(str(p.relative_to(tmp_path)) for p in tmp_path.glob("**/*")) == [
".flake8",
".gitignore",
".jujuignore",
"CONTRIBUTING.md",
"LICENSE",
"README.md",
"actions.yaml",
"charmcraft.yaml",
"config.yaml",
"metadata.yaml",
"requirements-dev.txt",
"requirements.txt",
"run_tests",
"src",
"src/charm.py",
"tests",
"tests/__init__.py",
"tests/test_charm.py",
]
def test_force(tmp_path, config):
cmd = InitCommand("group", config)
tmp_file = tmp_path / "README.md"
with tmp_file.open("w") as f:
f.write("This is a nonsense readme")
cmd.run(Namespace(name="my-charm", author="ಅಪರಿಚಿತ ವ್ಯಕ್ತಿ", series="k8s", force=True))
# Check that init ran
assert (tmp_path / "LICENSE").exists()
# Check that init did not overwrite files
with tmp_file.open("r") as f:
assert f.read() == "This is a nonsense readme"
def test_bad_name(config):
cmd = InitCommand("group", config)
with pytest.raises(CommandError):
cmd.run(Namespace(name="1234", author="שראלה ישראל", series="k8s", force=False))
def test_executables(tmp_path, config):
cmd = InitCommand("group", config)
cmd.run(Namespace(name="my-charm", author="홍길동", series="k8s", force=False))
assert (tmp_path / "run_tests").stat().st_mode & S_IXALL == S_IXALL
assert (tmp_path / "src/charm.py").stat().st_mode & S_IXALL == S_IXALL
def test_tests(tmp_path, config):
# Fix the PYTHONPATH and PATH so the tests in the initted environment use our own
# virtualenv libs and bins (if any), as they need them; we're not creating a
# venv for the local tests (note that CI doesn't use a venv).
env = os.environ.copy()
env_paths = [p for p in sys.path if "env/lib/python" in p]
if env_paths:
if "PYTHONPATH" in env:
env["PYTHONPATH"] += ":" + ":".join(env_paths)
else:
env["PYTHONPATH"] = ":".join(env_paths)
for path in env_paths:
bin_path = path[: path.index("env/lib/python")] + "env/bin"
env["PATH"] = bin_path + ":" + env["PATH"]
cmd = InitCommand("group", config)
cmd.run(Namespace(name="my-charm", author="だれだれ", series="k8s", force=False))
subprocess.run(["./run_tests"], cwd=str(tmp_path), check=True, env=env)
def test_gecos_missing_in_getpwuid_response(config):
"""No GECOS field in getpwuid response."""
cmd = InitCommand("group", config)
with patch("pwd.getpwuid") as mock_pwd:
# return a fake passwd struct with an empty gecos field (5th parameter)
mock_pwd.return_value = pwd.struct_passwd(("user", "pass", 1, 1, "", "dir", "shell"))
msg = "Author not given, and nothing in GECOS field"
with pytest.raises(CommandError, match=msg):
cmd.run(Namespace(name="my-charm", author=None, series="k8s", force=False))
def test_gecos_missing_user_information(config):
"""No information at all for the requested user."""
cmd = InitCommand("group", config)
with patch("pwd.getpwuid") as mock_pwd:
mock_pwd.side_effect = KeyError("no user")
msg = "Author not given, and nothing in GECOS field"
with pytest.raises(CommandError, match=msg):
cmd.run(Namespace(name="my-charm", author=None, series="k8s", force=False))
| []
| []
| []
| [] | [] | python | 0 | 0 | |
sdk/go/keepclient/support.go | // Copyright (C) The Arvados Authors. All rights reserved.
//
// SPDX-License-Identifier: Apache-2.0
package keepclient
import (
"crypto/md5"
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"os"
"strings"
"git.arvados.org/arvados.git/sdk/go/arvadosclient"
)
// Function used to emit debug messages. The easiest way to enable
// keepclient debug messages in your application is to assign
// log.Printf to DebugPrintf.
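// For example (a minimal sketch, assuming this package is imported as "keepclient"
// alongside the standard library "log" package):
//
//	keepclient.DebugPrintf = log.Printf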
var DebugPrintf = func(string, ...interface{}) {}
func init() {
if arvadosclient.StringBool(os.Getenv("ARVADOS_DEBUG")) {
DebugPrintf = log.Printf
}
}
type keepService struct {
Uuid string `json:"uuid"`
Hostname string `json:"service_host"`
Port int `json:"service_port"`
SSL bool `json:"service_ssl_flag"`
SvcType string `json:"service_type"`
ReadOnly bool `json:"read_only"`
}
// Md5String returns md5 hash for the bytes in the given string
func Md5String(s string) string {
return fmt.Sprintf("%x", md5.Sum([]byte(s)))
}
type svcList struct {
Items []keepService `json:"items"`
}
type uploadStatus struct {
err error
url string
statusCode int
replicas_stored int
response string
}
func (this *KeepClient) uploadToKeepServer(host string, hash string, body io.Reader,
upload_status chan<- uploadStatus, expectedLength int64, reqid string) {
var req *http.Request
var err error
var url = fmt.Sprintf("%s/%s", host, hash)
if req, err = http.NewRequest("PUT", url, nil); err != nil {
DebugPrintf("DEBUG: [%s] Error creating request PUT %v error: %v", reqid, url, err.Error())
upload_status <- uploadStatus{err, url, 0, 0, ""}
return
}
req.ContentLength = expectedLength
if expectedLength > 0 {
req.Body = ioutil.NopCloser(body)
} else {
// "For client requests, a value of 0 means unknown if
// Body is not nil." In this case we do want the body
// to be empty, so don't set req.Body.
}
req.Header.Add("X-Request-Id", reqid)
req.Header.Add("Authorization", "OAuth2 "+this.Arvados.ApiToken)
req.Header.Add("Content-Type", "application/octet-stream")
req.Header.Add(X_Keep_Desired_Replicas, fmt.Sprint(this.Want_replicas))
if len(this.StorageClasses) > 0 {
req.Header.Add("X-Keep-Storage-Classes", strings.Join(this.StorageClasses, ", "))
}
var resp *http.Response
if resp, err = this.httpClient().Do(req); err != nil {
DebugPrintf("DEBUG: [%s] Upload failed %v error: %v", reqid, url, err.Error())
upload_status <- uploadStatus{err, url, 0, 0, err.Error()}
return
}
rep := 1
if xr := resp.Header.Get(X_Keep_Replicas_Stored); xr != "" {
fmt.Sscanf(xr, "%d", &rep)
}
defer resp.Body.Close()
defer io.Copy(ioutil.Discard, resp.Body)
respbody, err2 := ioutil.ReadAll(&io.LimitedReader{R: resp.Body, N: 4096})
response := strings.TrimSpace(string(respbody))
if err2 != nil && err2 != io.EOF {
DebugPrintf("DEBUG: [%s] Upload %v error: %v response: %v", reqid, url, err2.Error(), response)
upload_status <- uploadStatus{err2, url, resp.StatusCode, rep, response}
} else if resp.StatusCode == http.StatusOK {
DebugPrintf("DEBUG: [%s] Upload %v success", reqid, url)
upload_status <- uploadStatus{nil, url, resp.StatusCode, rep, response}
} else {
if resp.StatusCode >= 300 && response == "" {
response = resp.Status
}
DebugPrintf("DEBUG: [%s] Upload %v error: %v response: %v", reqid, url, resp.StatusCode, response)
upload_status <- uploadStatus{errors.New(resp.Status), url, resp.StatusCode, rep, response}
}
}
func (this *KeepClient) putReplicas(
hash string,
getReader func() io.Reader,
expectedLength int64) (locator string, replicas int, err error) {
reqid := this.getRequestID()
// Calculate the ordering for uploading to servers
sv := NewRootSorter(this.WritableLocalRoots(), hash).GetSortedRoots()
// The next server to try contacting
nextServer := 0
// The number of active writers
active := 0
// Used to communicate status from the upload goroutines
upload_status := make(chan uploadStatus)
defer func() {
// Wait for any abandoned uploads (e.g., we started
// two uploads and the first replied with replicas=2)
// to finish before closing the status channel.
go func() {
for active > 0 {
<-upload_status
}
close(upload_status)
}()
}()
replicasDone := 0
replicasTodo := this.Want_replicas
replicasPerThread := this.replicasPerService
if replicasPerThread < 1 {
// unlimited or unknown
replicasPerThread = replicasTodo
}
retriesRemaining := 1 + this.Retries
var retryServers []string
lastError := make(map[string]string)
for retriesRemaining > 0 {
retriesRemaining -= 1
nextServer = 0
retryServers = []string{}
for replicasTodo > 0 {
for active*replicasPerThread < replicasTodo {
// Start some upload requests
if nextServer < len(sv) {
DebugPrintf("DEBUG: [%s] Begin upload %s to %s", reqid, hash, sv[nextServer])
go this.uploadToKeepServer(sv[nextServer], hash, getReader(), upload_status, expectedLength, reqid)
nextServer += 1
active += 1
} else {
if active == 0 && retriesRemaining == 0 {
msg := "Could not write sufficient replicas: "
for _, resp := range lastError {
msg += resp + "; "
}
msg = msg[:len(msg)-2]
return locator, replicasDone, InsufficientReplicasError(errors.New(msg))
}
break
}
}
DebugPrintf("DEBUG: [%s] Replicas remaining to write: %v active uploads: %v",
reqid, replicasTodo, active)
// Now wait for something to happen.
if active > 0 {
status := <-upload_status
active -= 1
if status.statusCode == 200 {
// good news!
replicasDone += status.replicas_stored
replicasTodo -= status.replicas_stored
locator = status.response
delete(lastError, status.url)
} else {
msg := fmt.Sprintf("[%d] %s", status.statusCode, status.response)
if len(msg) > 100 {
msg = msg[:100]
}
lastError[status.url] = msg
}
if status.statusCode == 0 || status.statusCode == 408 || status.statusCode == 429 ||
(status.statusCode >= 500 && status.statusCode != 503) {
// Timeout, too many requests, or other server side failure
// Do not retry when status code is 503, which means the keep server is full
retryServers = append(retryServers, status.url[0:strings.LastIndex(status.url, "/")])
}
} else {
break
}
}
sv = retryServers
}
return locator, replicasDone, nil
}
| [
"\"ARVADOS_DEBUG\""
]
| []
| [
"ARVADOS_DEBUG"
]
| [] | ["ARVADOS_DEBUG"] | go | 1 | 0 | |
cave/com.raytheon.viz.gfe/localization/gfe/userPython/textUtilities/FormatterRunner.py | ##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
import TimeRange, AbsTime
import logging
import TextFormatter
import time, os, string, inspect, sys
import JUtil, VarDictGroker
import RedirectLogging
import UFStatusHandler
from com.raytheon.uf.viz.core import VizApp
from com.raytheon.uf.common.gfe.ifpclient import PyFPClient
#
# Runs the text formatter to generate text products
#
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------- -------- --------- ---------------------------------------------
# May 29, 2008 njensen Initial Creation.
# Dec 10, 2014 14946 ryu Add getTimeZones() function.
# Apr 16, 2015 14946 ryu Fix getTimeZones to return the office TZ if timezone
# is not set for any zone in a segment.
# Apr 20, 2015 4027 randerso Fixes for formatter autotests
# Apr 25, 2015 4952 njensen Updated for new JEP API
# May 06, 2015 4467 randerso Convert to upper case before writing to files if
# mixed case is not enabled for the product.
# Cleaned up file writing code
# Jul 29, 2015 4263 dgilling Support updated TextProductManager.
# Nov 30, 2015 5129 dgilling Support new IFPClient.
# Sep 28, 2016 19293 randerso Log formatter exceptions to formatter log file
# Feb 07, 2017 6092 randerso Changed startTime and endTime to be time.struct_times
# Feb 26, 2018 7230 mapeters Don't reset DRT time to real time
#
##
##
# This is a base file that is not intended to be overridden.
##
displayNameDict = {}
# Set up logging info
PLUGIN_NAME = 'com.raytheon.viz.gfe'
CATEGORY = 'GFE'
DEFAULT_LOG_FILENAME = '/tmp/gfe.log'
FILEMODE='w'
PATH_MGR = None
try:
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("FormatterRunner")
formatter = logging.Formatter("%(asctime)s:%(name)s:%(levelname)s:%(message)s")
# Get the information for the file logger
from com.raytheon.uf.common.localization import PathManagerFactory
from com.raytheon.uf.common.localization import LocalizationContext
LocalizationType = LocalizationContext.LocalizationType
LocalizationLevel = LocalizationContext.LocalizationLevel
PATH_MGR = PathManagerFactory.getPathManager()
except:
logging.basicConfig(filename=DEFAULT_LOG_FILENAME,level=logging.DEBUG)
logger = logging.getLogger()
logger.exception("Exception occurred")
## TODO: Remove use of DataManager in this code. Will need to coordinate with
## the field developers to ensure local site overrides aren't relying on having
## access to it.
def executeFromJava(databaseID, site, username, dataMgr, forecastList, logFile, cmdLineVarDict=None,
drtTime=None, vtecMode=None, vtecActiveTable="active", testMode=0 ):
if type(forecastList) is not list:
forecastList = [str(forecastList)]
# Set up the file logger for this product
# ctx = PATH_MGR.getContext(LocalizationType.valueOf('CAVE_STATIC'), LocalizationLevel.valueOf('USER'))
# logFile = PATH_MGR.getFile(ctx, os.path.join('gfe', 'logs', forecastList[0])).getPath()
logger.info("logFile: " + str(logFile))
fh = logging.FileHandler(filename=logFile, mode=FILEMODE)
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
logger.addHandler(fh)
# redirect stdout and stderr to logger
RedirectLogging.redirect(logger, stdout=True, stderr=True)
logger.info(forecastList[0])
site = str(site)
databaseID = str(databaseID)
username = str(username)
startTime = time.time()
logger.info("Text Formatter Starting")
try:
forecasts = runFormatter(databaseID=databaseID, site=site, forecastList=forecastList, testMode=testMode,
cmdLineVarDict=cmdLineVarDict, vtecMode=vtecMode, username=username,
dataMgr=dataMgr, drtTime=drtTime, vtecActiveTable=vtecActiveTable)
except:
logger.exception("Error generating text product")
raise
elapsedTime = (time.time() - startTime)*1000
logger.info("Text Formatter Finished, took: %d ms",elapsedTime)
RedirectLogging.restore()
return forecasts
def getPid(forecast):
# taken from ProductParser.py
import re
sl = r'^' # start of line
el = r'\s*?\n' # end of line
id3 = r'[A-Za-z]{3}' # 3 character word
empty = r'^\s*' + el # empty line
wmoid = r'(?P<wmoid>[A-Z]{4}\d{2})' # wmoid
fsid = r'(?P<fsid>[A-Z]{4})' # full station id
pit = r'(?P<pit>\d{6})' # product issuance time UTC
ff = r'(?P<funnyfield> ' + id3 + ')?' # "funny" field
# CI block
ci_start = sl + wmoid + ' ' + fsid + ' ' + pit + ff + el
awipsid = r'(?P<pil>(?P<cat>[A-Z0-9]{3})(?P<lid>[A-Z0-9]{1,3}))' + el
ci_block = r'(?P<ciblock>' + ci_start + awipsid + '\n?)'
ci_re = re.compile(ci_block)
pid = None
m = ci_re.search(forecast)
if m is not None:
pid = m.group('cat')
return pid
def runFormatter(databaseID, site, forecastList, cmdLineVarDict, vtecMode,
username, dataMgr, serverFile=None,
editAreas=[], timeRanges=[], timePeriod=None, drtTime=None,
vtecActiveTable='active', testMode=0, experimentalMode=0, serverOutputFile=None,
startTime=None, endTime=None, language=None, outputFile=None, appendFile=None
):
if cmdLineVarDict:
exec "cmdLineVarDict = " + cmdLineVarDict
else:
cmdLineVarDict = {}
# Set default Forecast Type
if len(forecastList) == 0:
usage()
logger.error("ForecastList [-t] is empty or missing")
return
# Can't have both T and E modes
if testMode and experimentalMode:
usage()
logger.error("Can't have both -T and -E switches")
return
if drtTime:
import offsetTime
offsetTime.setDrtOffset(drtTime)
# Create Time Range
useRawTR = 0
if startTime is not None and endTime is not None:
start = decodeTimeStruct(startTime)
end = decodeTimeStruct(endTime)
timeRange = TimeRange.TimeRange(start, end)
# Set so this time range will override all others
useRawTR = 1
else:
timeRange = None
# Handle the VTEC modes
if vtecMode is not None and vtecMode not in ['X','O','T','E']:
usage()
logger.error("-v vtecMode must be ['X', 'O', 'T', 'E']")
sys.exit(1)
#force VTEC mode to "T" if in TEST mode and another vtecCode is specified
if testMode and vtecMode is not None:
vtecMode = "T"
#force VTEC mode to "E" if in EXPERIMENTAL mode and another vtecCode
#is specified
elif experimentalMode and vtecMode is not None:
vtecMode = "E"
#force into TEST mode, if vtec code is 'T'
if vtecMode == "T":
testMode = 1
experimentalMode = 0
elif vtecMode == "E":
experimentalMode = 1
testMode = 0
# Create an ifpClient
ifpClient = PyFPClient(VizApp.getWsId(), site)
global GridLoc
GridLoc = ifpClient.getDBGridLocation()
#importer = TextIFPImporter(ifpClient)
#importer.install()
import Utility
import ForecastNarrative
import ForecastTable
import Analysis
site = str(ifpClient.getSiteID()[0])
# Create dictionary of arguments
argDict = {
#"host" : host,
#"port" : port,
"databaseID": databaseID,
"site" : site,
"cmdLineVarDict": cmdLineVarDict,
"serverFile": serverFile,
"editAreas": editAreas,
"timeRanges": timeRanges,
"timeRange": timeRange,
"timePeriod": timePeriod,
"useRawTR": useRawTR,
"vtecMode": vtecMode,
"vtecActiveTable": vtecActiveTable,
"testMode": testMode,
"experimentalMode": experimentalMode,
"serverOutputFile": serverOutputFile,
}
# Handle command line switches for variables that can be
# set elsewhere i.e. in the command line varDict OR the
# product definition section.
# If there was a command line switch for these items,
# make an entry in argDict. Otherwise, do not.
for item in ["language", "outputFile", "appendFile"]:
exec "if " + item + " is not None: argDict['" + item + "'] = " + item
logger.info("Arguments: " + str(argDict))
argDict["ifpClient"] = ifpClient
argDict["utility"] = Utility.Utility(None, None, ifpClient)
#argDict["AFPS"] = AFPS
#argDict["AFPSSup"] = AFPSSup
argDict["Analysis"] = Analysis
argDict["ForecastNarrative"] = ForecastNarrative
argDict["ForecastTable"] = ForecastTable
# get product creation time to the minute - almost all fmtrs use this
argDict['creationTime'] = int(time.time()/60)*60.0
# Set the Site Time Zone
tz = str(ifpClient.getSiteTimeZone())
os.environ['TZ'] = tz
time.tzset()
# Create the formatter
formatter = TextFormatter.TextFormatter(dataMgr, ifpClient)
# For each Forecast Type,
# Create generate forecast
forecasts = "" # returned value
outForecasts = "" # written to output files
for forecastType in forecastList:
forecast = formatter.getForecast(forecastType, argDict)
forecasts = forecasts + forecast
# Convert data written to files to upper case if required
mixedCase = False
pid = getPid(forecast)
if pid is None:
logger.warning("Unable to determine PID: defaulting to upper case")
else:
from com.raytheon.uf.common.dataplugin.text.db import MixedCaseProductSupport
mixedCase = MixedCaseProductSupport.isMixedCase(str(pid))
if mixedCase:
outForecasts = outForecasts + forecast
else:
outForecasts = outForecasts + forecast.upper()
logger.info("Text:\n" + str(forecasts))
try:
outputFile = argDict["outputFile"]
success = writeToFile(outForecasts, outputFile, "w")
if success == 0:
print "Couldn't open output file", outputFile
logger.error("Couldn't open output file: ", outputFile)
sys.exit(1)
except:
pass
try:
outputFile = argDict["serverOutputFile"]
success = writeToFile(outForecasts, outputFile, "w")
if success == 0:
print "Couldn't open output file", outputFile
logger.error("Couldn't open output file: ", outputFile)
sys.exit(1)
except:
pass
try:
appendFile = argDict["appendFile"]
success = writeToFile(outForecasts, appendFile, "a")
if success == 0:
print "Couldn't open append file", appendFile
logger.error("Couldn't write to append file: ", appendFile)
sys.exit(1)
except:
pass
try:
serverFile = argDict["serverFile"]
writeToSite = (username == "SITE")
success = writeToServerFile(outForecasts, serverFile, writeToSite)
if success == 0:
print "Couldn't open server output file", serverFile
logger.error("Couldn't open server output file: ", serverFile)
sys.exit(1)
except:
pass
del outForecasts
# Remove any lat/lon areas created temporarily
#global LatLonIds
#argDict["ifpClient"].deleteReferenceData(LatLonIds)
# Somebody is holding onto an ifpClient and thus the C++
# object is not being destroyed. This causes the network
# connection to stay open. Below is a kludge to force
# the destruction of the C++ object.
#del ifpClient.this
# This also means that you may not import any new modules after this
# point!!!!!!!!!!!!!!!
return forecasts
def decodeTimeStruct(timeStruct):
return AbsTime.absTimeYMD(timeStruct.tm_year, timeStruct.tm_mon,
timeStruct.tm_mday,
timeStruct.tm_hour, timeStruct.tm_min)
def writeToFile(forecasts, outputFile, mode):
if outputFile:
logger.info("Writing forecast to " + outputFile)
try:
with open(outputFile, mode) as outfile:
outfile.write(forecasts)
os.chmod(outputFile, 0644)
except:
logger.exception("Error writing forecast to "+outputFile)
return 0
return 1
def writeToServerFile(forecasts, outputFile, writeToSite):
if outputFile:
try:
if writeToSite:
ctx = PATH_MGR.getContext(LocalizationType.COMMON_STATIC, LocalizationLevel.SITE)
else:
ctx = PATH_MGR.getContext(LocalizationType.COMMON_STATIC, LocalizationLevel.USER)
filePath = PATH_MGR.SEPARATOR.join(["gfe", "text", "PRODGEN", outputFile + ".PRODGEN"])
lFile = PATH_MGR.getLocalizationFile(ctx, filePath)
logger.info("Writing forecast to " + str(lFile))
from LockingFile import File
with File(lFile.getFile(), "", 'w') as outfile:
outfile.write(forecasts)
return lFile.save()
except:
logger.exception("Error writing forecast to " + str(lFile))
return 0
return 1
def importModules(paths):
global displayNameDict
displayNameDict = {}
split = paths.split(os.path.pathsep)
for path in split:
if not path in sys.path:
sys.path.append(path)
inv = []
if os.path.exists(path):
inv = os.listdir(path)
inv = filter(filterScripts, inv)
for pid in inv:
name = os.path.splitext(pid)[0]
if sys.modules.has_key(name):
del sys.modules[name]
try:
mod = __import__(name)
except:
logger.exception("Import Failed " + name)
mod = None
definition = None
if mod is not None:
d = mod.__dict__
#search for Definition at top-level
definition = d.get('Definition', None)
if definition is None:
# search for definition within class name
definition = d.get(name, None)
if definition is None:
tp = d.get('TextProduct', None)
if tp is not None:
#search for definition within TextProduct class
definition = getattr(tp, 'Definition', None)
if definition is None or type(definition) is not dict:
logger.info("Formatter: No Definition Found " +
name)
continue
dspName = getDisplayName(definition)
if dspName is None or dspName == "None":
continue
displayNameDict[dspName] = (mod, definition)
def getScripts(paths, getVtecCodes):
from java.util import ArrayList
from com.raytheon.uf.common.dataplugin.gfe.textproduct import ProductDefinition
from com.raytheon.viz.gfe.textformatter import TextProductConfigData
from com.raytheon.viz.gfe.textformatter import TextProductMetadata
logger.info("TextProduct FormatterLauncher Processing....")
importModules(paths)
textProducts = ArrayList()
for (displayName, value) in displayNameDict.items():
(module, definition) = value
moduleName = module.__name__
pdef = ProductDefinition(JUtil.pyDictToJavaMap(definition))
productMetadata = TextProductMetadata(moduleName, displayName, pdef)
textProducts.add(productMetadata)
vtecCodes = {}
if getVtecCodes:
import VTECMessageType
vtecCodes = VTECMessageType.VTECMessageTypeDict
logger.info("TextProduct FormatterLauncher Done....")
return TextProductConfigData(JUtil.pyValToJavaObj(vtecCodes), textProducts)
def filterScripts(name):
(filename, ext) = os.path.splitext(name)
return ext == ".py" and not filename.endswith("Definition")
def getDisplayName(definition):
try:
dspName = definition['displayName']
except:
dspName = None
return dspName
def ppDef(definition):
"pretty prints the definition to make it more readable. Returns string."
s = "\n"
if definition is None:
return "<Definition is None>"
if type(definition) == dict and len(definition.keys()):
keys = definition.keys()
keys.sort()
#get maximum length of key
maxL = 0
for k in keys:
maxL = max(len(k), maxL)
# output the data, formatted
fmt = "%-" + `maxL` + "s"
for k in keys:
s = s + fmt % k + ": " + str(definition[k]) + '\n'
return s
else:
return "<Definition not dictionary>\n" + `definition`
## TODO: Investigate if the dependency on DataManager can be removed here.
## At the moment this passes through to ValuesDialog for building special
## widgets in the DialogAreaComposite.
def getVarDict(paths, dspName, dataMgr, ifpClient, issuedBy, dataSource):
importModules(paths)
tz = str(ifpClient.getSiteTimeZone())
os.environ['TZ'] = tz
time.tzset()
productDef = displayNameDict[dspName][1]
productDef['database'] = dataSource
vdg = VarDictGroker.VarDictGroker(displayNameDict[dspName][0], productDef, dspName, issuedBy, dataMgr)
return vdg.getVarDict()
def getVTECMessageType(productCategory):
import VTECMessageType
return VTECMessageType.getVTECMessageType(productCategory)
def getTimeZones(zones, officeTZ):
import AreaDictionary
timezones = []
if zones is not None:
for zone in zones:
zdict = AreaDictionary.AreaDictionary.get(zone, {})
tzs = zdict.get("ugcTimeZone", [])
if type(tzs) is str:
tzs = [tzs]
for tz in tzs:
if tz not in timezones:
timezones.append(tz)
if officeTZ in timezones and officeTZ != timezones[0]:
timezones.remove(officeTZ)
timezones.insert(0, officeTZ)
if len(timezones) == 0:
timezones.append(officeTZ)
return JUtil.pylistToJavaStringList(timezones)
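# Illustration (hypothetical AreaDictionary contents): with zones
# ["NYZ072", "CTZ009"] both mapping to ugcTimeZone "EST5EDT" and
# officeTZ "EST5EDT", the result is the single-entry list ["EST5EDT"];
# if some zone carried a different ugcTimeZone, officeTZ would still be
# moved to the front of the returned list.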
def reloadModule(moduleName):
# m = __import__(moduleName)
# reload(m)
if sys.modules.has_key(moduleName):
del sys.modules[moduleName]
try:
__import__(moduleName)
except:
logger.exception("Import Failed " + moduleName)
| []
| []
| [
"TZ"
]
| [] | ["TZ"] | python | 1 | 0 | |
lib_pypy/pypy_tools/build_cffi_imports.py | from __future__ import print_function
import sys, shutil, os, tempfile, hashlib, collections
import sysconfig
from os.path import join
try:
import _multiprocessing
except ImportError:
# The only function we need from multiprocessing is cpu_count(), which is
# written in the pure Python part of multiprocessing. We can make it work
# in environments that don't have _multiprocessing by adding an empty
# module in place of _multiprocessing.
import types
sys.modules['_multiprocessing'] = types.ModuleType('fake _multiprocessing')
import multiprocessing
# do not use the long-running runsubprocess._run here, since building some of
# the extensions enables importing them later
os.environ['PYPY_DONT_RUN_SUBPROCESS'] = '1'
class MissingDependenciesError(Exception):
pass
cffi_build_scripts = collections.OrderedDict([
("_ctypes._ctypes_cffi",
"_ctypes/_ctypes_build.py" if sys.platform == 'darwin' else None),
("_pypy_util_cffi_inner", "_pypy_util_build.py"), # this needs to come before ssl
("_ssl", "_ssl_build.py"),
("sqlite3", "_sqlite3_build.py"),
("audioop", "_audioop_build.py"),
("_tkinter", "_tkinter/tklib_build.py"),
("curses", "_curses_build.py" if sys.platform != "win32" else None),
("syslog", "_syslog_build.py" if sys.platform != "win32" else None),
("gdbm", "_gdbm_build.py" if sys.platform != "win32" else None),
("grp", "_pwdgrp_build.py" if sys.platform != "win32" else None),
("resource", "_resource_build.py" if sys.platform != "win32" else None),
# ("_decimal", "_decimal_build.py"), # issue 3024
("xx", None), # for testing: 'None' should be completely ignored
])
# for distribution, we may want to fetch dependencies not provided by
# the OS, such as a recent openssl/libressl.
curdir = os.path.abspath(os.path.dirname(__file__))
deps_destdir = os.path.join(curdir, 'dest')
configure_args = ['./configure',
'--prefix=/usr',
'--disable-shared',
'--enable-silent-rules',
'--disable-dependency-tracking',
]
# please note the deliberate use of a mirror site: we can't use HTTPS
# without an _ssl module, but the OpenSSL download site redirects HTTP
# to HTTPS
cffi_dependencies = {
'lzma': ('http://distfiles.macports.org/xz/xz-5.2.5.tar.bz2',
'5117f930900b341493827d63aa910ff5e011e0b994197c3b71c08a20228a42df',
[configure_args,
['make', '-s', '-j', str(multiprocessing.cpu_count())],
['make', 'install', 'DESTDIR={}/'.format(deps_destdir)],
]),
'_ssl1': ('http://artfiles.org/openssl.org/source/openssl-1.1.1n.tar.gz',
'40dceb51a4f6a5275bde0e6bf20ef4b91bfc32ed57c0552e2e8e15463372b17a',
[
['./config', '--prefix=/usr', 'no-shared'],
['make', '-s', '-j', str(multiprocessing.cpu_count())],
['make', 'install', 'DESTDIR={}/'.format(deps_destdir)],
]),
'_ssl3': ('http://artfiles.org/openssl.org/source/openssl-3.0.1.tar.gz',
'c311ad853353bce796edad01a862c50a8a587f62e7e2100ef465ab53ec9b06d1',
[
['./config', '--prefix=/usr', 'no-shared', 'enable-fips'],
['make', '-s', '-j', str(multiprocessing.cpu_count())],
['make', 'install', 'DESTDIR={}/'.format(deps_destdir)],
]),
}
cffi_dependencies['_ssl'] = cffi_dependencies['_ssl1']
if sys.platform == 'darwin':
# this does not compile on the buildbot, linker is missing '_history_list'
cffi_dependencies['gdbm'] = (
'http://distfiles.macports.org/gdbm/gdbm-1.18.1.tar.gz',
'86e613527e5dba544e73208f42b78b7c022d4fa5a6d5498bf18c8d6f745b91dc',
[configure_args + ['--without-readline'],
['make', '-s', '-j', str(multiprocessing.cpu_count())],
['make', 'install', 'DESTDIR={}/'.format(deps_destdir)],
])
def _unpack_tarfile(filename, extract_dir):
"""Unpack tar/tar.gz/tar.bz2/tar.xz `filename` to `extract_dir`
"""
import tarfile # late import for breaking circular dependency
try:
tarobj = tarfile.open(filename)
except tarfile.TarError:
raise tarfile.ReadError(
"%s is not a compressed or uncompressed tar file" % filename)
try:
tarobj.extractall(extract_dir)
finally:
tarobj.close()
def _sha256(filename):
dgst = hashlib.sha256()
with open(filename, 'rb') as fp:
dgst.update(fp.read())
return dgst.hexdigest()
def _build_dependency(name, patches=[]):
import shutil
from rpython.tool.runsubprocess import run_subprocess
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
try:
url, dgst, build_cmds = cffi_dependencies[name]
except KeyError:
return 0, None, None
archive_dir = os.path.join(tempfile.gettempdir(), 'pypy-archives')
if not os.path.isdir(archive_dir):
os.makedirs(archive_dir)
archive = os.path.join(archive_dir, url.rsplit('/', 1)[-1])
# next, fetch the archive to disk, if needed
if not os.path.exists(archive) or _sha256(archive) != dgst:
print('fetching archive', url, file=sys.stderr)
urlretrieve(url, archive)
# make sure the hash matches
if _sha256(archive) != dgst:
return 1, '{} archive {} hash mismatch'.format(name, archive), ''
shutil.rmtree(deps_destdir, ignore_errors=True)
os.makedirs(deps_destdir)
# extract the archive into our destination directory
print('unpacking archive', archive, file=sys.stderr)
_unpack_tarfile(archive, deps_destdir)
sources = os.path.join(
deps_destdir,
os.path.basename(archive).rsplit('.', 2)[0],
)
# apply any patches
if patches:
for patch in patches:
print('applying patch', patch, file=sys.stderr)
status, stdout, stderr = run_subprocess(
'/usr/bin/patch', ['-p1', '-i', patch], cwd=sources,
)
if status != 0:
return status, stdout, stderr
env = os.environ
if sys.platform == 'darwin':
target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
if target:
# override the value for building support libraries
env = os.environ.copy()
env['MACOSX_DEPLOYMENT_TARGET'] = target
print('setting MACOSX_DEPLOYMENT_TARGET to "{}"'.format(target))
for args in build_cmds:
print('running', ' '.join(args), 'in', sources, file=sys.stderr)
status, stdout, stderr = run_subprocess(args[0], args[1:],
cwd=sources, env=env)
if status != 0:
break
return status, stdout, stderr
def create_cffi_import_libraries(pypy_c, options, basedir, only=None,
embed_dependencies=False, rebuild=False):
from rpython.tool.runsubprocess import run_subprocess
print('calling create_cffi_import_libraries with "embed_dependencies"', embed_dependencies)
shutil.rmtree(str(join(basedir,'lib_pypy','__pycache__')),
ignore_errors=True)
# be sure pip, setuptools are installed in a fresh pypy
# allows proper functioning of cffi on win32 with newer vc compilers
# XXX move this to a build slave step?
env = os.environ
if sys.platform == 'win32':
env = os.environ.copy()
env['INCLUDE'] = r'..\externals\include;' + env.get('INCLUDE', '')
env['LIB'] = r'..\externals\lib;' + env.get('LIB', '')
env['PATH'] = r'..\externals\bin;' + env.get('PATH', '')
status, stdout, stderr = run_subprocess(str(pypy_c), ['-c', 'import setuptools'])
if status != 0:
status, stdout, stderr = run_subprocess(str(pypy_c), ['-m', 'ensurepip'])
failures = []
for key, module in cffi_build_scripts.items():
if only and key not in only:
print("* SKIPPING", key, '(not specified in --only)')
continue
if module is None or getattr(options, 'no_' + key, False):
continue
if not rebuild:
# the key is the module name, has it already been built?
status, stdout, stderr = run_subprocess(str(pypy_c),
['-c', 'import %s' % key], env=env)
if status == 0:
print('*', ' %s already built' % key, file=sys.stderr)
continue
if module.endswith('.py'):
args = [module]
cwd = str(join(basedir,'lib_pypy'))
else:
args = ['-c', 'import ' + module]
cwd = None
print('*', ' '.join(args), file=sys.stderr)
if embed_dependencies and key in cffi_dependencies:
status, stdout, stderr = _build_dependency(key)
if status != 0:
failures.append((key, module))
print("stdout:")
print(stdout.decode('utf-8'))
print("stderr:")
print(stderr.decode('utf-8'))
continue
env['CPPFLAGS'] = '-I{}/usr/include {}'.format(
deps_destdir, env.get('CPPFLAGS', ''))
env['LDFLAGS'] = '-L{}/usr/lib64 -L{}/usr/lib {}'.format(
deps_destdir, deps_destdir, env.get('LDFLAGS', ''))
try:
status, bld_stdout, bld_stderr = run_subprocess(str(pypy_c), args,
cwd=cwd, env=env)
if status != 0:
print("stdout:")
print(bld_stdout, file=sys.stderr)
print("stderr:")
print(bld_stderr, file=sys.stderr)
raise RuntimeError('building {} failed'.format(key))
except:
import traceback;traceback.print_exc()
failures.append((key, module))
else:
# Make sure it worked
status, stdout, stderr = run_subprocess(str(pypy_c),
['-c', "print('testing {0}'); import {0}".format(key)],
env=env)
if status != 0:
failures.append((key, module))
print("build stdout:")
print(bld_stdout, file=sys.stderr)
print("build stderr:")
print(bld_stderr, file=sys.stderr)
print("test stdout:")
print(stdout, file=sys.stderr)
print("test stderr:")
print(stderr, file=sys.stderr)
if os.path.exists(deps_destdir):
shutil.rmtree(deps_destdir, ignore_errors=True)
return failures
if __name__ == '__main__':
import argparse
if '__pypy__' not in sys.builtin_module_names:
print('Call with a pypy interpreter', file=sys.stderr)
sys.exit(1)
tool_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
base_dir = os.path.dirname(os.path.dirname(tool_dir))
sys.path.insert(0, base_dir)
class Options(object):
pass
parser = argparse.ArgumentParser(description='Build all cffi backends in lib_pypy')
parser.add_argument('--exefile', dest='exefile', default=sys.executable,
help='instead of executing sys.executable' \
' you can specify an alternative pypy vm here')
parser.add_argument('--rebuild', dest='rebuild', action='store_true',
help='Rebuild the module even if it already appears to have been built.')
parser.add_argument('--only', dest='only', default=None,
help='Only build the modules delimited by a colon. E.g. _ssl,sqlite')
parser.add_argument('--embed-dependencies', dest='embed_dependencies', action='store_true',
help='embed dependencies for distribution')
args = parser.parse_args()
exename = join(os.getcwd(), args.exefile)
basedir = exename
while not os.path.exists(join(basedir,'include')):
_basedir = os.path.dirname(basedir)
if _basedir == basedir:
raise ValueError('interpreter %s not inside pypy repo' % str(exename))
basedir = _basedir
options = Options()
if args.only is None:
only = None
else:
only = set(args.only.split(','))
failures = create_cffi_import_libraries(exename, options, basedir, only=only,
embed_dependencies=args.embed_dependencies,
rebuild=args.rebuild)
if len(failures) > 0:
print('*** failed to build the CFFI modules %r' % (
[f[1] for f in failures],), file=sys.stderr)
print('''
PyPy can still be used as long as you don't need the corresponding
modules. If you do need them, please install the missing headers and
libraries (see error messages just above) and then re-run the command:
%s %s
''' % (sys.executable, ' '.join(sys.argv)), file=sys.stderr)
sys.exit(1)
if len(sys.argv) > 1 and sys.argv[1] == '--test':
# monkey patch a failure, just to test
print('This line should be followed by a traceback', file=sys.stderr)
for k in cffi_build_scripts:
setattr(options, 'no_' + k, True)
must_fail = '_missing_build_script.py'
assert not os.path.exists(str(join(join(basedir,'lib_pypy'),must_fail)))
cffi_build_scripts['should_fail'] = must_fail
failures = create_cffi_import_libraries(exename, options, basedir, only=only)
assert len(failures) == 1
| []
| []
| [
"PYPY_DONT_RUN_SUBPROCESS"
]
| [] | ["PYPY_DONT_RUN_SUBPROCESS"] | python | 1 | 0 | |
internal/suites/suite_haproxy.go | package suites
import (
"fmt"
"os"
"time"
)
var haproxySuiteName = "HAProxy"
func init() {
dockerEnvironment := NewDockerEnvironment([]string{
"internal/suites/docker-compose.yml",
"internal/suites/HAProxy/docker-compose.yml",
"internal/suites/example/compose/authelia/docker-compose.backend.{}.yml",
"internal/suites/example/compose/authelia/docker-compose.frontend.{}.yml",
"internal/suites/example/compose/nginx/backend/docker-compose.yml",
"internal/suites/example/compose/haproxy/docker-compose.yml",
"internal/suites/example/compose/smtp/docker-compose.yml",
"internal/suites/example/compose/httpbin/docker-compose.yml",
})
if os.Getenv("CI") == t {
dockerEnvironment = NewDockerEnvironment([]string{
"internal/suites/docker-compose.yml",
"internal/suites/HAProxy/docker-compose.yml",
"internal/suites/example/compose/authelia/docker-compose.backend.{}.yml",
"internal/suites/example/compose/nginx/backend/docker-compose.yml",
"internal/suites/example/compose/haproxy/docker-compose.yml",
"internal/suites/example/compose/smtp/docker-compose.yml",
"internal/suites/example/compose/httpbin/docker-compose.yml",
})
}
setup := func(suitePath string) error {
if err := dockerEnvironment.Up(); err != nil {
return err
}
return waitUntilAutheliaIsReady(dockerEnvironment, haproxySuiteName)
}
displayAutheliaLogs := func() error {
backendLogs, err := dockerEnvironment.Logs("authelia-backend", nil)
if err != nil {
return err
}
fmt.Println(backendLogs)
if os.Getenv("CI") != t {
frontendLogs, err := dockerEnvironment.Logs("authelia-frontend", nil)
if err != nil {
return err
}
fmt.Println(frontendLogs)
}
haproxyLogs, err := dockerEnvironment.Logs("haproxy", nil)
if err != nil {
return err
}
fmt.Println(haproxyLogs)
return nil
}
teardown := func(suitePath string) error {
err := dockerEnvironment.Down()
return err
}
GlobalRegistry.Register(haproxySuiteName, Suite{
SetUp: setup,
SetUpTimeout: 5 * time.Minute,
OnSetupTimeout: displayAutheliaLogs,
OnError: displayAutheliaLogs,
TestTimeout: 2 * time.Minute,
TearDown: teardown,
TearDownTimeout: 2 * time.Minute,
})
}
| [
"\"CI\"",
"\"CI\""
]
| []
| [
"CI"
]
| [] | ["CI"] | go | 1 | 0 | |
webserver/ressources/templates.go | package ressources
import (
"bytes"
"log"
"net/http"
"os"
"path/filepath"
"text/template"
)
var MainLayout *template.Template
func init() {
webRoot := os.Getenv("WEBROOT")
//lytDir := http.Dir(filepath.Join(webRoot, "layouts"))
mainLytPath := filepath.Join(webRoot, "layouts", "*.html")
log.Printf("WEBROOT/layouts=%s", mainLytPath)
MainLayout = template.Must(template.New("main").ParseGlob(mainLytPath))
}
func RenderTemplate(w http.ResponseWriter, r *http.Request, tpl *template.Template, name string, data interface{}) {
buf := new(bytes.Buffer)
if err := tpl.ExecuteTemplate(buf, name, data); err != nil {
log.Printf("Error%s", MainLayout.DefinedTemplates())
log.Printf("\nRender Error: %v\n", err)
return
}
w.Write(buf.Bytes())
}
// Push the given resource to the client (HTTP/2).
func Push(w http.ResponseWriter, resource string) {
pusher, ok := w.(http.Pusher)
if ok {
if err := pusher.Push(resource, nil); err == nil {
return
}
}
}
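// Example (illustrative only): a handler might push a static asset before
// rendering a page. The asset path "/static/site.css" and the template name
// "index" are assumptions, not part of this package.
//
//	func home(w http.ResponseWriter, r *http.Request) {
//		Push(w, "/static/site.css")
//		RenderTemplate(w, r, MainLayout, "index", nil)
//	}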
| [
"\"WEBROOT\""
]
| []
| [
"WEBROOT"
]
| [] | ["WEBROOT"] | go | 1 | 0 | |
main.go | package main
import (
"fmt"
"log"
"net/http"
"os"
"strings"
"github.com/ara-ta3/reviewer-notification/service"
"github.com/ara-ta3/reviewer-notification/slack"
"encoding/json"
)
var logger = log.New(os.Stdout, "", log.Ldate|log.Ltime|log.Lshortfile)
func main() {
u := os.Getenv("SLACK_WEBHOOK_URL")
token := os.Getenv("TOKEN")
labels := strings.Split(os.Getenv("TARGET_LABELS"), ",")
p := os.Getenv("PORT")
accountMap := parseAccountMap(os.Getenv("ACCOUNT_MAP"))
slackChannel := os.Getenv("SLACK_CHANNEL")
if p == "" {
p = "80"
}
logger.Printf("target labels: %#v\n", labels)
logger.Printf("port: %#v\n", p)
logger.Printf("slack channel id: %#v\n", slackChannel)
logger.Printf("account map: %#v\n", accountMap)
h := service.GithubNotificationHandler{
NotificationService: service.NewReviewerNotification(
slack.NewSlackClient(u, slackChannel),
token,
labels,
logger,
accountMap,
),
Logger: *logger,
}
http.Handle("/", h)
http.HandleFunc("/accounts", func(res http.ResponseWriter, req *http.Request) {
j, e := json.Marshal(accountMap)
res.Header().Set("Content-Type", "application/json")
if e != nil {
res.WriteHeader(500)
logger.Printf("%#v\n", e)
return
}
res.WriteHeader(200)
res.Write(j)
})
http.ListenAndServe(fmt.Sprintf(":%s", p), nil)
}
func parseAccountMap(s string) map[string]string {
ms := strings.Split(s, ",")
r := map[string]string{}
for _, m := range ms {
if m == "" {
continue
}
x := strings.Split(m, ":")
key := strings.TrimSpace(x[0])
r[key] = x[1]
}
return r
}
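// For illustration: parseAccountMap("alice:U111,bob:U222") returns
// map[string]string{"alice": "U111", "bob": "U222"}. Only the key is trimmed,
// so any whitespace after the ':' remains part of the value.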
| [
"\"SLACK_WEBHOOK_URL\"",
"\"TOKEN\"",
"\"TARGET_LABELS\"",
"\"PORT\"",
"\"ACCOUNT_MAP\"",
"\"SLACK_CHANNEL\""
]
| []
| [
"PORT",
"TOKEN",
"SLACK_CHANNEL",
"ACCOUNT_MAP",
"SLACK_WEBHOOK_URL",
"TARGET_LABELS"
]
| [] | ["PORT", "TOKEN", "SLACK_CHANNEL", "ACCOUNT_MAP", "SLACK_WEBHOOK_URL", "TARGET_LABELS"] | go | 6 | 0 | |
fastfood/fastfood/asgi.py | """
ASGI config for fastfood project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fastfood.settings')
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
tests/test_configuration.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import unittest
import warnings
from collections import OrderedDict
from unittest import mock
from airflow import configuration
from airflow.configuration import AirflowConfigParser, conf, parameterized_config
@unittest.mock.patch.dict('os.environ', {
'AIRFLOW__TESTSECTION__TESTKEY': 'testvalue',
'AIRFLOW__TESTSECTION__TESTPERCENT': 'with%percent'
})
class TestConf(unittest.TestCase):
@classmethod
def setUpClass(cls):
conf.set('core', 'percent', 'with%%inside')
def test_airflow_home_default(self):
with unittest.mock.patch.dict('os.environ'):
if 'AIRFLOW_HOME' in os.environ:
del os.environ['AIRFLOW_HOME']
self.assertEqual(
configuration.get_airflow_home(),
configuration.expand_env_var('~/airflow'))
def test_airflow_home_override(self):
with unittest.mock.patch.dict('os.environ', AIRFLOW_HOME='/path/to/airflow'):
self.assertEqual(
configuration.get_airflow_home(),
'/path/to/airflow')
def test_airflow_config_default(self):
with unittest.mock.patch.dict('os.environ'):
if 'AIRFLOW_CONFIG' in os.environ:
del os.environ['AIRFLOW_CONFIG']
self.assertEqual(
configuration.get_airflow_config('/home/airflow'),
configuration.expand_env_var('/home/airflow/airflow.cfg'))
def test_airflow_config_override(self):
with unittest.mock.patch.dict('os.environ', AIRFLOW_CONFIG='/path/to/airflow/airflow.cfg'):
self.assertEqual(
configuration.get_airflow_config('/home//airflow'),
'/path/to/airflow/airflow.cfg')
def test_case_sensitivity(self):
# section and key are case insensitive for get method
# note: this is not the case for as_dict method
self.assertEqual(conf.get("core", "percent"), "with%inside")
self.assertEqual(conf.get("core", "PERCENT"), "with%inside")
self.assertEqual(conf.get("CORE", "PERCENT"), "with%inside")
def test_env_var_config(self):
opt = conf.get('testsection', 'testkey')
self.assertEqual(opt, 'testvalue')
opt = conf.get('testsection', 'testpercent')
self.assertEqual(opt, 'with%percent')
self.assertTrue(conf.has_option('testsection', 'testkey'))
with unittest.mock.patch.dict(
'os.environ',
AIRFLOW__KUBERNETES_ENVIRONMENT_VARIABLES__AIRFLOW__TESTSECTION__TESTKEY='nested'
):
opt = conf.get('kubernetes_environment_variables', 'AIRFLOW__TESTSECTION__TESTKEY')
self.assertEqual(opt, 'nested')
@mock.patch.dict(
'os.environ',
AIRFLOW__KUBERNETES_ENVIRONMENT_VARIABLES__AIRFLOW__TESTSECTION__TESTKEY='nested'
)
def test_conf_as_dict(self):
cfg_dict = conf.as_dict()
# test that configs are picked up
self.assertEqual(cfg_dict['core']['unit_test_mode'], 'True')
self.assertEqual(cfg_dict['core']['percent'], 'with%inside')
# test env vars
self.assertEqual(cfg_dict['testsection']['testkey'], '< hidden >')
self.assertEqual(
cfg_dict['kubernetes_environment_variables']['AIRFLOW__TESTSECTION__TESTKEY'],
'< hidden >')
def test_conf_as_dict_source(self):
# test display_source
cfg_dict = conf.as_dict(display_source=True)
self.assertEqual(
cfg_dict['core']['load_examples'][1], 'airflow.cfg')
self.assertEqual(
cfg_dict['testsection']['testkey'], ('< hidden >', 'env var'))
def test_conf_as_dict_sensitive(self):
# test display_sensitive
cfg_dict = conf.as_dict(display_sensitive=True)
self.assertEqual(cfg_dict['testsection']['testkey'], 'testvalue')
self.assertEqual(cfg_dict['testsection']['testpercent'], 'with%percent')
# test display_source and display_sensitive
cfg_dict = conf.as_dict(display_sensitive=True, display_source=True)
self.assertEqual(
cfg_dict['testsection']['testkey'], ('testvalue', 'env var'))
def test_conf_as_dict_raw(self):
# test display_sensitive
cfg_dict = conf.as_dict(raw=True, display_sensitive=True)
self.assertEqual(cfg_dict['testsection']['testkey'], 'testvalue')
# Values with '%' in them should be escaped
self.assertEqual(cfg_dict['testsection']['testpercent'], 'with%%percent')
self.assertEqual(cfg_dict['core']['percent'], 'with%%inside')
def test_conf_as_dict_exclude_env(self):
# test display_sensitive
cfg_dict = conf.as_dict(include_env=False, display_sensitive=True)
# Since testsection is only created from env vars, it shouldn't be
# present at all if we don't ask for env vars to be included.
self.assertNotIn('testsection', cfg_dict)
def test_command_precedence(self):
TEST_CONFIG = '''[test]
key1 = hello
key2_cmd = printf cmd_result
key3 = airflow
key4_cmd = printf key4_result
'''
TEST_CONFIG_DEFAULT = '''[test]
key1 = awesome
key2 = airflow
[another]
key6 = value6
'''
test_conf = AirflowConfigParser(
default_config=parameterized_config(TEST_CONFIG_DEFAULT))
test_conf.read_string(TEST_CONFIG)
test_conf.as_command_stdout = test_conf.as_command_stdout | {
('test', 'key2'),
('test', 'key4'),
}
self.assertEqual('hello', test_conf.get('test', 'key1'))
self.assertEqual('cmd_result', test_conf.get('test', 'key2'))
self.assertEqual('airflow', test_conf.get('test', 'key3'))
self.assertEqual('key4_result', test_conf.get('test', 'key4'))
self.assertEqual('value6', test_conf.get('another', 'key6'))
self.assertEqual('hello', test_conf.get('test', 'key1', fallback='fb'))
self.assertEqual('value6', test_conf.get('another', 'key6', fallback='fb'))
self.assertEqual('fb', test_conf.get('another', 'key7', fallback='fb'))
self.assertEqual(True, test_conf.getboolean('another', 'key8_boolean', fallback='True'))
self.assertEqual(10, test_conf.getint('another', 'key8_int', fallback='10'))
self.assertEqual(1.0, test_conf.getfloat('another', 'key8_float', fallback='1'))
self.assertTrue(test_conf.has_option('test', 'key1'))
self.assertTrue(test_conf.has_option('test', 'key2'))
self.assertTrue(test_conf.has_option('test', 'key3'))
self.assertTrue(test_conf.has_option('test', 'key4'))
self.assertFalse(test_conf.has_option('test', 'key5'))
self.assertTrue(test_conf.has_option('another', 'key6'))
cfg_dict = test_conf.as_dict(display_sensitive=True)
self.assertEqual('cmd_result', cfg_dict['test']['key2'])
self.assertNotIn('key2_cmd', cfg_dict['test'])
# If we exclude _cmds then we should still see the commands to run, not
# their values
cfg_dict = test_conf.as_dict(include_cmds=False, display_sensitive=True)
self.assertNotIn('key4', cfg_dict['test'])
self.assertEqual('printf key4_result', cfg_dict['test']['key4_cmd'])
def test_getboolean(self):
"""Test AirflowConfigParser.getboolean"""
TEST_CONFIG = """
[type_validation]
key1 = non_bool_value
[true]
key2 = t
key3 = true
key4 = 1
[false]
key5 = f
key6 = false
key7 = 0
[inline-comment]
key8 = true #123
"""
test_conf = AirflowConfigParser(default_config=TEST_CONFIG)
with self.assertRaises(ValueError):
test_conf.getboolean('type_validation', 'key1')
self.assertTrue(isinstance(test_conf.getboolean('true', 'key3'), bool))
self.assertEqual(True, test_conf.getboolean('true', 'key2'))
self.assertEqual(True, test_conf.getboolean('true', 'key3'))
self.assertEqual(True, test_conf.getboolean('true', 'key4'))
self.assertEqual(False, test_conf.getboolean('false', 'key5'))
self.assertEqual(False, test_conf.getboolean('false', 'key6'))
self.assertEqual(False, test_conf.getboolean('false', 'key7'))
self.assertEqual(True, test_conf.getboolean('inline-comment', 'key8'))
def test_getint(self):
"""Test AirflowConfigParser.getint"""
TEST_CONFIG = """
[invalid]
key1 = str
[valid]
key2 = 1
"""
test_conf = AirflowConfigParser(default_config=TEST_CONFIG)
with self.assertRaises(ValueError):
test_conf.getint('invalid', 'key1')
self.assertTrue(isinstance(test_conf.getint('valid', 'key2'), int))
self.assertEqual(1, test_conf.getint('valid', 'key2'))
def test_getfloat(self):
"""Test AirflowConfigParser.getfloat"""
TEST_CONFIG = """
[invalid]
key1 = str
[valid]
key2 = 1.23
"""
test_conf = AirflowConfigParser(default_config=TEST_CONFIG)
with self.assertRaises(ValueError):
test_conf.getfloat('invalid', 'key1')
self.assertTrue(isinstance(test_conf.getfloat('valid', 'key2'), float))
self.assertEqual(1.23, test_conf.getfloat('valid', 'key2'))
def test_has_option(self):
TEST_CONFIG = '''[test]
key1 = value1
'''
test_conf = AirflowConfigParser()
test_conf.read_string(TEST_CONFIG)
self.assertTrue(test_conf.has_option('test', 'key1'))
self.assertFalse(test_conf.has_option('test', 'key_not_exists'))
self.assertFalse(test_conf.has_option('section_not_exists', 'key1'))
def test_remove_option(self):
TEST_CONFIG = '''[test]
key1 = hello
key2 = airflow
'''
TEST_CONFIG_DEFAULT = '''[test]
key1 = awesome
key2 = airflow
'''
test_conf = AirflowConfigParser(
default_config=parameterized_config(TEST_CONFIG_DEFAULT))
test_conf.read_string(TEST_CONFIG)
self.assertEqual('hello', test_conf.get('test', 'key1'))
test_conf.remove_option('test', 'key1', remove_default=False)
self.assertEqual('awesome', test_conf.get('test', 'key1'))
test_conf.remove_option('test', 'key2')
self.assertFalse(test_conf.has_option('test', 'key2'))
def test_getsection(self):
TEST_CONFIG = '''
[test]
key1 = hello
'''
TEST_CONFIG_DEFAULT = '''
[test]
key1 = awesome
key2 = airflow
[testsection]
key3 = value3
'''
test_conf = AirflowConfigParser(
default_config=parameterized_config(TEST_CONFIG_DEFAULT))
test_conf.read_string(TEST_CONFIG)
self.assertEqual(
OrderedDict([('key1', 'hello'), ('key2', 'airflow')]),
test_conf.getsection('test')
)
self.assertEqual(
OrderedDict([
('key3', 'value3'),
('testkey', 'testvalue'),
('testpercent', 'with%percent')]),
test_conf.getsection('testsection')
)
def test_kubernetes_environment_variables_section(self):
TEST_CONFIG = '''
[kubernetes_environment_variables]
key1 = hello
AIRFLOW_HOME = /root/airflow
'''
TEST_CONFIG_DEFAULT = '''
[kubernetes_environment_variables]
'''
test_conf = AirflowConfigParser(
default_config=parameterized_config(TEST_CONFIG_DEFAULT))
test_conf.read_string(TEST_CONFIG)
self.assertEqual(
OrderedDict([('key1', 'hello'), ('AIRFLOW_HOME', '/root/airflow')]),
test_conf.getsection('kubernetes_environment_variables')
)
def test_broker_transport_options(self):
section_dict = conf.getsection("celery_broker_transport_options")
self.assertTrue(isinstance(section_dict['visibility_timeout'], int))
self.assertTrue(isinstance(section_dict['_test_only_bool'], bool))
self.assertTrue(isinstance(section_dict['_test_only_float'], float))
self.assertTrue(isinstance(section_dict['_test_only_string'], str))
def test_deprecated_options(self):
# Guarantee we have a deprecated setting, so we test the deprecation
# lookup even if we remove this explicit fallback
conf.deprecated_options['celery'] = {
'worker_concurrency': 'celeryd_concurrency',
}
# Remove it so we are sure we use the right setting
conf.remove_option('celery', 'worker_concurrency')
with self.assertWarns(DeprecationWarning):
with mock.patch.dict('os.environ', AIRFLOW__CELERY__CELERYD_CONCURRENCY="99"):
self.assertEqual(conf.getint('celery', 'worker_concurrency'), 99)
with self.assertWarns(DeprecationWarning):
conf.set('celery', 'celeryd_concurrency', '99')
self.assertEqual(conf.getint('celery', 'worker_concurrency'), 99)
conf.remove_option('celery', 'celeryd_concurrency')
def test_deprecated_options_cmd(self):
# Guarantee we have a deprecated setting, so we test the deprecation
# lookup even if we remove this explicit fallback
conf.deprecated_options['celery'] = {'result_backend': 'celery_result_backend'}
conf.as_command_stdout.add(('celery', 'celery_result_backend'))
conf.remove_option('celery', 'result_backend')
conf.set('celery', 'celery_result_backend_cmd', '/bin/echo 99')
with self.assertWarns(DeprecationWarning):
tmp = None
if 'AIRFLOW__CELERY__RESULT_BACKEND' in os.environ:
tmp = os.environ.pop('AIRFLOW__CELERY__RESULT_BACKEND')
self.assertEqual(conf.getint('celery', 'result_backend'), 99)
if tmp:
os.environ['AIRFLOW__CELERY__RESULT_BACKEND'] = tmp
def test_deprecated_values(self):
def make_config():
test_conf = AirflowConfigParser(default_config='')
# Guarantee we have a deprecated setting, so we test the deprecation
# lookup even if we remove this explicit fallback
test_conf.deprecated_values = {
'core': {
'task_runner': ('BashTaskRunner', 'StandardTaskRunner', '2.0'),
},
}
test_conf.read_dict({
'core': {
'executor': 'SequentialExecutor',
'task_runner': 'BashTaskRunner',
'sql_alchemy_conn': 'sqlite://',
},
})
return test_conf
with self.assertWarns(FutureWarning):
test_conf = make_config()
self.assertEqual(test_conf.get('core', 'task_runner'), 'StandardTaskRunner')
with self.assertWarns(FutureWarning):
with unittest.mock.patch.dict('os.environ', AIRFLOW__CORE__TASK_RUNNER='BashTaskRunner'):
test_conf = make_config()
self.assertEqual(test_conf.get('core', 'task_runner'), 'StandardTaskRunner')
with warnings.catch_warnings(record=True) as w:
with unittest.mock.patch.dict('os.environ', AIRFLOW__CORE__TASK_RUNNER='NotBashTaskRunner'):
test_conf = make_config()
self.assertEqual(test_conf.get('core', 'task_runner'), 'NotBashTaskRunner')
self.assertListEqual([], w)
def test_deprecated_funcs(self):
for func in ['load_test_config', 'get', 'getboolean', 'getfloat', 'getint', 'has_option',
'remove_option', 'as_dict', 'set']:
with mock.patch('airflow.configuration.{}'.format(func)):
with self.assertWarns(DeprecationWarning):
getattr(configuration, func)()
| []
| []
| [
"AIRFLOW__CELERY__RESULT_BACKEND",
"AIRFLOW_HOME",
"AIRFLOW_CONFIG"
]
| [] | ["AIRFLOW__CELERY__RESULT_BACKEND", "AIRFLOW_HOME", "AIRFLOW_CONFIG"] | python | 3 | 0 | |
tests/__init__.py | """Tests for georss-generic-client library."""
import os
def load_fixture(filename):
"""Load a fixture."""
path = os.path.join(os.path.dirname(__file__), "fixtures", filename)
with open(path, encoding="utf-8") as fptr:
return fptr.read()
| []
| []
| []
| [] | [] | python | null | null | null |
tests/unit/gapic/compute_v1/test_ssl_certificates.py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import mock
import grpc
from grpc.experimental import aio
import json
import math
import pytest
from proto.marshal.rules.dates import DurationRule, TimestampRule
from requests import Response
from requests import Request, PreparedRequest
from requests.sessions import Session
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.compute_v1.services.ssl_certificates import SslCertificatesClient
from google.cloud.compute_v1.services.ssl_certificates import pagers
from google.cloud.compute_v1.services.ssl_certificates import transports
from google.cloud.compute_v1.types import compute
from google.oauth2 import service_account
import google.auth
def client_cert_source_callback():
return b"cert bytes", b"key bytes"
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
return (
"foo.googleapis.com"
if ("localhost" in client.DEFAULT_ENDPOINT)
else client.DEFAULT_ENDPOINT
)
def test__get_default_mtls_endpoint():
api_endpoint = "example.googleapis.com"
api_mtls_endpoint = "example.mtls.googleapis.com"
sandbox_endpoint = "example.sandbox.googleapis.com"
sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
non_googleapi = "api.example.com"
assert SslCertificatesClient._get_default_mtls_endpoint(None) is None
assert (
SslCertificatesClient._get_default_mtls_endpoint(api_endpoint)
== api_mtls_endpoint
)
assert (
SslCertificatesClient._get_default_mtls_endpoint(api_mtls_endpoint)
== api_mtls_endpoint
)
assert (
SslCertificatesClient._get_default_mtls_endpoint(sandbox_endpoint)
== sandbox_mtls_endpoint
)
assert (
SslCertificatesClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
== sandbox_mtls_endpoint
)
assert (
SslCertificatesClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
)
@pytest.mark.parametrize("client_class", [SslCertificatesClient,])
def test_ssl_certificates_client_from_service_account_info(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_info"
) as factory:
factory.return_value = creds
info = {"valid": True}
client = client_class.from_service_account_info(info)
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "compute.googleapis.com:443"
@pytest.mark.parametrize(
"transport_class,transport_name",
[(transports.SslCertificatesRestTransport, "rest"),],
)
def test_ssl_certificates_client_service_account_always_use_jwt(
transport_class, transport_name
):
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=True)
use_jwt.assert_called_once_with(True)
with mock.patch.object(
service_account.Credentials, "with_always_use_jwt_access", create=True
) as use_jwt:
creds = service_account.Credentials(None, None, None)
transport = transport_class(credentials=creds, always_use_jwt_access=False)
use_jwt.assert_not_called()
@pytest.mark.parametrize("client_class", [SslCertificatesClient,])
def test_ssl_certificates_client_from_service_account_file(client_class):
creds = ga_credentials.AnonymousCredentials()
with mock.patch.object(
service_account.Credentials, "from_service_account_file"
) as factory:
factory.return_value = creds
client = client_class.from_service_account_file("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
client = client_class.from_service_account_json("dummy/file/path.json")
assert client.transport._credentials == creds
assert isinstance(client, client_class)
assert client.transport._host == "compute.googleapis.com:443"
def test_ssl_certificates_client_get_transport_class():
transport = SslCertificatesClient.get_transport_class()
available_transports = [
transports.SslCertificatesRestTransport,
]
assert transport in available_transports
transport = SslCertificatesClient.get_transport_class("rest")
assert transport == transports.SslCertificatesRestTransport
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(SslCertificatesClient, transports.SslCertificatesRestTransport, "rest"),],
)
@mock.patch.object(
SslCertificatesClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(SslCertificatesClient),
)
def test_ssl_certificates_client_client_options(
client_class, transport_class, transport_name
):
# Check that if channel is provided we won't create a new one.
with mock.patch.object(SslCertificatesClient, "get_transport_class") as gtc:
transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
client = client_class(transport=transport)
gtc.assert_not_called()
# Check that if channel is provided via str we will create a new one.
with mock.patch.object(SslCertificatesClient, "get_transport_class") as gtc:
client = client_class(transport=transport_name)
gtc.assert_called()
# Check the case api_endpoint is provided.
options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name, client_options=options)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
# "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_MTLS_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
# unsupported value.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
with pytest.raises(MutualTLSChannelError):
client = client_class(transport=transport_name)
# Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
):
with pytest.raises(ValueError):
client = client_class(transport=transport_name)
# Check the case quota_project_id is provided
options = client_options.ClientOptions(quota_project_id="octopus")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id="octopus",
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(
SslCertificatesClient,
transports.SslCertificatesRestTransport,
"rest",
"true",
),
(
SslCertificatesClient,
transports.SslCertificatesRestTransport,
"rest",
"false",
),
],
)
@mock.patch.object(
SslCertificatesClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(SslCertificatesClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_ssl_certificates_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize("client_class", [SslCertificatesClient])
@mock.patch.object(
SslCertificatesClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(SslCertificatesClient),
)
def test_ssl_certificates_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(SslCertificatesClient, transports.SslCertificatesRestTransport, "rest"),],
)
def test_ssl_certificates_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(scopes=["1", "2"],)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[(SslCertificatesClient, transports.SslCertificatesRestTransport, "rest"),],
)
def test_ssl_certificates_client_client_options_credentials_file(
client_class, transport_class, transport_name
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
@pytest.mark.parametrize(
"request_type", [compute.AggregatedListSslCertificatesRequest, dict,]
)
def test_aggregated_list_rest(request_type):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.SslCertificateAggregatedList(
id="id_value",
kind="kind_value",
next_page_token="next_page_token_value",
self_link="self_link_value",
unreachables=["unreachables_value"],
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.SslCertificateAggregatedList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.aggregated_list(request)
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.AggregatedListPager)
assert response.id == "id_value"
assert response.kind == "kind_value"
assert response.next_page_token == "next_page_token_value"
assert response.self_link == "self_link_value"
assert response.unreachables == ["unreachables_value"]
def test_aggregated_list_rest_required_fields(
request_type=compute.AggregatedListSslCertificatesRequest,
):
transport_class = transports.SslCertificatesRestTransport
request_init = {}
request_init["project"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).aggregated_list._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).aggregated_list._get_unset_required_fields(jsonified_request)
# Check that path parameters and body parameters are not mixed in.
assert not set(unset_fields) - set(
(
"max_results",
"include_all_scopes",
"filter",
"order_by",
"page_token",
"return_partial_success",
)
)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.SslCertificateAggregatedList()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will make the real transcode() fail when the
# http_options expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "get",
"query_params": request_init,
}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.SslCertificateAggregatedList.to_json(
return_value
)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.aggregated_list(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_aggregated_list_rest_unset_required_fields():
transport = transports.SslCertificatesRestTransport(
credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.aggregated_list._get_unset_required_fields({})
assert set(unset_fields) == (
set(
(
"maxResults",
"includeAllScopes",
"filter",
"orderBy",
"pageToken",
"returnPartialSuccess",
)
)
& set(("project",))
)
def test_aggregated_list_rest_bad_request(
transport: str = "rest", request_type=compute.AggregatedListSslCertificatesRequest
):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.aggregated_list(request)
def test_aggregated_list_rest_flattened():
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.SslCertificateAggregatedList()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.SslCertificateAggregatedList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1"}
# get truthy value for each flattened field
mock_args = dict(project="project_value",)
mock_args.update(sample_request)
client.aggregated_list(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/aggregated/sslCertificates"
% client.transport._host,
args[1],
)
def test_aggregated_list_rest_flattened_error(transport: str = "rest"):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.aggregated_list(
compute.AggregatedListSslCertificatesRequest(), project="project_value",
)
def test_aggregated_list_rest_pager(transport: str = "rest"):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# TODO(kbandes): remove this mock unless there's a good reason for it.
# with mock.patch.object(path_template, 'transcode') as transcode:
# Set the response as a series of pages
response = (
compute.SslCertificateAggregatedList(
items={
"a": compute.SslCertificatesScopedList(),
"b": compute.SslCertificatesScopedList(),
"c": compute.SslCertificatesScopedList(),
},
next_page_token="abc",
),
compute.SslCertificateAggregatedList(items={}, next_page_token="def",),
compute.SslCertificateAggregatedList(
items={"g": compute.SslCertificatesScopedList(),},
next_page_token="ghi",
),
compute.SslCertificateAggregatedList(
items={
"h": compute.SslCertificatesScopedList(),
"i": compute.SslCertificatesScopedList(),
},
),
)
# Two responses for two calls
response = response + response
# Wrap the values into proper Response objs
response = tuple(
compute.SslCertificateAggregatedList.to_json(x) for x in response
)
return_values = tuple(Response() for _ in response)
for return_val, response_val in zip(return_values, response):
return_val._content = response_val.encode("UTF-8")
return_val.status_code = 200
req.side_effect = return_values
sample_request = {"project": "sample1"}
pager = client.aggregated_list(request=sample_request)
assert isinstance(pager.get("a"), compute.SslCertificatesScopedList)
assert pager.get("h") is None
results = list(pager)
assert len(results) == 6
assert all(isinstance(i, tuple) for i in results)
for result in results:
assert isinstance(result, tuple)
assert tuple(type(t) for t in result) == (
str,
compute.SslCertificatesScopedList,
)
assert pager.get("a") is None
assert isinstance(pager.get("h"), compute.SslCertificatesScopedList)
pages = list(client.aggregated_list(request=sample_request).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize("request_type", [compute.DeleteSslCertificateRequest, dict,])
def test_delete_unary_rest(request_type):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "ssl_certificate": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.delete_unary(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_delete_unary_rest_required_fields(
request_type=compute.DeleteSslCertificateRequest,
):
transport_class = transports.SslCertificatesRestTransport
request_init = {}
request_init["project"] = ""
request_init["ssl_certificate"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).delete._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
jsonified_request["sslCertificate"] = "ssl_certificate_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).delete._get_unset_required_fields(jsonified_request)
# Check that path parameters and body parameters are not mixed in.
assert not set(unset_fields) - set(("request_id",))
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "sslCertificate" in jsonified_request
assert jsonified_request["sslCertificate"] == "ssl_certificate_value"
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will make the real transcode() fail when the
# http_options expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "delete",
"query_params": request_init,
}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.delete_unary(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_delete_unary_rest_unset_required_fields():
transport = transports.SslCertificatesRestTransport(
credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.delete._get_unset_required_fields({})
assert set(unset_fields) == (
set(("requestId",)) & set(("project", "sslCertificate",))
)
def test_delete_unary_rest_bad_request(
transport: str = "rest", request_type=compute.DeleteSslCertificateRequest
):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "ssl_certificate": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.delete_unary(request)
def test_delete_unary_rest_flattened():
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "ssl_certificate": "sample2"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value", ssl_certificate="ssl_certificate_value",
)
mock_args.update(sample_request)
client.delete_unary(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/global/sslCertificates/{ssl_certificate}"
% client.transport._host,
args[1],
)
def test_delete_unary_rest_flattened_error(transport: str = "rest"):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.delete_unary(
compute.DeleteSslCertificateRequest(),
project="project_value",
ssl_certificate="ssl_certificate_value",
)
def test_delete_unary_rest_error():
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
@pytest.mark.parametrize("request_type", [compute.GetSslCertificateRequest, dict,])
def test_get_rest(request_type):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "ssl_certificate": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.SslCertificate(
certificate="certificate_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
expire_time="expire_time_value",
id=205,
kind="kind_value",
name="name_value",
private_key="private_key_value",
region="region_value",
self_link="self_link_value",
subject_alternative_names=["subject_alternative_names_value"],
type_="type__value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.SslCertificate.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.get(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.SslCertificate)
assert response.certificate == "certificate_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.expire_time == "expire_time_value"
assert response.id == 205
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.private_key == "private_key_value"
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.subject_alternative_names == ["subject_alternative_names_value"]
assert response.type_ == "type__value"
def test_get_rest_required_fields(request_type=compute.GetSslCertificateRequest):
transport_class = transports.SslCertificatesRestTransport
request_init = {}
request_init["project"] = ""
request_init["ssl_certificate"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).get._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
jsonified_request["sslCertificate"] = "ssl_certificate_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).get._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
assert "sslCertificate" in jsonified_request
assert jsonified_request["sslCertificate"] == "ssl_certificate_value"
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.SslCertificate()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will make the real transcode() fail when the
# http_options expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "get",
"query_params": request_init,
}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.SslCertificate.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.get(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_get_rest_unset_required_fields():
transport = transports.SslCertificatesRestTransport(
credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.get._get_unset_required_fields({})
assert set(unset_fields) == (set(()) & set(("project", "sslCertificate",)))
def test_get_rest_bad_request(
transport: str = "rest", request_type=compute.GetSslCertificateRequest
):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1", "ssl_certificate": "sample2"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.get(request)
def test_get_rest_flattened():
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.SslCertificate()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.SslCertificate.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1", "ssl_certificate": "sample2"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value", ssl_certificate="ssl_certificate_value",
)
mock_args.update(sample_request)
client.get(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/global/sslCertificates/{ssl_certificate}"
% client.transport._host,
args[1],
)
def test_get_rest_flattened_error(transport: str = "rest"):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.get(
compute.GetSslCertificateRequest(),
project="project_value",
ssl_certificate="ssl_certificate_value",
)
def test_get_rest_error():
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
@pytest.mark.parametrize("request_type", [compute.InsertSslCertificateRequest, dict,])
def test_insert_unary_rest(request_type):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request_init["ssl_certificate_resource"] = {
"certificate": "certificate_value",
"creation_timestamp": "creation_timestamp_value",
"description": "description_value",
"expire_time": "expire_time_value",
"id": 205,
"kind": "kind_value",
"managed": {
"domain_status": {},
"domains": ["domains_value_1", "domains_value_2"],
"status": "status_value",
},
"name": "name_value",
"private_key": "private_key_value",
"region": "region_value",
"self_link": "self_link_value",
"self_managed": {
"certificate": "certificate_value",
"private_key": "private_key_value",
},
"subject_alternative_names": [
"subject_alternative_names_value_1",
"subject_alternative_names_value_2",
],
"type_": "type__value",
}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation(
client_operation_id="client_operation_id_value",
creation_timestamp="creation_timestamp_value",
description="description_value",
end_time="end_time_value",
http_error_message="http_error_message_value",
http_error_status_code=2374,
id=205,
insert_time="insert_time_value",
kind="kind_value",
name="name_value",
operation_group_id="operation_group_id_value",
operation_type="operation_type_value",
progress=885,
region="region_value",
self_link="self_link_value",
start_time="start_time_value",
status=compute.Operation.Status.DONE,
status_message="status_message_value",
target_id=947,
target_link="target_link_value",
user="user_value",
zone="zone_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.insert_unary(request)
# Establish that the response is the type that we expect.
assert isinstance(response, compute.Operation)
assert response.client_operation_id == "client_operation_id_value"
assert response.creation_timestamp == "creation_timestamp_value"
assert response.description == "description_value"
assert response.end_time == "end_time_value"
assert response.http_error_message == "http_error_message_value"
assert response.http_error_status_code == 2374
assert response.id == 205
assert response.insert_time == "insert_time_value"
assert response.kind == "kind_value"
assert response.name == "name_value"
assert response.operation_group_id == "operation_group_id_value"
assert response.operation_type == "operation_type_value"
assert response.progress == 885
assert response.region == "region_value"
assert response.self_link == "self_link_value"
assert response.start_time == "start_time_value"
assert response.status == compute.Operation.Status.DONE
assert response.status_message == "status_message_value"
assert response.target_id == 947
assert response.target_link == "target_link_value"
assert response.user == "user_value"
assert response.zone == "zone_value"
def test_insert_unary_rest_required_fields(
request_type=compute.InsertSslCertificateRequest,
):
transport_class = transports.SslCertificatesRestTransport
request_init = {}
request_init["project"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).insert._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).insert._get_unset_required_fields(jsonified_request)
# Check that path parameters and body parameters are not mixed in.
assert not set(unset_fields) - set(("request_id",))
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will make the real transcode() fail when the
# http_options expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "post",
"query_params": request_init,
}
transcode_result["body"] = {}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.insert_unary(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_insert_unary_rest_unset_required_fields():
transport = transports.SslCertificatesRestTransport(
credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.insert._get_unset_required_fields({})
assert set(unset_fields) == (
set(("requestId",)) & set(("project", "sslCertificateResource",))
)
def test_insert_unary_rest_bad_request(
transport: str = "rest", request_type=compute.InsertSslCertificateRequest
):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request_init["ssl_certificate_resource"] = {
"certificate": "certificate_value",
"creation_timestamp": "creation_timestamp_value",
"description": "description_value",
"expire_time": "expire_time_value",
"id": 205,
"kind": "kind_value",
"managed": {
"domain_status": {},
"domains": ["domains_value_1", "domains_value_2"],
"status": "status_value",
},
"name": "name_value",
"private_key": "private_key_value",
"region": "region_value",
"self_link": "self_link_value",
"self_managed": {
"certificate": "certificate_value",
"private_key": "private_key_value",
},
"subject_alternative_names": [
"subject_alternative_names_value_1",
"subject_alternative_names_value_2",
],
"type_": "type__value",
}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.insert_unary(request)
def test_insert_unary_rest_flattened():
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.Operation()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.Operation.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1"}
# get truthy value for each flattened field
mock_args = dict(
project="project_value",
ssl_certificate_resource=compute.SslCertificate(
certificate="certificate_value"
),
)
mock_args.update(sample_request)
client.insert_unary(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/global/sslCertificates"
% client.transport._host,
args[1],
)
def test_insert_unary_rest_flattened_error(transport: str = "rest"):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.insert_unary(
compute.InsertSslCertificateRequest(),
project="project_value",
ssl_certificate_resource=compute.SslCertificate(
certificate="certificate_value"
),
)
def test_insert_unary_rest_error():
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest"
)
@pytest.mark.parametrize("request_type", [compute.ListSslCertificatesRequest, dict,])
def test_list_rest(request_type):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request = request_type(request_init)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.SslCertificateList(
id="id_value",
kind="kind_value",
next_page_token="next_page_token_value",
self_link="self_link_value",
)
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.SslCertificateList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.list(request)
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.ListPager)
assert response.id == "id_value"
assert response.kind == "kind_value"
assert response.next_page_token == "next_page_token_value"
assert response.self_link == "self_link_value"
def test_list_rest_required_fields(request_type=compute.ListSslCertificatesRequest):
transport_class = transports.SslCertificatesRestTransport
request_init = {}
request_init["project"] = ""
request = request_type(request_init)
jsonified_request = json.loads(
request_type.to_json(
request, including_default_value_fields=False, use_integers_for_enums=False
)
)
# verify fields with default values are dropped
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).list._get_unset_required_fields(jsonified_request)
jsonified_request.update(unset_fields)
# verify required fields with default values are now present
jsonified_request["project"] = "project_value"
unset_fields = transport_class(
credentials=ga_credentials.AnonymousCredentials()
).list._get_unset_required_fields(jsonified_request)
# Check that path parameters and body parameters are not mixed in.
assert not set(unset_fields) - set(
("max_results", "filter", "order_by", "page_token", "return_partial_success",)
)
jsonified_request.update(unset_fields)
# verify required fields with non-default values are left alone
assert "project" in jsonified_request
assert jsonified_request["project"] == "project_value"
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
request = request_type(request_init)
# Designate an appropriate value for the returned response.
return_value = compute.SslCertificateList()
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# We need to mock transcode() because providing default values
# for required fields will make the real transcode() fail when the
# http_options expect actual values for those fields.
with mock.patch.object(path_template, "transcode") as transcode:
# A uri without fields and an empty body will force all the
# request fields to show up in the query_params.
transcode_result = {
"uri": "v1/sample_method",
"method": "get",
"query_params": request_init,
}
transcode.return_value = transcode_result
response_value = Response()
response_value.status_code = 200
json_return_value = compute.SslCertificateList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
response = client.list(request)
expected_params = []
actual_params = req.call_args.kwargs["params"]
assert expected_params == actual_params
def test_list_rest_unset_required_fields():
transport = transports.SslCertificatesRestTransport(
credentials=ga_credentials.AnonymousCredentials()
)
unset_fields = transport.list._get_unset_required_fields({})
assert set(unset_fields) == (
set(("maxResults", "filter", "orderBy", "pageToken", "returnPartialSuccess",))
& set(("project",))
)
def test_list_rest_bad_request(
transport: str = "rest", request_type=compute.ListSslCertificatesRequest
):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# send a request that will satisfy transcoding
request_init = {"project": "sample1"}
request = request_type(request_init)
# Mock the http request call within the method and fake a BadRequest error.
with mock.patch.object(Session, "request") as req, pytest.raises(
core_exceptions.BadRequest
):
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 400
response_value.request = Request()
req.return_value = response_value
client.list(request)
def test_list_rest_flattened():
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport="rest",
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(type(client.transport._session), "request") as req:
# Designate an appropriate value for the returned response.
return_value = compute.SslCertificateList()
# Wrap the value into a proper Response obj
response_value = Response()
response_value.status_code = 200
json_return_value = compute.SslCertificateList.to_json(return_value)
response_value._content = json_return_value.encode("UTF-8")
req.return_value = response_value
# get arguments that satisfy an http rule for this method
sample_request = {"project": "sample1"}
# get truthy value for each flattened field
mock_args = dict(project="project_value",)
mock_args.update(sample_request)
client.list(**mock_args)
# Establish that the underlying call was made with the expected
# request object values.
assert len(req.mock_calls) == 1
_, args, _ = req.mock_calls[0]
assert path_template.validate(
"https://%s/compute/v1/projects/{project}/global/sslCertificates"
% client.transport._host,
args[1],
)
def test_list_rest_flattened_error(transport: str = "rest"):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.list(
compute.ListSslCertificatesRequest(), project="project_value",
)
def test_list_rest_pager(transport: str = "rest"):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# Mock the http request call within the method and fake a response.
with mock.patch.object(Session, "request") as req:
# TODO(kbandes): remove this mock unless there's a good reason for it.
# with mock.patch.object(path_template, 'transcode') as transcode:
# Set the response as a series of pages
response = (
compute.SslCertificateList(
items=[
compute.SslCertificate(),
compute.SslCertificate(),
compute.SslCertificate(),
],
next_page_token="abc",
),
compute.SslCertificateList(items=[], next_page_token="def",),
compute.SslCertificateList(
items=[compute.SslCertificate(),], next_page_token="ghi",
),
compute.SslCertificateList(
items=[compute.SslCertificate(), compute.SslCertificate(),],
),
)
# Two responses for two calls
response = response + response
# Wrap the values into proper Response objs
response = tuple(compute.SslCertificateList.to_json(x) for x in response)
return_values = tuple(Response() for _ in response)
for return_val, response_val in zip(return_values, response):
return_val._content = response_val.encode("UTF-8")
return_val.status_code = 200
req.side_effect = return_values
sample_request = {"project": "sample1"}
pager = client.list(request=sample_request)
results = list(pager)
assert len(results) == 6
assert all(isinstance(i, compute.SslCertificate) for i in results)
pages = list(client.list(request=sample_request).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
def test_credentials_transport_error():
# It is an error to provide credentials and a transport instance.
transport = transports.SslCertificatesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport,
)
# It is an error to provide a credentials file and a transport instance.
transport = transports.SslCertificatesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = SslCertificatesClient(
client_options={"credentials_file": "credentials.json"},
transport=transport,
)
# It is an error to provide an api_key and a transport instance.
transport = transports.SslCertificatesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
options = client_options.ClientOptions()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = SslCertificatesClient(client_options=options, transport=transport,)
# It is an error to provide an api_key and a credential.
options = mock.Mock()
options.api_key = "api_key"
with pytest.raises(ValueError):
client = SslCertificatesClient(
client_options=options, credentials=ga_credentials.AnonymousCredentials()
)
# It is an error to provide scopes and a transport instance.
transport = transports.SslCertificatesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
with pytest.raises(ValueError):
client = SslCertificatesClient(
client_options={"scopes": ["1", "2"]}, transport=transport,
)
def test_transport_instance():
# A client may be instantiated with a custom transport instance.
transport = transports.SslCertificatesRestTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
client = SslCertificatesClient(transport=transport)
assert client.transport is transport
@pytest.mark.parametrize("transport_class", [transports.SslCertificatesRestTransport,])
def test_transport_adc(transport_class):
# Test default credentials are used if not provided.
with mock.patch.object(google.auth, "default") as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport_class()
adc.assert_called_once()
def test_ssl_certificates_base_transport_error():
# Passing both a credentials object and credentials_file should raise an error
with pytest.raises(core_exceptions.DuplicateCredentialArgs):
transport = transports.SslCertificatesTransport(
credentials=ga_credentials.AnonymousCredentials(),
credentials_file="credentials.json",
)
def test_ssl_certificates_base_transport():
# Instantiate the base transport.
with mock.patch(
"google.cloud.compute_v1.services.ssl_certificates.transports.SslCertificatesTransport.__init__"
) as Transport:
Transport.return_value = None
transport = transports.SslCertificatesTransport(
credentials=ga_credentials.AnonymousCredentials(),
)
# Every method on the transport should just blindly
# raise NotImplementedError.
methods = (
"aggregated_list",
"delete",
"get",
"insert",
"list",
)
for method in methods:
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
with pytest.raises(NotImplementedError):
transport.close()
def test_ssl_certificates_base_transport_with_credentials_file():
# Instantiate the base transport with a credentials file
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch(
"google.cloud.compute_v1.services.ssl_certificates.transports.SslCertificatesTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.SslCertificatesTransport(
credentials_file="credentials.json", quota_project_id="octopus",
)
load_creds.assert_called_once_with(
"credentials.json",
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id="octopus",
)
def test_ssl_certificates_base_transport_with_adc():
# Test the default credentials are used if credentials and credentials_file are None.
with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
"google.cloud.compute_v1.services.ssl_certificates.transports.SslCertificatesTransport._prep_wrapped_messages"
) as Transport:
Transport.return_value = None
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
transport = transports.SslCertificatesTransport()
adc.assert_called_once()
def test_ssl_certificates_auth_adc():
# If no credentials are provided, we should use ADC credentials.
with mock.patch.object(google.auth, "default", autospec=True) as adc:
adc.return_value = (ga_credentials.AnonymousCredentials(), None)
SslCertificatesClient()
adc.assert_called_once_with(
scopes=None,
default_scopes=(
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
),
quota_project_id=None,
)
def test_ssl_certificates_http_transport_client_cert_source_for_mtls():
cred = ga_credentials.AnonymousCredentials()
with mock.patch(
"google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"
) as mock_configure_mtls_channel:
transports.SslCertificatesRestTransport(
credentials=cred, client_cert_source_for_mtls=client_cert_source_callback
)
mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
def test_ssl_certificates_host_no_port():
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com"
),
)
assert client.transport._host == "compute.googleapis.com:443"
def test_ssl_certificates_host_with_port():
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(),
client_options=client_options.ClientOptions(
api_endpoint="compute.googleapis.com:8000"
),
)
assert client.transport._host == "compute.googleapis.com:8000"
def test_common_billing_account_path():
billing_account = "squid"
expected = "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
actual = SslCertificatesClient.common_billing_account_path(billing_account)
assert expected == actual
def test_parse_common_billing_account_path():
expected = {
"billing_account": "clam",
}
path = SslCertificatesClient.common_billing_account_path(**expected)
# Check that the path construction is reversible.
actual = SslCertificatesClient.parse_common_billing_account_path(path)
assert expected == actual
def test_common_folder_path():
folder = "whelk"
expected = "folders/{folder}".format(folder=folder,)
actual = SslCertificatesClient.common_folder_path(folder)
assert expected == actual
def test_parse_common_folder_path():
expected = {
"folder": "octopus",
}
path = SslCertificatesClient.common_folder_path(**expected)
# Check that the path construction is reversible.
actual = SslCertificatesClient.parse_common_folder_path(path)
assert expected == actual
def test_common_organization_path():
organization = "oyster"
expected = "organizations/{organization}".format(organization=organization,)
actual = SslCertificatesClient.common_organization_path(organization)
assert expected == actual
def test_parse_common_organization_path():
expected = {
"organization": "nudibranch",
}
path = SslCertificatesClient.common_organization_path(**expected)
# Check that the path construction is reversible.
actual = SslCertificatesClient.parse_common_organization_path(path)
assert expected == actual
def test_common_project_path():
project = "cuttlefish"
expected = "projects/{project}".format(project=project,)
actual = SslCertificatesClient.common_project_path(project)
assert expected == actual
def test_parse_common_project_path():
expected = {
"project": "mussel",
}
path = SslCertificatesClient.common_project_path(**expected)
# Check that the path construction is reversible.
actual = SslCertificatesClient.parse_common_project_path(path)
assert expected == actual
def test_common_location_path():
project = "winkle"
location = "nautilus"
expected = "projects/{project}/locations/{location}".format(
project=project, location=location,
)
actual = SslCertificatesClient.common_location_path(project, location)
assert expected == actual
def test_parse_common_location_path():
expected = {
"project": "scallop",
"location": "abalone",
}
path = SslCertificatesClient.common_location_path(**expected)
# Check that the path construction is reversible.
actual = SslCertificatesClient.parse_common_location_path(path)
assert expected == actual
def test_client_with_default_client_info():
client_info = gapic_v1.client_info.ClientInfo()
with mock.patch.object(
transports.SslCertificatesTransport, "_prep_wrapped_messages"
) as prep:
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
with mock.patch.object(
transports.SslCertificatesTransport, "_prep_wrapped_messages"
) as prep:
transport_class = SslCertificatesClient.get_transport_class()
transport = transport_class(
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
def test_transport_close():
transports = {
"rest": "_session",
}
for transport, close_name in transports.items():
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
with mock.patch.object(
type(getattr(client.transport, close_name)), "close"
) as close:
with client:
close.assert_not_called()
close.assert_called_once()
def test_client_ctx():
transports = [
"rest",
]
for transport in transports:
client = SslCertificatesClient(
credentials=ga_credentials.AnonymousCredentials(), transport=transport
)
# Test client calls underlying transport.
with mock.patch.object(type(client.transport), "close") as close:
close.assert_not_called()
with client:
pass
close.assert_called()
@pytest.mark.parametrize(
"client_class,transport_class",
[(SslCertificatesClient, transports.SslCertificatesRestTransport),],
)
def test_api_key_credentials(client_class, transport_class):
with mock.patch.object(
google.auth._default, "get_api_key_credentials", create=True
) as get_api_key_credentials:
mock_cred = mock.Mock()
get_api_key_credentials.return_value = mock_cred
options = client_options.ClientOptions()
options.api_key = "api_key"
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options)
patched.assert_called_once_with(
credentials=mock_cred,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
)
| [] | [] | [] | [] | [] | python | 0 | 0 |
hackathon/backend/qiskitflow_backend/asgi.py | """
ASGI config for qiskitflow_backend project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'qiskitflow_backend.settings')
application = get_asgi_application()
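# Usage note: ``application`` above is the ASGI callable that an ASGI server
# imports. Assuming an ASGI server such as uvicorn is installed, the app can
# typically be served with:
#
#   uvicorn qiskitflow_backend.asgi:application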
| [] | [] | [] | [] | [] | python | 0 | 0 |
cli/command/container/opts_test.go | package container
import (
"fmt"
"io/ioutil"
"os"
"runtime"
"strings"
"testing"
"time"
"github.com/docker/docker/api/types/container"
networktypes "github.com/docker/docker/api/types/network"
"github.com/docker/go-connections/nat"
"github.com/pkg/errors"
"github.com/spf13/pflag"
"gotest.tools/assert"
is "gotest.tools/assert/cmp"
"gotest.tools/skip"
)
func TestValidateAttach(t *testing.T) {
valid := []string{
"stdin",
"stdout",
"stderr",
"STDIN",
"STDOUT",
"STDERR",
}
if _, err := validateAttach("invalid"); err == nil {
t.Fatal("Expected error with [valid streams are STDIN, STDOUT and STDERR], got nothing")
}
for _, attach := range valid {
value, err := validateAttach(attach)
if err != nil {
t.Fatal(err)
}
if value != strings.ToLower(attach) {
t.Fatalf("Expected [%v], got [%v]", attach, value)
}
}
}
func parseRun(args []string) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) {
flags, copts := setupRunFlags()
if err := flags.Parse(args); err != nil {
return nil, nil, nil, err
}
// TODO: fix tests to accept ContainerConfig
containerConfig, err := parse(flags, copts, runtime.GOOS)
if err != nil {
return nil, nil, nil, err
}
return containerConfig.Config, containerConfig.HostConfig, containerConfig.NetworkingConfig, err
}
func setupRunFlags() (*pflag.FlagSet, *containerOptions) {
flags := pflag.NewFlagSet("run", pflag.ContinueOnError)
flags.SetOutput(ioutil.Discard)
flags.Usage = nil
copts := addFlags(flags)
return flags, copts
}
func parseMustError(t *testing.T, args string) {
_, _, _, err := parseRun(strings.Split(args+" ubuntu bash", " ")) //nolint:dogsled
assert.ErrorContains(t, err, "", args)
}
func mustParse(t *testing.T, args string) (*container.Config, *container.HostConfig) {
config, hostConfig, _, err := parseRun(append(strings.Split(args, " "), "ubuntu", "bash"))
assert.NilError(t, err)
return config, hostConfig
}
func TestParseRunLinks(t *testing.T) {
if _, hostConfig := mustParse(t, "--link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" {
t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links)
}
if _, hostConfig := mustParse(t, "--link a:b --link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" {
t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links)
}
if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 {
t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links)
}
}
func TestParseRunAttach(t *testing.T) {
if config, _ := mustParse(t, "-a stdin"); !config.AttachStdin || config.AttachStdout || config.AttachStderr {
t.Fatalf("Error parsing attach flags. Expect only Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
}
if config, _ := mustParse(t, "-a stdin -a stdout"); !config.AttachStdin || !config.AttachStdout || config.AttachStderr {
t.Fatalf("Error parsing attach flags. Expect only Stdin and Stdout enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
}
if config, _ := mustParse(t, "-a stdin -a stdout -a stderr"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr {
t.Fatalf("Error parsing attach flags. Expect all attach enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
}
if config, _ := mustParse(t, ""); config.AttachStdin || !config.AttachStdout || !config.AttachStderr {
t.Fatalf("Error parsing attach flags. Expect Stdin disabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
}
if config, _ := mustParse(t, "-i"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr {
t.Fatalf("Error parsing attach flags. Expect Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr)
}
}
func TestParseRunWithInvalidArgs(t *testing.T) {
parseMustError(t, "-a")
parseMustError(t, "-a invalid")
parseMustError(t, "-a invalid -a stdout")
parseMustError(t, "-a stdout -a stderr -d")
parseMustError(t, "-a stdin -d")
parseMustError(t, "-a stdout -d")
parseMustError(t, "-a stderr -d")
parseMustError(t, "-d --rm")
}
// nolint: gocyclo
func TestParseWithVolumes(t *testing.T) {
// A single volume
arr, tryit := setupPlatformVolume([]string{`/tmp`}, []string{`c:\tmp`})
if config, hostConfig := mustParse(t, tryit); hostConfig.Binds != nil {
t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. Received %v", tryit, hostConfig.Binds)
} else if _, exists := config.Volumes[arr[0]]; !exists {
t.Fatalf("Error parsing volume flags, %q is missing from volumes. Received %v", tryit, config.Volumes)
}
// Two volumes
arr, tryit = setupPlatformVolume([]string{`/tmp`, `/var`}, []string{`c:\tmp`, `c:\var`})
if config, hostConfig := mustParse(t, tryit); hostConfig.Binds != nil {
t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. Received %v", tryit, hostConfig.Binds)
} else if _, exists := config.Volumes[arr[0]]; !exists {
t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[0], config.Volumes)
} else if _, exists := config.Volumes[arr[1]]; !exists {
t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[1], config.Volumes)
}
// A single bind mount
arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`})
if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || hostConfig.Binds[0] != arr[0] {
t.Fatalf("Error parsing volume flags, %q should mount-bind the path before the colon into the path after the colon. Received %v %v", arr[0], hostConfig.Binds, config.Volumes)
}
// Two bind mounts.
arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/hostVar:/containerVar`}, []string{os.Getenv("ProgramData") + `:c:\ContainerPD`, os.Getenv("TEMP") + `:c:\containerTmp`})
if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil {
t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds)
}
// Two bind mounts, first read-only, second read-write.
// TODO Windows: The Windows version uses read-write as that's the only mode it supports. Can change this post TP4
arr, tryit = setupPlatformVolume(
[]string{`/hostTmp:/containerTmp:ro`, `/hostVar:/containerVar:rw`},
[]string{os.Getenv("TEMP") + `:c:\containerTmp:rw`, os.Getenv("ProgramData") + `:c:\ContainerPD:rw`})
if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil {
t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds)
}
// Similar to previous test but with alternate modes which are only supported by Linux
if runtime.GOOS != "windows" {
arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:ro,Z`, `/hostVar:/containerVar:rw,Z`}, []string{})
if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil {
t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds)
}
arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:Z`, `/hostVar:/containerVar:z`}, []string{})
if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil {
t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds)
}
}
// One bind mount and one volume
arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/containerVar`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`, `c:\containerTmp`})
if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] {
t.Fatalf("Error parsing volume flags, %s and %s should only one and only one bind mount %s. Received %s", arr[0], arr[1], arr[0], hostConfig.Binds)
} else if _, exists := config.Volumes[arr[1]]; !exists {
t.Fatalf("Error parsing volume flags %s and %s. %s is missing from volumes. Received %v", arr[0], arr[1], arr[1], config.Volumes)
}
// Root to non-c: drive letter (Windows specific)
if runtime.GOOS == "windows" {
arr, tryit = setupPlatformVolume([]string{}, []string{os.Getenv("SystemDrive") + `\:d:`})
if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] || len(config.Volumes) != 0 {
t.Fatalf("Error parsing %s. Should have a single bind mount and no volumes", arr[0])
}
}
}
// setupPlatformVolume takes two arrays of volume specs - a Unix style
// spec and a Windows style spec. Depending on the platform being unit tested,
// it returns one of them, along with a volume string that would be passed
// on the docker CLI (e.g. -v /bar -v /foo).
func setupPlatformVolume(u []string, w []string) ([]string, string) {
var a []string
if runtime.GOOS == "windows" {
a = w
} else {
a = u
}
s := ""
for _, v := range a {
s = s + "-v " + v + " "
}
return a, s
}
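// Illustrative example (not part of the original tests): on Linux,
// setupPlatformVolume([]string{"/foo", "/bar"}, []string{`c:\foo`, `c:\bar`})
// returns ([]string{"/foo", "/bar"}, "-v /foo -v /bar ") (note the trailing space).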
// check if (a == c && b == d) || (a == d && b == c)
// because maps are randomized
func compareRandomizedStrings(a, b, c, d string) error {
if a == c && b == d {
return nil
}
if a == d && b == c {
return nil
}
return errors.Errorf("strings don't match")
}
// Simple parse with MacAddress validation
func TestParseWithMacAddress(t *testing.T) {
invalidMacAddress := "--mac-address=invalidMacAddress"
validMacAddress := "--mac-address=92:d0:c6:0a:29:33"
if _, _, _, err := parseRun([]string{invalidMacAddress, "img", "cmd"}); err != nil && err.Error() != "invalidMacAddress is not a valid mac address" {
t.Fatalf("Expected an error with %v mac-address, got %v", invalidMacAddress, err)
}
if config, _ := mustParse(t, validMacAddress); config.MacAddress != "92:d0:c6:0a:29:33" {
t.Fatalf("Expected the config to have '92:d0:c6:0a:29:33' as MacAddress, got '%v'", config.MacAddress)
}
}
func TestRunFlagsParseWithMemory(t *testing.T) {
flags, _ := setupRunFlags()
args := []string{"--memory=invalid", "img", "cmd"}
err := flags.Parse(args)
assert.ErrorContains(t, err, `invalid argument "invalid" for "-m, --memory" flag`)
_, hostconfig := mustParse(t, "--memory=1G")
assert.Check(t, is.Equal(int64(1073741824), hostconfig.Memory))
}
func TestParseWithMemorySwap(t *testing.T) {
flags, _ := setupRunFlags()
args := []string{"--memory-swap=invalid", "img", "cmd"}
err := flags.Parse(args)
assert.ErrorContains(t, err, `invalid argument "invalid" for "--memory-swap" flag`)
_, hostconfig := mustParse(t, "--memory-swap=1G")
assert.Check(t, is.Equal(int64(1073741824), hostconfig.MemorySwap))
_, hostconfig = mustParse(t, "--memory-swap=-1")
assert.Check(t, is.Equal(int64(-1), hostconfig.MemorySwap))
}
func TestParseHostname(t *testing.T) {
validHostnames := map[string]string{
"hostname": "hostname",
"host-name": "host-name",
"hostname123": "hostname123",
"123hostname": "123hostname",
"hostname-of-63-bytes-long-should-be-valid-and-without-any-error": "hostname-of-63-bytes-long-should-be-valid-and-without-any-error",
}
hostnameWithDomain := "--hostname=hostname.domainname"
hostnameWithDomainTld := "--hostname=hostname.domainname.tld"
for hostname, expectedHostname := range validHostnames {
if config, _ := mustParse(t, fmt.Sprintf("--hostname=%s", hostname)); config.Hostname != expectedHostname {
t.Fatalf("Expected the config to have 'hostname' as %q, got %q", expectedHostname, config.Hostname)
}
}
if config, _ := mustParse(t, hostnameWithDomain); config.Hostname != "hostname.domainname" || config.Domainname != "" {
t.Fatalf("Expected the config to have 'hostname' as hostname.domainname, got %q", config.Hostname)
}
if config, _ := mustParse(t, hostnameWithDomainTld); config.Hostname != "hostname.domainname.tld" || config.Domainname != "" {
t.Fatalf("Expected the config to have 'hostname' as hostname.domainname.tld, got %q", config.Hostname)
}
}
func TestParseHostnameDomainname(t *testing.T) {
validDomainnames := map[string]string{
"domainname": "domainname",
"domain-name": "domain-name",
"domainname123": "domainname123",
"123domainname": "123domainname",
"domainname-63-bytes-long-should-be-valid-and-without-any-errors": "domainname-63-bytes-long-should-be-valid-and-without-any-errors",
}
for domainname, expectedDomainname := range validDomainnames {
if config, _ := mustParse(t, "--domainname="+domainname); config.Domainname != expectedDomainname {
t.Fatalf("Expected the config to have 'domainname' as %q, got %q", expectedDomainname, config.Domainname)
}
}
if config, _ := mustParse(t, "--hostname=some.prefix --domainname=domainname"); config.Hostname != "some.prefix" || config.Domainname != "domainname" {
t.Fatalf("Expected the config to have 'hostname' as 'some.prefix' and 'domainname' as 'domainname', got %q and %q", config.Hostname, config.Domainname)
}
if config, _ := mustParse(t, "--hostname=another-prefix --domainname=domainname.tld"); config.Hostname != "another-prefix" || config.Domainname != "domainname.tld" {
t.Fatalf("Expected the config to have 'hostname' as 'another-prefix' and 'domainname' as 'domainname.tld', got %q and %q", config.Hostname, config.Domainname)
}
}
func TestParseWithExpose(t *testing.T) {
invalids := map[string]string{
":": "invalid port format for --expose: :",
"8080:9090": "invalid port format for --expose: 8080:9090",
"/tcp": "invalid range format for --expose: /tcp, error: Empty string specified for ports.",
"/udp": "invalid range format for --expose: /udp, error: Empty string specified for ports.",
"NaN/tcp": `invalid range format for --expose: NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`,
"NaN-NaN/tcp": `invalid range format for --expose: NaN-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`,
"8080-NaN/tcp": `invalid range format for --expose: 8080-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`,
"1234567890-8080/tcp": `invalid range format for --expose: 1234567890-8080/tcp, error: strconv.ParseUint: parsing "1234567890": value out of range`,
}
valids := map[string][]nat.Port{
"8080/tcp": {"8080/tcp"},
"8080/udp": {"8080/udp"},
"8080/ncp": {"8080/ncp"},
"8080-8080/udp": {"8080/udp"},
"8080-8082/tcp": {"8080/tcp", "8081/tcp", "8082/tcp"},
}
for expose, expectedError := range invalids {
if _, _, _, err := parseRun([]string{fmt.Sprintf("--expose=%v", expose), "img", "cmd"}); err == nil || err.Error() != expectedError {
t.Fatalf("Expected error '%v' with '--expose=%v', got '%v'", expectedError, expose, err)
}
}
for expose, exposedPorts := range valids {
config, _, _, err := parseRun([]string{fmt.Sprintf("--expose=%v", expose), "img", "cmd"})
if err != nil {
t.Fatal(err)
}
if len(config.ExposedPorts) != len(exposedPorts) {
t.Fatalf("Expected %v exposed port, got %v", len(exposedPorts), len(config.ExposedPorts))
}
for _, port := range exposedPorts {
if _, ok := config.ExposedPorts[port]; !ok {
t.Fatalf("Expected %v, got %v", exposedPorts, config.ExposedPorts)
}
}
}
// Merge with actual published port
config, _, _, err := parseRun([]string{"--publish=80", "--expose=80-81/tcp", "img", "cmd"})
if err != nil {
t.Fatal(err)
}
if len(config.ExposedPorts) != 2 {
t.Fatalf("Expected 2 exposed ports, got %v", config.ExposedPorts)
}
ports := []nat.Port{"80/tcp", "81/tcp"}
for _, port := range ports {
if _, ok := config.ExposedPorts[port]; !ok {
t.Fatalf("Expected %v, got %v", ports, config.ExposedPorts)
}
}
}
func TestParseDevice(t *testing.T) {
skip.If(t, runtime.GOOS == "windows") // Windows validates server-side
valids := map[string]container.DeviceMapping{
"/dev/snd": {
PathOnHost: "/dev/snd",
PathInContainer: "/dev/snd",
CgroupPermissions: "rwm",
},
"/dev/snd:rw": {
PathOnHost: "/dev/snd",
PathInContainer: "/dev/snd",
CgroupPermissions: "rw",
},
"/dev/snd:/something": {
PathOnHost: "/dev/snd",
PathInContainer: "/something",
CgroupPermissions: "rwm",
},
"/dev/snd:/something:rw": {
PathOnHost: "/dev/snd",
PathInContainer: "/something",
CgroupPermissions: "rw",
},
}
for device, deviceMapping := range valids {
_, hostconfig, _, err := parseRun([]string{fmt.Sprintf("--device=%v", device), "img", "cmd"})
if err != nil {
t.Fatal(err)
}
if len(hostconfig.Devices) != 1 {
t.Fatalf("Expected 1 devices, got %v", hostconfig.Devices)
}
if hostconfig.Devices[0] != deviceMapping {
t.Fatalf("Expected %v, got %v", deviceMapping, hostconfig.Devices)
}
}
}
func TestParseNetworkConfig(t *testing.T) {
tests := []struct {
name string
flags []string
expected map[string]*networktypes.EndpointSettings
expectedCfg container.HostConfig
expectedErr string
}{
{
name: "single-network-legacy",
flags: []string{"--network", "net1"},
expected: map[string]*networktypes.EndpointSettings{},
expectedCfg: container.HostConfig{NetworkMode: "net1"},
},
{
name: "single-network-advanced",
flags: []string{"--network", "name=net1"},
expected: map[string]*networktypes.EndpointSettings{},
expectedCfg: container.HostConfig{NetworkMode: "net1"},
},
{
name: "single-network-legacy-with-options",
flags: []string{
"--ip", "172.20.88.22",
"--ip6", "2001:db8::8822",
"--link", "foo:bar",
"--link", "bar:baz",
"--link-local-ip", "169.254.2.2",
"--link-local-ip", "fe80::169:254:2:2",
"--network", "name=net1",
"--network-alias", "web1",
"--network-alias", "web2",
},
expected: map[string]*networktypes.EndpointSettings{
"net1": {
IPAMConfig: &networktypes.EndpointIPAMConfig{
IPv4Address: "172.20.88.22",
IPv6Address: "2001:db8::8822",
LinkLocalIPs: []string{"169.254.2.2", "fe80::169:254:2:2"},
},
Links: []string{"foo:bar", "bar:baz"},
Aliases: []string{"web1", "web2"},
},
},
expectedCfg: container.HostConfig{NetworkMode: "net1"},
},
{
name: "multiple-network-advanced-mixed",
flags: []string{
"--ip", "172.20.88.22",
"--ip6", "2001:db8::8822",
"--link", "foo:bar",
"--link", "bar:baz",
"--link-local-ip", "169.254.2.2",
"--link-local-ip", "fe80::169:254:2:2",
"--network", "name=net1,driver-opt=field1=value1",
"--network-alias", "web1",
"--network-alias", "web2",
"--network", "net2",
"--network", "name=net3,alias=web3,driver-opt=field3=value3",
},
expected: map[string]*networktypes.EndpointSettings{
"net1": {
DriverOpts: map[string]string{"field1": "value1"},
IPAMConfig: &networktypes.EndpointIPAMConfig{
IPv4Address: "172.20.88.22",
IPv6Address: "2001:db8::8822",
LinkLocalIPs: []string{"169.254.2.2", "fe80::169:254:2:2"},
},
Links: []string{"foo:bar", "bar:baz"},
Aliases: []string{"web1", "web2"},
},
"net2": {},
"net3": {
DriverOpts: map[string]string{"field3": "value3"},
Aliases: []string{"web3"},
},
},
expectedCfg: container.HostConfig{NetworkMode: "net1"},
},
{
name: "single-network-advanced-with-options",
flags: []string{"--network", "name=net1,alias=web1,alias=web2,driver-opt=field1=value1,driver-opt=field2=value2"},
expected: map[string]*networktypes.EndpointSettings{
"net1": {
DriverOpts: map[string]string{
"field1": "value1",
"field2": "value2",
},
Aliases: []string{"web1", "web2"},
},
},
expectedCfg: container.HostConfig{NetworkMode: "net1"},
},
{
name: "multiple-networks",
flags: []string{"--network", "net1", "--network", "name=net2"},
expected: map[string]*networktypes.EndpointSettings{"net1": {}, "net2": {}},
expectedCfg: container.HostConfig{NetworkMode: "net1"},
},
{
name: "conflict-network",
flags: []string{"--network", "duplicate", "--network", "name=duplicate"},
expectedErr: `network "duplicate" is specified multiple times`,
},
{
name: "conflict-options",
flags: []string{"--network", "name=net1,alias=web1", "--network-alias", "web1"},
expectedErr: `conflicting options: cannot specify both --network-alias and per-network alias`,
},
{
name: "invalid-mixed-network-types",
flags: []string{"--network", "name=host", "--network", "net1"},
expectedErr: `conflicting options: cannot attach both user-defined and non-user-defined network-modes`,
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
_, hConfig, nwConfig, err := parseRun(tc.flags)
if tc.expectedErr != "" {
assert.Error(t, err, tc.expectedErr)
return
}
assert.NilError(t, err)
assert.DeepEqual(t, hConfig.NetworkMode, tc.expectedCfg.NetworkMode)
assert.DeepEqual(t, nwConfig.EndpointsConfig, tc.expected)
})
}
}
func TestParseModes(t *testing.T) {
// pid ko
flags, copts := setupRunFlags()
args := []string{"--pid=container:", "img", "cmd"}
assert.NilError(t, flags.Parse(args))
_, err := parse(flags, copts, runtime.GOOS)
assert.ErrorContains(t, err, "--pid: invalid PID mode")
// pid ok
_, hostconfig, _, err := parseRun([]string{"--pid=host", "img", "cmd"})
assert.NilError(t, err)
if !hostconfig.PidMode.Valid() {
t.Fatalf("Expected a valid PidMode, got %v", hostconfig.PidMode)
}
// uts ko
_, _, _, err = parseRun([]string{"--uts=container:", "img", "cmd"}) //nolint:dogsled
assert.ErrorContains(t, err, "--uts: invalid UTS mode")
// uts ok
_, hostconfig, _, err = parseRun([]string{"--uts=host", "img", "cmd"})
assert.NilError(t, err)
if !hostconfig.UTSMode.Valid() {
t.Fatalf("Expected a valid UTSMode, got %v", hostconfig.UTSMode)
}
}
func TestRunFlagsParseShmSize(t *testing.T) {
// shm-size ko
flags, _ := setupRunFlags()
args := []string{"--shm-size=a128m", "img", "cmd"}
expectedErr := `invalid argument "a128m" for "--shm-size" flag: invalid size: 'a128m'`
err := flags.Parse(args)
assert.ErrorContains(t, err, expectedErr)
// shm-size ok
_, hostconfig, _, err := parseRun([]string{"--shm-size=128m", "img", "cmd"})
assert.NilError(t, err)
if hostconfig.ShmSize != 134217728 {
t.Fatalf("Expected a valid ShmSize, got %d", hostconfig.ShmSize)
}
}
func TestParseRestartPolicy(t *testing.T) {
invalids := map[string]string{
"always:2:3": "invalid restart policy format",
"on-failure:invalid": "maximum retry count must be an integer",
}
valids := map[string]container.RestartPolicy{
"": {},
"always": {
Name: "always",
MaximumRetryCount: 0,
},
"on-failure:1": {
Name: "on-failure",
MaximumRetryCount: 1,
},
}
for restart, expectedError := range invalids {
if _, _, _, err := parseRun([]string{fmt.Sprintf("--restart=%s", restart), "img", "cmd"}); err == nil || err.Error() != expectedError {
t.Fatalf("Expected an error with message '%v' for %v, got %v", expectedError, restart, err)
}
}
for restart, expected := range valids {
_, hostconfig, _, err := parseRun([]string{fmt.Sprintf("--restart=%v", restart), "img", "cmd"})
if err != nil {
t.Fatal(err)
}
if hostconfig.RestartPolicy != expected {
t.Fatalf("Expected %v, got %v", expected, hostconfig.RestartPolicy)
}
}
}
func TestParseRestartPolicyAutoRemove(t *testing.T) {
expected := "Conflicting options: --restart and --rm"
_, _, _, err := parseRun([]string{"--rm", "--restart=always", "img", "cmd"}) //nolint:dogsled
if err == nil || err.Error() != expected {
t.Fatalf("Expected error %v, but got none", expected)
}
}
func TestParseHealth(t *testing.T) {
checkOk := func(args ...string) *container.HealthConfig {
config, _, _, err := parseRun(args)
if err != nil {
t.Fatalf("%#v: %v", args, err)
}
return config.Healthcheck
}
checkError := func(expected string, args ...string) {
config, _, _, err := parseRun(args)
if err == nil {
t.Fatalf("Expected error, but got %#v", config)
}
if err.Error() != expected {
t.Fatalf("Expected %#v, got %#v", expected, err)
}
}
health := checkOk("--no-healthcheck", "img", "cmd")
if health == nil || len(health.Test) != 1 || health.Test[0] != "NONE" {
t.Fatalf("--no-healthcheck failed: %#v", health)
}
health = checkOk("--health-cmd=/check.sh -q", "img", "cmd")
if len(health.Test) != 2 || health.Test[0] != "CMD-SHELL" || health.Test[1] != "/check.sh -q" {
t.Fatalf("--health-cmd: got %#v", health.Test)
}
if health.Timeout != 0 {
t.Fatalf("--health-cmd: timeout = %s", health.Timeout)
}
checkError("--no-healthcheck conflicts with --health-* options",
"--no-healthcheck", "--health-cmd=/check.sh -q", "img", "cmd")
health = checkOk("--health-timeout=2s", "--health-retries=3", "--health-interval=4.5s", "--health-start-period=5s", "img", "cmd")
if health.Timeout != 2*time.Second || health.Retries != 3 || health.Interval != 4500*time.Millisecond || health.StartPeriod != 5*time.Second {
t.Fatalf("--health-*: got %#v", health)
}
}
func TestParseLoggingOpts(t *testing.T) {
// logging opts ko
if _, _, _, err := parseRun([]string{"--log-driver=none", "--log-opt=anything", "img", "cmd"}); err == nil || err.Error() != "invalid logging opts for driver none" {
t.Fatalf("Expected an error with message 'invalid logging opts for driver none', got %v", err)
}
// logging opts ok
_, hostconfig, _, err := parseRun([]string{"--log-driver=syslog", "--log-opt=something", "img", "cmd"})
if err != nil {
t.Fatal(err)
}
if hostconfig.LogConfig.Type != "syslog" || len(hostconfig.LogConfig.Config) != 1 {
t.Fatalf("Expected a 'syslog' LogConfig with one config, got %v", hostconfig.RestartPolicy)
}
}
func TestParseEnvfileVariables(t *testing.T) {
e := "open nonexistent: no such file or directory"
if runtime.GOOS == "windows" {
e = "open nonexistent: The system cannot find the file specified."
}
// env ko
if _, _, _, err := parseRun([]string{"--env-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != e {
t.Fatalf("Expected an error with message '%s', got %v", e, err)
}
// env ok
config, _, _, err := parseRun([]string{"--env-file=testdata/valid.env", "img", "cmd"})
if err != nil {
t.Fatal(err)
}
if len(config.Env) != 1 || config.Env[0] != "ENV1=value1" {
t.Fatalf("Expected a config with [ENV1=value1], got %v", config.Env)
}
config, _, _, err = parseRun([]string{"--env-file=testdata/valid.env", "--env=ENV2=value2", "img", "cmd"})
if err != nil {
t.Fatal(err)
}
if len(config.Env) != 2 || config.Env[0] != "ENV1=value1" || config.Env[1] != "ENV2=value2" {
t.Fatalf("Expected a config with [ENV1=value1 ENV2=value2], got %v", config.Env)
}
}
func TestParseEnvfileVariablesWithBOMUnicode(t *testing.T) {
// UTF8 with BOM
config, _, _, err := parseRun([]string{"--env-file=testdata/utf8.env", "img", "cmd"})
if err != nil {
t.Fatal(err)
}
env := []string{"FOO=BAR", "HELLO=" + string([]byte{0xe6, 0x82, 0xa8, 0xe5, 0xa5, 0xbd}), "BAR=FOO"}
if len(config.Env) != len(env) {
t.Fatalf("Expected a config with %d env variables, got %v: %v", len(env), len(config.Env), config.Env)
}
for i, v := range env {
if config.Env[i] != v {
t.Fatalf("Expected a config with [%s], got %v", v, []byte(config.Env[i]))
}
}
// UTF16 with BOM
e := "contains invalid utf8 bytes at line"
if _, _, _, err := parseRun([]string{"--env-file=testdata/utf16.env", "img", "cmd"}); err == nil || !strings.Contains(err.Error(), e) {
t.Fatalf("Expected an error with message '%s', got %v", e, err)
}
// UTF16BE with BOM
if _, _, _, err := parseRun([]string{"--env-file=testdata/utf16be.env", "img", "cmd"}); err == nil || !strings.Contains(err.Error(), e) {
t.Fatalf("Expected an error with message '%s', got %v", e, err)
}
}
func TestParseLabelfileVariables(t *testing.T) {
e := "open nonexistent: no such file or directory"
if runtime.GOOS == "windows" {
e = "open nonexistent: The system cannot find the file specified."
}
// label ko
if _, _, _, err := parseRun([]string{"--label-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != e {
t.Fatalf("Expected an error with message '%s', got %v", e, err)
}
// label ok
config, _, _, err := parseRun([]string{"--label-file=testdata/valid.label", "img", "cmd"})
if err != nil {
t.Fatal(err)
}
if len(config.Labels) != 1 || config.Labels["LABEL1"] != "value1" {
t.Fatalf("Expected a config with [LABEL1:value1], got %v", config.Labels)
}
config, _, _, err = parseRun([]string{"--label-file=testdata/valid.label", "--label=LABEL2=value2", "img", "cmd"})
if err != nil {
t.Fatal(err)
}
if len(config.Labels) != 2 || config.Labels["LABEL1"] != "value1" || config.Labels["LABEL2"] != "value2" {
t.Fatalf("Expected a config with [LABEL1:value1 LABEL2:value2], got %v", config.Labels)
}
}
func TestParseEntryPoint(t *testing.T) {
config, _, _, err := parseRun([]string{"--entrypoint=anything", "cmd", "img"})
if err != nil {
t.Fatal(err)
}
if len(config.Entrypoint) != 1 && config.Entrypoint[0] != "anything" {
t.Fatalf("Expected entrypoint 'anything', got %v", config.Entrypoint)
}
}
func TestValidateDevice(t *testing.T) {
skip.If(t, runtime.GOOS == "windows") // Windows validates server-side
valid := []string{
"/home",
"/home:/home",
"/home:/something/else",
"/with space",
"/home:/with space",
"relative:/absolute-path",
"hostPath:/containerPath:r",
"/hostPath:/containerPath:rw",
"/hostPath:/containerPath:mrw",
}
invalid := map[string]string{
"": "bad format for path: ",
"./": "./ is not an absolute path",
"../": "../ is not an absolute path",
"/:../": "../ is not an absolute path",
"/:path": "path is not an absolute path",
":": "bad format for path: :",
"/tmp:": " is not an absolute path",
":test": "bad format for path: :test",
":/test": "bad format for path: :/test",
"tmp:": " is not an absolute path",
":test:": "bad format for path: :test:",
"::": "bad format for path: ::",
":::": "bad format for path: :::",
"/tmp:::": "bad format for path: /tmp:::",
":/tmp::": "bad format for path: :/tmp::",
"path:ro": "ro is not an absolute path",
"path:rr": "rr is not an absolute path",
"a:/b:ro": "bad mode specified: ro",
"a:/b:rr": "bad mode specified: rr",
}
for _, path := range valid {
if _, err := validateDevice(path, runtime.GOOS); err != nil {
t.Fatalf("ValidateDevice(`%q`) should succeed: error %q", path, err)
}
}
for path, expectedError := range invalid {
if _, err := validateDevice(path, runtime.GOOS); err == nil {
t.Fatalf("ValidateDevice(`%q`) should have failed validation", path)
} else {
if err.Error() != expectedError {
t.Fatalf("ValidateDevice(`%q`) error should contain %q, got %q", path, expectedError, err.Error())
}
}
}
}
func TestParseSystemPaths(t *testing.T) {
tests := []struct {
doc string
in, out, masked, readonly []string
}{
{
doc: "not set",
in: []string{},
out: []string{},
},
{
doc: "not set, preserve other options",
in: []string{
"seccomp=unconfined",
"apparmor=unconfined",
"label=user:USER",
"foo=bar",
},
out: []string{
"seccomp=unconfined",
"apparmor=unconfined",
"label=user:USER",
"foo=bar",
},
},
{
doc: "unconfined",
in: []string{"systempaths=unconfined"},
out: []string{},
masked: []string{},
readonly: []string{},
},
{
doc: "unconfined and other options",
in: []string{"foo=bar", "bar=baz", "systempaths=unconfined"},
out: []string{"foo=bar", "bar=baz"},
masked: []string{},
readonly: []string{},
},
{
doc: "unknown option",
in: []string{"foo=bar", "systempaths=unknown", "bar=baz"},
out: []string{"foo=bar", "systempaths=unknown", "bar=baz"},
},
}
for _, tc := range tests {
securityOpts, maskedPaths, readonlyPaths := parseSystemPaths(tc.in)
assert.DeepEqual(t, securityOpts, tc.out)
assert.DeepEqual(t, maskedPaths, tc.masked)
assert.DeepEqual(t, readonlyPaths, tc.readonly)
}
}
| [
"\"TEMP\"",
"\"ProgramData\"",
"\"TEMP\"",
"\"TEMP\"",
"\"ProgramData\"",
"\"TEMP\"",
"\"SystemDrive\""
]
| []
| [
"SystemDrive",
"ProgramData",
"TEMP"
]
| [] | ["SystemDrive", "ProgramData", "TEMP"] | go | 3 | 0 | |
.github/actions/hello-world/main.go | package main
import (
"fmt"
"os"
)
func main() {
// Access Inputs as environment vars
firstGreeting := os.Getenv("INPUT_FIRSTGREETING")
secondGreeting := os.Getenv("INPUT_SECONDGREETING")
thirdGreeting := os.Getenv("INPUT_THIRDGREETING")
// Use those inputs in the action logic
fmt.Println("Hello " + firstGreeting)
fmt.Println("Hello " + secondGreeting)
if thirdGreeting != "" {
fmt.Println("Hello " + thirdGreeting)
}
}
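// Note (informational): GitHub Actions exposes each action input to the container as an
// environment variable named INPUT_<UPPERCASED INPUT NAME>, which is why the lookups above
// read INPUT_FIRSTGREETING, INPUT_SECONDGREETING and INPUT_THIRDGREETING. A matching
// action.yml would declare inputs named firstGreeting, secondGreeting and thirdGreeting
// (that manifest is assumed here, it is not part of this file).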
| [
"\"INPUT_FIRSTGREETING\"",
"\"INPUT_SECONDGREETING\"",
"\"INPUT_THIRDGREETING\""
]
| []
| [
"INPUT_SECONDGREETING",
"INPUT_FIRSTGREETING",
"INPUT_THIRDGREETING"
]
| [] | ["INPUT_SECONDGREETING", "INPUT_FIRSTGREETING", "INPUT_THIRDGREETING"] | go | 3 | 0 | |
train_gen.py | import argparse
from utils.utils import verify_free_gpu_memory
from utils.codifications import Chromosome, Fitness
from time import sleep, time
import os
parser = argparse.ArgumentParser(description='Train a gen of a CNN.')
parser.add_argument('-gf', '--gen_file', type=str, required=True,
help='text file that contains the genetic encoding of the CNN to train')
parser.add_argument('-ff', '--fitness_file', type=str, required=True,
help='file that contains the fitness object to use in the training and evaluating process')
parser.add_argument('-t', '--test', type=bool, default=False,
help="If use the test dataset to evaluate the model trained")
parser.add_argument('-fp', '--float_precision', type=int, default=32,
help='Bits to use in float precision. FP32 is more accurate, but FP16 is faster and uses less memory')
parser.add_argument('-pm', '--precise_mode', type=bool, default=False,
help="Train the gen with a secondary configuration, in order to make a more precise calculation"
" of the fitness")
args = vars(parser.parse_args())
abs_ti = time()
chromosome = Chromosome.load(args['gen_file'])
print(chromosome)
fitness = Fitness.load(args['fitness_file'])
while not verify_free_gpu_memory()[0]:
sleep(3)
print("Waiting 3 seconds for a gpu...")
gpu_id = verify_free_gpu_memory()[1]
print("GPU AVAILABLE: :/GPU %d" % gpu_id)
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "%d" % gpu_id
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
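# CUDA_DEVICE_ORDER=PCI_BUS_ID makes device indices match nvidia-smi, CUDA_VISIBLE_DEVICES
# pins this process to the selected GPU, and TF_CPP_MIN_LOG_LEVEL='2' hides TensorFlow
# info/warning messages; these must be set before TensorFlow initializes the GPU.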
training_time = time()
try:
score = fitness.calc(chromosome, test=args['test'], file_model='./model_acc_gpu%d.hdf5' % gpu_id,
fp=args['float_precision'], precise_mode=args['precise_mode'])
except Exception:
score = 1.0  # fallback score when training fails; kept as a float so the format call below does not fail
training_time = (time() - training_time) / 60.
print()
with open("%s_score" % args['gen_file'], 'w') as f:
f.write("\nScore: %0.6f" % score)
abs_ti = (time() - abs_ti) / 60.
hours = abs_ti // 60
minutes = abs_ti % 60
work_directory = os.path.split(args['gen_file'])[0]
record_file = os.path.join(work_directory, 'RECORD')
with open(record_file, 'a') as f:
f.write("-" * 40 + "\n")
f.write(f"{chromosome.__repr__()}\n")
if abs_ti > 10:
f.write("Taking too much time\n")
f.write(f"Precision:\t{args['precise_mode']}\n")
f.write(f"Score:\t\t{score.__format__('2.4')}\n")
f.write("Training time:\t%d:%d\n" % (hours, minutes))
print("Score: %0.4f" % score)
| []
| []
| [
"CUDA_DEVICE_ORDER",
"CUDA_VISIBLE_DEVICES",
"TF_CPP_MIN_LOG_LEVEL"
]
| [] | ["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES", "TF_CPP_MIN_LOG_LEVEL"] | python | 3 | 0 | |
Problem Solving/Strings/AlternatingCharacters.java | import java.io.*;
import java.math.*;
import java.security.*;
import java.text.*;
import java.util.*;
import java.util.concurrent.*;
import java.util.regex.*;
public class Solution {
// Complete the alternatingCharacters function below.
static int alternatingCharacters(String s) {
if(s.length()==1 || s.length()==0)
{
return 0;
}
char ch=s.charAt(0); //this can be either A or B;
int delete=0;
for(int i=1;i<s.length();i++)
{
if(s.charAt(i)==ch)
{
delete++; //I need to delete this;
}
else
{
ch=s.charAt(i);
//Now this is our alternate char.
}
}
return delete;
}
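// Worked example (illustrative): for s = "AAABBB" the scan keeps the first 'A',
// deletes the next two 'A's, keeps the first 'B' and deletes the next two 'B's,
// so the method returns 4 and "AB" is the alternating string left behind.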
private static final Scanner scanner = new Scanner(System.in);
public static void main(String[] args) throws IOException {
BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH")));
int q = scanner.nextInt();
scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?");
for (int qItr = 0; qItr < q; qItr++) {
String s = scanner.nextLine();
int result = alternatingCharacters(s);
bufferedWriter.write(String.valueOf(result));
bufferedWriter.newLine();
}
bufferedWriter.close();
scanner.close();
}
}
| [
"\"OUTPUT_PATH\""
]
| []
| [
"OUTPUT_PATH"
]
| [] | ["OUTPUT_PATH"] | java | 1 | 0 | |
src/opendr/perception/object_detection_2d/retinaface/retinaface_learner.py | # Copyright 2020-2021 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2018 Jiankang Deng and Jia Guo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import sys
import json
from tqdm import tqdm
from collections import defaultdict
import numpy as np
import mxnet as mx
from mxnet.module import Module
from urllib.request import urlretrieve
# OpenDR engine imports
from opendr.engine.learners import Learner
from opendr.engine.data import Image
from opendr.engine.target import BoundingBox, BoundingBoxList
from opendr.engine.constants import OPENDR_SERVER_URL
from opendr.perception.object_detection_2d.retinaface.algorithm.models.retinaface import RetinaFace
from opendr.perception.object_detection_2d.retinaface.algorithm.utils.load_data import load_gt_roidb
from opendr.perception.object_detection_2d.retinaface.algorithm.core.loader import CropLoader
from opendr.perception.object_detection_2d.retinaface.algorithm.core import metric
from opendr.perception.object_detection_2d.retinaface.algorithm.config import config, generate_config
from opendr.perception.object_detection_2d.retinaface.algorithm.symbol.symbol_resnet import get_resnet_train
from opendr.perception.object_detection_2d.retinaface.algorithm.logger import logger
from opendr.perception.object_detection_2d.retinaface.algorithm.eval_recall import FaceDetectionRecallMetric
from opendr.perception.object_detection_2d.datasets.detection_dataset import DetectionDataset
from opendr.perception.object_detection_2d.datasets.wider_face import WiderFaceDataset
from opendr.perception.object_detection_2d.datasets.transforms import BoundingBoxListToNumpyArray
class RetinaFaceLearner(Learner):
def __init__(self, backbone='resnet', lr=0.001, batch_size=2, checkpoint_after_iter=0, checkpoint_load_iter=0,
lr_steps='0', epochs=100, momentum=0.9, weight_decay=5e-4, log_after=20, prefix='',
shuffle=True, flip=False, val_after=5, temp_path='', device='cuda'):
super(RetinaFaceLearner, self).__init__(lr=lr, batch_size=batch_size, backbone=backbone,
checkpoint_after_iter=checkpoint_after_iter,
checkpoint_load_iter=checkpoint_load_iter, temp_path=temp_path,
device=device)
self.device = device
if device == 'cuda':
self.gpu_id = 0
else:
# use cpu
self.gpu_id = -1
self.detector = None
if self.backbone not in ['resnet', 'mnet']:
raise ValueError("network must be one of ['resnet', 'mnet']")
if self.backbone == 'resnet':
self.net = 'net3'
else:
self.net = 'net3l'
self.classes = ['face', 'masked face']
self.flip = flip
self.shuffle = shuffle
self.lr_steps = [int(step) for step in lr_steps.split(',')]
self.epochs = epochs
self.momentum = momentum
self.weight_decay = weight_decay
self.log_after = log_after
self.prefix = prefix
self.val_after = val_after
def __get_ctx(self):
ctx = []
if 'CUDA_VISIBLE_DEVICES' in os.environ and self.device == 'cuda':
cvd = os.environ['CUDA_VISIBLE_DEVICES'].strip()
elif self.device == 'cuda' and mx.context.num_gpus() > 0:
cvd = ['0']
else:
cvd = []
if len(cvd) > 0 and self.device == 'cuda':
if isinstance(cvd, str):
visibles_ids = cvd.split(',')
elif isinstance(cvd, list):
visibles_ids = cvd
for i in visibles_ids:
ctx.append(mx.gpu(int(i)))
else:
ctx = [mx.cpu()]
return ctx
def fit(self, dataset, val_dataset=None, from_scratch=False, silent=False, verbose=True):
"""
This method is used to train the detector on the WIDER Face dataset. Validation is performed if a val_dataset is
provided.
:param dataset: training dataset object; only WiderFaceDataset is supported currently
:type dataset: opendr.perception.object_detection_2d.datasets.WiderFaceDataset
:param val_dataset: validation dataset object
:type val_dataset: opendr.perception.object_detection_2d.datasets.DetectionDataset, optional
:param from_scratch: indicates whether to train from scratch or to download and use a pretrained backbone
:type from_scratch: bool, optional
:param silent: if set to True, disables all printing to STDOUT, defaults to False
:type silent: bool, optional
:param verbose: if set to True, additional information is printed to STDOUT, defaults to True
:type verbose: bool
:return: returns stats regarding the training and validation process
:rtype: dict
"""
if silent:
logger.setLevel(0)
# verbose = False
if self.backbone == "mnet":
raise NotImplementedError("Only the 'resnet' backbone is supported for training")
ctx = self.__get_ctx()
input_batch_size = self.batch_size * len(ctx)
checkpoint_path = os.path.join(self.temp_path, self.prefix)
# prepare dataset for training, downloads extra annotations if they're not present in the dataset
dataset = self.__prepare_dataset(dataset)
self.eval_dataset = val_dataset
# get roidbs
image_sets = dataset.splits
roidbs = [load_gt_roidb('retinaface',
'WIDER_' + image_set,
self.temp_path,
dataset.root, flip=self.flip,
verbose=verbose)
for image_set in image_sets]
roidb = roidbs[0]
generate_config(self.backbone, 'retinaface')
start_epoch = 0
# create network & get backbone weights
sym = None
if from_scratch:
arg_params = {}
aux_params = {}
else:
backbone_path = os.path.join(self.temp_path, "backbone")
self.download(backbone_path, mode="backbone")
backbone_prefix = os.path.join(backbone_path, "resnet-50")
if self.checkpoint_load_iter > 0:
if verbose:
print("Loading checkpoint from {}...".format(checkpoint_path))
sym, arg_params, aux_params = mx.model.load_checkpoint(checkpoint_path, self.checkpoint_load_iter)
start_epoch = self.checkpoint_load_iter
else:
sym, arg_params, aux_params = mx.model.load_checkpoint(backbone_prefix, start_epoch)
sym = get_resnet_train(sym)
feat_sym = []
for stride in config.RPN_FEAT_STRIDE:
feat_sym.append(
sym.get_internals()['face_rpn_cls_score_stride%s_output' % stride]
)
train_data = CropLoader(feat_sym, roidb, input_batch_size, shuffle=self.shuffle, ctx=ctx,
work_load_list=None)
max_data_shape = [('data', (1, 3, max([v[1] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]
max_data_shape, max_label_shape = train_data.infer_shape(max_data_shape)
max_data_shape.append(('gt_boxes', (1, roidb[0]['max_num_boxes'], 5)))
# infer shape
data_shape_dict = dict(train_data.provide_data + train_data.provide_label)
arg_shape, out_shape, aux_shape = sym.infer_shape(**data_shape_dict)
arg_shape_dict = dict(zip(sym.list_arguments(), arg_shape))
# out_shape_dict = dict(zip(sym.list_outputs(), out_shape))
# aux_shape_dict = dict(zip(sym.list_auxiliary_states(), aux_shape))
for k in arg_shape_dict:
v = arg_shape_dict[k]
if k.find('upsampling') >= 0:
if verbose:
print('Initializing upsampling weight', k)
arg_params[k] = mx.nd.zeros(shape=v)
init = mx.init.Initializer()
init._init_bilinear(k, arg_params[k])
# fixed_param_prefix = config.FIXED_PARAMS
# create solver
data_names = [k[0] for k in train_data.provide_data]
label_names = [k[0] for k in train_data.provide_label]
fixed_param_names = self.__get_fixed_params(sym)
if verbose and fixed_param_names:
print('Fixed', fixed_param_names)
mod = Module(sym,
data_names=data_names,
label_names=label_names,
context=ctx,
logger=logger,
fixed_param_names=fixed_param_names)
self._model = mod
eval_metrics = mx.metric.CompositeEvalMetric()
train_dict = defaultdict(list)
mid = 0
for m in range(len(config.RPN_FEAT_STRIDE)):
stride = config.RPN_FEAT_STRIDE[m]
_metric = metric.RPNAccMetric(pred_idx=mid, label_idx=mid + 1, name='RPNAcc_s%s' % stride)
eval_metrics.add(_metric)
mid += 2
_metric = metric.RPNL1LossMetric(loss_idx=mid, weight_idx=mid + 1, name='RPNL1Loss_s%s' % stride)
eval_metrics.add(_metric)
mid += 2
if config.FACE_LANDMARK:
_metric = metric.RPNL1LossMetric(loss_idx=mid, weight_idx=mid + 1, name='RPNLandMarkL1Loss_s%s' % stride)
eval_metrics.add(_metric)
mid += 2
if config.HEAD_BOX:
_metric = metric.RPNAccMetric(pred_idx=mid, label_idx=mid + 1, name='RPNAcc_head_s%s' % stride)
eval_metrics.add(_metric)
mid += 2
_metric = metric.RPNL1LossMetric(loss_idx=mid, weight_idx=mid + 1, name='RPNL1Loss_head_s%s' % stride)
eval_metrics.add(_metric)
mid += 2
if config.CASCADE > 0:
for _idx in range(config.CASCADE):
if stride in config.CASCADE_CLS_STRIDES:
_metric = metric.RPNAccMetric(pred_idx=mid, label_idx=mid + 1, name='RPNAccCAS%d_s%s' % (_idx, stride))
eval_metrics.add(_metric)
mid += 2
if stride in config.CASCADE_BBOX_STRIDES:
_metric = metric.RPNL1LossMetric(loss_idx=mid, weight_idx=mid + 1, name='RPNL1LossCAS%d_s%s' % (_idx,
stride))
eval_metrics.add(_metric)
mid += 2
# lr
lr_epoch = [int(epoch) for epoch in self.lr_steps]
lr_epoch_diff = [epoch - start_epoch for epoch in lr_epoch if epoch > start_epoch]
lr_iters = [int(epoch * len(roidb)) / input_batch_size for epoch in lr_epoch_diff]
iter_per_epoch = int(len(roidb) / input_batch_size)
lr_steps = []
if len(lr_iters) == 5:
factors = [0.5, 0.5, 0.4, 0.1, 0.1]
for i in range(5):
lr_steps.append((lr_iters[i], factors[i]))
elif len(lr_iters) == 8: # warmup
for li in lr_iters[0:5]:
lr_steps.append((li, 1.5849))
for li in lr_iters[5:]:
lr_steps.append((li, 0.1))
else:
for li in lr_iters:
lr_steps.append((li, 0.1))
end_epoch = self.epochs
opt = mx.optimizer.SGD(learning_rate=self.lr, momentum=self.momentum, wd=self.weight_decay,
rescale_grad=1. / len(ctx), clip_gradient=None)
initializer = mx.init.Xavier()
train_data = mx.io.PrefetchingIter(train_data)
_cb = mx.callback.Speedometer(train_data.batch_size, frequent=self.log_after, auto_reset=False)
global_step = [0]
def save_model(epoch):
arg, aux = mod.get_params()
all_layers = mod.symbol.get_internals()
outs = []
for stride in config.RPN_FEAT_STRIDE:
num_anchors = config.RPN_ANCHOR_CFG[str(stride)]['NUM_ANCHORS']
if config.CASCADE > 0:
_name = 'face_rpn_cls_score_stride%d_output' % (stride)
cls_pred = all_layers[_name]
cls_pred = mx.symbol.Reshape(data=cls_pred, shape=(0, 2, -1, 0))
cls_pred = mx.symbol.SoftmaxActivation(data=cls_pred, mode="channel")
cls_pred = mx.symbol.Reshape(data=cls_pred, shape=(0, 2 * num_anchors, -1, 0))
outs.append(cls_pred)
_name = 'face_rpn_bbox_pred_stride%d_output' % stride
rpn_bbox_pred = all_layers[_name]
outs.append(rpn_bbox_pred)
if config.FACE_LANDMARK:
_name = 'face_rpn_landmark_pred_stride%d_output' % stride
rpn_landmark_pred = all_layers[_name]
outs.append(rpn_landmark_pred)
for casid in range(config.CASCADE):
if stride in config.CASCADE_CLS_STRIDES:
_name = 'face_rpn_cls_score_stride%d_cas%d_output' % (stride, casid)
cls_pred = all_layers[_name]
cls_pred = mx.symbol.Reshape(data=cls_pred, shape=(0, 2, -1, 0))
cls_pred = mx.symbol.SoftmaxActivation(data=cls_pred, mode="channel")
cls_pred = mx.symbol.Reshape(data=cls_pred, shape=(0, 2 * num_anchors, -1, 0))
outs.append(cls_pred)
if stride in config.CASCADE_BBOX_STRIDES:
_name = 'face_rpn_bbox_pred_stride%d_cas%d_output' % (stride, casid)
bbox_pred = all_layers[_name]
outs.append(bbox_pred)
else:
_name = 'face_rpn_cls_score_stride%d_output' % stride
rpn_cls_score = all_layers[_name]
# prepare rpn data
rpn_cls_score_reshape = mx.symbol.Reshape(data=rpn_cls_score,
shape=(0, 2, -1, 0),
name="face_rpn_cls_score_reshape_stride%d" % stride)
rpn_cls_prob = mx.symbol.SoftmaxActivation(data=rpn_cls_score_reshape,
mode="channel",
name="face_rpn_cls_prob_stride%d" % stride)
rpn_cls_prob_reshape = mx.symbol.Reshape(data=rpn_cls_prob,
shape=(0, 2 * num_anchors, -1, 0),
name='face_rpn_cls_prob_reshape_stride%d' % stride)
_name = 'face_rpn_bbox_pred_stride%d_output' % stride
rpn_bbox_pred = all_layers[_name]
outs.append(rpn_cls_prob_reshape)
outs.append(rpn_bbox_pred)
if config.FACE_LANDMARK:
_name = 'face_rpn_landmark_pred_stride%d_output' % stride
rpn_landmark_pred = all_layers[_name]
outs.append(rpn_landmark_pred)
_sym = mx.sym.Group(outs)
mx.model.save_checkpoint(checkpoint_path, epoch, _sym, arg, aux)
def _batch_callback(param):
# global global_step
_cb(param)
global_step[0] += 1
mbatch = global_step[0]
for step in lr_steps:
if mbatch == step[0]:
opt.lr *= step[1]
if verbose:
print('lr change to', opt.lr, ' in batch', mbatch, file=sys.stderr)
break
if self.checkpoint_after_iter > 0 and mbatch % iter_per_epoch == self.checkpoint_after_iter - 1:
metrics = param.eval_metric.metrics
for m in metrics:
ks, vals = m.get()
if isinstance(ks, list):
for m_idx, k in enumerate(ks):
train_dict[k].append(vals[m_idx])
else:
train_dict[ks].append(vals)
save_model(int((mbatch - 1) / iter_per_epoch))
# train:
mod.fit(train_data, eval_metric=eval_metrics, epoch_end_callback=self.__epoch_callback,
batch_end_callback=_batch_callback, kvstore='device',
optimizer=opt,
initializer=initializer,
allow_missing=True,
arg_params=arg_params, aux_params=aux_params, begin_epoch=start_epoch, num_epoch=end_epoch)
if verbose:
for k, v in train_dict.items():
print(k, len(v), v[0], v[-1])
return train_dict
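# A minimal training sketch (illustrative; the WiderFaceDataset constructor arguments are
# assumptions and depend on how the local WIDER Face copy is organized):
#   learner = RetinaFaceLearner(backbone='resnet', epochs=5, temp_path='./temp')
#   train_set = WiderFaceDataset(root='./wider_face', splits=['train'])  # hypothetical args
#   stats = learner.fit(train_set, val_dataset=None, verbose=True)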
@staticmethod
def __prepare_detector(mod):
"""
This method makes some necessary modifications to the model in order to prepare it for inference.
"""
arg, aux = mod.get_params()
all_layers = mod.symbol.get_internals()
outs = []
for stride in config.RPN_FEAT_STRIDE:
num_anchors = config.RPN_ANCHOR_CFG[str(stride)]['NUM_ANCHORS']
if config.CASCADE > 0:
_name = 'face_rpn_cls_score_stride%d_output' % (stride)
cls_pred = all_layers[_name]
cls_pred = mx.symbol.Reshape(data=cls_pred, shape=(0, 2, -1, 0))
cls_pred = mx.symbol.SoftmaxActivation(data=cls_pred, mode="channel")
cls_pred = mx.symbol.Reshape(data=cls_pred, shape=(0, 2 * num_anchors, -1, 0))
outs.append(cls_pred)
_name = 'face_rpn_bbox_pred_stride%d_output' % stride
rpn_bbox_pred = all_layers[_name]
outs.append(rpn_bbox_pred)
if config.FACE_LANDMARK:
_name = 'face_rpn_landmark_pred_stride%d_output' % stride
rpn_landmark_pred = all_layers[_name]
outs.append(rpn_landmark_pred)
for casid in range(config.CASCADE):
if stride in config.CASCADE_CLS_STRIDES:
_name = 'face_rpn_cls_score_stride%d_cas%d_output' % (stride, casid)
cls_pred = all_layers[_name]
cls_pred = mx.symbol.Reshape(data=cls_pred, shape=(0, 2, -1, 0))
cls_pred = mx.symbol.SoftmaxActivation(data=cls_pred, mode="channel")
cls_pred = mx.symbol.Reshape(data=cls_pred, shape=(0, 2 * num_anchors, -1, 0))
outs.append(cls_pred)
if stride in config.CASCADE_BBOX_STRIDES:
_name = 'face_rpn_bbox_pred_stride%d_cas%d_output' % (stride, casid)
bbox_pred = all_layers[_name]
outs.append(bbox_pred)
else:
_name = 'face_rpn_cls_score_stride%d_output' % stride
rpn_cls_score = all_layers[_name]
# prepare rpn data
rpn_cls_score_reshape = mx.symbol.Reshape(data=rpn_cls_score,
shape=(0, 2, -1, 0),
name="face_rpn_cls_score_reshape_stride%d" % stride)
rpn_cls_prob = mx.symbol.SoftmaxActivation(data=rpn_cls_score_reshape,
mode="channel",
name="face_rpn_cls_prob_stride%d" % stride)
rpn_cls_prob_reshape = mx.symbol.Reshape(data=rpn_cls_prob,
shape=(0, 2 * num_anchors, -1, 0),
name='face_rpn_cls_prob_reshape_stride%d' % stride)
_name = 'face_rpn_bbox_pred_stride%d_output' % stride
rpn_bbox_pred = all_layers[_name]
outs.append(rpn_cls_prob_reshape)
outs.append(rpn_bbox_pred)
if config.FACE_LANDMARK:
_name = 'face_rpn_landmark_pred_stride%d_output' % stride
rpn_landmark_pred = all_layers[_name]
outs.append(rpn_landmark_pred)
_sym = mx.sym.Group(outs)
return _sym, arg, aux
def __epoch_callback(self, epoch, symbol, arg_params, aux_params):
"""
Callback method, called at the end of each training epoch. Evaluation is performed if a validation dataset has been
provided by the user, every 'val_after' epochs.
"""
if epoch % self.val_after == self.val_after - 1 and self.eval_dataset is not None:
sym, arg, aux = self.__prepare_detector(self._model)
self.detector = RetinaFace(network=self.net, sym=sym, arg_params=arg, aux_params=aux, model=None)
self.eval(self.eval_dataset, use_subset=True, subset_size=500, pyramid=False, flip=False)
def eval(self, dataset, verbose=True, use_subset=False, subset_size=250, pyramid=True, flip=True):
"""
This method performs evaluation on a given dataset and returns a dictionary with the evaluation results.
:param dataset: dataset object, to perform evaluation on
:type dataset: opendr.perception.object_detection_2d.datasets.DetectionDataset
:param verbose: if True, additional information is printed on stdout
:type verbose: bool, optional
:param use_subset: if True, only a subset of the dataset is evaluated, defaults to False
:type use_subset: bool, optional
:param subset_size: if use_subset is True, subset_size controls the size of the subset to be evaluated
:type subset_size: int, optional
:param pyramid: if True, an image pyramid is used during evaluation to increase performance
:type pyramid: bool, optional
:param flip: if True, images are flipped during evaluation to increase performance
:type flip: bool, optional
:return: dictionary containing evaluation metric names and values
:rtype: dict
"""
if self.detector is None:
assert "Detector must be loaded with load() before inference."
if verbose:
print("Evaluation params: [pyramid={}, flip={}]".format(pyramid, flip))
# prepare dataset & get metric
dataset, eval_metric = self.__prepare_val_dataset(dataset)
if use_subset:
val_indices = np.random.choice(range(len(dataset)), subset_size)
eval_metric.n_val_images = subset_size
if verbose:
print("Using random subset of {} images...".format(subset_size))
else:
val_indices = np.arange(0, len(dataset))
# perform evaluation
eval_metric.reset()
for idx in tqdm(val_indices):
img, labels = dataset[idx]
if isinstance(img, Image):
img = img.data
if isinstance(labels, BoundingBoxList):
labels = BoundingBoxListToNumpyArray()(labels)
do_flip = flip
if not pyramid:
# target_size = 1600
target_size = 640
# max_size = 2150
max_size = 1024
im_shape = img.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
im_scale = float(target_size) / float(im_size_min)
# prevent bigger axis from being more than max_size:
if np.round(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
scales = [im_scale]
else:
do_flip = True
TEST_SCALES = [500, 800, 1100, 1400, 1700]
target_size = 800
max_size = 1200
im_shape = img.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
im_scale = float(target_size) / float(im_size_min)
# prevent bigger axis from being more than max_size:
if np.round(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
scales = [float(scale) / target_size * im_scale for scale in TEST_SCALES]
faces, landmarks = self.detector.detect(img, self.threshold, scales=scales, do_flip=do_flip)
det_boxes = faces[np.newaxis, :, :4]
det_scores = faces[np.newaxis, :, 4]
det_labels = [np.zeros_like(det_scores, dtype=int)]  # np.int is deprecated in recent NumPy versions
gt_boxes = labels[np.newaxis, :, :4]
gt_labels = labels[np.newaxis, :, 4]
gt_difficult = np.zeros_like(gt_labels)
eval_metric.update(det_boxes, det_labels, det_scores, gt_boxes, gt_labels, gt_difficult)
map_name, mean_ap = eval_metric.get()
val_msg = '\n'.join(['{}={}'.format(k, v) for k, v in zip(map_name, mean_ap)])
if verbose:
print(val_msg)
eval_dict = {k.lower(): v for k, v in zip(map_name, mean_ap)}
return eval_dict
def __prepare_dataset(self, dataset):
"""
Prepares the WIDER Face dataset for training. Additional training annotations are downloaded if they don't already
exist in the dataset folder.
:param dataset: object containing WiderFaceDataset
:type dataset: opendr.perception.object_detection_2d.datasets.WiderFaceDataset
:return: the dataset is returned as-is, apart from a minor modification in the dataset_type attribute,
made for consistency reasons with the original implementation
:rtype:
"""
if issubclass(type(dataset), WiderFaceDataset):
dataset.dataset_type = ''.join(dataset.dataset_type.split('_'))
if not os.path.exists(os.path.join(dataset.root, "WIDER_train", "labels.txt")):
print("Landmark annotations not found, downloading to dataset root...")
self.download(dataset.root, mode="annotations")
return dataset
else:
return ValueError("Only WIDER face dataset is supported for this detector")
@staticmethod
def __prepare_val_dataset(dataset):
"""
Prepares any face DetectionDataset for evaluation.
:param dataset: evaluation dataset object
:type dataset: opendr.perception.object_detection_2d.datasets.DetectionDataset
:return: returns the converted dataset and recall metric
:rtype: opendr.perception.object_detection_2d.datasets.DetectionDataset,
opendr.perception.object_detection_2d.retinaface.algorithm.eval_recall.FaceDetectionRecallMetric
"""
if issubclass(type(dataset), WiderFaceDataset):
dataset.dataset_type = ''.join(dataset.dataset_type.split('_'))
eval_metric = FaceDetectionRecallMetric()
return dataset, eval_metric
elif issubclass(type(dataset), DetectionDataset):
eval_metric = FaceDetectionRecallMetric()
return dataset, eval_metric
else:
return ValueError("Dataset must be subclass of the DetectionDataset base class.")
@staticmethod
def __get_fixed_params(symbol):
"""
Collects the names of network parameters that should remain fixed (frozen) during training
"""
if not config.LAYER_FIX:
return []
fixed_param_names = []
idx = 0
for name in symbol.list_arguments():
if idx < 7 and name != 'data':
fixed_param_names.append(name)
if name.find('upsampling') >= 0:
fixed_param_names.append(name)
idx += 1
return fixed_param_names
def infer(self, img, threshold=0.8, nms_threshold=0.4, scales=[1024, 1980], mask_thresh=0.8):
"""
Performs inference on a single image and returns the resulting bounding boxes.
:param img: image to perform inference on
:type img: opendr.engine.data.Image
:param threshold: confidence threshold
:type threshold: float, optional
:param nms_threshold: NMS threshold
:type nms_threshold: float, optional
:param scales: inference scales
:type scales: list, optional
:param mask_thresh: mask confidence threshold, only used when backbone is 'mnet'
:type mask_thresh: float, optional
:return: list of bounding boxes
:rtype: BoundingBoxList
"""
if self.detector is None:
assert "Detector must be loaded with load() before inference."
self.detector.nms_threshold = nms_threshold
if not isinstance(img, Image):
img = Image(img)
_img = img.convert("channels_last", "rgb")
im_shape = _img.shape
target_size = scales[0]
max_size = scales[1]
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
im_scale = float(target_size) / float(im_size_min)
# prevent bigger axis from being more than max_size:
if np.round(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
scales = [im_scale]
flip = False
faces, landmarks = self.detector.detect(_img, threshold, scales=scales, do_flip=flip)
faces = np.hstack([faces, np.zeros((faces.shape[0], 1))])
bboxes = BoundingBoxList([])
for face in faces:
if face.shape[0] > 4:
mask = int(face[5] > mask_thresh)
# faces are in [x1, y1, x2, y2, score] format; convert them to xywh BoundingBoxes
bbox = BoundingBox(left=face[0], top=face[1],
width=face[2] - face[0],
height=face[3] - face[1],
name=mask, score=face[4])
bboxes.data.append(bbox)
# return faces, landmarks
return bboxes
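# A minimal, hedged usage sketch of the inference flow above. Assumptions: the enclosing
# class is an OpenDR-style learner (the constructor name and its arguments below are
# illustrative only), and 'cov4.jpg' mirrors the example image fetched by download(mode="images").
#
#   import cv2
#   learner = RetinaFaceLearner(backbone='resnet', device='cuda')   # hypothetical constructor
#   learner.download('./retinaface', mode="pretrained")
#   learner.load('./retinaface/retinaface_resnet')
#   raw = cv2.imread('./cov4.jpg')
#   boxes = learner.infer(raw, threshold=0.8, nms_threshold=0.4)    # raw arrays are wrapped into Image by infer()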
def save(self, path, verbose=False):
"""
This method is used to save a trained model.
Provided with the path, absolute or relative, including a *folder* name, it creates a directory with the name
of the *folder* provided and saves the model inside with a proper format and a .json file with metadata.
:param path: path for the model to be saved, including the folder name
:type path: str
:param verbose: whether to print success message or not, defaults to 'False'
:type verbose: bool, optional
"""
if self._model is None:
raise UserWarning("No model is loaded, cannot save.")
model_name = os.path.basename(path)
os.makedirs(path, exist_ok=True)
model_metadata = {"model_paths": [], "framework": "mxnet", "format": "", "has_data": False,
"inference_params": {}, "optimized": None, "optimizer_info": {},
}
_sym, arg, aux = self.__prepare_detector(self._model)
model_path = os.path.join(path, model_name)
mx.model.save_checkpoint(model_path, 0, _sym, arg, aux)
if verbose:
print("Saved model.")
model_metadata["model_paths"] = ['%s-symbol.json' % model_name,
'%s-%04d.params' % (model_name, 0)]
model_metadata["optimized"] = False
model_metadata["format"] = "checkpoint"
with open(os.path.join(path, model_name + ".json"), 'w') as outfile:
json.dump(model_metadata, outfile)
if verbose:
print("Saved model metadata.")
def load(self, path, verbose=True):
"""
Loads the model from inside the path provided, based on the metadata .json file included.
:param path: path of the directory where the model was saved
:type path: str
:param verbose: whether to print success message or not, defaults to 'True'
:type verbose: bool, optional
"""
# first, get model_name from path
model_name = os.path.basename(path)
with open(os.path.join(path, model_name + ".json")) as metadata_file:
metadata = json.load(metadata_file)
model_name = metadata['model_paths'][0].split('-')[0]
model_path = os.path.join(path, model_name)
if verbose:
print("Loading model from path: ", model_path)
generate_config(self.backbone, 'retinaface')
self.detector = RetinaFace(prefix=model_path, ctx_id=self.gpu_id, network=self.net)
self._model = self.detector.model
if verbose:
print("Loaded mxnet model.")
def download(self, path=None, mode="pretrained", verbose=True,
url=OPENDR_SERVER_URL + "perception/object_detection_2d/retinaface/"):
"""
Downloads all files necessary for inference, evaluation and training. Valid mode options: ["pretrained", "images",
"backbone", "annotations", "test_data"]
:param path: folder to which files will be downloaded, if None self.temp_path will be used
:type path: str, optional
:param mode: one of: ["pretrained", "images", "backbone", "annotations", "test_data"], where "pretrained" downloads a pretrained
network depending on the self.backbone type, "images" downloads example inference data, "backbone" downloads a
pretrained resnet backbone for training, and "annotations" downloads additional annotation files for training
:type mode: str, optional
:param verbose: if True, additional information is printed on stdout
:type verbose: bool, optional
:param url: URL to file location on FTP server
:type url: str, optional
"""
valid_modes = ["pretrained", "images", "backbone", "annotations", "test_data"]
if mode not in valid_modes:
raise UserWarning("Parameter 'mode' should be one of:", valid_modes)
if path is None:
path = self.temp_path
if not os.path.exists(path):
os.makedirs(path)
if mode == "pretrained":
model_name = "retinaface_{}".format(self.backbone)
if verbose:
print("Downloading pretrained files for {}".format(model_name))
path = os.path.join(path, model_name)
if not os.path.exists(path):
os.makedirs(path)
if verbose:
print("Downloading pretrained model...")
file_url = os.path.join(url, "pretrained", model_name, "{}.json".format(model_name))
if verbose:
print("Downloading metadata...")
file_path = os.path.join(path, "{}.json".format(model_name))
if not os.path.exists(file_path):
urlretrieve(file_url, file_path)
if verbose:
print("Downloading params...")
file_url = os.path.join(url, "pretrained", model_name, "{}-0000.params".format(model_name))
file_path = os.path.join(path, "{}-0000.params".format(model_name))
if not os.path.exists(file_path):
urlretrieve(file_url, file_path)
if verbose:
print("Downloading symbol...")
file_url = os.path.join(url, "pretrained", model_name, "{}-symbol.json".format(model_name))
file_path = os.path.join(path, "{}-symbol.json".format(model_name))
if not os.path.exists(file_path):
urlretrieve(file_url, file_path)
elif mode == "images":
file_url = os.path.join(url, "images", "cov4.jpg")
if verbose:
print("Downloading example image...")
file_path = os.path.join(path, "cov4.jpg")
if not os.path.exists(file_path):
urlretrieve(file_url, file_path)
elif mode == "annotations":
if verbose:
print("Downloading training annotations...")
for subset in ["train", "val", "test"]:
file_url = os.path.join(url, "annotations", "WIDER_{}".format(subset), "label.txt")
file_path = os.path.join(path, "WIDER_{}".format(subset), "label.txt")
if not os.path.exists(file_path):
urlretrieve(file_url, file_path)
elif mode == "backbone":
if verbose:
print("Downloading resnet backbone...")
file_url = os.path.join(url, "backbone", "resnet-50-symbol.json")
file_path = os.path.join(path, "resnet-50-symbol.json")
if not os.path.exists(file_path):
urlretrieve(file_url, file_path)
file_url = os.path.join(url, "backbone", "resnet-50-0000.params")
file_path = os.path.join(path, "resnet-50-0000.params")
if not os.path.exists(file_path):
urlretrieve(file_url, file_path)
elif mode == "test_data":
if verbose:
print("Downloading data for unit tests...")
# fake label.txt files
for subset in ["train", "val", "test"]:
file_url = os.path.join(url, "test_data", "WIDER_{}".format(subset), "label.txt")
if verbose:
print(file_url)
file_path = os.path.join(path, "WIDER_{}".format(subset), "label.txt")
if not os.path.exists(file_path):
os.makedirs(os.path.join(path, "WIDER_{}".format(subset)), exist_ok=True)
urlretrieve(file_url, file_path)
# single training image
file_url = os.path.join(url, "test_data", "WIDER_train", "images",
"0--Parade", "0_Parade_marchingband_1_849.jpg")
if verbose:
print(file_url)
file_path = os.path.join(path, "WIDER_train", "images",
"0--Parade", "0_Parade_marchingband_1_849.jpg")
if not os.path.exists(file_path):
print("t")
os.makedirs(os.path.join(path, "WIDER_train", "images", "0--Parade"), exist_ok=True)
print("t")
urlretrieve(file_url, file_path)
if verbose:
print("Downloaded")
# annotations
file_url = os.path.join(url, "test_data", "wider_face_split",
"wider_face_train_bbx_gt.txt")
if verbose:
print(file_url)
file_path = os.path.join(path, "wider_face_split",
"wider_face_train_bbx_gt.txt")
if not os.path.exists(file_path):
os.makedirs(os.path.join(path, "wider_face_split"), exist_ok=True)
urlretrieve(file_url, file_path)
def optimize(self, target_device):
"""This method is not used in this implementation."""
raise NotImplementedError
def reset(self):
"""This method is not used in this implementation."""
raise NotImplementedError
| []
| []
| [
"CUDA_VISIBLE_DEVICES"
]
| [] | ["CUDA_VISIBLE_DEVICES"] | python | 1 | 0 | |
tests/scripts/thread-cert/node.py | #!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import config
import ipaddress
import os
import sys
import pexpect
import pexpect.popen_spawn
import re
import simulator
import socket
import time
import unittest
import binascii
class Node:
def __init__(self, nodeid, is_mtd=False, simulator=None, version=None, is_bbr=False):
self.nodeid = nodeid
self.verbose = int(float(os.getenv('VERBOSE', 0)))
self.node_type = os.getenv('NODE_TYPE', 'sim')
self.env_version = os.getenv('THREAD_VERSION', '1.1')
self.is_bbr = is_bbr
self._initialized = False
if version is not None:
self.version = version
else:
self.version = self.env_version
self.simulator = simulator
if self.simulator:
self.simulator.add_node(self)
mode = os.environ.get('USE_MTD') == '1' and is_mtd and 'mtd' or 'ftd'
if self.node_type == 'soc':
self.__init_soc(nodeid)
elif self.node_type == 'ncp-sim':
# TODO use mode after ncp-mtd is available.
self.__init_ncp_sim(nodeid, 'ftd')
else:
self.__init_sim(nodeid, mode)
if self.verbose:
self.pexpect.logfile_read = sys.stdout.buffer
self._initialized = True
self.set_extpanid(config.EXTENDED_PANID)
def __init_sim(self, nodeid, mode):
""" Initialize a simulation node. """
# Default command if no match below, will be overridden if below conditions are met.
cmd = './ot-cli-%s' % (mode)
# If Thread version of node matches the testing environment version.
if self.version == self.env_version:
# Load Thread 1.2 BBR device when testing Thread 1.2 scenarios
# which requires device with Backbone functionality.
if self.version == '1.2' and self.is_bbr:
if 'OT_CLI_PATH_1_2_BBR' in os.environ:
cmd = os.environ['OT_CLI_PATH_1_2_BBR']
elif 'top_builddir_1_2_bbr' in os.environ:
srcdir = os.environ['top_builddir_1_2_bbr']
cmd = '%s/examples/apps/cli/ot-cli-%s' % (srcdir, mode)
# Load Thread device of the testing environment version (may be 1.1 or 1.2)
else:
if 'OT_CLI_PATH' in os.environ:
cmd = os.environ['OT_CLI_PATH']
elif 'top_builddir' in os.environ:
srcdir = os.environ['top_builddir']
cmd = '%s/examples/apps/cli/ot-cli-%s' % (srcdir, mode)
if 'RADIO_DEVICE' in os.environ:
cmd += ' --real-time-signal=+1 -v spinel+hdlc+uart://%s?forkpty-arg=%d' % (os.environ['RADIO_DEVICE'],
nodeid)
else:
cmd += ' %d' % nodeid
# Load Thread 1.1 node when testing Thread 1.2 scenarios for interoperability
elif self.version == '1.1':
# Posix app
if 'OT_CLI_PATH_1_1' in os.environ:
cmd = os.environ['OT_CLI_PATH_1_1']
elif 'top_builddir_1_1' in os.environ:
srcdir = os.environ['top_builddir_1_1']
cmd = '%s/examples/apps/cli/ot-cli-%s' % (srcdir, mode)
if 'RADIO_DEVICE_1_1' in os.environ:
cmd += ' --real-time-signal=+1 -v spinel+hdlc+uart://%s?forkpty-arg=%d' % (
os.environ['RADIO_DEVICE_1_1'], nodeid)
else:
cmd += ' %d' % nodeid
print("%s" % cmd)
self.pexpect = pexpect.popen_spawn.PopenSpawn(cmd, timeout=10)
# Add delay to ensure that the process is ready to receive commands.
timeout = 0.4
while timeout > 0:
self.pexpect.send('\r\n')
try:
self.pexpect.expect('> ', timeout=0.1)
break
except pexpect.TIMEOUT:
timeout -= 0.1
def __init_ncp_sim(self, nodeid, mode):
""" Initialize an NCP simulation node. """
# Default command if no match below, will be overridden if below conditions are met.
cmd = 'spinel-cli.py -p ./ot-ncp-%s -n' % mode
# If Thread version of node matches the testing environment version.
if self.version == self.env_version:
if 'RADIO_DEVICE' in os.environ:
args = ' --real-time-signal=+1 spinel+hdlc+uart://%s?forkpty-arg=%d' % (os.environ['RADIO_DEVICE'],
nodeid)
else:
args = ''
# Load Thread 1.2 BBR device when testing Thread 1.2 scenarios
# which requires device with Backbone functionality.
if self.version == '1.2' and self.is_bbr:
if 'OT_NCP_PATH_1_2_BBR' in os.environ:
cmd = 'spinel-cli.py -p "%s%s" -n' % (
os.environ['OT_NCP_PATH_1_2_BBR'],
args,
)
elif 'top_builddir_1_2_bbr' in os.environ:
srcdir = os.environ['top_builddir_1_2_bbr']
cmd = '%s/examples/apps/ncp/ot-ncp-%s' % (srcdir, mode)
cmd = 'spinel-cli.py -p "%s%s" -n' % (
cmd,
args,
)
# Load Thread device of the testing environment version (may be 1.1 or 1.2).
else:
if 'OT_NCP_PATH' in os.environ:
cmd = 'spinel-cli.py -p "%s%s" -n' % (
os.environ['OT_NCP_PATH'],
args,
)
elif 'top_builddir' in os.environ:
srcdir = os.environ['top_builddir']
cmd = '%s/examples/apps/ncp/ot-ncp-%s' % (srcdir, mode)
cmd = 'spinel-cli.py -p "%s%s" -n' % (
cmd,
args,
)
# Load Thread 1.1 node when testing Thread 1.2 scenarios for interoperability.
elif self.version == '1.1':
if 'RADIO_DEVICE_1_1' in os.environ:
args = ' --real-time-signal=+1 spinel+hdlc+uart://%s?forkpty-arg=%d' % (os.environ['RADIO_DEVICE_1_1'],
nodeid)
else:
args = ''
if 'OT_NCP_PATH_1_1' in os.environ:
cmd = 'spinel-cli.py -p "%s%s" -n' % (
os.environ['OT_NCP_PATH_1_1'],
args,
)
elif 'top_builddir_1_1' in os.environ:
srcdir = os.environ['top_builddir_1_1']
cmd = '%s/examples/apps/ncp/ot-ncp-%s' % (srcdir, mode)
cmd = 'spinel-cli.py -p "%s%s" -n' % (
cmd,
args,
)
cmd += ' %d' % nodeid
print("%s" % cmd)
self.pexpect = pexpect.spawn(cmd, timeout=10)
# Add delay to ensure that the process is ready to receive commands.
time.sleep(0.2)
self._expect('spinel-cli >')
self.debug(int(os.getenv('DEBUG', '0')))
def _expect(self, pattern, timeout=-1, *args, **kwargs):
""" Process simulator events until expected the pattern. """
if timeout == -1:
timeout = self.pexpect.timeout
assert timeout > 0
while timeout > 0:
try:
return self.pexpect.expect(pattern, 0.1, *args, **kwargs)
except pexpect.TIMEOUT:
timeout -= 0.1
self.simulator.go(0)
if timeout <= 0:
raise
def _prepare_pattern(self, pattern):
"""Build a new pexpect pattern matching line by line.
Adds lookahead and lookbehind to make each pattern match a whole line,
and adds 'Done' as the first pattern.
Args:
pattern: a single regex or a list of regex.
Returns:
A list of regex.
"""
EXPECT_LINE_FORMAT = r'(?<=[\r\n])%s(?=[\r\n])'
if isinstance(pattern, list):
pattern = [EXPECT_LINE_FORMAT % p for p in pattern]
else:
pattern = [EXPECT_LINE_FORMAT % pattern]
return [EXPECT_LINE_FORMAT % 'Done'] + pattern
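# Illustrative example of the transformation above (the input pattern is hypothetical):
#   _prepare_pattern(r'\d+')
#   -> [r'(?<=[\r\n])Done(?=[\r\n])', r'(?<=[\r\n])\d+(?=[\r\n])']
# i.e. 'Done' is tried first and every pattern only matches a complete output line.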
def _expect_result(self, pattern, *args, **kwargs):
"""Expect a single matching result.
The arguments are identical to pexpect.expect().
Returns:
The matched line.
"""
results = self._expect_results(pattern, *args, **kwargs)
assert len(results) == 1
return results[0]
def _expect_results(self, pattern, *args, **kwargs):
"""Expect multiple matching results.
The arguments are identical to pexpect.expect().
Returns:
The matched lines.
"""
results = []
pattern = self._prepare_pattern(pattern)
while self._expect(pattern, *args, **kwargs):
results.append(self.pexpect.match.group(0).decode('utf8'))
return results
def __init_soc(self, nodeid):
""" Initialize a System-on-a-chip node connected via UART. """
import fdpexpect
serialPort = '/dev/ttyUSB%d' % ((nodeid - 1) * 2)
self.pexpect = fdpexpect.fdspawn(os.open(serialPort, os.O_RDWR | os.O_NONBLOCK | os.O_NOCTTY))
def __del__(self):
self.destroy()
def destroy(self):
if not self._initialized:
return
if (hasattr(self.pexpect, 'proc') and self.pexpect.proc.poll() is None or
not hasattr(self.pexpect, 'proc') and self.pexpect.isalive()):
print("%d: exit" % self.nodeid)
self.pexpect.send('exit\n')
self.pexpect.expect(pexpect.EOF)
self.pexpect.wait()
self._initialized = False
def read_cert_messages_in_commissioning_log(self, timeout=-1):
"""Get the log of the traffic after DTLS handshake.
"""
format_str = br"=+?\[\[THCI\].*?type=%s.*?\].*?=+?[\s\S]+?-{40,}"
join_fin_req = format_str % br"JOIN_FIN\.req"
join_fin_rsp = format_str % br"JOIN_FIN\.rsp"
dummy_format_str = br"\[THCI\].*?type=%s.*?"
join_ent_ntf = dummy_format_str % br"JOIN_ENT\.ntf"
join_ent_rsp = dummy_format_str % br"JOIN_ENT\.rsp"
pattern = (b"(" + join_fin_req + b")|(" + join_fin_rsp + b")|(" + join_ent_ntf + b")|(" + join_ent_rsp + b")")
messages = []
# There are at most 4 cert messages for both joiner and commissioner
for _ in range(0, 4):
try:
self._expect(pattern, timeout=timeout)
log = self.pexpect.match.group(0)
messages.append(self._extract_cert_message(log))
except BaseException:
break
return messages
def _extract_cert_message(self, log):
res = re.search(br"direction=\w+", log)
assert res
direction = res.group(0).split(b'=')[1].strip()
res = re.search(br"type=\S+", log)
assert res
type = res.group(0).split(b'=')[1].strip()
payload = bytearray([])
payload_len = 0
if type in [b"JOIN_FIN.req", b"JOIN_FIN.rsp"]:
res = re.search(br"len=\d+", log)
assert res
payload_len = int(res.group(0).split(b'=')[1].strip())
hex_pattern = br"\|(\s([0-9a-fA-F]{2}|\.\.))+?\s+?\|"
while True:
res = re.search(hex_pattern, log)
if not res:
break
data = [int(hex, 16) for hex in res.group(0)[1:-1].split(b' ') if hex and hex != b'..']
payload += bytearray(data)
log = log[res.end() - 1:]
assert len(payload) == payload_len
return (direction, type, payload)
def send_command(self, cmd, go=True):
print("%d: %s" % (self.nodeid, cmd))
self.pexpect.send(cmd + '\n')
if go:
self.simulator.go(0, nodeid=self.nodeid)
sys.stdout.flush()
def get_commands(self):
self.send_command('?')
self._expect('Commands:')
return self._expect_results(r'\S+')
def set_mode(self, mode):
cmd = 'mode %s' % mode
self.send_command(cmd)
self._expect('Done')
def debug(self, level):
# `debug` command will not trigger interaction with simulator
self.send_command('debug %d' % level, go=False)
def start(self):
self.interface_up()
self.thread_start()
def stop(self):
self.thread_stop()
self.interface_down()
def interface_up(self):
self.send_command('ifconfig up')
self._expect('Done')
def interface_down(self):
self.send_command('ifconfig down')
self._expect('Done')
def thread_start(self):
self.send_command('thread start')
self._expect('Done')
def thread_stop(self):
self.send_command('thread stop')
self._expect('Done')
def commissioner_start(self):
cmd = 'commissioner start'
self.send_command(cmd)
self._expect('Done')
def commissioner_add_joiner(self, addr, psk):
cmd = 'commissioner joiner add %s %s' % (addr, psk)
self.send_command(cmd)
self._expect('Done')
def joiner_start(self, pskd='', provisioning_url=''):
cmd = 'joiner start %s %s' % (pskd, provisioning_url)
self.send_command(cmd)
self._expect('Done')
def clear_whitelist(self):
cmd = 'macfilter addr clear'
self.send_command(cmd)
self._expect('Done')
def enable_whitelist(self):
cmd = 'macfilter addr whitelist'
self.send_command(cmd)
self._expect('Done')
def disable_whitelist(self):
cmd = 'macfilter addr disable'
self.send_command(cmd)
self._expect('Done')
def add_whitelist(self, addr, rssi=None):
cmd = 'macfilter addr add %s' % addr
if rssi is not None:
cmd += ' %s' % rssi
self.send_command(cmd)
self._expect('Done')
def get_bbr_registration_jitter(self):
self.send_command('bbr jitter')
return int(self._expect_result(r'\d+'))
def set_bbr_registration_jitter(self, jitter):
cmd = 'bbr jitter %d' % jitter
self.send_command(cmd)
self._expect('Done')
def enable_backbone_router(self):
cmd = 'bbr enable'
self.send_command(cmd)
self._expect('Done')
def disable_backbone_router(self):
cmd = 'bbr disable'
self.send_command(cmd)
self._expect('Done')
def register_backbone_router(self):
cmd = 'bbr register'
self.send_command(cmd)
self._expect('Done')
def get_backbone_router_state(self):
states = [r'Disabled', r'Primary', r'Secondary']
self.send_command('bbr state')
return self._expect_result(states)
def get_backbone_router(self):
cmd = 'bbr config'
self.send_command(cmd)
self._expect(r'(.*)Done')
g = self.pexpect.match.groups()
output = g[0].decode("utf-8")
lines = output.strip().split('\n')
lines = [l.strip() for l in lines]
ret = {}
for l in lines:
z = re.search(r'seqno:\s+([0-9]+)', l)
if z:
ret['seqno'] = int(z.groups()[0])
z = re.search(r'delay:\s+([0-9]+)', l)
if z:
ret['delay'] = int(z.groups()[0])
z = re.search(r'timeout:\s+([0-9]+)', l)
if z:
ret['timeout'] = int(z.groups()[0])
return ret
def set_backbone_router(self, seqno=None, reg_delay=None, mlr_timeout=None):
cmd = 'bbr config'
if seqno is not None:
cmd += ' seqno %d' % seqno
if reg_delay is not None:
cmd += ' delay %d' % reg_delay
if mlr_timeout is not None:
cmd += ' timeout %d' % mlr_timeout
self.send_command(cmd)
self._expect('Done')
def set_domain_prefix(self, prefix, flags='prosD'):
self.add_prefix(prefix, flags)
self.register_netdata()
def remove_domain_prefix(self, prefix):
self.remove_prefix(prefix)
self.register_netdata()
def set_next_dua_response(self, status, iid=None):
cmd = 'bbr mgmt dua {}'.format(status)
if iid is not None:
cmd += ' ' + str(iid)
self.send_command(cmd)
self._expect('Done')
def set_dua_iid(self, iid):
cmd = 'dua iid {}'.format(iid)
self.send_command(cmd)
self._expect('Done')
def clear_dua_iid(self):
cmd = 'dua iid clear'
self.send_command(cmd)
self._expect('Done')
def set_link_quality(self, addr, lqi):
cmd = 'macfilter rss add-lqi %s %s' % (addr, lqi)
self.send_command(cmd)
self._expect('Done')
def remove_whitelist(self, addr):
cmd = 'macfilter addr remove %s' % addr
self.send_command(cmd)
self._expect('Done')
def get_addr16(self):
self.send_command('rloc16')
rloc16 = self._expect_result(r'[0-9a-fA-F]{4}')
return int(rloc16, 16)
def get_router_id(self):
rloc16 = self.get_addr16()
return rloc16 >> 10
def get_addr64(self):
self.send_command('extaddr')
return self._expect_result('[0-9a-fA-F]{16}')
def set_addr64(self, addr64):
self.send_command('extaddr %s' % addr64)
self._expect('Done')
def get_eui64(self):
self.send_command('eui64')
return self._expect_result('[0-9a-fA-F]{16}')
def set_extpanid(self, extpanid):
self.send_command('extpanid %s' % extpanid)
self._expect('Done')
def get_joiner_id(self):
self.send_command('joiner id')
return self._expect_result('[0-9a-fA-F]{16}')
def get_channel(self):
self.send_command('channel')
return int(self._expect_result(r'\d+'))
def set_channel(self, channel):
cmd = 'channel %d' % channel
self.send_command(cmd)
self._expect('Done')
def get_masterkey(self):
self.send_command('masterkey')
return self._expect_result('[0-9a-fA-F]{32}')
def set_masterkey(self, masterkey):
cmd = 'masterkey %s' % masterkey
self.send_command(cmd)
self._expect('Done')
def get_key_sequence_counter(self):
self.send_command('keysequence counter')
result = self._expect_result(r'\d+')
return int(result)
def set_key_sequence_counter(self, key_sequence_counter):
cmd = 'keysequence counter %d' % key_sequence_counter
self.send_command(cmd)
self._expect('Done')
def set_key_switch_guardtime(self, key_switch_guardtime):
cmd = 'keysequence guardtime %d' % key_switch_guardtime
self.send_command(cmd)
self._expect('Done')
def set_network_id_timeout(self, network_id_timeout):
cmd = 'networkidtimeout %d' % network_id_timeout
self.send_command(cmd)
self._expect('Done')
def _escape_escapable(self, string):
"""Escape CLI escapable characters in the given string.
Args:
string (str): UTF-8 input string.
Returns:
[str]: The modified string with escaped characters.
"""
escapable_chars = '\\ \t\r\n'
for char in escapable_chars:
string = string.replace(char, '\\%s' % char)
return string
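# Example of the escaping above: a network name such as "my net" becomes "my\ net",
# so spaces, tabs and backslashes survive CLI tokenization in set_network_name() and
# send_mgmt_active_set() below.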
def get_network_name(self):
self.send_command('networkname')
return self._expect_result([r'\S+'])
def set_network_name(self, network_name):
cmd = 'networkname %s' % self._escape_escapable(network_name)
self.send_command(cmd)
self._expect('Done')
def get_panid(self):
self.send_command('panid')
result = self._expect_result('0x[0-9a-fA-F]{4}')
return int(result, 16)
def set_panid(self, panid=config.PANID):
cmd = 'panid %d' % panid
self.send_command(cmd)
self._expect('Done')
def set_parent_priority(self, priority):
cmd = 'parentpriority %d' % priority
self.send_command(cmd)
self._expect('Done')
def get_partition_id(self):
self.send_command('leaderpartitionid')
return self._expect_result(r'\d+')
def set_partition_id(self, partition_id):
cmd = 'leaderpartitionid %d' % partition_id
self.send_command(cmd)
self._expect('Done')
def get_pollperiod(self):
self.send_command('pollperiod')
return self._expect_result(r'\d+')
def set_pollperiod(self, pollperiod):
self.send_command('pollperiod %d' % pollperiod)
self._expect('Done')
def set_router_upgrade_threshold(self, threshold):
cmd = 'routerupgradethreshold %d' % threshold
self.send_command(cmd)
self._expect('Done')
def set_router_downgrade_threshold(self, threshold):
cmd = 'routerdowngradethreshold %d' % threshold
self.send_command(cmd)
self._expect('Done')
def prefer_router_id(self, router_id):
cmd = 'preferrouterid %d' % router_id
self.send_command(cmd)
self._expect('Done')
def release_router_id(self, router_id):
cmd = 'releaserouterid %d' % router_id
self.send_command(cmd)
self._expect('Done')
def get_state(self):
states = [r'detached', r'child', r'router', r'leader']
self.send_command('state')
return self._expect_result(states)
def set_state(self, state):
cmd = 'state %s' % state
self.send_command(cmd)
self._expect('Done')
def get_timeout(self):
self.send_command('childtimeout')
return self._expect_result(r'\d+')
def set_timeout(self, timeout):
cmd = 'childtimeout %d' % timeout
self.send_command(cmd)
self._expect('Done')
def set_max_children(self, number):
cmd = 'childmax %d' % number
self.send_command(cmd)
self._expect('Done')
def get_weight(self):
self.send_command('leaderweight')
return self._expect_result(r'\d+')
def set_weight(self, weight):
cmd = 'leaderweight %d' % weight
self.send_command(cmd)
self._expect('Done')
def add_ipaddr(self, ipaddr):
cmd = 'ipaddr add %s' % ipaddr
self.send_command(cmd)
self._expect('Done')
def add_ipmaddr(self, ipmaddr):
cmd = 'ipmaddr add %s' % ipmaddr
self.send_command(cmd)
self._expect('Done')
def del_ipmaddr(self, ipmaddr):
cmd = 'ipmaddr del %s' % ipmaddr
self.send_command(cmd)
self._expect('Done')
def get_addrs(self):
self.send_command('ipaddr')
return self._expect_results(r'\S+(:\S*)+')
def get_mleid(self):
self.send_command('ipaddr mleid')
return self._expect_result(r'\S+(:\S*)+')
def get_linklocal(self):
self.send_command('ipaddr linklocal')
return self._expect_result(r'\S+(:\S*)+')
def get_rloc(self):
self.send_command('ipaddr rloc')
return self._expect_result(r'\S+(:\S*)+')
def get_addr(self, prefix):
network = ipaddress.ip_network(u'%s' % str(prefix))
addrs = self.get_addrs()
for addr in addrs:
if isinstance(addr, bytearray):
addr = bytes(addr)
ipv6_address = ipaddress.ip_address(addr)
if ipv6_address in network:
return ipv6_address.exploded
return None
def has_ipaddr(self, address):
ipaddr = ipaddress.ip_address(address)
ipaddrs = self.get_addrs()
for addr in ipaddrs:
if isinstance(addr, bytearray):
addr = bytes(addr)
if ipaddress.ip_address(addr) == ipaddr:
return True
return False
def get_ipmaddrs(self):
self.send_command('ipmaddr')
return self._expect_results(r'\S+(:\S*)+')
def has_ipmaddr(self, address):
ipmaddr = ipaddress.ip_address(address)
ipmaddrs = self.get_ipmaddrs()
for addr in ipmaddrs:
if isinstance(addr, bytearray):
addr = bytes(addr)
if ipaddress.ip_address(addr) == ipmaddr:
return True
return False
def get_addr_leader_aloc(self):
addrs = self.get_addrs()
for addr in addrs:
segs = addr.split(':')
if (segs[4] == '0' and segs[5] == 'ff' and segs[6] == 'fe00' and segs[7] == 'fc00'):
return addr
return None
def get_eidcaches(self):
eidcaches = []
self.send_command('eidcache')
pattern = self._prepare_pattern(r'([a-fA-F0-9\:]+) ([a-fA-F0-9]+)')
while self._expect(pattern):
eid = self.pexpect.match.groups()[0].decode("utf-8")
rloc = self.pexpect.match.groups()[1].decode("utf-8")
eidcaches.append((eid, rloc))
return eidcaches
def add_service(self, enterpriseNumber, serviceData, serverData):
cmd = 'service add %s %s %s' % (
enterpriseNumber,
serviceData,
serverData,
)
self.send_command(cmd)
self._expect('Done')
def remove_service(self, enterpriseNumber, serviceData):
cmd = 'service remove %s %s' % (enterpriseNumber, serviceData)
self.send_command(cmd)
self._expect('Done')
def __getLinkLocalAddress(self):
for ip6Addr in self.get_addrs():
if re.match(config.LINK_LOCAL_REGEX_PATTERN, ip6Addr, re.I):
return ip6Addr
return None
def __getGlobalAddress(self):
global_address = []
for ip6Addr in self.get_addrs():
if ((not re.match(config.LINK_LOCAL_REGEX_PATTERN, ip6Addr, re.I)) and
(not re.match(config.MESH_LOCAL_PREFIX_REGEX_PATTERN, ip6Addr, re.I)) and
(not re.match(config.ROUTING_LOCATOR_REGEX_PATTERN, ip6Addr, re.I))):
global_address.append(ip6Addr)
return global_address
def __getRloc(self):
for ip6Addr in self.get_addrs():
if (re.match(config.MESH_LOCAL_PREFIX_REGEX_PATTERN, ip6Addr, re.I) and
re.match(config.ROUTING_LOCATOR_REGEX_PATTERN, ip6Addr, re.I) and
not (re.match(config.ALOC_FLAG_REGEX_PATTERN, ip6Addr, re.I))):
return ip6Addr
return None
def __getAloc(self):
aloc = []
for ip6Addr in self.get_addrs():
if (re.match(config.MESH_LOCAL_PREFIX_REGEX_PATTERN, ip6Addr, re.I) and
re.match(config.ROUTING_LOCATOR_REGEX_PATTERN, ip6Addr, re.I) and
re.match(config.ALOC_FLAG_REGEX_PATTERN, ip6Addr, re.I)):
aloc.append(ip6Addr)
return aloc
def __getMleid(self):
for ip6Addr in self.get_addrs():
if re.match(config.MESH_LOCAL_PREFIX_REGEX_PATTERN, ip6Addr,
re.I) and not (re.match(config.ROUTING_LOCATOR_REGEX_PATTERN, ip6Addr, re.I)):
return ip6Addr
return None
def get_ip6_address(self, address_type):
"""Get specific type of IPv6 address configured on thread device.
Args:
address_type: the config.ADDRESS_TYPE type of IPv6 address.
Returns:
IPv6 address string.
"""
if address_type == config.ADDRESS_TYPE.LINK_LOCAL:
return self.__getLinkLocalAddress()
elif address_type == config.ADDRESS_TYPE.GLOBAL:
return self.__getGlobalAddress()
elif address_type == config.ADDRESS_TYPE.RLOC:
return self.__getRloc()
elif address_type == config.ADDRESS_TYPE.ALOC:
return self.__getAloc()
elif address_type == config.ADDRESS_TYPE.ML_EID:
return self.__getMleid()
else:
return None
return None
def get_context_reuse_delay(self):
self.send_command('contextreusedelay')
return self._expect_result(r'\d+')
def set_context_reuse_delay(self, delay):
cmd = 'contextreusedelay %d' % delay
self.send_command(cmd)
self._expect('Done')
def add_prefix(self, prefix, flags='paosr', prf='med'):
cmd = 'prefix add %s %s %s' % (prefix, flags, prf)
self.send_command(cmd)
self._expect('Done')
def remove_prefix(self, prefix):
cmd = 'prefix remove %s' % prefix
self.send_command(cmd)
self._expect('Done')
def add_route(self, prefix, prf='med'):
cmd = 'route add %s %s' % (prefix, prf)
self.send_command(cmd)
self._expect('Done')
def remove_route(self, prefix):
cmd = 'route remove %s' % prefix
self.send_command(cmd)
self._expect('Done')
def register_netdata(self):
self.send_command('netdataregister')
self._expect('Done')
def send_network_diag_get(self, addr, tlv_types):
self.send_command('networkdiagnostic get %s %s' % (addr, ' '.join([str(t.value) for t in tlv_types])))
if isinstance(self.simulator, simulator.VirtualTime):
self.simulator.go(8)
timeout = 1
else:
timeout = 8
self._expect('Done', timeout=timeout)
def send_network_diag_reset(self, addr, tlv_types):
self.send_command('networkdiagnostic reset %s %s' % (addr, ' '.join([str(t.value) for t in tlv_types])))
if isinstance(self.simulator, simulator.VirtualTime):
self.simulator.go(8)
timeout = 1
else:
timeout = 8
self._expect('Done', timeout=timeout)
def energy_scan(self, mask, count, period, scan_duration, ipaddr):
cmd = 'commissioner energy %d %d %d %d %s' % (
mask,
count,
period,
scan_duration,
ipaddr,
)
self.send_command(cmd)
if isinstance(self.simulator, simulator.VirtualTime):
self.simulator.go(8)
timeout = 1
else:
timeout = 8
self._expect('Energy:', timeout=timeout)
def panid_query(self, panid, mask, ipaddr):
cmd = 'commissioner panid %d %d %s' % (panid, mask, ipaddr)
self.send_command(cmd)
if isinstance(self.simulator, simulator.VirtualTime):
self.simulator.go(8)
timeout = 1
else:
timeout = 8
self._expect('Conflict:', timeout=timeout)
def scan(self):
self.send_command('scan')
return self._expect_results(r'\|\s(\S+)\s+\|\s(\S+)\s+\|\s([0-9a-fA-F]{4})\s\|\s([0-9a-fA-F]{16})\s\|\s(\d+)')
def ping(self, ipaddr, num_responses=1, size=None, timeout=5):
cmd = 'ping %s' % ipaddr
if size is not None:
cmd += ' %d' % size
self.send_command(cmd)
end = self.simulator.now() + timeout
responders = {}
result = True
# ncp-sim doesn't print Done
done = (self.node_type == 'ncp-sim')
while len(responders) < num_responses or not done:
self.simulator.go(1)
try:
i = self._expect([r'from (\S+):', r'Done'], timeout=0.1)
except (pexpect.TIMEOUT, socket.timeout):
if self.simulator.now() < end:
continue
result = False
if isinstance(self.simulator, simulator.VirtualTime):
self.simulator.sync_devices()
break
else:
if i == 0:
responders[self.pexpect.match.groups()[0]] = 1
elif i == 1:
done = True
return result
def reset(self):
self.send_command('reset')
time.sleep(0.1)
def set_router_selection_jitter(self, jitter):
cmd = 'routerselectionjitter %d' % jitter
self.send_command(cmd)
self._expect('Done')
def set_active_dataset(
self,
timestamp,
panid=None,
channel=None,
channel_mask=None,
master_key=None,
):
self.send_command('dataset clear')
self._expect('Done')
cmd = 'dataset activetimestamp %d' % timestamp
self.send_command(cmd)
self._expect('Done')
if panid is not None:
cmd = 'dataset panid %d' % panid
self.send_command(cmd)
self._expect('Done')
if channel is not None:
cmd = 'dataset channel %d' % channel
self.send_command(cmd)
self._expect('Done')
if channel_mask is not None:
cmd = 'dataset channelmask %d' % channel_mask
self.send_command(cmd)
self._expect('Done')
if master_key is not None:
cmd = 'dataset masterkey %s' % master_key
self.send_command(cmd)
self._expect('Done')
# Set the meshlocal prefix in config.py
self.send_command('dataset meshlocalprefix %s' % config.MESH_LOCAL_PREFIX.split('/')[0])
self._expect('Done')
self.send_command('dataset commit active')
self._expect('Done')
def set_pending_dataset(self, pendingtimestamp, activetimestamp, panid=None, channel=None):
self.send_command('dataset clear')
self._expect('Done')
cmd = 'dataset pendingtimestamp %d' % pendingtimestamp
self.send_command(cmd)
self._expect('Done')
cmd = 'dataset activetimestamp %d' % activetimestamp
self.send_command(cmd)
self._expect('Done')
if panid is not None:
cmd = 'dataset panid %d' % panid
self.send_command(cmd)
self._expect('Done')
if channel is not None:
cmd = 'dataset channel %d' % channel
self.send_command(cmd)
self._expect('Done')
# Set the meshlocal prefix in config.py
self.send_command('dataset meshlocalprefix %s' % config.MESH_LOCAL_PREFIX.split('/')[0])
self._expect('Done')
self.send_command('dataset commit pending')
self._expect('Done')
def announce_begin(self, mask, count, period, ipaddr):
cmd = 'commissioner announce %d %d %d %s' % (
mask,
count,
period,
ipaddr,
)
self.send_command(cmd)
self._expect('Done')
def send_mgmt_active_set(
self,
active_timestamp=None,
channel=None,
channel_mask=None,
extended_panid=None,
panid=None,
master_key=None,
mesh_local=None,
network_name=None,
binary=None,
):
cmd = 'dataset mgmtsetcommand active '
if active_timestamp is not None:
cmd += 'activetimestamp %d ' % active_timestamp
if channel is not None:
cmd += 'channel %d ' % channel
if channel_mask is not None:
cmd += 'channelmask %d ' % channel_mask
if extended_panid is not None:
cmd += 'extpanid %s ' % extended_panid
if panid is not None:
cmd += 'panid %d ' % panid
if master_key is not None:
cmd += 'masterkey %s ' % master_key
if mesh_local is not None:
cmd += 'localprefix %s ' % mesh_local
if network_name is not None:
cmd += 'networkname %s ' % self._escape_escapable(network_name)
if binary is not None:
cmd += 'binary %s ' % binary
self.send_command(cmd)
self._expect('Done')
def send_mgmt_pending_set(
self,
pending_timestamp=None,
active_timestamp=None,
delay_timer=None,
channel=None,
panid=None,
master_key=None,
mesh_local=None,
network_name=None,
):
cmd = 'dataset mgmtsetcommand pending '
if pending_timestamp is not None:
cmd += 'pendingtimestamp %d ' % pending_timestamp
if active_timestamp is not None:
cmd += 'activetimestamp %d ' % active_timestamp
if delay_timer is not None:
cmd += 'delaytimer %d ' % delay_timer
if channel is not None:
cmd += 'channel %d ' % channel
if panid is not None:
cmd += 'panid %d ' % panid
if master_key is not None:
cmd += 'masterkey %s ' % master_key
if mesh_local is not None:
cmd += 'localprefix %s ' % mesh_local
if network_name is not None:
cmd += 'networkname %s ' % self._escape_escapable(network_name)
self.send_command(cmd)
self._expect('Done')
def coap_cancel(self):
"""
Cancel a CoAP subscription.
"""
cmd = 'coap cancel'
self.send_command(cmd)
self._expect('Done')
def coap_delete(self, ipaddr, uri, con=False, payload=None):
"""
Send a DELETE request via CoAP.
"""
return self._coap_rq('delete', ipaddr, uri, con, payload)
def coap_get(self, ipaddr, uri, con=False, payload=None):
"""
Send a GET request via CoAP.
"""
return self._coap_rq('get', ipaddr, uri, con, payload)
def coap_observe(self, ipaddr, uri, con=False, payload=None):
"""
Send a GET request via CoAP with Observe set.
"""
return self._coap_rq('observe', ipaddr, uri, con, payload)
def coap_post(self, ipaddr, uri, con=False, payload=None):
"""
Send a POST request via CoAP.
"""
return self._coap_rq('post', ipaddr, uri, con, payload)
def coap_put(self, ipaddr, uri, con=False, payload=None):
"""
Send a PUT request via CoAP.
"""
return self._coap_rq('put', ipaddr, uri, con, payload)
def _coap_rq(self, method, ipaddr, uri, con=False, payload=None):
"""
Issue a GET/POST/PUT/DELETE/GET OBSERVE request.
"""
cmd = 'coap %s %s %s' % (method, ipaddr, uri)
if con:
cmd += ' con'
else:
cmd += ' non'
if payload is not None:
cmd += ' %s' % payload
self.send_command(cmd)
return self.coap_wait_response()
def coap_wait_response(self):
"""
Wait for a CoAP response, and return it.
"""
if isinstance(self.simulator, simulator.VirtualTime):
self.simulator.go(5)
timeout = 1
else:
timeout = 5
self._expect(r'coap response from ([\da-f:]+)(?: OBS=(\d+))?'
r'(?: with payload: ([\da-f]+))?\b',
timeout=timeout)
(source, observe, payload) = self.pexpect.match.groups()
source = source.decode('UTF-8')
if observe is not None:
observe = int(observe, base=10)
if payload is not None:
payload = binascii.a2b_hex(payload).decode('UTF-8')
# Return the values received
return dict(source=source, observe=observe, payload=payload)
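# Illustrative return value of coap_wait_response() (all values are hypothetical):
#   {'source': 'fdde:ad00:beef:0:0:ff:fe00:fc00', 'observe': 3, 'payload': 'hello'}
# 'observe' and 'payload' are None when the corresponding fields are absent from the CLI line.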
def coap_wait_request(self):
"""
Wait for a CoAP request to be made.
"""
if isinstance(self.simulator, simulator.VirtualTime):
self.simulator.go(5)
timeout = 1
else:
timeout = 5
self._expect(r'coap request from ([\da-f:]+)(?: OBS=(\d+))?'
r'(?: with payload: ([\da-f]+))?\b',
timeout=timeout)
(source, observe, payload) = self.pexpect.match.groups()
source = source.decode('UTF-8')
if observe is not None:
observe = int(observe, base=10)
if payload is not None:
payload = binascii.a2b_hex(payload).decode('UTF-8')
# Return the values received
return dict(source=source, observe=observe, payload=payload)
def coap_wait_subscribe(self):
"""
Wait for a CoAP client to be subscribed.
"""
if isinstance(self.simulator, simulator.VirtualTime):
self.simulator.go(5)
timeout = 1
else:
timeout = 5
self._expect(r'Subscribing client\b', timeout=timeout)
def coap_wait_ack(self):
"""
Wait for a CoAP notification ACK.
"""
if isinstance(self.simulator, simulator.VirtualTime):
self.simulator.go(5)
timeout = 1
else:
timeout = 5
self._expect(r'Received ACK in reply to notification ' r'from ([\da-f:]+)\b', timeout=timeout)
(source,) = self.pexpect.match.groups()
source = source.decode('UTF-8')
return source
def coap_set_resource_path(self, path):
"""
Set the path for the CoAP resource.
"""
cmd = 'coap resource %s' % path
self.send_command(cmd)
self._expect('Done')
def coap_set_content(self, content):
"""
Set the content of the CoAP resource.
"""
cmd = 'coap set %s' % content
self.send_command(cmd)
self._expect('Done')
def coap_start(self):
"""
Start the CoAP service.
"""
cmd = 'coap start'
self.send_command(cmd)
self._expect('Done')
def coap_stop(self):
"""
Stop the CoAP service.
"""
cmd = 'coap stop'
self.send_command(cmd)
if isinstance(self.simulator, simulator.VirtualTime):
self.simulator.go(5)
timeout = 1
else:
timeout = 5
self._expect('Done', timeout=timeout)
def coaps_start_psk(self, psk, pskIdentity):
cmd = 'coaps psk %s %s' % (psk, pskIdentity)
self.send_command(cmd)
self._expect('Done')
cmd = 'coaps start'
self.send_command(cmd)
self._expect('Done')
def coaps_start_x509(self):
cmd = 'coaps x509'
self.send_command(cmd)
self._expect('Done')
cmd = 'coaps start'
self.send_command(cmd)
self._expect('Done')
def coaps_set_resource_path(self, path):
cmd = 'coaps resource %s' % path
self.send_command(cmd)
self._expect('Done')
def coaps_stop(self):
cmd = 'coaps stop'
self.send_command(cmd)
if isinstance(self.simulator, simulator.VirtualTime):
self.simulator.go(5)
timeout = 1
else:
timeout = 5
self._expect('Done', timeout=timeout)
def coaps_connect(self, ipaddr):
cmd = 'coaps connect %s' % ipaddr
self.send_command(cmd)
if isinstance(self.simulator, simulator.VirtualTime):
self.simulator.go(5)
timeout = 1
else:
timeout = 5
self._expect('coaps connected', timeout=timeout)
def coaps_disconnect(self):
cmd = 'coaps disconnect'
self.send_command(cmd)
self._expect('Done')
self.simulator.go(5)
def coaps_get(self):
cmd = 'coaps get test'
self.send_command(cmd)
if isinstance(self.simulator, simulator.VirtualTime):
self.simulator.go(5)
timeout = 1
else:
timeout = 5
self._expect('coaps response', timeout=timeout)
def commissioner_mgmtset(self, tlvs_binary):
cmd = 'commissioner mgmtset binary %s' % tlvs_binary
self.send_command(cmd)
self._expect('Done')
def bytes_to_hex_str(self, src):
return ''.join(format(x, '02x') for x in src)
def commissioner_mgmtset_with_tlvs(self, tlvs):
payload = bytearray()
for tlv in tlvs:
payload += tlv.to_hex()
self.commissioner_mgmtset(self.bytes_to_hex_str(payload))
def udp_start(self, local_ipaddr, local_port):
cmd = 'udp open'
self.send_command(cmd)
self._expect('Done')
cmd = 'udp bind %s %s' % (local_ipaddr, local_port)
self.send_command(cmd)
self._expect('Done')
def udp_stop(self):
cmd = 'udp close'
self.send_command(cmd)
self._expect('Done')
def udp_send(self, bytes, ipaddr, port, success=True):
cmd = 'udp send %s %d -s %d ' % (ipaddr, port, bytes)
self.send_command(cmd)
if success:
self._expect('Done')
else:
self._expect('Error')
def udp_check_rx(self, bytes_should_rx):
self._expect('%d bytes' % bytes_should_rx)
def set_routereligible(self, enable: bool):
cmd = f'routereligible {"enable" if enable else "disable"}'
self.send_command(cmd)
self._expect('Done')
def router_list(self):
cmd = 'router list'
self.send_command(cmd)
self._expect([r'(\d+)((\s\d+)*)'])
g = self.pexpect.match.groups()
router_list = g[0].decode('utf8') + ' ' + g[1].decode('utf8')
router_list = [int(x) for x in router_list.split()]
self._expect('Done')
return router_list
def router_table(self):
cmd = 'router table'
self.send_command(cmd)
self._expect(r'(.*)Done')
g = self.pexpect.match.groups()
output = g[0].decode("utf-8")
lines = output.strip().split('\n')
lines = [l.strip() for l in lines]
router_table = {}
for i, line in enumerate(lines):
if not line.startswith('|') or not line.endswith('|'):
if i not in (0, 2):
# should not happen
print("unexpected line %d: %s" % (i, line))
continue
line = line[1:][:-1]
line = [x.strip() for x in line.split('|')]
if len(line) != 8:
print("unexpected line %d: %s" % (i, line))
continue
try:
int(line[0])
except ValueError:
if i != 1:
print("unexpected line %d: %s" % (i, line))
continue
id = int(line[0])
rloc16 = int(line[1], 16)
nexthop = int(line[2])
pathcost = int(line[3])
lqin = int(line[4])
lqout = int(line[5])
age = int(line[6])
emac = str(line[7])
router_table[id] = {
'rloc16': rloc16,
'nexthop': nexthop,
'pathcost': pathcost,
'lqin': lqin,
'lqout': lqout,
'age': age,
'emac': emac,
}
return router_table
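# Sketch of the 'router table' CLI output parsed above (rows and values are hypothetical):
#   | ID | RLOC16 | Next Hop | Path Cost | LQ In | LQ Out | Age | Extended MAC     |
#   +----+--------+----------+-----------+-------+--------+-----+------------------+
#   |  1 | 0x0400 |       63 |         0 |     3 |      3 |  23 | 0aeb8196c9f61658 |
# Each data row yields one router_table entry keyed by ID.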
if __name__ == '__main__':
unittest.main()
| []
| []
| [
"THREAD_VERSION",
"USE_MTD",
"OT_NCP_PATH_1_2_BBR",
"OT_NCP_PATH_1_1",
"top_builddir",
"RADIO_DEVICE",
"OT_NCP_PATH",
"OT_CLI_PATH_1_1",
"OT_CLI_PATH",
"top_builddir_1_1",
"top_builddir_1_2_bbr",
"VERBOSE",
"RADIO_DEVICE_1_1",
"DEBUG",
"OT_CLI_PATH_1_2_BBR",
"NODE_TYPE"
]
| [] | ["THREAD_VERSION", "USE_MTD", "OT_NCP_PATH_1_2_BBR", "OT_NCP_PATH_1_1", "top_builddir", "RADIO_DEVICE", "OT_NCP_PATH", "OT_CLI_PATH_1_1", "OT_CLI_PATH", "top_builddir_1_1", "top_builddir_1_2_bbr", "VERBOSE", "RADIO_DEVICE_1_1", "DEBUG", "OT_CLI_PATH_1_2_BBR", "NODE_TYPE"] | python | 16 | 0 | |
workers/btcbroadcastingmanager.go | package workers
import (
"encoding/json"
"fmt"
"os"
"strconv"
"sync"
"time"
"github.com/btcsuite/btcd/rpcclient"
"github.com/incognitochain/portal-workers/utils"
"github.com/incognitochain/portal-workers/utxomanager"
"github.com/syndtr/goleveldb/leveldb"
)
const (
MaxUnshieldFee = 1000000
InitIncBlockBatchSize = 1000
FirstBroadcastTxBlockHeight = 1
TimeoutBTCFeeReplacement = 200
TimeIntervalBTCFeeReplacement = 50
BroadcastingManagerDBFileDir = "db/broadcastingmanager"
BroadcastingManagerDBObjectName = "BTCBroadcast-LastUpdate"
)
type BTCBroadcastingManager struct {
WorkerAbs
btcClient *rpcclient.Client
bitcoinFee uint
db *leveldb.DB
}
type BroadcastTx struct {
TxContent string // only has a value once the tx has been broadcasted
TxHash string // only has a value once the tx has been broadcasted
VSize int
RBFReqTxID string
FeePerRequest uint
NumOfRequests uint
IsBroadcasted bool
BlkHeight uint64 // Incognito chain height at the time this tx was broadcasted
}
type BroadcastTxArrayObject struct {
TxArray map[string]map[string]*BroadcastTx // key: batchID | RBFReqTxID
NextBlkHeight uint64 // height of the next block to scan in the Incognito chain
}
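// Sketch of the state object persisted to leveldb under BroadcastingManagerDBObjectName
// (all field values below are illustrative only):
//
//	{
//	  "TxArray": {
//	    "batch-1": {
//	      "rbf-req-tx-id": {
//	        "TxContent": "raw-signed-tx-hex", "TxHash": "btc-tx-hash", "VSize": 210,
//	        "RBFReqTxID": "rbf-req-tx-id", "FeePerRequest": 40, "NumOfRequests": 2,
//	        "IsBroadcasted": true, "BlkHeight": 12345
//	      }
//	    }
//	  },
//	  "NextBlkHeight": 12346
//	}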
func (b *BTCBroadcastingManager) Init(
id int, name string, freq int, network string, utxoManager *utxomanager.UTXOManager,
) error {
b.WorkerAbs.Init(id, name, freq, network, utxoManager)
var err error
// init bitcoin rpcclient
b.btcClient, err = utils.BuildBTCClient()
if err != nil {
b.ExportErrorLog(fmt.Sprintf("Could not initialize Bitcoin RPCClient - with err: %v", err))
return err
}
return nil
}
func (b *BTCBroadcastingManager) ExportErrorLog(msg string) {
b.WorkerAbs.ExportErrorLog(msg)
}
func (b *BTCBroadcastingManager) ExportInfoLog(msg string) {
b.WorkerAbs.ExportInfoLog(msg)
}
// This function will execute a worker that has 3 main tasks:
// - Broadcast unshielding transactions to the Bitcoin network
// - Check whether a broadcasted Bitcoin transaction is stuck and, if so, request a fee-replacement (RBF) transaction
// - Check confirmations of broadcasted Bitcoin transactions and notify the Incognito chain
func (b *BTCBroadcastingManager) Execute() {
b.ExportErrorLog("BTCBroadcastingManager worker is executing...")
// init leveldb instance
var err error
b.db, err = leveldb.OpenFile(BroadcastingManagerDBFileDir, nil)
if err != nil {
b.ExportErrorLog(fmt.Sprintf("Could not open leveldb storage file - with err: %v", err))
return
}
defer b.db.Close()
nextBlkHeight := uint64(FirstBroadcastTxBlockHeight)
broadcastTxArray := map[string]map[string]*BroadcastTx{}
// restore from db
lastUpdateBytes, err := b.db.Get([]byte(BroadcastingManagerDBObjectName), nil)
if err == nil {
var broadcastTxsDBObject *BroadcastTxArrayObject
json.Unmarshal(lastUpdateBytes, &broadcastTxsDBObject)
nextBlkHeight = broadcastTxsDBObject.NextBlkHeight
broadcastTxArray = broadcastTxsDBObject.TxArray
}
shardID, _ := strconv.Atoi(os.Getenv("SHARD_ID"))
go getCurrentRelayingFee()
for {
isBTCNodeAlive := getBTCFullnodeStatus(b.btcClient)
if !isBTCNodeAlive {
b.ExportErrorLog("Could not connect to BTC full node")
return
}
feeRWLock.RLock()
if feePerVByte < 0 {
b.ExportErrorLog("Could not get fee from external API")
time.Sleep(3 * time.Minute)
feeRWLock.RUnlock()
return
}
b.bitcoinFee = uint(feePerVByte)
feeRWLock.RUnlock()
// wait until next blocks available
var curIncBlkHeight uint64
for {
curIncBlkHeight, err = getFinalizedShardHeight(b.UTXOManager.IncClient, b.Logger, -1)
if err != nil {
b.ExportErrorLog(fmt.Sprintf("Could not get latest beacon height - with err: %v", err))
return
}
if nextBlkHeight <= curIncBlkHeight {
break
}
time.Sleep(40 * time.Second)
}
var IncBlockBatchSize uint64
if nextBlkHeight+InitIncBlockBatchSize-1 <= curIncBlkHeight {
IncBlockBatchSize = InitIncBlockBatchSize
} else {
IncBlockBatchSize = curIncBlkHeight - nextBlkHeight + 1
}
fmt.Printf("Next Scan Block Height: %v, Batch Size: %v, Current Finalized Block Height: %v\n", nextBlkHeight, IncBlockBatchSize, curIncBlkHeight)
batchIDs, err := getBatchIDsFromBeaconHeight(nextBlkHeight+IncBlockBatchSize-1, b.RPCClient, b.Logger, uint64(FirstBroadcastTxBlockHeight))
if err != nil {
b.ExportErrorLog(fmt.Sprintf("Could not retrieve batches from beacon block %v - with err: %v", nextBlkHeight+IncBlockBatchSize-1, err))
return
}
newBroadcastTxArray, err := b.getBroadcastTx(broadcastTxArray, batchIDs, curIncBlkHeight)
if err != nil {
b.ExportErrorLog(fmt.Sprintf("Could not retrieve broadcast txs - with err: %v", err))
return
}
for batchID, batchInfo := range newBroadcastTxArray {
for _, tx := range batchInfo {
if tx.IsBroadcasted {
fmt.Printf("Broadcast tx for batch %v, content %v \n", batchID, tx.TxContent)
err := b.broadcastTx(tx.TxContent)
if err != nil {
b.ExportErrorLog(fmt.Sprintf("Could not broadcast tx %v - with err: %v", tx.TxHash, err))
continue
}
} else {
fmt.Printf("Does not broadcast tx for batch %v has fee %v is not enough\n", batchID, tx.FeePerRequest)
}
}
}
broadcastTxArray = joinTxArray(broadcastTxArray, newBroadcastTxArray)
// check confirmed -> send rpc to notify the Inc chain
relayingBTCHeight, err := getLatestBTCHeightFromIncog(b.RPCBTCRelayingReaders)
if err != nil {
b.ExportErrorLog(fmt.Sprintf("Could not retrieve Inc relaying BTC block height - with err: %v", err))
return
}
var wg sync.WaitGroup
// submit confirmation requests by checking BTC tx
for batchID, txArray := range broadcastTxArray {
for _, tx := range txArray {
curBatchID := batchID
curTx := tx
isConfirmed, btcBlockHeight := b.isConfirmedBTCTx(curTx.TxHash)
if isConfirmed && btcBlockHeight+BTCConfirmationThreshold-1 <= relayingBTCHeight {
fmt.Printf("BTC Tx %v is confirmed\n", curTx.TxHash)
// submit confirmed tx
wg.Add(1)
go func() {
defer wg.Done()
// generate BTC proof
btcProof, err := utils.BuildProof(b.btcClient, curTx.TxHash, btcBlockHeight)
if err != nil {
b.ExportErrorLog(fmt.Sprintf("Could not generate BTC proof for batch %v - with err: %v", curBatchID, err))
return
}
txID, err := b.submitConfirmedTx(btcProof, curBatchID)
if err != nil {
b.ExportErrorLog(fmt.Sprintf("Could not submit confirmed tx for batch %v - with err: %v", curBatchID, err))
return
}
status, err := b.getSubmitConfirmedTxStatus(txID)
if err != nil {
b.ExportErrorLog(fmt.Sprintf("Could not get submit confirmed tx status for batch %v, txID %v - with err: %v", curBatchID, txID, err))
} else {
ok := isFinalizedTx(b.UTXOManager.IncClient, b.Logger, shardID, txID)
if !ok {
return
}
if status == 0 { // rejected
b.ExportErrorLog(fmt.Sprintf("Send confirmation failed for batch %v, txID %v", curBatchID, txID))
} else { // succeed
b.ExportInfoLog(fmt.Sprintf("Send confirmation succeed for batch %v, txID %v", curBatchID, txID))
}
}
}()
}
}
}
wg.Wait()
confirmedBatchIDChan := make(chan string, len(broadcastTxArray))
// check whether unshielding batches are completed by batch ID
for batchID := range broadcastTxArray {
curBatchID := batchID
wg.Add(1)
go func() {
defer wg.Done()
status, err := b.getUnshieldingBatchStatus(curBatchID)
if err != nil {
b.ExportErrorLog(fmt.Sprintf("Could not get batch %v status - with err: %v", curBatchID, err))
} else if status.Status == 1 { // completed
b.ExportInfoLog(fmt.Sprintf("Batch %v is completed", curBatchID))
confirmedBatchIDChan <- curBatchID
}
}()
}
wg.Wait()
close(confirmedBatchIDChan)
for batchID := range confirmedBatchIDChan {
delete(broadcastTxArray, batchID)
}
// check if waiting too long -> send rpc to notify the Inc chain for fee replacement
for batchID, txArray := range broadcastTxArray {
tx := getLastestBroadcastTx(txArray)
curBatchID := batchID
curTx := tx
if b.isTimeoutBTCTx(curTx, curIncBlkHeight) { // waiting too long
wg.Add(1)
go func() {
defer wg.Done()
newFee := utils.GetNewFee(curTx.VSize, curTx.FeePerRequest, curTx.NumOfRequests, b.bitcoinFee)
if newFee > MaxUnshieldFee {
return
}
fmt.Printf("Old fee %v, request new fee %v for batchID %v\n", curTx.FeePerRequest, newFee, curBatchID)
// notify the Inc chain for fee replacement
txID, err := b.requestFeeReplacement(curBatchID, newFee)
if err != nil {
b.ExportErrorLog(fmt.Sprintf("Could not request RBF for batch %v - with err: %v", curBatchID, err))
return
}
status, err := b.getRequestFeeReplacementTxStatus(txID)
if err != nil {
b.ExportErrorLog(fmt.Sprintf("Could not request RBF tx status for batch %v, txID %v - with err: %v", curBatchID, txID, err))
} else {
ok := isFinalizedTx(b.UTXOManager.IncClient, b.Logger, shardID, txID)
if !ok {
return
}
if status == 0 { // rejected
b.ExportErrorLog(fmt.Sprintf("Send RBF request failed for batch %v, txID %v", curBatchID, txID))
} else {
b.ExportInfoLog(fmt.Sprintf("Send RBF request succeed for batch %v, txID %v", curBatchID, txID))
}
}
}()
}
}
wg.Wait()
nextBlkHeight += IncBlockBatchSize
// update to db
BroadcastTxArrayObjectBytes, _ := json.Marshal(&BroadcastTxArrayObject{
TxArray: broadcastTxArray,
NextBlkHeight: nextBlkHeight,
})
err = b.db.Put([]byte(BroadcastingManagerDBObjectName), BroadcastTxArrayObjectBytes, nil)
if err != nil {
b.ExportErrorLog(fmt.Sprintf("Could not save object to db - with err: %v", err))
return
}
sleepingTime := 10
fmt.Printf("Sleeping: %v seconds\n", sleepingTime)
time.Sleep(time.Duration(sleepingTime) * time.Second)
}
}
| [
"\"SHARD_ID\""
]
| []
| [
"SHARD_ID"
]
| [] | ["SHARD_ID"] | go | 1 | 0 | |
vnfs/DAaaS/microservices/prom-kafka-writer/cmd/prom-kafka-writer/main.go | /*
*
* Copyright 2019 Intel Corporation.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package main
import (
"context"
"net/http"
"os"
"os/signal"
"time"
"prom-kafka-writer/pkg/api"
logger "prom-kafka-writer/pkg/config"
kw "prom-kafka-writer/pkg/kafkawriter"
)
const defaultAddr = ":8686"
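// Hedged usage example (assumes the built binary is named prom-kafka-writer; outside
// Kubernetes the PORT variable is simply set in the shell):
//
//	PORT=9090 ./prom-kafka-writer   # serves on :9090; falls back to :8686 when PORT is unset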
// main starts an http server on the port given by the $PORT environment variable.
func main() {
log := logger.GetLoggerInstance()
addr := defaultAddr
// $PORT environment variable is provided in the Kubernetes deployment.
if p := os.Getenv("PORT"); p != "" {
addr = ":" + p
}
log.Infow("Starting Prometheus Kafka writer", "addr", addr)
defer log.Infow("Prometheus Kafka writer Terminated")
s := &http.Server{
Handler: api.NewRouter(),
Addr: addr,
}
// shutdown hook. Wait for clean up if the pod/container is killed
shutdownChannel := make(chan struct{})
go func() {
log.Debug("msg", "Creating shutdown hooks")
sigChan := make(chan os.Signal, 1)
signal.Notify(sigChan, os.Interrupt)
<-sigChan
log.Debug("msg", "Received os.Interrupt")
log.Debug("msg", "Initiate cleanup")
//TODO: Cleanup here
kw.Cleanup()
time.Sleep(time.Second * 3)
_ = s.Shutdown(context.Background())
close(shutdownChannel)
}()
err := s.ListenAndServe()
if err != nil {
log.Fatalw("Server Error - Shutting down", "error", err)
}
<-shutdownChannel
}
| [
"\"PORT\""
]
| []
| [
"PORT"
]
| [] | ["PORT"] | go | 1 | 0 | |
pkg/build/builder/cmd/dockercfg/cfg.go | package dockercfg
import (
"encoding/base64"
"encoding/json"
"io/ioutil"
"os"
"os/user"
"path/filepath"
"strings"
"github.com/fsouza/go-dockerclient"
"github.com/spf13/pflag"
)
//TODO: Remove this code once the methods in Kubernetes kubelet/dockertools/config.go are public
// Default docker registry server
const defaultRegistryServer = "https://index.docker.io/v1/"
// Helper contains all the valid config options for reading the local dockercfg file
type Helper struct {
}
// NewHelper creates a Flags object with the default values set.
func NewHelper() *Helper {
return &Helper{}
}
// InstallFlags installs the Docker flag helper into a FlagSet with the default
// options and default values from the Helper object.
func (_ *Helper) InstallFlags(flags *pflag.FlagSet) {
}
// GetDockerAuth returns a valid Docker AuthConfiguration entry, and whether it was read
// from the local dockercfg file
func (_ *Helper) GetDockerAuth(registry string) (docker.AuthConfiguration, bool) {
var authCfg docker.AuthConfiguration
dockercfgPath := getDockercfgFile("")
if _, err := os.Stat(dockercfgPath); err != nil {
return authCfg, false
}
cfg, err := readDockercfg(dockercfgPath)
if err != nil {
return authCfg, false
}
server := registry
if server == "" {
server = defaultRegistryServer
}
entry, ok := cfg[server]
if !ok {
return authCfg, false
}
uname, pass, err := getCredentials(entry.Auth)
if err != nil {
return authCfg, false
}
authCfg.Username = uname
authCfg.Password = pass
return authCfg, true
}
// getDockercfgFile returns the path to the dockercfg file
func getDockercfgFile(path string) string {
var cfgPath string
if path != "" {
cfgPath = path
} else if os.Getenv("DOCKERCFG_PATH") != "" {
cfgPath = os.Getenv("DOCKERCFG_PATH")
} else if currentUser, err := user.Current(); err == nil {
cfgPath = filepath.Join(currentUser.HomeDir, ".dockercfg")
}
return cfgPath
}
// authEntry is a single entry for a given server in a
// .dockercfg file
type authEntry struct {
	Auth  string `json:"auth"`
	Email string `json:"email"`
}
// dockercfg represents the contents of a .dockercfg file
type dockercfg map[string]authEntry
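// Illustrative example (added for clarity, not part of the original source): a
// minimal .dockercfg file that readDockercfg below can parse is keyed by the
// registry server and carries a base64-encoded "user:password" auth value, e.g.
//	{
//	  "https://index.docker.io/v1/": {
//	    "auth": "dXNlcjpwYXNzd29yZA==",
//	    "email": "[email protected]"
//	  }
//	}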
// readDockercfg reads the contents of a .dockercfg file into a map
// with server name keys and AuthEntry values
func readDockercfg(filePath string) (cfg dockercfg, err error) {
content, err := ioutil.ReadFile(filePath)
if err != nil {
return
}
cfg = dockercfg{}
if err := json.Unmarshal(content, &cfg); err != nil {
return nil, err
}
return
}
// getCredentials parses an auth string inside a dockercfg file into
// a username and password
func getCredentials(auth string) (username, password string, err error) {
creds, err := base64.StdEncoding.DecodeString(auth)
if err != nil {
return
}
unamepass := strings.Split(string(creds), ":")
username = unamepass[0]
password = unamepass[1]
return
}
| [
"\"DOCKERCFG_PATH\"",
"\"DOCKERCFG_PATH\""
]
| []
| [
"DOCKERCFG_PATH"
]
| [] | ["DOCKERCFG_PATH"] | go | 1 | 0 | |
handwritten_ocr/apply_ocrmypdf.py | import os
from dataclasses import dataclass
from pathlib import Path
from typing import Union
import ocrmypdf
from misc_utils.cached_data import CachedData
from misc_utils.dataclass_utils import UNDEFINED, _UNDEFINED
from misc_utils.processing_utils import exec_command
@dataclass
class OCRMyPDFsFolder(CachedData):
folder: Union[_UNDEFINED, str] = UNDEFINED
lang: Union[_UNDEFINED, str] = UNDEFINED
name: Union[_UNDEFINED, str] = UNDEFINED
def _build_cache(self):
for pdf_file in Path(self.folder).rglob("*.*"):
print(f"{pdf_file}")
            output_dir = self.prefix_cache_dir("ocred_pdfs")
            os.makedirs(output_dir)
            ocrmypdf.ocr(
                input_file=pdf_file,
                output_file=f"{output_dir}/{pdf_file.stem}_ocr.pdf",
                language="spa",
            )
            # _,e=exec_command(f"ocrmypdf -l {self.lang} -r {pdf_file} {output_dir}/{pdf_file.stem}_ocr.pdf")
# if len(e)>0:
# print(f"failed with: {e}")
break
if __name__ == "__main__":
# https://ocrmypdf.readthedocs.io/en/latest/batch.html -> TODO!
# cache_base = os.environ['DATA_PATH']
# OCRMyPDFsFolder(
# name="esc_cong_2018",
# lang="spa",
# folder=f"{cache_base}/WgetPdfs-esc_cong_2018-64ef3d6edcc9a7961dab1c80f2d9e07569e82362/pdfs",
# cache_base=cache_base,
# ).build()
base_path = "handwritten_ocr"
pdf_file = f"handwritten_ocr/data/e14_cong_2018__e14_divulgacion_01_001_001_CAM_E14_CAM_X_01_001_001_XX_01_005_X_XXX.pdf"
ocrmypdf.ocr(
input_file=pdf_file,
output_file=f"handwritten_ocr/{Path(pdf_file).stem}_ocr.pdf",
language="spa",
)
| []
| []
| [
"DATA_PATH"
]
| [] | ["DATA_PATH"] | python | 1 | 0 | |
grpc/client.go | /*
Copyright 2018 Iguazio Systems Ltd.
Licensed under the Apache License, Version 2.0 (the "License") with
an addition restriction as set forth herein. You may not use this
file except in compliance with the License. You may obtain a copy of
the License at http://www.apache.org/licenses/LICENSE-2.0.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing
permissions and limitations under the License.
In addition, you may not use the software for any purposes that are
illegal under applicable law, and the grant of the foregoing license
under the Apache 2.0 license is conditioned upon your compliance with
such restriction.
*/
package grpc
import (
"context"
"fmt"
"github.com/v3io/frames"
"github.com/v3io/frames/pb"
"io"
"os"
"time"
"github.com/nuclio/logger"
"github.com/pkg/errors"
"google.golang.org/grpc"
)
// Client is frames gRPC client
type Client struct {
client pb.FramesClient
session *frames.Session
}
var (
// Make sure we're implementing frames.Client
_ frames.Client = &Client{}
)
// NewClient returns a new gRPC client
func NewClient(address string, session *frames.Session, logger logger.Logger) (*Client, error) {
if address == "" {
address = os.Getenv("V3IO_URL")
}
if address == "" {
return nil, fmt.Errorf("empty address")
}
conn, err := grpc.Dial(
address,
grpc.WithInsecure(),
grpc.WithMaxMsgSize(grpcMsgSize),
)
if err != nil {
return nil, errors.Wrap(err, "can't create gRPC connection")
}
if session == nil {
var err error
session, err = frames.SessionFromEnv()
if err != nil {
return nil, err
}
}
client := &Client{
client: pb.NewFramesClient(conn),
session: session,
}
return client, nil
}
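// Illustrative usage sketch (added for clarity, not in the original file); the
// address falls back to the V3IO_URL environment variable and, when session is
// nil, the session is read from the standard frames environment variables.
// Here myLogger stands for any nuclio logger.Logger instance:
//	client, err := NewClient("framesd:8081", nil, myLogger)
//	if err != nil {
//		// handle the error
//	}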
func (c *Client) Read(request *frames.ReadRequest) (frames.FrameIterator, error) {
if request.Session == nil {
request.Session = c.session
}
stream, err := c.client.Read(context.Background(), request)
if err != nil {
return nil, err
}
it := &frameIterator{
stream: stream,
}
return it, nil
}
func (c *Client) Write(request *frames.WriteRequest) (frames.FrameAppender, error) {
if request.Session == nil {
request.Session = c.session
}
var frame *pb.Frame
if request.ImmidiateData != nil {
proto, ok := request.ImmidiateData.(pb.Framed)
if !ok {
return nil, errors.Errorf("unknown frame type")
}
frame = proto.Proto()
}
stream, err := c.client.Write(context.Background())
if err != nil {
return nil, err
}
ireq := &pb.InitialWriteRequest{
Session: request.Session,
Backend: request.Backend,
Table: request.Table,
InitialData: frame,
Expression: request.Expression,
More: request.HaveMore,
}
req := &pb.WriteRequest{
Type: &pb.WriteRequest_Request{
Request: ireq,
},
}
if err := stream.Send(req); err != nil {
stream.CloseAndRecv()
return nil, err
}
fa := &frameAppender{
stream: stream,
closed: false,
}
return fa, nil
}
// Create creates a table
func (c *Client) Create(request *frames.CreateRequest) error {
if request.Session == nil {
request.Session = c.session
}
_, err := c.client.Create(context.Background(), request)
return err
}
// Delete deletes data or table
func (c *Client) Delete(request *frames.DeleteRequest) error {
if request.Session == nil {
request.Session = c.session
}
_, err := c.client.Delete(context.Background(), request)
return err
}
// Exec executes a command on the backend
func (c *Client) Exec(request *frames.ExecRequest) (frames.Frame, error) {
if request.Session == nil {
request.Session = c.session
}
msg, err := c.client.Exec(context.Background(), request)
if err != nil {
return nil, err
}
var frame frames.Frame
if msg.Frame != nil {
frame = frames.NewFrameFromProto(msg.Frame)
}
return frame, nil
}
type frameIterator struct {
stream pb.Frames_ReadClient
frame frames.Frame
err error
done bool
}
func (it *frameIterator) Next() bool {
if it.done || it.err != nil {
return false
}
it.frame = nil
msg, err := it.stream.Recv()
if err != nil {
if err != io.EOF {
it.err = err
}
return false
}
it.frame = frames.NewFrameFromProto(msg)
return true
}
func (it *frameIterator) Err() error {
return it.err
}
func (it *frameIterator) At() frames.Frame {
return it.frame
}
type frameAppender struct {
stream pb.Frames_WriteClient
closed bool
}
func (fa *frameAppender) Add(frame frames.Frame) error {
if fa.closed {
return fmt.Errorf("stream closed")
}
pbf, ok := frame.(pb.Framed)
if !ok {
return errors.New("unknown frame type")
}
fMsg := pbf.Proto()
msg := &pb.WriteRequest{
Type: &pb.WriteRequest_Frame{
Frame: fMsg,
},
}
if err := fa.stream.Send(msg); err != nil {
fa.stream.CloseAndRecv()
fa.closed = true
return err
}
return nil
}
func (fa *frameAppender) WaitForComplete(timeout time.Duration) error {
if fa.closed {
return fmt.Errorf("stream closed")
}
// TODO: timeout
_, err := fa.stream.CloseAndRecv()
return err
}
| [
"\"V3IO_URL\""
]
| []
| [
"V3IO_URL"
]
| [] | ["V3IO_URL"] | go | 1 | 0 | |
pkg/cmd/step_bdd.go | package cmd
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
"github.com/jenkins-x/jx/pkg/cloud"
"github.com/jenkins-x/jx/pkg/cmd/create"
"github.com/jenkins-x/jx/pkg/cmd/deletecmd"
"github.com/jenkins-x/jx/pkg/cmd/helper"
v1 "github.com/jenkins-x/jx/pkg/apis/jenkins.io/v1"
"github.com/jenkins-x/jx/pkg/cmd/bdd"
"github.com/jenkins-x/jx/pkg/cmd/opts"
"github.com/jenkins-x/jx/pkg/cmd/templates"
"github.com/jenkins-x/jx/pkg/config"
"github.com/jenkins-x/jx/pkg/gits"
configio "github.com/jenkins-x/jx/pkg/io"
"github.com/jenkins-x/jx/pkg/kube"
"github.com/jenkins-x/jx/pkg/log"
"github.com/jenkins-x/jx/pkg/util"
"github.com/pkg/errors"
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
optionDefaultAdminPassword = "default-admin-password"
)
// StepBDDOptions contains the command line arguments for this command
type StepBDDOptions struct {
opts.StepOptions
InstallOptions create.InstallOptions
Flags StepBDDFlags
}
type StepBDDFlags struct {
GoPath string
GitProvider string
GitOwner string
ReportsOutputDir string
UseCurrentTeam bool
DeleteTeam bool
DisableDeleteApp bool
DisableDeleteRepo bool
IgnoreTestFailure bool
Parallel bool
VersionsDir string
VersionsRepository string
VersionsGitRef string
ConfigFile string
TestRepoGitCloneUrl string
SkipRepoGitClone bool
UseRevision bool
TestGitBranch string
TestGitPrNumber string
JxBinary string
TestCases []string
VersionsRepoPr bool
BaseDomain string
}
var (
stepBDDLong = templates.LongDesc(`
This pipeline step lets you run the BDD tests in the current team in a current cluster or create a new cluster/team run tests there then tear things down again.
`)
stepBDDExample = templates.Examples(`
# run the BDD tests in the current team
jx step bdd --use-current-team --git-provider-url=https://my.git.server.com
# create a new team for the tests, run the tests then tear everything down again
jx step bdd -b --provider=gke --git-provider=ghe --git-provider-url=https://my.git.server.com --default-admin-password=myadminpwd --git-username myuser --git-api-token mygittoken
`)
)
func NewCmdStepBDD(commonOpts *opts.CommonOptions) *cobra.Command {
options := StepBDDOptions{
StepOptions: opts.StepOptions{
CommonOptions: commonOpts,
},
InstallOptions: create.CreateInstallOptions(commonOpts),
}
cmd := &cobra.Command{
Use: "bdd",
Short: "Performs the BDD tests on the current cluster, new clusters or teams",
Long: stepBDDLong,
Example: stepBDDExample,
Run: func(cmd *cobra.Command, args []string) {
options.Cmd = cmd
options.Args = args
err := options.Run()
helper.CheckErr(err)
},
}
installOptions := &options.InstallOptions
installOptions.AddInstallFlags(cmd, true)
cmd.Flags().StringVarP(&options.Flags.BaseDomain, "base-domain", "", "", "the base domain to use when creating the cluster")
cmd.Flags().StringVarP(&options.Flags.ConfigFile, "config", "c", "", "the config YAML file containing the clusters to create")
cmd.Flags().StringVarP(&options.Flags.GoPath, "gopath", "", "", "the GOPATH directory where the BDD test git repository will be cloned")
cmd.Flags().StringVarP(&options.Flags.GitProvider, "git-provider", "g", "", "the git provider kind")
cmd.Flags().StringVarP(&options.Flags.GitOwner, "git-owner", "", "", "the git owner of new git repositories created by the tests")
cmd.Flags().StringVarP(&options.Flags.ReportsOutputDir, "reports-dir", "", "reports", "the directory used to copy in any generated report files")
cmd.Flags().StringVarP(&options.Flags.TestRepoGitCloneUrl, "test-git-repo", "r", "https://github.com/jenkins-x/bdd-jx.git", "the git repository to clone for the BDD tests")
cmd.Flags().BoolVarP(&options.Flags.SkipRepoGitClone, "skip-test-git-repo-clone", "", false, "Skip cloning the bdd test git repo")
cmd.Flags().StringVarP(&options.Flags.JxBinary, "binary", "", "jx", "the binary location of the 'jx' executable for creating clusters")
cmd.Flags().StringVarP(&options.Flags.TestGitBranch, "test-git-branch", "", "master", "the git repository branch to use for the BDD tests")
cmd.Flags().StringVarP(&options.Flags.TestGitPrNumber, "test-git-pr-number", "", "", "the Pull Request number to fetch from the repository for the BDD tests")
cmd.Flags().StringArrayVarP(&options.Flags.TestCases, "tests", "t", []string{"test-quickstart-node-http"}, "the list of the test cases to run")
cmd.Flags().StringVarP(&options.Flags.VersionsDir, "dir", "", "", "the git clone of the jenkins-x/jenkins-x-versions git repository. Used to default the version of jenkins-x-platform when creating clusters if no --version option is supplied")
cmd.Flags().BoolVarP(&options.Flags.DeleteTeam, "delete-team", "", true, "Whether we should delete the Team we create for each Git Provider")
cmd.Flags().BoolVarP(&options.Flags.DisableDeleteApp, "no-delete-app", "", false, "Disables deleting the created app after the test")
cmd.Flags().BoolVarP(&options.Flags.DisableDeleteRepo, "no-delete-repo", "", false, "Disables deleting the created repository after the test")
cmd.Flags().BoolVarP(&options.Flags.UseCurrentTeam, "use-current-team", "", false, "If enabled lets use the current Team to run the tests")
cmd.Flags().BoolVarP(&options.Flags.IgnoreTestFailure, "ignore-fail", "i", false, "Ignores test failures so that a BDD test run can capture the output and report on the test passes/failures")
	cmd.Flags().BoolVarP(&options.Flags.Parallel, "parallel", "", false, "Should we process each cluster configuration in parallel")
cmd.Flags().BoolVarP(&options.Flags.UseRevision, "use-revision", "", true, "Use the git revision from the current git clone instead of the Pull Request branch")
cmd.Flags().BoolVarP(&options.Flags.VersionsRepoPr, "version-repo-pr", "", false, "For use with jenkins-x-versions PR. Indicates the git revision of the PR should be used to clone the jenkins-x-versions")
cmd.Flags().StringVarP(&installOptions.Flags.Provider, "provider", "", "", "Cloud service providing the Kubernetes cluster. Supported providers: "+cloud.KubernetesProviderOptions())
return cmd
}
func (o *StepBDDOptions) Run() error {
flags := &o.Flags
var err error
if o.Flags.GoPath == "" {
o.Flags.GoPath = os.Getenv("GOPATH")
if o.Flags.GoPath == "" {
o.Flags.GoPath, err = os.Getwd()
if err != nil {
return err
}
}
}
if o.InstallOptions.Flags.VersionsRepository == "" {
o.InstallOptions.Flags.VersionsRepository = opts.DefaultVersionsURL
}
gitProviderUrl := o.gitProviderUrl()
if gitProviderUrl == "" {
return util.MissingOption("git-provider-url")
}
fileName := flags.ConfigFile
if fileName == "" {
return o.runOnCurrentCluster()
}
config, err := bdd.LoadBddClusters(fileName)
if err != nil {
return err
}
if len(config.Clusters) == 0 {
return fmt.Errorf("No clusters specified in configuration file %s", fileName)
}
// TODO handle parallel...
errors := []error{}
for _, cluster := range config.Clusters {
err := o.createCluster(cluster)
if err != nil {
return err
}
defer o.deleteCluster(cluster)
err = o.runTests(o.Flags.GoPath)
if err != nil {
log.Logger().Warnf("Failed to perform tests on cluster %s: %s", cluster.Name, err)
errors = append(errors, err)
}
}
return util.CombineErrors(errors...)
}
// runOnCurrentCluster runs the tests on the current cluster
func (o *StepBDDOptions) runOnCurrentCluster() error {
var err error
gitProviderName := o.Flags.GitProvider
if gitProviderName != "" && !o.Flags.UseCurrentTeam {
gitUser := o.InstallOptions.GitRepositoryOptions.Username
if gitUser == "" {
return util.MissingOption("git-username")
}
gitToken := o.InstallOptions.GitRepositoryOptions.ApiToken
if gitToken == "" {
return util.MissingOption("git-api-token")
}
defaultAdminPassword := o.InstallOptions.AdminSecretsService.Flags.DefaultAdminPassword
if defaultAdminPassword == "" {
return util.MissingOption(optionDefaultAdminPassword)
}
defaultOptions := o.createDefaultCommonOptions()
gitProviderUrl := o.gitProviderUrl()
teamPrefix := "bdd-"
if o.InstallOptions.Flags.Tekton {
teamPrefix += "tekton-"
}
team := kube.ToValidName(teamPrefix + gitProviderName + "-" + o.teamNameSuffix())
log.Logger().Infof("Creating team %s", util.ColorInfo(team))
installOptions := o.InstallOptions
installOptions.CommonOptions = defaultOptions
installOptions.InitOptions.CommonOptions = defaultOptions
installOptions.SkipAuthSecretsMerge = true
installOptions.BatchMode = true
installOptions.InitOptions.Flags.NoTiller = true
installOptions.InitOptions.Flags.HelmClient = true
installOptions.InitOptions.Flags.SkipTiller = true
installOptions.Flags.Namespace = team
installOptions.Flags.NoDefaultEnvironments = true
installOptions.Flags.DefaultEnvironmentPrefix = team
installOptions.AdminSecretsService.Flags.DefaultAdminPassword = defaultAdminPassword
err = installOptions.Run()
if err != nil {
return errors.Wrapf(err, "Failed to install team %s", team)
}
defer o.deleteTeam(team)
defaultOptions.SetDevNamespace(team)
// now lets setup the git server
createGitServer := &create.CreateGitServerOptions{
CreateOptions: create.CreateOptions{
CommonOptions: defaultOptions,
},
Kind: gitProviderName,
Name: gitProviderName,
URL: gitProviderUrl,
}
err = o.Retry(10, time.Second*10, func() error {
err = createGitServer.Run()
if err != nil {
return errors.Wrapf(err, "Failed to create git server with kind %s at url %s in team %s", gitProviderName, gitProviderUrl, team)
}
return nil
})
if err != nil {
return err
}
createGitToken := &create.CreateGitTokenOptions{
CreateOptions: create.CreateOptions{
CommonOptions: defaultOptions,
},
ServerFlags: opts.ServerFlags{
ServerURL: gitProviderUrl,
},
Username: gitUser,
ApiToken: gitToken,
}
err = createGitToken.Run()
if err != nil {
return errors.Wrapf(err, "Failed to create git user token for user %s at url %s in team %s", gitProviderName, gitProviderUrl, team)
}
// now lets create an environment...
createEnv := &create.CreateEnvOptions{
CreateOptions: create.CreateOptions{
CommonOptions: defaultOptions,
},
HelmValuesConfig: config.HelmValuesConfig{
ExposeController: &config.ExposeController{},
},
Options: v1.Environment{
ObjectMeta: metav1.ObjectMeta{},
Spec: v1.EnvironmentSpec{
PromotionStrategy: v1.PromotionStrategyTypeAutomatic,
Order: 100,
},
},
PromotionStrategy: string(v1.PromotionStrategyTypeAutomatic),
ForkEnvironmentGitRepo: kube.DefaultEnvironmentGitRepoURL,
Prefix: team,
}
createEnv.BatchMode = true
createEnv.Options.Name = "staging"
createEnv.Options.Spec.Label = "Staging"
createEnv.GitRepositoryOptions.ServerURL = gitProviderUrl
gitOwner := o.Flags.GitOwner
if gitOwner == "" && gitUser != "" {
// lets avoid loading the git owner from the current cluster
gitOwner = gitUser
}
if gitOwner != "" {
createEnv.GitRepositoryOptions.Owner = gitOwner
}
if gitUser != "" {
createEnv.GitRepositoryOptions.Username = gitUser
}
log.Logger().Infof("using environment git owner: %s", util.ColorInfo(gitOwner))
log.Logger().Infof("using environment git user: %s", util.ColorInfo(gitUser))
err = createEnv.Run()
if err != nil {
return err
}
} else {
log.Logger().Infof("Using the default git provider for the tests")
}
return o.runTests(o.Flags.GoPath)
}
func (o *StepBDDOptions) deleteTeam(team string) error {
if !o.Flags.DeleteTeam {
log.Logger().Infof("Disabling the deletion of team: %s", util.ColorInfo(team))
return nil
}
log.Logger().Infof("Deleting team %s", util.ColorInfo(team))
deleteTeam := &deletecmd.DeleteTeamOptions{
CommonOptions: o.createDefaultCommonOptions(),
Confirm: true,
}
deleteTeam.Args = []string{team}
err := deleteTeam.Run()
if err != nil {
return errors.Wrapf(err, "Failed to delete team %s", team)
}
return nil
}
func (o *StepBDDOptions) createDefaultCommonOptions() *opts.CommonOptions {
defaultOptions := o.CommonOptions
defaultOptions.BatchMode = true
defaultOptions.Args = nil
return defaultOptions
}
func (o *StepBDDOptions) gitProviderUrl() string {
return o.InstallOptions.GitRepositoryOptions.ServerURL
}
// teamNameSuffix returns a team name suffix built from the repo name, the current branch and the build number
func (o *StepBDDOptions) teamNameSuffix() string {
repo := os.Getenv("REPO_NAME")
branch := os.Getenv("BRANCH_NAME")
buildNumber := o.GetBuildNumber()
if buildNumber == "" {
buildNumber = "1"
}
return strings.Join([]string{repo, branch, buildNumber}, "-")
}
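// For example (illustrative values only): REPO_NAME=jx, BRANCH_NAME=master and
// build number 12 produce the suffix "jx-master-12".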
func (o *StepBDDOptions) runTests(gopath string) error {
gitURL := o.Flags.TestRepoGitCloneUrl
gitRepository, err := gits.ParseGitURL(gitURL)
if err != nil {
return errors.Wrapf(err, "Failed to parse git url %s", gitURL)
}
testDir := filepath.Join(gopath, gitRepository.Organisation, gitRepository.Name)
if !o.Flags.SkipRepoGitClone {
log.Logger().Infof("cloning BDD test repository to: %s", util.ColorInfo(testDir))
err = os.MkdirAll(testDir, util.DefaultWritePermissions)
if err != nil {
return errors.Wrapf(err, "Failed to create dir %s", testDir)
}
log.Logger().Infof("Cloning git repository %s to dir %s", util.ColorInfo(gitURL), util.ColorInfo(testDir))
err = o.Git().CloneOrPull(gitURL, testDir)
if err != nil {
return errors.Wrapf(err, "Failed to clone repo %s to %s", gitURL, testDir)
}
branchName := o.Flags.TestGitBranch
pullRequestNumber := o.Flags.TestGitPrNumber
log.Logger().Infof("Checking out repository branch %s to dir %s", util.ColorInfo(branchName), util.ColorInfo(testDir))
if pullRequestNumber != "" {
err = o.Git().FetchBranch(testDir, "origin", fmt.Sprintf("pull/%s/head:%s", pullRequestNumber, branchName))
if err != nil {
return errors.Wrapf(err, "Failed to fetch Pull request number %s", pullRequestNumber)
}
}
err = o.Git().Checkout(testDir, branchName)
if err != nil {
return errors.Wrapf(err, "Failed to checkout branch %s", branchName)
}
}
env := map[string]string{
"GIT_PROVIDER_URL": o.gitProviderUrl(),
}
gitOwner := o.Flags.GitOwner
if gitOwner != "" {
env["GIT_ORGANISATION"] = gitOwner
}
if o.Flags.DisableDeleteApp {
env["JX_DISABLE_DELETE_APP"] = "true"
}
if o.Flags.DisableDeleteRepo {
env["JX_DISABLE_DELETE_REPO"] = "true"
}
awsAccessKey := os.Getenv("AWS_ACCESS_KEY_ID")
if awsAccessKey != "" {
env["AWS_ACCESS_KEY_ID"] = awsAccessKey
}
awsSecret := os.Getenv("AWS_SECRET_ACCESS_KEY")
if awsSecret != "" {
env["AWS_SECRET_ACCESS_KEY"] = awsSecret
}
awsRegion := os.Getenv("AWS_REGION")
if awsRegion != "" {
env["AWS_REGION"] = awsRegion
}
c := &util.Command{
Dir: testDir,
Name: "make",
Args: o.Flags.TestCases,
Env: env,
Out: os.Stdout,
Err: os.Stdout,
}
_, err = c.RunWithoutRetry()
err = o.reportStatus(testDir, err)
o.copyReports(testDir, err)
if o.Flags.IgnoreTestFailure && err != nil {
log.Logger().Infof("Ignoring test failure %s", err)
return nil
}
return err
}
// reportStatus runs a bunch of commands to report on the status of the cluster
func (o *StepBDDOptions) reportStatus(testDir string, err error) error {
errs := []error{}
if err != nil {
errs = append(errs, err)
}
commands := []util.Command{
{
Name: "kubectl",
Args: []string{"get", "pods"},
},
{
Name: "kubectl",
Args: []string{"get", "env", "dev", "-oyaml"},
},
{
Name: "jx",
Args: []string{"status", "-b"},
},
{
Name: "jx",
Args: []string{"version", "-b"},
},
{
Name: "jx",
Args: []string{"get", "env", "-b"},
},
{
Name: "jx",
Args: []string{"get", "activities", "-b"},
},
{
Name: "jx",
Args: []string{"get", "application", "-b"},
},
{
Name: "jx",
Args: []string{"get", "preview", "-b"},
},
{
Name: "jx",
Args: []string{"open"},
},
}
for _, cmd := range commands {
fmt.Println("")
fmt.Printf("Running %s\n\n", cmd.String())
cmd.Dir = testDir
cmd.Out = os.Stdout
cmd.Err = os.Stdout
_, err = cmd.RunWithoutRetry()
if err != nil {
errs = append(errs, err)
}
}
return util.CombineErrors(errs...)
}
func (o *StepBDDOptions) copyReports(testDir string, err error) error {
reportsDir := filepath.Join(testDir, "reports")
if _, err := os.Stat(reportsDir); os.IsNotExist(err) {
return nil
}
reportsOutputDir := o.Flags.ReportsOutputDir
if reportsOutputDir == "" {
reportsOutputDir = "reports"
}
err = os.MkdirAll(reportsOutputDir, util.DefaultWritePermissions)
if err != nil {
log.Logger().Warnf("failed to make reports output dir: %s : %s", reportsOutputDir, err)
return err
}
err = util.CopyDir(reportsDir, reportsOutputDir, true)
if err != nil {
log.Logger().Warnf("failed to copy reports dir: %s directory to: %s : %s", reportsDir, reportsOutputDir, err)
}
return err
}
func (o *StepBDDOptions) createCluster(cluster *bdd.CreateCluster) error {
buildNum := o.GetBuildNumber()
if buildNum == "" {
log.Logger().Warnf("No build number could be found from the environment variable $BUILD_NUMBER!")
}
baseClusterName := kube.ToValidName(cluster.Name)
revision := os.Getenv("PULL_PULL_SHA")
branch := o.GetBranchName(o.Flags.VersionsDir)
if branch == "" {
branch = "x"
}
log.Logger().Infof("found git revision %s: branch %s", revision, branch)
if o.Flags.VersionsRepoPr && o.InstallOptions.Flags.VersionsGitRef == "" {
if revision != "" && (branch == "" || o.Flags.UseRevision) {
o.InstallOptions.Flags.VersionsGitRef = revision
} else {
o.InstallOptions.Flags.VersionsGitRef = branch
}
} else {
o.InstallOptions.Flags.VersionsGitRef = "master"
}
log.Logger().Infof("using versions git repo %s and ref %s", o.InstallOptions.Flags.VersionsRepository, o.InstallOptions.Flags.VersionsGitRef)
cluster.Name = kube.ToValidName(branch + "-" + buildNum + "-" + cluster.Name)
log.Logger().Infof("\nCreating cluster %s", util.ColorInfo(cluster.Name))
binary := o.Flags.JxBinary
args := cluster.Args
if cluster.Terraform {
// use the cluster name as the organisation name
args = append(args, "--organisation-name", cluster.Name)
args = append(args, "--cluster-name", "dev")
} else {
args = append(args, "--cluster-name", cluster.Name)
}
if cluster.Terraform {
// use the cluster name as the organisation name
args = append(args, "--organisation-name", cluster.Name)
}
if util.StringArrayIndex(args, "-b") < 0 && util.StringArrayIndex(args, "--batch-mode") < 0 {
args = append(args, "--batch-mode")
}
if util.StringArrayIndex(args, "--version") < 0 && util.StringArrayHasPrefixIndex(args, "--version=") < 0 {
version, err := o.getVersion()
if err != nil {
return err
}
if version != "" {
args = append(args, "--version", version)
}
}
if !cluster.NoLabels {
cluster.Labels = create.AddLabel(cluster.Labels, "cluster", baseClusterName)
cluster.Labels = create.AddLabel(cluster.Labels, "branch", branch)
args = append(args, "--labels", cluster.Labels)
}
if o.Flags.BaseDomain != "" {
args = append(args, "--domain", cluster.Name+"."+o.Flags.BaseDomain)
}
gitProviderURL := o.gitProviderUrl()
if gitProviderURL != "" {
args = append(args, "--git-provider-url", gitProviderURL)
}
if o.InstallOptions.Flags.VersionsRepository != "" {
args = append(args, "--versions-repo", o.InstallOptions.Flags.VersionsRepository)
}
if o.InstallOptions.Flags.VersionsGitRef != "" {
args = append(args, "--versions-ref", o.InstallOptions.Flags.VersionsGitRef)
}
gitUsername := o.InstallOptions.GitRepositoryOptions.Username
if gitUsername != "" {
args = append(args, "--git-username", gitUsername)
}
gitOwner := o.Flags.GitOwner
if gitOwner != "" {
args = append(args, "--environment-git-owner", gitOwner)
}
gitKind := o.InstallOptions.GitRepositoryOptions.ServerKind
if gitKind != "" {
		args = append(args, "--git-provider-kind", gitKind)
}
if o.CommonOptions.InstallDependencies {
args = append(args, "--install-dependencies")
}
// expand any environment variables
for i, arg := range args {
args[i] = os.ExpandEnv(arg)
}
safeArgs := append([]string{}, args...)
gitToken := o.InstallOptions.GitRepositoryOptions.ApiToken
if gitToken != "" {
args = append(args, "--git-api-token", gitToken)
safeArgs = append(safeArgs, "--git-api-token", "**************¬")
}
adminPwd := o.InstallOptions.AdminSecretsService.Flags.DefaultAdminPassword
if adminPwd != "" {
args = append(args, "--default-admin-password", adminPwd)
safeArgs = append(safeArgs, "--default-admin-password", "**************¬")
}
log.Logger().Infof("running command: %s", util.ColorInfo(fmt.Sprintf("%s %s", binary, strings.Join(safeArgs, " "))))
// lets not log any sensitive command line arguments
e := exec.Command(binary, args...)
e.Stdout = o.Out
e.Stderr = o.Err
os.Setenv("PATH", util.PathWithBinary())
// work around for helm apply with GitOps using a k8s local Service URL
os.Setenv("CHART_REPOSITORY", kube.DefaultChartMuseumURL)
err := e.Run()
if err != nil {
log.Logger().Errorf("Error: Command failed %s %s", binary, strings.Join(safeArgs, " "))
}
return err
}
func (o *StepBDDOptions) deleteCluster(cluster *bdd.CreateCluster) error {
return nil
}
// getVersion returns the jenkins-x-platform version to use for the cluster or empty string if no specific version can be found
func (o *StepBDDOptions) getVersion() (string, error) {
version := o.InstallOptions.Flags.Version
if version != "" {
return version, nil
}
// lets try detect a local `Makefile` to find the version
dir := o.Flags.VersionsDir
version, err := create.LoadVersionFromCloudEnvironmentsDir(dir, configio.NewFileStore())
if err != nil {
return version, errors.Wrapf(err, "failed to load jenkins-x-platform version from dir %s", dir)
}
log.Logger().Infof("loaded version %s from Makefile in directory %s\n", util.ColorInfo(version), util.ColorInfo(dir))
return version, nil
}
| [
"\"GOPATH\"",
"\"REPO_NAME\"",
"\"BRANCH_NAME\"",
"\"AWS_ACCESS_KEY_ID\"",
"\"AWS_SECRET_ACCESS_KEY\"",
"\"AWS_REGION\"",
"\"PULL_PULL_SHA\""
]
| []
| [
"AWS_SECRET_ACCESS_KEY",
"AWS_REGION",
"PULL_PULL_SHA",
"GOPATH",
"BRANCH_NAME",
"REPO_NAME",
"AWS_ACCESS_KEY_ID"
]
| [] | ["AWS_SECRET_ACCESS_KEY", "AWS_REGION", "PULL_PULL_SHA", "GOPATH", "BRANCH_NAME", "REPO_NAME", "AWS_ACCESS_KEY_ID"] | go | 7 | 0 | |
projects/envoyinit/cmd/main.go | package main
import (
"log"
"os"
"syscall"
"github.com/solo-io/envoy-operator/pkg/downward"
)
func main() {
inputfile := inputCfg()
outfile := outputCfg()
transformer := downward.NewTransformer()
err := transformer.TransformFiles(inputfile, outfile)
if err != nil {
log.Fatalf("initializer failed: %v", err)
}
env := os.Environ()
args := []string{envoy(), "-c", outfile}
if len(os.Args) > 1 {
args = append(args, os.Args[1:]...)
}
if err := syscall.Exec(args[0], args, env); err != nil {
panic(err)
}
}
func envoy() string {
maybeEnvoy := os.Getenv("ENVOY")
if maybeEnvoy != "" {
return maybeEnvoy
}
return "/usr/local/bin/envoy"
}
func inputCfg() string {
maybeConf := os.Getenv("INPUT_CONF")
if maybeConf != "" {
return maybeConf
}
return "/etc/envoy/envoy.yaml"
}
func outputCfg() string {
maybeConf := os.Getenv("OUTPUT_CONF")
if maybeConf != "" {
return maybeConf
}
return "/tmp/envoy.yaml"
}
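// Summary (added as a comment for clarity): the fallbacks used above are
//	ENVOY       -> /usr/local/bin/envoy
//	INPUT_CONF  -> /etc/envoy/envoy.yaml
//	OUTPUT_CONF -> /tmp/envoy.yaml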
| [
"\"ENVOY\"",
"\"INPUT_CONF\"",
"\"OUTPUT_CONF\""
]
| []
| [
"ENVOY",
"INPUT_CONF",
"OUTPUT_CONF"
]
| [] | ["ENVOY", "INPUT_CONF", "OUTPUT_CONF"] | go | 3 | 0 | |
HackerRank/src/amazon/robot/test/Solution.java | package amazon.robot.test;
import java.io.BufferedWriter;
import java.io.FileWriter;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Scanner;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class Solution {
/**
	 * x position the robot moves to. East = +1, West = -1
*/
static private int x = 0;
/**
	 * y position the robot moves to. North = +1, South = -1
*/
static private int y = 0;
/**
* Pattern to validate input format.
	 * Must be a valid sequence:
	 * before an N, S, E or W there may be a digit (the magnitude);
	 * after an N, S, E or W there may be zero or more 'X' characters,
	 * which act as undo markers.
*/
static Pattern INPUT_VALIDATOR = Pattern.compile("^(\\d*[NSEW]X*)*$");
static Pattern SPLIT_GROUP = Pattern.compile("(\\d*[NSWE]X*)");
/**
* Pattern to split magnitude and undo.
*/
static Pattern SPLIT_N_DIRECTION = Pattern.compile("(\\d*)N(X*)");
static Pattern SPLIT_S_DIRECTION = Pattern.compile("(\\d*)S(X*)");
static Pattern SPLIT_W_DIRECTION = Pattern.compile("(\\d*)W(X*)");
static Pattern SPLIT_E_DIRECTION = Pattern.compile("(\\d*)E(X*)");
/**
* Inner Class Coordinates Plan, using the concept of Command Pattern
* */
private static class CoordinatesRobot {
public int _x = 0;
public int _y = 0;
/**
* Based in Pattern Command
* */
public int undo = 0;
@Override
public String toString() {
return "("+_x+","+_y+") "+ undo;
}
}
/**
* Move the Robot to Origin (0,0)
*/
private static void toOrigin() {
x = 0;
y = 0;
}
/**
	 * All commands are supposed to be calculated from the origin (0,0). All valid
	 * commands must follow these rules: contain only the uppercase letters NSEWX;
	 * any number must be followed by one of those letters; and a command never
	 * ends with a number.
*
*
* Space Complexity = O(n) , because of Array in method calculateDestination
*
* Runtime Complexity = 2.n + c in method calculateDestination
	 * Runtime Complexity = O(n): each command is removed at most once, so the nested undo loop does not degrade it to O(n^2).
* Runtime Complexity = because of that the answer is O(n).
*
* @param position
* @return destination in format (x, y);
*/
/*
* Complete the function below.
*/
static String moveRobot(String s) {
toOrigin();
try {
// Validate empty command
if (s == null || s.isEmpty()) {
throw new IllegalArgumentException("Invalid Command ["
+ s + "].");
}
// Validate the Arguments to Operate
validateCommandInput(s);
calculateDestination(s);
} catch (Exception e) {
x = 999;
y = 999;
}
return getPoints();
}
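	// Informal example traces (added for illustration, not part of the original
	// submission), following the rules documented above:
	//   moveRobot("N3E") returns "(3, 1)"     (one step north, then three east)
	//   moveRobot("2NX") returns "(0, 0)"     (the trailing X undoes the 2N command)
	//   moveRobot("ABC") returns "(999, 999)" (invalid input)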
private static void calculateDestination(String position) {
Matcher m = SPLIT_GROUP.matcher(position);
List<String> commands = new ArrayList<String>();
while ( m.find() )
commands.add(m.group());
while ( !commands.isEmpty() ){
String lastCmd = commands.get(commands.size()-1);
CoordinatesRobot c = extractPoint(lastCmd);
// System.out.println(c);
/**
* The command is OK.
*/
if ( c.undo == 0 ){
commands.remove(commands.size()-1);
x += c._x;
y += c._y;
} else {
while ( !commands.isEmpty() && c.undo > 0){
commands.remove(commands.size()-1);
c.undo--;
}
}
}
}
/**
	 * This method is used to extract the magnitude value and how many undo steps the algorithm should perform.
* @param cmd
* @return
*/
private static CoordinatesRobot extractPoint(String cmd) {
CoordinatesRobot c = new CoordinatesRobot();
Matcher m;
if ( cmd.contains("N") ){
m = SPLIT_N_DIRECTION.matcher(cmd);
if ( m.find() ){
String yvalue = m.group(1);
if ( !yvalue.isEmpty() ){
c._y = Integer.parseInt(yvalue);
if ( c._y == 0 )
throw new IllegalArgumentException("Magnitude can't be 0");
} else {
c._y = 1;
}
String undoValue = m.group(2);
if ( !undoValue.isEmpty() ){
c.undo = undoValue.length();
}
}
} else if ( cmd.contains("S") ){
m = SPLIT_S_DIRECTION.matcher(cmd);
if ( m.find() ){
String yvalue = m.group(1);
if ( !yvalue.isEmpty() ){
c._y = -Integer.parseInt(yvalue);
if ( c._y == 0 )
throw new IllegalArgumentException("Magnitude can't be 0");
} else {
c._y = -1;
}
String undoValue = m.group(2);
if ( !undoValue.isEmpty() ){
c.undo = undoValue.length();
}
}
} else if ( cmd.contains("E") ){
m = SPLIT_E_DIRECTION.matcher(cmd);
if ( m.find() ){
String yvalue = m.group(1);
if ( !yvalue.isEmpty() ){
c._x = Integer.parseInt(yvalue);
if ( c._x == 0 )
throw new IllegalArgumentException("Magnitude can't be 0");
} else {
c._x = 1;
}
String undoValue = m.group(2);
if ( !undoValue.isEmpty() ){
c.undo = undoValue.length();
}
}
} else if ( cmd.contains("W") ){
m = SPLIT_W_DIRECTION.matcher(cmd);
if ( m.find() ){
String yvalue = m.group(1);
if ( !yvalue.isEmpty() ){
c._x = -Integer.parseInt(yvalue);
if ( c._x == 0 )
throw new IllegalArgumentException("Magnitude can't be 0");
} else {
c._x = -1;
}
String undoValue = m.group(2);
if ( !undoValue.isEmpty() ){
c.undo = undoValue.length();
}
}
}
return c;
}
private static String getPoints() {
return "(" + x + ", " + y + ")";
}
private static void validateCommandInput(String position) {
Matcher m = INPUT_VALIDATOR.matcher(position);
if (!m.find())
throw new IllegalArgumentException("Invalid Command [" + position
+ "].");
}
public static final void runTests(){
/**
* Illegal
*/
// System.out.println(moveRobot(null));
// System.out.println(moveRobot(""));
// System.out.println(moveRobot("A7NS"));
//MAYBE
// System.out.println(moveRobot("7NX"));
// System.out.println(moveRobot("7NXX"));
// System.out.println(moveRobot("7NXXX"));
System.out.println(moveRobot("N0W"));
/**
* Legal
*/
// System.out.println(moveRobot("N"));
// System.out.println(moveRobot("S"));
// System.out.println(moveRobot("E"));
// System.out.println(moveRobot("W"));
// System.out.println(moveRobot("7NXXX"));
}
public static void main(String[] args) throws IOException{
runTests();
// Scanner in = new Scanner(System.in);
// final String fileName = System.getenv("OUTPUT_PATH");
// BufferedWriter bw = new BufferedWriter(new FileWriter(fileName));
// String res;
// String _s;
// try {
// _s = in.nextLine();
// } catch (Exception e) {
// _s = null;
// }
//
// res = moveRobot(_s);
// bw.write(res);
// bw.newLine();
//
// bw.close();
}
}
| [
"\"OUTPUT_PATH\""
]
| []
| [
"OUTPUT_PATH"
]
| [] | ["OUTPUT_PATH"] | java | 1 | 0 | |
cmd/target.go | // Copyright 2012 tsuru authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cmd
import (
"fmt"
"io"
"io/ioutil"
"os"
"regexp"
"sort"
"strings"
"syscall"
"github.com/pkg/errors"
"github.com/tsuru/gnuflag"
)
var errUndefinedTarget = errors.New(`No target defined. Please use target-add/target-set to define a target.
For more details, please run "tsuru help target".`)
type tsuruTarget struct {
label, url string
}
func (t *tsuruTarget) String() string {
return t.label + " (" + t.url + ")"
}
type targetSlice struct {
targets []tsuruTarget
current int
sorted bool
}
func newTargetSlice() *targetSlice {
return &targetSlice{current: -1}
}
func (t *targetSlice) add(label, url string) {
t.targets = append(t.targets, tsuruTarget{label: label, url: url})
length := t.Len()
if length > 1 && !t.Less(t.Len()-2, t.Len()-1) {
t.sorted = false
}
}
func (t *targetSlice) Len() int {
return len(t.targets)
}
func (t *targetSlice) Less(i, j int) bool {
return t.targets[i].label < t.targets[j].label
}
func (t *targetSlice) Swap(i, j int) {
t.targets[i], t.targets[j] = t.targets[j], t.targets[i]
}
func (t *targetSlice) Sort() {
sort.Sort(t)
t.sorted = true
}
func (t *targetSlice) setCurrent(url string) {
if !t.sorted {
t.Sort()
}
for i, target := range t.targets {
if target.url == url {
t.current = i
break
}
}
}
func (t *targetSlice) String() string {
if !t.sorted {
t.Sort()
}
values := make([]string, len(t.targets))
for i, target := range t.targets {
prefix := " "
if t.current == i {
prefix = "* "
}
values[i] = prefix + target.String()
}
return strings.Join(values, "\n")
}
// ReadTarget returns the current target, as defined in the TSURU_TARGET
// environment variable or in the target file.
func ReadTarget() (string, error) {
if target := os.Getenv("TSURU_TARGET"); target != "" {
targets, err := getTargets()
if err == nil {
if val, ok := targets[target]; ok {
return val, nil
}
}
return target, nil
}
targetPath := JoinWithUserDir(".tsuru", "target")
target, err := readTarget(targetPath)
if err == errUndefinedTarget {
copyTargetFiles()
target, err = readTarget(JoinWithUserDir(".tsuru_target"))
}
return target, err
}
func readTarget(targetPath string) (string, error) {
if f, err := filesystem().Open(targetPath); err == nil {
defer f.Close()
if b, err := ioutil.ReadAll(f); err == nil {
return strings.TrimSpace(string(b)), nil
}
}
return "", errUndefinedTarget
}
func deleteTargetFile() {
filesystem().Remove(JoinWithUserDir(".tsuru", "target"))
}
func GetTarget() (string, error) {
var prefix string
target, err := ReadTarget()
if err != nil {
return "", err
}
if m, _ := regexp.MatchString("^https?://", target); !m {
prefix = "http://"
}
return prefix + target, nil
}
func GetTargetLabel() (string, error) {
target, err := GetTarget()
if err != nil {
return "", err
}
targets, err := getTargets()
if err != nil {
return "", err
}
	targetKeys := make([]string, 0, len(targets))
for k := range targets {
targetKeys = append(targetKeys, k)
}
sort.Strings(targetKeys)
for _, k := range targetKeys {
if targets[k] == target {
return k, nil
}
}
	return "", errors.Errorf("label for target %q not found", target)
}
func GetURLVersion(version, path string) (string, error) {
target, err := GetTarget()
if err != nil {
return "", err
}
return strings.TrimRight(target, "/") + "/" + version + path, nil
}
func GetURL(path string) (string, error) {
return GetURLVersion("1.0", path)
}
// WriteTarget writes the given endpoint to the target file.
func WriteTarget(t string) error {
targetPath := JoinWithUserDir(".tsuru", "target")
targetFile, err := filesystem().OpenFile(targetPath, syscall.O_WRONLY|syscall.O_CREAT|syscall.O_TRUNC, 0600)
if err != nil {
return err
}
defer targetFile.Close()
n, err := targetFile.WriteString(t)
if n != len(t) || err != nil {
return errors.New("Failed to write the target file")
}
return nil
}
type targetAdd struct {
fs *gnuflag.FlagSet
set bool
}
func (t *targetAdd) Info() *Info {
return &Info{
Name: "target-add",
Usage: "target add <label> <target> [--set-current|-s]",
Desc: "Adds a new entry to the list of available targets",
MinArgs: 2,
}
}
func (t *targetAdd) Run(ctx *Context, client *Client) error {
var target string
var label string
if len(ctx.Args) != 2 {
return errors.New("Invalid arguments")
}
label = ctx.Args[0]
target = ctx.Args[1]
err := WriteOnTargetList(label, target)
if err != nil {
return err
}
fmt.Fprintf(ctx.Stdout, "New target %s -> %s added to target list", label, target)
if t.set {
WriteTarget(target)
fmt.Fprint(ctx.Stdout, " and defined as the current target")
}
fmt.Fprintln(ctx.Stdout)
return nil
}
func (t *targetAdd) Flags() *gnuflag.FlagSet {
if t.fs == nil {
t.fs = gnuflag.NewFlagSet("target-add", gnuflag.ExitOnError)
t.fs.BoolVar(&t.set, "set-current", false, "Add and define the target as the current target")
t.fs.BoolVar(&t.set, "s", false, "Add and define the target as the current target")
}
return t.fs
}
func resetTargetList() error {
targetsPath := JoinWithUserDir(".tsuru", "targets")
targetsFile, err := filesystem().OpenFile(targetsPath, syscall.O_WRONLY|syscall.O_CREAT|syscall.O_TRUNC, 0600)
if err != nil {
return err
}
defer targetsFile.Close()
return nil
}
// WriteOnTargetList writes the given target in the target list file.
func WriteOnTargetList(label, target string) error {
label = strings.TrimSpace(label)
target = strings.TrimSpace(target)
targetExist, err := CheckIfTargetLabelExists(label)
if err != nil {
return err
}
if targetExist {
return errors.New("Target label provided already exists")
}
targetsPath := JoinWithUserDir(".tsuru", "targets")
targetsFile, err := filesystem().OpenFile(targetsPath, syscall.O_RDWR|syscall.O_CREAT|syscall.O_APPEND, 0600)
if err != nil {
return err
}
defer targetsFile.Close()
content := label + "\t" + target + "\n"
n, err := targetsFile.WriteString(content)
if n != len(content) || err != nil {
return errors.New("Failed to write the target file")
}
return nil
}
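// Illustrative example (not in the original source): after two calls to
// WriteOnTargetList the targets file holds one tab-separated "label<TAB>url"
// entry per line, for instance
//	production	https://tsuru.example.com
//	staging	https://tsuru-staging.example.com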
func CheckIfTargetLabelExists(label string) (bool, error) {
targets, err := getTargets()
if err != nil {
return false, err
}
_, exists := targets[label]
if exists {
return true, nil
}
return false, nil
}
func getTargets() (map[string]string, error) {
var targets = map[string]string{}
legacyTargetsPath := JoinWithUserDir(".tsuru_targets")
targetsPath := JoinWithUserDir(".tsuru", "targets")
err := filesystem().MkdirAll(JoinWithUserDir(".tsuru"), 0700)
if err != nil {
return nil, err
}
var legacy bool
f, err := filesystem().Open(targetsPath)
if os.IsNotExist(err) {
f, err = filesystem().Open(legacyTargetsPath)
legacy = true
}
if err == nil {
defer f.Close()
if b, err := ioutil.ReadAll(f); err == nil {
var targetLines = strings.Split(strings.TrimSpace(string(b)), "\n")
for i := range targetLines {
var targetSplit = strings.Split(targetLines[i], "\t")
if len(targetSplit) == 2 {
targets[targetSplit[0]] = targetSplit[1]
}
}
}
}
if legacy {
copyTargetFiles()
}
return targets, nil
}
func copyTargetFiles() {
filesystem().MkdirAll(JoinWithUserDir(".tsuru"), 0700)
if src, err := filesystem().Open(JoinWithUserDir(".tsuru_targets")); err == nil {
defer src.Close()
if dst, err := filesystem().OpenFile(JoinWithUserDir(".tsuru", "targets"), syscall.O_WRONLY|syscall.O_CREAT|syscall.O_TRUNC, 0600); err == nil {
defer dst.Close()
io.Copy(dst, src)
}
}
if target, err := readTarget(JoinWithUserDir(".tsuru_target")); err == nil {
WriteTarget(target)
}
}
type targetList struct{}
func (t *targetList) Info() *Info {
desc := `Displays the list of targets, marking the current.
Other commands related to target:
- target add: adds a new target to the list of targets
- target set: defines one of the targets in the list as the current target
- target remove: removes one target from the list`
return &Info{
Name: "target-list",
Usage: "target list",
Desc: desc,
MinArgs: 0,
}
}
func (t *targetList) Run(ctx *Context, client *Client) error {
slice := newTargetSlice()
targets, err := getTargets()
if err != nil {
return err
}
for label, target := range targets {
slice.add(label, target)
}
if current, err := ReadTarget(); err == nil {
slice.setCurrent(current)
}
fmt.Fprintf(ctx.Stdout, "%v\n", slice)
return nil
}
type targetRemove struct{}
func (t *targetRemove) Info() *Info {
desc := `Remove a target from target-list (tsuru server)
`
return &Info{
Name: "target-remove",
Usage: "target remove",
Desc: desc,
MinArgs: 1,
}
}
func (t *targetRemove) Run(ctx *Context, client *Client) error {
if len(ctx.Args) != 1 {
return errors.New("Invalid arguments")
}
targetLabelToRemove := strings.TrimSpace(ctx.Args[0])
targets, err := getTargets()
if err != nil {
return err
}
var turl string
for label, url := range targets {
if label == targetLabelToRemove {
turl = url
delete(targets, label)
}
}
if turl != "" {
var current string
if current, err = ReadTarget(); err == nil && current == turl {
deleteTargetFile()
}
}
err = resetTargetList()
if err != nil {
return err
}
for label, target := range targets {
WriteOnTargetList(label, target)
}
return nil
}
type targetSet struct{}
func (t *targetSet) Info() *Info {
desc := `Change current target (tsuru server)
`
return &Info{
Name: "target-set",
Usage: "target set <label>",
Desc: desc,
MinArgs: 1,
}
}
func (t *targetSet) Run(ctx *Context, client *Client) error {
if len(ctx.Args) != 1 {
return errors.New("Invalid arguments")
}
targetLabelToSet := strings.TrimSpace(ctx.Args[0])
labelExist, err := CheckIfTargetLabelExists(targetLabelToSet)
if err != nil {
return err
}
if !labelExist {
return errors.New("Target not found")
}
targets, err := getTargets()
if err != nil {
return err
}
for label, target := range targets {
if label == targetLabelToSet {
err = WriteTarget(target)
if err != nil {
return err
}
fmt.Fprintf(ctx.Stdout, "New target is %s -> %s\n", label, target)
}
}
return nil
}
| [
"\"TSURU_TARGET\""
]
| []
| [
"TSURU_TARGET"
]
| [] | ["TSURU_TARGET"] | go | 1 | 0 | |
app/connector/knative/src/main/java/io/syndesis/connector/knative/meta/KnativeMetaDataSupport.java | /*
* Copyright (C) 2016 Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.syndesis.connector.knative.meta;
import io.fabric8.kubernetes.api.model.ObjectMeta;
import io.fabric8.kubernetes.api.model.apiextensions.CustomResourceDefinition;
import io.fabric8.kubernetes.api.model.apiextensions.CustomResourceDefinitionBuilder;
import io.fabric8.kubernetes.api.model.apiextensions.CustomResourceDefinitionStatusBuilder;
import io.fabric8.openshift.client.DefaultOpenShiftClient;
import io.fabric8.openshift.client.OpenShiftClient;
import io.syndesis.connector.knative.meta.crd.KnativeResource;
import io.syndesis.connector.knative.meta.crd.KnativeResourceDoneable;
import io.syndesis.connector.knative.meta.crd.KnativeResourceList;
import java.util.List;
import java.util.Optional;
import java.util.stream.Collectors;
public final class KnativeMetaDataSupport {
private static final CustomResourceDefinition KNATIVE_CHANNEL_CRD = new CustomResourceDefinitionBuilder()
.withApiVersion("eventing.knative.dev/v1alpha1")
.withKind("Channel")
.withNewMetadata()
.withName("channels.eventing.knative.dev")
.endMetadata()
.withNewSpec()
.withGroup("eventing.knative.dev")
.withScope("Namespaced")
.withVersion("v1alpha1")
.withNewNames()
.withKind("Channel")
.withListKind("ChannelList")
.withPlural("channels")
.withSingular("channel")
.endNames()
.endSpec()
.withStatus(new CustomResourceDefinitionStatusBuilder().build())
.build();
private static final CustomResourceDefinition KNATIVE_SERVICE_CRD = new CustomResourceDefinitionBuilder()
.withApiVersion("serving.knative.dev/v1alpha1")
.withKind("Service")
.withNewMetadata()
.withName("services.serving.knative.dev")
.endMetadata()
.withNewSpec()
.withGroup("serving.knative.dev")
.withScope("Namespaced")
.withVersion("v1alpha1")
.withNewNames()
.withKind("Service")
.withListKind("ServiceList")
.withPlural("services")
.withSingular("service")
.endNames()
.endSpec()
.withStatus(new CustomResourceDefinitionStatusBuilder().build())
.build();
private KnativeMetaDataSupport() {
}
public static List<String> listChannels() {
return listResources(KNATIVE_CHANNEL_CRD);
}
public static List<String> listServices() {
return listResources(KNATIVE_SERVICE_CRD);
}
private static List<String> listResources(CustomResourceDefinition crd) {
try (OpenShiftClient client = new DefaultOpenShiftClient()) {
return client.customResources(crd, KnativeResource.class, KnativeResourceList.class, KnativeResourceDoneable.class)
.inNamespace(getTargetNamespace())
.list()
.getItems()
.stream()
.map(KnativeResource::getMetadata)
.map(ObjectMeta::getName)
.collect(Collectors.toList());
}
}
private static String getTargetNamespace() {
return Optional.ofNullable(System.getenv("NAMESPACE")).orElse("");
}
}
| [
"\"NAMESPACE\""
]
| []
| [
"NAMESPACE"
]
| [] | ["NAMESPACE"] | java | 1 | 0 | |
main.go | package main
import (
"context"
"log"
"os"
"github.com/johanneswuerbach/jaeger-dynamodb/plugin"
pConfig "github.com/johanneswuerbach/jaeger-dynamodb/plugin/config"
"github.com/johanneswuerbach/jaeger-dynamodb/setup"
"github.com/ory/viper"
"github.com/spf13/pflag"
hclog "github.com/hashicorp/go-hclog"
"github.com/jaegertracing/jaeger/plugin/storage/grpc"
"github.com/jaegertracing/jaeger/plugin/storage/grpc/shared"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/credentials"
"github.com/aws/aws-sdk-go-v2/service/dynamodb"
)
const (
loggerName = "jaeger-dynamodb"
spansTable = "jaeger.spans"
servicesTable = "jaeger.services"
operationsTable = "jaeger.operations"
dependenciesTable = "jaeger.dependencies"
)
func main() {
logLevel := os.Getenv("GRPC_STORAGE_PLUGIN_LOG_LEVEL")
if logLevel == "" {
logLevel = hclog.Warn.String()
}
logger := hclog.New(&hclog.LoggerOptions{
Level: hclog.LevelFromString(logLevel),
Name: loggerName,
JSONFormat: true,
})
var configPath string
pflag.StringVar(&configPath, "config", "", "A path to the dynamodb plugin's configuration file")
pflag.Bool("create-tables", false, "(Re)create dynamodb table")
pflag.Bool("only-create-tables", false, "Exit after creating dynamodb tables")
pflag.Parse()
if err := viper.BindPFlags(pflag.CommandLine); err != nil {
		log.Fatalf("unable to bind flags, %v", err)
}
if configPath != "" {
viper.SetConfigFile(configPath)
if err := viper.ReadInConfig(); err != nil {
log.Fatalf("error reading config file, %v", err)
}
}
var configuration pConfig.Configuration
err := viper.Unmarshal(&configuration)
if err != nil {
log.Fatalf("unable to decode into struct, %v", err)
}
logger.Debug("plugin starting ...", configuration)
ctx := context.TODO()
cfg, err := config.LoadDefaultConfig(ctx, func(lo *config.LoadOptions) error {
if configuration.DynamoDB.Endpoint != "" {
lo.Credentials = credentials.NewStaticCredentialsProvider("TEST_ONLY", "TEST_ONLY", "TEST_ONLY")
lo.Region = "us-east-1"
lo.EndpointResolver = aws.EndpointResolverFunc(
func(service, region string) (aws.Endpoint, error) {
return aws.Endpoint{URL: configuration.DynamoDB.Endpoint, Source: aws.EndpointSourceCustom}, nil
})
}
return nil
})
if err != nil {
log.Fatalf("unable to load SDK config, %v", err)
}
svc := dynamodb.NewFromConfig(cfg)
logger.Debug("plugin configured")
if viper.GetBool("create-tables") || configuration.DynamoDB.RecreateTables {
if err := setup.PollUntilReady(ctx, svc); err != nil {
log.Fatalf("unable to poll until ready, %v", err)
}
logger.Debug("Creating tables.")
if err := setup.RecreateSpanStoreTables(ctx, svc, &setup.SetupSpanOptions{
SpansTable: spansTable,
ServicesTable: servicesTable,
OperationsTable: operationsTable,
}); err != nil {
log.Fatalf("unable to create tables, %v", err)
}
if err := setup.RecreateDependencyStoreTables(ctx, svc, &setup.SetupDependencyOptions{
DependenciesTable: dependenciesTable,
}); err != nil {
log.Fatalf("unable to create tables, %v", err)
}
}
if viper.GetBool("only-create-tables") {
logger.Debug("Exiting after tables created.")
return
}
dynamodbPlugin, err := plugin.NewDynamoDBPlugin(logger, svc, spansTable, servicesTable, operationsTable, dependenciesTable)
if err != nil {
log.Fatalf("unable to create plugin, %v", err)
}
logger.Debug("plugin created")
grpc.Serve(&shared.PluginServices{
Store: dynamodbPlugin,
ArchiveStore: dynamodbPlugin,
})
}
| [
"\"GRPC_STORAGE_PLUGIN_LOG_LEVEL\""
]
| []
| [
"GRPC_STORAGE_PLUGIN_LOG_LEVEL"
]
| [] | ["GRPC_STORAGE_PLUGIN_LOG_LEVEL"] | go | 1 | 0 | |
test/functional/test_framework/test_node.py | #!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for falconivityd node under test"""
import contextlib
import decimal
import errno
from enum import Enum
import http.client
import json
import logging
import os
import re
import subprocess
import tempfile
import time
import urllib.parse
import collections
import shlex
import sys
from .authproxy import JSONRPCException
from .descriptors import descsum_create
from .messages import MY_SUBVERSION
from .util import (
MAX_NODES,
append_config,
delete_cookie_file,
get_auth_cookie,
get_rpc_proxy,
rpc_url,
wait_until_helper,
p2p_port,
EncodeDecimal,
)
BITCOIND_PROC_WAIT_TIMEOUT = 60
class FailedToStartError(Exception):
"""Raised when a node fails to start correctly."""
class ErrorMatch(Enum):
FULL_TEXT = 1
FULL_REGEX = 2
PARTIAL_REGEX = 3
class TestNode():
"""A class for representing a falconivityd node under test.
This class contains:
- state about the node (whether it's running, etc)
- a Python subprocess.Popen object representing the running process
- an RPC connection to the node
- one or more P2P connections to the node
To make things easier for the test writer, any unrecognised messages will
be dispatched to the RPC connection."""
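    # Illustrative sketch (not part of the original framework file): because
    # unrecognised attributes are forwarded to the RPC connection, a test can
    # simply write
    #   node = self.nodes[0]
    #   node.getblockcount()  # dispatched to the node's RPC interface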
def __init__(self, i, datadir, *, chain, rpchost, timewait, timeout_factor, bitcoind, bitcoin_cli, coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, start_perf=False, use_valgrind=False, version=None, descriptors=False):
"""
Kwargs:
start_perf (bool): If True, begin profiling the node with `perf` as soon as
the node starts.
"""
self.index = i
self.datadir = datadir
self.bitcoinconf = os.path.join(self.datadir, "falconivity.conf")
self.stdout_dir = os.path.join(self.datadir, "stdout")
self.stderr_dir = os.path.join(self.datadir, "stderr")
self.chain = chain
self.rpchost = rpchost
self.rpc_timeout = timewait
self.binary = bitcoind
self.coverage_dir = coverage_dir
self.cwd = cwd
self.descriptors = descriptors
if extra_conf is not None:
append_config(datadir, extra_conf)
# Most callers will just need to add extra args to the standard list below.
# For those callers that need more flexibility, they can just set the args property directly.
# Note that common args are set in the config file (see initialize_datadir)
self.extra_args = extra_args
self.version = version
# Configuration for logging is set as command-line args rather than in the bitcoin.conf file.
# This means that starting a bitcoind using the temp dir to debug a failed test won't
# spam debug.log.
self.args = [
self.binary,
"-datadir=" + self.datadir,
"-logtimemicros",
"-debug",
"-debugexclude=libevent",
"-debugexclude=leveldb",
"-uacomment=testnode%d" % i,
]
if use_valgrind:
default_suppressions_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"..", "..", "..", "contrib", "valgrind.supp")
suppressions_file = os.getenv("VALGRIND_SUPPRESSIONS_FILE",
default_suppressions_file)
self.args = ["valgrind", "--suppressions={}".format(suppressions_file),
"--gen-suppressions=all", "--exit-on-first-error=yes",
"--error-exitcode=1", "--quiet"] + self.args
if self.version_is_at_least(190000):
self.args.append("-logthreadnames")
self.cli = TestNodeCLI(bitcoin_cli, self.datadir)
self.use_cli = use_cli
self.start_perf = start_perf
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.url = None
self.log = logging.getLogger('TestFramework.node%d' % i)
self.cleanup_on_exit = True # Whether to kill the node when this object goes away
# Cache perf subprocesses here by their data output filename.
self.perf_subprocesses = {}
self.p2ps = []
self.timeout_factor = timeout_factor
AddressKeyPair = collections.namedtuple('AddressKeyPair', ['address', 'key'])
PRIV_KEYS = [
# address , privkey
AddressKeyPair('mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'),
AddressKeyPair('msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'),
AddressKeyPair('mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'),
AddressKeyPair('mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'),
AddressKeyPair('msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'),
AddressKeyPair('n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'),
AddressKeyPair('myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'),
AddressKeyPair('mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'),
AddressKeyPair('mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'),
AddressKeyPair('mq4fBNdckGtvY2mijd9am7DRsbRB4KjUkf', 'cN55daf1HotwBAgAKWVgDcoppmUNDtQSfb7XLutTLeAgVc3u8hik'),
AddressKeyPair('mpFAHDjX7KregM3rVotdXzQmkbwtbQEnZ6', 'cT7qK7g1wkYEMvKowd2ZrX1E5f6JQ7TM246UfqbCiyF7kZhorpX3'),
AddressKeyPair('mzRe8QZMfGi58KyWCse2exxEFry2sfF2Y7', 'cPiRWE8KMjTRxH1MWkPerhfoHFn5iHPWVK5aPqjW8NxmdwenFinJ'),
]
def get_deterministic_priv_key(self):
"""Return a deterministic priv key in base58, that only depends on the node's index"""
assert len(self.PRIV_KEYS) == MAX_NODES
return self.PRIV_KEYS[self.index]
def _node_msg(self, msg: str) -> str:
"""Return a modified msg that identifies this node by its index as a debugging aid."""
return "[node %d] %s" % (self.index, msg)
def _raise_assertion_error(self, msg: str):
"""Raise an AssertionError with msg modified to identify this node."""
raise AssertionError(self._node_msg(msg))
def __del__(self):
# Ensure that we don't leave any bitcoind processes lying around after
# the test ends
if self.process and self.cleanup_on_exit:
# Should only happen on test failure
# Avoid using logger, as that may have already been shutdown when
# this destructor is called.
print(self._node_msg("Cleaning up leftover process"))
self.process.kill()
def __getattr__(self, name):
"""Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
if self.use_cli:
return getattr(RPCOverloadWrapper(self.cli, True, self.descriptors), name)
else:
assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection")
return getattr(RPCOverloadWrapper(self.rpc, descriptors=self.descriptors), name)
def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, **kwargs):
"""Start the node."""
if extra_args is None:
extra_args = self.extra_args
# Add a new stdout and stderr file each time bitcoind is started
if stderr is None:
stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False)
if stdout is None:
stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False)
self.stderr = stderr
self.stdout = stdout
if cwd is None:
cwd = self.cwd
# Delete any existing cookie file -- if such a file exists (eg due to
# unclean shutdown), it will get overwritten anyway by bitcoind, and
# potentially interfere with our attempt to authenticate
delete_cookie_file(self.datadir, self.chain)
# add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal
subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1")
self.process = subprocess.Popen(self.args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs)
self.running = True
self.log.debug("falconivityd started, waiting for RPC to come up")
if self.start_perf:
self._start_perf()
def wait_for_rpc_connection(self):
"""Sets up an RPC connection to the falconivityd process. Returns False if unable to connect."""
# Poll at a rate of four times per second
poll_per_s = 4
for _ in range(poll_per_s * self.rpc_timeout):
if self.process.poll() is not None:
raise FailedToStartError(self._node_msg(
'falconivityd exited with status {} during initialization'.format(self.process.returncode)))
try:
rpc = get_rpc_proxy(
rpc_url(self.datadir, self.index, self.chain, self.rpchost),
self.index,
timeout=self.rpc_timeout // 2, # Shorter timeout to allow for one retry in case of ETIMEDOUT
coveragedir=self.coverage_dir,
)
rpc.getblockcount()
# If the call to getblockcount() succeeds then the RPC connection is up
if self.version_is_at_least(190000):
# getmempoolinfo.loaded is available since commit
# bb8ae2c (version 0.19.0)
wait_until_helper(lambda: rpc.getmempoolinfo()['loaded'], timeout_factor=self.timeout_factor)
# Wait for the node to finish reindex, block import, and
# loading the mempool. Usually importing happens fast or
# even "immediate" when the node is started. However, there
# is no guarantee and sometimes ThreadImport might finish
# later. This is going to cause intermittent test failures,
# because generally the tests assume the node is fully
# ready after being started.
#
# For example, the node will reject block messages from p2p
# when it is still importing with the error "Unexpected
# block message received"
#
# The wait is done here to make tests as robust as possible
# and prevent racy tests and intermittent failures as much
# as possible. Some tests might not need this, but the
# overhead is trivial, and the added guarantees are worth
# the minimal performance cost.
self.log.debug("RPC successfully started")
if self.use_cli:
return
self.rpc = rpc
self.rpc_connected = True
self.url = self.rpc.url
return
except JSONRPCException as e: # Initialization phase
# -28 RPC in warmup
# -342 Service unavailable, RPC server started but is shutting down due to error
if e.error['code'] != -28 and e.error['code'] != -342:
raise # unknown JSON RPC exception
except ConnectionResetError:
# This might happen when the RPC server is in warmup, but shut down before the call to getblockcount
# succeeds. Try again to properly raise the FailedToStartError
pass
except OSError as e:
if e.errno == errno.ETIMEDOUT:
pass # Treat identical to ConnectionResetError
elif e.errno == errno.ECONNREFUSED:
pass # Port not yet open?
else:
raise # unknown OS error
except ValueError as e: # cookie file not found and no rpcuser or rpcpassword; bitcoind is still starting
if "No RPC credentials" not in str(e):
raise
time.sleep(1.0 / poll_per_s)
self._raise_assertion_error("Unable to connect to falconivityd after {}s".format(self.rpc_timeout))
def wait_for_cookie_credentials(self):
"""Ensures auth cookie credentials can be read, e.g. for testing CLI with -rpcwait before RPC connection is up."""
self.log.debug("Waiting for cookie credentials")
# Poll at a rate of four times per second.
poll_per_s = 4
for _ in range(poll_per_s * self.rpc_timeout):
try:
get_auth_cookie(self.datadir, self.chain)
self.log.debug("Cookie credentials successfully retrieved")
return
except ValueError: # cookie file not found and no rpcuser or rpcpassword; bitcoind is still starting
pass # so we continue polling until RPC credentials are retrieved
time.sleep(1.0 / poll_per_s)
self._raise_assertion_error("Unable to retrieve cookie credentials after {}s".format(self.rpc_timeout))
def generate(self, nblocks, maxtries=1000000):
self.log.debug("TestNode.generate() dispatches `generate` call to `generatetoaddress`")
return self.generatetoaddress(nblocks=nblocks, address=self.get_deterministic_priv_key().address, maxtries=maxtries)
def get_wallet_rpc(self, wallet_name):
if self.use_cli:
return RPCOverloadWrapper(self.cli("-rpcwallet={}".format(wallet_name)), True, self.descriptors)
else:
assert self.rpc_connected and self.rpc, self._node_msg("RPC not connected")
wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name))
return RPCOverloadWrapper(self.rpc / wallet_path, descriptors=self.descriptors)
def version_is_at_least(self, ver):
return self.version is None or self.version >= ver
def stop_node(self, expected_stderr='', wait=0):
"""Stop the node."""
if not self.running:
return
self.log.debug("Stopping node")
try:
# Do not use wait argument when testing older nodes, e.g. in feature_backwards_compatibility.py
if self.version_is_at_least(180000):
self.stop(wait=wait)
else:
self.stop()
except http.client.CannotSendRequest:
self.log.exception("Unable to stop node.")
# If there are any running perf processes, stop them.
for profile_name in tuple(self.perf_subprocesses.keys()):
self._stop_perf(profile_name)
# Check that stderr is as expected
self.stderr.seek(0)
stderr = self.stderr.read().decode('utf-8').strip()
if stderr != expected_stderr:
raise AssertionError("Unexpected stderr {} != {}".format(stderr, expected_stderr))
self.stdout.close()
self.stderr.close()
del self.p2ps[:]
def is_node_stopped(self):
"""Checks whether the node has stopped.
Returns True if the node has stopped. False otherwise.
This method is responsible for freeing resources (self.process)."""
if not self.running:
return True
return_code = self.process.poll()
if return_code is None:
return False
# process has stopped. Assert that it didn't return an error code.
assert return_code == 0, self._node_msg(
"Node returned non-zero exit code (%d) when stopping" % return_code)
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.log.debug("Node stopped")
return True
def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
wait_until_helper(self.is_node_stopped, timeout=timeout, timeout_factor=self.timeout_factor)
@contextlib.contextmanager
def assert_debug_log(self, expected_msgs, unexpected_msgs=None, timeout=2):
if unexpected_msgs is None:
unexpected_msgs = []
time_end = time.time() + timeout * self.timeout_factor
debug_log = os.path.join(self.datadir, self.chain, 'debug.log')
with open(debug_log, encoding='utf-8') as dl:
dl.seek(0, 2)
prev_size = dl.tell()
yield
while True:
found = True
with open(debug_log, encoding='utf-8') as dl:
dl.seek(prev_size)
log = dl.read()
print_log = " - " + "\n - ".join(log.splitlines())
for unexpected_msg in unexpected_msgs:
if re.search(re.escape(unexpected_msg), log, flags=re.MULTILINE):
self._raise_assertion_error('Unexpected message "{}" partially matches log:\n\n{}\n\n'.format(unexpected_msg, print_log))
for expected_msg in expected_msgs:
if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None:
found = False
if found:
return
if time.time() >= time_end:
break
time.sleep(0.05)
self._raise_assertion_error('Expected messages "{}" does not partially match log:\n\n{}\n\n'.format(str(expected_msgs), print_log))
@contextlib.contextmanager
def profile_with_perf(self, profile_name):
"""
Context manager that allows easy profiling of node activity using `perf`.
See `test/functional/README.md` for details on perf usage.
Args:
profile_name (str): This string will be appended to the
profile data filename generated by perf.
"""
subp = self._start_perf(profile_name)
yield
if subp:
self._stop_perf(profile_name)
def _start_perf(self, profile_name=None):
"""Start a perf process to profile this node.
Returns the subprocess running perf."""
subp = None
def test_success(cmd):
return subprocess.call(
# shell=True required for pipe use below
cmd, shell=True,
stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) == 0
if not sys.platform.startswith('linux'):
self.log.warning("Can't profile with perf; only available on Linux platforms")
return None
if not test_success('which perf'):
self.log.warning("Can't profile with perf; must install perf-tools")
return None
if not test_success('readelf -S {} | grep .debug_str'.format(shlex.quote(self.binary))):
self.log.warning(
"perf output won't be very useful without debug symbols compiled into falconivityd")
output_path = tempfile.NamedTemporaryFile(
dir=self.datadir,
prefix="{}.perf.data.".format(profile_name or 'test'),
delete=False,
).name
cmd = [
'perf', 'record',
'-g', # Record the callgraph.
'--call-graph', 'dwarf', # Compatibility for gcc's --fomit-frame-pointer.
'-F', '101', # Sampling frequency in Hz.
'-p', str(self.process.pid),
'-o', output_path,
]
subp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.perf_subprocesses[profile_name] = subp
return subp
def _stop_perf(self, profile_name):
"""Stop (and pop) a perf subprocess."""
subp = self.perf_subprocesses.pop(profile_name)
output_path = subp.args[subp.args.index('-o') + 1]
subp.terminate()
subp.wait(timeout=10)
stderr = subp.stderr.read().decode()
if 'Consider tweaking /proc/sys/kernel/perf_event_paranoid' in stderr:
self.log.warning(
"perf couldn't collect data! Try "
"'sudo sysctl -w kernel.perf_event_paranoid=-1'")
else:
report_cmd = "perf report -i {}".format(output_path)
self.log.info("See perf output by running '{}'".format(report_cmd))
def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs):
"""Attempt to start the node and expect it to raise an error.
extra_args: extra arguments to pass through to falconivityd
expected_msg: regex that stderr should match when falconivityd fails
Will throw if falconivityd starts without an error.
Will throw if an expected_msg is provided and it does not match falconivityd's stdout."""
with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \
tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout:
try:
self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs)
self.wait_for_rpc_connection()
self.stop_node()
self.wait_until_stopped()
except FailedToStartError as e:
self.log.debug('falconivityd failed to start: %s', e)
self.running = False
self.process = None
# Check stderr for expected message
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8').strip()
if match == ErrorMatch.PARTIAL_REGEX:
if re.search(expected_msg, stderr, flags=re.MULTILINE) is None:
self._raise_assertion_error(
'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_REGEX:
if re.fullmatch(expected_msg, stderr) is None:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_TEXT:
if expected_msg != stderr:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
else:
if expected_msg is None:
assert_msg = "falconivityd should have exited with an error"
else:
assert_msg = "falconivityd should have exited with expected error " + expected_msg
self._raise_assertion_error(assert_msg)
def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs):
"""Add a p2p connection to the node.
This method adds the p2p connection to the self.p2ps list and also
returns the connection to the caller."""
if 'dstport' not in kwargs:
kwargs['dstport'] = p2p_port(self.index)
if 'dstaddr' not in kwargs:
kwargs['dstaddr'] = '127.0.0.1'
p2p_conn.peer_connect(**kwargs, net=self.chain, timeout_factor=self.timeout_factor)()
self.p2ps.append(p2p_conn)
p2p_conn.wait_until(lambda: p2p_conn.is_connected, check_connected=False)
if wait_for_verack:
# Wait for the node to send us the version and verack
p2p_conn.wait_for_verack()
# At this point we have sent our version message and received the version and verack, however the full node
# has not yet received the verack from us (in reply to their version). So, the connection is not yet fully
# established (fSuccessfullyConnected).
#
# This shouldn't lead to any issues when sending messages, since the verack will be in-flight before the
# message we send. However, it might lead to races where we are expecting to receive a message. E.g. a
# transaction that will be added to the mempool as soon as we return here.
#
# So syncing here is redundant when we only want to send a message, but the cost is low (a few milliseconds)
# in comparison to the upside of making tests less fragile and unexpected intermittent errors less likely.
p2p_conn.sync_with_ping()
return p2p_conn
def num_test_p2p_connections(self):
"""Return number of test framework p2p connections to the node."""
return len([peer for peer in self.getpeerinfo() if peer['subver'] == MY_SUBVERSION])
def disconnect_p2ps(self):
"""Close all p2p connections to the node."""
for p in self.p2ps:
p.peer_disconnect()
del self.p2ps[:]
wait_until_helper(lambda: self.num_test_p2p_connections() == 0, timeout_factor=self.timeout_factor)
class TestNodeCLIAttr:
def __init__(self, cli, command):
self.cli = cli
self.command = command
def __call__(self, *args, **kwargs):
return self.cli.send_cli(self.command, *args, **kwargs)
def get_request(self, *args, **kwargs):
return lambda: self(*args, **kwargs)
def arg_to_cli(arg):
if isinstance(arg, bool):
return str(arg).lower()
elif arg is None:
return 'null'
elif isinstance(arg, dict) or isinstance(arg, list):
return json.dumps(arg, default=EncodeDecimal)
else:
return str(arg)
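# For illustration (not part of the original file): arg_to_cli(True) -> "true",
# arg_to_cli(None) -> "null", arg_to_cli({"height": 1}) -> '{"height": 1}',
# and arg_to_cli(101) -> "101".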
class TestNodeCLI():
"""Interface to falconivity-cli for an individual node"""
def __init__(self, binary, datadir):
self.options = []
self.binary = binary
self.datadir = datadir
self.input = None
self.log = logging.getLogger('TestFramework.bitcoincli')
def __call__(self, *options, input=None):
# TestNodeCLI is callable with bitcoin-cli command-line options
cli = TestNodeCLI(self.binary, self.datadir)
cli.options = [str(o) for o in options]
cli.input = input
return cli
def __getattr__(self, command):
return TestNodeCLIAttr(self, command)
def batch(self, requests):
results = []
for request in requests:
try:
results.append(dict(result=request()))
except JSONRPCException as e:
results.append(dict(error=e))
return results
def send_cli(self, command=None, *args, **kwargs):
"""Run falconivity-cli command. Deserializes returned string as python object."""
pos_args = [arg_to_cli(arg) for arg in args]
named_args = [str(key) + "=" + arg_to_cli(value) for (key, value) in kwargs.items()]
assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same falconivity-cli call"
p_args = [self.binary, "-datadir=" + self.datadir] + self.options
if named_args:
p_args += ["-named"]
if command is not None:
p_args += [command]
p_args += pos_args + named_args
self.log.debug("Running falconivity-cli {}".format(p_args[2:]))
process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
cli_stdout, cli_stderr = process.communicate(input=self.input)
returncode = process.poll()
if returncode:
match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
if match:
code, message = match.groups()
raise JSONRPCException(dict(code=int(code), message=message))
# Ignore cli_stdout, raise with cli_stderr
raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
try:
return json.loads(cli_stdout, parse_float=decimal.Decimal)
except (json.JSONDecodeError, decimal.InvalidOperation):
return cli_stdout.rstrip("\n")
class RPCOverloadWrapper():
def __init__(self, rpc, cli=False, descriptors=False):
self.rpc = rpc
self.is_cli = cli
self.descriptors = descriptors
def __getattr__(self, name):
return getattr(self.rpc, name)
def createwallet(self, wallet_name, disable_private_keys=None, blank=None, passphrase='', avoid_reuse=None, descriptors=None, load_on_startup=None):
if descriptors is None:
descriptors = self.descriptors
return self.__getattr__('createwallet')(wallet_name, disable_private_keys, blank, passphrase, avoid_reuse, descriptors, load_on_startup)
def importprivkey(self, privkey, label=None, rescan=None):
wallet_info = self.getwalletinfo()
if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']):
return self.__getattr__('importprivkey')(privkey, label, rescan)
desc = descsum_create('combo(' + privkey + ')')
req = [{
'desc': desc,
'timestamp': 0 if rescan else 'now',
'label': label if label else ''
}]
import_res = self.importdescriptors(req)
if not import_res[0]['success']:
raise JSONRPCException(import_res[0]['error'])
def addmultisigaddress(self, nrequired, keys, label=None, address_type=None):
wallet_info = self.getwalletinfo()
if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']):
return self.__getattr__('addmultisigaddress')(nrequired, keys, label, address_type)
cms = self.createmultisig(nrequired, keys, address_type)
req = [{
'desc': cms['descriptor'],
'timestamp': 0,
'label': label if label else ''
}]
import_res = self.importdescriptors(req)
if not import_res[0]['success']:
raise JSONRPCException(import_res[0]['error'])
return cms
def importpubkey(self, pubkey, label=None, rescan=None):
wallet_info = self.getwalletinfo()
if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']):
return self.__getattr__('importpubkey')(pubkey, label, rescan)
desc = descsum_create('combo(' + pubkey + ')')
req = [{
'desc': desc,
'timestamp': 0 if rescan else 'now',
'label': label if label else ''
}]
import_res = self.importdescriptors(req)
if not import_res[0]['success']:
raise JSONRPCException(import_res[0]['error'])
def importaddress(self, address, label=None, rescan=None, p2sh=None):
wallet_info = self.getwalletinfo()
if 'descriptors' not in wallet_info or ('descriptors' in wallet_info and not wallet_info['descriptors']):
return self.__getattr__('importaddress')(address, label, rescan, p2sh)
is_hex = False
try:
            int(address, 16)
            is_hex = True
            desc = descsum_create('raw(' + address + ')')
        except ValueError:
desc = descsum_create('addr(' + address + ')')
reqs = [{
'desc': desc,
'timestamp': 0 if rescan else 'now',
'label': label if label else ''
}]
if is_hex and p2sh:
reqs.append({
'desc': descsum_create('p2sh(raw(' + address + '))'),
'timestamp': 0 if rescan else 'now',
'label': label if label else ''
})
import_res = self.importdescriptors(reqs)
for res in import_res:
if not res['success']:
raise JSONRPCException(res['error'])
| []
| []
| [
"VALGRIND_SUPPRESSIONS_FILE"
]
| [] | ["VALGRIND_SUPPRESSIONS_FILE"] | python | 1 | 0 | |
net/dns/direct.go | // Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package dns
import (
"bufio"
"bytes"
"crypto/rand"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"runtime"
"strings"
"inet.af/netaddr"
"tailscale.com/types/logger"
"tailscale.com/util/dnsname"
)
const (
backupConf = "/etc/resolv.pre-tailscale-backup.conf"
resolvConf = "/etc/resolv.conf"
)
// writeResolvConf writes DNS configuration in resolv.conf format to the given writer.
func writeResolvConf(w io.Writer, servers []netaddr.IP, domains []dnsname.FQDN) {
io.WriteString(w, "# resolv.conf(5) file generated by tailscale\n")
io.WriteString(w, "# DO NOT EDIT THIS FILE BY HAND -- CHANGES WILL BE OVERWRITTEN\n\n")
for _, ns := range servers {
io.WriteString(w, "nameserver ")
io.WriteString(w, ns.String())
io.WriteString(w, "\n")
}
if len(domains) > 0 {
io.WriteString(w, "search")
for _, domain := range domains {
io.WriteString(w, " ")
io.WriteString(w, domain.WithoutTrailingDot())
}
io.WriteString(w, "\n")
}
}
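// For illustration (not part of the original file): a config with a single
// nameserver 100.100.100.100 and a single search domain "foo.example." would be
// rendered by writeResolvConf roughly as:
//
//	# resolv.conf(5) file generated by tailscale
//	# DO NOT EDIT THIS FILE BY HAND -- CHANGES WILL BE OVERWRITTEN
//
//	nameserver 100.100.100.100
//	search foo.example
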
func readResolv(r io.Reader) (config OSConfig, err error) {
scanner := bufio.NewScanner(r)
for scanner.Scan() {
line := strings.TrimSpace(scanner.Text())
i := strings.IndexByte(line, '#')
if i >= 0 {
line = line[:i]
}
if strings.HasPrefix(line, "nameserver") {
s := strings.TrimPrefix(line, "nameserver")
nameserver := strings.TrimSpace(s)
if len(nameserver) == len(s) {
return OSConfig{}, fmt.Errorf("missing space after \"nameserver\" in %q", line)
}
ip, err := netaddr.ParseIP(nameserver)
if err != nil {
return OSConfig{}, err
}
config.Nameservers = append(config.Nameservers, ip)
continue
}
if strings.HasPrefix(line, "search") {
s := strings.TrimPrefix(line, "search")
domain := strings.TrimSpace(s)
if len(domain) == len(s) {
// No leading space?!
				return OSConfig{}, fmt.Errorf("missing space after \"search\" in %q", line)
}
fqdn, err := dnsname.ToFQDN(domain)
if err != nil {
return OSConfig{}, fmt.Errorf("parsing search domains %q: %w", line, err)
}
config.SearchDomains = append(config.SearchDomains, fqdn)
continue
}
}
return config, nil
}
// resolvOwner returns the apparent owner of the resolv.conf
// configuration in bs - one of "resolvconf", "systemd-resolved" or
// "NetworkManager", or "" if no known owner was found.
func resolvOwner(bs []byte) string {
likely := ""
b := bytes.NewBuffer(bs)
for {
line, err := b.ReadString('\n')
if err != nil {
return likely
}
line = strings.TrimSpace(line)
if line == "" {
continue
}
if line[0] != '#' {
// First non-empty, non-comment line. Assume the owner
// isn't hiding further down.
return likely
}
if strings.Contains(line, "systemd-resolved") {
likely = "systemd-resolved"
} else if strings.Contains(line, "NetworkManager") {
likely = "NetworkManager"
} else if strings.Contains(line, "resolvconf") {
likely = "resolvconf"
}
}
}
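// For illustration (not part of the original file): a resolv.conf whose leading
// comment lines mention "systemd-resolved" (as the stub file written by
// systemd-resolved does) makes resolvOwner return "systemd-resolved"; a file whose
// leading comments contain none of the known markers yields "".
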
// isResolvedRunning reports whether systemd-resolved is running on the system,
// even if it is not managing the system DNS settings.
func isResolvedRunning() bool {
if runtime.GOOS != "linux" {
return false
}
// systemd-resolved is never installed without systemd.
_, err := exec.LookPath("systemctl")
if err != nil {
return false
}
// is-active exits with code 3 if the service is not active.
err = exec.Command("systemctl", "is-active", "systemd-resolved.service").Run()
return err == nil
}
// directManager is an OSConfigurator which replaces /etc/resolv.conf with a file
// generated from the given configuration, creating a backup of its old state.
//
// This way of configuring DNS is precarious, since it does not react
// to the disappearance of the Tailscale interface.
// The caller must call Down before program shutdown
// or as cleanup if the program terminates unexpectedly.
type directManager struct {
logf logger.Logf
fs wholeFileFS
// renameBroken is set if fs.Rename to or from /etc/resolv.conf
// fails. This can happen in some container runtimes, where
// /etc/resolv.conf is bind-mounted from outside the container,
// and therefore /etc and /etc/resolv.conf are different
// filesystems as far as rename(2) is concerned.
//
// In those situations, we fall back to emulating rename with file
// copies and truncations, which is not as good (opens up a race
// where a reader can see an empty or partial /etc/resolv.conf),
// but is better than having non-functioning DNS.
renameBroken bool
}
func newDirectManager(logf logger.Logf) *directManager {
return &directManager{
logf: logf,
fs: directFS{},
}
}
func newDirectManagerOnFS(logf logger.Logf, fs wholeFileFS) *directManager {
return &directManager{
logf: logf,
fs: fs,
}
}
func (m *directManager) readResolvFile(path string) (OSConfig, error) {
b, err := m.fs.ReadFile(path)
if err != nil {
return OSConfig{}, err
}
return readResolv(bytes.NewReader(b))
}
// ownedByTailscale reports whether /etc/resolv.conf seems to be a
// tailscale-managed file.
func (m *directManager) ownedByTailscale() (bool, error) {
isRegular, err := m.fs.Stat(resolvConf)
if err != nil {
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
if !isRegular {
return false, nil
}
bs, err := m.fs.ReadFile(resolvConf)
if err != nil {
return false, err
}
if bytes.Contains(bs, []byte("generated by tailscale")) {
return true, nil
}
return false, nil
}
// backupConfig creates or updates a backup of /etc/resolv.conf, if
// resolv.conf does not currently contain a Tailscale-managed config.
func (m *directManager) backupConfig() error {
if _, err := m.fs.Stat(resolvConf); err != nil {
if os.IsNotExist(err) {
// No resolv.conf, nothing to back up. Also get rid of any
// existing backup file, to avoid restoring something old.
m.fs.Remove(backupConf)
return nil
}
return err
}
owned, err := m.ownedByTailscale()
if err != nil {
return err
}
if owned {
return nil
}
return m.rename(resolvConf, backupConf)
}
func (m *directManager) restoreBackup() (restored bool, err error) {
if _, err := m.fs.Stat(backupConf); err != nil {
if os.IsNotExist(err) {
// No backup, nothing we can do.
return false, nil
}
return false, err
}
owned, err := m.ownedByTailscale()
if err != nil {
return false, err
}
_, err = m.fs.Stat(resolvConf)
if err != nil && !os.IsNotExist(err) {
return false, err
}
resolvConfExists := !os.IsNotExist(err)
if resolvConfExists && !owned {
// There's already a non-tailscale config in place, get rid of
// our backup.
m.fs.Remove(backupConf)
return false, nil
}
// We own resolv.conf, and a backup exists.
if err := m.rename(backupConf, resolvConf); err != nil {
return false, err
}
return true, nil
}
// rename tries to rename old to new using m.fs.Rename, and falls back
// to hand-copying bytes and truncating old if that fails.
//
// This is a workaround to /etc/resolv.conf being a bind-mounted file
// in some container environments, which cannot be moved elsewhere in
// /etc (because that would be a cross-filesystem move) or deleted
// (because that would break the bind in surprising ways).
func (m *directManager) rename(old, new string) error {
if !m.renameBroken {
err := m.fs.Rename(old, new)
if err == nil {
return nil
}
m.logf("rename of %q to %q failed (%v), falling back to copy+delete", old, new, err)
m.renameBroken = true
}
bs, err := m.fs.ReadFile(old)
if err != nil {
return fmt.Errorf("reading %q to rename: %v", old, err)
}
if err := m.fs.WriteFile(new, bs, 0644); err != nil {
return fmt.Errorf("writing to %q in rename of %q: %v", new, old, err)
}
if err := m.fs.Remove(old); err != nil {
err2 := m.fs.Truncate(old)
if err2 != nil {
return fmt.Errorf("remove of %q failed (%v) and so did truncate: %v", old, err, err2)
}
}
return nil
}
func (m *directManager) SetDNS(config OSConfig) (err error) {
var changed bool
if config.IsZero() {
changed, err = m.restoreBackup()
if err != nil {
return err
}
} else {
changed = true
if err := m.backupConfig(); err != nil {
return err
}
buf := new(bytes.Buffer)
writeResolvConf(buf, config.Nameservers, config.SearchDomains)
if err := m.atomicWriteFile(m.fs, resolvConf, buf.Bytes(), 0644); err != nil {
return err
}
}
// We might have taken over a configuration managed by resolved,
// in which case it will notice this on restart and gracefully
// start using our configuration. This shouldn't happen because we
// try to manage DNS through resolved when it's around, but as a
// best-effort fallback if we messed up the detection, try to
// restart resolved to make the system configuration consistent.
//
// We take care to only kick systemd-resolved if we've made some
// change to the system's DNS configuration, because this codepath
// can end up running in cases where the user has manually
// configured /etc/resolv.conf to point to systemd-resolved (but
// it's not managed explicitly by systemd-resolved), *and* has
// --accept-dns=false, meaning we pass an empty configuration to
// the running DNS manager. In that very edge-case scenario, we
// cause a disruptive DNS outage each time we reset an empty
// OS configuration.
if changed && isResolvedRunning() && !runningAsGUIDesktopUser() {
exec.Command("systemctl", "restart", "systemd-resolved.service").Run()
}
return nil
}
func (m *directManager) SupportsSplitDNS() bool {
return false
}
func (m *directManager) GetBaseConfig() (OSConfig, error) {
owned, err := m.ownedByTailscale()
if err != nil {
return OSConfig{}, err
}
fileToRead := resolvConf
if owned {
fileToRead = backupConf
}
return m.readResolvFile(fileToRead)
}
func (m *directManager) Close() error {
// We used to keep a file for the tailscale config and symlinked
// to it, but then we stopped because /etc/resolv.conf being a
// symlink to surprising places breaks snaps and other sandboxing
// things. Clean it up if it's still there.
m.fs.Remove("/etc/resolv.tailscale.conf")
if _, err := m.fs.Stat(backupConf); err != nil {
if os.IsNotExist(err) {
// No backup, nothing we can do.
return nil
}
return err
}
owned, err := m.ownedByTailscale()
if err != nil {
return err
}
_, err = m.fs.Stat(resolvConf)
if err != nil && !os.IsNotExist(err) {
return err
}
resolvConfExists := !os.IsNotExist(err)
if resolvConfExists && !owned {
// There's already a non-tailscale config in place, get rid of
// our backup.
m.fs.Remove(backupConf)
return nil
}
// We own resolv.conf, and a backup exists.
if err := m.rename(backupConf, resolvConf); err != nil {
return err
}
if isResolvedRunning() && !runningAsGUIDesktopUser() {
exec.Command("systemctl", "restart", "systemd-resolved.service").Run() // Best-effort.
}
return nil
}
func (m *directManager) atomicWriteFile(fs wholeFileFS, filename string, data []byte, perm os.FileMode) error {
var randBytes [12]byte
if _, err := rand.Read(randBytes[:]); err != nil {
return fmt.Errorf("atomicWriteFile: %w", err)
}
tmpName := fmt.Sprintf("%s.%x.tmp", filename, randBytes[:])
defer fs.Remove(tmpName)
if err := fs.WriteFile(tmpName, data, perm); err != nil {
return fmt.Errorf("atomicWriteFile: %w", err)
}
return m.rename(tmpName, filename)
}
// wholeFileFS is a high-level file system abstraction designed just for use
// by directManager, with the goal that it is easy to implement over wsl.exe.
//
// All name parameters are absolute paths.
type wholeFileFS interface {
Stat(name string) (isRegular bool, err error)
Rename(oldName, newName string) error
Remove(name string) error
ReadFile(name string) ([]byte, error)
Truncate(name string) error
WriteFile(name string, contents []byte, perm os.FileMode) error
}
// directFS is a wholeFileFS implemented directly on the OS.
type directFS struct {
// prefix is file path prefix.
//
// All name parameters are absolute paths so this is typically a
// testing temporary directory like "/tmp".
prefix string
}
func (fs directFS) path(name string) string { return filepath.Join(fs.prefix, name) }
func (fs directFS) Stat(name string) (isRegular bool, err error) {
fi, err := os.Stat(fs.path(name))
if err != nil {
return false, err
}
return fi.Mode().IsRegular(), nil
}
func (fs directFS) Rename(oldName, newName string) error {
return os.Rename(fs.path(oldName), fs.path(newName))
}
func (fs directFS) Remove(name string) error { return os.Remove(fs.path(name)) }
func (fs directFS) ReadFile(name string) ([]byte, error) {
return ioutil.ReadFile(fs.path(name))
}
func (fs directFS) Truncate(name string) error {
return os.Truncate(fs.path(name), 0)
}
func (fs directFS) WriteFile(name string, contents []byte, perm os.FileMode) error {
return ioutil.WriteFile(fs.path(name), contents, perm)
}
// runningAsGUIDesktopUser reports whether it seems that this code is
// being run as a regular user on a Linux desktop. This is a quick
// hack to fix Issue 2672 where PolicyKit pops up a GUI dialog asking
// to proceed we do a best effort attempt to restart
// systemd-resolved.service. There's surely a better way.
func runningAsGUIDesktopUser() bool {
return os.Getuid() != 0 && os.Getenv("DISPLAY") != ""
}
| [
"\"DISPLAY\""
]
| []
| [
"DISPLAY"
]
| [] | ["DISPLAY"] | go | 1 | 0 | |
internal/install/execution/nerdstorage_status_reporter_integration_test.go | // +build integration
package execution
import (
"os"
"strconv"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/newrelic/newrelic-cli/internal/install/types"
"github.com/newrelic/newrelic-client-go/newrelic"
"github.com/newrelic/newrelic-client-go/pkg/config"
"github.com/newrelic/newrelic-client-go/pkg/nerdstorage"
"github.com/newrelic/newrelic-client-go/pkg/workloads"
)
func TestReportRecipeSucceeded_Basic(t *testing.T) {
apiKey := os.Getenv("NEW_RELIC_API_KEY")
accountID := os.Getenv("NEW_RELIC_ACCOUNT_ID")
if apiKey == "" || accountID == "" {
t.Skipf("NEW_RELIC_API_KEY and NEW_RELIC_ACCOUNT_ID are required to run this test")
}
cfg := config.Config{
PersonalAPIKey: apiKey,
}
c, err := newrelic.New(newrelic.ConfigPersonalAPIKey(cfg.PersonalAPIKey))
if err != nil {
t.Fatalf("error creating integration test client")
}
a, err := strconv.Atoi(accountID)
if err != nil {
t.Fatalf("error parsing account ID")
}
entityGUID := createEntity(t, a, c)
r := NewNerdStorageStatusReporter(&c.NerdStorage)
status := NewStatusRollup([]StatusReporter{r})
defer deleteUserStatusCollection(t, c.NerdStorage)
defer deleteEntityStatusCollection(t, entityGUID, c.NerdStorage)
defer deleteEntity(t, entityGUID, c)
rec := types.Recipe{Name: "testName"}
evt := RecipeStatusEvent{
Recipe: rec,
EntityGUID: entityGUID,
}
err = r.ReportRecipeInstalled(status, evt)
require.NoError(t, err)
time.Sleep(1 * time.Second)
s, err := getUserStatusCollection(t, c.NerdStorage)
require.NoError(t, err)
require.NotEmpty(t, s)
s, err = getEntityStatusCollection(t, entityGUID, c.NerdStorage)
require.NoError(t, err)
require.NotEmpty(t, s)
}
func TestReportRecipeSucceeded_UserScopeOnly(t *testing.T) {
apiKey := os.Getenv("NEW_RELIC_API_KEY")
accountID := os.Getenv("NEW_RELIC_ACCOUNT_ID")
if apiKey == "" || accountID == "" {
t.Skipf("NEW_RELIC_API_KEY and NEW_RELIC_ACCOUNT_ID are required to run this test")
}
cfg := config.Config{
PersonalAPIKey: apiKey,
}
c, err := newrelic.New(newrelic.ConfigPersonalAPIKey(cfg.PersonalAPIKey))
if err != nil {
t.Fatalf("error creating integration test client")
}
a, err := strconv.Atoi(accountID)
if err != nil {
t.Fatalf("error parsing account ID")
}
entityGUID := createEntity(t, a, c)
r := NewNerdStorageStatusReporter(&c.NerdStorage)
status := NewStatusRollup([]StatusReporter{r})
defer deleteUserStatusCollection(t, c.NerdStorage)
defer deleteEntityStatusCollection(t, entityGUID, c.NerdStorage)
defer deleteEntity(t, entityGUID, c)
rec := types.Recipe{Name: "testName"}
evt := RecipeStatusEvent{
Recipe: rec,
}
err = r.ReportRecipeInstalled(status, evt)
require.NoError(t, err)
s, err := getUserStatusCollection(t, c.NerdStorage)
require.NoError(t, err)
require.NotEmpty(t, s)
s, err = getEntityStatusCollection(t, entityGUID, c.NerdStorage)
require.NoError(t, err)
require.Empty(t, s)
}
func getUserStatusCollection(t *testing.T, c nerdstorage.NerdStorage) ([]interface{}, error) {
getCollectionInput := nerdstorage.GetCollectionInput{
PackageID: packageID,
Collection: collectionID,
}
return c.GetCollectionWithUserScope(getCollectionInput)
}
func getEntityStatusCollection(t *testing.T, guid string, c nerdstorage.NerdStorage) ([]interface{}, error) {
getCollectionInput := nerdstorage.GetCollectionInput{
PackageID: packageID,
Collection: collectionID,
}
return c.GetCollectionWithEntityScope(guid, getCollectionInput)
}
func deleteUserStatusCollection(t *testing.T, c nerdstorage.NerdStorage) {
di := nerdstorage.DeleteCollectionInput{
Collection: collectionID,
PackageID: packageID,
}
ok, err := c.DeleteCollectionWithUserScope(di)
require.NoError(t, err)
require.True(t, ok)
}
func deleteEntityStatusCollection(t *testing.T, guid string, c nerdstorage.NerdStorage) {
di := nerdstorage.DeleteCollectionInput{
Collection: collectionID,
PackageID: packageID,
}
_, err := c.DeleteCollectionWithEntityScope(guid, di)
require.NoError(t, err)
}
func createEntity(t *testing.T, accountID int, c *newrelic.NewRelic) string {
i := workloads.CreateInput{
Name: "testEntity",
}
e, err := c.Workloads.CreateWorkload(accountID, i)
require.NoError(t, err)
return e.GUID
}
func deleteEntity(t *testing.T, guid string, c *newrelic.NewRelic) {
_, err := c.Workloads.DeleteWorkload(guid)
require.NoError(t, err)
}
| [
"\"NEW_RELIC_API_KEY\"",
"\"NEW_RELIC_ACCOUNT_ID\"",
"\"NEW_RELIC_API_KEY\"",
"\"NEW_RELIC_ACCOUNT_ID\""
]
| []
| [
"NEW_RELIC_ACCOUNT_ID",
"NEW_RELIC_API_KEY"
]
| [] | ["NEW_RELIC_ACCOUNT_ID", "NEW_RELIC_API_KEY"] | go | 2 | 0 | |
trash/project_secret.py | #!/usr/bin/env python3
"""Simple tools for getting/setting project-scoped secrets. Supports arbitrary JSON-compatible secrets.
Makes use of the "keyring" package to actually store/retrieve secrets.
.project-secret.json:
{
"version": "1.0",
"keyring_backend_package": "keyring.backends.SecretService.Keyring",
"namespace_template": "project-secret-${project_dir_hash}",
"project_dir": ".",
}
A key namespace for this project ensures that secrets in this project do not collide with secrets from other projects:
The key namespace is determined from (in order of preference):
1. If namespace is provided as a parameter to API calls or on the command line, it is used
2. If environment variable PROJECT_SECRET_NAMESPACE is defined and not empty, it is used
3. If the project root directory can be determined (see below), that directory is used to determine a configuration (see below).
The project root directory name is determined from (in order of preference):
1. If project_dir is provided as a parameter to API calls, it is used.
2. If environment variable PROJECT_SECRET_ROOT_DIR is set, it is used.
3. If a parent directory of the current directory or a provided starting directory contains a file '.project-secret.json', that directory is used.
4. If the git command is available and "git rev-parse --show-toplevel" succeeds, this is used (based on the current working dir).
In the directory-based cases, a SHA1 hash of the project directory name is used to define a unique namespace.
"""
from argparse import Namespace
from typing import Optional, List, Dict, Tuple, Type
from importlib_metadata import version
from typehints import JSONType
import os
import sys
import keyring
from keyring.backend import KeyringBackend
import keyring.errors
import subprocess
import hashlib
import json
from string import Template
def hash(s: str) -> str:
h = hashlib.sha1(s.encode("utf-8")).hexdigest()
return h
KeyringBackendClass = Type[KeyringBackend]
default_key_ring: Optional[KeyringBackend] = None
def set_default_key_ring(key_ring: Optional[KeyringBackend]=None):
    global default_key_ring
    default_key_ring = key_ring
set_default_keyring = set_default_key_ring  # alias; both spellings were used in the original
def get_default_key_ring(key_ring: Optional[KeyringBackend]=None):
if key_ring is None:
key_ring = default_key_ring
if key_ring is None:
key_ring = keyring.get_keyring()
return key_ring
def get_default_keyring_backend_package(key_ring: Optional[KeyringBackend]):
key_ring = get_default_key_ring(key_ring)
klass = key_ring.__class__
module = klass.__module__
return module + '.' + klass.__qualname__
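# For example (illustrative; assumes the SecretService backend is the active keyring):
#     get_default_keyring_backend_package(None) -> "keyring.backends.SecretService.Keyring"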
class ProjectSecretError(RuntimeError):
pass
class NoRootDirError(ProjectSecretError):
def __init__(self, starting_dir: Optional[str]=None):
        if starting_dir is None:
            msg = "A project secret root dir could not be determined. Please set PROJECT_SECRET_ROOT_DIR, or use a directory within the project secret root dir"
        else:
            msg = f"A project secret root dir could not be found at or above {starting_dir}. Please set PROJECT_SECRET_ROOT_DIR, or use a directory within the project secret root dir"
super().__init__(msg)
class NoNamespaceError(ProjectSecretError):
def __init__(self):
super().__init__("A project secret namespace could not be determined. Please set PROJECT_SECRET_NAMESPACE or PROJECT_SECRET_ROOT_DIR, or use directory within the project secret root dir")
class NoSecretError(ProjectSecretError):
def __init__(self, name: str, namespace: Optional[str]=None):
if namespace is None or namespace == '':
msg = f"The project secret \"{name}\" has not been set"
else:
msg = f"The project secret \"{name}\" has not been set in namespace \"{namespace}\""
super().__init__(msg)
class ProjectSecretConfig:
version: Optional[str] = None
keyring_backend_package: Optional[str] = None
namespace_template: Optional[str] = None
project_dir: Optional[str] = None
is_rel_project_dir: bool = True
config_file_path: Optional[str] = None
key_ring: Optional[KeyringBackend] = None
DEFAULT_VERSION = "1.0"
DEFAULT_FILENAME = ".project-secret.json"
    DEFAULT_NAMESPACE_TEMPLATE = "project-secret-${project_dir_hash}"
def __init__(
self,
keyring_backend_package: Optional[str] = None,
namespace_template: Optional[str] = None,
project_dir: Optional[str] = None,
is_rel_project_dir: bool = True,
config_file_path: Optional[str] = None,
):
self.keyring_backend_package = keyring_backend_package
self.namespace_template = namespace_template
self.is_rel_project_dir = is_rel_project_dir
if not project_dir is None:
project_dir = os.path.abspath(project_dir)
self.project_dir = project_dir
if not config_file_path is None:
config_file_path = os.path.abspath(config_file_path)
self.config_file_path = config_file_path
def clone(self) -> 'ProjectSecretConfig':
result = ProjectSecretConfig()
result.version = self.version
result.keyring_backend_package = self.keyring_backend_package
result.namespace_template = self.namespace_template
result.project_dir = self.project_dir
result.is_rel_project_dir = self.is_rel_project_dir
result.config_file_path = self.config_file_path
result.key_ring = self.key_ring
return result
def save(self, config_file_path: Optional[str] = None):
"""Writes the configuration file out
{
"version": "1.0",
"keyring_backend_package": "keyring.backends.SecretService.Keyring",
"namespace_template": "project-secret-${project_dir_hash}",
"project_dir": ".",
}
Args:
config_file_path (Optional[str], optional): The path to the config file, if not already set on this object. Defaults to None.
Raises:
ValueError: If a config file path could not be determined
"""
if config_file_path is None:
config_file_path = self.config_file_path
if config_file_path is None:
raise ValueError("config_file_path is required")
config_file_path = os.path.abspath(config_file_path)
        if os.path.isdir(config_file_path):
config_dir = config_file_path
config_file_path = os.path.join(config_dir, self.DEFAULT_FILENAME)
else:
config_dir = os.path.dirname(config_file_path)
            if not os.path.isdir(config_dir):
raise RuntimeError(f"config file path {config_file_path}: parent directory does not exist")
version = self.version
if version is None:
version = self.DEFAULT_VERSION
project_dir = self.project_dir
rel_project_dir: Optional[str] = None
is_rel_project_dir = self.is_rel_project_dir
if not project_dir is None:
project_dir = os.path.abspath(project_dir)
if is_rel_project_dir:
rel_project_dir = os.path.relpath(project_dir, config_dir)
else:
rel_project_dir = project_dir
namespace_template = self.namespace_template
if namespace_template is None:
namespace_template = self.DEFAULT_NAMESPACE_TEMPLATE
keyring_backend_package = self.keyring_backend_package
if keyring_backend_package == '':
keyring_backend_package = None
data = dict(version=version)
if not keyring_backend_package is None:
            data.update(keyring_backend_package=keyring_backend_package)
if not namespace_template is None:
data.update(namespace_template=namespace_template)
if not rel_project_dir is None and rel_project_dir != '.' and rel_project_dir != config_dir:
data.update(project_dir=rel_project_dir)
with open(config_file_path, 'w') as f:
            print(json.dumps(data, sort_keys=True, indent=2), file=f)
self.version = version
self.keyring_backend_package = keyring_backend_package
self.namespace_template = namespace_template
self.project_dir = project_dir
        self.is_rel_project_dir = rel_project_dir is None or not os.path.isabs(rel_project_dir)
self.config_file_path = config_file_path
def load(self, starting_dir: Optional[str] = None, config_file_path: Optional[str] = None) -> 'ProjectSecretConfig':
if not config_file_path is None:
config_file_path = os.path.abspath(config_file_path)
if os.path.isdir(config_file_path):
config_file_path = os.path.join(config_file_path, self.DEFAULT_FILENAME)
else:
if starting_dir is None:
starting_dir = '.'
starting_dir = os.path.abspath(starting_dir)
cdir = starting_dir
if os.path.isdir(cdir):
while True:
cpath = os.path.join(cdir, self.DEFAULT_FILENAME)
if os.path.exists(cpath):
config_file_path = cpath
break
ndir = os.path.dirname(cdir)
if ndir == cdir:
break
cdir = ndir
if config_file_path is None:
                raise ProjectSecretError(f"Could not find {self.DEFAULT_FILENAME} in parent directory chain of {starting_dir}")
with open(config_file_path) as f:
json_text = f.read()
data = json.loads(json_text)
config_dir = os.path.dirname(config_file_path)
version = data.get('version', None)
if version is None:
raise RuntimeError("Invalid project-secrets config file--no version")
keyring_backend_package = data.get('keyring_backend_package', None)
namespace_template = data.get("namespace_template", None)
rel_project_dir = data.get("project_dir", None)
if rel_project_dir is None:
is_rel_project_dir = True
project_dir = config_dir
else:
            is_rel_project_dir = not os.path.isabs(rel_project_dir)
project_dir = os.path.abspath(os.path.join(config_dir, rel_project_dir))
self.version = version
self.keyring_backend_package = keyring_backend_package
self.namespace_template = namespace_template
self.project_dir = project_dir
self.is_rel_project_dir = is_rel_project_dir
self.config_file_path = config_file_path
return self
@property
def project_dir_hash(self) -> Optional[str]:
project_dir = self.project_dir
if project_dir is None:
return None
result = hash(os.path.abspath(project_dir))
return result
@property
def namespace(self) -> Optional[str]:
project_dir_hash = self.project_dir_hash
if project_dir_hash is None:
return None
namespace_template = self.namespace_template
if namespace_template is None:
namespace_template = self.DEFAULT_NAMESPACE_TEMPLATE
tmpl = Template(namespace_template)
result = tmpl.safe_substitute(dict(project_dir_hash=project_dir_hash))
return result
def get_keyring(self) -> KeyringBackend:
result = self.key_ring
if result is None:
if self.keyring_backend_package is None:
result = get_default_key_ring()
else:
result = keyring.core.load_keyring(self.keyring_backend_package)
if result is None:
            raise ProjectSecretError("Unable to load keyring")
return result
def get_config(starting_dir: Optional[str] = None) -> ProjectSecretConfig:
return ProjectSecretConfig().load(starting_dir=starting_dir)
def get_key_ring(key_ring: Optional[KeyringBackend] = None) -> KeyringBackend:
    # Return the given keyring if one was provided, otherwise fall back to the module default.
    if key_ring is None:
        key_ring = get_default_key_ring()
    return key_ring
'''
def get_git_root_dir(starting_dir: Optional[str] = None) -> Optional[str]:
result: Optional[str] = None
if not starting_dir is None and starting_dir == '':
starting_dir = None
cmd = ['git', 'rev-parse', '--show-toplevel']
try:
with subprocess.Popen(cmd, cwd=starting_dir, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as proc:
(stdout_bytes, stderr_bytes) = proc.communicate()
exit_code = proc.returncode
if exit_code == 0:
stdout_s = stdout_bytes.decode('utf-8').rstrip()
if stdout_s != '':
result = stdout_s
else:
# git returned an error. Probably not in a git repo, but we should make sure
stderr_s = stderr_bytes.decode('utf-8').split['\n'][0].rstrip()
if not stderr_s.startswith("fatal: not a git repository"):
# Unexpected error
raise subprocess.CalledProcessError(exit_code, cmd)
except FileNotFoundError:
# "git" is not in PATH
pass
return result
def get_secret_project_dir(project_dir: Optional[str]=None, starting_dir: Optional[str]=None) -> Optional[str]:
if project_dir is None:
project_dir = os.environ.get('PROJECT_SECRET_ROOT_DIR', None):
if not project_dir is None and project_dir == '':
project_dir = None
if project_dir is None:
project_dir = get_git_root_dir(starting_dir=starting_dir)
return project_dir
def get_secret_namespace(
namespace: Optional[str]=None,
project_dir: Optional[str]=None,
starting_dir: Optional[str]=None,
require: bool=True,
) -> Optional[str]:
if namespace is None:
namespace = os.environ.get('PROJECT_SECRET_NAMESPACE', None):
if not namespace is None and namespace == '':
namespace = None
if namespace is None:
dirname = get_secret_project_dir(project_dir=project_dir, starting_dir=starting_dir)
if not dirname is None:
namespace = f"project-{hash(dirname)}"
if require and namespace is None:
raise NoSecretNamespaceError()
'''
class Secrets:
CFG_FILENAME = ".project-secret.json"
NAMESPACE_PREFIX = "project-secret-"
key_ring: Optional[KeyringBackend] = None
namespace: Optional[str] = None
@classmethod
def hash(cls, s: str) -> str:
h = hashlib.sha1(s.encode("utf-8")).hexdigest()
return h
@classmethod
def get_project_root_dir(cls, starting_dir: Optional[str]=None, project_root_dir: Optional[str]=None, require: bool=True) -> Optional[str]:
if project_root_dir is None:
project_root_dir = os.environ.get("PROJECT_SECRET_ROOT_DIR")
if project_root_dir == '':
project_root_dir = None
        if project_root_dir is None:
if starting_dir is None:
starting_dir = '.'
starting_dir = os.path.abspath(starting_dir)
            if os.path.isdir(starting_dir):
cdir = starting_dir
while True:
cfg_file = os.path.join(cdir, cls.CFG_FILENAME)
if os.path.exists(cfg_file):
project_root_dir = cdir
break
ndir = os.path.dirname(cdir)
if ndir == cdir:
break # we have reached the system root dir
cdir = ndir
if require and project_root_dir is None:
raise NoRootDirError(starting_dir=starting_dir)
        if project_root_dir is not None:
            project_root_dir = os.path.abspath(project_root_dir)
return project_root_dir
@classmethod
def get_project_namespace(
cls,
namespace: Optional[str]=None,
project_root_dir: Optional[str]=None,
starting_dir: Optional[str]=None,
require: bool=True,
) -> Optional[str]:
if namespace is None:
namespace = os.environ.get('PROJECT_SECRET_NAMESPACE', None)
if namespace == '':
namespace = None
if namespace is None:
project_root_dir = cls.get_project_root_dir(starting_dir=starting_dir, project_root_dir=project_root_dir, require=require)
if not project_root_dir is None:
namespace = f"project-secret-{cls.hash(project_root_dir)}"
        if require and namespace is None:
            raise NoNamespaceError()
        return namespace
def __init__(
self,
starting_dir: Optional[str] = None,
namespace: Optional[str] = None,
project_root_dir: Optional[str] = None,
        key_ring: Optional[KeyringBackend] = None,
    ):
        namespace = self.get_project_namespace(namespace=namespace, project_root_dir=project_root_dir, starting_dir=starting_dir, require=True)
self.namespace = namespace
if key_ring is None:
key_ring = get_default_key_ring()
self.key_ring = key_ring
def get_secret_json_text_if_exists(
self,
name: str,
namespace: Optional[str]=None,
project_dir: Optional[str]=None,
starting_dir: Optional[str]=None,
require_namespace: bool=True,
key_ring: Optional[KeyringBackend]=None
) -> Optional[str]:
        namespace = self.get_project_namespace(namespace=namespace, project_root_dir=project_dir, starting_dir=starting_dir, require=require_namespace)
if namespace is None:
return None
key_ring = get_key_ring(key_ring)
json_text = key_ring.get_password(namespace, name)
        return json_text
def get_secret_if_exists(
self,
name: str,
namespace: Optional[str]=None,
project_dir: Optional[str]=None,
starting_dir: Optional[str]=None,
require_namespace: bool=True,
key_ring: Optional[KeyringBackend]=None
) -> Tuple[JSONType, bool]:
        json_text = self.get_secret_json_text_if_exists(
name=name, namespace=namespace, project_dir=project_dir,
starting_dir=starting_dir, require_namespace=require_namespace, key_ring=key_ring
)
if json_text is None:
return (None, False)
result = json.loads(json_text)
return (result, True)
def get_secret(
self,
name: str,
namespace: Optional[str]=None,
project_dir: Optional[str]=None,
starting_dir: Optional[str]=None,
require: bool=True,
require_namespace: bool=True,
key_ring: Optional[KeyringBackend]=None
) -> JSONType:
result, exists = get_secret_if_exists(
name=name, namespace=namespace, project_dir=project_dir,
starting_dir=starting_dir, require_namespace=require_namespace, key_ring=key_ring
)
if require and not exists:
raise NoSecretError(name, namespace)
return result
def secret_exists(
self,
name: str,
namespace: Optional[str]=None,
project_dir: Optional[str]=None,
starting_dir: Optional[str]=None,
require_namespace: bool=True,
key_ring: Optional[KeyringBackend]=None
) -> bool:
json_text = get_secret_json_text_if_exists(
name=name, namespace=namespace, project_dir=project_dir,
starting_dir=starting_dir, require_namespace=require_namespace, key_ring=key_ring
)
return not json_text is None
def set_secret_json_text(
self,
name: str,
json_text: str,
namespace: Optional[str]=None,
project_dir: Optional[str]=None,
starting_dir: Optional[str]=None,
key_ring: Optional[KeyringBackend]=None,
validate_json: bool = True,
):
if name is None or name == "":
raise ValueError("Empty secret name key is not permitted")
if json_text is None or not isinstance(json_text, str):
raise ValueError("A string is required for JSON text")
if validate_json:
json.loads(json_text)
namespace = get_secret_namespace(namespace=namespace, project_dir=project_dir, starting_dir=starting_dir, require=True)
key_ring = get_key_ring(key_ring)
key_ring.set_password(namespace, name, json_text)
def set_secret(
self,
name: str,
value: JSONType,
namespace: Optional[str]=None,
project_dir: Optional[str]=None,
starting_dir: Optional[str]=None,
key_ring: Optional[KeyringBackend]=None
):
json_text = json.dumps(value, sort_keys=True)
set_secret_json_text(
name=name,
json_text = json_text,
namespace = namespace,
project_dir = project_dir,
starting_dir = starting_dir,
key_ring = key_ring,
validate_json = False # No need to validate since we just serialized to JSON
)
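# Illustrative round-trip (names and values assumed), assuming a keyring backend is available:
#     secrets = Secrets(starting_dir=".")
#     secrets.set_secret("db", {"user": "app", "password": "hunter2"})
#     secrets.get_secret("db")   # -> {"user": "app", "password": "hunter2"}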
def delete_secret(
self,
name: str,
namespace: Optional[str]=None,
project_dir: Optional[str]=None,
starting_dir: Optional[str]=None,
require: bool=True,
require_namespace: bool=True,
key_ring: Optional[KeyringBackend]=None,
):
require_namespace = require_namespace or require
namespace = get_secret_namespace(namespace=namespace, project_dir=project_dir, starting_dir=starting_dir, require=require_namespace)
if not namespace is None:
key_ring = get_key_ring(key_ring)
try:
key_ring.delete_password(namespace, name)
except keyring.errors.PasswordDeleteError as ex:
if require:
raise NoSecretError(name, namespace) from ex
def __contains__(self, item):
return self.v.__contains__(item)
def __getitem__(self, key: str) -> JSONType:
try:
result = self.get_secret(key, require=True)
except NoSecretError:
raise KeyError(key)
return result
def main(argv: Optional[List[str]]=None):
if __name__ == '__main__':
pctx = PulumiContext()
exit_code = pctx.pulumi_call()
if exit_code != 0:
sys.exit(exit_code)
| []
| []
| [
"PROJECT_SECRET_ROOT_DIR",
"PROJECT_SECRET_NAMESPACE"
]
| [] | ["PROJECT_SECRET_ROOT_DIR", "PROJECT_SECRET_NAMESPACE"] | python | 2 | 0 | |
behave/features/steps/user_routes.py | import os
import re
import requests
from behave import given, then, when
def get_plus_string(group_name):
# To keep things short I've truncated the stage name
# in the plus string from the longer name used in the
# ENVIRONMENT env var
stages = {"testing": "test", "staging": "stage"}
env = stages[os.environ.get("ENVIRONMENT", "testing")]
# for a stupid reason I created all the test users
# with a slightly different naming convention
groups = {
"standard-download": "user-download",
"standard-upload": "user-upload",
}
if group_name in groups:
group_suffix = groups[group_name]
else:
group_suffix = group_name
plus_string = f"+c19-{env}-{group_suffix}"
return plus_string
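# Illustrative example (values assumed): with ENVIRONMENT=staging and
# group_name="standard-download" this returns "+c19-stage-user-download".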
def username_for_group_name(group_name):
plus_string = get_plus_string(group_name)
root_email = os.environ["E2E_STAGING_USERNAME"]
return root_email.replace("@", f"{plus_string}@")
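# Illustrative example (address assumed): with ENVIRONMENT=staging and
# E2E_STAGING_USERNAME="[email protected]", username_for_group_name("standard-download")
# returns "[email protected]".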
@given("the credentials")
def credentials_step(context):
context.browser.header_overrides = {
"e2e_username": os.environ["E2E_STAGING_USERNAME"],
"e2e_password": os.environ["E2E_STAGING_PASSWORD"],
}
@given('credentials for the "{group_name}" group')
def group_credentials_step(context, group_name):
e2e_username = username_for_group_name(group_name)
context.browser.header_overrides = {
"e2e_username": e2e_username,
"e2e_password": os.environ["E2E_STAGING_PASSWORD"],
}
@when("oauth username is set")
def login_username_step(context):
"""
The Cognito sign-in form is rendered in the HTML twice for different screen sizes.
The small-screen version appears first in the HTML but is hidden by CSS.
Without the .visible-md class the selector resolves to the hidden form element
and cannot interact with the form.
"""
elem = context.browser.find_element_by_css_selector(
".visible-md .modal-body #signInFormUsername"
)
elem.click()
elem.send_keys(context.browser.header_overrides["e2e_username"])
@when("oauth password is set")
def login_password_step(context):
"""
The Cognito sign-in form is rendered in the HTML twice for different screen sizes.
The small-screen version appears first in the HTML but is hidden by CSS.
Without the .visible-md class the selector resolves to the hidden form element
and cannot interact with the form.
"""
elem = context.browser.find_element_by_css_selector(
".visible-md .modal-body #signInFormPassword"
)
elem.click()
elem.send_keys(context.browser.header_overrides["e2e_password"])
@when("oauth form is submitted")
def login_submit_step(context):
"""
The Cognito sign-in form is rendered in the HTML twice for different screen sizes.
The small-screen version appears first in the HTML but is hidden by CSS.
Without the .visible-md class the selector resolves to the hidden form element
and cannot interact with the form.
"""
elem = context.browser.find_element_by_css_selector(
".visible-md .modal-body #signInFormPassword"
)
elem.submit()
@when("oauth sign in button is clicked")
def login_submit_button_click_step(context):
"""
The Cognito sign-in form is rendered in the HTML twice for different screen sizes.
The small-screen version appears first in the HTML but is hidden by CSS.
Without the .visible-md class the selector resolves to the hidden form element
and cannot interact with the form.
"""
elem = context.browser.find_element_by_css_selector(
".visible-md .modal-body .btn.btn-primary.submitButton-customizable"
)
elem.submit()
@when("you navigate to user home")
def user_home_step(context):
url = os.environ["E2E_STAGING_ROOT_URL"]
context.browser.get(url)
context.logger.debug("***** LOG PAGE SOURCE *****")
context.logger.debug(context.browser.page_source)
context.logger.debug("***** END PAGE SOURCE *****")
@when('you navigate to "{path}"')
def user_path_step(context, path):
url = os.environ["E2E_STAGING_ROOT_URL"]
context.browser.get(f"{url}{path}")
context.logger.debug("***** LOG PAGE SOURCE *****")
context.logger.debug(context.browser.page_source)
context.logger.debug("***** END PAGE SOURCE *****")
@then("you get redirected to user home")
def user_redirect_home_step(context):
url = os.environ["E2E_STAGING_ROOT_URL"]
assert context.browser.current_url == url
@then('you get redirected to route: "{route}"')
def user_redirect_to_route_step(context, route):
url = re.sub("/$", route, os.environ["E2E_STAGING_ROOT_URL"])
assert context.browser.current_url == url
@then('the content of element with selector "{selector}" equals "{title}"')
def content_equals_step(context, selector, title):
elem = context.browser.find_element_by_css_selector(selector).text
assert elem == title
@then('the content of element with selector "{selector}" contains "{part}"')
def content_contains_step(context, selector, part):
elem = context.browser.find_element_by_css_selector(selector).text
assert part in elem
@then('the content of element with selector "{selector}" contains username')
def content_contains_username_step(context, selector):
elem = context.browser.find_element_by_css_selector(selector).text
part = context.browser.header_overrides["e2e_username"]
assert part in elem
@then("we have a session cookie")
def session_cookie_step(context):
cookie = context.browser.get_cookie("session")
assert cookie is not None
@when("you login with these credentials")
def login_with_credentials(context):
context.execute_steps(
"""
When you navigate to user home
When you click on "#main-content .covid-transfer-signin-button"
Then wait "5" seconds
When oauth username is set
When oauth password is set
When oauth form is submitted
Then wait "5" seconds
Then the content of element with selector"""
+ """ ".covid-transfer-page-title" contains "COVID-19 Data Transfer"
Then the content of element with selector"""
+ """ "#main-content .covid-transfer-email" contains username"""
)
@then('you download link from "{selector}"')
def download_files(context, selector):
elem = context.browser.find_element_by_css_selector(selector)
link_target = elem.get_attribute("href")
cookies = context.browser.get_cookies()
cookie_dict = {item["name"]: item["value"] for item in cookies}
download_response = requests.get(link_target, cookies=cookie_dict)
assert "test,the,csv" in download_response.text
assert download_response.status_code == 200
| []
| []
| [
"ENVIRONMENT",
"E2E_STAGING_PASSWORD",
"E2E_STAGING_ROOT_URL",
"E2E_STAGING_USERNAME"
]
| [] | ["ENVIRONMENT", "E2E_STAGING_PASSWORD", "E2E_STAGING_ROOT_URL", "E2E_STAGING_USERNAME"] | python | 4 | 0 | |
todoapp/backend/todoapp/asgi.py | """
ASGI config for todoapp project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "todoapp.settings")
application = get_asgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
exampleapp/wsgi.py | """
WSGI config for exampleapp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'exampleapp.settings')
application = get_wsgi_application()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
openmdao.main/src/openmdao/main/test/test_egg_save.py | """
Test saving and loading of simulations as eggs.
"""
import cPickle
import glob
import logging
import os.path
import pkg_resources
import shutil
import subprocess
import sys
import unittest
import nose
from enthought.traits.api import Callable
from openmdao.main.api import Assembly, Component, Container, SAVE_PICKLE, \
SAVE_CPICKLE, set_as_top
from openmdao.main.filevar import FileMetadata
from openmdao.main.pkg_res_factory import PkgResourcesFactory
from openmdao.main.eggchecker import check_save_load
from openmdao.lib.datatypes.api import Int, Bool, List, Str, Array, \
File
from openmdao.util.testutil import assert_raises, find_python, \
make_protected_dir
# pylint: disable-msg=E1101,E1103
# "Instance of <class> has no <attr> member"
EXTERNAL_FILES = ('xyzzy', '../sub/data2', 'hello', '../sub/data4')
SOURCE_INIT = False # Used to verify __init__() gets called when expected.
SINK_INIT = False
# Various Pickle issues arise only when this test runs as the main module.
# This is used to detect when we're the main module or not.
MODULE_NAME = __name__
# Set local dir in case we're running in a different directory.
PY_DIR = pkg_resources.resource_filename('openmdao.main', 'test')
# Observations made by observer().
OBSERVATIONS = []
# Version counter to ensure we know which egg we're dealing with.
EGG_VERSION = 0
def next_egg():
""" Return next egg version. """
global EGG_VERSION
EGG_VERSION += 1
return str(EGG_VERSION)
class Source(Assembly):
"""
Produces files. A fair amount of stuff happens in Component.save_to_egg()
in relation to handling external files and file variables.
"""
write_files = Bool(True, iotype='in')
text_data = Str(iotype='in')
text_file = File(path='source.txt', iotype='out')
def __init__(self, *args, **kwargs):
super(Source, self).__init__(*args, **kwargs)
global SOURCE_INIT
SOURCE_INIT = True
def configure(self):
""" Called once we have a valid hierarchy above us. """
self.add('sub', Subcontainer())
self.create_passthrough('sub.binary_file')
# Some custom objects that must be restored.
self.obj_list = [DataObj(i) for i in range(3)]
# External file that doesn't exist at time of save.
self.external_files.append(FileMetadata(path='does-not-exist'))
self.directory = self.get_abs_directory() # Force absolute.
# Absolute external file that exists at time of save.
path = os.path.join(self.directory, EXTERNAL_FILES[0])
with open(path, 'w') as out:
out.write('Twisty narrow passages.\n')
self.external_files.append(FileMetadata(path=path, input=True,
constant=True))
# Absolute external file that exists at time of save, in separate tree.
path = os.path.join(self.directory, EXTERNAL_FILES[1])
leaf = os.path.dirname(path)
if not os.path.exists(leaf):
os.makedirs(leaf)
with open(path, 'w') as out:
out.write('Some external data.\n')
self.external_files.append(FileMetadata(path=path))
# Relative external file that exists at time of save.
with self.dir_context:
path = EXTERNAL_FILES[2]
with open(path, 'w') as out:
out.write('Hello world!\n')
self.external_files.append(FileMetadata(path=path))
# Relative external file that exists at time of save, in separate tree.
with self.dir_context:
path = EXTERNAL_FILES[3]
leaf = os.path.dirname(path)
if not os.path.exists(leaf):
os.makedirs(leaf)
with open(path, 'w') as out:
out.write('Some more external data.\n')
self.external_files.append(FileMetadata(path=path))
def execute(self):
""" Write test data to files. """
if self.write_files:
cwd = os.getcwd()
self._logger.debug("opening file '%s' in %s" %
(self.text_file.path, cwd))
with open(self.text_file.path, 'w') as out:
out.write(self.text_data)
self._logger.debug("opening file '%s' in %s" %
(self.sub.binary_file.path, cwd))
with open(self.sub.binary_file.path, 'wb') as out:
cPickle.dump(self.sub.binary_data, out, 2)
class Subcontainer(Container):
""" Just a subcontainer for Source. """
binary_data = Array(dtype='d', iotype='in')
binary_file = File(path=os.path.join('..', 'sub', 'source.bin'),
iotype='out', binary=True)
class DataObj(object):
""" Just a custom class for objects to save & reload. """
def __init__(self, data):
self.data = data
class Sink(Component):
""" Consumes files. """
text_data = Str(iotype='out')
binary_data = Array(dtype='d', iotype='out')
text_file = File(iotype='in')
binary_file = File(iotype='in')
executions = Int(0, iotype='in',
desc='Count of Oddball instance_method() calls.')
def __init__(self, *args, **kwargs):
super(Sink, self).__init__(*args, **kwargs)
global SINK_INIT
SINK_INIT = True
def execute(self):
""" Read test data from files. """
with self.text_file.open() as inp:
self.text_data = inp.read()
with self.binary_file.open() as inp:
self.binary_data = cPickle.load(inp)
class Oddball(Assembly):
"""
Just a component that needs a separate directory to be created.
Also has some attributes used to exercise some Pickle issues.
"""
# FIXME: I tried the built-in trait types of Callable, Method, and Function
# for these two sockets and couldn't get them to work. We may have to
# create new Variables for these...
#function_socket = Slot(Callable, none_allowed=True,
# desc='Just something to call.', required=False)
#method_socket = Slot(Callable, none_allowed=True,
# desc='Just something to call.', required=False)
executions = Int(0, iotype='out', desc='Counts instance_method() calls.')
def configure(self):
self.add('oddcomp', OddballComponent())
self.add('oddcont', OddballContainer())
self.driver.workflow.add('oddcomp')
self.thing_to_call = self.instance_method
self.list_to_call = [[self.instance_method, ()],
[Assembly.get_pathname, (self,)]]
# Generate IMHolder with self == None.
self.function_socket = os.getpid
self.method_socket = self.instance_method
self.peer_class = Source # Check that class in __main__ is handled.
self.scratch_tuple = (1, 2)
def execute(self):
""" Call stuff. Empty sockets are clumsy. """
if self.thing_to_call:
self._logger.debug('thing_to_call returned %s', self.thing_to_call())
for thing, args in self.list_to_call:
self._logger.debug('list-thing returned %s', thing(*args))
try:
self._logger.debug('function_socket returned %s', self.function_socket())
except RuntimeError, exc:
if not str(exc).find('empty'):
raise exc
try:
self._logger.debug('method_socket returned %s', self.method_socket())
except RuntimeError, exc:
if not str(exc).find('empty'):
raise exc
def instance_method(self):
""" Called by execute(). """
self.executions += 1
return self.executions
@staticmethod
def static_method():
""" This won't pickle. """
return None
class OddballComponent(Component):
""" Just a subcomponent for Oddball to test nested entry points. """
def __init__(self):
super(OddballComponent, self).__init__()
# Some custom objects that must be restored.
self.obj_list = [DataObj(i) for i in range(3)]
class OddballContainer(Container):
""" Just a subcontainer for Oddball to test nested entry points. """
def __init__(self):
super(OddballContainer, self).__init__()
# Some custom objects that must be restored.
self.obj_list = [DataObj(i) for i in range(3)]
def observer(state, string, file_fraction, byte_fraction):
""" Observe progress. """
if state != 'analyze': # 'analyze' is sporadic due to re-use of analyses.
OBSERVATIONS.append((state, string, file_fraction, byte_fraction))
return True
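# Illustrative note (values assumed): each recorded observation is a tuple of
# (state, string, file_fraction, byte_fraction), e.g.
# ('add', 'Egg_TestModel/Source.pickle', 0.5, 0.0).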
class Model(Assembly):
""" Transfer files from producer to consumer. """
def configure(self):
self.add('Source', Source(directory='Source'))
self.add('Oddball', Oddball(directory='Oddball'))
self.add('Sink', Sink(directory='Sink'))
self.driver.workflow.add(['Source','Oddball','Sink'])
self.connect('Source.text_file', 'Sink.text_file')
self.connect('Source.binary_file', 'Sink.binary_file')
self.connect('Oddball.executions', 'Sink.executions')
self.Source.text_data = 'oiuyoiuyoiuy'
self.Source.sub.binary_data = [3.14159, 2.781828, 42]
class TestCase(unittest.TestCase):
""" Test saving and loading of simulations as eggs. """
def setUp(self):
""" Called before each test in this class. """
self.model = set_as_top(Model(directory='Egg'))
self.model.name = 'Egg_TestModel'
self.child_objs = [self.model.Source, self.model.Sink,
self.model.Oddball, self.model.Oddball.oddcomp,
self.model.Oddball.oddcont]
self.egg_name = None
def tearDown(self):
""" Called after each test in this class. """
self.model.pre_delete() # Paranoia. Only needed by NPSS I think.
self.model = None
for path in glob.glob('Egg_TestModel*.egg'):
os.remove(path)
if os.path.exists('Egg'):
# Wonderful Windows sometimes doesn't remove...
shutil.rmtree('Egg', onerror=self.onerror)
def onerror(self, function, path, excinfo):
""" Called by shutil.rmtree() if 'Egg' tree removal has problems. """
logging.error('onerror: function %s, path %s, excinfo %s',
function, path, excinfo)
if function == os.rmdir:
# On Windows, sometimes get 'Directory not empty'.
logging.error(' files: %s', os.listdir(path))
def save_load(self):
""" Save to egg and reload. """
global SOURCE_INIT, SINK_INIT
# Verify initial state.
self.assertEqual(SOURCE_INIT, True)
self.assertEqual(SINK_INIT, True)
self.assertNotEqual(self.model.Sink.text_data,
self.model.Source.text_data)
self.assertNotEqual(self.model.Sink.binary_data,
self.model.Source.sub.binary_data)
for path in EXTERNAL_FILES:
path = os.path.join(self.model.Source.get_abs_directory(), path)
if not os.path.exists(path):
self.fail("pre-save path '%s' does not exist" % path)
for i in range(3):
self.assertEqual(self.model.Source.obj_list[i].data, i)
self.assertEqual(self.model.Sink.executions, 0)
# Save to egg.
global OBSERVATIONS
OBSERVATIONS = []
egg_info = self.model.save_to_egg(self.model.name, next_egg(),
py_dir=PY_DIR,
child_objs=self.child_objs,
observer=observer)
self.egg_name = egg_info[0]
# Check observations.
expected = [
('add', 'EGG-INFO/PKG-INFO'),
('add', 'EGG-INFO/dependency_links.txt'),
('add', 'EGG-INFO/entry_points.txt'),
('add', 'EGG-INFO/not-zip-safe'),
('add', 'EGG-INFO/requires.txt'),
('add', 'EGG-INFO/openmdao_orphans.txt'),
('add', 'EGG-INFO/top_level.txt'),
('add', 'EGG-INFO/SOURCES.txt'),
('add', 'Egg_TestModel/Egg_TestModel.pickle'),
('add', 'Egg_TestModel/Egg_TestModel_loader.py'),
('add', 'Egg_TestModel/Oddball.pickle'),
('add', 'Egg_TestModel/Oddball_loader.py'),
('add', 'Egg_TestModel/Oddball_oddcomp.pickle'),
('add', 'Egg_TestModel/Oddball_oddcomp_loader.py'),
('add', 'Egg_TestModel/Oddball_oddcont.pickle'),
('add', 'Egg_TestModel/Oddball_oddcont_loader.py'),
('add', 'Egg_TestModel/Sink.pickle'),
('add', 'Egg_TestModel/Sink_loader.py'),
('add', 'Egg_TestModel/Source.pickle'),
('add', 'Egg_TestModel/Source/hello'),
('add', 'Egg_TestModel/Source/xyzzy'),
('add', 'Egg_TestModel/Source_loader.py'),
('add', 'Egg_TestModel/__init__.py'),
('add', 'Egg_TestModel/sub/data2'),
('add', 'Egg_TestModel/sub/data4'),
]
# Add our file if we're not considered part of an egg.
if sys.modules[self.__module__].__file__.find('.egg') < 0:
expected.append(('add', 'Egg_TestModel/test_egg_save.py'))
expected.append(('complete', 'Egg_TestModel-1.2.3-py%d.%d.egg' % sys.version_info[:2]))
self.assertEqual(len(OBSERVATIONS), len(expected))
for i, observation in enumerate(OBSERVATIONS):
state, string, file_fraction, byte_fraction = observation
self.assertEqual(state, expected[i][0])
if expected[i][1].endswith('.egg'): # Unique versions mess this up.
self.assertEqual(string.startswith(self.model.name), True)
self.assertEqual(string.endswith('.egg'), True)
else:
self.assertEqual(string.replace('\\','/'), expected[i][1])
self.assertEqual(file_fraction, float(i)/float(len(expected)-1))
# Run and verify correct operation.
self.model.run()
self.assertEqual(self.model.Sink.text_data,
self.model.Source.text_data)
self.assertEqual(True,
all(self.model.Sink.binary_data==self.model.Source.sub.binary_data))
self.assertEqual(self.model.Sink.binary_file.binary, True)
self.assertEqual(self.model.Sink.executions, 3)
# Restore in test directory.
orig_dir = os.getcwd()
test_dir = 'EggTest'
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
os.mkdir(test_dir)
os.chdir(test_dir)
try:
# Clear flags to detect if loading calls __init__.
SOURCE_INIT = False
SINK_INIT = False
# Load from saved initial state in egg.
self.model.pre_delete()
egg_path = os.path.join('..', self.egg_name)
OBSERVATIONS = []
self.model = Component.load_from_eggfile(egg_path,
observer=observer)
self.model.directory = os.path.join(os.getcwd(), self.model.name)
# Check observations.
expected = [
('extract', 'EGG-INFO/PKG-INFO'),
('extract', 'EGG-INFO/dependency_links.txt'),
('extract', 'EGG-INFO/entry_points.txt'),
('extract', 'EGG-INFO/not-zip-safe'),
('extract', 'EGG-INFO/requires.txt'),
('extract', 'EGG-INFO/openmdao_orphans.txt'),
('extract', 'EGG-INFO/top_level.txt'),
('extract', 'EGG-INFO/SOURCES.txt'),
('extract', 'Egg_TestModel/Egg_TestModel.pickle'),
('extract', 'Egg_TestModel/Egg_TestModel_loader.py'),
('extract', 'Egg_TestModel/Oddball.pickle'),
('extract', 'Egg_TestModel/Oddball_loader.py'),
('extract', 'Egg_TestModel/Oddball_oddcomp.pickle'),
('extract', 'Egg_TestModel/Oddball_oddcomp_loader.py'),
('extract', 'Egg_TestModel/Oddball_oddcont.pickle'),
('extract', 'Egg_TestModel/Oddball_oddcont_loader.py'),
('extract', 'Egg_TestModel/Sink.pickle'),
('extract', 'Egg_TestModel/Sink_loader.py'),
('extract', 'Egg_TestModel/Source.pickle'),
('extract', 'Egg_TestModel/Source/hello'),
('extract', 'Egg_TestModel/Source/xyzzy'),
('extract', 'Egg_TestModel/Source_loader.py'),
('extract', 'Egg_TestModel/__init__.py'),
('extract', 'Egg_TestModel/sub/data2'),
('extract', 'Egg_TestModel/sub/data4'),
]
# Add our file if we're not considered part of an egg.
if sys.modules[self.__module__].__file__.find('.egg') < 0:
expected.append(('extract', 'Egg_TestModel/test_egg_save.py'))
expected.append(('complete', None))
self.assertEqual(len(OBSERVATIONS), len(expected))
for i, observation in enumerate(OBSERVATIONS):
state, string, file_fraction, byte_fraction = observation
self.assertEqual(state, expected[i][0])
self.assertEqual(string, expected[i][1])
self.assertEqual(file_fraction,
float(i)/float(len(expected)-1))
# Verify initial state.
self.assertEqual(SOURCE_INIT, False)
self.assertEqual(SINK_INIT, False)
self.assertNotEqual(self.model.Sink.text_data,
self.model.Source.text_data)
self.assertNotEqual(self.model.Sink.binary_data,
self.model.Source.sub.binary_data)
for path in EXTERNAL_FILES:
path = os.path.join(self.model.Source.get_abs_directory(), path)
if not os.path.exists(path):
self.fail("after loading, path '%s' does not exist" % path)
for i in range(3):
self.assertEqual(self.model.Source.obj_list[i].data, i)
self.assertEqual(self.model.Oddball.executions, 0)
# Run and verify correct operation.
self.model.run()
self.assertEqual(self.model.Sink.text_data,
self.model.Source.text_data)
self.assertEqual(all(self.model.Sink.binary_data==
self.model.Source.sub.binary_data), True)
self.assertEqual(
self.model.Sink.binary_file.binary, True)
self.assertEqual(self.model.Oddball.executions, 3)
finally:
os.chdir(orig_dir)
shutil.rmtree(test_dir)
def test_save_load(self):
logging.debug('')
logging.debug('test_save_load')
self.save_load()
def test_save_bad_name(self):
logging.debug('')
logging.debug('test_save_bad_name')
code = "self.model.save_to_egg('#%^&', next_egg(), py_dir=PY_DIR)"
assert_raises(self, code, globals(), locals(), ValueError,
'Egg_TestModel: Egg name must be alphanumeric')
def test_save_bad_version(self):
logging.debug('')
logging.debug('test_save_bad_version')
code = "self.model.save_to_egg(self.model.name, '#%^&', py_dir=PY_DIR)"
assert_raises(self, code, globals(), locals(), ValueError,
'Egg_TestModel: Egg version must be alphanumeric')
def test_save_bad_directory(self):
logging.debug('')
logging.debug('test_save_bad_directory')
# Set subcomponent directory outside model root.
self.model.Oddball.directory = os.getcwd()
code = 'self.model.save_to_egg(self.model.name, next_egg(), py_dir=PY_DIR)'
msg = "Egg_TestModel: Can't save, Egg_TestModel.Oddball.oddcomp" \
" directory"
assert_raises(self, code, globals(), locals(), ValueError, msg)
def test_save_bad_destination(self):
logging.debug('')
logging.debug('test_save_bad_destination')
# TODO: get make_protected_dir() to work on Windows.
if sys.platform == 'win32':
raise nose.SkipTest()
directory = make_protected_dir()
try:
# Attempt to save to directory we aren't allowed to write to.
self.model.save_to_egg(self.model.name, next_egg(), py_dir=PY_DIR,
dst_dir=directory)
except IOError, exc:
self.assertTrue('no write permission' in str(exc) or
'Permission denied' in str(exc))
else:
self.fail('Expected IOError')
finally:
os.rmdir(directory)
def test_save_bad_external(self):
logging.debug('')
logging.debug('test_save_bad_external')
# Set external file path outside model root.
path = os.path.join(os.getcwd(), 'bad-external')
out = open(path, 'w')
out.close()
metadata = self.model.Source.external_files[0]
metadata.path = path
code = 'self.model.save_to_egg(self.model.name, next_egg(), py_dir=PY_DIR)'
msg = "Egg_TestModel: Can't save, Egg_TestModel.Source file"
try:
assert_raises(self, code, globals(), locals(), ValueError, msg)
finally:
os.remove(path)
def test_save_noforce(self):
logging.debug('')
logging.debug('test_save_noforce')
# Set external file path outside model root.
path = os.path.join(os.getcwd(), 'unforced-external')
out = open(path, 'w')
out.close()
metadata = self.model.Source.external_files[0]
metadata.path = path
try:
self.model.save_to_egg(self.model.name, next_egg(), py_dir=PY_DIR,
require_relpaths=False)
finally:
os.remove(path)
def test_save_bad_filevar(self):
logging.debug('')
logging.debug('test_save_bad_filevar')
# Set file trait path outside model root.
self.model.Source.text_file.path = '/illegal'
code = 'self.model.save_to_egg(self.model.name, next_egg(), py_dir=PY_DIR)'
msg = "Egg_TestModel: Can't save, Egg_TestModel.Source.text_file path"
assert_raises(self, code, globals(), locals(), ValueError, msg)
def test_save_bad_function(self):
logging.debug('')
logging.debug('test_save_bad_function')
# Set reference to unpickleable function.
self.model.Oddball.function_socket = observer
try:
self.model.save_to_egg(self.model.name, next_egg(), py_dir=PY_DIR)
except RuntimeError, exc:
msg = "Egg_TestModel: Can't save: reference to function defined" \
" in main module"
self.assertEqual(str(exc)[:len(msg)], msg)
else:
if MODULE_NAME == '__main__':
self.fail('Expected RuntimeError')
def test_save_bad_method(self):
logging.debug('')
logging.debug('test_save_bad_method')
# Set reference to unpickleable static method.
self.model.Oddball.method_socket = self.model.Oddball.static_method
code = 'self.model.save_to_egg(self.model.name, next_egg(), py_dir=PY_DIR)'
assert_raises(self, code, globals(), locals(), RuntimeError,
"Egg_TestModel: Can't save, 1 object cannot be pickled.")
def test_save_bad_pickle(self):
logging.debug('')
logging.debug('test_save_bad_pickle')
# Code objects don't pickle.
self.model.code = compile('3 + 4', '<string>', 'eval')
try:
# This will fail due to code object.
self.model.save_to_egg(self.model.name, next_egg(), py_dir=PY_DIR)
except cPickle.PicklingError, exc:
msg = "Egg_TestModel: Can't save to" \
" 'Egg_TestModel/Egg_TestModel.pickle': Can't pickle" \
" <type 'code'>: attribute lookup __builtin__.code failed"
self.assertEqual(str(exc).replace('\\','/'), msg)
else:
self.fail('Expected cPickle.PicklingError')
def test_save_bad_child(self):
logging.debug('')
logging.debug('test_save_bad_child')
# Create orphan component.
orphan = Component()
code = 'self.model.save_to_egg(self.model.name, next_egg(),' \
' py_dir=PY_DIR, child_objs=[orphan])'
assert_raises(self, code, globals(), locals(), RuntimeError,
'Entry point object has no parent!')
# Create non-orphan component that is not part of model.
badboy = orphan.add('badboy', Component())
code = 'self.model.save_to_egg(self.model.name, next_egg(),' \
' py_dir=PY_DIR, child_objs=[badboy])'
assert_raises(self, code, globals(), locals(), RuntimeError,
'Egg_TestModel: badboy is not a child of Egg_TestModel.')
def test_save_load_container(self):
logging.debug('')
logging.debug('test_save_load_container')
# Save to egg.
egg_info = self.model.Source.sub.save_to_egg(self.model.name,
next_egg(), py_dir=PY_DIR)
self.egg_name = egg_info[0]
# Restore in test directory.
orig_dir = os.getcwd()
test_dir = 'EggTest'
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
os.mkdir(test_dir)
os.chdir(test_dir)
try:
egg_path = os.path.join('..', self.egg_name)
sub = Container.load_from_eggfile(egg_path)
self.assertTrue(all(sub.binary_data == self.model.Source.sub.binary_data))
finally:
os.chdir(orig_dir)
shutil.rmtree(test_dir)
def test_load_badfile(self):
logging.debug('')
logging.debug('test_load_badfile')
# Try to load from non-egg.
assert_raises(self, "Component.load_from_eggfile('.')",
globals(), locals(), ValueError,
"'.' is not an egg/zipfile.")
def test_load_nofile(self):
logging.debug('')
logging.debug('test_load_nofile')
# Try to load from nonexistent egg file.
assert_raises(self, "Component.load_from_eggfile('no-such-egg')",
globals(), locals(), ValueError,
"'no-such-egg' not found.")
def test_load_nopackage(self):
logging.debug('')
logging.debug('test_load_nopackage')
# Try to load from nonexistent egg package.
assert_raises(self, "Component.load_from_eggpkg('no-such-egg')",
globals(), locals(), pkg_resources.DistributionNotFound,
'no-such-egg')
def test_check_save_load(self):
logging.debug('')
logging.debug('test_check_save_load')
# Exercise check_save_load().
retcode = check_save_load(self.model, py_dir=PY_DIR)
self.assertEqual(retcode, 0)
def test_install_load(self):
# Creates egg.
# Installs in special directory.
# Tries to load and run from installed egg in various ways.
logging.debug('')
logging.debug('test_install_load')
# Find correct python.
python = find_python()
logging.debug(' Using python: %s' % python)
# Write to egg.
egg_info = self.model.save_to_egg(self.model.name, next_egg(),
py_dir=PY_DIR,
child_objs=self.child_objs)
self.egg_name = egg_info[0]
# Create directory for installation.
install_dir = os.path.join(os.getcwd(), 'install_dir')
if os.path.exists(install_dir):
shutil.rmtree(install_dir)
os.mkdir(install_dir)
try:
# Create special installer script.
# This basically does an easy_install.
installer = os.path.join(install_dir, 'installer.py')
out = open(installer, 'w')
out.write("""\
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools>=0.6c8','console_scripts','easy_install'
__requires__ = 'setuptools>=0.6c8'
import os
import sys
from pkg_resources import load_entry_point
#print
#print 'Installer Environment:'
#for name, val in sorted(os.environ.items(), key=lambda item: item[0]):
# print ' %s = %r' % (name, val)
#print
sys.exit(
load_entry_point('setuptools>=0.6c8', 'console_scripts', 'easy_install')()
)
""")
out.close()
# Install via subprocess with PYTHONPATH set (for easy_install).
logging.debug('Installing via subprocess...')
env = os.environ.copy()
path = env.get('PYTHONPATH', '')
if path:
path += os.pathsep
path += install_dir
env['PYTHONPATH'] = path
# logging.debug('Test Environment:')
# for name, val in sorted(env.items(), key=lambda item: item[0]):
# logging.debug(' %s = %r', name, val)
cmdline = [python, installer, '-d', install_dir, self.egg_name]
stdout = open(os.path.join(install_dir, 'installer.out'), 'w')
retcode = subprocess.call(cmdline, env=env,
stdout=stdout, stderr=subprocess.STDOUT)
stdout.close()
stdout = open(os.path.join(install_dir, 'installer.out'), 'r')
for line in stdout:
logging.debug(' %s', line.rstrip())
stdout.close()
self.assertEqual(retcode, 0)
# Load full model and run.
package_name = self.model.name
entry_name = ''
retcode = self.load_n_run(python, install_dir,
package_name, entry_name)
self.assertEqual(retcode, 0)
# Load just the Oddball component and run.
entry_name = self.model.Oddball.get_pathname()
retcode = self.load_n_run(python, install_dir,
package_name, entry_name)
self.assertEqual(retcode, 0)
# Try a non-existent package.
code = "Component.load_from_eggpkg('no-such-pkg', 'no-such-entry')"
assert_raises(self, code, globals(), locals(),
pkg_resources.DistributionNotFound, 'no-such-pkg')
# Try a non-existent entry point.
egg_path = os.path.join(install_dir, self.egg_name)
sys.path.append(egg_path)
orig_ws = pkg_resources.working_set
pkg_resources.working_set = pkg_resources.WorkingSet()
code = "Component.load_from_eggpkg(package_name, 'no-such-entry')"
msg = "No 'openmdao.component' 'no-such-entry' entry point."
try:
assert_raises(self, code, globals(), locals(), RuntimeError, msg)
finally:
sys.path.pop()
pkg_resources.working_set = orig_ws
finally:
shutil.rmtree(install_dir)
def load_n_run(self, python, install_dir, package_name, entry_name):
""" Load component from installed egg and run it. """
# Create and move to test directory.
orig_dir = os.getcwd()
test_dir = 'EggTest'
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
os.mkdir(test_dir)
os.chdir(test_dir)
try:
# Create load-n-run script.
out = open('load-n-run.py', 'w')
out.write("""\
import sys
sys.path.append('%(egg)s')
from openmdao.main.api import Component
comp = Component.load_from_eggpkg('%(package)s', '%(entry)s')
comp.run()
""" % {'egg':os.path.join(install_dir, self.egg_name).replace('\\', '/'),
'package':package_name, 'entry':entry_name})
out.close()
# Load & run in subprocess.
logging.debug("Load and run '%s' in subprocess...", entry_name)
logging.debug(' %s', os.path.join(install_dir, self.egg_name))
cmdline = [python, 'load-n-run.py']
stdout = open('load-n-run.out', 'w')
retcode = subprocess.call(cmdline, stdout=stdout,
stderr=subprocess.STDOUT)
stdout.close()
stdout = open('load-n-run.out', 'r')
for line in stdout:
logging.debug(' %s'% line.rstrip())
stdout.close()
return retcode
finally:
os.chdir(orig_dir)
shutil.rmtree(test_dir)
def test_pkg_resources_factory(self):
# NOTE: this test fails if run standalone:
# ImportError: No module named test_egg_save
# Probably need Egg_TestModel.test_egg_save, or adjusted sys.path.
if MODULE_NAME == '__main__':
return
logging.debug('')
logging.debug('test_pkg_resources_factory')
# Write to egg.
egg_info = self.model.save_to_egg(self.model.name, next_egg(),
py_dir=PY_DIR,
child_objs=self.child_objs)
self.egg_name = egg_info[0]
# Create factory.
factory = PkgResourcesFactory(['openmdao.component',
'openmdao.container'],
[os.getcwd()])
# Create and move to test directory.
orig_dir = os.getcwd()
test_dir = 'EggTest'
if os.path.exists(test_dir):
shutil.rmtree(test_dir)
os.mkdir(test_dir)
os.chdir(test_dir)
try:
# Check multiple model instances.
self.create_and_check_model(factory, 'test_model_1',
'Hello world!\n')
self.create_and_check_model(factory, 'test_model_2',
'Hello world!\n')
# Check that reloading doesn't clobber existing files.
file_data = 'New and interesting stuff\n'
path = os.path.join('test_model_2', 'Source', EXTERNAL_FILES[2])
out = open(path, 'w')
out.write(file_data)
out.close()
logging.debug('updated %s', path)
self.create_and_check_model(factory, 'test_model_2', file_data)
# Check observations.
global OBSERVATIONS
OBSERVATIONS = []
model = factory.create('Egg_TestModel', name='observed',
observer=observer)
if model is None:
self.fail("Create of 'observed' failed.")
expected = [
('copy', 'Source/xyzzy'),
('copy', 'sub/data2'),
('copy', 'Source/hello'),
('copy', 'sub/data4'),
('complete', 'observed'),
]
self.assertEqual(len(OBSERVATIONS), len(expected))
for i, observation in enumerate(OBSERVATIONS):
state, string, file_fraction, byte_fraction = observation
self.assertEqual(state, expected[i][0])
self.assertEqual(string, expected[i][1])
self.assertEqual(file_fraction, float(i)/float(len(expected)-1))
# Create a component.
comp = factory.create('Egg_TestModel.Oddball', name='test_comp',
observer=observer)
if comp is None:
self.fail('Create of test_comp failed.')
self.assertEqual(comp.executions, 0)
comp.run()
self.assertEqual(comp.executions, 3)
self.assertEqual(comp.get_pathname(), 'test_comp')
# Create a (sub)component.
sub = factory.create('Egg_TestModel.Oddball.oddcomp',
name='test_sub')
if sub is None:
self.fail('Create of test_sub failed.')
self.assertEqual(sub.get_pathname(), 'test_sub')
# Create a (sub)container.
sub = factory.create('Egg_TestModel.Oddball.oddcont',
name='test_sub')
if sub is None:
self.fail('Create of test_sub failed.')
self.assertEqual(sub.get_pathname(), 'test_sub')
# Try a non-existent entry point.
obj = factory.create('no-such-entry', name='xyzzy')
self.assertEqual(obj, None)
finally:
os.chdir(orig_dir)
shutil.rmtree(test_dir)
def create_and_check_model(self, factory, name, file_data):
""" Create a complete model instance and check it's operation. """
model = factory.create('Egg_TestModel', name=name)
logging.debug('model.directory = %s' % model.directory)
if model is None:
self.fail("Create of '%s' failed." % name)
self.assertEqual(model.get_abs_directory(),
os.path.join(os.getcwd(), name))
self.assertEqual(model.Oddball.get_pathname(), name+'.Oddball')
# Verify initial state.
self.assertNotEqual(model.Sink.text_data,
model.Source.text_data)
self.assertNotEqual(model.Sink.binary_data,
model.Source.sub.binary_data)
orig_dir = os.getcwd()
os.chdir(model.Source.get_abs_directory())
try:
for path in EXTERNAL_FILES:
if not os.path.exists(path):
self.fail("path '%s' does not exist" % path)
inp = open(EXTERNAL_FILES[2])
data = inp.read()
inp.close()
self.assertEqual(data.strip(), file_data.strip())
finally:
os.chdir(orig_dir)
for i in range(3):
self.assertEqual(model.Source.obj_list[i].data, i)
self.assertEqual(model.Oddball.executions, 0)
# Run and verify correct operation.
model.run()
self.assertEqual(model.Sink.text_data,
model.Source.text_data)
self.assertEqual(all(model.Sink.binary_data==model.Source.sub.binary_data),
True)
self.assertEqual(model.Sink.binary_file.binary, True)
self.assertEqual(model.Oddball.executions, 3)
def test_main_module(self):
if MODULE_NAME == '__main__':
return
# Ensure that __main__ translation is correctly handled.
logging.debug('')
logging.debug('test_main_module')
# Find correct python.
python = find_python()
logging.debug(' Using python: %s' % python)
orig_dir = os.getcwd()
os.chdir(PY_DIR)
try:
cmdline = [python, 'test_egg_save.py']
stdout = open('main_handling.out', 'w')
retcode = subprocess.call(cmdline, stdout=stdout,
stderr=subprocess.STDOUT)
stdout.close()
stdout = open('main_handling.out', 'r')
for line in stdout:
logging.debug(' %s'% line.rstrip())
stdout.close()
os.remove('main_handling.out')
finally:
os.chdir(orig_dir)
self.assertEqual(retcode, 0)
if __name__ == '__main__':
sys.argv.append('--cover-package=openmdao')
sys.argv.append('--cover-erase')
# Clobber cache so we have a known state.
path = os.path.expanduser(os.path.join('~', '.openmdao', 'eggsaver.dat'))
if os.path.exists(path):
os.remove(path)
nose.runmodule()
| []
| []
| []
| [] | [] | python | 0 | 0 | |
cli/deployment/api_client.go | package deployment
import (
"fmt"
"log"
"os"
"os/exec"
"strings"
"gopkg.in/yaml.v2"
)
// Status represents Deployment status in enum/numerical format
type Status int
const (
// Done Status represents a successfully finished Deployment.
Done Status = 0
// Pending Status means the Deployment is in the "PENDING" state, i.e. the resource creation process has not started yet.
Pending Status = 1
// Running Status means the Deployment is in the "RUNNING" state, i.e. the resource creation process has started.
Running Status = 2
// NotFound Status means the Deployment does not exist in any state.
NotFound Status = 3
// Error Status means the Deployment failed, or the deployment describe operation itself completed with an error.
Error Status = 4
)
const (
// ActionApply passed as "action" parameter value to cmd.execute(action string, ...) to apply a Deployment created or updated in preview mode.
ActionApply string = "apply"
// ActionDelete passed as "action" parameter value to cmd.execute(action string, ...) to delete a Deployment.
ActionDelete string = "delete"
// ActionCreate passed as "action" parameter value to cmd.execute(action string, ...) to create a Deployment.
ActionCreate string = "create"
// ActionUpdate passed as "action" parameter value to cmd.execute(action string, ...) to update a Deployment.
ActionUpdate string = "update"
)
type DeploymentDescriptionResource struct {
Name string
Type string
Properties string
FinalProperties string `yaml:",omitempty"`
Update struct {
Properties string
FinalProperties string `yaml:",omitempty"`
State string
} `yaml:",omitempty"`
}
type DeploymentDescription struct {
Deployment struct {
Name string
Operation struct {
OperationType string
Status string
}
}
Resources []DeploymentDescriptionResource
}
// Resources struct is used to parse the deployment manifest yaml received with the 'gcloud deployment-manager manifests describe' command.
type Resources struct {
Name string
Outputs []struct {
Value interface{} `yaml:"finalValue"`
Name string
}
Resources []Resources
}
var actions = []string{ActionApply, ActionDelete, ActionCreate, ActionUpdate}
// String returns human readable string representation of Deployment Status.
func (s Status) String() string {
return [...]string{"DONE", "PENDING", "RUNNING", "NOT_FOUND", "ERROR"}[s]
}
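// For example, Done.String() returns "DONE" and NotFound.String() returns "NOT_FOUND".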
// The RunGCloud function runs the gcloud tool with the specified arguments. It is implemented
// as a variable so that it can be mocked in tests of its exported consumers.
var RunGCloud = func(args ...string) (result string, err error) {
log.Println("gcloud", strings.Join(args, " "))
cmd := exec.Command("gcloud", args...)
// pass the user's PATH env variable so that the gcloud executable can be found in PATH
cmd.Env = append(cmd.Env, fmt.Sprintf("PATH=%s", os.Getenv("PATH")))
output, err := cmd.CombinedOutput()
if err != nil {
return string(output), err
}
return string(output), err
}
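// Illustrative sketch (return value assumed): tests of exported consumers can stub the
// variable instead of shelling out to the real gcloud binary, e.g.
//
//	RunGCloud = func(args ...string) (string, error) {
//	    return "layout: {}", nil
//	}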
// GetOutputs retrieves existing Deployment outputs using gcloud and stores the result in a map[string]interface{}
// where "resourceName.propertyName" is the key and the value is a string (for flat values) or a JSON object.
func GetOutputs(project string, name string) (map[string]interface{}, error) {
output, err := RunGCloud("deployment-manager", "manifests", "describe", "--deployment", name, "--project", project, "--format", "yaml")
if err != nil {
log.Printf("failed to describe deployment manifest for deployment: %s.%s, error: %v, output: %s", project, name, err, output)
return nil, err
}
return parseOutputs(output)
}
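// Illustrative sketch (resource and property names assumed): for a deployment that exposes an
// output "ip" on a resource "my-vm", the returned map would contain {"my-vm.ip": "10.0.0.2"}.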
// GCloudDefaultProjectID returns the default project id taken from local gcloud configuration.
func GCloudDefaultProjectID() (string, error) {
data, err := RunGCloud("config", "list", "--format", "yaml")
if err != nil {
return "", err
}
out := struct {
Core struct {
Project string
}
}{}
err = yaml.Unmarshal([]byte(data), &out)
if err != nil {
return "", err
}
return out.Core.Project, nil
}
// Create creates a deployment based on the Deployment object passed into it.
// Create initializes the passed Deployment object with its Outputs map in case of successful creation and returns an error otherwise.
// The preview parameter defines whether the deployment should be created in 'Preview' mode.
// The function returns the raw gcloud cli output for debug purposes, both on success and on error.
func Create(deployment *Deployment, preview bool) (string, error) {
return createOrUpdate(ActionCreate, deployment, preview)
}
// Update updates a deployment based on the Deployment object passed into it.
// Update initializes the passed Deployment object with its Outputs map in case of a successful update and returns an error otherwise.
// The preview parameter defines whether the deployment should be updated in 'Preview' mode.
// The function returns the raw gcloud cli output for debug purposes, both on success and on error.
func Update(deployment *Deployment, preview bool) (string, error) {
return createOrUpdate(ActionUpdate, deployment, preview)
}
func createOrUpdate(action string, deployment *Deployment, preview bool) (string, error) {
if action != ActionCreate && action != ActionUpdate {
log.Fatalf("action %s not in [%s,%s] for deployment: %v", action, ActionCreate, ActionUpdate, deployment)
}
args := []string{
"deployment-manager",
"deployments",
action,
deployment.config.Name,
"--config",
deployment.configFile,
"--project",
deployment.config.GetProject(),
}
if preview {
args = append(args, "--preview")
}
output, err := RunGCloud(args...)
if err != nil {
log.Printf("failed to %s deployment: %v, error: %v, output: %s", action, deployment, err, output)
return output, err
}
if !preview {
outputs, err := GetOutputs(deployment.config.GetProject(), deployment.config.Name)
if err != nil {
log.Printf("on %s action, failed to get outputs for deployment: %v, error: %v, output: %s", action, deployment, err, output)
return output, err
}
deployment.Outputs = outputs
}
return output, nil
}
// CancelPreview cancels an update/create/delete action that was created with the preview flag.
// The function uses the gcloud deployments cancel-preview command.
// When a previewed create action is cancelled, the deployment shell still exists, so run Delete() after CancelPreview() for cleanup.
func CancelPreview(deployment *Deployment) (string, error) {
args := []string{
"deployment-manager",
"deployments",
"cancel-preview",
deployment.config.Name,
"--project",
deployment.config.GetProject(),
"-q",
}
output, err := RunGCloud(args...)
if err != nil {
log.Printf("failed to cancel preview deployment: %v, error: %v, output: %s", deployment, err, output)
return output, err
}
return output, nil
}
// ApplyPreview applies the changes previously made with the --preview flag.
func ApplyPreview(deployment *Deployment) (string, error) {
args := []string{
"deployment-manager",
"deployments",
"update",
deployment.config.Name,
"--project",
deployment.config.GetProject(),
"-q",
}
output, err := RunGCloud(args...)
if err != nil {
log.Printf("failed to apply preview for deployment: %v, error: %v, output: %s", deployment, err, output)
return output, err
}
return output, nil
}
// Delete removes the Deployment passed into it as a parameter.
// The boolean preview param defines whether changes have to be previewed before being applied.
func Delete(deployment *Deployment, preview bool) (string, error) {
args := []string{
"deployment-manager",
"deployments",
"delete",
deployment.config.Name,
"--project",
deployment.config.GetProject(),
"-q",
}
if preview {
args = append(args, "--preview")
}
output, err := RunGCloud(args...)
if err != nil {
log.Printf("failed to delete deployment: %v, error: %v, output: %s", deployment, err, output)
return output, err
}
return output, nil
}
func GetDeploymentDescription(name string, project string) (*DeploymentDescription, error) {
args := []string{
"deployment-manager",
"deployments",
"describe",
name,
"--project",
project,
"--format", "yaml",
}
response, err := RunGCloud(args...)
if err != nil {
return nil, err
}
description := &DeploymentDescription{}
err = yaml.Unmarshal([]byte(response), description)
if err != nil {
log.Printf("error unmarshall response: %s,\n deployment: %v \n error: %v", response, name, err)
return nil, err
}
return description, nil
}
// GetStatus retrieves Deployment status using gcloud cli, see deployment.Status type for details.
func GetStatus(deployment *Deployment) (Status, error) {
args := []string{
"deployment-manager",
"deployments",
"describe",
deployment.config.Name,
"--project",
deployment.config.GetProject(),
"--format", "yaml",
}
response, err := RunGCloud(args...)
if err != nil {
if strings.Contains(response, "code=404") {
return NotFound, nil
} else {
return Error, err
}
}
description := &DeploymentDescription{}
err = yaml.Unmarshal([]byte(response), description)
if err != nil {
log.Printf("error unmarshall response: %s,\n deployment: %v \n error: %v", response, deployment, err)
return Error, err
}
status := description.Deployment.Operation.Status
switch status {
case "DONE":
return Done, nil
case "RUNNING":
return Running, nil
case "PENDING":
return Pending, nil
default:
return Error, fmt.Errorf("Unknown status %s, for deployment %s",
deployment.config.FullName(), status)
}
}
func parseOutputs(data string) (map[string]interface{}, error) {
describe, err := unmarshal(data)
if err != nil {
log.Println("error parsing deployment outputs")
return nil, err
}
layoutData := describe["layout"].(string)
res := &struct {
Resources []Resources
}{}
err = yaml.Unmarshal([]byte(layoutData), res)
if err != nil {
log.Println("error parsing deployment outputs layout section")
return nil, err
}
result := make(map[string]interface{})
resources := flattenResources(res.Resources)
for _, resource := range resources {
for _, output := range resource.Outputs {
key := resource.Name + "." + output.Name
value := output.Value
result[key] = value
}
}
if len(result) == 0 {
return nil, nil
}
return result, nil
}
// flattenResources iterates over the passed slice of Resources objects and over all sub-resources recursively, adding all
// resources to the result array. In simple words, flattenResources extracts all resources and sub-resources with a non-empty Outputs field.
func flattenResources(source []Resources) []Resources {
var result []Resources
for _, resource := range source {
if len(resource.Outputs) > 0 {
result = append(result, resource)
}
if len(resource.Resources) > 0 {
result = append(result, flattenResources(resource.Resources)...)
}
}
return result
}
| [
"\"PATH\""
]
| []
| [
"PATH"
]
| [] | ["PATH"] | go | 1 | 0 | |
config.go | package main
import (
"bufio"
"encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/adrg/xdg"
"github.com/docopt/docopt-go"
"github.com/smallnest/ringbuffer"
"gopkg.in/yaml.v2"
)
// Read RL configuration from a standard file-path. RL configuration
// will be a YAML file
func ReadConfig(cfg *ConfigOpts) (*ConfigOpts, error) {
cfgConn, err := os.Open(cfg.ConfigPath)
if err != nil {
return cfg, err
}
defer func() {
cfgConn.Close()
}()
var rlCfg RLConfigFile
decoder := yaml.NewDecoder(cfgConn)
err = decoder.Decode(&rlCfg)
if err != nil {
return cfg, err
}
cfg.Config = rlCfg
return cfg, nil
}
// Create a configuration file, if it doesn't exist already.
func CreateConfigFile(cfg *ConfigOpts) error {
// -- create the config file if it doesn't exist
// -- write to file
_, err := os.Stat(cfg.ConfigPath)
if errors.Is(err, os.ErrNotExist) {
// -- the file does not exist, write yaml to a file
cfgConn, err := os.OpenFile(cfg.ConfigPath, os.O_RDWR|os.O_CREATE, USER_READ_WRITE_OCTAL)
if err != nil {
return err
}
defer func() {
cfgConn.Close()
}()
enc := yaml.NewEncoder(cfgConn)
encodeErr := enc.Encode(RLConfigFile{false})
if encodeErr != nil {
return encodeErr
}
} else {
return err
}
return nil
}
// Create a history file, if it doesn't exist already. This may not
// actually be used, depending on user-configuration
func CreateHistoryFile(cfg *ConfigOpts) error {
histConn, err := os.OpenFile(cfg.HistoryPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, USER_READ_WRITE_OCTAL)
if err != nil {
return err
}
defer func() {
histConn.Close()
}()
return nil
}
// Initialise RL configuration; create required configuration directories
// and files, and return configuration that's already present
func InitConfig() (*ConfigOpts, error) {
// use XDG specification paths for configuration & data
configPath := filepath.Join(xdg.ConfigHome, "rl.yaml")
dataDir := filepath.Join(xdg.DataHome, "rl")
historyPath := filepath.Join(dataDir, "history")
cfg := ConfigOpts{
historyPath,
configPath,
RLConfigFile{},
}
// ensure XDG directories exist
for _, dir := range []string{xdg.ConfigHome, dataDir} {
err := os.MkdirAll(dir, USER_READ_WRITE_OCTAL)
if err != nil {
return &cfg, err
}
}
if cfgErr := CreateConfigFile(&cfg); cfgErr != nil {
return &cfg, cfgErr
}
if histErr := CreateHistoryFile(&cfg); histErr != nil {
return &cfg, histErr
}
// Read configuration; if it already exists there might be user configuration here
return ReadConfig(&cfg)
}
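// Illustrative note (paths assumed): with default XDG settings on Linux this typically resolves to
// ~/.config/rl.yaml for configuration and ~/.local/share/rl/history for history.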
// Write to file history when history events are sent via a channel.
// This will not be used if the user has history disabled
func HistoryWriter(histChan chan *History, cfg *ConfigOpts) {
var historyLock = sync.Mutex{}
histConn, _ := os.OpenFile(cfg.HistoryPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, USER_READ_WRITE_OCTAL)
writer := bufio.NewWriter(histConn)
defer func() {
historyLock.Lock()
histConn.Close()
writer.Flush()
historyLock.Unlock()
}()
startTime := time.Now()
for {
hist := <-histChan
hist.StartTime = startTime
entry, _ := json.Marshal(hist)
historyLock.Lock()
writer.WriteString(string(entry) + "\n")
writer.Flush()
historyLock.Unlock()
}
}
// Depending on configuration, initialise history writer
func StartHistoryWriter(cfg *ConfigOpts) chan *History {
// write to RL history, if that's configured
histChan := make(chan *History)
if cfg.Config.SaveHistory {
go HistoryWriter(histChan, cfg)
}
return histChan
}
// Read standard-input into a circular buffer; stdin can be infinite, and
// often is when using commands like `journalctl`, so we don't want to exhaust all memory
// attempting to store it.
func ReadStdin() (*ringbuffer.RingBuffer, int) {
stdin := ringbuffer.New(STDIN_BUFFER_SIZE)
piped, pipeErr := StdinPiped()
if pipeErr != nil {
fmt.Printf("RL: could not inspect whether stdin was piped into RL: %v\n", pipeErr)
return stdin, 1
}
// read from standard input and redirect to subcommands. Input can be infinite,
// so manage this read from a goroutine an read into a circular buffer
if piped {
go StdinReader(stdin)
}
return stdin, 0
}
// Validate user-configuration before starting RL properly
func ValidateConfig() (*ConfigOpts, int) {
tty, ttyErr := OpenTTY()
cfg, cfgErr := InitConfig()
if cfgErr != nil {
fmt.Printf("RL: Failed to read configuration: %s\n", cfgErr)
return cfg, 1
}
if ttyErr != nil {
fmt.Printf("RL: could not open /dev/tty. Are you running rl non-interactively?")
return cfg, 1
}
tty.Close()
return cfg, 0
}
// Read the user's SHELL variable from the environment; this will normally be bash or zsh. If it's present,
// just assume it's accurate; the user would have to lie for it to be set incorrectly, most likely.
func ReadShell() (string, int) {
shell := os.Getenv("SHELL")
if shell == "" {
fmt.Printf("RL: could not determine user's shell (e.g bash, zsh). Ensure $SHELL is set.")
return shell, 1
}
return shell, 0
}
func RLState(opts *docopt.Opts) (LineChangeState, LineChangeCtx, int) {
execute, execErr := opts.String("<cmd>")
code := AuditCommand(&execute)
if code != 0 {
return LineChangeState{}, LineChangeCtx{}, code
}
if execErr != nil {
execute = ""
}
inputOnly, inputErr := opts.Bool("--input-only")
if inputErr != nil {
fmt.Printf("RL: failed to read --input-only option. %v\n", inputErr)
os.Exit(1)
}
_, rerunErr := opts.Bool("--rerun")
if rerunErr != nil {
fmt.Printf("RL: failed to read --rerun option. %v\n", rerunErr)
os.Exit(1)
}
splitEnvVars := [][]string{}
// docopt is unmaintained, so read <env_vars> straight from the raw options map
envVarsIface, present := (*opts)["<env_vars>"]
if present {
envVars, castOk := envVarsIface.([]string)
if !castOk {
fmt.Println("RL: failed to read <env_vars> option.")
os.Exit(1)
}
splitEnvVars = make([][]string, len(envVars))
for idx, pair := range envVars {
split := strings.SplitN(pair, "=", 2)
if len(split) != 2 {
fmt.Printf("RL: failed to split environment-variable '%v' provided to rl\n", pair)
os.Exit(1)
}
splitEnvVars[idx] = split
}
}
shell, code := ReadShell()
if code != 0 {
return LineChangeState{}, LineChangeCtx{}, code
}
stdin, code := ReadStdin()
if code != 0 {
return LineChangeState{}, LineChangeCtx{}, code
}
ctx := LineChangeCtx{
shell,
inputOnly,
&execute,
os.Environ(),
splitEnvVars,
stdin,
}
linebuffer := LineBuffer{}
state := LineChangeState{
lineBuffer: &linebuffer,
cmd: nil,
}
return state, ctx, 0
}
| [
"\"SHELL\""
]
| []
| [
"SHELL"
]
| [] | ["SHELL"] | go | 1 | 0 | |
pkg/handlers/response.go | package handlers
import (
"encoding/json"
"net/http"
"os"
"log"
)
type response struct {
Ok bool `json:"ok"`
Err string `json:"error"`
Body interface{} `json:"body"`
}
func sendErrorResp(w http.ResponseWriter, err string, code int) {
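// Outside a dev environment (DEV_STAGE unset), log the real error and return a generic message so internal details are not leaked to clients.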
if code == http.StatusInternalServerError && os.Getenv("DEV_STAGE") == "" {
log.Println(err)
err = "on getting an internal server error"
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(code)
json.NewEncoder(w).Encode(response{
Err: err,
})
}
func sendSuccessResp(w http.ResponseWriter, body interface{}) {
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusAccepted)
json.NewEncoder(w).Encode(response{
Ok: true,
Body: body,
})
}
| [
"\"DEV_STAGE\""
]
| []
| [
"DEV_STAGE"
]
| [] | ["DEV_STAGE"] | go | 1 | 0 | |
send_grid_mail.go | package main
import (
"fmt"
"log"
"os"
"github.com/sendgrid/sendgrid-go"
"github.com/sendgrid/sendgrid-go/helpers/mail"
)
func Send_Gridmail(plainTextContent string) {
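// Requires TOEMAIL1 and TOEMAIL2 (recipient addresses) and PASSWORD (the SendGrid API key) to be set in the environment.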
from := mail.NewEmail("Walmart Slot Robort", "[email protected]")
subject := "There are slots available"
to := mail.NewEmail("Special Customer", os.Getenv("TOEMAIL1"))
htmlContent := "<strong>" + plainTextContent + "</strong>"
message := mail.NewSingleEmail(from, subject, to, plainTextContent, htmlContent)
client := sendgrid.NewSendClient(os.Getenv("PASSWORD"))
response, err := client.Send(message)
if err != nil {
log.Println(err)
} else {
fmt.Println(response.StatusCode)
fmt.Println(response.Body)
fmt.Println(response.Headers)
}
to2 := mail.NewEmail("Custom2", os.Getenv("TOEMAIL2"))
message2 := mail.NewSingleEmail(from, subject, to2, plainTextContent, htmlContent)
response2, err := client.Send(message2)
if err != nil {
log.Println(err)
} else {
fmt.Println(response2.StatusCode)
fmt.Println(response2.Body)
fmt.Println(response2.Headers)
}
}
| [
"\"TOEMAIL1\"",
"\"PASSWORD\"",
"\"TOEMAIL2\""
]
| []
| [
"TOEMAIL2",
"TOEMAIL1",
"PASSWORD"
]
| [] | ["TOEMAIL2", "TOEMAIL1", "PASSWORD"] | go | 3 | 0 | |
examples/conversation/observer.py | """
Get the events of an ongoing conversation.
"""
import argparse
import os
from time import time
import websocket as ws # type: ignore
from websocket import WebSocketTimeoutException # type: ignore
from uhlive.stream.conversation import Conversation, Ok, build_conversation_url
parser = argparse.ArgumentParser(
description="Get the events of an ongoing conversation."
)
parser.add_argument("conversation_id", help="Conversation ID")
args = parser.parse_args()
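# The API endpoint and credentials come from the environment; os.environ raises KeyError if any of them is missing.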
uhlive_url = os.environ["UHLIVE_API_URL"]
uhlive_token = os.environ["UHLIVE_API_TOKEN"]
uhlive_id = os.environ["UHLIVE_API_ID"]
url = build_conversation_url(uhlive_url, uhlive_token)
socket = ws.create_connection(url, timeout=5)
client = Conversation(uhlive_id, args.conversation_id, "observer")
socket.send(client.join(readonly=True))
print("Listening to events")
last_ping = time()
try:
while True:
# As we don't stream audio, we need to regularly ping the server to keep the connection open
if time() - last_ping > 15:
socket.ping()
last_ping = time()
try:
event = client.receive(socket.recv())
except WebSocketTimeoutException:
print("Silence")
continue
if isinstance(event, Ok):
continue
else:
print(event)
finally:
print("Exiting")
socket.send(client.leave())
socket.close()
| []
| []
| [
"UHLIVE_API_URL",
"UHLIVE_API_ID",
"UHLIVE_API_TOKEN"
]
| [] | ["UHLIVE_API_URL", "UHLIVE_API_ID", "UHLIVE_API_TOKEN"] | python | 3 | 0 | |
enterprise/internal/campaigns/resolvers/resolver_test.go | package resolvers
import (
"context"
"database/sql"
"fmt"
"log"
"os"
"reflect"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/graph-gophers/graphql-go"
"github.com/pkg/errors"
"github.com/sourcegraph/go-diff/diff"
"github.com/sourcegraph/sourcegraph/cmd/frontend/backend"
"github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend"
"github.com/sourcegraph/sourcegraph/cmd/repo-updater/repos"
ee "github.com/sourcegraph/sourcegraph/enterprise/internal/campaigns"
"github.com/sourcegraph/sourcegraph/enterprise/internal/campaigns/resolvers/apitest"
ct "github.com/sourcegraph/sourcegraph/enterprise/internal/campaigns/testing"
"github.com/sourcegraph/sourcegraph/internal/actor"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/campaigns"
"github.com/sourcegraph/sourcegraph/internal/db/dbconn"
"github.com/sourcegraph/sourcegraph/internal/db/dbtesting"
"github.com/sourcegraph/sourcegraph/internal/extsvc"
"github.com/sourcegraph/sourcegraph/internal/extsvc/github"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/httptestutil"
"github.com/sourcegraph/sourcegraph/internal/rcache"
"github.com/sourcegraph/sourcegraph/internal/repoupdater/protocol"
"github.com/sourcegraph/sourcegraph/internal/vcs/git"
"github.com/sourcegraph/sourcegraph/schema"
)
func TestCampaigns(t *testing.T) {
if testing.Short() {
t.Skip()
}
ctx := backend.WithAuthzBypass(context.Background())
dbtesting.SetupGlobalTestDB(t)
rcache.SetupForTest(t)
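// HTTP interactions are replayed from recorded fixtures; the package-level *update value controls whether they are re-recorded against the live services.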
cf, save := httptestutil.NewGitHubRecorderFactory(t, *update, "test-campaigns")
defer save()
now := time.Now().UTC().Truncate(time.Microsecond)
clock := func() time.Time {
return now.UTC().Truncate(time.Microsecond)
}
sr := &Resolver{
store: ee.NewStoreWithClock(dbconn.Global, clock),
httpFactory: cf,
}
s, err := graphqlbackend.NewSchema(sr, nil, nil)
if err != nil {
t.Fatal(err)
}
var users struct {
Admin, User struct {
apitest.User `json:"user"`
}
}
apitest.MustExec(ctx, t, s, nil, &users, `
fragment u on User { id, databaseID, siteAdmin }
mutation {
admin: createUser(username: "admin") {
user { ...u }
}
user: createUser(username: "user") {
user { ...u }
}
}
`)
if !users.Admin.SiteAdmin {
t.Fatal("admin must be a site-admin, since it was the first user created")
}
var orgs struct {
ACME apitest.Org
}
ctx = actor.WithActor(ctx, actor.FromUser(users.Admin.DatabaseID))
apitest.MustExec(ctx, t, s, nil, &orgs, `
fragment o on Org { id, name }
mutation {
acme: createOrganization(name: "ACME") { ...o }
}
`)
var campaigns struct{ Admin, Org apitest.Campaign }
input := map[string]interface{}{
"admin": map[string]interface{}{
"namespace": users.Admin.ID,
"name": "Admin Campaign",
"description": "It's an admin's campaign",
},
"org": map[string]interface{}{
"namespace": orgs.ACME.ID,
"name": "ACME's Campaign",
"description": "It's an ACME's campaign",
},
}
apitest.MustExec(ctx, t, s, input, &campaigns, `
fragment u on User { id, databaseID, siteAdmin }
fragment o on Org { id, name }
fragment c on Campaign {
id, name, description, createdAt, updatedAt
author { ...u }
namespace {
... on User { ...u }
... on Org { ...o }
}
}
mutation($admin: CreateCampaignInput!, $org: CreateCampaignInput!){
admin: createCampaign(input: $admin) { ...c }
org: createCampaign(input: $org) { ...c }
}
`)
if have, want := campaigns.Admin.Namespace.ID, users.Admin.ID; have != want {
t.Fatalf("have admin's campaign namespace id %q, want %q", have, want)
}
if have, want := campaigns.Org.Namespace.ID, orgs.ACME.ID; have != want {
t.Fatalf("have orgs's campaign namespace id %q, want %q", have, want)
}
var listed struct{ First, All apitest.CampaignConnection }
apitest.MustExec(ctx, t, s, nil, &listed, `
fragment u on User { id, databaseID, siteAdmin }
fragment o on Org { id, name }
fragment c on Campaign {
id, name, description, createdAt, updatedAt
author { ...u }
namespace {
... on User { ...u }
... on Org { ...o }
}
}
fragment n on CampaignConnection {
nodes { ...c }
totalCount
pageInfo { hasNextPage }
}
query {
first: campaigns(first: 1) { ...n }
all: campaigns() { ...n }
}
`)
have := listed.First.Nodes
want := []apitest.Campaign{campaigns.Admin}
if !reflect.DeepEqual(have, want) {
t.Errorf("wrong campaigns listed. diff=%s", cmp.Diff(have, want))
}
if !listed.First.PageInfo.HasNextPage {
t.Errorf("wrong page info: %+v", listed.First.PageInfo.HasNextPage)
}
have = listed.All.Nodes
want = []apitest.Campaign{campaigns.Admin, campaigns.Org}
if !reflect.DeepEqual(have, want) {
t.Errorf("wrong campaigns listed. diff=%s", cmp.Diff(have, want))
}
if listed.All.PageInfo.HasNextPage {
t.Errorf("wrong page info: %+v", listed.All.PageInfo.HasNextPage)
}
campaigns.Admin.Name = "Updated Admin Campaign Name"
campaigns.Admin.Description = "Updated Admin Campaign Description"
updateInput := map[string]interface{}{
"input": map[string]interface{}{
"id": campaigns.Admin.ID,
"name": campaigns.Admin.Name,
"description": campaigns.Admin.Description,
},
}
var updated struct{ UpdateCampaign apitest.Campaign }
apitest.MustExec(ctx, t, s, updateInput, &updated, `
fragment u on User { id, databaseID, siteAdmin }
fragment o on Org { id, name }
fragment c on Campaign {
id, name, description, createdAt, updatedAt
author { ...u }
namespace {
... on User { ...u }
... on Org { ...o }
}
}
mutation($input: UpdateCampaignInput!){
updateCampaign(input: $input) { ...c }
}
`)
haveUpdated, wantUpdated := updated.UpdateCampaign, campaigns.Admin
if !reflect.DeepEqual(haveUpdated, wantUpdated) {
t.Errorf("wrong campaign updated. diff=%s", cmp.Diff(haveUpdated, wantUpdated))
}
store := repos.NewDBStore(dbconn.Global, sql.TxOptions{})
githubExtSvc := &repos.ExternalService{
Kind: extsvc.KindGitHub,
DisplayName: "GitHub",
Config: marshalJSON(t, &schema.GitHubConnection{
Url: "https://github.com",
Token: os.Getenv("GITHUB_TOKEN"),
Repos: []string{"sourcegraph/sourcegraph"},
}),
}
bbsURL := os.Getenv("BITBUCKET_SERVER_URL")
if bbsURL == "" {
// The test fixtures and golden files were generated with
// this config pointed to bitbucket.sgdev.org
bbsURL = "https://bitbucket.sgdev.org"
}
bbsExtSvc := &repos.ExternalService{
Kind: extsvc.KindBitbucketServer,
DisplayName: "Bitbucket Server",
Config: marshalJSON(t, &schema.BitbucketServerConnection{
Url: bbsURL,
Token: os.Getenv("BITBUCKET_SERVER_TOKEN"),
Repos: []string{"SOUR/vegeta"},
}),
}
err = store.UpsertExternalServices(ctx, githubExtSvc, bbsExtSvc)
if err != nil {
t.Fatal(err)
}
githubSrc, err := repos.NewGithubSource(githubExtSvc, cf)
if err != nil {
t.Fatal(err)
}
githubRepo, err := githubSrc.GetRepo(ctx, "sourcegraph/sourcegraph")
if err != nil {
t.Fatal(err)
}
bbsSrc, err := repos.NewBitbucketServerSource(bbsExtSvc, cf)
if err != nil {
t.Fatal(err)
}
bbsRepos := getBitbucketServerRepos(t, ctx, bbsSrc)
if len(bbsRepos) != 1 {
t.Fatalf("wrong number of bitbucket server repos. got=%d", len(bbsRepos))
}
bbsRepo := bbsRepos[0]
err = store.UpsertRepos(ctx, githubRepo, bbsRepo)
if err != nil {
t.Fatal(err)
}
git.Mocks.ResolveRevision = func(spec string, opt git.ResolveRevisionOptions) (api.CommitID, error) {
return "mockcommitid", nil
}
defer func() { git.Mocks.ResolveRevision = nil }()
var result struct {
Changesets []apitest.Changeset
}
graphqlGithubRepoID := string(graphqlbackend.MarshalRepositoryID(githubRepo.ID))
graphqlBBSRepoID := string(graphqlbackend.MarshalRepositoryID(bbsRepo.ID))
in := fmt.Sprintf(
`[{repository: %q, externalID: %q}, {repository: %q, externalID: %q}]`,
graphqlGithubRepoID, "999",
graphqlBBSRepoID, "2",
)
state := ct.MockChangesetSyncState(&protocol.RepoInfo{
Name: api.RepoName(githubRepo.Name),
VCS: protocol.VCSInfo{URL: githubRepo.URI},
})
defer state.Unmock()
apitest.MustExec(ctx, t, s, nil, &result, fmt.Sprintf(`
fragment gitRef on GitRef {
name
abbrevName
displayName
prefix
type
repository { id }
url
target {
oid
abbreviatedOID
type
}
}
fragment cs on ExternalChangeset {
id
repository { id }
createdAt
updatedAt
title
body
state
nextSyncAt
externalURL {
url
serviceType
}
reviewState
checkState
events(first: 100) {
totalCount
}
head { ...gitRef }
base { ...gitRef }
}
mutation() {
changesets: createChangesets(input: %s) {
...cs
}
}
`, in))
{
want := []apitest.Changeset{
{
Repository: apitest.Repository{ID: graphqlGithubRepoID},
CreatedAt: now.Format(time.RFC3339),
UpdatedAt: now.Format(time.RFC3339),
Title: "add extension filter to filter bar",
Body: "Enables adding extension filters to the filter bar by rendering the extension filter as filter chips inside the filter bar.\r\nWIP for https://github.com/sourcegraph/sourcegraph/issues/962\r\n\r\n> This PR updates the CHANGELOG.md file to describe any user-facing changes.\r\n.\r\n",
State: "MERGED",
ExternalURL: struct{ URL, ServiceType string }{
URL: "https://github.com/sourcegraph/sourcegraph/pull/999",
ServiceType: extsvc.TypeGitHub,
},
ReviewState: "APPROVED",
CheckState: "PASSED",
Events: apitest.ChangesetEventConnection{
TotalCount: 57,
},
// Not scheduled, not added to a campaign yet.
NextSyncAt: "",
Head: apitest.GitRef{
Name: "refs/heads/vo/add-type-issue-filter",
AbbrevName: "vo/add-type-issue-filter",
DisplayName: "vo/add-type-issue-filter",
Prefix: "refs/heads/",
RefType: "GIT_BRANCH",
Repository: struct{ ID string }{ID: "UmVwb3NpdG9yeTox"},
URL: "/github.com/sourcegraph/sourcegraph@vo/add-type-issue-filter",
Target: apitest.GitTarget{
OID: "23a5556c7e25aaab1f1529cee4efb90fe6fe3a30",
AbbreviatedOID: "23a5556",
TargetType: "GIT_COMMIT",
},
},
Base: apitest.GitRef{
Name: "refs/heads/master",
AbbrevName: "master",
DisplayName: "master",
Prefix: "refs/heads/",
RefType: "GIT_BRANCH",
Repository: struct{ ID string }{ID: "UmVwb3NpdG9yeTox"},
URL: "/github.com/sourcegraph/sourcegraph@master",
Target: apitest.GitTarget{
OID: "fa3815ba9ddd49db9111c5e9691e16d27e8f1f60",
AbbreviatedOID: "fa3815b",
TargetType: "GIT_COMMIT",
},
},
},
{
Repository: apitest.Repository{ID: graphqlBBSRepoID},
CreatedAt: now.Format(time.RFC3339),
UpdatedAt: now.Format(time.RFC3339),
Title: "Release testing pr",
Body: "* Remove dump.go\r\n* make make make",
State: "MERGED",
ExternalURL: struct{ URL, ServiceType string }{
URL: "https://bitbucket.sgdev.org/projects/SOUR/repos/vegeta/pull-requests/2",
ServiceType: "bitbucketServer",
},
ReviewState: "PENDING",
CheckState: "PENDING",
Events: apitest.ChangesetEventConnection{
TotalCount: 10,
},
// Not scheduled, not added to a campaign yet.
NextSyncAt: "",
Head: apitest.GitRef{
Name: "refs/heads/release-testing-pr",
AbbrevName: "release-testing-pr",
DisplayName: "release-testing-pr",
Prefix: "refs/heads/",
RefType: "GIT_BRANCH",
Repository: struct{ ID string }{ID: "UmVwb3NpdG9yeToy"},
URL: "/bitbucket.sgdev.org/SOUR/vegeta@release-testing-pr",
Target: apitest.GitTarget{
OID: "be4d84e9c4b0a15e59c5f52900e6d55c7525b8d3",
AbbreviatedOID: "be4d84e",
TargetType: "GIT_COMMIT",
},
},
Base: apitest.GitRef{
Name: "refs/heads/master",
AbbrevName: "master",
DisplayName: "master",
Prefix: "refs/heads/",
RefType: "GIT_BRANCH",
Repository: struct{ ID string }{ID: "UmVwb3NpdG9yeToy"},
URL: "/bitbucket.sgdev.org/SOUR/vegeta@master",
Target: apitest.GitTarget{
OID: "mockcommitid",
AbbreviatedOID: "mockcom",
TargetType: "GIT_COMMIT",
},
},
},
}
have := make([]apitest.Changeset, 0, len(result.Changesets))
for _, c := range result.Changesets {
if c.ID == "" {
t.Fatal("Changeset ID is empty")
}
c.ID = ""
have = append(have, c)
}
if diff := cmp.Diff(have, want); diff != "" {
t.Fatal(diff)
}
// Test node resolver has nextSyncAt correctly set.
for _, c := range result.Changesets {
var changesetResult struct{ Node apitest.Changeset }
apitest.MustExec(ctx, t, s, nil, &changesetResult, fmt.Sprintf(`
query {
node(id: %q) {
... on ExternalChangeset {
nextSyncAt
}
}
}
`, c.ID))
if have, want := changesetResult.Node.NextSyncAt, ""; have != want {
t.Fatalf("incorrect nextSyncAt value, want=%q have=%q", want, have)
}
}
}
var addChangesetsResult struct{ Campaign apitest.Campaign }
changesetIDs := make([]string, 0, len(result.Changesets))
for _, c := range result.Changesets {
changesetIDs = append(changesetIDs, c.ID)
}
// Date when PR #999 from above was created
countsFrom := parseJSONTime(t, "2018-11-14T22:07:45Z")
// Date when PR #999 from above was merged
countsTo := parseJSONTime(t, "2018-12-04T08:10:07Z")
apitest.MustExec(ctx, t, s, nil, &addChangesetsResult, fmt.Sprintf(`
fragment u on User { id, databaseID, siteAdmin }
fragment o on Org { id, name }
fragment cs on ExternalChangeset {
id
repository { id }
createdAt
updatedAt
nextSyncAt
campaigns { nodes { id } }
title
body
state
externalURL {
url
serviceType
}
reviewState
}
fragment c on Campaign {
id, name, description, createdAt, updatedAt
author { ...u }
namespace {
... on User { ...u }
... on Org { ...o }
}
changesets {
nodes {
... on ExternalChangeset {
...cs
}
}
totalCount
pageInfo { hasNextPage }
}
changesetCountsOverTime(from: %s, to: %s) {
date
total
merged
closed
open
openApproved
openChangesRequested
openPending
}
diffStat {
added
changed
deleted
}
}
mutation() {
campaign: addChangesetsToCampaign(campaign: %q, changesets: %s) {
...c
}
}
`,
marshalDateTime(t, countsFrom),
marshalDateTime(t, countsTo),
campaigns.Admin.ID,
marshalJSON(t, changesetIDs),
))
{
have := addChangesetsResult.Campaign.Changesets.TotalCount
want := len(changesetIDs)
if have != want {
t.Fatalf(
"want campaign changesets totalcount %d, have=%d",
want, have,
)
}
}
{
var have []string
want := changesetIDs
for _, n := range addChangesetsResult.Campaign.Changesets.Nodes {
have = append(have, n.ID)
}
if !reflect.DeepEqual(have, want) {
t.Errorf("wrong changesets added to campaign. want=%v, have=%v", want, have)
}
}
{
have := map[string]bool{}
for _, cs := range addChangesetsResult.Campaign.Changesets.Nodes {
have[cs.Campaigns.Nodes[0].ID] = true
}
if !have[campaigns.Admin.ID] || len(have) != 1 {
t.Errorf("wrong campaign added to changeset. want=%v, have=%v", campaigns.Admin.ID, have)
}
}
{
counts := addChangesetsResult.Campaign.ChangesetCountsOverTime
// There are 20 1-day intervals between countsFrom and countsTo, inclusive
if have, want := len(counts), 20; have != want {
t.Errorf("wrong changeset counts length %d, have=%d", want, have)
}
for _, c := range counts {
if have, want := c.Total, int32(1); have != want {
t.Errorf("wrong changeset counts total %d, have=%d", want, have)
}
}
}
{
have := addChangesetsResult.Campaign.DiffStat
want := apitest.DiffStat{Added: 2, Changed: 2, Deleted: 6}
if have != want {
t.Errorf("wrong campaign combined diffstat. want=%v, have=%v", want, have)
}
}
{
for _, c := range addChangesetsResult.Campaign.Changesets.Nodes {
if have, want := c.NextSyncAt, now.Add(8*time.Hour).Format(time.RFC3339); have != want {
t.Fatalf("incorrect nextSyncAt value, want=%q have=%q", want, have)
}
var changesetResult struct{ Node apitest.Changeset }
apitest.MustExec(ctx, t, s, nil, &changesetResult, fmt.Sprintf(`
query {
node(id: %q) {
... on ExternalChangeset {
nextSyncAt
}
}
}
`, c.ID))
if have, want := changesetResult.Node.NextSyncAt, now.Add(8*time.Hour).Format(time.RFC3339); have != want {
t.Fatalf("incorrect nextSyncAt value, want=%q have=%q", want, have)
}
}
}
deleteInput := map[string]interface{}{"id": campaigns.Admin.ID}
apitest.MustExec(ctx, t, s, deleteInput, &struct{}{}, `
mutation($id: ID!){
deleteCampaign(campaign: $id) { alwaysNil }
}
`)
var campaignsAfterDelete struct {
Campaigns struct {
TotalCount int
}
}
apitest.MustExec(ctx, t, s, nil, &campaignsAfterDelete, `
query { campaigns { totalCount } }
`)
haveCount := campaignsAfterDelete.Campaigns.TotalCount
wantCount := listed.All.TotalCount - 1
if haveCount != wantCount {
t.Errorf("wrong campaigns totalcount after delete. want=%d, have=%d", wantCount, haveCount)
}
}
func TestChangesetCountsOverTime(t *testing.T) {
if testing.Short() {
t.Skip()
}
ctx := backend.WithAuthzBypass(context.Background())
dbtesting.SetupGlobalTestDB(t)
rcache.SetupForTest(t)
cf, save := httptestutil.NewGitHubRecorderFactory(t, *update, "test-changeset-counts-over-time")
defer save()
userID := insertTestUser(t, dbconn.Global, "changeset-counts-over-time", false)
repoStore := repos.NewDBStore(dbconn.Global, sql.TxOptions{})
githubExtSvc := &repos.ExternalService{
Kind: extsvc.KindGitHub,
DisplayName: "GitHub",
Config: marshalJSON(t, &schema.GitHubConnection{
Url: "https://github.com",
Token: os.Getenv("GITHUB_TOKEN"),
Repos: []string{"sourcegraph/sourcegraph"},
}),
}
err := repoStore.UpsertExternalServices(ctx, githubExtSvc)
if err != nil {
t.Fatal(err)
}
githubSrc, err := repos.NewGithubSource(githubExtSvc, cf)
if err != nil {
t.Fatal(err)
}
githubRepo, err := githubSrc.GetRepo(ctx, "sourcegraph/sourcegraph")
if err != nil {
t.Fatal(err)
}
err = repoStore.UpsertRepos(ctx, githubRepo)
if err != nil {
t.Fatal(err)
}
store := ee.NewStore(dbconn.Global)
campaign := &campaigns.Campaign{
Name: "Test campaign",
Description: "Testing changeset counts",
AuthorID: userID,
NamespaceUserID: userID,
}
err = store.CreateCampaign(ctx, campaign)
if err != nil {
t.Fatal(err)
}
changesets := []*campaigns.Changeset{
{
RepoID: githubRepo.ID,
ExternalID: "5834",
ExternalServiceType: githubRepo.ExternalRepo.ServiceType,
CampaignIDs: []int64{campaign.ID},
},
{
RepoID: githubRepo.ID,
ExternalID: "5849",
ExternalServiceType: githubRepo.ExternalRepo.ServiceType,
CampaignIDs: []int64{campaign.ID},
},
}
err = store.CreateChangesets(ctx, changesets...)
if err != nil {
t.Fatal(err)
}
mockState := ct.MockChangesetSyncState(&protocol.RepoInfo{
Name: api.RepoName(githubRepo.Name),
VCS: protocol.VCSInfo{URL: githubRepo.URI},
})
defer mockState.Unmock()
err = ee.SyncChangesets(ctx, repoStore, store, cf, changesets...)
if err != nil {
t.Fatal(err)
}
for _, c := range changesets {
campaign.ChangesetIDs = append(campaign.ChangesetIDs, c.ID)
}
err = store.UpdateCampaign(ctx, campaign)
if err != nil {
t.Fatal(err)
}
// Date when PR #5834 was created: "2019-10-02T14:49:31Z"
// We start exactly one day earlier
// Date when PR #5849 was created: "2019-10-03T15:03:21Z"
start := parseJSONTime(t, "2019-10-01T14:49:31Z")
// Date when PR #5834 was merged: "2019-10-07T13:13:45Z"
// Date when PR #5849 was merged: "2019-10-04T08:55:21Z"
end := parseJSONTime(t, "2019-10-07T13:13:45Z")
daysBeforeEnd := func(days int) time.Time {
return end.AddDate(0, 0, -days)
}
r := &campaignResolver{store: store, Campaign: campaign}
rs, err := r.ChangesetCountsOverTime(ctx, &graphqlbackend.ChangesetCountsArgs{
From: &graphqlbackend.DateTime{Time: start},
To: &graphqlbackend.DateTime{Time: end},
})
if err != nil {
t.Fatalf("ChangsetCountsOverTime failed with error: %s", err)
}
have := make([]*ee.ChangesetCounts, 0, len(rs))
for _, cr := range rs {
r := cr.(*changesetCountsResolver)
have = append(have, r.counts)
}
want := []*ee.ChangesetCounts{
{Time: daysBeforeEnd(5), Total: 0, Open: 0},
{Time: daysBeforeEnd(4), Total: 1, Open: 1, OpenPending: 1},
{Time: daysBeforeEnd(3), Total: 2, Open: 1, OpenPending: 1, Merged: 1},
{Time: daysBeforeEnd(2), Total: 2, Open: 1, OpenPending: 1, Merged: 1},
{Time: daysBeforeEnd(1), Total: 2, Open: 1, OpenPending: 1, Merged: 1},
{Time: end, Total: 2, Merged: 2},
}
if !reflect.DeepEqual(have, want) {
t.Errorf("wrong counts listed. diff=%s", cmp.Diff(have, want))
}
}
func TestNullIDResilience(t *testing.T) {
sr := &Resolver{store: ee.NewStore(dbconn.Global)}
s, err := graphqlbackend.NewSchema(sr, nil, nil)
if err != nil {
t.Fatal(err)
}
ctx := backend.WithAuthzBypass(context.Background())
ids := []graphql.ID{
marshalPatchSetID(0),
marshalPatchID(0),
campaigns.MarshalCampaignID(0),
marshalExternalChangesetID(0),
}
for _, id := range ids {
var response struct{ Node struct{ ID string } }
query := fmt.Sprintf(`query { node(id: %q) { id } }`, id)
apitest.MustExec(ctx, t, s, nil, &response, query)
if have, want := response.Node.ID, ""; have != want {
t.Fatalf("node has wrong ID. have=%q, want=%q", have, want)
}
}
mutations := []string{
fmt.Sprintf(`mutation { retryCampaignChangesets(campaign: %q) { id } }`, campaigns.MarshalCampaignID(0)),
fmt.Sprintf(`mutation { closeCampaign(campaign: %q) { id } }`, campaigns.MarshalCampaignID(0)),
fmt.Sprintf(`mutation { deleteCampaign(campaign: %q) { alwaysNil } }`, campaigns.MarshalCampaignID(0)),
fmt.Sprintf(`mutation { publishChangeset(patch: %q) { alwaysNil } }`, marshalPatchID(0)),
fmt.Sprintf(`mutation { syncChangeset(changeset: %q) { alwaysNil } }`, marshalExternalChangesetID(0)),
}
for _, m := range mutations {
var response struct{}
errs := apitest.Exec(ctx, t, s, nil, &response, m)
if len(errs) == 0 {
t.Fatalf("expected errors but none returned (mutation: %q)", m)
}
if have, want := errs[0].Error(), fmt.Sprintf("graphql: %s", ErrIDIsZero.Error()); have != want {
t.Fatalf("wrong errors. have=%s, want=%s (mutation: %q)", have, want, m)
}
}
}
func TestCreatePatchSetFromPatchesResolver(t *testing.T) {
ctx := backend.WithAuthzBypass(context.Background())
dbtesting.SetupGlobalTestDB(t)
userID := insertTestUser(t, dbconn.Global, "create-patch-set", false)
act := actor.FromUser(userID)
ctx = actor.WithActor(ctx, act)
t.Run("invalid patch", func(t *testing.T) {
args := graphqlbackend.CreatePatchSetFromPatchesArgs{
Patches: []graphqlbackend.PatchInput{
{
Repository: graphqlbackend.MarshalRepositoryID(1),
BaseRevision: "f00b4r",
BaseRef: "master",
Patch: "!!! this is not a valid unified diff !!!\n--- x\n+++ y\n@@ 1,1 2,2\na",
},
},
}
_, err := (&Resolver{}).CreatePatchSetFromPatches(ctx, args)
if err == nil {
t.Fatal("want error")
}
if _, ok := errors.Cause(err).(*diff.ParseError); !ok {
t.Fatalf("got error %q (%T), want a diff ParseError", err, errors.Cause(err))
}
})
t.Run("integration", func(t *testing.T) {
if testing.Short() {
t.Skip()
}
rcache.SetupForTest(t)
now := time.Now().UTC().Truncate(time.Microsecond)
clock := func() time.Time {
return now.UTC().Truncate(time.Microsecond)
}
// For testing purposes they all share the same rev, across repos
testingRev := api.CommitID("24f7ca7c1190835519e261d7eefa09df55ceea4f")
mockBackendCommits(t, testingRev)
reposStore := repos.NewDBStore(dbconn.Global, sql.TxOptions{})
repo := newGitHubTestRepo("github.com/sourcegraph/sourcegraph", 1)
if err := reposStore.UpsertRepos(ctx, repo); err != nil {
t.Fatal(err)
}
store := ee.NewStoreWithClock(dbconn.Global, clock)
sr := &Resolver{store: store}
s, err := graphqlbackend.NewSchema(sr, nil, nil)
if err != nil {
t.Fatal(err)
}
var response struct{ CreatePatchSetFromPatches apitest.PatchSet }
apitest.MustExec(ctx, t, s, nil, &response, fmt.Sprintf(`
mutation {
createPatchSetFromPatches(patches: [{repository: %q, baseRevision: %q, baseRef: "master", patch: %q}]) {
... on PatchSet {
id
patches(first: %d) {
nodes {
... on Patch {
repository {
name
}
diff {
fileDiffs {
rawDiff
diffStat {
added
deleted
changed
}
nodes {
oldPath
newPath
hunks {
body
section
newRange { startLine, lines }
oldRange { startLine, lines }
oldNoNewlineAt
}
stat {
added
deleted
changed
}
oldFile {
name
externalURLs {
serviceType
url
}
}
}
}
}
}
}
}
previewURL
diffStat {
added
deleted
changed
}
}
}
}
`, graphqlbackend.MarshalRepositoryID(repo.ID), testingRev, testDiff, 1))
result := response.CreatePatchSetFromPatches
wantPatches := []apitest.Patch{
{
Repository: struct{ Name, URL string }{Name: repo.Name},
Diff: struct{ FileDiffs apitest.FileDiffs }{FileDiffs: testDiffGraphQL},
},
}
if !cmp.Equal(result.Patches.Nodes, wantPatches) {
t.Error("wrong patches", cmp.Diff(result.Patches.Nodes, wantPatches))
}
if have, want := result.PreviewURL, "http://example.com/campaigns/new?patchSet=UGF0Y2hTZXQ6MQ%3D%3D"; have != want {
t.Fatalf("have PreviewURL %q, want %q", have, want)
}
if have, want := result.DiffStat, (apitest.DiffStat{Changed: 2}); have != want {
t.Fatalf("wrong PatchSet.DiffStat.Changed %d, want=%d", have, want)
}
})
}
func TestCreateCampaignWithPatchSet(t *testing.T) {
if testing.Short() {
t.Skip()
}
dbtesting.SetupGlobalTestDB(t)
rcache.SetupForTest(t)
ctx := backend.WithAuthzBypass(context.Background())
userID := insertTestUser(t, dbconn.Global, "create-patch-set", true)
act := actor.FromUser(userID)
ctx = actor.WithActor(ctx, act)
now := time.Now().UTC().Truncate(time.Microsecond)
clock := func() time.Time {
return now.UTC().Truncate(time.Microsecond)
}
testBaseRevision := api.CommitID("24f7ca7c1190835519e261d7eefa09df55ceea4f")
testBaseRef := "refs/heads/master"
testHeadRef := "refs/heads/my-cool-branch"
// gitserver Mocks
mockBackendCommits(t, testBaseRevision)
git.Mocks.MergeBase = func(repo gitserver.Repo, a, b api.CommitID) (api.CommitID, error) {
if string(a) != testBaseRef || string(b) != testHeadRef {
t.Fatalf("gitserver.MergeBase received wrong args: %s %s", a, b)
}
return testBaseRevision, nil
}
t.Cleanup(func() { git.Mocks.MergeBase = nil })
// repo & external service setup
reposStore := repos.NewDBStore(dbconn.Global, sql.TxOptions{})
ext := &repos.ExternalService{
Kind: extsvc.KindGitHub,
DisplayName: "GitHub",
Config: marshalJSON(t, &schema.GitHubConnection{
Url: "https://github.com",
Token: "SECRETTOKEN",
}),
}
if err := reposStore.UpsertExternalServices(ctx, ext); err != nil {
t.Fatal(err)
}
repo := newGitHubTestRepo("github.com/sourcegraph/sourcegraph", 1)
repo.Sources = map[string]*repos.SourceInfo{ext.URN(): {ID: ext.URN()}}
if err := reposStore.UpsertRepos(ctx, repo); err != nil {
t.Fatal(err)
}
// Setup schema resolver
store := ee.NewStoreWithClock(dbconn.Global, clock)
sr := &Resolver{store: store}
s, err := graphqlbackend.NewSchema(sr, nil, nil)
if err != nil {
t.Fatal(err)
}
// Start test
var createPatchSetResponse struct{ CreatePatchSetFromPatches apitest.PatchSet }
apitest.MustExec(ctx, t, s, nil, &createPatchSetResponse, fmt.Sprintf(`
mutation {
createPatchSetFromPatches(patches: [{repository: %q, baseRevision: %q, baseRef: %q, patch: %q}]) {
... on PatchSet {
id
previewURL
}
}
}
`, graphqlbackend.MarshalRepositoryID(repo.ID), testBaseRevision, testBaseRef, testDiff))
patchSetID := createPatchSetResponse.CreatePatchSetFromPatches.ID
var createCampaignResponse struct{ CreateCampaign apitest.Campaign }
input := map[string]interface{}{
"input": map[string]interface{}{
"namespace": string(graphqlbackend.MarshalUserID(userID)),
"name": "Campaign with PatchSet",
"description": "This campaign has a patchset",
"patchSet": patchSetID,
"branch": "my-cool-branch",
},
}
apitest.MustExec(ctx, t, s, input, &createCampaignResponse, `
fragment c on Campaign {
id
branch
status { state }
hasUnpublishedPatches
patches {
nodes {
... on HiddenPatch {
id
}
... on Patch {
id
publicationEnqueued
repository {
name
}
diff {
fileDiffs {
rawDiff
diffStat {
added
deleted
changed
}
nodes {
oldPath
newPath
hunks {
body
section
newRange { startLine, lines }
oldRange { startLine, lines }
oldNoNewlineAt
}
stat {
added
deleted
changed
}
oldFile {
name
externalURLs {
serviceType
url
}
}
}
}
}
}
}
}
diffStat {
added
deleted
changed
}
}
mutation($input: CreateCampaignInput!) {
createCampaign(input: $input) { ...c }
}
`)
campaign := createCampaignResponse.CreateCampaign
if campaign.ID == "" {
log.Fatalf("Campaign does not have ID!")
}
if have, want := len(campaign.Patches.Nodes), 1; have != want {
log.Fatalf("wrong length of patches. want=%d, have=%d", want, have)
}
if campaign.DiffStat.Changed != 2 {
t.Fatalf("diffstat is wrong: %+v", campaign.DiffStat)
}
if !campaign.HasUnpublishedPatches {
t.Errorf("campaign HasUnpublishedPatches is false, want true")
}
patch := campaign.Patches.Nodes[0]
if have, want := campaign.DiffStat, patch.Diff.FileDiffs.DiffStat; have != want {
t.Errorf("wrong campaign combined diffstat. want=%v, have=%v", want, have)
}
if patch.PublicationEnqueued {
t.Errorf("patch PublicationEnqueued is true, want false")
}
// Publish the changesets in the campaign
for _, p := range campaign.Patches.Nodes {
var res struct{}
input := map[string]interface{}{"patch": p.ID}
q := `mutation($patch: ID!) { publishChangeset(patch: $patch) { alwaysNil } }`
apitest.MustExec(ctx, t, s, input, &res, q)
}
// Now we need to run the created ChangesetJob
changesetJobs, _, err := store.ListChangesetJobs(ctx, ee.ListChangesetJobsOpts{})
if err != nil {
t.Fatal(err)
}
if len(changesetJobs) != 1 {
t.Fatalf("wrong number of changeset jobs created: %d", len(changesetJobs))
}
headRef := "refs/heads/" + campaign.Branch
fakePR := &github.PullRequest{
ID: "FOOBARID",
Title: campaign.Name,
Body: campaign.Description,
BaseRefName: git.AbbreviateRef(testBaseRef),
HeadRefName: git.AbbreviateRef(headRef),
Number: 12345,
State: "OPEN",
TimelineItems: []github.TimelineItem{
{Type: "PullRequestCommit", Item: &github.PullRequestCommit{
Commit: github.Commit{
OID: "new-f00bar",
PushedDate: now,
CommittedDate: now,
},
}},
},
CreatedAt: now,
UpdatedAt: now,
}
gitClient := &ct.FakeGitserverClient{Response: headRef, ResponseErr: nil}
sourcer := repos.NewFakeSourcer(nil, &ct.FakeChangesetSource{
Svc: ext,
WantHeadRef: headRef,
WantBaseRef: testBaseRef,
FakeMetadata: fakePR,
})
state := ct.MockChangesetSyncState(&protocol.RepoInfo{
Name: api.RepoName(repo.Name),
VCS: protocol.VCSInfo{URL: repo.URI},
})
defer state.Unmock()
job := changesetJobs[0]
c, err := store.GetCampaign(ctx, ee.GetCampaignOpts{ID: job.CampaignID})
if err != nil {
t.Fatal(err)
}
err = ee.ExecChangesetJob(ctx, c, job, ee.ExecChangesetJobOpts{
Clock: clock, Store: store, GitClient: gitClient, Sourcer: sourcer, ExternalURL: "http://localhost",
})
if err != nil {
t.Fatal(err)
}
updatedJob, err := store.GetChangesetJob(ctx, ee.GetChangesetJobOpts{ID: job.ID})
if err != nil {
t.Fatal(err)
}
if updatedJob.ChangesetID == 0 {
t.Fatal("ChangesetJob.ChangesetID has not been updated")
}
cs, err := store.GetChangeset(ctx, ee.GetChangesetOpts{
ID: job.ChangesetID,
})
if err != nil {
t.Fatal(err)
}
cs.SetDiffStat(&diff.Stat{Added: 1, Changed: 1, Deleted: 3})
if err := store.UpdateChangesets(ctx, cs); err != nil {
t.Fatal(err)
}
// We need to set up these mocks because the GraphQL layer now needs to talk to
// gitserver to calculate the diff for a changeset.
git.Mocks.GetCommit = func(api.CommitID) (*git.Commit, error) {
return &git.Commit{ID: testBaseRevision}, nil
}
defer func() { git.Mocks.GetCommit = nil }()
var queryCampaignResponse struct{ Node apitest.Campaign }
apitest.MustExec(ctx, t, s, nil, &queryCampaignResponse, fmt.Sprintf(`
fragment c on Campaign {
id
status { state }
hasUnpublishedPatches
branch
patches {
totalCount
}
changesets {
nodes {
... on ExternalChangeset {
state
diff {
fileDiffs {
diffStat {
added
deleted
changed
}
}
}
}
}
totalCount
}
openChangesets {
totalCount
}
diffStat {
added
deleted
changed
}
}
query {
node(id: %q) { ...c }
}
`, campaign.ID))
campaign = queryCampaignResponse.Node
if campaign.Status.State != "COMPLETED" {
t.Fatalf("campaign is not in state 'COMPLETED': %q", campaign.Status.State)
}
if campaign.HasUnpublishedPatches {
t.Errorf("campaign HasUnpublishedPatches is true, want false")
}
if campaign.Patches.TotalCount != 0 {
t.Fatalf("campaign.Patches.TotalCount is not zero: %d", campaign.Patches.TotalCount)
}
if campaign.OpenChangesets.TotalCount != 1 {
t.Fatalf("campaign.OpenChangesets.TotalCount is not 1: %d", campaign.OpenChangesets.TotalCount)
}
if campaign.Changesets.TotalCount != 1 {
t.Fatalf("campaign.Changesets.TotalCount is not 1: %d", campaign.Changesets.TotalCount)
}
if campaign.DiffStat.Changed != 1 {
t.Fatalf("diffstat is wrong: %+v", campaign.DiffStat)
}
changeset := campaign.Changesets.Nodes[0]
if have, want := campaign.DiffStat, changeset.Diff.FileDiffs.DiffStat; have != want {
t.Errorf("wrong campaign combined diffstat. want=%v, have=%v", want, have)
}
}
func getBitbucketServerRepos(t testing.TB, ctx context.Context, src *repos.BitbucketServerSource) []*repos.Repo {
results := make(chan repos.SourceResult)
go func() {
src.ListRepos(ctx, results)
close(results)
}()
var repos []*repos.Repo
for res := range results {
if res.Err != nil {
t.Fatal(res.Err)
}
repos = append(repos, res.Repo)
}
return repos
}
| [
"\"GITHUB_TOKEN\"",
"\"BITBUCKET_SERVER_URL\"",
"\"BITBUCKET_SERVER_TOKEN\"",
"\"GITHUB_TOKEN\""
]
| []
| [
"BITBUCKET_SERVER_URL",
"BITBUCKET_SERVER_TOKEN",
"GITHUB_TOKEN"
]
| [] | ["BITBUCKET_SERVER_URL", "BITBUCKET_SERVER_TOKEN", "GITHUB_TOKEN"] | go | 3 | 0 | |
build/lambda/custom/custom.go | package main
import (
"context"
"encoding/json"
"fmt"
"os"
"github.com/aws/aws-lambda-go/lambda"
"github.com/lendi-au/helm-janitor/cmd/delete"
"github.com/lendi-au/helm-janitor/cmd/scan"
"github.com/lendi-au/helm-janitor/internal/config"
"github.com/lendi-au/helm-janitor/internal/format"
events "github.com/lendi-au/helm-janitor/pkg/lambda"
log "github.com/sirupsen/logrus"
)
// Runs the generic handler to execute `helm delete`
// when the TTL expires.
func init() {
// Log as JSON instead of the default ASCII formatter.
log.SetFormatter(&log.JSONFormatter{})
// Output to stdout instead of the default stderr
// (any io.Writer would work here)
log.SetOutput(os.Stdout)
logLevel := "info"
if os.Getenv("LOG_LEVEL") != "" {
logLevel = os.Getenv("LOG_LEVEL")
}
level, err := log.ParseLevel(logLevel)
if err != nil {
log.Errorf("Dodgy log level set: %s", logLevel)
log.SetLevel(log.WarnLevel)
} else {
log.SetLevel(level)
}
}
type Test struct {
Timmy string `json:"timmy"`
}
// EventBody is the git webhook event sent by Stack Janitor
type EventBody struct {
Name string `json:"name"`
Time Test `json:"time"`
}
// HandleRequest is the main lambda handler
func HandleRequest(ctx context.Context, event interface{}) error {
scanner := scan.NewScanClient()
// fmt.Println(reflect.TypeOf(event))
test, _ := json.Marshal(event)
// log.Debugf(string(test))
switch event := event.(type) {
case nil:
log.Fatal("event is nil")
case string:
log.Fatalf("event was a string: %s", event)
case EventBody:
log.Infof("what kind of event: %v", event.Name)
scanner.Selector = event.Name
case events.GithubWebhookEvent:
log.Debugf("my action is a %v with pr %v and repo %v", event.Action, event.PullRequest, event.Repository)
a := validBranchName(event.PullRequest.State)
b := fmt.Sprintf("BRANCH=%s,REPOSITORY=%s", a, event.PullRequest.Head.Repository.Name)
scanner.Selector = b
case events.BitbucketWebhookEvent:
log.Debugf("my pr %v on the repo %v", event.PullRequest, event.Repository)
a := validBranchName(event.PullRequest.Source.Branch.Name)
b := fmt.Sprintf("BRANCH=%s,REPOSITORY=%s", a, event.Repository.Name)
scanner.Selector = b
default:
a := new(events.BitbucketWebhookEvent)
_ = json.Unmarshal(test, a)
b := validBranchName(a.PullRequest.Source.Branch.Name)
log.Infof("tried: %s on branch %s", a.Repository.Name, b)
c := fmt.Sprintf("BRANCH=%s,REPOSITORY=%s", b, a.Repository.Name)
scanner.Selector = c
}
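// The selector built above (BRANCH=...,REPOSITORY=...) is presumably used by the scanner to narrow which releases are candidates for deletion; the remaining scanner options come from the environment.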
scanner.Dryrun = config.GetenvWithDefaultBool("DRY_RUN", false)
scanner.AllNamespaces = config.GetenvWithDefaultBool("ALL_NAMESPACES", true)
scanner.Namespace = config.GetenvWithDefault("NAMESPACE", "")
scanner.IncludeNamespaces = config.GetenvWithDefault("INCLUDE_NAMESPACES", "")
scanner.ExcludeNamespaces = config.GetenvWithDefault("EXCLUDE_NAMESPACES", "")
scanner.Context = ctx
scanner.Init()
delete.RunDeleteSet(scanner)
return nil
}
func main() {
log.Infof("starting")
if os.Getenv("DEBUG") == "true" {
ctx := context.Background()
a := validBranchName("feature/DE-4258-define-coversheet-view-model-and-conversion-for-se-incomes")
b := "decision-engine-team"
HandleRequest(ctx, EventBody{
Name: fmt.Sprintf("BRANCH=%s,REPOSITORY=%s,helm-janitor=true", a, b),
Time: Test{
Timmy: "now",
},
})
} else {
lambda.Start(HandleRequest)
}
log.Infof("finished")
}
func validBranchName(branch string) string {
a := format.FormatBranch(branch)
return format.ShortBranchName(a)
}
| [
"\"LOG_LEVEL\"",
"\"LOG_LEVEL\"",
"\"DEBUG\""
]
| []
| [
"LOG_LEVEL",
"DEBUG"
]
| [] | ["LOG_LEVEL", "DEBUG"] | go | 2 | 0 | |
sriov-fec/main.go | // SPDX-License-Identifier: Apache-2.0
// Copyright (c) 2020-2021 Intel Corporation
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"flag"
"os"
"strings"
"time"
secv1 "github.com/openshift/api/security/v1"
appsv1 "k8s.io/api/apps/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"github.com/open-ness/openshift-operator/common/pkg/assets"
sriovfecv1 "github.com/open-ness/openshift-operator/sriov-fec/api/v1"
"github.com/open-ness/openshift-operator/sriov-fec/controllers"
// +kubebuilder:scaffold:imports
)
var (
scheme = runtime.NewScheme()
setupLog = ctrl.Log.WithName("setup")
operatorDeploymentName string
)
func init() {
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(secv1.AddToScheme(scheme))
utilruntime.Must(sriovfecv1.AddToScheme(scheme))
n := os.Getenv("NAME")
operatorDeploymentName = n[:strings.LastIndex(n[:strings.LastIndex(n, "-")], "-")]
// +kubebuilder:scaffold:scheme
}
func main() {
var metricsAddr string
var healthProbeAddr string
var enableLeaderElection bool
flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.")
flag.BoolVar(&enableLeaderElection, "leader-elect", false,
"Enable leader election for controller manager. "+
"Enabling this will ensure there is only one active controller manager.")
flag.StringVar(&healthProbeAddr, "health-probe-bind-address", ":8081", "The address the controller binds to for serving health probes.")
opts := zap.Options{}
opts.BindFlags(flag.CommandLine)
flag.Parse()
ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
config := ctrl.GetConfigOrDie()
mgr, err := ctrl.NewManager(config, ctrl.Options{
Scheme: scheme,
MetricsBindAddress: metricsAddr,
HealthProbeBindAddress: healthProbeAddr,
Port: 9443,
LeaderElection: enableLeaderElection,
LeaderElectionID: "98e78623.intel.com",
})
if err != nil {
setupLog.Error(err, "unable to start manager")
os.Exit(1)
}
if err = (&controllers.SriovFecClusterConfigReconciler{
Client: mgr.GetClient(),
Log: ctrl.Log.WithName("controllers").WithName("SriovFecClusterConfig"),
Scheme: mgr.GetScheme(),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "SriovFecClusterConfig")
os.Exit(1)
}
// +kubebuilder:scaffold:builder
if err := mgr.AddHealthzCheck("health", healthz.Ping); err != nil {
setupLog.Error(err, "unable to set up health check")
os.Exit(1)
}
if err := mgr.AddReadyzCheck("check", healthz.Ping); err != nil {
setupLog.Error(err, "unable to set up ready check")
os.Exit(1)
}
c, err := client.New(config, client.Options{Scheme: scheme})
if err != nil {
setupLog.Error(err, "failed to create client")
os.Exit(1)
}
namespace := os.Getenv("SRIOV_FEC_NAMESPACE")
owner := &appsv1.Deployment{}
err = c.Get(context.Background(), client.ObjectKey{
Namespace: namespace,
Name: operatorDeploymentName,
}, owner)
if err != nil {
setupLog.Error(err, "Unable to get operator deployment")
os.Exit(1)
}
if err := (&assets.Manager{
Client: c,
Log: ctrl.Log.WithName("asset_manager").WithName("sriov-fec"),
EnvPrefix: "SRIOV_FEC_",
Scheme: scheme,
Owner: owner,
Assets: []assets.Asset{
{
Path: "assets/100-labeler.yaml",
},
{
Path: "assets/200-device-plugin.yaml",
},
{
Path: "assets/300-daemon.yaml",
BlockingReadiness: assets.ReadinessPollConfig{Retries: 30, Delay: 20 * time.Second},
},
},
}).LoadAndDeploy(context.Background(), true); err != nil {
setupLog.Error(err, "failed to deploy the assets")
os.Exit(1)
}
setupLog.V(2).Info("starting manager")
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
setupLog.Error(err, "problem running manager")
os.Exit(1)
}
}
| [
"\"NAME\"",
"\"SRIOV_FEC_NAMESPACE\""
]
| []
| [
"SRIOV_FEC_NAMESPACE",
"NAME"
]
| [] | ["SRIOV_FEC_NAMESPACE", "NAME"] | go | 2 | 0 | |
config/database.go | package config
import (
"log"
"os"
"github.com/jinzhu/gorm"
_ "github.com/jinzhu/gorm/dialects/postgres" // Driver for GORM
)
// DB Context
var DB *gorm.DB
// Database Initialization
func Database() *gorm.DB {
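// DATABASE_DRIVER selects the GORM dialect (e.g. "postgres") and DATABASE_URL is the connection string.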
driver := os.Getenv("DATABASE_DRIVER")
database := os.Getenv("DATABASE_URL")
var err error
DB, err = gorm.Open(driver, database)
if err != nil {
log.Panic(err)
}
log.Println("Database Connected")
return DB
}
| [
"\"DATABASE_DRIVER\"",
"\"DATABASE_URL\""
]
| []
| [
"DATABASE_DRIVER",
"DATABASE_URL"
]
| [] | ["DATABASE_DRIVER", "DATABASE_URL"] | go | 2 | 0 | |
alicloud/provider.go | package alicloud
import (
"os"
"github.com/alibaba/terraform-provider/alicloud/connectivity"
"github.com/denverdino/aliyungo/common"
"github.com/hashicorp/terraform/helper/mutexkv"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/terraform"
)
// Provider returns a schema.Provider for alicloud
func Provider() terraform.ResourceProvider {
return &schema.Provider{
Schema: map[string]*schema.Schema{
"access_key": &schema.Schema{
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_ACCESS_KEY", os.Getenv("ALICLOUD_ACCESS_KEY")),
Description: descriptions["access_key"],
},
"secret_key": &schema.Schema{
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_SECRET_KEY", os.Getenv("ALICLOUD_SECRET_KEY")),
Description: descriptions["secret_key"],
},
"region": &schema.Schema{
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_REGION", os.Getenv("ALICLOUD_REGION")),
Description: descriptions["region"],
},
"security_token": &schema.Schema{
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_SECURITY_TOKEN", os.Getenv("SECURITY_TOKEN")),
Description: descriptions["security_token"],
},
"ots_instance_name": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Deprecated: "Field 'ots_instance_name' has been deprecated from provider version 1.10.0. New field 'instance_name' of resource 'alicloud_ots_table' instead.",
},
"log_endpoint": &schema.Schema{
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("LOG_ENDPOINT", os.Getenv("LOG_ENDPOINT")),
Description: descriptions["log_endpoint"],
},
"mns_endpoint": &schema.Schema{
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("MNS_ENDPOINT", os.Getenv("MNS_ENDPOINT")),
Description: descriptions["mns_endpoint"],
},
"account_id": &schema.Schema{
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("ALICLOUD_ACCOUNT_ID", os.Getenv("ALICLOUD_ACCOUNT_ID")),
Description: descriptions["account_id"],
},
"fc": &schema.Schema{
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("FC_ENDPOINT", os.Getenv("FC_ENDPOINT")),
Description: descriptions["fc"],
},
},
DataSourcesMap: map[string]*schema.Resource{
"alicloud_account": dataSourceAlicloudAccount(),
"alicloud_images": dataSourceAlicloudImages(),
"alicloud_regions": dataSourceAlicloudRegions(),
"alicloud_zones": dataSourceAlicloudZones(),
"alicloud_instance_types": dataSourceAlicloudInstanceTypes(),
"alicloud_instances": dataSourceAlicloudInstances(),
"alicloud_disks": dataSourceAlicloudDisks(),
"alicloud_vpcs": dataSourceAlicloudVpcs(),
"alicloud_vswitches": dataSourceAlicloudVSwitches(),
"alicloud_eips": dataSourceAlicloudEips(),
"alicloud_key_pairs": dataSourceAlicloudKeyPairs(),
"alicloud_kms_keys": dataSourceAlicloudKmsKeys(),
"alicloud_dns_domains": dataSourceAlicloudDnsDomains(),
"alicloud_dns_groups": dataSourceAlicloudDnsGroups(),
"alicloud_dns_records": dataSourceAlicloudDnsRecords(),
// alicloud_dns_domain_groups, alicloud_dns_domain_records have been deprecated.
"alicloud_dns_domain_groups": dataSourceAlicloudDnsGroups(),
"alicloud_dns_domain_records": dataSourceAlicloudDnsRecords(),
// alicloud_ram_account_alias has been deprecated
"alicloud_ram_account_alias": dataSourceAlicloudRamAccountAlias(),
"alicloud_ram_account_aliases": dataSourceAlicloudRamAccountAlias(),
"alicloud_ram_groups": dataSourceAlicloudRamGroups(),
"alicloud_ram_users": dataSourceAlicloudRamUsers(),
"alicloud_ram_roles": dataSourceAlicloudRamRoles(),
"alicloud_ram_policies": dataSourceAlicloudRamPolicies(),
"alicloud_security_groups": dataSourceAlicloudSecurityGroups(),
"alicloud_security_group_rules": dataSourceAlicloudSecurityGroupRules(),
"alicloud_slbs": dataSourceAlicloudSlbs(),
"alicloud_slb_attachments": dataSourceAlicloudSlbAttachments(),
"alicloud_slb_listeners": dataSourceAlicloudSlbListeners(),
"alicloud_slb_rules": dataSourceAlicloudSlbRules(),
"alicloud_slb_server_groups": dataSourceAlicloudSlbServerGroups(),
"alicloud_oss_bucket_objects": dataSourceAlicloudOssBucketObjects(),
"alicloud_oss_buckets": dataSourceAlicloudOssBuckets(),
"alicloud_fc_functions": dataSourceAlicloudFcFunctions(),
"alicloud_fc_services": dataSourceAlicloudFcServices(),
"alicloud_fc_triggers": dataSourceAlicloudFcTriggers(),
"alicloud_db_instances": dataSourceAlicloudDBInstances(),
"alicloud_pvtz_zones": dataSourceAlicloudPvtzZones(),
"alicloud_pvtz_zone_records": dataSourceAlicloudPvtzZoneRecords(),
"alicloud_router_interfaces": dataSourceAlicloudRouterInterfaces(),
"alicloud_vpn_gateways": dataSourceAlicloudVpnGateways(),
"alicloud_vpn_customer_gateways": dataSourceAlicloudVpnCustomerGateways(),
"alicloud_vpn_connections": dataSourceAlicloudVpnConnections(),
"alicloud_mongo_instances": dataSourceAlicloudMongoInstances(),
"alicloud_kvstore_instances": dataSourceAlicloudKVStoreInstances(),
"alicloud_cen_instances": dataSourceAlicloudCenInstances(),
"alicloud_cen_bandwidth_packages": dataSourceAlicloudCenBandwidthPackages(),
"alicloud_cen_bandwidth_limits": dataSourceAlicloudCenBandwidthLimits(),
"alicloud_cen_route_entries": dataSourceAlicloudCenRouteEntries(),
"alicloud_mns_queues": dataSourceAlicloudMNSQueues(),
"alicloud_mns_topics": dataSourceAlicloudMNSTopics(),
"alicloud_mns_topic_subscriptions": dataSourceAlicloudMNSTopicSubscriptions(),
"alicloud_api_gateway_groups": dataSourceAlicloudApiGatewayGroups(),
},
ResourcesMap: map[string]*schema.Resource{
"alicloud_instance": resourceAliyunInstance(),
"alicloud_ram_role_attachment": resourceAlicloudRamRoleAttachment(),
"alicloud_disk": resourceAliyunDisk(),
"alicloud_disk_attachment": resourceAliyunDiskAttachment(),
"alicloud_security_group": resourceAliyunSecurityGroup(),
"alicloud_security_group_rule": resourceAliyunSecurityGroupRule(),
"alicloud_db_database": resourceAlicloudDBDatabase(),
"alicloud_db_account": resourceAlicloudDBAccount(),
"alicloud_db_account_privilege": resourceAlicloudDBAccountPrivilege(),
"alicloud_db_backup_policy": resourceAlicloudDBBackupPolicy(),
"alicloud_db_connection": resourceAlicloudDBConnection(),
"alicloud_db_instance": resourceAlicloudDBInstance(),
"alicloud_ess_scaling_group": resourceAlicloudEssScalingGroup(),
"alicloud_ess_scaling_configuration": resourceAlicloudEssScalingConfiguration(),
"alicloud_ess_scaling_rule": resourceAlicloudEssScalingRule(),
"alicloud_ess_schedule": resourceAlicloudEssSchedule(),
"alicloud_ess_attachment": resourceAlicloudEssAttachment(),
"alicloud_ess_lifecycle_hook": resourceAlicloudEssLifecycleHook(),
"alicloud_ess_alarm": resourceAlicloudEssAlarm(),
"alicloud_vpc": resourceAliyunVpc(),
"alicloud_nat_gateway": resourceAliyunNatGateway(),
// "alicloud_subnet" aims to match aws usage habit.
"alicloud_subnet": resourceAliyunSubnet(),
"alicloud_vswitch": resourceAliyunSubnet(),
"alicloud_route_entry": resourceAliyunRouteEntry(),
"alicloud_route_table": resourceAliyunRouteTable(),
"alicloud_route_table_attachment": resourceAliyunRouteTableAttachment(),
"alicloud_snat_entry": resourceAliyunSnatEntry(),
"alicloud_forward_entry": resourceAliyunForwardEntry(),
"alicloud_eip": resourceAliyunEip(),
"alicloud_eip_association": resourceAliyunEipAssociation(),
"alicloud_slb": resourceAliyunSlb(),
"alicloud_slb_listener": resourceAliyunSlbListener(),
"alicloud_slb_attachment": resourceAliyunSlbAttachment(),
"alicloud_slb_server_group": resourceAliyunSlbServerGroup(),
"alicloud_slb_rule": resourceAliyunSlbRule(),
"alicloud_slb_acl": resourceAlicloudSlbAcl(),
"alicloud_oss_bucket": resourceAlicloudOssBucket(),
"alicloud_oss_bucket_object": resourceAlicloudOssBucketObject(),
"alicloud_dns_record": resourceAlicloudDnsRecord(),
"alicloud_dns": resourceAlicloudDns(),
"alicloud_dns_group": resourceAlicloudDnsGroup(),
"alicloud_key_pair": resourceAlicloudKeyPair(),
"alicloud_key_pair_attachment": resourceAlicloudKeyPairAttachment(),
"alicloud_kms_key": resourceAlicloudKmsKey(),
"alicloud_ram_user": resourceAlicloudRamUser(),
"alicloud_ram_access_key": resourceAlicloudRamAccessKey(),
"alicloud_ram_login_profile": resourceAlicloudRamLoginProfile(),
"alicloud_ram_group": resourceAlicloudRamGroup(),
"alicloud_ram_role": resourceAlicloudRamRole(),
"alicloud_ram_policy": resourceAlicloudRamPolicy(),
// alicloud_ram_alias has been deprecated
"alicloud_ram_alias": resourceAlicloudRamAccountAlias(),
"alicloud_ram_account_alias": resourceAlicloudRamAccountAlias(),
"alicloud_ram_group_membership": resourceAlicloudRamGroupMembership(),
"alicloud_ram_user_policy_attachment": resourceAlicloudRamUserPolicyAtatchment(),
"alicloud_ram_role_policy_attachment": resourceAlicloudRamRolePolicyAttachment(),
"alicloud_ram_group_policy_attachment": resourceAlicloudRamGroupPolicyAtatchment(),
"alicloud_container_cluster": resourceAlicloudCSSwarm(),
"alicloud_cs_application": resourceAlicloudCSApplication(),
"alicloud_cs_swarm": resourceAlicloudCSSwarm(),
"alicloud_cs_kubernetes": resourceAlicloudCSKubernetes(),
"alicloud_cdn_domain": resourceAlicloudCdnDomain(),
"alicloud_router_interface": resourceAlicloudRouterInterface(),
"alicloud_router_interface_connection": resourceAlicloudRouterInterfaceConnection(),
"alicloud_ots_table": resourceAlicloudOtsTable(),
"alicloud_ots_instance": resourceAlicloudOtsInstance(),
"alicloud_ots_instance_attachment": resourceAlicloudOtsInstanceAttachment(),
"alicloud_cms_alarm": resourceAlicloudCmsAlarm(),
"alicloud_pvtz_zone": resourceAlicloudPvtzZone(),
"alicloud_pvtz_zone_attachment": resourceAlicloudPvtzZoneAttachment(),
"alicloud_pvtz_zone_record": resourceAlicloudPvtzZoneRecord(),
"alicloud_log_project": resourceAlicloudLogProject(),
"alicloud_log_store": resourceAlicloudLogStore(),
"alicloud_log_store_index": resourceAlicloudLogStoreIndex(),
"alicloud_log_machine_group": resourceAlicloudLogMachineGroup(),
"alicloud_fc_service": resourceAlicloudFCService(),
"alicloud_fc_function": resourceAlicloudFCFunction(),
"alicloud_fc_trigger": resourceAlicloudFCTrigger(),
"alicloud_vpn_gateway": resourceAliyunVpnGateway(),
"alicloud_vpn_customer_gateway": resourceAliyunVpnCustomerGateway(),
"alicloud_vpn_connection": resourceAliyunVpnConnection(),
"alicloud_ssl_vpn_server": resourceAliyunSslVpnServer(),
"alicloud_ssl_vpn_client_cert": resourceAliyunSslVpnClientCert(),
"alicloud_cen_instance": resourceAlicloudCenInstance(),
"alicloud_cen_instance_attachment": resourceAlicloudCenInstanceAttachment(),
"alicloud_cen_bandwidth_package": resourceAlicloudCenBandwidthPackage(),
"alicloud_cen_bandwidth_package_attachment": resourceAlicloudCenBandwidthPackageAttachment(),
"alicloud_cen_bandwidth_limit": resourceAlicloudCenBandwidthLimit(),
"alicloud_cen_route_entry": resourceAlicloudCenRouteEntry(),
"alicloud_kvstore_instance": resourceAlicloudKVStoreInstance(),
"alicloud_kvstore_backup_policy": resourceAlicloudKVStoreBackupPolicy(),
"alicloud_datahub_project": resourceAlicloudDatahubProject(),
"alicloud_datahub_subscription": resourceAlicloudDatahubSubscription(),
"alicloud_datahub_topic": resourceAlicloudDatahubTopic(),
"alicloud_mns_queue": resourceAlicloudMNSQueue(),
"alicloud_mns_topic": resourceAlicloudMNSTopic(),
"alicloud_havip": resourceAliyunHaVip(),
"alicloud_mns_topic_subscription": resourceAlicloudMNSSubscription(),
"alicloud_havip_attachment": resourceAliyunHaVipAttachment(),
"alicloud_api_gateway_group": resourceAliyunApigatewayGroup(),
},
ConfigureFunc: providerConfigure,
}
}
func providerConfigure(d *schema.ResourceData) (interface{}, error) {
region, ok := d.GetOk("region")
if !ok {
if region == "" {
region = DEFAULT_REGION
}
}
config := connectivity.Config{
AccessKey: d.Get("access_key").(string),
SecretKey: d.Get("secret_key").(string),
Region: common.Region(region.(string)),
RegionId: region.(string),
}
if token, ok := d.GetOk("security_token"); ok && token.(string) != "" {
config.SecurityToken = token.(string)
}
if ots_instance_name, ok := d.GetOk("ots_instance_name"); ok && ots_instance_name.(string) != "" {
config.OtsInstanceName = ots_instance_name.(string)
}
if logEndpoint, ok := d.GetOk("log_endpoint"); ok && logEndpoint.(string) != "" {
config.LogEndpoint = logEndpoint.(string)
}
if mnsEndpoint, ok := d.GetOk("mns_endpoint"); ok && mnsEndpoint.(string) != "" {
config.MNSEndpoint = mnsEndpoint.(string)
}
if account, ok := d.GetOk("account_id"); ok && account.(string) != "" {
config.AccountId = account.(string)
}
if fcEndpoint, ok := d.GetOk("fc"); ok && fcEndpoint.(string) != "" {
config.FcEndpoint = fcEndpoint.(string)
}
client, err := config.Client()
if err != nil {
return nil, err
}
return client, nil
}
// This is a global MutexKV for use within this plugin.
var alicloudMutexKV = mutexkv.NewMutexKV()
var descriptions map[string]string
func init() {
descriptions = map[string]string{
"access_key": "Access key of alicloud",
"secret_key": "Secret key of alicloud",
"region": "Region of alicloud",
"security_token": "Alibaba Cloud Security Token",
"log_endpoint": "Alibaba Cloud log service self-define endpoint",
"mns_endpoint": "Alibaba Cloud mns service self-define endpoint",
"account_id": "Alibaba Cloud account ID",
"fc": "Custom function compute endpoints",
}
}
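// Illustrative usage sketch (an assumption, not part of the original provider source):
// given the resource map and ConfigureFunc above, and with credentials typically supplied
// via the ALICLOUD_ACCESS_KEY / ALICLOUD_SECRET_KEY environment variables listed for this
// file, a minimal Terraform configuration might look like:
//
//	provider "alicloud" {
//	  region = "cn-beijing"   // hypothetical region value
//	}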
| [
"\"ALICLOUD_ACCESS_KEY\"",
"\"ALICLOUD_SECRET_KEY\"",
"\"ALICLOUD_REGION\"",
"\"SECURITY_TOKEN\"",
"\"LOG_ENDPOINT\"",
"\"MNS_ENDPOINT\"",
"\"ALICLOUD_ACCOUNT_ID\"",
"\"FC_ENDPOINT\""
]
| []
| [
"ALICLOUD_REGION",
"LOG_ENDPOINT",
"SECURITY_TOKEN",
"MNS_ENDPOINT",
"ALICLOUD_ACCOUNT_ID",
"ALICLOUD_ACCESS_KEY",
"FC_ENDPOINT",
"ALICLOUD_SECRET_KEY"
]
| [] | ["ALICLOUD_REGION", "LOG_ENDPOINT", "SECURITY_TOKEN", "MNS_ENDPOINT", "ALICLOUD_ACCOUNT_ID", "ALICLOUD_ACCESS_KEY", "FC_ENDPOINT", "ALICLOUD_SECRET_KEY"] | go | 8 | 0 | |
pilotctl/receivers/mongo/core/conf.go | package core
/*
Onix Config Manager - MongoDb event receiver for Pilot Control
Copyright (c) 2018-2021 by www.gatblau.org
Licensed under the Apache License, Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0
Contributors to this project, hereby assign copyright in this code to the project,
to be licensed under the same terms as the rest of the code.
*/
import "os"
// getDbConnString get the connection string to the MongoDb database
// e.g. mongodb://localhost:27017
// e.g. mongodb://user:password@host:27017/dbname?keepAlive=true&poolSize=30&autoReconnect=true&socketTimeoutMS=360000&connectTimeoutMS=360000
func getDbConnString() string {
value := os.Getenv("OX_MONGO_EVR_CONN")
if len(value) == 0 {
panic("OX_MONGO_EVR_CONN not defined")
}
return value
}
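// Illustrative sketch (an assumption, not part of the original file): the returned
// connection string would typically be handed to a MongoDB driver, e.g. with the
// official mongo-go-driver:
//
//	client, err := mongo.Connect(ctx, options.Client().ApplyURI(getDbConnString()))
//	if err != nil {
//	    panic(err)
//	}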
| [
"\"OX_MONGO_EVR_CONN\""
]
| []
| [
"OX_MONGO_EVR_CONN"
]
| [] | ["OX_MONGO_EVR_CONN"] | go | 1 | 0 | |
src/helloworld.go | package main
import (
"fmt"
"os"
"github.com/bwmarrin/discordgo"
)
var (
commandPrefix string
botID string
)
func main() {
discord, err := discordgo.New("Bot " + os.Getenv("DISCORD_BOT_KEY"))
errCheck("error creating discord session", err)
user, err := discord.User("@me")
errCheck("error retrieving account", err)
botID = user.ID
discord.AddHandler(commandHandler)
discord.AddHandler(func(discord *discordgo.Session, ready *discordgo.Ready) {
err = discord.UpdateStatus(0, "Status!")
if err != nil {
fmt.Println("Error attempting to set my status")
}
servers := discord.State.Guilds
fmt.Printf("Weebot has started on %d servers", len(servers))
})
err = discord.Open()
errCheck("Error opening connection to Discord", err)
defer discord.Close()
commandPrefix = "!"
<-make(chan struct{})
}
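// Illustrative run instructions (an assumption, not part of the original source):
// the bot reads its token from the DISCORD_BOT_KEY environment variable, so one way
// to start it locally would be:
//
//	export DISCORD_BOT_KEY="your-bot-token"
//	go run src/helloworld.go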
func errCheck(msg string, err error) {
if err != nil {
fmt.Printf("%s: %+v", msg, err)
panic(err)
}
}
func commandHandler(discord *discordgo.Session, message *discordgo.MessageCreate) {
user := message.Author
if user.ID == botID || user.Bot {
//Do nothing because the bot is talking
return
}
if message.Content == "!hello" {
discord.ChannelMessageSend(message.ChannelID, "Hey!")
}
} | [
"\"DISCORD_BOT_KEY\""
]
| []
| [
"DISCORD_BOT_KEY"
]
| [] | ["DISCORD_BOT_KEY"] | go | 1 | 0 | |
venv/lib/python3.8/site-packages/pip/_internal/commands/debug.py | from __future__ import absolute_import
import locale
import logging
import os
import sys
import pip._vendor
from pip._vendor import pkg_resources
from pip._vendor.certifi import where
from pip import __file__ as pip_location
from pip._internal.cli import cmdoptions
from pip._internal.cli.base_command import Command
from pip._internal.cli.cmdoptions import make_target_python
from pip._internal.cli.status_codes import SUCCESS
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import get_pip_version
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from types import ModuleType
from typing import List, Optional, Dict
from optparse import Values
from pip._internal.configuration import Configuration
logger = logging.getLogger(__name__)
def show_value(name, value):
# type: (str, Optional[str]) -> None
logger.info('%s: %s', name, value)
def show_sys_implementation():
# type: () -> None
logger.info('sys.implementation:')
if hasattr(sys, 'implementation'):
implementation = sys.implementation # type: ignore
implementation_name = implementation.name
else:
implementation_name = ''
with indent_log():
show_value('name', implementation_name)
def create_vendor_txt_map():
# type: () -> Dict[str, str]
vendor_txt_path = os.path.join(
os.path.dirname(pip_location),
'_vendor',
'vendor.txt'
)
with open(vendor_txt_path) as f:
# Purge non version specifying lines.
# Also, remove any space prefix or suffixes (including comments).
lines = [line.strip().split(' ', 1)[0]
for line in f.readlines() if '==' in line]
# Transform into "module" -> version dict.
return dict(line.split('==', 1) for line in lines) # type: ignore
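# Illustrative example (not part of upstream pip, values are hypothetical): a vendor.txt
# line such as "requests==2.22.0  # comment" survives the '==' filter, is trimmed to
# "requests==2.22.0", and contributes the mapping entry {"requests": "2.22.0"}.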
def get_module_from_module_name(module_name):
# type: (str) -> ModuleType
# Module name can be uppercase in vendor.txt for some reason...
module_name = module_name.lower()
# PATCH: setuptools is actually only pkg_resources.
if module_name == 'setuptools':
module_name = 'pkg_resources'
__import__(
'pip._vendor.{}'.format(module_name),
globals(),
locals(),
level=0
)
return getattr(pip._vendor, module_name)
def get_vendor_version_from_module(module_name):
# type: (str) -> Optional[str]
module = get_module_from_module_name(module_name)
version = getattr(module, '__version__', None)
if not version:
# Try to find version in debundled module info
# The type for module.__file__ is Optional[str] in
# Python 2, and str in Python 3. The type: ignore is
# added to account for Python 2, instead of a cast
# and should be removed once we drop Python 2 support
pkg_set = pkg_resources.WorkingSet(
[os.path.dirname(module.__file__)] # type: ignore
)
package = pkg_set.find(pkg_resources.Requirement.parse(module_name))
version = getattr(package, 'version', None)
return version
def show_actual_vendor_versions(vendor_txt_versions):
# type: (Dict[str, str]) -> None
"""Log the actual version and print extra info if there is
a conflict or if the actual version could not be imported.
"""
for module_name, expected_version in vendor_txt_versions.items():
extra_message = ''
actual_version = get_vendor_version_from_module(module_name)
if not actual_version:
extra_message = ' (Unable to locate actual module version, using'\
' vendor.txt specified version)'
actual_version = expected_version
elif actual_version != expected_version:
extra_message = ' (CONFLICT: vendor.txt suggests version should'\
' be {})'.format(expected_version)
logger.info('%s==%s%s', module_name, actual_version, extra_message)
def show_vendor_versions():
# type: () -> None
logger.info('vendored library versions:')
vendor_txt_versions = create_vendor_txt_map()
with indent_log():
show_actual_vendor_versions(vendor_txt_versions)
def show_tags(options):
# type: (Values) -> None
tag_limit = 10
target_python = make_target_python(options)
tags = target_python.get_tags()
# Display the target options that were explicitly provided.
formatted_target = target_python.format_given()
suffix = ''
if formatted_target:
suffix = ' (target: {})'.format(formatted_target)
msg = 'Compatible tags: {}{}'.format(len(tags), suffix)
logger.info(msg)
if options.verbose < 1 and len(tags) > tag_limit:
tags_limited = True
tags = tags[:tag_limit]
else:
tags_limited = False
with indent_log():
for tag in tags:
logger.info(str(tag))
if tags_limited:
msg = (
'...\n'
'[First {tag_limit} tags shown. Pass --verbose to show all.]'
).format(tag_limit=tag_limit)
logger.info(msg)
def ca_bundle_info(config):
# type: (Configuration) -> str
levels = set()
for key, _ in config.items():
levels.add(key.split('.')[0])
if not levels:
return "Not specified"
levels_that_override_global = ['install', 'wheel', 'download']
global_overriding_level = [
level for level in levels if level in levels_that_override_global
]
if not global_overriding_level:
return 'global'
if 'global' in levels:
levels.remove('global')
return ", ".join(levels)
class DebugCommand(Command):
"""
Display debug information.
"""
usage = """
%prog <options>"""
ignore_require_venv = True
def add_options(self):
# type: () -> None
cmdoptions.add_target_python_options(self.cmd_opts)
self.parser.insert_option_group(0, self.cmd_opts)
self.parser.config.load()
def run(self, options, args):
# type: (Values, List[str]) -> int
logger.warning(
"This command is only meant for debugging. "
"Do not use this with automation for parsing and getting these "
"details, since the output and options of this command may "
"change without notice."
)
show_value('pip version', get_pip_version())
show_value('sys.version', sys.version)
show_value('sys.executable', sys.executable)
show_value('sys.getdefaultencoding', sys.getdefaultencoding())
show_value('sys.getfilesystemencoding', sys.getfilesystemencoding())
show_value(
'locale.getpreferredencoding', locale.getpreferredencoding(),
)
show_value('sys.platform', sys.platform)
show_sys_implementation()
show_value("'cert' config value", ca_bundle_info(self.parser.config))
show_value("REQUESTS_CA_BUNDLE", os.environ.get('REQUESTS_CA_BUNDLE'))
show_value("CURL_CA_BUNDLE", os.environ.get('CURL_CA_BUNDLE'))
show_value("pip._vendor.certifi.where()", where())
show_value("pip._vendor.DEBUNDLED", pip._vendor.DEBUNDLED)
show_vendor_versions()
show_tags(options)
return SUCCESS
| []
| []
| [
"REQUESTS_CA_BUNDLE",
"CURL_CA_BUNDLE"
]
| [] | ["REQUESTS_CA_BUNDLE", "CURL_CA_BUNDLE"] | python | 2 | 0 | |
vendor/github.com/getsentry/sentry-go/client.go | package sentry
import (
"context"
"crypto/x509"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"net/http"
"os"
"reflect"
"sort"
"time"
)
// Logger is an instance of log.Logger that is used to provide debug information about the running Sentry Client;
// it can be enabled either by using `Logger.SetOutput` directly or with the `Debug` client option
var Logger = log.New(ioutil.Discard, "[Sentry] ", log.LstdFlags) // nolint: gochecknoglobals
type EventProcessor func(event *Event, hint *EventHint) *Event
type EventModifier interface {
ApplyToEvent(event *Event, hint *EventHint) *Event
}
var globalEventProcessors []EventProcessor // nolint: gochecknoglobals
func AddGlobalEventProcessor(processor EventProcessor) {
globalEventProcessors = append(globalEventProcessors, processor)
}
// Integration allows for registering a functions that modify or discard captured events.
type Integration interface {
Name() string
SetupOnce(client *Client)
}
// ClientOptions that configures a SDK Client
type ClientOptions struct {
// The DSN to use. If the DSN is not set, the client is effectively disabled.
Dsn string
// In debug mode, the debug information is printed to stdout to help you understand what
// sentry is doing.
Debug bool
// Configures whether SDK should generate and attach stacktraces to pure capture message calls.
AttachStacktrace bool
// The sample rate for event submission (0.0 - 1.0, defaults to 1.0).
SampleRate float32
// List of regexp strings that will be used to match against event's message
// and if applicable, caught errors type and value.
// If the match is found, then a whole event will be dropped.
IgnoreErrors []string
// Before send callback.
BeforeSend func(event *Event, hint *EventHint) *Event
// Before breadcrumb add callback.
BeforeBreadcrumb func(breadcrumb *Breadcrumb, hint *BreadcrumbHint) *Breadcrumb
// Integrations to be installed on the current Client, receives default integrations
Integrations func([]Integration) []Integration
// io.Writer implementation that should be used with the `Debug` mode
DebugWriter io.Writer
// The transport to use.
// This is an instance of a struct implementing `Transport` interface.
// Defaults to `httpTransport` from `transport.go`
Transport Transport
// The server name to be reported.
ServerName string
// The release to be sent with events.
Release string
// The dist to be sent with events.
Dist string
// The environment to be sent with events.
Environment string
// Maximum number of breadcrumbs.
MaxBreadcrumbs int
// An optional pointer to `http.Transport` that will be used with a default HTTPTransport.
HTTPTransport *http.Transport
// An optional HTTP proxy to use.
// This will default to the `http_proxy` environment variable,
// or `https_proxy` if that one exists.
HTTPProxy string
// An optional HTTPS proxy to use.
// This will default to the `HTTPS_PROXY` environment variable
// or `http_proxy` if that one exists.
HTTPSProxy string
// An optional CaCerts to use.
// Defaults to `gocertifi.CACerts()`.
CaCerts *x509.CertPool
}
// Client is the underlying processor that's used by the main API and `Hub` instances.
type Client struct {
options ClientOptions
dsn *Dsn
eventProcessors []EventProcessor
integrations []Integration
Transport Transport
}
// NewClient creates and returns an instance of `Client` configured using `ClientOptions`.
func NewClient(options ClientOptions) (*Client, error) {
if options.Debug {
debugWriter := options.DebugWriter
if debugWriter == nil {
debugWriter = os.Stdout
}
Logger.SetOutput(debugWriter)
}
if options.Dsn == "" {
options.Dsn = os.Getenv("SENTRY_DSN")
}
if options.Release == "" {
options.Release = os.Getenv("SENTRY_RELEASE")
}
if options.Environment == "" {
options.Environment = os.Getenv("SENTRY_ENVIRONMENT")
}
var dsn *Dsn
if options.Dsn != "" {
var err error
dsn, err = NewDsn(options.Dsn)
if err != nil {
return nil, err
}
}
client := Client{
options: options,
dsn: dsn,
}
client.setupTransport()
client.setupIntegrations()
return &client, nil
}
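// Illustrative initialization sketch (not part of the upstream SDK): relying on the
// SENTRY_DSN / SENTRY_RELEASE / SENTRY_ENVIRONMENT fallbacks read above, a caller
// might simply do:
//
//	client, err := NewClient(ClientOptions{AttachStacktrace: true})
//	if err != nil {
//	    log.Fatal(err)
//	}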
func (client *Client) setupTransport() {
transport := client.options.Transport
if transport == nil {
if client.options.Dsn == "" {
transport = new(noopTransport)
} else {
transport = NewHTTPTransport()
}
}
transport.Configure(client.options)
client.Transport = transport
}
func (client *Client) setupIntegrations() {
integrations := []Integration{
new(contextifyFramesIntegration),
new(environmentIntegration),
new(modulesIntegration),
new(ignoreErrorsIntegration),
}
if client.options.Integrations != nil {
integrations = client.options.Integrations(integrations)
}
for _, integration := range integrations {
if client.integrationAlreadyInstalled(integration.Name()) {
Logger.Printf("Integration %s is already installed\n", integration.Name())
continue
}
client.integrations = append(client.integrations, integration)
integration.SetupOnce(client)
Logger.Printf("Integration installed: %s\n", integration.Name())
}
}
// AddEventProcessor adds an event processor to the client.
func (client *Client) AddEventProcessor(processor EventProcessor) {
client.eventProcessors = append(client.eventProcessors, processor)
}
// Options return `ClientOptions` for the current `Client`.
func (client Client) Options() ClientOptions {
return client.options
}
// CaptureMessage captures an arbitrary message.
func (client *Client) CaptureMessage(message string, hint *EventHint, scope EventModifier) *EventID {
event := client.eventFromMessage(message, LevelInfo)
return client.CaptureEvent(event, hint, scope)
}
// CaptureException captures an error.
func (client *Client) CaptureException(exception error, hint *EventHint, scope EventModifier) *EventID {
event := client.eventFromException(exception, LevelError)
return client.CaptureEvent(event, hint, scope)
}
// CaptureEvent captures an event on the currently active client if any.
//
// The event must already be assembled. Typically code would instead use
// the utility methods like `CaptureException`. The return value is the
// event ID. In case Sentry is disabled or event was dropped, the return value will be nil.
func (client *Client) CaptureEvent(event *Event, hint *EventHint, scope EventModifier) *EventID {
return client.processEvent(event, hint, scope)
}
// Recover captures a panic.
// Returns `EventID` if successful, or `nil` if there's no error to recover from.
func (client *Client) Recover(err interface{}, hint *EventHint, scope EventModifier) *EventID {
if err == nil {
err = recover()
}
if err != nil {
if err, ok := err.(error); ok {
event := client.eventFromException(err, LevelFatal)
return client.CaptureEvent(event, hint, scope)
}
if err, ok := err.(string); ok {
event := client.eventFromMessage(err, LevelFatal)
return client.CaptureEvent(event, hint, scope)
}
}
return nil
}
// RecoverWithContext captures a panic and passes the relevant context object.
// Returns `EventID` if successful, or `nil` if there's no error to recover from.
func (client *Client) RecoverWithContext(
ctx context.Context,
err interface{},
hint *EventHint,
scope EventModifier,
) *EventID {
if err == nil {
err = recover()
}
if err != nil {
if hint.Context == nil && ctx != nil {
hint.Context = ctx
}
if err, ok := err.(error); ok {
event := client.eventFromException(err, LevelFatal)
return client.CaptureEvent(event, hint, scope)
}
if err, ok := err.(string); ok {
event := client.eventFromMessage(err, LevelFatal)
return client.CaptureEvent(event, hint, scope)
}
}
return nil
}
// Flush waits until all buffered events have been sent, returning `true`, or returns
// `false` if the timeout was reached. It calls the `Flush` method of the configured `Transport`.
func (client *Client) Flush(timeout time.Duration) bool {
return client.Transport.Flush(timeout)
}
func (client *Client) eventFromMessage(message string, level Level) *Event {
event := NewEvent()
event.Level = level
event.Message = message
if client.Options().AttachStacktrace {
event.Threads = []Thread{{
Stacktrace: NewStacktrace(),
Crashed: false,
Current: true,
}}
}
return event
}
func (client *Client) eventFromException(exception error, level Level) *Event {
if exception == nil {
event := NewEvent()
event.Level = level
event.Message = fmt.Sprintf("Called %s with nil value", callerFunctionName())
return event
}
stacktrace := ExtractStacktrace(exception)
if stacktrace == nil {
stacktrace = NewStacktrace()
}
event := NewEvent()
event.Level = level
event.Exception = []Exception{{
Value: exception.Error(),
Type: reflect.TypeOf(exception).String(),
Stacktrace: stacktrace,
}}
return event
}
func (client *Client) processEvent(event *Event, hint *EventHint, scope EventModifier) *EventID {
options := client.Options()
// TODO: Reconsider whether it's worth diverging from the default implementation
// of other SDKs. In Go the zero value (default) for float32 is 0.0,
// which means that if someone uses the ClientOptions{} struct directly
// and we did not check for 0 here, we'd skip all events by default
if options.SampleRate != 0.0 {
randomFloat := rand.New(rand.NewSource(time.Now().UnixNano())).Float32()
if randomFloat > options.SampleRate {
Logger.Println("Event dropped due to SampleRate hit.")
return nil
}
}
if event = client.prepareEvent(event, hint, scope); event == nil {
return nil
}
if options.BeforeSend != nil {
h := &EventHint{}
if hint != nil {
h = hint
}
if event = options.BeforeSend(event, h); event == nil {
Logger.Println("Event dropped due to BeforeSend callback.")
return nil
}
}
client.Transport.SendEvent(event)
return &event.EventID
}
func (client *Client) prepareEvent(event *Event, hint *EventHint, scope EventModifier) *Event {
if event.EventID == "" {
event.EventID = EventID(uuid())
}
if event.Timestamp == 0 {
event.Timestamp = time.Now().Unix()
}
if event.Level == "" {
event.Level = LevelInfo
}
if event.ServerName == "" {
if client.Options().ServerName != "" {
event.ServerName = client.Options().ServerName
} else if hostname, err := os.Hostname(); err == nil {
event.ServerName = hostname
}
}
if event.Release == "" && client.Options().Release != "" {
event.Release = client.Options().Release
}
if event.Dist == "" && client.Options().Dist != "" {
event.Dist = client.Options().Dist
}
if event.Environment == "" && client.Options().Environment != "" {
event.Environment = client.Options().Environment
}
event.Platform = "go"
event.Sdk = SdkInfo{
Name: "sentry.go",
Version: Version,
Integrations: client.listIntegrations(),
Packages: []SdkPackage{{
Name: "sentry-go",
Version: Version,
}},
}
event = scope.ApplyToEvent(event, hint)
for _, processor := range client.eventProcessors {
id := event.EventID
event = processor(event, hint)
if event == nil {
Logger.Printf("Event dropped by one of the Client EventProcessors: %s\n", id)
return nil
}
}
for _, processor := range globalEventProcessors {
id := event.EventID
event = processor(event, hint)
if event == nil {
Logger.Printf("Event dropped by one of the Global EventProcessors: %s\n", id)
return nil
}
}
return event
}
func (client Client) listIntegrations() []string {
integrations := make([]string, 0, len(client.integrations))
for _, integration := range client.integrations {
integrations = append(integrations, integration.Name())
}
sort.Strings(integrations)
return integrations
}
func (client Client) integrationAlreadyInstalled(name string) bool {
for _, integration := range client.integrations {
if integration.Name() == name {
return true
}
}
return false
}
| [
"\"SENTRY_DSN\"",
"\"SENTRY_RELEASE\"",
"\"SENTRY_ENVIRONMENT\""
]
| []
| [
"SENTRY_DSN",
"SENTRY_RELEASE",
"SENTRY_ENVIRONMENT"
]
| [] | ["SENTRY_DSN", "SENTRY_RELEASE", "SENTRY_ENVIRONMENT"] | go | 3 | 0 | |
cmd/plan9/devdraw-proxy/proxy.go | // Copyright (c) 2019 Aram Hăvărneanu <[email protected]>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
/*
devdraw-proxy: fake devdraw
DEVDRAW_SERVER=net!addr DEVDRAW=devdraw-proxy cmd
This tool masquerades as devdraw for plan9port binaries when
DEVDRAW=devdraw-proxy. It relays the protocol to the devdraw server
specified by DEVDRAW_SERVER.
This program is not intended to be called directly by the user, but
by plan9port graphical programs.
*/
package main
import (
"flag"
"fmt"
"io"
"log"
"os"
"mgk.ro/net/netutil"
_ "mgk.ro/log"
)
var usageString = "usage: DEVDRAW_SERVER=net!addr DEVDRAW=devdraw-proxy cmd\n"
func usage() {
fmt.Fprint(os.Stderr, usageString)
os.Exit(1)
}
func main() {
flag.Usage = usage
flag.Parse()
conn, err := netutil.Dial(os.Getenv("DEVDRAW_SERVER"))
if err != nil {
log.Fatal(err)
}
go proxy(os.Stdin, conn)
proxy(conn, os.Stdout)
}
func proxy(dst io.Writer, src io.Reader) {
_, err := io.Copy(dst, src)
if err != nil {
log.Fatal(err)
}
}
| [
"\"DEVDRAW_SERVER\""
]
| []
| [
"DEVDRAW_SERVER"
]
| [] | ["DEVDRAW_SERVER"] | go | 1 | 0 | |
certbot-dns-cloudxns/docs/conf.py | # -*- coding: utf-8 -*-
#
# certbot-dns-cloudxns documentation build configuration file, created by
# sphinx-quickstart on Wed May 10 16:05:50 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode']
autodoc_member_order = 'bysource'
autodoc_default_flags = ['show-inheritance']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'certbot-dns-cloudxns'
copyright = u'2017, Certbot Project'
author = u'Certbot Project'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0'
# The full version, including alpha/beta/rc tags.
release = u'0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
default_role = 'py:obj'
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# http://docs.readthedocs.org/en/latest/theme.html#how-do-i-use-this-locally-and-on-read-the-docs
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify it
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'certbot-dns-cloudxnsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'certbot-dns-cloudxns.tex', u'certbot-dns-cloudxns Documentation',
u'Certbot Project', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'certbot-dns-cloudxns', u'certbot-dns-cloudxns Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'certbot-dns-cloudxns', u'certbot-dns-cloudxns Documentation',
author, 'certbot-dns-cloudxns', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/', None),
'acme': ('https://acme-python.readthedocs.org/en/latest/', None),
'certbot': ('https://certbot.eff.org/docs/', None),
}
| []
| []
| [
"READTHEDOCS"
]
| [] | ["READTHEDOCS"] | python | 1 | 0 | |
distributor/cmd/main.go | // Copyright 2012-2019 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/url"
"net/http"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/keptn/keptn/distributor/pkg/lib"
keptnmodels "github.com/keptn/go-utils/pkg/api/models"
keptnapi "github.com/keptn/go-utils/pkg/api/utils"
"github.com/cloudevents/sdk-go/pkg/cloudevents"
"github.com/cloudevents/sdk-go/pkg/cloudevents/client"
cloudeventshttp "github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http"
cloudeventsnats "github.com/cloudevents/sdk-go/pkg/cloudevents/transport/nats"
"github.com/kelseyhightower/envconfig"
"github.com/nats-io/nats.go"
)
type envConfig struct {
// Port on which to listen for cloudevents
Port int `envconfig:"RCV_PORT" default:"8080"`
Path string `envconfig:"RCV_PATH" default:"/"`
}
var httpClient client.Client
var nc *nats.Conn
var subscriptions []*nats.Subscription
var uptimeTicker *time.Ticker
var ctx context.Context
var close = make(chan bool)
var mux sync.Mutex
var sentCloudEvents map[string][]string
func main() {
var env envConfig
if err := envconfig.Process("", &env); err != nil {
fmt.Println("Failed to process env var: " + err.Error())
os.Exit(1)
}
go keptnapi.RunHealthEndpoint("10999")
os.Exit(_main(os.Args[1:], env))
}
const connectionTypeNATS = "nats"
const connectionTypeHTTP = "http"
func _main(args []string, env envConfig) int {
// initialize the http client
connectionType := strings.ToLower(os.Getenv("CONNECTION_TYPE"))
switch connectionType {
case "":
createNATSConnection()
break
case connectionTypeNATS:
createNATSConnection()
break
case connectionTypeHTTP:
createHTTPConnection()
break
default:
createNATSConnection()
}
return 0
}
const defaultPollingInterval = 10
func createHTTPConnection() {
sentCloudEvents = map[string][]string{}
httpClient = createRecipientConnection()
eventEndpoint := getHTTPPollingEndpoint()
eventEndpointAuthToken := os.Getenv("HTTP_EVENT_ENDPOINT_AUTH_TOKEN")
topics := strings.Split(os.Getenv("PUBSUB_TOPIC"), ",")
pollingInterval, err := strconv.ParseInt(os.Getenv("HTTP_POLLING_INTERVAL"), 10, 64)
if err != nil {
pollingInterval = defaultPollingInterval
}
pollingTicker := time.NewTicker(time.Duration(pollingInterval) * time.Second)
for {
<-pollingTicker.C
pollHTTPEventSource(eventEndpoint, eventEndpointAuthToken, topics, httpClient)
}
}
func getHTTPPollingEndpoint() string {
endpoint := os.Getenv("HTTP_EVENT_ENDPOINT")
if endpoint == "" {
return "http://shipyard-controller:8080/v1/event/triggered"
}
parsedURL, _ := url.Parse(endpoint)
if parsedURL.Scheme == "" {
parsedURL.Scheme = "http"
}
if parsedURL.Path == "" {
parsedURL.Path = "v1/event/triggered"
}
return parsedURL.String()
}
func pollHTTPEventSource(endpoint string, token string, topics []string, client client.Client) {
fmt.Println("Polling events from " + endpoint)
for _, topic := range topics {
pollEventsForTopic(endpoint, token, topic, client)
}
}
func pollEventsForTopic(endpoint string, token string, topic string, client client.Client) {
fmt.Println("Retrieving events of type " + topic)
events, err := getEventsFromEndpoint(endpoint, token, topic)
if err != nil {
fmt.Println("Could not retrieve events of type " + topic + " from " + endpoint + ": " + endpoint)
}
fmt.Println("Received " + strconv.FormatInt(int64(len(events)), 10) + " new .triggered events")
for _, event := range events {
fmt.Println("Check if event " + event.ID + " has already been sent...")
if sentCloudEvents == nil {
fmt.Println("Map containing already sent cloudEvents is nil. Creating a new one")
sentCloudEvents = map[string][]string{}
}
if sentCloudEvents[topic] == nil {
fmt.Println("List of sent events for topic " + topic + " is nil. Creating a new one.")
sentCloudEvents[topic] = []string{}
}
alreadySent := hasEventBeenSent(sentCloudEvents[topic], event.ID)
if alreadySent {
fmt.Println("CloudEvent with ID " + event.ID + " has already been sent.")
continue
}
fmt.Println("CloudEvent with ID " + event.ID + " has not been sent yet.")
marshal, err := json.Marshal(event)
if err != nil {
fmt.Println("Could not marshal CloudEvent " + event.ID + ": " + err.Error())
continue
}
e, err := decodeCloudEvent(marshal)
if e != nil {
fmt.Println("Sending CloudEvent with ID " + event.ID + " to " + os.Getenv("PUBSUB_RECIPIENT"))
err = sendEvent(*e, client)
if err != nil {
fmt.Println("Could not send CloudEvent: " + err.Error())
}
fmt.Println("Event has been sent successfully. Adding it to the list of sent events.")
sentCloudEvents[topic] = append(sentCloudEvents[*event.Type], event.ID)
fmt.Println("Number of sent events for topic " + topic + ": " + strconv.FormatInt(int64(len(sentCloudEvents[topic])), 10))
}
}
// clean up list of sent events to avoid memory leaks -> if an item that has been marked as already sent
// is not an open .triggered event anymore, it can be removed from the list
fmt.Println("Cleaning up list of sent events for topic " + topic)
sentCloudEvents[topic] = cleanSentEventList(sentCloudEvents[topic], events)
}
func getEventsFromEndpoint(endpoint string, token string, topic string) ([]*keptnmodels.KeptnContextExtendedCE, error) {
events := []*keptnmodels.KeptnContextExtendedCE{}
nextPageKey := ""
for {
endpoint = strings.TrimSuffix(endpoint, "/")
url, err := url.Parse(endpoint)
url.Path = url.Path + "/" + topic
if err != nil {
return nil, err
}
q := url.Query()
if nextPageKey != "" {
q.Set("nextPageKey", nextPageKey)
url.RawQuery = q.Encode()
}
req, err := http.NewRequest("GET", url.String(), nil)
req.Header.Set("Content-Type", "application/json")
if token != "" {
req.Header.Add("x-token", token)
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
if resp.StatusCode == 200 {
received := &keptnmodels.Events{}
err = json.Unmarshal(body, received)
if err != nil {
return nil, err
}
events = append(events, received.Events...)
if received.NextPageKey == "" || received.NextPageKey == "0" {
break
}
nextPageKey = received.NextPageKey
} else {
var respErr keptnmodels.Error
err = json.Unmarshal(body, &respErr)
if err != nil {
return nil, err
}
return nil, errors.New(*respErr.Message)
}
}
return events, nil
}
func hasEventBeenSent(sentEvents []string, eventID string) bool {
alreadySent := false
if sentEvents == nil {
sentEvents = []string{}
}
for _, sentEvent := range sentEvents {
if sentEvent == eventID {
alreadySent = true
}
}
return alreadySent
}
func cleanSentEventList(sentEvents []string, events []*keptnmodels.KeptnContextExtendedCE) []string {
updatedList := []string{}
for _, sentEvent := range sentEvents {
fmt.Println("Determine whether event " + sentEvent + " can be removed from list")
found := false
for _, ev := range events {
if ev.ID == sentEvent {
found = true
break
}
}
if found {
fmt.Println("Event " + sentEvent + " is still open. Keeping it in the list")
updatedList = append(updatedList, sentEvent)
} else {
fmt.Println("Event " + sentEvent + " is not open anymore. Removing it from the list")
}
}
return updatedList
}
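// Illustrative example of the pruning above (derived from the code, with hypothetical IDs):
// with sentEvents = ["a", "b"] and open .triggered events containing only ID "b", the
// returned list is ["b"]; "a" is dropped because its event is no longer open.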
func stringp(s string) *string {
return &s
}
func createNATSConnection() {
uptimeTicker = time.NewTicker(10 * time.Second)
httpClient = createRecipientConnection()
natsURL := os.Getenv("PUBSUB_URL")
topics := strings.Split(os.Getenv("PUBSUB_TOPIC"), ",")
nch := lib.NewNatsConnectionHandler(natsURL, topics)
nch.MessageHandler = handleMessage
err := nch.SubscribeToTopics()
if err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
defer func() {
nch.RemoveAllSubscriptions()
// Close connection
fmt.Println("Disconnected from NATS")
}()
for {
select {
case <-uptimeTicker.C:
_ = nch.SubscribeToTopics()
case <-close:
return
}
}
}
func createRecipientConnection() client.Client {
recipientURL, err := getPubSubRecipientURL(
os.Getenv("PUBSUB_RECIPIENT"),
os.Getenv("PUBSUB_RECIPIENT_PORT"),
os.Getenv("PUBSUB_RECIPIENT_PATH"),
)
if err != nil {
fmt.Println(err.Error())
os.Exit(1)
}
httpTransport, err := cloudeventshttp.New(
cloudeventshttp.WithTarget(recipientURL),
cloudeventshttp.WithStructuredEncoding(),
)
if err != nil {
fmt.Println("failed to create Http connection: " + err.Error())
os.Exit(1)
}
httpClient, err := client.New(httpTransport)
if err != nil {
fmt.Println("failed to create client: " + err.Error())
os.Exit(1)
}
return httpClient
}
func handleMessage(m *nats.Msg) {
fmt.Printf("Received a message for topic [%s]\n", m.Subject)
e, err := decodeCloudEvent(m.Data)
if e != nil {
err = sendEvent(*e, httpClient)
if err != nil {
fmt.Println("Could not send CloudEvent: " + err.Error())
}
}
}
func decodeCloudEvent(data []byte) (*cloudevents.Event, error) {
ceMsg := &cloudeventsnats.Message{
Body: data,
}
codec := &cloudeventsnats.Codec{}
switch ceMsg.CloudEventsVersion() {
default:
fmt.Println("Cannot parse incoming payload: CloudEvent Spec version not set")
return nil, errors.New("CloudEvent version not set")
case cloudevents.CloudEventsVersionV02:
codec.Encoding = cloudeventsnats.StructuredV02
case cloudevents.CloudEventsVersionV03:
codec.Encoding = cloudeventsnats.StructuredV03
case cloudevents.CloudEventsVersionV1:
codec.Encoding = cloudeventsnats.StructuredV1
}
event, err := codec.Decode(ctx, ceMsg)
if err != nil {
fmt.Println("Could not unmarshal CloudEvent: " + err.Error())
return nil, err
}
return event, nil
}
func sendEvent(event cloudevents.Event, client client.Client) error {
ctx := context.Background()
_, _, err := client.Send(ctx, event)
if err != nil {
fmt.Println("failed to send event: " + err.Error())
}
return err
}
func getPubSubRecipientURL(recipientService string, port string, path string) (string, error) {
if recipientService == "" {
return "", errors.New("no recipient service defined")
}
if !strings.HasPrefix(recipientService, "https://") && !strings.HasPrefix(recipientService, "http://") {
recipientService = "http://" + recipientService
}
if port == "" {
port = "8080"
}
if path != "" && !strings.HasPrefix(path, "/") {
path = "/" + path
}
return recipientService + ":" + port + path, nil
}
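// Illustrative examples (derived from the logic above, service names are hypothetical):
// getPubSubRecipientURL("helm-service", "", "") returns "http://helm-service:8080", and
// getPubSubRecipientURL("helm-service", "8081", "event") returns "http://helm-service:8081/event".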
| [
"\"CONNECTION_TYPE\"",
"\"HTTP_EVENT_ENDPOINT_AUTH_TOKEN\"",
"\"PUBSUB_TOPIC\"",
"\"HTTP_POLLING_INTERVAL\"",
"\"HTTP_EVENT_ENDPOINT\"",
"\"PUBSUB_RECIPIENT\"",
"\"PUBSUB_URL\"",
"\"PUBSUB_TOPIC\"",
"\"PUBSUB_RECIPIENT\"",
"\"PUBSUB_RECIPIENT_PORT\"",
"\"PUBSUB_RECIPIENT_PATH\""
]
| []
| [
"PUBSUB_RECIPIENT",
"PUBSUB_URL",
"PUBSUB_RECIPIENT_PORT",
"CONNECTION_TYPE",
"HTTP_POLLING_INTERVAL",
"HTTP_EVENT_ENDPOINT_AUTH_TOKEN",
"HTTP_EVENT_ENDPOINT",
"PUBSUB_TOPIC",
"PUBSUB_RECIPIENT_PATH"
]
| [] | ["PUBSUB_RECIPIENT", "PUBSUB_URL", "PUBSUB_RECIPIENT_PORT", "CONNECTION_TYPE", "HTTP_POLLING_INTERVAL", "HTTP_EVENT_ENDPOINT_AUTH_TOKEN", "HTTP_EVENT_ENDPOINT", "PUBSUB_TOPIC", "PUBSUB_RECIPIENT_PATH"] | go | 9 | 0 | |
router/server.go | package main
import (
"crypto/tls"
"encoding/base64"
"flag"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"os"
"strconv"
"strings"
discoverd "github.com/flynn/flynn/discoverd/client"
"github.com/flynn/flynn/pkg/keepalive"
"github.com/flynn/flynn/pkg/postgres"
"github.com/flynn/flynn/pkg/shutdown"
"github.com/flynn/flynn/router/schema"
router "github.com/flynn/flynn/router/types"
"github.com/inconshreveable/log15"
)
var logger = log15.New()
func init() {
if os.Getenv("DEBUG") == "" {
// filter debug log messages if DEBUG is not set
logger.SetHandler(log15.LvlFilterHandler(log15.LvlInfo, log15.StdoutHandler))
}
}
type Listener interface {
Start() error
Close() error
AddRoute(*router.Route) error
UpdateRoute(*router.Route) error
RemoveRoute(id string) error
Watcher
DataStoreReader
}
type Router struct {
HTTP Listener
TCP Listener
}
func (s *Router) ListenerFor(typ string) Listener {
switch typ {
case "http":
return s.HTTP
case "tcp":
return s.TCP
default:
return nil
}
}
func (s *Router) Start() error {
log := logger.New("fn", "Start")
log.Info("starting HTTP listener")
if err := s.HTTP.Start(); err != nil {
log.Error("error starting HTTP listener", "err", err)
return err
}
log.Info("starting TCP listener")
if err := s.TCP.Start(); err != nil {
log.Error("error starting TCP listener", "err", err)
s.HTTP.Close()
return err
}
return nil
}
func (s *Router) Close() {
s.HTTP.Close()
s.TCP.Close()
}
var listenFunc = keepalive.ReusableListen
func main() {
defer shutdown.Exit()
log := logger.New("fn", "main")
var cookieKey *[32]byte
if key := os.Getenv("COOKIE_KEY"); key != "" {
res, err := base64.StdEncoding.DecodeString(key)
if err != nil {
shutdown.Fatalf("error decoding COOKIE_KEY: %s", err)
}
if len(res) != 32 {
shutdown.Fatalf("decoded %d bytes from COOKIE_KEY, expected 32", len(res))
}
var k [32]byte
copy(k[:], res)
cookieKey = &k
}
if cookieKey == nil {
shutdown.Fatal("Missing random 32 byte base64-encoded COOKIE_KEY")
}
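// Note (an assumption, not part of the original source): a valid COOKIE_KEY can be
// generated with e.g. `openssl rand -base64 32`, which produces 32 random bytes
// base64-encoded, matching the length check above.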
proxyProtocol := os.Getenv("PROXY_PROTOCOL") == "true"
legacyTLS := os.Getenv("LEGACY_TLS") == "true"
if !legacyTLS {
// Enable TLS 1.3
os.Setenv("GODEBUG", os.Getenv("GODEBUG")+",tls13=1")
}
httpPort := flag.Int("http-port", 8080, "default http listen port")
httpsPort := flag.Int("https-port", 4433, "default https listen port")
tcpIP := flag.String("tcp-ip", os.Getenv("LISTEN_IP"), "tcp router listen ip")
tcpRangeStart := flag.Int("tcp-range-start", 3000, "tcp port range start")
tcpRangeEnd := flag.Int("tcp-range-end", 3500, "tcp port range end")
certFile := flag.String("tls-cert", "", "TLS (SSL) cert file in pem format")
keyFile := flag.String("tls-key", "", "TLS (SSL) key file in pem format")
apiPort := flag.String("api-port", "", "api listen port")
flag.Parse()
httpPorts := []int{*httpPort}
httpsPorts := []int{*httpsPort}
if portRaw := os.Getenv("DEFAULT_HTTP_PORT"); portRaw != "" {
if port, err := strconv.Atoi(portRaw); err != nil {
shutdown.Fatalf("Invalid DEFAULT_HTTP_PORTS: %s", err)
} else if port == 0 {
log.Warn("Disabling HTTP acccess (DEFAULT_HTTP_PORT=0)")
httpPorts = nil
} else {
httpPorts[0] = port
}
}
if portRaw := os.Getenv("DEFAULT_HTTPS_PORT"); portRaw != "" {
if port, err := strconv.Atoi(portRaw); err != nil {
shutdown.Fatalf("Invalid DEFAULT_HTTPS_PORTS: %s", err)
} else if port == 0 {
shutdown.Fatal("Cannot disable HTTPS access (DEFAULT_HTTPS_PORT=0)")
} else {
httpsPorts[0] = port
}
}
defaultPorts := append(httpPorts, httpsPorts...)
if added := os.Getenv("ADDITIONAL_HTTP_PORTS"); added != "" {
for _, raw := range strings.Split(added, ",") {
if port, err := strconv.Atoi(raw); err == nil {
httpPorts = append(httpPorts, port)
} else {
shutdown.Fatal(err)
}
}
}
if added := os.Getenv("ADDITIONAL_HTTPS_PORTS"); added != "" {
for _, raw := range strings.Split(added, ",") {
if port, err := strconv.Atoi(raw); err == nil {
httpsPorts = append(httpsPorts, port)
} else {
shutdown.Fatal(err)
}
}
}
if *apiPort == "" {
*apiPort = os.Getenv("PORT")
if *apiPort == "" {
*apiPort = "5000"
}
}
keypair := tls.Certificate{}
var err error
if *certFile != "" {
if keypair, err = tls.LoadX509KeyPair(*certFile, *keyFile); err != nil {
shutdown.Fatal(err)
}
} else if tlsCert := os.Getenv("TLSCERT"); tlsCert != "" {
if tlsKey := os.Getenv("TLSKEY"); tlsKey != "" {
os.Setenv("TLSKEY", fmt.Sprintf("md5^(%s)", md5sum(tlsKey)))
if keypair, err = tls.X509KeyPair([]byte(tlsCert), []byte(tlsKey)); err != nil {
shutdown.Fatal(err)
}
}
}
var error503Page []byte
if error503PageURL := os.Getenv("ERROR_503_PAGE_URL"); error503PageURL != "" {
func() {
res, err := http.Get(error503PageURL)
if err != nil {
log.Error("error getting ERROR_503_PAGE_URL", "err", err)
return
}
defer res.Body.Close()
if res.StatusCode != 200 {
log.Error("unexpected status code getting ERROR_503_PAGE_URL", "status", res.StatusCode)
return
}
error503Page, err = ioutil.ReadAll(&io.LimitedReader{R: res.Body, N: 1000000})
if err != nil {
log.Error("error reading ERROR_503_PAGE_URL", "err", err)
return
}
return
}()
}
log.Info("connecting to postgres")
db := postgres.Wait(nil, nil)
log.Info("running DB migrations")
if err := migrateDB(db); err != nil {
shutdown.Fatal(err)
}
db.Close()
log.Info("reconnecting to postgres with prepared queries")
db = postgres.Wait(nil, schema.PrepareStatements)
shutdown.BeforeExit(func() { db.Close() })
var httpAddrs []string
var httpsAddrs []string
var reservedPorts []int
for _, port := range httpPorts {
httpAddrs = append(httpAddrs, net.JoinHostPort(os.Getenv("LISTEN_IP"), strconv.Itoa(port)))
reservedPorts = append(reservedPorts, port)
}
for _, port := range httpsPorts {
httpsAddrs = append(httpsAddrs, net.JoinHostPort(os.Getenv("LISTEN_IP"), strconv.Itoa(port)))
reservedPorts = append(reservedPorts, port)
}
r := Router{
TCP: &TCPListener{
IP: *tcpIP,
startPort: *tcpRangeStart,
endPort: *tcpRangeEnd,
ds: NewPostgresDataStore("tcp", db.ConnPool),
discoverd: discoverd.DefaultClient,
reservedPorts: reservedPorts,
},
HTTP: &HTTPListener{
Addrs: httpAddrs,
TLSAddrs: httpsAddrs,
LegacyTLSVersions: legacyTLS,
defaultPorts: defaultPorts,
cookieKey: cookieKey,
keypair: keypair,
ds: NewPostgresDataStore("http", db.ConnPool),
discoverd: discoverd.DefaultClient,
proxyProtocol: proxyProtocol,
error503Page: error503Page,
},
}
if err := r.Start(); err != nil {
shutdown.Fatal(err)
}
shutdown.BeforeExit(r.Close)
apiAddr := net.JoinHostPort(os.Getenv("LISTEN_IP"), *apiPort)
log.Info("starting API listener")
listener, err := listenFunc("tcp4", apiAddr)
if err != nil {
log.Error("error starting API listener", "err", err)
shutdown.Fatal(listenErr{apiAddr, err})
}
httpAddr := net.JoinHostPort(os.Getenv("LISTEN_IP"), strconv.Itoa(httpPorts[0]))
services := map[string]string{
"router-api": apiAddr,
"router-http": httpAddr,
}
for service, addr := range services {
log.Info("registering service", "name", service, "addr", addr)
hb, err := discoverd.AddServiceAndRegister(service, addr)
if err != nil {
log.Error("error registering service", "name", service, "addr", addr, "err", err)
shutdown.Fatal(err)
}
shutdown.BeforeExit(func() { hb.Close() })
}
log.Info("serving API requests")
shutdown.Fatal(http.Serve(listener, apiHandler(&r)))
}
type listenErr struct {
Addr string
Err error
}
func (e listenErr) Error() string {
return fmt.Sprintf("error binding to port (check if another service is listening on %s): %s", e.Addr, e.Err)
}
| [
"\"DEBUG\"",
"\"COOKIE_KEY\"",
"\"PROXY_PROTOCOL\"",
"\"LEGACY_TLS\"",
"\"GODEBUG\"",
"\"LISTEN_IP\"",
"\"DEFAULT_HTTP_PORT\"",
"\"DEFAULT_HTTPS_PORT\"",
"\"ADDITIONAL_HTTP_PORTS\"",
"\"ADDITIONAL_HTTPS_PORTS\"",
"\"PORT\"",
"\"TLSCERT\"",
"\"TLSKEY\"",
"\"ERROR_503_PAGE_URL\"",
"\"LISTEN_IP\"",
"\"LISTEN_IP\"",
"\"LISTEN_IP\"",
"\"LISTEN_IP\""
]
| []
| [
"PORT",
"TLSCERT",
"PROXY_PROTOCOL",
"LISTEN_IP",
"DEFAULT_HTTP_PORT",
"ADDITIONAL_HTTPS_PORTS",
"ADDITIONAL_HTTP_PORTS",
"TLSKEY",
"DEFAULT_HTTPS_PORT",
"LEGACY_TLS",
"COOKIE_KEY",
"DEBUG",
"GODEBUG",
"ERROR_503_PAGE_URL"
]
| [] | ["PORT", "TLSCERT", "PROXY_PROTOCOL", "LISTEN_IP", "DEFAULT_HTTP_PORT", "ADDITIONAL_HTTPS_PORTS", "ADDITIONAL_HTTP_PORTS", "TLSKEY", "DEFAULT_HTTPS_PORT", "LEGACY_TLS", "COOKIE_KEY", "DEBUG", "GODEBUG", "ERROR_503_PAGE_URL"] | go | 14 | 0 | |
run_tests.py | #!/usr/bin/env python
# Test runner taken from the django_debug_toolbar
# https://github.com/django-debug-toolbar/django-debug-toolbar
import sys
import os
from os.path import dirname, abspath, join
from optparse import OptionParser
from django.conf import settings
# For convenience, configure settings if they are not pre-configured or if we
# haven't been given a settings module via the DJANGO_SETTINGS_MODULE environment variable.
if not settings.configured and not os.environ.get('DJANGO_SETTINGS_MODULE'):
settings.configure(
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
},
INSTALLED_APPS=[
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'honest_ab',
'tests',
'south'
],
ROOT_URLCONF='',
DEBUG=True,
SITE_ID=1,
)
from django.test.simple import DjangoTestSuiteRunner
def runtests(*test_args, **kwargs):
if 'south' in settings.INSTALLED_APPS:
from south.management.commands import patch_for_test_db_setup
patch_for_test_db_setup()
if not test_args:
test_args = ['tests']
parent = dirname(abspath(__file__))
sys.path.insert(0, parent)
test_runner = DjangoTestSuiteRunner(verbosity=kwargs.get('verbosity', 1), interactive=kwargs.get('interactive', False), failfast=kwargs.get('failfast'))
failures = test_runner.run_tests(test_args)
sys.exit(failures)
if __name__ == '__main__':
parser = OptionParser()
parser.add_option('--failfast', action='store_true', default=False, dest='failfast')
(options, args) = parser.parse_args()
runtests(failfast=options.failfast, *args)
| []
| []
| [
"DJANGO_SETTINGS_MODULE"
]
| [] | ["DJANGO_SETTINGS_MODULE"] | python | 1 | 0 | |
cmd/ore/aws/upload.go | // Copyright 2017 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package aws
import (
"encoding/json"
"fmt"
"net/url"
"os"
"path/filepath"
"strings"
"github.com/coreos/mantle/platform/api/aws"
"github.com/coreos/mantle/sdk"
"github.com/coreos/mantle/util"
"github.com/spf13/cobra"
)
var (
cmdUpload = &cobra.Command{
Use: "upload",
Short: "Create AWS images",
Long: `Upload CoreOS image to S3 and create relevant AMIs (hvm).
Supported source formats are VMDK (as created with ./image_to_vm --format=ami_vmdk) and RAW.
After a successful run, the final line of output will be a line of JSON describing the relevant resources.
`,
Example: ` ore aws upload --region=us-east-1 \
--ami-name="CoreOS-stable-1234.5.6" \
--ami-description="CoreOS stable 1234.5.6" \
--file="/home/.../coreos_production_ami_vmdk_image.vmdk" \
--tags="machine=production"`,
RunE: runUpload,
}
uploadSourceObject string
uploadBucket string
uploadImageName string
uploadBoard string
uploadFile string
uploadDiskSizeGiB uint
uploadDiskSizeInspect bool
uploadDeleteObject bool
uploadForce bool
uploadSourceSnapshot string
uploadObjectFormat aws.EC2ImageFormat
uploadAMIName string
uploadAMIDescription string
uploadGrantUsers []string
uploadTags []string
)
func init() {
AWS.AddCommand(cmdUpload)
cmdUpload.Flags().StringVar(&uploadSourceObject, "source-object", "", "'s3://' URI pointing to image data (default: same as upload)")
cmdUpload.Flags().StringVar(&uploadBucket, "bucket", "", "s3://bucket/prefix/ (defaults to a regional bucket and prefix defaults to $USER/board/name)")
cmdUpload.Flags().StringVar(&uploadImageName, "name", "", "name of uploaded image (default COREOS_VERSION)")
cmdUpload.Flags().StringVar(&uploadBoard, "board", "amd64-usr", "board used for naming with default prefix and AMI architecture")
cmdUpload.Flags().StringVar(&uploadFile, "file",
defaultUploadFile(),
"path to CoreOS image (build with: ./image_to_vm.sh --format=ami_vmdk ...)")
cmdUpload.Flags().UintVarP(&uploadDiskSizeGiB, "disk-size-gib", "", aws.ContainerLinuxDiskSizeGiB, "AMI disk size in GiB")
cmdUpload.Flags().BoolVar(&uploadDiskSizeInspect, "disk-size-inspect", false, "set AMI disk size to size of local file")
cmdUpload.Flags().BoolVar(&uploadDeleteObject, "delete-object", true, "delete uploaded S3 object after snapshot is created")
cmdUpload.Flags().BoolVar(&uploadForce, "force", false, "overwrite any existing S3 object, snapshot, and AMI")
cmdUpload.Flags().StringVar(&uploadSourceSnapshot, "source-snapshot", "", "the snapshot ID to base this AMI on (default: create new snapshot)")
cmdUpload.Flags().Var(&uploadObjectFormat, "object-format", fmt.Sprintf("object format: %s or %s (default: %s)", aws.EC2ImageFormatVmdk, aws.EC2ImageFormatRaw, aws.EC2ImageFormatVmdk))
cmdUpload.Flags().StringVar(&uploadAMIName, "ami-name", "", "name of the AMI to create (default: Container-Linux-$USER-$VERSION)")
cmdUpload.Flags().StringVar(&uploadAMIDescription, "ami-description", "", "description of the AMI to create (default: empty)")
cmdUpload.Flags().StringSliceVar(&uploadGrantUsers, "grant-user", []string{}, "grant launch permission to this AWS user ID")
cmdUpload.Flags().StringSliceVar(&uploadTags, "tags", []string{}, "list of key=value tags to attach to the AMI")
}
func defaultBucketNameForRegion(region string) string {
return fmt.Sprintf("coreos-dev-ami-import-%s", region)
}
func defaultUploadFile() string {
build := sdk.BuildRoot()
return build + "/images/amd64-usr/latest/flatcar_production_ami_vmdk_image.vmdk"
}
// defaultBucketURL determines the location the tool should upload to.
// The 'urlPrefix' parameter, if it contains a path, will override all other
// arguments
func defaultBucketURL(urlPrefix, imageName, board, file, region string) (*url.URL, error) {
if urlPrefix == "" {
urlPrefix = fmt.Sprintf("s3://%s", defaultBucketNameForRegion(region))
}
s3URL, err := url.Parse(urlPrefix)
if err != nil {
return nil, err
}
if s3URL.Scheme != "s3" {
return nil, fmt.Errorf("invalid s3 scheme; must be 's3://', not '%s://'", s3URL.Scheme)
}
if s3URL.Host == "" {
return nil, fmt.Errorf("URL missing bucket name %v\n", urlPrefix)
}
// if prefix not specified, default to /$USER/$BOARD/$VERSION
if s3URL.Path == "" {
s3URL.Path = fmt.Sprintf("/%s/%s/%s", os.Getenv("USER"), board, imageName)
}
if s3URL.Path[len(s3URL.Path)-1] != '/' {
s3URL.Path += "/"
}
s3URL.Path += filepath.Base(file)
return s3URL, nil
}
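// Illustrative example (derived from the defaults above, with hypothetical values): for
// region "us-east-1", board "amd64-usr", image name "1234.5.6", USER "jdoe" and the default
// VMDK file, the resulting URL would be
// s3://coreos-dev-ami-import-us-east-1/jdoe/amd64-usr/1234.5.6/flatcar_production_ami_vmdk_image.vmdk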
func runUpload(cmd *cobra.Command, args []string) error {
if len(args) != 0 {
fmt.Fprintf(os.Stderr, "Unrecognized args in aws upload cmd: %v\n", args)
os.Exit(2)
}
if uploadSourceObject != "" && uploadSourceSnapshot != "" {
fmt.Fprintf(os.Stderr, "At most one of --source-object and --source-snapshot may be specified.\n")
os.Exit(2)
}
if uploadDiskSizeInspect && (uploadSourceObject != "" || uploadSourceSnapshot != "") {
fmt.Fprintf(os.Stderr, "--disk-size-inspect cannot be used with --source-object or --source-snapshot.\n")
os.Exit(2)
}
// if an image name is unspecified try to use version.txt
imageName := uploadImageName
if imageName == "" {
ver, err := sdk.VersionsFromDir(filepath.Dir(uploadFile))
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to get version from image directory, provide a -name flag or include a version.txt in the image directory: %v\n", err)
os.Exit(1)
}
imageName = ver.Version
}
if uploadDiskSizeInspect {
imageInfo, err := util.GetImageInfo(uploadFile)
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to query size of disk: %v\n", err)
os.Exit(1)
}
plog.Debugf("Image size: %v\n", imageInfo.VirtualSize)
const GiB = 1024 * 1024 * 1024
uploadDiskSizeGiB = uint(imageInfo.VirtualSize / GiB)
// Round up if there's leftover
if imageInfo.VirtualSize%GiB > 0 {
uploadDiskSizeGiB += 1
}
}
amiName := uploadAMIName
if amiName == "" {
ver, err := sdk.VersionsFromDir(filepath.Dir(uploadFile))
if err != nil {
fmt.Fprintf(os.Stderr, "could not guess image name: %v\n", err)
os.Exit(1)
}
awsVersion := strings.Replace(ver.Version, "+", "-", -1) // '+' is invalid in an AMI name
amiName = fmt.Sprintf("Container-Linux-dev-%s-%s", os.Getenv("USER"), awsVersion)
}
switch uploadBoard {
case "amd64-usr":
case "arm64-usr":
if !strings.HasSuffix(amiName, "-arm64") {
amiName = amiName + "-arm64"
}
default:
fmt.Fprintf(os.Stderr, "No AMI name suffix known for board %q\n", uploadBoard)
os.Exit(1)
}
var s3URL *url.URL
var err error
if uploadSourceObject != "" {
s3URL, err = url.Parse(uploadSourceObject)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)
}
} else {
s3URL, err = defaultBucketURL(uploadBucket, imageName, uploadBoard, uploadFile, region)
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)
}
}
plog.Debugf("S3 object: %v\n", s3URL)
s3BucketName := s3URL.Host
s3ObjectPath := strings.TrimPrefix(s3URL.Path, "/")
if uploadForce {
API.RemoveImage(amiName, imageName, s3BucketName, s3ObjectPath, nil)
}
// if no snapshot was specified, check for an existing one or a
// snapshot task in progress
sourceSnapshot := uploadSourceSnapshot
if sourceSnapshot == "" {
snapshot, err := API.FindSnapshot(imageName)
if err != nil {
fmt.Fprintf(os.Stderr, "failed finding snapshot: %v\n", err)
os.Exit(1)
}
if snapshot != nil {
sourceSnapshot = snapshot.SnapshotID
}
}
// if there's no existing snapshot and no provided S3 object to
// make one from, upload to S3
if uploadSourceObject == "" && sourceSnapshot == "" {
f, err := os.Open(uploadFile)
if err != nil {
fmt.Fprintf(os.Stderr, "Could not open image file %v: %v\n", uploadFile, err)
os.Exit(1)
}
defer f.Close()
err = API.UploadObject(f, s3BucketName, s3ObjectPath, uploadForce)
if err != nil {
fmt.Fprintf(os.Stderr, "Error uploading: %v\n", err)
os.Exit(1)
}
}
// if we don't already have a snapshot, make one
if sourceSnapshot == "" {
snapshot, err := API.CreateSnapshot(imageName, s3URL.String(), uploadObjectFormat)
if err != nil {
fmt.Fprintf(os.Stderr, "unable to create snapshot: %v\n", err)
os.Exit(1)
}
sourceSnapshot = snapshot.SnapshotID
}
// if delete is enabled and we created the snapshot from an S3
// object that we also created (perhaps in a previous run), delete
// the S3 object
if uploadSourceObject == "" && uploadSourceSnapshot == "" && uploadDeleteObject {
if err := API.DeleteObject(s3BucketName, s3ObjectPath); err != nil {
fmt.Fprintf(os.Stderr, "unable to delete object: %v\n", err)
os.Exit(1)
}
}
amiArch, err := aws.AmiArchForBoard(uploadBoard)
if err != nil {
fmt.Fprintf(os.Stderr, "could not get architecture for board: %v\n", err)
os.Exit(1)
}
// create AMIs and grant permissions
hvmID, err := API.CreateHVMImage(sourceSnapshot, uploadDiskSizeGiB, amiName+"-hvm", uploadAMIDescription, amiArch)
if err != nil {
fmt.Fprintf(os.Stderr, "unable to create HVM image: %v\n", err)
os.Exit(1)
}
if len(uploadGrantUsers) > 0 {
err = API.GrantLaunchPermission(hvmID, uploadGrantUsers)
if err != nil {
fmt.Fprintf(os.Stderr, "unable to grant launch permission: %v\n", err)
os.Exit(1)
}
}
tagMap := make(map[string]string)
for _, tag := range uploadTags {
splitTag := strings.SplitN(tag, "=", 2)
if len(splitTag) != 2 {
fmt.Fprintf(os.Stderr, "invalid tag format; should be key=value, not %v\n", tag)
os.Exit(1)
}
key, value := splitTag[0], splitTag[1]
tagMap[key] = value
}
if err := API.CreateTags([]string{hvmID, sourceSnapshot}, tagMap); err != nil {
fmt.Fprintf(os.Stderr, "unable to add tags: %v\n", err)
os.Exit(1)
}
err = json.NewEncoder(os.Stdout).Encode(&struct {
HVM string
SnapshotID string
S3Object string
}{
HVM: hvmID,
SnapshotID: sourceSnapshot,
S3Object: s3URL.String(),
})
if err != nil {
fmt.Fprintf(os.Stderr, "Couldn't encode result: %v\n", err)
os.Exit(1)
}
return nil
}
| ["\"USER\"", "\"USER\""] | [] | ["USER"] | [] | ["USER"] | go | 1 | 0 |
tutorial/concurrency/default-selection.go | package main
import (
"fmt"
"time"
)
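// The default case in the select below runs whenever neither channel is
// ready, so the loop can do other work (here: print and sleep) without
// blocking on a receive.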
func main() {
tick := time.Tick(100 * time.Millisecond)
boom := time.Tick(500 * time.Millisecond)
for {
select {
case <-tick:
fmt.Println("tick.")
case <-boom:
fmt.Println("BOOM!")
return
default:
fmt.Println(" .")
time.Sleep(50 * time.Millisecond)
}
}
}
| [] | [] | [] | [] | [] | go | null | null |
ansible/mitogen-0.2.3/ansible_mitogen/target.py | # Copyright 2017, David Wilson
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Helper functions intended to be executed on the target. These are entrypoints
for file transfer, module execution and sundry bits like changing file modes.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import errno
import functools
import grp
import json
import logging
import operator
import os
import pwd
import re
import resource
import signal
import stat
import subprocess
import sys
import tempfile
import traceback
import types
import mitogen.core
import mitogen.fork
import mitogen.parent
import mitogen.service
# Ansible since PR #41749 inserts "import __main__" into
# ansible.module_utils.basic. Mitogen's importer will refuse such an import, so
# we must setup a fake "__main__" before that module is ever imported. The
# str() is to cast Unicode to bytes on Python 2.6.
if not sys.modules.get(str('__main__')):
sys.modules[str('__main__')] = types.ModuleType(str('__main__'))
import ansible.module_utils.json_utils
import ansible_mitogen.runner
LOG = logging.getLogger(__name__)
MAKE_TEMP_FAILED_MSG = (
"Unable to find a useable temporary directory. This likely means no\n"
"system-supplied TMP directory can be written to, or all directories\n"
"were mounted on 'noexec' filesystems.\n"
"\n"
"The following paths were tried:\n"
" %(namelist)s\n"
"\n"
"Please check '-vvv' output for a log of individual path errors."
)
#: Initialized to an econtext.parent.Context pointing at a pristine fork of
#: the target Python interpreter before it executes any code or imports.
_fork_parent = None
#: Set by :func:`init_child` to the name of a writeable and executable
#: temporary directory accessible by the active user account.
good_temp_dir = None
# issue #362: subprocess.Popen(close_fds=True) aka. AnsibleModule.run_command()
# loops the entire SC_OPEN_MAX space. CentOS>5 ships with 1,048,576 FDs by
# default, resulting in huge (>500ms) runtime waste running many commands.
# Therefore if we are a child, cap the range to something reasonable.
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
if (rlimit[0] > 512 or rlimit[1] > 512) and not mitogen.is_master:
resource.setrlimit(resource.RLIMIT_NOFILE, (512, 512))
subprocess.MAXFD = 512 # Python <3.x
del rlimit
def get_small_file(context, path):
"""
    Basic in-memory caching module fetcher. This generates one roundtrip for
every previously unseen file, so it is only a temporary solution.
:param context:
Context we should direct FileService requests to. For now (and probably
forever) this is just the top-level Mitogen connection manager process.
:param path:
Path to fetch from FileService, must previously have been registered by
a privileged context using the `register` command.
:returns:
Bytestring file data.
"""
pool = mitogen.service.get_or_create_pool(router=context.router)
service = pool.get_service('mitogen.service.PushFileService')
return service.get(path)
def transfer_file(context, in_path, out_path, sync=False, set_owner=False):
"""
Streamily download a file from the connection multiplexer process in the
controller.
:param mitogen.core.Context context:
Reference to the context hosting the FileService that will be used to
fetch the file.
:param bytes in_path:
FileService registered name of the input file.
:param bytes out_path:
Name of the output path on the local disk.
:param bool sync:
        If :data:`True`, ensure the file content and metadata are fully on disk
before renaming the temporary file over the existing file. This should
ensure in the case of system crash, either the entire old or new file
are visible post-reboot.
:param bool set_owner:
If :data:`True`, look up the metadata username and group on the local
        system and set the file owner using :func:`os.fchown`.
"""
out_path = os.path.abspath(out_path)
fd, tmp_path = tempfile.mkstemp(suffix='.tmp',
prefix='.ansible_mitogen_transfer-',
dir=os.path.dirname(out_path))
fp = os.fdopen(fd, 'wb', mitogen.core.CHUNK_SIZE)
LOG.debug('transfer_file(%r) temporary file: %s', out_path, tmp_path)
try:
try:
ok, metadata = mitogen.service.FileService.get(
context=context,
path=in_path,
out_fp=fp,
)
if not ok:
raise IOError('transfer of %r was interrupted.' % (in_path,))
os.fchmod(fp.fileno(), metadata['mode'])
if set_owner:
set_fd_owner(fp.fileno(), metadata['owner'], metadata['group'])
            if sync:
                fp.flush()
                os.fsync(fp.fileno())
        finally:
            fp.close()
        os.rename(tmp_path, out_path)
except BaseException:
os.unlink(tmp_path)
raise
os.utime(out_path, (metadata['atime'], metadata['mtime']))
def prune_tree(path):
"""
Like shutil.rmtree(), but log errors rather than discard them, and do not
waste multiple os.stat() calls discovering whether the object can be
deleted, just try deleting it instead.
"""
try:
os.unlink(path)
return
except OSError as e:
if not (os.path.isdir(path) and
e.args[0] in (errno.EPERM, errno.EISDIR)):
LOG.error('prune_tree(%r): %s', path, e)
return
try:
# Ensure write access for readonly directories. Ignore error in case
# path is on a weird filesystem (e.g. vfat).
os.chmod(path, int('0700', 8))
except OSError as e:
LOG.warning('prune_tree(%r): %s', path, e)
try:
for name in os.listdir(path):
if name not in ('.', '..'):
prune_tree(os.path.join(path, name))
os.rmdir(path)
except OSError as e:
LOG.error('prune_tree(%r): %s', path, e)
def _on_broker_shutdown():
"""
Respond to broker shutdown (graceful termination by parent, or loss of
connection to parent) by deleting our sole temporary directory.
"""
prune_tree(temp_dir)
def is_good_temp_dir(path):
"""
Return :data:`True` if `path` can be used as a temporary directory, logging
any failures that may cause it to be unsuitable. If the directory doesn't
exist, we attempt to create it using :func:`os.makedirs`.
"""
if not os.path.exists(path):
try:
os.makedirs(path, mode=int('0700', 8))
except OSError as e:
LOG.debug('temp dir %r unusable: did not exist and attempting '
'to create it failed: %s', path, e)
return False
try:
tmp = tempfile.NamedTemporaryFile(
prefix='ansible_mitogen_is_good_temp_dir',
dir=path,
)
except (OSError, IOError) as e:
LOG.debug('temp dir %r unusable: %s', path, e)
return False
try:
try:
os.chmod(tmp.name, int('0700', 8))
except OSError as e:
            LOG.debug('temp dir %r unusable: chmod failed: %s', path, e)
return False
try:
# access(.., X_OK) is sufficient to detect noexec.
if not os.access(tmp.name, os.X_OK):
raise OSError('filesystem appears to be mounted noexec')
except OSError as e:
            LOG.debug('temp dir %r unusable: %s', path, e)
return False
finally:
tmp.close()
return True
def find_good_temp_dir(candidate_temp_dirs):
"""
Given a list of candidate temp directories extracted from ``ansible.cfg``,
combine it with the Python-builtin list of candidate directories used by
:mod:`tempfile`, then iteratively try each until one is found that is both
writeable and executable.
:param list candidate_temp_dirs:
List of candidate $variable-expanded and tilde-expanded directory paths
that may be usable as a temporary directory.
"""
paths = [os.path.expandvars(os.path.expanduser(p))
for p in candidate_temp_dirs]
paths.extend(tempfile._candidate_tempdir_list())
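    # On CPython, tempfile._candidate_tempdir_list() contributes the usual
    # candidates: $TMPDIR/$TEMP/$TMP, then /tmp, /var/tmp, /usr/tmp, and
    # finally the current working directory.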
for path in paths:
if is_good_temp_dir(path):
LOG.debug('Selected temp directory: %r (from %r)', path, paths)
return path
raise IOError(MAKE_TEMP_FAILED_MSG % {
        'namelist': '\n '.join(paths),
})
@mitogen.core.takes_econtext
def init_child(econtext, log_level, candidate_temp_dirs):
"""
Called by ContextService immediately after connection; arranges for the
(presently) spotless Python interpreter to be forked, where the newly
forked interpreter becomes the parent of any newly forked future
interpreters.
This is necessary to prevent modules that are executed in-process from
    polluting the global interpreter state in a way that affects explicitly
isolated modules.
:param int log_level:
Logging package level active in the master.
:param list[str] candidate_temp_dirs:
List of $variable-expanded and tilde-expanded directory names to add to
candidate list of temporary directories.
:returns:
Dict like::
{
'fork_context': mitogen.core.Context.
'home_dir': str.
}
Where `fork_context` refers to the newly forked 'fork parent' context
the controller will use to start forked jobs, and `home_dir` is the
home directory for the active user account.
"""
# Copying the master's log level causes log messages to be filtered before
    # they reach LogForwarder, thus reducing an influx of tiny messages waking
# the connection multiplexer process in the master.
LOG.setLevel(log_level)
logging.getLogger('ansible_mitogen').setLevel(log_level)
global _fork_parent
mitogen.parent.upgrade_router(econtext)
_fork_parent = econtext.router.fork()
global good_temp_dir
good_temp_dir = find_good_temp_dir(candidate_temp_dirs)
return {
'fork_context': _fork_parent,
'home_dir': mitogen.core.to_text(os.path.expanduser('~')),
'good_temp_dir': good_temp_dir,
}
@mitogen.core.takes_econtext
def create_fork_child(econtext):
"""
For helper functions executed in the fork parent context, arrange for
the context's router to be upgraded as necessary and for a new child to be
prepared.
"""
mitogen.parent.upgrade_router(econtext)
context = econtext.router.fork()
LOG.debug('create_fork_child() -> %r', context)
return context
def run_module(kwargs):
"""
Set up the process environment in preparation for running an Ansible
module. This monkey-patches the Ansible libraries in various places to
prevent it from trying to kill the process on completion, and to prevent it
from reading sys.stdin.
"""
runner_name = kwargs.pop('runner_name')
klass = getattr(ansible_mitogen.runner, runner_name)
impl = klass(**kwargs)
return impl.run()
def _get_async_dir():
return os.path.expanduser(
os.environ.get('ANSIBLE_ASYNC_DIR', '~/.ansible_async')
)
class AsyncRunner(object):
def __init__(self, job_id, timeout_secs, econtext, kwargs):
self.job_id = job_id
self.timeout_secs = timeout_secs
self.econtext = econtext
self.kwargs = kwargs
self._timed_out = False
self._init_path()
def _init_path(self):
async_dir = _get_async_dir()
if not os.path.exists(async_dir):
os.makedirs(async_dir)
self.path = os.path.join(async_dir, self.job_id)
def _update(self, dct):
"""
Update an async job status file.
"""
LOG.info('%r._update(%r, %r)', self, self.job_id, dct)
dct.setdefault('ansible_job_id', self.job_id)
dct.setdefault('data', '')
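        # The status is written to a temporary file and renamed into place so
        # that readers never observe a partially written file.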
with open(self.path + '.tmp', 'w') as fp:
fp.write(json.dumps(dct))
os.rename(self.path + '.tmp', self.path)
def _on_sigalrm(self, signum, frame):
"""
Respond to SIGALRM (job timeout) by updating the job file and killing
the process.
"""
msg = "Job reached maximum time limit of %d seconds." % (
self.timeout_secs,
)
self._update({
"failed": 1,
"finished": 1,
"msg": msg,
})
self._timed_out = True
self.econtext.broker.shutdown()
def _install_alarm(self):
signal.signal(signal.SIGALRM, self._on_sigalrm)
signal.alarm(self.timeout_secs)
def _run_module(self):
kwargs = dict(self.kwargs, **{
'detach': True,
'econtext': self.econtext,
'emulate_tty': False,
})
return run_module(kwargs)
def _parse_result(self, dct):
filtered, warnings = (
ansible.module_utils.json_utils.
_filter_non_json_lines(dct['stdout'])
)
result = json.loads(filtered)
result.setdefault('warnings', []).extend(warnings)
result['stderr'] = dct['stderr']
self._update(result)
def _run(self):
"""
1. Immediately updates the status file to mark the job as started.
2. Installs a timer/signal handler to implement the time limit.
3. Runs as with run_module(), writing the result to the status file.
:param dict kwargs:
Runner keyword arguments.
:param str job_id:
String job ID.
:param int timeout_secs:
If >0, limit the task's maximum run time.
"""
self._update({
'started': 1,
'finished': 0,
'pid': os.getpid()
})
if self.timeout_secs > 0:
self._install_alarm()
dct = self._run_module()
if not self._timed_out:
# After SIGALRM fires, there is a window between broker responding
# to shutdown() by killing the process, and work continuing on the
# main thread. If main thread was asleep in at least
# basic.py/select.select(), an EINTR will be raised. We want to
# discard that exception.
try:
self._parse_result(dct)
except Exception:
self._update({
"failed": 1,
"msg": traceback.format_exc(),
"data": dct['stdout'], # temporary notice only
"stderr": dct['stderr']
})
def run(self):
try:
try:
self._run()
except Exception:
self._update({
"failed": 1,
"msg": traceback.format_exc(),
})
finally:
self.econtext.broker.shutdown()
@mitogen.core.takes_econtext
def run_module_async(kwargs, job_id, timeout_secs, econtext):
"""
Execute a module with its run status and result written to a file,
    terminating the process on completion. This function must run in a child
forked using :func:`create_fork_child`.
"""
arunner = AsyncRunner(job_id, timeout_secs, econtext, kwargs)
arunner.run()
def get_user_shell():
"""
For commands executed directly via an SSH command-line, SSH looks up the
user's shell via getpwuid() and only defaults to /bin/sh if that field is
missing or empty.
"""
try:
pw_shell = pwd.getpwuid(os.geteuid()).pw_shell
except KeyError:
pw_shell = None
return pw_shell or '/bin/sh'
def exec_args(args, in_data='', chdir=None, shell=None, emulate_tty=False):
"""
Run a command in a subprocess, emulating the argument handling behaviour of
SSH.
    :param list[str] args:
Argument vector.
:param bytes in_data:
Optional standard input for the command.
:param bool emulate_tty:
If :data:`True`, arrange for stdout and stderr to be merged into the
stdout pipe and for LF to be translated into CRLF, emulating the
behaviour of a TTY.
:return:
(return code, stdout bytes, stderr bytes)
"""
LOG.debug('exec_args(%r, ..., chdir=%r)', args, chdir)
assert isinstance(args, list)
if emulate_tty:
stderr = subprocess.STDOUT
else:
stderr = subprocess.PIPE
proc = subprocess.Popen(
args=args,
stdout=subprocess.PIPE,
stderr=stderr,
stdin=subprocess.PIPE,
cwd=chdir,
)
stdout, stderr = proc.communicate(in_data)
if emulate_tty:
stdout = stdout.replace(b'\n', b'\r\n')
return proc.returncode, stdout, stderr or ''
def exec_command(cmd, in_data='', chdir=None, shell=None, emulate_tty=False):
"""
Run a command in a subprocess, emulating the argument handling behaviour of
SSH.
:param bytes cmd:
String command line, passed to user's shell.
:param bytes in_data:
Optional standard input for the command.
:return:
(return code, stdout bytes, stderr bytes)
"""
assert isinstance(cmd, mitogen.core.UnicodeType)
return exec_args(
args=[get_user_shell(), '-c', cmd],
in_data=in_data,
chdir=chdir,
shell=shell,
emulate_tty=emulate_tty,
)
def read_path(path):
"""
Fetch the contents of a filesystem `path` as bytes.
"""
return open(path, 'rb').read()
def set_fd_owner(fd, owner, group=None):
if owner:
uid = pwd.getpwnam(owner).pw_uid
else:
uid = os.geteuid()
if group:
gid = grp.getgrnam(group).gr_gid
else:
gid = os.getegid()
    os.fchown(fd, uid, gid)
def write_path(path, s, owner=None, group=None, mode=None,
utimes=None, sync=False):
"""
Writes bytes `s` to a filesystem `path`.
"""
path = os.path.abspath(path)
fd, tmp_path = tempfile.mkstemp(suffix='.tmp',
prefix='.ansible_mitogen_transfer-',
dir=os.path.dirname(path))
fp = os.fdopen(fd, 'wb', mitogen.core.CHUNK_SIZE)
LOG.debug('write_path(path=%r) temporary file: %s', path, tmp_path)
try:
try:
if mode:
os.fchmod(fp.fileno(), mode)
if owner or group:
set_fd_owner(fp.fileno(), owner, group)
fp.write(s)
            if sync:
                fp.flush()
                os.fsync(fp.fileno())
        finally:
            fp.close()
        os.rename(tmp_path, path)
except BaseException:
os.unlink(tmp_path)
raise
if utimes:
os.utime(path, utimes)
CHMOD_CLAUSE_PAT = re.compile(r'([uoga]*)([+\-=])([ugo]|[rwx]*)')
CHMOD_MASKS = {
'u': stat.S_IRWXU,
'g': stat.S_IRWXG,
'o': stat.S_IRWXO,
'a': (stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO),
}
CHMOD_BITS = {
'u': {'r': stat.S_IRUSR, 'w': stat.S_IWUSR, 'x': stat.S_IXUSR},
'g': {'r': stat.S_IRGRP, 'w': stat.S_IWGRP, 'x': stat.S_IXGRP},
'o': {'r': stat.S_IROTH, 'w': stat.S_IWOTH, 'x': stat.S_IXOTH},
'a': {
'r': (stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH),
'w': (stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH),
'x': (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
}
}
def apply_mode_spec(spec, mode):
"""
Given a symbolic file mode change specification in the style of chmod(1)
`spec`, apply changes in the specification to the numeric file mode `mode`.
"""
for clause in spec.split(','):
match = CHMOD_CLAUSE_PAT.match(clause)
who, op, perms = match.groups()
for ch in who or 'a':
mask = CHMOD_MASKS[ch]
bits = CHMOD_BITS[ch]
cur_perm_bits = mode & mask
new_perm_bits = functools.reduce(operator.or_, (bits[p] for p in perms), 0)
mode &= ~mask
if op == '=':
mode |= new_perm_bits
elif op == '+':
mode |= new_perm_bits | cur_perm_bits
else:
mode |= cur_perm_bits & ~new_perm_bits
return mode
def set_file_mode(path, spec):
"""
Update the permissions of a file using the same syntax as chmod(1).
"""
mode = os.stat(path).st_mode
if spec.isdigit():
new_mode = int(spec, 8)
else:
new_mode = apply_mode_spec(spec, mode)
os.chmod(path, new_mode)
| [] | [] | ["ANSIBLE_ASYNC_DIR"] | [] | ["ANSIBLE_ASYNC_DIR"] | python | 1 | 0 |
tests/postgres/main_test.go | package postgres
import (
"context"
"database/sql"
"fmt"
"github.com/go-jet/jet/v2/tests/internal/utils/repo"
"math/rand"
"os"
"runtime"
"testing"
"time"
"github.com/jackc/pgx/v4/stdlib"
"github.com/go-jet/jet/v2/postgres"
"github.com/go-jet/jet/v2/tests/dbconfig"
_ "github.com/lib/pq"
"github.com/pkg/profile"
"github.com/stretchr/testify/require"
_ "github.com/jackc/pgx/v4/stdlib"
)
var db *sql.DB
var testRoot string
var source string
const CockroachDB = "COCKROACH_DB"
func init() {
source = os.Getenv("PG_SOURCE")
}
func sourceIsCockroachDB() bool {
return source == CockroachDB
}
func skipForCockroachDB(t *testing.T) {
if sourceIsCockroachDB() {
t.SkipNow()
}
}
func TestMain(m *testing.M) {
rand.Seed(time.Now().Unix())
defer profile.Start().Stop()
setTestRoot()
for _, driverName := range []string{"pgx", "postgres"} {
fmt.Printf("\nRunning postgres tests for '%s' driver\n", driverName)
func() {
connectionString := dbconfig.PostgresConnectString
if sourceIsCockroachDB() {
connectionString = dbconfig.CockroachConnectString
}
var err error
db, err = sql.Open(driverName, connectionString)
if err != nil {
fmt.Println(err.Error())
panic("Failed to connect to test db")
}
defer db.Close()
ret := m.Run()
if ret != 0 {
os.Exit(ret)
}
}()
}
}
func setTestRoot() {
testRoot = repo.GetTestsDirPath()
}
var loggedSQL string
var loggedSQLArgs []interface{}
var loggedDebugSQL string
var queryInfo postgres.QueryInfo
var callerFile string
var callerLine int
var callerFunction string
func init() {
postgres.SetLogger(func(ctx context.Context, statement postgres.PrintableStatement) {
loggedSQL, loggedSQLArgs = statement.Sql()
loggedDebugSQL = statement.DebugSql()
})
postgres.SetQueryLogger(func(ctx context.Context, info postgres.QueryInfo) {
queryInfo = info
callerFile, callerLine, callerFunction = info.Caller()
})
}
func requireLogged(t *testing.T, statement postgres.Statement) {
query, args := statement.Sql()
require.Equal(t, loggedSQL, query)
require.Equal(t, loggedSQLArgs, args)
require.Equal(t, loggedDebugSQL, statement.DebugSql())
}
func requireQueryLogged(t *testing.T, statement postgres.Statement, rowsProcessed int64) {
query, args := statement.Sql()
queryLogged, argsLogged := queryInfo.Statement.Sql()
require.Equal(t, query, queryLogged)
require.Equal(t, args, argsLogged)
require.Equal(t, queryInfo.RowsProcessed, rowsProcessed)
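	// runtime.Caller(1) identifies the test function that invoked this helper;
	// it should match the caller captured by the query logger callback.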
pc, file, _, _ := runtime.Caller(1)
funcDetails := runtime.FuncForPC(pc)
require.Equal(t, file, callerFile)
require.NotEmpty(t, callerLine)
require.Equal(t, funcDetails.Name(), callerFunction)
}
func skipForPgxDriver(t *testing.T) {
if isPgxDriver() {
t.SkipNow()
}
}
func isPgxDriver() bool {
switch db.Driver().(type) {
case *stdlib.Driver:
return true
}
return false
}
| ["\"PG_SOURCE\""] | [] | ["PG_SOURCE"] | [] | ["PG_SOURCE"] | go | 1 | 0 |
salt/modules/rbenv.py | # -*- coding: utf-8 -*-
'''
Manage ruby installations with rbenv. rbenv is supported on Linux and macOS.
rbenv doesn't work on Windows (and isn't really necessary on Windows as there is
no system Ruby on Windows). On Windows, the RubyInstaller and/or Pik are both
good alternatives to work with multiple versions of Ruby on the same box.
http://misheska.com/blog/2013/06/15/using-rbenv-to-manage-multiple-versions-of-ruby/
.. versionadded:: 0.16.0
'''
# Import python libs
from __future__ import absolute_import, unicode_literals, print_function
import os
import re
import logging
# Import Salt libs
import salt.utils.args
import salt.utils.data
import salt.utils.path
import salt.utils.platform
from salt.exceptions import SaltInvocationError
# Import 3rd-party libs
from salt.ext import six
# Set up logger
log = logging.getLogger(__name__)
__func_alias__ = {
'list_': 'list'
}
__opts__ = {
'rbenv.root': None,
'rbenv.build_env': None,
}
def __virtual__():
'''
Only work on POSIX-like systems
'''
if salt.utils.platform.is_windows():
return (False, 'The rbenv execution module failed to load: only available on non-Windows systems.')
return True
def _shlex_split(s):
# from python:salt.utils.args.shlex_split: passing None for s will read
# the string to split from standard input.
if s is None:
ret = salt.utils.args.shlex_split('')
else:
ret = salt.utils.args.shlex_split(s)
return ret
def _parse_env(env):
if not env:
env = {}
if isinstance(env, list):
env = salt.utils.data.repack_dictlist(env)
if not isinstance(env, dict):
env = {}
for bad_env_key in (x for x, y in six.iteritems(env) if y is None):
log.error('Environment variable \'%s\' passed without a value. '
'Setting value to an empty string', bad_env_key)
env[bad_env_key] = ''
return env
def _rbenv_bin(runas=None):
path = _rbenv_path(runas)
return '{0}/bin/rbenv'.format(path)
def _rbenv_path(runas=None):
path = None
if runas in (None, 'root'):
path = __salt__['config.option']('rbenv.root') or '/usr/local/rbenv'
else:
path = __salt__['config.option']('rbenv.root') \
or '~{0}/.rbenv'.format(runas)
return os.path.expanduser(path)
def _rbenv_exec(command, env=None, runas=None, ret=None):
if not is_installed(runas):
return False
binary = _rbenv_bin(runas)
path = _rbenv_path(runas)
environ = _parse_env(env)
environ['RBENV_ROOT'] = path
result = __salt__['cmd.run_all'](
[binary] + command,
runas=runas,
env=environ
)
if isinstance(ret, dict):
ret.update(result)
return ret
if result['retcode'] == 0:
return result['stdout']
else:
return False
def _install_rbenv(path, runas=None):
if os.path.isdir(path):
return True
cmd = ['git', 'clone', 'https://github.com/rbenv/rbenv.git', path]
return __salt__['cmd.retcode'](cmd, runas=runas, python_shell=False) == 0
def _install_ruby_build(path, runas=None):
path = '{0}/plugins/ruby-build'.format(path)
if os.path.isdir(path):
return True
cmd = ['git', 'clone',
'https://github.com/rbenv/ruby-build.git', path]
return __salt__['cmd.retcode'](cmd, runas=runas, python_shell=False) == 0
def _update_rbenv(path, runas=None):
if not os.path.isdir(path):
return False
return __salt__['cmd.retcode'](['git', 'pull'],
runas=runas,
cwd=path,
python_shell=False) == 0
def _update_ruby_build(path, runas=None):
path = '{0}/plugins/ruby-build'.format(path)
if not os.path.isdir(path):
return False
return __salt__['cmd.retcode'](['git', 'pull'],
runas=runas,
cwd=path,
python_shell=False) == 0
def install(runas=None, path=None):
'''
Install rbenv systemwide
CLI Example:
.. code-block:: bash
salt '*' rbenv.install
'''
path = path or _rbenv_path(runas)
path = os.path.expanduser(path)
return _install_rbenv(path, runas) and _install_ruby_build(path, runas)
def update(runas=None, path=None):
'''
Updates the current versions of rbenv and ruby-build
runas
The user under which to run rbenv. If not specified, then rbenv will be
run as the user under which Salt is running.
CLI Example:
.. code-block:: bash
salt '*' rbenv.update
'''
path = path or _rbenv_path(runas)
path = os.path.expanduser(path)
return _update_rbenv(path, runas) and _update_ruby_build(path, runas)
def is_installed(runas=None):
'''
Check if rbenv is installed
CLI Example:
.. code-block:: bash
salt '*' rbenv.is_installed
'''
return __salt__['cmd.has_exec'](_rbenv_bin(runas))
def install_ruby(ruby, runas=None):
'''
Install a ruby implementation.
ruby
The version of Ruby to install, should match one of the
versions listed by :py:func:`rbenv.list <salt.modules.rbenv.list>`
runas
The user under which to run rbenv. If not specified, then rbenv will be
run as the user under which Salt is running.
Additional environment variables can be configured in pillar /
grains / master:
.. code-block:: yaml
rbenv:
build_env: 'CONFIGURE_OPTS="--no-tcmalloc" CFLAGS="-fno-tree-dce"'
CLI Example:
.. code-block:: bash
salt '*' rbenv.install_ruby 2.0.0-p0
'''
ruby = re.sub(r'^ruby-', '', ruby)
env = None
env_list = []
if __grains__['os'] in ('FreeBSD', 'NetBSD', 'OpenBSD'):
env_list.append('MAKE=gmake')
if __salt__['config.get']('rbenv:build_env'):
env_list.append(__salt__['config.get']('rbenv:build_env'))
elif __salt__['config.option']('rbenv.build_env'):
env_list.append(__salt__['config.option']('rbenv.build_env'))
if env_list:
env = ' '.join(env_list)
ret = {}
ret = _rbenv_exec(['install', ruby], env=env, runas=runas, ret=ret)
if ret is not False and ret['retcode'] == 0:
rehash(runas=runas)
return ret['stderr']
else:
# Cleanup the failed installation so it doesn't list as installed
uninstall_ruby(ruby, runas=runas)
return False
def uninstall_ruby(ruby, runas=None):
'''
Uninstall a ruby implementation.
ruby
The version of ruby to uninstall. Should match one of the versions
listed by :py:func:`rbenv.versions <salt.modules.rbenv.versions>`.
runas
The user under which to run rbenv. If not specified, then rbenv will be
run as the user under which Salt is running.
CLI Example:
.. code-block:: bash
salt '*' rbenv.uninstall_ruby 2.0.0-p0
'''
ruby = re.sub(r'^ruby-', '', ruby)
_rbenv_exec(['uninstall', '--force', ruby], runas=runas)
return True
def versions(runas=None):
'''
List the installed versions of ruby
CLI Example:
.. code-block:: bash
salt '*' rbenv.versions
'''
ret = _rbenv_exec(['versions', '--bare'], runas=runas)
return [] if ret is False else ret.splitlines()
def default(ruby=None, runas=None):
'''
Returns or sets the currently defined default ruby
ruby
The version to set as the default. Should match one of the versions
listed by :py:func:`rbenv.versions <salt.modules.rbenv.versions>`.
Leave blank to return the current default.
CLI Example:
.. code-block:: bash
salt '*' rbenv.default
salt '*' rbenv.default 2.0.0-p0
'''
if ruby:
_rbenv_exec(['global', ruby], runas=runas)
return True
else:
ret = _rbenv_exec(['global'], runas=runas)
return '' if ret is False else ret.strip()
def list_(runas=None):
'''
List the installable versions of ruby
runas
The user under which to run rbenv. If not specified, then rbenv will be
run as the user under which Salt is running.
CLI Example:
.. code-block:: bash
salt '*' rbenv.list
'''
ret = []
output = _rbenv_exec(['install', '--list'], runas=runas)
if output:
for line in output.splitlines():
if line == 'Available versions:':
continue
ret.append(line.strip())
return ret
def rehash(runas=None):
'''
Run ``rbenv rehash`` to update the installed shims
runas
The user under which to run rbenv. If not specified, then rbenv will be
run as the user under which Salt is running.
CLI Example:
.. code-block:: bash
salt '*' rbenv.rehash
'''
_rbenv_exec(['rehash'], runas=runas)
return True
def do(cmdline, runas=None, env=None):
'''
Execute a ruby command with rbenv's shims from the user or the system
CLI Example:
.. code-block:: bash
salt '*' rbenv.do 'gem list bundler'
salt '*' rbenv.do 'gem list bundler' deploy
'''
if not cmdline:
# This is a positional argument so this should never happen, but this
# will handle cases where someone explicitly passes a false value for
# cmdline.
raise SaltInvocationError('Command must be specified')
path = _rbenv_path(runas)
if not env:
env = {}
# NOTE: Env vars (and their values) need to be str type on both Python 2
# and 3. The code below first normalizes all path components to unicode to
# stitch them together, and then converts the result back to a str type.
env[str('PATH')] = salt.utils.stringutils.to_str( # future lint: disable=blacklisted-function
os.pathsep.join((
salt.utils.path.join(path, 'shims'),
salt.utils.stringutils.to_unicode(os.environ['PATH'])
))
)
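    # Prepending the rbenv shims directory means commands such as `ruby` and
    # `gem` in the command line resolve through rbenv for the selected version.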
try:
cmdline = salt.utils.args.shlex_split(cmdline)
except AttributeError:
        cmdline = salt.utils.args.shlex_split(six.text_type(cmdline))
result = __salt__['cmd.run_all'](
cmdline,
runas=runas,
env=env,
python_shell=False
)
if result['retcode'] == 0:
rehash(runas=runas)
return result['stdout']
else:
return False
def do_with_ruby(ruby, cmdline, runas=None):
'''
Execute a ruby command with rbenv's shims using a specific ruby version
CLI Example:
.. code-block:: bash
salt '*' rbenv.do_with_ruby 2.0.0-p0 'gem list bundler'
salt '*' rbenv.do_with_ruby 2.0.0-p0 'gem list bundler' runas=deploy
'''
if not cmdline:
# This is a positional argument so this should never happen, but this
# will handle cases where someone explicitly passes a false value for
# cmdline.
raise SaltInvocationError('Command must be specified')
try:
cmdline = salt.utils.args.shlex_split(cmdline)
except AttributeError:
cmdline = salt.utils.args.shlex_split(six.text_type(cmdline))
env = {}
if ruby:
env['RBENV_VERSION'] = ruby
cmd = cmdline
else:
cmd = cmdline
return do(cmd, runas=runas, env=env)
| [] | [] | ["PATH"] | [] | ["PATH"] | python | 1 | 0 |
jax/numpy/lax_numpy.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Implements the NumPy API, using the primitives in :mod:`jax.lax`.
NumPy operations are implemented in Python in terms of the primitive operations
in :mod:`jax.lax`. Since NumPy operations are not primitive and instead are
implemented in terms of :mod:`jax.lax` operations, we do not need to define
transformation rules such as gradient or batching rules. Instead,
transformations for NumPy primitives can be derived from the transformation
rules for the underlying :code:`lax` primitives.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from distutils.util import strtobool
import collections
try:
from collections.abc import Sequence
except ImportError: # python 2
from collections import Sequence
import itertools
import os
import re
import string
import types
import warnings
import numpy as onp
import opt_einsum
import six
from six.moves import builtins, xrange
from jax import jit, device_put, custom_transforms, defjvp
from .. import core
from .. import dtypes
from ..abstract_arrays import UnshapedArray, ShapedArray, ConcreteArray
from ..config import flags
from ..interpreters.xla import DeviceArray
from .. import lax
from ..util import partial, get_module_functions, unzip2, prod as _prod, subvals
from ..lib import pytree
from ..lib import xla_client
FLAGS = flags.FLAGS
flags.DEFINE_enum(
'jax_numpy_rank_promotion', os.getenv('JAX_NUMPY_RANK_PROMOTION', 'allow'),
enum_values=['allow', 'warn', 'raise'],
help=
'Control NumPy-style automatic rank promotion broadcasting '
'("allow", "warn", or "raise").')
if six.PY3:
def removechars(s, chars):
return s.translate(str.maketrans(dict.fromkeys(chars)))
else:
def removechars(s, chars):
return s.translate(None, ''.join(chars))
newaxis = None
# We replace some builtin names to follow Numpy's API, so we capture here.
_abs = builtins.abs
_all = builtins.all
_any = builtins.any
_max = builtins.max
_min = builtins.min
_sum = builtins.sum
_divmod = builtins.divmod
# NumPy constants
pi = onp.pi
e = onp.e
euler_gamma = onp.euler_gamma
inf = onp.inf
NINF = onp.NINF
PZERO = onp.PZERO
NZERO = onp.NZERO
nan = onp.nan
# And some numpy utility functions
set_printoptions = onp.set_printoptions
# We want isinstance(x, np.ndarray) checks in user code to work with the our
# array-like types, including DeviceArray and UnshapedArray (i.e. the abstract
# array base class). We can override the isinstance behavior directly, without
# having the complexity of multiple inheritance on those classes, by defining
# the ndarray class to have a metaclass with special __instancecheck__ behavior.
_arraylike_types = (onp.ndarray, UnshapedArray, DeviceArray)
class _ArrayMeta(type(onp.ndarray)):
"""Metaclass for overriding ndarray isinstance checks."""
def __instancecheck__(self, instance):
try:
return isinstance(instance.aval, _arraylike_types)
except AttributeError:
return isinstance(instance, _arraylike_types)
class ndarray(six.with_metaclass(_ArrayMeta, onp.ndarray)):
def __init__(shape, dtype=None, buffer=None, offset=0, strides=None,
order=None):
raise TypeError("jax.numpy.ndarray() should not be instantiated explicitly."
" Use jax.numpy.array, or jax.numpy.zeros instead.")
iscomplexobj = onp.iscomplexobj
shape = _shape = onp.shape
ndim = _ndim = onp.ndim
size = onp.size
_dtype = dtypes.result_type
# At present JAX doesn't have a reason to distinguish between scalars and arrays
# in its object system. Further, we want JAX scalars to have the same type
# promotion behaviors as JAX arrays. Rather than introducing a new type of JAX
# scalar object with JAX promotion behaviors, instead we make the JAX scalar
# types return JAX arrays when instantiated.
class _ScalarMeta(type):
def __hash__(self):
return hash(self.dtype.type)
def __eq__(self, other):
return id(self) == id(other) or self.dtype == other
def __ne__(self, other):
return not (self == other)
def __call__(self, x):
return array(self.dtype.type(x), dtype=self.dtype)
def _make_scalar_type(onp_scalar_type):
return type(onp_scalar_type.__name__,
(six.with_metaclass(_ScalarMeta, object),),
{"dtype": onp.dtype(onp_scalar_type)})
bool_ = _make_scalar_type(onp.bool_)
uint8 = _make_scalar_type(onp.uint8)
uint16 = _make_scalar_type(onp.uint16)
uint32 = _make_scalar_type(onp.uint32)
uint64 = _make_scalar_type(onp.uint64)
int8 = _make_scalar_type(onp.int8)
int16 = _make_scalar_type(onp.int16)
int32 = _make_scalar_type(onp.int32)
int64 = _make_scalar_type(onp.int64)
bfloat16 = _make_scalar_type(dtypes.bfloat16)
float16 = _make_scalar_type(onp.float16)
float32 = single = _make_scalar_type(onp.float32)
float64 = double = _make_scalar_type(onp.float64)
complex64 = csingle = _make_scalar_type(onp.complex64)
complex128 = cdouble = _make_scalar_type(onp.complex128)
int_ = int32 if dtypes.int_ == onp.int32 else int64
float_ = float32 if dtypes.float_ == onp.float32 else float64
complex_ = complex64 if dtypes.complex_ == onp.complex64 else complex128
number = onp.number
inexact = onp.inexact
complexfloating = onp.complexfloating
floating = onp.floating
integer = onp.integer
signedinteger = onp.signedinteger
unsignedinteger = onp.unsignedinteger
flexible = onp.flexible
character = onp.character
object_ = onp.object_
iinfo = dtypes.iinfo
dtype = onp.dtype
can_cast = dtypes.can_cast
issubsctype = dtypes.issubsctype
result_type = dtypes.result_type
promote_types = dtypes.promote_types
ComplexWarning = onp.ComplexWarning
array_str = onp.array_str
array_repr = onp.array_repr
save = onp.save
savez = onp.savez
load = onp.load
### utility functions
def _promote_shapes(fun_name, *args):
"""Prepend implicit leading singleton dimensions for Numpy broadcasting."""
if len(args) < 2:
return args
else:
shapes = [shape(arg) for arg in args]
nonscalar_ranks = [len(shp) for shp in shapes if shp]
if not nonscalar_ranks or len(set(nonscalar_ranks)) == 1:
return args
else:
if FLAGS.jax_numpy_rank_promotion != "allow":
_rank_promotion_warning_or_error(fun_name, shapes)
result_rank = len(lax.broadcast_shapes(*shapes))
return [lax.reshape(arg, (1,) * (result_rank - len(shp)) + shp)
if shp and len(shp) != result_rank else arg
for arg, shp in zip(args, shapes)]
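# For example, promoting a shape-(3,) operand against a shape-(2, 3) operand
# reshapes the former to (1, 3), so both operands reach the underlying lax op
# with equal rank and NumPy-style broadcasting applies.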
def _rank_promotion_warning_or_error(fun_name, shapes):
if FLAGS.jax_numpy_rank_promotion == "warn":
msg = ("Following NumPy automatic rank promotion for {} on shapes {}. "
"Set the jax_numpy_rank_promotion config option to 'allow' to "
"disable this warning; for more information, see "
"https://jax.readthedocs.io/en/latest/rank_promotion_warning.html.")
warnings.warn(msg.format(fun_name, ' '.join(map(str, shapes))))
elif FLAGS.jax_numpy_rank_promotion == "raise":
msg = ("Operands could not be broadcast together for {} on shapes {} "
"and with the config option jax_numpy_rank_promotion='raise'. "
"For more information, see "
"https://jax.readthedocs.io/en/latest/rank_promotion_warning.html.")
raise ValueError(msg.format(fun_name, ' '.join(map(str, shapes))))
def _promote_dtypes(*args):
"""Convenience function to apply Numpy argument dtype promotion."""
# TODO(dougalm,mattjj): This is a performance bottleneck. Consider memoizing.
if len(args) < 2:
return args
else:
to_dtype = result_type(*args)
return [lax.convert_element_type(x, to_dtype) for x in args]
def _promote_dtypes_inexact(*args):
"""Convenience function to apply Numpy argument dtype promotion.
Promotes arguments to an inexact type."""
to_dtype = _to_inexact_dtype(result_type(*args))
return [lax.convert_element_type(x, to_dtype) for x in args]
def _to_inexact_dtype(dtype):
"""Promotes a dtype into an inexact dtype, if it is not already one."""
return dtype if issubdtype(dtype, inexact) else promote_types(dtype, float_)
def _complex_elem_type(dtype):
"""Returns the float type of the real/imaginary parts of a complex dtype."""
return onp.abs(onp.zeros((), dtype)).dtype
def _result_dtype(op, *args):
"""Compute result dtype of applying op to arguments with given dtypes."""
args = [onp.ones((0,) * ndim(arg), _dtype(arg)) for arg in args]
return _dtype(op(*args))
def _arraylike(x): return isinstance(x, ndarray) or isscalar(x)
def _check_arraylike(fun_name, *args):
"""Check if all args fit JAX's definition of arraylike (ndarray or scalar)."""
if _any(not _arraylike(arg) for arg in args):
pos, arg = next((i, arg) for i, arg in enumerate(args)
if not _arraylike(arg))
msg = "{} requires ndarray or scalar arguments, got {} at position {}."
raise TypeError(msg.format(fun_name, type(arg), pos))
def _promote_args(fun_name, *args):
"""Convenience function to apply Numpy argument shape and dtype promotion."""
_check_arraylike(fun_name, *args)
return _promote_shapes(fun_name, *_promote_dtypes(*args))
def _promote_args_inexact(fun_name, *args):
"""Convenience function to apply Numpy argument shape and dtype promotion.
Promotes non-inexact types to an inexact type."""
_check_arraylike(fun_name, *args)
return _promote_shapes(fun_name, *_promote_dtypes_inexact(*args))
def _constant_like(x, const):
return onp.array(const, dtype=_dtype(x))
def update_numpydoc(docstr, fun, op):
  '''Transforms the numpy docstring to remove references to
parameters that are supported by the numpy version but not the JAX version'''
#Some numpy functions have an extra tab at the beginning of each line,
#If this function is one of those we remove this extra tab from all the lines
if not hasattr(op, '__code__'):
return docstr
if docstr[:4] == ' ':
lines = docstr.split('\n')
for idx, line in enumerate(lines):
lines[idx] = line.replace(' ', '', 1)
docstr = '\n'.join(lines)
begin_idx = docstr.find("Parameters")
begin_idx = docstr.find("--\n", begin_idx) + 2
end_idx = docstr.find("Returns", begin_idx)
parameters = docstr[begin_idx:end_idx]
param_list = parameters.replace('\n ', '@@').split('\n')
for idx, p in enumerate(param_list):
param = p[:p.find(' : ')].split(", ")[0]
if param not in op.__code__.co_varnames:
param_list[idx] = ''
param_list = [param for param in param_list if param != '']
parameters = '\n'.join(param_list).replace('@@', '\n ')
return docstr[:begin_idx + 1] + parameters + docstr[end_idx - 2:]
_numpy_signature_re = re.compile(r'^([\w., ]+=)?\s*[\w\.]+\(.*\)$')
def _wraps(fun, update_doc=True, lax_description=""):
"""Like functools.wraps but works with numpy.ufuncs.
  It is important that when wrapping numpy functions the parameter names
in the original function and in the JAX version are the same
Parameters:
fun: The function being wrapped
    update_doc: whether to transform the numpy docstring to remove references to
parameters that are supported by the numpy version but not the JAX version.
If False, include the numpy docstring verbatim.
"""
def wrap(op):
if not hasattr(fun, '__doc__') or fun.__doc__ is None:
return op
try:
# Numpy doc comments have the form:
# fn(x, y, z) (optional)
#
# A one-line summary
#
# ... everything else ...
# We (a) move the summary to the top, since it is what the Sphinx
# autosummary extension expects, and (b) add a comment below the summary
# to the effect that this is a LAX wrapper of a Numpy function.
sections = fun.__doc__.split("\n\n")
signatures = []
summary = None
for i in xrange(len(sections)):
if _numpy_signature_re.match(sections[i]):
signatures.append(sections[i])
else:
summary = sections[i].strip()
break
body = "\n\n".join(signatures + sections[i + 1:])
if update_doc:
body = update_numpydoc(body, fun, op)
desc = lax_description + "\n" if lax_description else ""
docstr = (
"{summary}\n\nLAX-backend implementation of :func:`{fun}`.\n"
"{lax_description}Original docstring below.\n\n{body}"
.format(summary=summary, lax_description=desc,
fun=fun.__name__, body=body))
op.__name__ = fun.__name__
op.__doc__ = docstr
finally:
return op
return wrap
def _canonicalize_axis(axis, num_dims):
"""Canonicalize an axis in (-num_dims, num_dims) to [0, num_dims)."""
axis = int(axis)
if axis < 0:
axis = axis + num_dims
if axis < 0 or axis >= num_dims:
raise ValueError(
"axis {} is out of bounds for array of dimension {}".format(
axis, num_dims))
return axis
### implementations of numpy functions in terms of lax
@_wraps(onp.finfo)
def finfo(dtype): return dtypes.finfo(dtype)
@_wraps(onp.issubdtype)
def issubdtype(arg1, arg2): return dtypes.issubdtype(arg1, arg2)
@_wraps(onp.isscalar)
def isscalar(num): return dtypes.is_python_scalar(num) or onp.isscalar(num)
iterable = onp.iterable
@_wraps(onp.result_type)
def result_type(*args):
return dtypes.result_type(*args)
def _one_to_one_unop(numpy_fn, lax_fn, promote_to_inexact=False):
if promote_to_inexact:
def fn(x):
x = lax.convert_element_type(x, _to_inexact_dtype(_dtype(x)))
return lax_fn(x)
else:
fn = lambda x: lax_fn(x)
return _wraps(numpy_fn)(fn)
def _one_to_one_binop(numpy_fn, lax_fn, promote_to_inexact=False):
if promote_to_inexact:
fn = lambda x1, x2: lax_fn(*_promote_args_inexact(numpy_fn, x1, x2))
else:
fn = lambda x1, x2: lax_fn(*_promote_args(numpy_fn.__name__, x1, x2))
return _wraps(numpy_fn)(fn)
def _maybe_bool_binop(numpy_fn, lax_fn, bool_lax_fn):
def fn(x1, x2):
x1, x2 = _promote_args(numpy_fn.__name__, x1, x2)
return lax_fn(x1, x2) if x1.dtype != bool_ else bool_lax_fn(x1, x2)
return _wraps(numpy_fn)(fn)
absolute = abs = _one_to_one_unop(onp.absolute, lax.abs)
fabs = _one_to_one_unop(onp.fabs, lax.abs, True)
bitwise_not = _one_to_one_unop(onp.bitwise_not, lax.bitwise_not)
negative = _one_to_one_unop(onp.negative, lax.neg)
positive = _one_to_one_unop(onp.positive, lambda x: x)
sign = _one_to_one_unop(onp.sign, lax.sign)
floor = _one_to_one_unop(onp.floor, lax.floor, True)
ceil = _one_to_one_unop(onp.ceil, lax.ceil, True)
exp = _one_to_one_unop(onp.exp, lax.exp, True)
log = _one_to_one_unop(onp.log, lax.log, True)
expm1 = _one_to_one_unop(onp.expm1, lax.expm1, True)
log1p = _one_to_one_unop(onp.log1p, lax.log1p, True)
sin = _one_to_one_unop(onp.sin, lax.sin, True)
cos = _one_to_one_unop(onp.cos, lax.cos, True)
tan = _one_to_one_unop(onp.tan, lax.tan, True)
arcsin = _one_to_one_unop(onp.arcsin, lax.asin, True)
arccos = _one_to_one_unop(onp.arccos, lax.acos, True)
arctan = _one_to_one_unop(onp.arctan, lax.atan, True)
sinh = _one_to_one_unop(onp.sinh, lax.sinh, True)
cosh = _one_to_one_unop(onp.cosh, lax.cosh, True)
tanh = _one_to_one_unop(onp.tanh, lax.tanh, True)
sqrt = _one_to_one_unop(onp.sqrt, lax.sqrt, True)
add = _maybe_bool_binop(onp.add, lax.add, lax.bitwise_or)
bitwise_and = _one_to_one_binop(onp.bitwise_and, lax.bitwise_and)
bitwise_or = _one_to_one_binop(onp.bitwise_or, lax.bitwise_or)
bitwise_xor = _one_to_one_binop(onp.bitwise_xor, lax.bitwise_xor)
right_shift = _one_to_one_binop(onp.right_shift, lax.shift_right_arithmetic)
left_shift = _one_to_one_binop(onp.left_shift, lax.shift_left)
equal = _one_to_one_binop(onp.equal, lax.eq)
multiply = _maybe_bool_binop(onp.multiply, lax.mul, lax.bitwise_and)
not_equal = _one_to_one_binop(onp.not_equal, lax.ne)
subtract = _one_to_one_binop(onp.subtract, lax.sub)
arctan2 = _one_to_one_binop(onp.arctan2, lax.atan2, True)
minimum = _one_to_one_binop(onp.minimum, lax.min)
maximum = _one_to_one_binop(onp.maximum, lax.max)
float_power = _one_to_one_binop(onp.float_power, lax.pow, True)
nextafter = _one_to_one_binop(onp.nextafter, lax.nextafter, True)
def _comparison_op(numpy_fn, lax_fn):
def fn(x1, x2):
x1, x2 = _promote_args(numpy_fn.__name__, x1, x2)
# Comparison on complex types are defined as a lexicographic ordering on
# the (real, imag) pair.
if issubdtype(_dtype(x1), complexfloating):
rx = lax.real(x1)
ry = lax.real(x2)
return lax.select(lax.eq(rx, ry), lax_fn(lax.imag(x1), lax.imag(x2)),
lax_fn(rx, ry))
return lax_fn(x1, x2)
return _wraps(numpy_fn)(fn)
greater_equal = _comparison_op(onp.greater_equal, lax.ge)
greater = _comparison_op(onp.greater, lax.gt)
less_equal = _comparison_op(onp.less_equal, lax.le)
less = _comparison_op(onp.less, lax.lt)
def _logical_op(np_op, bitwise_op):
@_wraps(np_op, update_doc=False)
def op(*args):
zero = lambda x: lax.full_like(x, shape=(), fill_value=0)
args = (x if issubdtype(_dtype(x), bool_) else lax.ne(x, zero(x))
for x in args)
return bitwise_op(*_promote_args(np_op.__name__, *args))
return op
logical_and = _logical_op(onp.logical_and, lax.bitwise_and)
logical_not = _logical_op(onp.logical_not, lax.bitwise_not)
logical_or = _logical_op(onp.logical_or, lax.bitwise_or)
logical_xor = _logical_op(onp.logical_xor, lax.bitwise_xor)
@_wraps(onp.true_divide)
def true_divide(x1, x2):
x1, x2 = _promote_args_inexact("true_divide", x1, x2)
return lax.div(x1, x2)
@_wraps(onp.divide)
def divide(x1, x2):
# decide whether to perform integer division based on Numpy result dtype, as a
# way to check whether Python 3 style division is active in Numpy
result_dtype = _result_dtype(onp.divide, x1, x2)
if issubdtype(result_dtype, integer):
return floor_divide(x1, x2)
else:
return true_divide(x1, x2)
@_wraps(onp.floor_divide)
def floor_divide(x1, x2):
x1, x2 = _promote_args("floor_divide", x1, x2)
dtype = _dtype(x1)
if issubdtype(dtype, integer):
quotient = lax.div(x1, x2)
select = logical_and(lax.sign(x1) != lax.sign(x2), lax.rem(x1, x2) != 0)
# TODO(mattjj): investigate why subtracting a scalar was causing promotion
return where(select, quotient - onp.array(1, _dtype(quotient)), quotient)
elif issubdtype(dtype, complexfloating):
x1r = lax.real(x1)
x1i = lax.imag(x1)
x2r = lax.real(x2)
x2i = lax.imag(x2)
which = lax.ge(lax.abs(x2r), lax.abs(x2i))
rat1 = where(which, lax._const(x2i, 1), lax.div(x2r, x2i))
rat2 = where(which, lax.div(x2i, x2r), lax._const(x2i, 1))
out = lax.floor(lax.div(lax.add(lax.mul(x1r, rat1), lax.mul(x1i, rat2)),
lax.add(lax.mul(x2r, rat1), lax.mul(x2i, rat2))))
return lax.convert_element_type(out, dtype)
else:
return _float_divmod(x1, x2)[0]
@_wraps(onp.divmod)
def divmod(x1, x2):
x1, x2 = _promote_args("divmod", x1, x2)
if issubdtype(_dtype(x1), integer):
return floor_divide(x1, x2), remainder(x1, x2)
else:
return _float_divmod(x1, x2)
def _float_divmod(x1, x2):
# see float_divmod in floatobject.c of CPython
mod = lax.rem(x1, x2)
div = lax.div(lax.sub(x1, mod), x2)
ind = lax.bitwise_and(mod != 0, lax.sign(x2) != lax.sign(mod))
mod = lax.select(ind, mod + x2, mod)
div = lax.select(ind, div - _constant_like(div, 1), div)
return lax.round(div), mod
@_wraps(onp.power)
def power(x1, x2):
x1 = asarray(x1)
x2 = asarray(x2)
x1, x2 = _promote_args(onp.power, x1, x2)
dtype = _dtype(x1)
if not issubdtype(dtype, integer):
return lax.pow(x1, x2)
# Integer power => use binary exponentiation.
# TODO(phawkins): add integer pow support to XLA.
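  # For example, power(3, 5) with x2 == 0b101 sets acc = 3 at bit 0, leaves it
  # unchanged at bit 1, then multiplies by 3**4 at bit 2, giving 243.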
bits = 6 # Anything more would overflow for any x1 > 1
acc = ones(shape(x1), dtype=dtype)
for _ in xrange(bits):
acc = where(lax.bitwise_and(x2, _constant_like(x2, 1)),
lax.mul(acc, x1), acc)
x1 = lax.mul(x1, x1)
x2 = lax.shift_right_logical(x2, _constant_like(x2, 1))
return acc
@_wraps(onp.logaddexp)
def logaddexp(x1, x2):
x1, x2 = _promote_shapes("logaddexp", *_promote_dtypes_inexact(x1, x2))
amax = lax.max(x1, x2)
delta = lax.sub(x1, x2)
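  # Uses the identity log(exp(x1) + exp(x2)) = max(x1, x2) + log1p(exp(-|x1 - x2|)),
  # which avoids overflow when either argument is large.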
return lax.select(isnan(delta),
lax.add(x1, x2), # NaNs or infinities of the same sign.
lax.add(amax, lax.log1p(lax.exp(-lax.abs(delta)))))
@_wraps(onp.logaddexp2)
def logaddexp2(x1, x2):
x1, x2 = _promote_shapes("logaddexp2", *_promote_dtypes_inexact(x1, x2))
amax = lax.max(x1, x2)
delta = lax.sub(x1, x2)
return lax.select(isnan(delta),
lax.add(x1, x2), # NaNs or infinities of the same sign.
lax.add(amax, lax.div(lax.log1p(exp2(-lax.abs(delta))),
_constant_like(x1, onp.log(2)))))
@_wraps(onp.log2)
def log2(x):
x, = _promote_dtypes_inexact(x)
return lax.div(lax.log(x), lax.log(_constant_like(x, 2)))
@_wraps(onp.log10)
def log10(x):
x, = _promote_dtypes_inexact(x)
return lax.div(lax.log(x), lax.log(_constant_like(x, 10)))
@_wraps(onp.exp2)
def exp2(x):
x, = _promote_dtypes_inexact(x)
return lax.exp(lax.mul(lax.log(_constant_like(x, 2)), x))
@_wraps(onp.signbit)
def signbit(x):
x, = _promote_shapes("signbit", x)
dtype = _dtype(x)
if issubdtype(dtype, integer):
return lax.lt(x, _constant_like(x, 0))
elif issubdtype(dtype, bool_):
return full_like(x, False, dtype=bool_)
elif not issubdtype(dtype, floating):
raise ValueError(
"jax.numpy.signbit is not well defined for %s" % dtype)
# TPU supports BF16 but not S16 types, so as a workaround, convert BF16 to
# F32.
if dtype == bfloat16:
dtype = float32
x = lax.convert_element_type(x, float32)
info = finfo(dtype)
if info.bits == 16:
int_type = onp.int16
elif info.bits == 32:
int_type = onp.int32
elif info.bits == 64:
int_type = onp.int64
else:
raise NotImplementedError(
"jax.numpy.signbit only supports 16, 32, and 64-bit types.")
x = lax.bitcast_convert_type(x, int_type)
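  # The sign bit is the most significant bit, so e.g. for float32
  # (nexp=8, nmant=23) the shift below moves bit 31 down to bit 0.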
return lax.convert_element_type(x >> (info.nexp + info.nmant), onp.bool)
@_wraps(onp.remainder)
def remainder(x1, x2):
x1, x2 = _promote_args("remainder", x1, x2)
zero = _constant_like(x1, 0)
trunc_mod = lax.rem(x1, x2)
trunc_mod_not_zero = lax.ne(trunc_mod, zero)
do_plus = lax.bitwise_and(
lax.ne(lax.lt(trunc_mod, zero), lax.lt(x2, zero)), trunc_mod_not_zero)
return lax.select(do_plus, lax.add(trunc_mod, x2), trunc_mod)
mod = remainder
fmod = _wraps(onp.fmod)(lambda x1, x2: lax.rem(x1, x2))
@_wraps(onp.cbrt)
def cbrt(x):
x, = _promote_dtypes_inexact(x)
return lax.sign(x) * power(lax.abs(x), _constant_like(x, 1. / 3.))
@_wraps(onp.square)
def square(x): return lax.mul(x, x)
@_wraps(onp.deg2rad)
def deg2rad(x):
x, = _promote_dtypes_inexact(x)
return lax.mul(x, lax._const(x, pi / 180))
@_wraps(onp.rad2deg)
def rad2deg(x):
x, = _promote_dtypes_inexact(x)
return lax.mul(x, lax._const(x, 180 / pi))
degrees = rad2deg
radians = deg2rad
@_wraps(onp.heaviside)
def heaviside(x1, x2):
x1, x2 = _promote_dtypes_inexact(x1, x2)
zero = lax._const(x1, 0)
return where(lax.lt(x1, zero), zero,
where(lax.gt(x1, zero), lax._const(x1, 1), x2))
@_wraps(onp.hypot)
def hypot(x1, x2):
x1, x2 = _promote_dtypes_inexact(x1, x2)
return lax.sqrt(x1*x1 + x2*x2)
@_wraps(onp.reciprocal)
def reciprocal(x):
x, = _promote_dtypes_inexact(x)
return lax.div(lax._const(x, 1), x)
@_wraps(onp.sinc, update_doc=False)
def sinc(x):
x, = _promote_dtypes_inexact(x)
eq_zero = lax.eq(x, lax._const(x, 0))
safe_x = where(eq_zero, lax._const(x, 0), x)
pi_x = lax.mul(lax._const(x, pi), safe_x)
return where(eq_zero,
lax._const(x, 1), lax.div(lax.sin(pi_x), pi_x))
@_wraps(onp.arcsinh)
@custom_transforms
@jit
@lax._upcast_fp16_for_computation
def arcsinh(x):
# asinh(x) = log(x + sqrt(x**2 + 1))
x, = _promote_dtypes_inexact(x)
one = lax._const(x, 1)
result = lax.log(x + lax.sqrt(x * x + one))
if issubdtype(_dtype(result), complexfloating):
return result
a = abs(x)
sqrt_max_value = onp.sqrt(finfo(_dtype(x)).max)
log2 = lax._const(a, onp.log(2))
return lax.select(a < sqrt_max_value, result, lax.sign(x) * (lax.log(a) + log2))
defjvp(arcsinh, lambda g, ans, x: g / lax.sqrt(lax._const(x, 1) + square(x)))
@_wraps(onp.arccosh)
@jit
@lax._upcast_fp16_for_computation
def arccosh(x):
# acosh(x) = log(x + sqrt((x + 1) * (x - 1))) if x < sqrt_max_value
# log(x) + log(2) otherwise
x, = _promote_dtypes_inexact(x)
one = lax._const(x, 1)
result = lax.log(x + lax.sqrt((x + one) * (x - one)))
if issubdtype(_dtype(result), complexfloating):
return result
sqrt_max_value = onp.sqrt(finfo(_dtype(x)).max)
log2 = lax._const(x, onp.log(2))
return lax.select(x < sqrt_max_value, result, lax.log(x) + log2)
@_wraps(onp.arctanh)
def arctanh(x):
# atanh(x) = 0.5 * log((1 + x) / (1 - x))
x, = _promote_dtypes_inexact(x)
one = lax._const(x, 1)
result = lax._const(x, 0.5) * lax.log((one + x) / (one - x))
if issubdtype(_dtype(result), complexfloating):
return result
return lax.select(abs(x) <= 1, result, lax.full_like(x, onp.nan))
@_wraps(onp.transpose)
def transpose(a, axes=None):
axes = onp.arange(ndim(a))[::-1] if axes is None else axes
return lax.transpose(a, axes)
@_wraps(onp.rot90)
def rot90(m, k=1, axes=(0, 1)):
ax1, ax2 = axes
ax1 = _canonicalize_axis(ax1, m.ndim)
ax2 = _canonicalize_axis(ax2, m.ndim)
if ax1 == ax2:
raise ValueError("Axes must be different") # same as numpy error
k = k % 4
if k == 0:
return m
elif k == 2:
return flip(flip(m, ax1), ax2)
else:
perm = list(range(m.ndim))
perm[ax1], perm[ax2] = perm[ax2], perm[ax1]
if k == 1:
return transpose(flip(m, ax2), perm)
else:
return flip(transpose(m, perm), ax2)
@_wraps(onp.flip)
def flip(m, axis=None):
if axis is None:
return lax.rev(m, list(range(len(m.shape))))
return lax.rev(m, [_canonicalize_axis(axis, len(m.shape))])
@_wraps(onp.fliplr)
def fliplr(m):
return flip(m, 1)
@_wraps(onp.flipud)
def flipud(m):
return flip(m, 0)
@_wraps(onp.conjugate)
def conjugate(x):
return lax.conj(x) if iscomplexobj(x) else x
conj = conjugate
@_wraps(onp.imag)
def imag(val):
return lax.imag(val) if iscomplexobj(val) else zeros_like(val)
@_wraps(onp.real)
def real(val):
return lax.real(val) if iscomplexobj(val) else val
@_wraps(onp.iscomplex)
def iscomplex(x):
i = imag(x)
return lax.ne(i, lax._const(i, 0))
@_wraps(onp.isreal)
def isreal(x):
i = imag(x)
return lax.eq(i, lax._const(i, 0))
@_wraps(onp.angle)
def angle(z):
re = real(z)
im = imag(z)
dtype = _dtype(re)
if not issubdtype(dtype, inexact) or (
issubdtype(_dtype(z), floating) and ndim(z) == 0):
dtype = dtypes.canonicalize_dtype(float_)
re = lax.convert_element_type(re, dtype)
im = lax.convert_element_type(im, dtype)
return lax.atan2(im, re)
@_wraps(onp.diff)
def diff(a, n=1, axis=-1):
if not isinstance(a, ndarray) or a.ndim == 0:
return a
if n == 0:
return a
if n < 0:
raise ValueError(
"order must be non-negative but got " + repr(n))
nd = a.ndim
slice1 = [slice(None)] * nd
slice2 = [slice(None)] * nd
slice1[axis] = slice(1, None)
slice2[axis] = slice(None, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
op = not_equal if a.dtype == onp.bool_ else subtract
for _ in range(n):
a = op(a[slice1], a[slice2])
return a
@_wraps(onp.isrealobj)
def isrealobj(x):
return not iscomplexobj(x)
@_wraps(onp.reshape)
def reshape(a, newshape, order="C"):
try:
return a.reshape(newshape, order=order) # forward to method for ndarrays
except AttributeError:
return _reshape(a, newshape, order=order)
def _compute_newshape(a, newshape):
"""Fixes a -1 value in newshape, if present."""
# other errors, like having more than one -1, are caught downstream
newsize = _prod(newshape)
if newsize < 0:
fix = a.size // -newsize
return [d if d != -1 else fix for d in newshape]
else:
return newshape
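# Illustrative note (not part of the library): worked example of the -1
# resolution above. For a.size == 24 and newshape == (2, -1, 3), _prod(newshape)
# is -6, so fix = 24 // 6 = 4 and the resolved shape is (2, 4, 3).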
def _reshape(a, newshape, order="C"):
computed_newshape = _compute_newshape(a, newshape)
if order == "C":
return lax.reshape(a, computed_newshape, None)
elif order == "F":
dims = onp.arange(ndim(a))[::-1]
return lax.reshape(a, computed_newshape[::-1], dims).T
elif order == "A":
raise NotImplementedError("np.reshape order=A is not implemented.")
else:
raise ValueError("Unexpected value for 'order' argument: {}.".format(order))
def _reshape_method(a, *newshape, **kwargs):
order = kwargs.pop("order", "C")
if len(kwargs) == 1:
invalid_kwarg, = kwargs
msg = "'{}' is an invalid keyword argument for this function"
raise TypeError(msg.format(invalid_kwarg)) # same as NumPy error
elif kwargs:
    invalid_kwargs = "'{}'".format("', '".join(kwargs))
msg = "{} are invalid keyword arguments for this function"
raise TypeError(msg.format(invalid_kwargs)) # different from NumPy error
if len(newshape) == 1 and not isinstance(newshape[0], int):
newshape = newshape[0]
return _reshape(a, newshape, order=order)
@_wraps(onp.ravel)
def ravel(a, order="C"):
if order == "K":
raise NotImplementedError("Ravel not implemented for order='K'.")
return reshape(a, (size(a),), order)
@_wraps(onp.squeeze)
def squeeze(a, axis=None):
if 1 not in shape(a):
return a
if axis is None:
newshape = [d for d in shape(a) if d != 1]
else:
if isinstance(axis, int):
axis = (axis,)
axis = frozenset(_canonicalize_axis(i, ndim(a)) for i in axis)
newshape = [d for i, d in enumerate(shape(a))
if d != 1 or i not in axis]
return lax.reshape(a, newshape)
@_wraps(onp.expand_dims)
def expand_dims(a, axis):
shape = _shape(a)
axis = _canonicalize_axis(axis, ndim(a) + 1)
return lax.reshape(a, shape[:axis] + (1,) + shape[axis:])
@_wraps(onp.swapaxes)
def swapaxes(a, axis1, axis2):
perm = onp.arange(ndim(a))
perm[axis1], perm[axis2] = perm[axis2], perm[axis1]
return lax.transpose(a, perm)
@_wraps(onp.moveaxis)
def moveaxis(a, source, destination):
if isinstance(source, int):
source = (source,)
if isinstance(destination, int):
destination = (destination,)
source = tuple(_canonicalize_axis(i, ndim(a)) for i in source)
destination = tuple(_canonicalize_axis(i, ndim(a)) for i in destination)
if len(source) != len(destination):
raise ValueError("Inconsistent number of elements: {} vs {}"
.format(len(source), len(destination)))
perm = [i for i in range(ndim(a)) if i not in source]
for dest, src in sorted(zip(destination, source)):
perm.insert(dest, src)
return lax.transpose(a, perm)
@_wraps(onp.isclose)
def isclose(a, b, rtol=1e-05, atol=1e-08):
a, b = _promote_args("isclose", asarray(a), asarray(b))
dtype = _dtype(a)
if issubdtype(dtype, inexact):
if issubdtype(dtype, complexfloating):
dtype = _complex_elem_type(dtype)
rtol = lax.convert_element_type(rtol, dtype)
atol = lax.convert_element_type(atol, dtype)
out = lax.le(
lax.abs(lax.sub(a, b)),
lax.add(atol, lax.mul(rtol, lax.abs(b))))
return _maybe_numpy_1_13_isclose_behavior(a, out)
else:
return lax.eq(a, b)
numpy_version = tuple(map(int, onp.version.version.split('.')[:2]))
if numpy_version < (1, 14):
# see discussion at https://github.com/numpy/numpy/pull/9720
def _maybe_numpy_1_13_isclose_behavior(a, out):
if size(out) == 1 and issubdtype(_dtype(a), complexfloating):
return lax.reshape(out, (1,))
else:
return out
else:
def _maybe_numpy_1_13_isclose_behavior(a, out):
return out
# The `jit` on `where` exists to avoid materializing constants in cases like
# `np.where(np.zeros(1000), 7, 4)`. In op-by-op mode, we don't want to
# materialize the broadcast forms of scalar arguments.
@jit
def _where(condition, x=None, y=None):
if x is None or y is None:
raise ValueError("Either both or neither of the x and y arguments should "
"be provided to jax.numpy.where, got {} and {}."
.format(x, y))
if not issubdtype(_dtype(condition), bool_):
condition = lax.ne(condition, zeros_like(condition))
x, y = _promote_dtypes(x, y)
condition, x, y = broadcast_arrays(condition, x, y)
return lax.select(condition, x, y) if onp.size(x) else x
_WHERE_DOC = """\
At present, JAX does not support JIT-compilation of the single-argument form
of :py:func:`jax.numpy.where` because its output shape is data-dependent. The
three-argument form does not have a data-dependent shape and can be JIT-compiled
successfully.
"""
@_wraps(onp.where, update_doc=False, lax_description=_WHERE_DOC)
def where(condition, x=None, y=None):
if x is None and y is None:
return nonzero(asarray(condition))
else:
return _where(condition, x, y)
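# Illustrative note (not part of the library): usage contrast for the two forms.
# The three-argument call selects elementwise and has a static output shape, so
# it composes with `jit`; the one-argument call returns index arrays whose
# length depends on the data and therefore cannot be traced:
#   where(x > 0, x, 0.0)   # shape follows the broadcast inputs
#   where(x > 0)           # tuple of index arrays with data-dependent length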
@_wraps(onp.select)
def select(condlist, choicelist, default=0):
if len(condlist) != len(choicelist):
msg = "condlist must have length equal to choicelist ({} vs {})"
raise ValueError(msg.format(len(condlist), len(choicelist)))
if len(condlist) == 0:
raise ValueError("condlist must be non-empty")
choices = _promote_dtypes(default, *choicelist)
choicelist = choices[1:]
output = choices[0]
for cond, choice in zip(condlist[::-1], choicelist[::-1]):
output = where(cond, choice, output)
return output
def broadcast_arrays(*args):
"""Like Numpy's broadcast_arrays but doesn't return views."""
shapes = [shape(arg) for arg in args]
if len(set(shapes)) == 1:
return [arg if isinstance(arg, ndarray) or isscalar(arg) else array(arg)
for arg in args]
result_shape = lax.broadcast_shapes(*shapes)
return [broadcast_to(arg, result_shape) for arg in args]
def broadcast_to(arr, shape):
"""Like Numpy's broadcast_to but doesn't necessarily return views."""
arr = arr if isinstance(arr, ndarray) else array(arr)
shape = tuple(map(int, shape)) # check that shape is concrete
arr_shape = _shape(arr)
if arr_shape == shape:
return arr
else:
nlead = len(shape) - len(arr_shape)
compatible = onp.equal(arr_shape, shape[nlead:]) | onp.equal(arr_shape, 1)
if nlead < 0 or not onp.all(compatible):
msg = "Incompatible shapes for broadcasting: {} and requested shape {}"
raise ValueError(msg.format(arr_shape, shape))
diff, = onp.where(onp.not_equal(shape[nlead:], arr_shape))
new_dims = tuple(range(nlead)) + tuple(nlead + diff)
kept_dims = tuple(onp.delete(onp.arange(len(shape)), new_dims))
return lax.broadcast_in_dim(squeeze(arr, diff), shape, kept_dims)
@_wraps(onp.split)
def split(ary, indices_or_sections, axis=0):
dummy_val = onp.broadcast_to(0, ary.shape) # zero strides
subarrays = onp.split(dummy_val, indices_or_sections, axis) # shapes
split_indices = onp.cumsum([0] + [onp.shape(sub)[axis] for sub in subarrays])
starts, ends = [0] * ndim(ary), shape(ary)
_subval = lambda x, i, v: subvals(x, [(i, v)])
return [lax.slice(ary, _subval(starts, axis, start), _subval(ends, axis, end))
for start, end in zip(split_indices[:-1], split_indices[1:])]
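# Illustrative sketch (not part of the library): `split` leans on NumPy's own
# index handling by splitting a zero-strided dummy of the same shape (which
# allocates no data) purely to learn the per-piece lengths, then cuts the real
# array with lax.slice. The same shape bookkeeping in plain NumPy, using the
# module's `onp` alias and the hypothetical name _split_sizes_reference:
def _split_sizes_reference(ary_shape, indices_or_sections, axis=0):
  dummy = onp.broadcast_to(0, ary_shape)                 # zero strides, no copy
  pieces = onp.split(dummy, indices_or_sections, axis)   # NumPy validates and derives shapes
  return [p.shape[axis] for p in pieces]                 # length of each slice along `axis`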
def _split_on_axis(onp_fun, axis):
@_wraps(onp_fun, update_doc=False)
def f(ary, indices_or_sections):
return split(ary, indices_or_sections, axis=axis)
return f
vsplit = _split_on_axis(onp.vsplit, axis=0)
hsplit = _split_on_axis(onp.hsplit, axis=1)
dsplit = _split_on_axis(onp.dsplit, axis=2)
@_wraps(onp.clip)
def clip(a, a_min=None, a_max=None):
if a_min is None and a_max is None:
raise "At most one of a_min and a_max may be None"
if a_min is not None:
if _dtype(a_min) != _dtype(a):
a_min = lax.convert_element_type(a_min, _dtype(a))
a = maximum(a_min, a)
if a_max is not None:
if _dtype(a_max) != _dtype(a):
a_max = lax.convert_element_type(a_max, _dtype(a))
a = minimum(a_max, a)
return a
def _dtype_info(dtype):
"""Helper function for to get dtype info needed for clipping."""
if issubdtype(dtype, integer):
return iinfo(dtype)
return finfo(dtype)
def _round_to_nearest_even(x):
half = lax._const(x, 0.5)
one = lax._const(x, 1)
round_val = lax.floor(x)
fraction = x - round_val
nearest_even_int = lax.sub(
round_val, lax.mul(lax._const(x, 2), lax.floor(lax.mul(half, x))))
is_odd = lax.eq(nearest_even_int, one)
return lax.select(
lax.bitwise_or(lax.gt(fraction, half),
lax.bitwise_and(lax.eq(fraction, half), is_odd)),
lax.add(round_val, one), round_val)
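# Illustrative note (not part of the library): _round_to_nearest_even breaks
# ties toward the even integer (banker's rounding), matching onp.round:
#   0.5 -> 0.0,  1.5 -> 2.0,  2.5 -> 2.0,  -0.5 -> -0.0
# Non-tie values round normally, e.g. 2.4 -> 2.0 and 2.6 -> 3.0.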
@_wraps(onp.round, update_doc=False)
def round(a, decimals=0):
dtype = _dtype(a)
if issubdtype(dtype, integer):
if decimals < 0:
raise NotImplementedError(
"integer np.round not implemented for decimals < 0")
return a # no-op on integer types
def _round_float(x):
if decimals == 0:
return _round_to_nearest_even(x)
# TODO(phawkins): the strategy of rescaling the value isn't necessarily a
# good one since we may be left with an incorrectly rounded value at the
    # end due to precision problems. As a workaround for float16, do the
    # computation in float32 and convert back to float16 at the end.
x = lax.convert_element_type(x, onp.float32) if dtype == onp.float16 else x
factor = _constant_like(x, 10 ** decimals)
out = lax.div(_round_to_nearest_even(lax.mul(x, factor)), factor)
return lax.convert_element_type(out, dtype) if dtype == onp.float16 else out
if issubdtype(dtype, complexfloating):
return lax.complex(_round_float(lax.real(a)), _round_float(lax.imag(a)))
else:
return _round_float(a)
around = round
@_wraps(onp.fix)
def fix(x, out=None):
if out is not None:
raise ValueError("fix does not support the `out` argument.")
zero = lax._const(x, 0)
return where(lax.ge(x, zero), lax.floor(x), lax.ceil(x))
@_wraps(onp.isfinite)
def isfinite(x):
dtype = _dtype(x)
if issubdtype(dtype, floating):
return lax.is_finite(x)
elif issubdtype(dtype, complexfloating):
return lax.bitwise_and(lax.is_finite(real(x)), lax.is_finite(imag(x)))
else:
return full_like(x, True, dtype=bool_)
@_wraps(onp.isinf)
def isinf(x):
dtype = _dtype(x)
if issubdtype(dtype, floating):
return lax.eq(lax.abs(x), _constant_like(x, inf))
elif issubdtype(dtype, complexfloating):
re = lax.real(x)
im = lax.imag(x)
return lax.bitwise_or(lax.eq(lax.abs(re), _constant_like(re, inf)),
lax.eq(lax.abs(im), _constant_like(im, inf)))
else:
return full_like(x, False, dtype=bool_)
def _isposneginf(infinity, x):
dtype = _dtype(x)
if issubdtype(dtype, floating):
return lax.eq(x, _constant_like(x, infinity))
elif issubdtype(dtype, complexfloating):
raise ValueError("isposinf/isneginf are not well defined for complex types")
else:
return full_like(x, False, dtype=bool_)
isposinf = _wraps(onp.isposinf)(partial(_isposneginf, inf))
isneginf = _wraps(onp.isneginf)(partial(_isposneginf, -inf))
@_wraps(onp.isnan)
def isnan(x):
return lax.bitwise_and(lax.bitwise_not(isfinite(x)),
lax.bitwise_not(isinf(x)))
@_wraps(onp.nan_to_num)
def nan_to_num(x, copy=True):
del copy
dtype = _dtype(x)
if issubdtype(dtype, complexfloating):
return lax.complex(nan_to_num(lax.real(x)), nan_to_num(lax.imag(x)))
info = finfo(dtypes.canonicalize_dtype(dtype))
x = where(isnan(x), _constant_like(x, 0), x)
x = where(isposinf(x), _constant_like(x, info.max), x)
x = where(isneginf(x), _constant_like(x, info.min), x)
return x
### Reducers
def _make_reduction(np_fun, op, init_val, preproc=None, bool_op=None,
upcast_f16_for_computation=False):
"""Creates reduction function given a binary operation and monoid identity."""
bool_op = bool_op or op
@_wraps(np_fun)
def reduction(a, axis=None, dtype=None, out=None, keepdims=False):
if out is not None:
raise ValueError("reduction does not support the `out` argument.")
a = a if isinstance(a, ndarray) else asarray(a)
a = preproc(a) if preproc else a
dims = _reduction_dims(a, axis)
result_dtype = dtype or _dtype(np_fun(onp.ones((), dtype=_dtype(a))))
if upcast_f16_for_computation and issubdtype(result_dtype, inexact):
computation_dtype = promote_types(result_dtype, float32)
else:
computation_dtype = result_dtype
a = lax.convert_element_type(a, computation_dtype)
result = lax.reduce(a, _reduction_init_val(a, init_val),
op if computation_dtype != onp.bool_ else bool_op, dims)
if keepdims:
shape_with_singletons = subvals(shape(a), zip(dims, (1,) * len(dims)))
result = lax.reshape(result, shape_with_singletons)
return lax.convert_element_type(result, dtype or result_dtype)
return reduction
def _reduction_dims(a, axis):
if axis is None:
return onp.arange(ndim(a))
elif isinstance(axis, (onp.ndarray, tuple, list)):
return tuple(_canonicalize_axis(x, ndim(a)) for x in axis)
elif isinstance(axis, int):
return (_canonicalize_axis(axis, ndim(a)),)
else:
raise TypeError("Unexpected type of axis argument: {}".format(type(axis)))
def _reduction_init_val(a, init_val):
a_dtype = dtypes.canonicalize_dtype(_dtype(a))
if a_dtype == 'bool':
return onp.array(init_val > 0, dtype=a_dtype)
try:
return onp.array(init_val, dtype=a_dtype)
except OverflowError:
assert issubdtype(a_dtype, integer)
sign, info = onp.sign(init_val), iinfo(a_dtype)
return onp.array(info.min if sign < 0 else info.max, dtype=a_dtype)
_cast_to_bool = partial(lax.convert_element_type, new_dtype=bool_)
sum = _make_reduction(onp.sum, lax.add, 0, upcast_f16_for_computation=True,
bool_op=lax.bitwise_or)
product = prod = _make_reduction(onp.prod, lax.mul, 1, bool_op=lax.bitwise_and,
upcast_f16_for_computation=True)
amax = max = _make_reduction(onp.max, lax.max, -onp.inf)
amin = min = _make_reduction(onp.min, lax.min, onp.inf)
all = alltrue = _make_reduction(onp.all, lax.bitwise_and, True, _cast_to_bool)
any = sometrue = _make_reduction(onp.any, lax.bitwise_or, False, _cast_to_bool)
@_wraps(onp.mean)
def mean(a, axis=None, dtype=None, out=None, keepdims=False):
if out is not None:
raise ValueError("mean does not support the `out` argument.")
if axis is None:
normalizer = size(a)
else:
normalizer = onp.prod(onp.take(shape(a), axis))
if dtype is None:
if issubdtype(_dtype(a), bool_) or issubdtype(_dtype(a), integer):
dtype = float_
else:
dtype = _dtype(a)
return lax.div(
sum(a, axis, dtype=dtype, keepdims=keepdims),
lax.convert_element_type(normalizer, dtype))
@_wraps(onp.average)
def average(a, axis=None, weights=None, returned=False):
a = asarray(a)
if weights is None: # Treat all weights as 1
avg = mean(a, axis=axis)
if axis is None:
weights_sum = full((), size(a), dtype=avg.dtype)
else:
weights_sum = full_like(avg, a.shape[axis], dtype=avg.dtype)
else:
weights = asarray(weights)
if issubdtype(a.dtype, inexact):
out_dtype = result_type(a.dtype, weights.dtype)
else:
out_dtype = result_type(a.dtype, weights.dtype, float_)
out_dtype = dtypes.canonicalize_dtype(out_dtype)
a_shape = shape(a)
a_ndim = len(a_shape)
weights_shape = shape(weights)
axis = None if axis is None else _canonicalize_axis(axis, a_ndim)
if a_shape != weights_shape:
# Make sure the dimensions work out
if axis is None:
raise ValueError("Axis must be specified when shapes of a and "
"weights differ.")
if len(weights_shape) != 1:
raise ValueError("1D weights expected when shapes of a and "
"weights differ.")
if weights_shape[0] != a_shape[axis]:
raise ValueError("Length of weights not "
"compatible with specified axis.")
weights = broadcast_to(weights, (a_ndim - 1) * (1,) + weights_shape)
weights = moveaxis(weights, -1, axis)
weights_sum = sum(weights, axis=axis, dtype=out_dtype)
avg = sum(multiply(a, weights), axis=axis, dtype=out_dtype) / weights_sum
if returned:
if avg.shape != weights_sum.shape:
weights_sum = broadcast_to(weights_sum, avg.shape)
return avg, weights_sum
return avg
@_wraps(onp.var)
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
if out is not None:
raise ValueError("var does not support the `out` argument.")
a_dtype = _dtype(a)
if dtype:
a_dtype = promote_types(a_dtype, dtype)
else:
if not issubdtype(a_dtype, inexact):
dtype = a_dtype = float_
else:
dtype = _complex_elem_type(a_dtype)
a_dtype = promote_types(a_dtype, float32)
a_mean = mean(a, axis, dtype=a_dtype, keepdims=True)
centered = a - a_mean
if issubdtype(centered.dtype, complexfloating):
centered = lax.real(lax.mul(centered, lax.conj(centered)))
else:
centered = lax.square(centered)
if axis is None:
normalizer = size(a)
else:
normalizer = onp.prod(onp.take(shape(a), axis))
normalizer = normalizer - ddof
result = sum(centered, axis, keepdims=keepdims)
out = lax.div(result, lax.convert_element_type(normalizer, result.dtype))
return lax.convert_element_type(out, dtype)
@_wraps(onp.std)
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
if out is not None:
raise ValueError("std does not support the `out` argument.")
return sqrt(var(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims))
@_wraps(onp.ptp)
def ptp(a, axis=None, out=None, keepdims=False):
if out is not None:
raise ValueError("ptp does not support the `out` argument.")
x = amax(a, axis=axis, keepdims=keepdims)
y = amin(a, axis=axis, keepdims=keepdims)
return lax.sub(x, y)
@_wraps(onp.allclose)
def allclose(a, b, rtol=1e-05, atol=1e-08):
return all(isclose(a, b, rtol, atol))
@_wraps(onp.count_nonzero)
def count_nonzero(a, axis=None):
return sum(lax.ne(a, _constant_like(a, 0)), axis=axis,
dtype=dtypes.canonicalize_dtype(onp.int_))
_NONZERO_DOC = """\
At present, JAX does not support JIT-compilation of :py:func:`jax.numpy.nonzero`
because its output shape is data-dependent.
"""
@_wraps(onp.nonzero, lax_description=_NONZERO_DOC)
def nonzero(a):
# Note: this function cannot be jitted because its output has a dynamic
# shape.
a = atleast_1d(a)
dims = shape(a)
ndims = len(dims)
ds = [lax.broadcasted_iota(int_, dims + (1,), i) for i in range(ndims)]
d = concatenate(ds, axis=-1)
indexes = d[a != 0]
return tuple(indexes[..., i] for i in range(ndims))
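# Illustrative note (not part of the library): the index arrays come from
# broadcasting an iota along each axis and boolean-masking with `a != 0`, so
# the result length equals the number of nonzero entries and is unknown at
# trace time. For a = [[0, 3], [4, 0]] this yields
# (array([0, 1]), array([1, 0])), the row and column coordinates of the
# nonzero values.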
def _make_nan_reduction(onp_reduction, np_reduction, init_val, nan_if_all_nan):
@_wraps(onp_reduction)
def nan_reduction(a, axis=None, out=None, keepdims=False, **kwargs):
out = np_reduction(where(isnan(a), _reduction_init_val(a, init_val), a),
axis=axis, out=out, keepdims=keepdims, **kwargs)
if nan_if_all_nan:
return where(all(isnan(a), axis=axis, keepdims=keepdims),
_constant_like(a, nan), out)
else:
return out
return nan_reduction
nanmin = _make_nan_reduction(onp.nanmin, min, inf, nan_if_all_nan=True)
nanmax = _make_nan_reduction(onp.nanmax, max, -inf, nan_if_all_nan=True)
nansum = _make_nan_reduction(onp.nansum, sum, 0, nan_if_all_nan=False)
nanprod = _make_nan_reduction(onp.nanprod, prod, 1, nan_if_all_nan=False)
@_wraps(onp.nanmean)
def nanmean(a, axis=None, dtype=None, out=None, keepdims=False):
if out is not None:
raise ValueError("nanmean does not support the `out` argument.")
if issubdtype(_dtype(a), bool_) or issubdtype(_dtype(a), integer):
return mean(a, axis, dtype, out, keepdims)
if dtype is None:
dtype = _dtype(a)
nan_mask = logical_not(isnan(a))
normalizer = sum(nan_mask, axis=axis, dtype=int32, keepdims=keepdims)
normalizer = lax.convert_element_type(normalizer, dtype)
td = lax.div(nansum(a, axis, dtype=dtype, keepdims=keepdims), normalizer)
return td
def _make_cumulative_reduction(onp_reduction, window_reduce, init_val,
squash_nan=False):
# We want to allow XLA to fuse the pad and reduce-window operators to
# avoid materializing the padded output.
# Consider removing `jit` once again if reduce-window is generalized to
# support arbitrary padding.
@partial(jit, static_argnums=(1, 2))
def _cumulative_reduction(a, axis, dtype):
if axis is None or isscalar(a):
a = ravel(a)
axis = 0
a_shape = list(shape(a))
num_dims = len(a_shape)
if axis < 0:
axis = axis + num_dims
if axis < 0 or axis >= num_dims:
raise ValueError(
"axis {} is out of bounds for array of dimension {}".format(
axis, num_dims))
if squash_nan:
a = where(isnan(a), _constant_like(a, init_val), a)
if dtype:
a = lax.convert_element_type(a, dtype)
if a_shape[axis] == 0:
return a
padding = [(0, 0, 0)] * num_dims
padding[axis] = (a_shape[axis] - 1, 0, 0)
a = lax.pad(a, _constant_like(a, init_val), padding)
strides = [1] * num_dims
window_dims = [1] * num_dims
window_dims[axis] = a_shape[axis]
return window_reduce(
a, window_dims, strides, xla_client.PaddingType.VALID)
@_wraps(onp_reduction)
def cumulative_reduction(a, axis=None, dtype=None):
# jit doesn't support kwargs as static_args.
return _cumulative_reduction(a, axis, dtype)
return cumulative_reduction
cumsum = _make_cumulative_reduction(
onp.cumsum, lax._reduce_window_sum, 0, squash_nan=False)
cumprod = _make_cumulative_reduction(
onp.cumprod, lax._reduce_window_prod, 1, squash_nan=False)
cumproduct = cumprod
nancumsum = _make_cumulative_reduction(
onp.nancumsum, lax._reduce_window_sum, 0, squash_nan=True)
nancumprod = _make_cumulative_reduction(
onp.nancumprod, lax._reduce_window_prod, 1, squash_nan=True)
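# Illustrative sketch (not part of the library): the pad-then-reduce-window
# formulation used by the cumulative reductions above, shown for a 1D,
# non-empty cumulative sum in plain NumPy (module's `onp` alias) under the
# hypothetical name _cumsum_1d_reference. Padding n-1 identity elements on the
# left and sliding a full-length window with stride 1 makes window i cover
# exactly elements 0..i of the input.
def _cumsum_1d_reference(x):
  x = onp.asarray(x)
  n = x.shape[0]
  padded = onp.concatenate([onp.zeros(n - 1, x.dtype), x])     # n-1 identity elements
  return onp.array([padded[i:i + n].sum() for i in range(n)])  # length-n window, stride 1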
### Array-creation functions
def _check_no_padding(axis_padding, mode):
if (axis_padding[0] > 0 or axis_padding[1] > 0):
msg = "Cannot apply '{}' padding to empty axis"
raise ValueError(msg.format(mode))
@partial(jit, static_argnums=(1, 2))
def _pad(array, pad_width, mode, constant_values):
array = asarray(array)
nd = ndim(array)
pad_width = onp.broadcast_to(onp.asarray(pad_width), (nd, 2))
if any(pad_width < 0):
raise ValueError("index can't contain negative values")
if mode == "constant":
constant_values = broadcast_to(asarray(constant_values), (nd, 2))
constant_values = lax.convert_element_type(constant_values, array.dtype)
for i in xrange(nd):
widths = [(0, 0, 0)] * nd
widths[i] = (pad_width[i, 0], 0, 0)
array = lax.pad(array, constant_values[i, 0], widths)
widths[i] = (0, pad_width[i, 1], 0)
array = lax.pad(array, constant_values[i, 1], widths)
return array
elif mode == "wrap":
for i in xrange(nd):
if array.shape[i] == 0:
_check_no_padding(pad_width[i], mode)
continue
size = array.shape[i]
repeats, (left_remainder, right_remainder) = _divmod(pad_width[i], size)
total_repeats = repeats.sum() + 1
parts = []
if left_remainder:
parts += [lax.slice_in_dim(array, size - left_remainder, size, axis=i)]
parts += total_repeats * [array]
if right_remainder:
parts += [lax.slice_in_dim(array, 0, right_remainder, axis=i)]
array = lax.concatenate(parts, dimension=i)
return array
elif mode in ("symmetric", "reflect"):
for i in xrange(nd):
if array.shape[i] == 0:
_check_no_padding(pad_width[i], mode)
continue
n = array.shape[i]
rarray = lax.rev(array, dimensions=(i,))
offset = 1 if (mode == "reflect" and n > 1) else 0
def build_padding(padding, forward):
xs = []
delta = n - offset
while padding > delta:
padding -= delta
p = array if forward else rarray
xs.append(lax.slice_in_dim(p, offset, n, axis=i))
forward = not forward
if padding > 0:
x = lax.slice_in_dim(array if forward else rarray, offset,
padding + offset, axis=i)
xs.append(x)
return xs
parts = reversed(build_padding(pad_width[i, 0], forward=True))
parts = [lax.rev(x, dimensions=(i,)) for x in parts]
parts += [array]
parts += build_padding(pad_width[i, 1], forward=False)
array = lax.concatenate(parts, dimension=i)
return array
else:
msg = "Unimplemented padding mode '{}' for np.pad."
raise NotImplementedError(msg.format(mode))
@_wraps(onp.pad)
def pad(array, pad_width, mode='constant', constant_values=0):
return _pad(array, pad_width, mode, constant_values)
@_wraps(onp.stack)
def stack(arrays, axis=0):
if not len(arrays):
raise ValueError("Need at least one array to stack.")
shape0 = shape(arrays[0])
axis = _canonicalize_axis(axis, len(shape0) + 1)
new_shape = list(shape0)
new_shape.insert(axis, 1)
new_arrays = []
for a in arrays:
if shape(a) != shape0:
raise ValueError("All input arrays must have the same shape.")
new_arrays.append(reshape(a, new_shape))
return concatenate(new_arrays, axis=axis)
@_wraps(onp.tile)
def tile(a, reps):
if isinstance(reps, int):
reps = (reps,)
a = reshape(a, (1,) * (len(reps) - ndim(a)) + shape(a))
reps = (1,) * (ndim(a) - len(reps)) + tuple(reps)
for i, rep in enumerate(reps):
a = concatenate([a] * int(rep), axis=i)
return a
@_wraps(onp.concatenate)
def concatenate(arrays, axis=0):
if not len(arrays):
raise ValueError("Need at least one array to concatenate.")
if ndim(arrays[0]) == 0:
raise ValueError("Zero-dimensional arrays cannot be concatenated.")
axis = _canonicalize_axis(axis, ndim(arrays[0]))
arrays = _promote_dtypes(*arrays)
# lax.concatenate can be slow to compile for wide concatenations, so form a
# tree of concatenations as a workaround especially for op-by-op mode.
# (https://github.com/google/jax/issues/653).
k = 16
while len(arrays) > 1:
arrays = [lax.concatenate(arrays[i:i+k], axis)
for i in range(0, len(arrays), k)]
return arrays[0]
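# Illustrative note (not part of the library): with k = 16 the loop above turns
# a flat 256-way concatenation into 16 concatenations of 16 arrays plus one
# final concatenation of the 16 partial results, i.e. a shallow tree instead of
# a single very wide op, which keeps XLA compile times manageable.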
@_wraps(onp.vstack)
def vstack(tup):
return concatenate([atleast_2d(m) for m in tup], axis=0)
row_stack = vstack
@_wraps(onp.hstack)
def hstack(tup):
arrs = [atleast_1d(m) for m in tup]
if arrs[0].ndim == 1:
return concatenate(arrs, 0)
return concatenate(arrs, 1)
@_wraps(onp.dstack)
def dstack(tup):
return concatenate([atleast_3d(m) for m in tup], axis=2)
@_wraps(onp.column_stack)
def column_stack(tup):
arrays = []
for v in tup:
arr = array(v)
if arr.ndim < 2:
arr = arr.reshape((-1, 1))
arrays.append(arr)
return concatenate(arrays, 1)
@_wraps(onp.atleast_1d, update_doc=False)
def atleast_1d(*arys):
if len(arys) == 1:
arr = array(arys[0])
return arr if ndim(arr) >= 1 else reshape(arr, -1)
else:
return [atleast_1d(arr) for arr in arys]
@_wraps(onp.atleast_2d, update_doc=False)
def atleast_2d(*arys):
if len(arys) == 1:
arr = array(arys[0])
return arr if ndim(arr) >= 2 else reshape(arr, (1, -1))
else:
return [atleast_2d(arr) for arr in arys]
@_wraps(onp.atleast_3d, update_doc=False)
def atleast_3d(*arys):
if len(arys) == 1:
arr = array(arys[0])
if ndim(arr) <= 1:
arr = reshape(arr, (1, -1, 1))
elif ndim(arr) == 2:
arr = reshape(arr, shape(arr) + (1,))
return arr
else:
return [atleast_3d(arr) for arr in arys]
@_wraps(onp.array)
def array(object, dtype=None, copy=True, order="K", ndmin=0):
if order is not None and order != "K":
raise NotImplementedError("Only implemented for order='K'")
lax._check_user_dtype_supported(dtype, "array")
if isinstance(object, ndarray):
if dtype and _dtype(object) != dtypes.canonicalize_dtype(dtype):
out = lax.convert_element_type(object, dtype)
else:
out = device_put(object)
elif isscalar(object):
out = lax.reshape(object, ())
if dtype and _dtype(out) != dtypes.canonicalize_dtype(dtype):
out = lax.convert_element_type(out, dtype)
elif hasattr(object, '__array__'):
# this case is for duck-typed handling of objects that implement `__array__`
out = array(object.__array__(), dtype and dtypes.canonicalize_dtype(dtype))
elif isinstance(object, (list, tuple)):
if object:
out = stack([array(elt, dtype=dtype) for elt in object])
else:
out = onp.array([], dtype or float_)
else:
try:
view = memoryview(object)
except TypeError:
pass # `object` does not support the buffer interface.
else:
return array(onp.asarray(view), dtype, copy)
raise TypeError("Unexpected input type for array: {}".format(type(object)))
if ndmin > ndim(out):
out = lax.reshape(out, (1,) * (ndmin - ndim(out)) + shape(out))
return out
@_wraps(onp.asarray)
def asarray(a, dtype=None, order=None):
lax._check_user_dtype_supported(dtype, "asarray")
return array(a, dtype=dtype, copy=False, order=order)
@_wraps(onp.zeros_like)
def zeros_like(x, dtype=None):
lax._check_user_dtype_supported(dtype, "zeros_like")
return lax.full_like(x, 0, dtype)
@_wraps(onp.ones_like)
def ones_like(x, dtype=None):
lax._check_user_dtype_supported(dtype, "ones_like")
return lax.full_like(x, 1, dtype)
@_wraps(onp.full)
def full(shape, fill_value, dtype=None):
lax._check_user_dtype_supported(dtype, "full")
shape = (shape,) if ndim(shape) == 0 else shape
return lax.full(shape, fill_value, dtype)
@_wraps(onp.full_like)
def full_like(a, fill_value, dtype=None):
lax._check_user_dtype_supported(dtype, "full_like")
return lax.full_like(a, fill_value, dtype)
@_wraps(onp.zeros)
def zeros(shape, dtype=None):
if isinstance(shape, types.GeneratorType):
raise TypeError("expected sequence object with len >= 0 or a single integer")
lax._check_user_dtype_supported(dtype, "zeros")
dtype = float_ if dtype is None else dtype
shape = (shape,) if ndim(shape) == 0 else shape
return lax.full(shape, 0, dtype)
@_wraps(onp.ones)
def ones(shape, dtype=None):
if isinstance(shape, types.GeneratorType):
raise TypeError("expected sequence object with len >= 0 or a single integer")
lax._check_user_dtype_supported(dtype, "ones")
dtype = float_ if dtype is None else dtype
shape = (shape,) if ndim(shape) == 0 else shape
return lax.full(shape, 1, dtype)
@_wraps(onp.array_equal)
def array_equal(a1, a2):
try:
a1, a2 = asarray(a1), asarray(a2)
except Exception:
return False
return shape(a1) == shape(a2) and all(asarray(a1 == a2))
# We can't create uninitialized arrays in XLA; use zeros for empty.
empty_like = zeros_like
empty = zeros
@_wraps(onp.eye)
def eye(N, M=None, k=0, dtype=None):
lax._check_user_dtype_supported(dtype, "eye")
dtype = float_ if dtype is None else dtype
M = N if M is None else M
k = int(k)
if N < 0 or M < 0:
msg = "negative dimensions are not allowed, got {} and {}"
raise ValueError(msg.format(N, M))
if k is not None:
k_dtype = _dtype(k)
if not issubdtype(k_dtype, integer):
msg = "eye argument `k` must be of integer dtype, got {}"
raise TypeError(msg.format(k_dtype))
return lax._eye(dtype, (N, M), k)
@_wraps(onp.identity)
def identity(n, dtype=None):
lax._check_user_dtype_supported(dtype, "identity")
return eye(n, dtype=dtype)
@_wraps(onp.arange)
def arange(start, stop=None, step=None, dtype=None):
lax._check_user_dtype_supported(dtype, "arange")
if stop is None and step is None:
dtype = dtype or _dtype(start)
return lax.iota(dtype, start) # avoids materializing
else:
return array(onp.arange(start, stop=stop, step=step, dtype=dtype))
def _wrap_numpy_nullary_function(f):
"""Adapts `f` to return a DeviceArray instead of an onp.ndarray.
`f` cannot have any non-static array arguments.
"""
@_wraps(f, update_doc=False)
def wrapper(*args, **kwargs):
return asarray(f(*args, **kwargs))
return wrapper
@_wraps(onp.linspace)
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,
axis=0):
"""Implementation of linspace differentiable in start and stop args."""
lax._check_user_dtype_supported(dtype, "linspace")
if num < 0:
raise ValueError("Number of samples, %s, must be non-negative." % num)
dt = result_type(start, stop, float(num))
dtype = dtype or dt
bounds_shape = list(lax.broadcast_shapes(shape(start), shape(stop)))
broadcast_start = broadcast_to(start, bounds_shape)
axis = len(bounds_shape) + axis + 1 if axis < 0 else axis
bounds_shape.insert(axis, 1)
iota_shape = [1,] * len(bounds_shape)
iota_shape[axis] = num
div = (num - 1) if endpoint else num
if num > 1:
delta = lax.convert_element_type(stop - start, dt) / div
out = (reshape(broadcast_start, bounds_shape) +
reshape(lax.iota(dt, num), iota_shape) *
reshape(delta, bounds_shape))
elif num == 1:
delta = nan
out = reshape(broadcast_start, bounds_shape)
else: # num == 0 degenerate case, match onp behavior
empty_shape = list(lax.broadcast_shapes(shape(start), shape(stop)))
empty_shape.insert(axis, 0)
delta = nan
out = reshape(array([], dtype=dt), empty_shape)
if retstep:
return lax.convert_element_type(out, dtype), delta
else:
return lax.convert_element_type(out, dtype)
@_wraps(onp.logspace)
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0):
"""Implementation of logspace differentiable in start and stop args."""
dtype = dtype or result_type(start, stop, float_)
computation_dtype = promote_types(dtype, float_)
start = asarray(start, dtype=computation_dtype)
stop = asarray(stop, dtype=computation_dtype)
lin = linspace(start, stop, num,
endpoint=endpoint, retstep=False, dtype=None, axis=axis)
return lax.convert_element_type(power(base, lin), dtype)
@_wraps(onp.geomspace)
def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0):
"""Implementation of geomspace differentiable in start and stop args."""
dtype = dtype or result_type(start, stop, float(num), zeros((), dtype))
computation_dtype = promote_types(dtype, float32)
start = asarray(start, dtype=computation_dtype)
stop = asarray(stop, dtype=computation_dtype)
# follow the numpy geomspace convention for negative and complex endpoints
signflip = 1 - (1 - sign(real(start))) * (1 - sign(real(stop))) // 2
res = signflip * logspace(log10(signflip * start),
log10(signflip * stop), num,
endpoint=endpoint, base=10.0,
dtype=computation_dtype, axis=0)
if axis != 0:
res = moveaxis(res, 0, axis)
return lax.convert_element_type(res, dtype)
@_wraps(onp.meshgrid)
def meshgrid(*args, **kwargs):
indexing = kwargs.get("indexing", "xy")
sparse = kwargs.get("sparse", False)
copy = kwargs.get("copy", True)
if not copy:
raise ValueError("jax.numpy.meshgrid only supports copy=True")
args = list(args)
if indexing == "xy":
if len(args) >= 2:
args[0], args[1] = args[1], args[0]
elif indexing != "ij":
raise ValueError("Valid values for indexing are 'xy' and 'ij', got {}"
.format(indexing))
shape = []
for i, a in enumerate(args):
args[i] = a = asarray(a)
if len(a.shape) != 1:
msg = "Arguments to jax.numpy.meshgrid must be 1D, got shape {}"
raise ValueError(msg.format(a.shape))
shape.append(1 if sparse else a.shape[0])
output = []
for i, a in enumerate(args):
a = asarray(a)
s = shape
if sparse:
s = list(s)
s[i] = a.shape[0]
output.append(lax.broadcast_in_dim(a, s, (i,)))
if indexing == "xy" and len(args) >= 2:
output[0], output[1] = output[1], output[0]
return output
@_wraps(onp.ix_)
def ix_(*args):
n = len(args)
output = []
for i, a in enumerate(args):
a = asarray(a)
if len(a.shape) != 1:
msg = "Arguments to jax.numpy.ix_ must be 1-dimensional, got shape {}"
raise ValueError(msg.format(a.shape))
if _dtype(a) == bool_:
raise NotImplementedError(
"Boolean arguments to jax.numpy.ix_ are not implemented")
shape = [1] * n
shape[i] = a.shape[0]
if a.size == 0:
# Numpy uses an integer index type for empty arrays.
output.append(lax.full(shape, onp.zeros((), onp.intp)))
else:
output.append(lax.reshape(a, shape))
return tuple(output)
def _repeat_scalar(a, repeats, axis=None):
if not isscalar(repeats):
raise NotImplementedError(
"_repeat_scalar implementation only supports scalar repeats")
if axis is None or isscalar(a):
a = ravel(a)
axis = 0
a_shape = list(shape(a))
num_dims = len(a_shape)
if axis < 0:
axis = axis + num_dims
if axis < 0 or axis >= num_dims:
raise ValueError(
"axis {} is out of bounds for array of dimension {}".format(
axis, num_dims))
# Broadcasts to [..., X, repeats, ...] and reshapes to [..., X * repeats, ...]
broadcast_shape = list(a_shape)
broadcast_shape.insert(axis + 1, repeats)
broadcast_dims = onp.concatenate((onp.arange(0, axis + 1),
onp.arange(axis + 2, num_dims + 1)))
a_shape[axis] *= repeats
return lax.reshape(
lax.broadcast_in_dim(a, broadcast_shape, broadcast_dims),
a_shape)
@_wraps(onp.repeat)
def repeat(a, repeats, axis=None):
'''
:param repeats: int or array of ints
'''
# use `_repeat_scalar` when possible
if isscalar(repeats):
return _repeat_scalar(a, repeats, axis)
repeats_raveled = ravel(array(repeats)) # make sure it's jax's array type
if size(repeats_raveled) == 1:
return _repeat_scalar(a, list(repeats_raveled)[0], axis)
if axis is None or isscalar(a):
a = ravel(a)
axis = 0
# repeats must match the dimension along the requested axis
a_shape = list(a.shape)
n = a_shape[axis]
if size(repeats_raveled) != n:
raise ValueError("repeats shape {} does not match the dimension on axis {}".format(
repeats_raveled.shape, n
))
# calculating the new shape
total = sum(repeats_raveled)
new_shape = a_shape[:]
new_shape[axis] = total
a_flattened = ravel(a)
  # Main algorithm: break the raveled input into chunks (each chunk is the unit
  # being repeated), tile the repeats so there is one count per chunk, then
  # repeat each chunk according to its count and reassemble.
chunks = product(a_shape[:axis+1]).item()
a_splitted = split(a_flattened, chunks)
repeats_tiled = tile(repeats_raveled, chunks // len(repeats_raveled))
ret = array([], dtype=a.dtype)
for i, repeat in enumerate(repeats_tiled):
if not isinstance(repeat, int):
repeat = repeat.item()
if repeat != 0:
ret = concatenate((ret, tile(a_splitted[i], repeat)))
return reshape(ret, new_shape)
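# Illustrative note (not part of the library): worked example of the chunk/tile
# algorithm above. For a = [[1, 2], [3, 4]], repeats = [1, 2], axis = 0:
# chunks = prod(a_shape[:1]) = 2, so the raveled array splits into [1, 2] and
# [3, 4]; the repeats tile to [1, 2]; concatenating one copy of the first chunk
# and two of the second gives [1, 2, 3, 4, 3, 4], which reshapes to the
# expected [[1, 2], [3, 4], [3, 4]].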
@_wraps(onp.tri)
def tri(N, M=None, k=0, dtype=None):
lax._check_user_dtype_supported(dtype, "tri")
M = M if M is not None else N
dtype = dtype or float32
return lax._tri(dtype, (N, M), k)
@_wraps(onp.tril)
def tril(m, k=0):
m_shape = shape(m)
if len(m_shape) < 2:
raise ValueError("Argument to jax.numpy.tril must be at least 2D")
mask = tri(*m_shape[-2:], k=k, dtype=bool)
return lax.select(lax.broadcast(mask, m_shape[:-2]), m, zeros_like(m))
@_wraps(onp.triu, update_doc=False)
def triu(m, k=0):
m_shape = shape(m)
if len(m_shape) < 2:
raise ValueError("Argument to jax.numpy.triu must be at least 2D")
mask = tri(*m_shape[-2:], k=k - 1, dtype=bool)
return lax.select(lax.broadcast(mask, m_shape[:-2]), zeros_like(m), m)
@_wraps(onp.trace)
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
if out:
raise NotImplementedError("The 'out' argument to trace is not supported.")
lax._check_user_dtype_supported(dtype, "trace")
axis1 = _canonicalize_axis(axis1, ndim(a))
axis2 = _canonicalize_axis(axis2, ndim(a))
a_shape = shape(a)
if dtype is None:
dtype = _dtype(a)
if issubdtype(dtype, integer):
default_int = dtypes.canonicalize_dtype(onp.int_)
if iinfo(dtype).bits < iinfo(default_int).bits:
dtype = default_int
  # Move the axis1 and axis2 dimensions to the end.
perm = [i for i in range(len(a_shape)) if i != axis1 and i != axis2]
perm = perm + [axis1, axis2]
a = lax.transpose(a, perm)
# Mask out the diagonal and reduce.
a = where(eye(a_shape[axis1], a_shape[axis2], k=offset, dtype=bool),
a, zeros_like(a))
return sum(a, axis=(-2, -1), dtype=dtype)
def _wrap_indices_function(f):
@_wraps(f, update_doc=False)
def wrapper(*args, **kwargs):
return tuple(asarray(x) for x in f(*args, **kwargs))
return wrapper
tril_indices = _wrap_indices_function(onp.tril_indices)
triu_indices = _wrap_indices_function(onp.triu_indices)
mask_indices = _wrap_indices_function(onp.mask_indices)
@_wraps(onp.diag_indices)
def diag_indices(n, ndim=2):
if n < 0:
raise ValueError("n argument to diag_indices must be nonnegative, got {}"
.format(n))
if ndim < 0:
raise ValueError("ndim argument to diag_indices must be nonnegative, got {}"
.format(ndim))
return (lax.iota(int_, n),) * ndim
@_wraps(onp.diagonal)
def diagonal(a, offset=0, axis1=0, axis2=1):
a_shape = shape(a)
a_ndims = len(a_shape)
# Move the two dimensions to the end.
axis1 = _canonicalize_axis(axis1, a_ndims)
axis2 = _canonicalize_axis(axis2, a_ndims)
perm = [i for i in range(a_ndims) if i != axis1 and i != axis2]
perm = perm + [axis1, axis2]
a = lax.transpose(a, perm)
# Mask out the diagonal and reduce over one of the axes
a = where(eye(a_shape[axis1], a_shape[axis2], k=offset, dtype=bool),
a, zeros_like(a))
reduce_axis = -2 if offset < 0 else -1
d = sum(a, axis=reduce_axis, dtype=_dtype(a))
# Slice out the correct diagonal size.
diag_size = _max(0, _min(a_shape[axis1] + _min(offset, 0),
a_shape[axis2] - _max(offset, 0)))
return lax.slice_in_dim(d, 0, diag_size, axis=-1)
@_wraps(onp.diag)
def diag(v, k=0):
v_shape = shape(v)
if len(v_shape) == 1:
zero = lambda x: lax.full_like(x, shape=(), fill_value=0)
n = v_shape[0] + _abs(k)
v = lax.pad(v, zero(v), ((_max(0, k), _max(0, -k), 0),))
return where(eye(n, k=k, dtype=bool), v, zeros_like(v))
elif len(v_shape) == 2:
return diagonal(v, offset=k)
else:
raise ValueError("diag input must be 1d or 2d")
@_wraps(onp.polyval)
def polyval(p, x):
if isinstance(p, onp.poly1d):
p = onp.asarray(p)
if isinstance(x, onp.poly1d):
y = 0
else:
y = zeros_like(x)
for i in range(len(p)):
y = y * x + p[i]
return y
@_wraps(onp.append)
def append(arr, values, axis=None):
if axis is None:
return concatenate([ravel(arr), ravel(values)], 0)
else:
return concatenate([arr, values], axis=axis)
### Tensor contraction operations
_PRECISION_DOC = """\
In addition to the original NumPy arguments listed below, also supports
``precision`` for extra control over matrix-multiplication precision
on supported devices. See :py:func:`jax.lax.dot` for details.
"""
@_wraps(onp.dot, lax_description=_PRECISION_DOC)
def dot(a, b, precision=None): # pylint: disable=missing-docstring
_check_arraylike("dot", a, b)
a, b = _promote_dtypes(a, b)
a_ndim, b_ndim = ndim(a), ndim(b)
if a_ndim == 0 or b_ndim == 0:
return lax.mul(a, b)
if _max(a_ndim, b_ndim) <= 2:
return lax.dot(a, b, precision=precision)
if b_ndim == 1:
contract_dims = ((a_ndim - 1,), (0,))
else:
contract_dims = ((a_ndim - 1,), (b_ndim - 2,))
batch_dims = ((), ())
return lax.dot_general(a, b, (contract_dims, batch_dims), precision)
@_wraps(onp.matmul, lax_description=_PRECISION_DOC)
def matmul(a, b, precision=None): # pylint: disable=missing-docstring
_check_arraylike("matmul", a, b)
a_is_vec, b_is_vec = (ndim(a) == 1), (ndim(b) == 1)
a = lax.reshape(a, (1,) + shape(a)) if a_is_vec else a
b = lax.reshape(b, shape(b) + (1,)) if b_is_vec else b
a, b = _promote_dtypes(a, b)
batch_shape = lax.broadcast_shapes(shape(a)[:-2], shape(b)[:-2])
a = broadcast_to(a, batch_shape + shape(a)[-2:])
b = broadcast_to(b, batch_shape + shape(b)[-2:])
batch_dims = tuple(range(len(batch_shape)))
dim_numbers = (((ndim(a) - 1,), (ndim(b) - 2,)), (batch_dims, batch_dims))
result = lax.dot_general(a, b, dim_numbers, precision)
if a_is_vec or b_is_vec:
m, n = shape(result)[-2:]
new_m = () if a_is_vec else (m,)
new_n = () if b_is_vec else (n,)
return lax.reshape(result, batch_shape + new_m + new_n)
else:
return result
@_wraps(onp.vdot, lax_description=_PRECISION_DOC)
def vdot(a, b, precision=None):
if issubdtype(_dtype(a), complexfloating):
a = conj(a)
return dot(a.ravel(), b.ravel(), precision=precision)
@_wraps(onp.tensordot, lax_description=_PRECISION_DOC)
def tensordot(a, b, axes=2, precision=None):
_check_arraylike("tensordot", a, b)
a_ndim = ndim(a)
b_ndim = ndim(b)
if a_ndim < 1 or b_ndim < 1:
msg = "tensordot requires a.ndim and b.dim to be at least 1, got {} and {}."
raise TypeError(msg.format(ndim(a), ndim(b)))
a, b = _promote_dtypes(a, b)
if type(axes) is int:
if axes > _min(a_ndim, b_ndim):
msg = "Number of tensordot axes (axes {}) exceeds input ranks ({} and {})"
      raise ValueError(msg.format(axes, a.shape, b.shape))
contracting_dims = tuple(range(a_ndim - axes, a_ndim)), tuple(range(axes))
elif type(axes) in (list, tuple) and len(axes) == 2:
ax1, ax2 = axes
if type(ax1) == type(ax2) == int:
contracting_dims = ((_canonicalize_axis(ax1, a_ndim),),
(_canonicalize_axis(ax2, b_ndim),))
elif type(ax1) in (list, tuple) and type(ax2) in (list, tuple):
if len(ax1) != len(ax2):
msg = "tensordot requires axes lists to have equal length, got {} and {}."
raise TypeError(msg.format(ax1, ax2))
contracting_dims = (tuple(_canonicalize_axis(i, a_ndim) for i in ax1),
tuple(_canonicalize_axis(i, b_ndim) for i in ax2))
else:
msg = ("tensordot axes argument must be an int, a pair of ints, or a pair "
"of lists/tuples of ints.")
raise TypeError(msg)
return lax.dot_general(a, b, (contracting_dims, ((), ())),
precision=precision)
@_wraps(onp.einsum, lax_description=_PRECISION_DOC)
def einsum(*operands, **kwargs):
optimize = kwargs.pop('optimize', 'auto')
optimize = 'greedy' if optimize is True else optimize
precision = kwargs.pop('precision', None)
if kwargs:
msg = 'invalid keyword arguments for einsum: {}'
raise TypeError(msg.format(', '.join(kwargs)))
# using einsum_call=True here is an internal api for opt_einsum
operands, contractions = opt_einsum.contract_path(
*operands, einsum_call=True, use_blas=True, optimize=optimize)
contractions = tuple(data[:3] for data in contractions)
return _einsum(operands, contractions, precision)
@_wraps(onp.einsum_path)
def einsum_path(subscripts, *operands, **kwargs):
optimize = kwargs.pop('optimize', 'greedy')
# using einsum_call=True here is an internal api for opt_einsum
return opt_einsum.contract_path(subscripts, *operands, optimize=optimize)
@partial(jit, static_argnums=(1, 2))
def _einsum(operands, contractions, precision):
operands = list(_promote_dtypes(*operands))
def sum(x, axes):
return lax.reduce(x, onp.array(0, x.dtype),
lax.add if x.dtype != bool_ else lax.bitwise_or, axes)
def sum_uniques(operand, names, uniques):
if uniques:
axes = [names.index(name) for name in uniques]
operand = sum(operand, axes)
names = removechars(names, uniques)
return operand, names
def sum_repeats(operand, names, counts, keep_names):
for name, count in counts.items():
if count > 1:
axes = [i for i, n in enumerate(names) if n == name]
eye = lax._delta(operand.dtype, operand.shape, axes)
if name not in keep_names:
operand = sum(operand * eye, axes)
names = names.replace(name, '')
else:
operand = sum(operand * eye, axes[:-1])
names = names.replace(name, '', count - 1)
return operand, names
for operand_indices, contracted_names, einstr in contractions:
input_str, result_names = einstr.split('->')
input_names = input_str.split(',')
# switch on the number of operands to be processed in this loop iteration.
# every case here sets 'operand' and 'names'.
if len(operand_indices) == 1:
operand = operands.pop(operand_indices[0])
names, = input_names
counts = collections.Counter(names)
# sum out unique contracted indices with a single reduce-sum
uniques = [name for name in contracted_names if counts[name] == 1]
operand, names = sum_uniques(operand, names, uniques)
# for every repeated index, do a contraction against an identity matrix
operand, names = sum_repeats(operand, names, counts, result_names)
elif len(operand_indices) == 2:
lhs, rhs = map(operands.pop, operand_indices)
lhs_counts, rhs_counts = map(collections.Counter, input_names)
lhs_names, rhs_names = input_names
# sum out unique contracted indices in lhs and rhs
lhs_uniques = [name for name in contracted_names
if lhs_counts[name] == 1 and rhs_counts[name] == 0]
lhs, lhs_names = sum_uniques(lhs, lhs_names, lhs_uniques)
rhs_uniques = [name for name in contracted_names
if rhs_counts[name] == 1 and lhs_counts[name] == 0]
rhs, rhs_names = sum_uniques(rhs, rhs_names, rhs_uniques)
# for every repeated index, contract against an identity matrix
lhs, lhs_names = sum_repeats(lhs, lhs_names, lhs_counts,
result_names + rhs_names)
rhs, rhs_names = sum_repeats(rhs, rhs_names, rhs_counts,
result_names + lhs_names)
contracted_names = contracted_names & (set(lhs_names) | set(rhs_names))
batch_names = (set(lhs_names) & set(rhs_names)) - contracted_names
lhs_batch, rhs_batch = unzip2((lhs_names.find(n), rhs_names.find(n))
for n in batch_names)
# NOTE(mattjj): this can fail non-deterministically in python3, maybe
# due to opt_einsum
assert _all(name in lhs_names and name in rhs_names and
lhs.shape[lhs_names.index(name)] == rhs.shape[rhs_names.index(name)]
for name in contracted_names)
# move batch dims to the front (required by lax.dot_general, and easier)
batch_dims = tuple(range(len(batch_names)))
if lhs_batch != rhs_batch or set(lhs_batch) != set(batch_dims):
lhs = moveaxis(lhs, lhs_batch, batch_dims)
lhs_names = _movechars(lhs_names, lhs_batch, batch_dims)
rhs = moveaxis(rhs, rhs_batch, batch_dims)
rhs_names = _movechars(rhs_names, rhs_batch, batch_dims)
batch_names = ''.join(batch_names)
else:
batch_dims = tuple(lhs_batch)
batch_names = ''.join(lhs_names[i] for i in range(len(lhs_names))
if i in batch_dims)
# contract using lax.dot_general
lhs_cont, rhs_cont = unzip2((lhs_names.index(n), rhs_names.index(n))
for n in contracted_names)
bdims = tuple(range(len(batch_dims)))
dimension_numbers = [(lhs_cont, rhs_cont), (bdims, bdims)]
operand = lax.dot_general(lhs, rhs, dimension_numbers, precision)
deleted_names = batch_names + ''.join(contracted_names)
names = (batch_names + removechars(lhs_names, deleted_names)
+ removechars(rhs_names, deleted_names))
else:
raise NotImplementedError # if this is actually reachable, open an issue!
# the resulting 'operand' with axis labels 'names' should be a permutation
# of the desired result
assert len(names) == len(result_names) == len(set(names))
assert set(names) == set(result_names)
if names != result_names:
perm = tuple([names.index(name) for name in result_names])
operand = lax.transpose(operand, perm)
operands.append(operand) # used in next iteration
return operands[0]
def _movechars(s, src, dst):
"""Helper for einsum string munging, like moveaxis on identifier strings."""
chars = [c for i, c in enumerate(s) if i not in src]
for i, j in sorted(zip(dst, src)):
chars.insert(i, s[j])
return ''.join(chars)
@_wraps(onp.inner, lax_description=_PRECISION_DOC)
def inner(a, b, precision=None):
if ndim(a) == 0 or ndim(b) == 0:
return a * b
return tensordot(a, b, (-1, -1), precision=precision)
@_wraps(onp.outer)
def outer(a, b, out=None):
if out:
raise NotImplementedError("The 'out' argument to outer is not supported.")
a, b = _promote_dtypes(a, b)
return ravel(a)[:, None] * ravel(b)
@partial(jit, static_argnums=(2, 3, 4))
def _cross(a, b, axisa, axisb, axisc):
a = moveaxis(a, axisa, -1)
b = moveaxis(b, axisb, -1)
if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3):
raise ValueError("Dimension must be either 2 or 3 for cross product")
if a.shape[-1] == 2 and b.shape[-1] == 2:
return a[..., 0] * b[..., 1] - a[..., 1] * b[..., 0]
a0 = a[..., 0]
a1 = a[..., 1]
a2 = a[..., 2] if a.shape[-1] == 3 else zeros_like(a0)
b0 = b[..., 0]
b1 = b[..., 1]
b2 = b[..., 2] if b.shape[-1] == 3 else zeros_like(b0)
c = array([a1 * b2 - a2 * b1, a2 * b0 - a0 * b2, a0 * b1 - a1 * b0])
return moveaxis(c, 0, axisc)
@_wraps(onp.cross)
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
if axis is not None:
axisa = axis
axisb = axis
axisc = axis
return _cross(a, b, axisa, axisb, axisc)
@_wraps(onp.kron)
def kron(a, b):
a, b = _promote_dtypes(a, b)
if ndim(a) < ndim(b):
a = reshape(a, (1,) * (ndim(b) - ndim(a)) + shape(a))
elif ndim(b) < ndim(a):
b = reshape(b, (1,) * (ndim(a) - ndim(b)) + shape(b))
a_reshaped = reshape(a, [i for d in shape(a) for i in (d, 1)])
b_reshaped = reshape(b, [i for d in shape(b) for i in (1, d)])
out_shape = tuple(onp.multiply(shape(a), shape(b)))
return reshape(lax.mul(a_reshaped, b_reshaped), out_shape)
@_wraps(onp.vander)
def vander(x, N=None, increasing=False):
x = asarray(x)
dtype = _dtype(x)
if ndim(x) != 1:
raise ValueError("x must be a one-dimensional array")
x_shape = shape(x)
N = N or x_shape[0]
if N < 0:
raise ValueError("N must be nonnegative")
iota = lax.iota(dtype, N)
if not increasing:
iota = lax.sub(lax._const(iota, N - 1), iota)
return power(x[..., None], iota)
### Misc
@_wraps(onp.argmax)
def argmax(a, axis=None):
if axis is None:
a = ravel(a)
axis = 0
return _argminmax(max, a, axis)
@_wraps(onp.argmin)
def argmin(a, axis=None):
if axis is None:
a = ravel(a)
axis = 0
return _argminmax(min, a, axis)
# TODO(mattjj): redo this lowering with a call to variadic lax.reduce
def _argminmax(op, a, axis):
shape = [1] * a.ndim
shape[axis] = a.shape[axis]
idxs = lax.tie_in(a, arange(a.shape[axis])).reshape(shape)
maxval = iinfo(dtypes.canonicalize_dtype(idxs.dtype)).max
maxval = lax.tie_in(a, maxval)
mask_idxs = where(lax._eq_meet(a, op(a, axis, keepdims=True)), idxs, maxval)
return min(mask_idxs, axis)
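# Illustrative sketch (not part of the library): the masked-index reduction
# that _argminmax performs, shown for a 1D argmax in plain NumPy (module's
# `onp` alias) under the hypothetical name _argmax_1d_reference. Positions
# attaining the maximum keep their index, every other position gets a sentinel
# larger than any valid index, and the minimum of that array is the first
# attaining position (NumPy's tie-breaking rule),
# e.g. _argmax_1d_reference([3, 7, 7, 1]) == 1.
def _argmax_1d_reference(a):
  a = onp.asarray(a)
  idxs = onp.arange(a.shape[0])
  sentinel = onp.iinfo(idxs.dtype).max
  masked = onp.where(a == a.max(), idxs, sentinel)   # keep indices only at the max
  return masked.min()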
@_wraps(onp.sort)
def sort(a, axis=-1, kind='quicksort', order=None):
if kind != 'quicksort':
warnings.warn("'kind' argument to sort is ignored.")
if order is not None:
raise ValueError("'order' argument to sort is not supported.")
if axis is None:
return lax.sort(a.ravel(), 0)
else:
return lax.sort(a, _canonicalize_axis(axis, ndim(a)))
@_wraps(onp.argsort)
def argsort(a, axis=-1, kind='quicksort', order=None):
if kind != 'quicksort':
warnings.warn("'kind' argument to argsort is ignored.")
if order is not None:
raise ValueError("'order' argument to argsort is not supported.")
if axis is None:
return argsort(a.ravel(), 0)
else:
axis = _canonicalize_axis(axis, ndim(a))
iota = lax.broadcasted_iota(onp.int64, shape(a), axis)
_, perm = lax.sort_key_val(a, iota, dimension=axis)
return perm
@_wraps(onp.roll)
def roll(a, shift, axis=None):
a = asarray(a)
a_shape = shape(a)
if axis is None:
return lax.reshape(roll(ravel(a), shift, axis=0), a_shape)
a_ndim = len(a_shape)
shift = asarray(shift)
axis = onp.asarray(axis)
b_shape = lax.broadcast_shapes(shift.shape, axis.shape, (1,))
if len(b_shape) != 1:
msg = "'shift' and 'axis' arguments to roll must be scalars or 1D arrays"
raise ValueError(msg)
if b_shape[0] > a_ndim:
raise ValueError("More shifts/axes than dimensions of input to roll.")
for x, i in zip(broadcast_to(shift, b_shape),
onp.broadcast_to(axis, b_shape)):
i = _canonicalize_axis(i, a_ndim)
x = remainder(x, (a_shape[i] or 1))
a = lax.concatenate((a, a), i)
a = lax.dynamic_slice_in_dim(a, a_shape[i] - x, a_shape[i], axis=i)
return a
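# Example (illustrative, not from the original source): roll(array([1, 2, 3, 4, 5]), 2)
# returns [4, 5, 1, 2, 3]: the array is concatenated with itself and a dynamic slice of
# the original length is taken starting at a_shape[i] - shift.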
@_wraps(onp.take)
def take(a, indices, axis=None, out=None, mode=None):
if out:
raise NotImplementedError("The 'out' argument to np.take is not supported.")
a = asarray(a)
indices = asarray(indices)
if axis is None:
a = ravel(a)
axis = 0
axis = _canonicalize_axis(axis, ndim(a))
if mode == "raise":
# TODO(phawkins): we have no way to report out of bounds errors yet.
raise NotImplementedError("The 'raise' mode to np.take is not supported.")
elif mode == "wrap":
indices = mod(indices, _constant_like(indices, a.shape[axis]))
elif mode != "clip" and mode is not None:
raise ValueError("Invalid mode '{}' for np.take".format(mode))
index_dims = len(shape(indices))
slice_sizes = list(shape(a))
slice_sizes[axis] = 1
dnums = lax.GatherDimensionNumbers(
offset_dims=tuple(
list(range(axis)) +
list(range(axis + index_dims, len(a.shape) + index_dims - 1))),
collapsed_slice_dims=(axis,),
start_index_map=(axis,))
return lax.gather(a, indices[..., None], dimension_numbers=dnums,
slice_sizes=tuple(slice_sizes))
def _normalize_index(index, axis_size):
"""Normalizes an index value in the range [-N, N) to the range [0, N)."""
return lax.select(
lax.lt(index, _constant_like(index, 0)),
lax.add(index, _constant_like(index, axis_size)),
index)
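# Example (illustrative, not from the original source): with axis_size = 5, index 3 is
# returned unchanged while index -2 becomes 3; inputs are assumed to already lie in [-N, N).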
@partial(jit, static_argnums=(2,))
def _take_along_axis(arr, indices, axis):
if axis is None:
if ndim(indices) != 1:
msg = "take_along_axis indices must be 1D if axis=None, got shape {}"
raise ValueError(msg.format(indices.shape))
return take_along_axis(arr.ravel(), indices, 0)
rank = ndim(arr)
if rank != ndim(indices):
msg = "indices and arr must have the same number of dimensions; {} vs. {}"
raise ValueError(msg.format(ndim(indices), ndim(arr)))
axis = _canonicalize_axis(axis, rank)
def replace(tup, val):
lst = list(tup)
lst[axis] = val
return tuple(lst)
bcast_shape = lax.broadcast_shapes(replace(arr.shape, 1), replace(indices.shape, 1))
indices = broadcast_to(indices, replace(bcast_shape, indices.shape[axis]))
arr = broadcast_to(arr, replace(bcast_shape, arr.shape[axis]))
axis_size = arr.shape[axis]
arr_shape = replace(arr.shape, 1)
idx_shape = indices.shape
out_shape = lax.broadcast_shapes(idx_shape, arr_shape)
index_dims = [i for i, idx in enumerate(idx_shape) if i == axis or idx != 1]
gather_index_shape = tuple(onp.array(out_shape)[index_dims]) + (1,)
gather_indices = []
slice_sizes = []
offset_dims = []
start_index_map = []
collapsed_slice_dims = []
j = 0
for i in range(rank):
if i == axis:
indices = _normalize_index(indices, axis_size)
gather_indices.append(lax.reshape(indices, gather_index_shape))
slice_sizes.append(1)
start_index_map.append(i)
collapsed_slice_dims.append(i)
j += 1
elif idx_shape[i] != 1:
iota = lax.iota(_dtype(indices), out_shape[i])
iota = lax.tie_in(arr, iota)
iota = lax.broadcast_in_dim(iota, gather_index_shape, (j,))
gather_indices.append(iota)
slice_sizes.append(1)
start_index_map.append(i)
collapsed_slice_dims.append(i)
j += 1
else:
# If idx_shape[i] == 1, we can just take the entirety of the arr's axis
# and avoid forming an iota index.
offset_dims.append(i)
slice_sizes.append(arr_shape[i])
gather_indices = lax.concatenate(gather_indices, dimension=j)
dnums = lax.GatherDimensionNumbers(
offset_dims=tuple(offset_dims),
collapsed_slice_dims=tuple(collapsed_slice_dims),
start_index_map=tuple(start_index_map))
return lax.gather(arr, gather_indices, dnums, tuple(slice_sizes))
@_wraps(getattr(onp, "take_along_axis", None), update_doc=False)
def take_along_axis(arr, indices, axis):
return _take_along_axis(arr, indices, axis)
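# Illustrative use (not from the original source): pairing with argsort sorts along an axis.
# For arr = [[10, 30], [40, 20]] and idx = argsort(arr, axis=1) == [[0, 1], [1, 0]],
# take_along_axis(arr, idx, axis=1) == [[10, 30], [20, 40]].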
### Indexing
def _rewriting_take(arr, idx):
# Computes arr[idx].
# All supported cases of indexing can be implemented as an XLA gather,
# followed by an optional reverse and a reshape.
arr = asarray(arr)
treedef, static_idx, dynamic_idx = _split_index_for_jit(idx)
return _gather(arr, treedef, static_idx, dynamic_idx)
# TODO(phawkins): re-enable jit after fixing excessive recompilation for
# slice indexes (e.g., slice(0, 5, None), slice(10, 15, None), etc.).
# @partial(jit, static_argnums=(1, 2))
def _gather(arr, treedef, static_idx, dynamic_idx):
idx = _merge_static_and_dynamic_indices(treedef, static_idx, dynamic_idx)
indexer = _index_to_gather(shape(arr), idx) # shared with _scatter_update
y = arr
# Avoid calling gather if the slice shape is empty, both as a fast path and to
# handle cases like zeros(0)[array([], int32)].
if _prod(indexer.slice_shape) == 0:
return zeros(indexer.slice_shape, dtype=y.dtype)
# We avoid generating a gather when indexer.gather_indices.size is empty.
if indexer.gather_indices.size:
y = lax.gather(y, indexer.gather_indices, indexer.dnums,
indexer.gather_slice_shape)
# Reverses axes with negative strides.
if indexer.reversed_y_dims:
y = lax.rev(y, indexer.reversed_y_dims)
# This adds np.newaxis/None dimensions.
return lax.reshape(y, indexer.slice_shape)
_Indexer = collections.namedtuple("_Indexer", [
# The expected shape of the slice output.
"slice_shape",
# The slice shape to pass to lax.gather().
"gather_slice_shape",
# The gather indices to use.
"gather_indices",
# A GatherDimensionNumbers object describing the gather to perform.
"dnums",
# Slice dimensions that have negative strides, and so must be reversed after
# the gather.
"reversed_y_dims",
# For scatters, we must eliminate any axes created by `newaxis`, which
# are the following dimensions, which must be of size 1. For gathers, we
# simply reshape to `slice_shape` to introduce the new axes.
"newaxis_dims",
])
def _split_index_for_jit(idx):
"""Splits indices into necessarily-static and dynamic parts.
Used to pass indices into `jit`-ted function.
"""
  # Convert list indices to tuples where needed (a usage deprecated by NumPy).
idx = _eliminate_deprecated_list_indexing(idx)
# Expand any (concrete) boolean indices. We can then use advanced integer
# indexing logic to handle them.
idx = _expand_bool_indices(idx)
leaves, treedef = pytree.flatten(idx)
dynamic = [None] * len(leaves)
static = [None] * len(leaves)
for i, x in enumerate(leaves):
if x is Ellipsis:
static[i] = x
elif isinstance(x, slice):
# slice objects aren't hashable.
static[i] = (x.start, x.stop, x.step)
else:
dynamic[i] = x
return treedef, tuple(static), dynamic
def _merge_static_and_dynamic_indices(treedef, static_idx, dynamic_idx):
"""Recombines indices that were split by _split_index_for_jit."""
idx = []
for s, d in zip(static_idx, dynamic_idx):
if d is not None:
idx.append(d)
elif isinstance(s, tuple):
idx.append(slice(s[0], s[1], s[2]))
else:
idx.append(s)
return treedef.unflatten(idx)
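# Illustrative round trip (not from the original source): for idx = (3, slice(1, 4, None)),
# _split_index_for_jit returns static = (None, (1, 4, None)) and dynamic = [3, None];
# _merge_static_and_dynamic_indices(treedef, static, dynamic) rebuilds (3, slice(1, 4, None)).
# Slices stay on the static side (they aren't hashable and must not be traced), while
# scalars and arrays travel through jit as dynamic arguments.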
def _int(aval):
return not aval.shape and issubdtype(aval.dtype, integer)
def _index_to_gather(x_shape, idx):
# Remove ellipses and add trailing slice(None)s.
idx = _canonicalize_tuple_index(len(x_shape), idx)
# Check for advanced indexing:
# https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing
# Do the advanced indexing axes appear contiguously? If not, NumPy semantics
# move the advanced axes to the front.
advanced_axes_are_contiguous = False
advanced_indexes = None
# The positions of the advanced indexing axes in `idx`.
idx_advanced_axes = []
# The positions of the advanced indexes in x's shape.
# collapsed, after None axes have been removed. See below.
x_advanced_axes = None
if _is_advanced_int_indexer(idx):
idx_no_nones = [(i, d) for i, d in enumerate(idx) if d is not None]
advanced_pairs = (
(asarray(e), i, j) for j, (i, e) in enumerate(idx_no_nones)
if (isinstance(e, Sequence) or isinstance(e, ndarray)))
advanced_pairs = ((_normalize_index(e, x_shape[j]), i, j)
for e, i, j in advanced_pairs)
advanced_indexes, idx_advanced_axes, x_advanced_axes = zip(*advanced_pairs)
advanced_axes_are_contiguous = onp.all(onp.diff(idx_advanced_axes) == 1)
x_axis = 0 # Current axis in x.
y_axis = 0 # Current axis in y, before collapsing. See below.
collapsed_y_axis = 0 # Current axis in y, after collapsing.
# Scatter dimension numbers.
offset_dims = []
collapsed_slice_dims = []
start_index_map = []
gather_indices = onp.zeros((0,), dtype=int32) # use onp to save a compilation
# We perform three transformations to y before the scatter op, in order:
  # First, y is broadcast to slice_shape. In general `y` only needs to be
  # broadcast to the right shape.
slice_shape = []
# Next, y is squeezed to remove newaxis_dims. This removes np.newaxis/`None`
# indices, which the scatter cannot remove itself.
newaxis_dims = []
# Finally, we reverse reversed_y_dims to handle slices with negative strides.
reversed_y_dims = []
gather_slice_shape = []
for idx_pos, i in enumerate(idx):
# Handle the advanced indices here if:
# * the advanced indices were not contiguous and we are the start.
# * we are at the position of the first advanced index.
if (advanced_indexes is not None and
(advanced_axes_are_contiguous and idx_pos == idx_advanced_axes[0] or
not advanced_axes_are_contiguous and idx_pos == 0)):
advanced_indexes = broadcast_arrays(*advanced_indexes)
shape = advanced_indexes[0].shape
ndim = len(shape)
advanced_indexes = [
lax.convert_element_type(lax.reshape(a, shape + (1,)), int32)
for a in advanced_indexes]
# Broadcast gather_indices from [..., k] to [..., 1, 1, ..., 1, k].
gather_indices = lax.broadcast_in_dim(
gather_indices, onp.insert(gather_indices.shape, -1, shape),
tuple(range(gather_indices.ndim - 1)) + (gather_indices.ndim + ndim - 1,))
gather_indices = concatenate([gather_indices] + advanced_indexes, -1)
start_index_map.extend(x_advanced_axes)
collapsed_slice_dims.extend(x_advanced_axes)
slice_shape.extend(shape)
y_axis += ndim
collapsed_y_axis += ndim
# Per-index bookkeeping for advanced indexes.
if idx_pos in idx_advanced_axes:
x_axis += 1
gather_slice_shape.append(1)
continue
try:
abstract_i = core.get_aval(i)
except TypeError:
abstract_i = None
# Handle basic int indexes.
if (isinstance(abstract_i, ConcreteArray) or
isinstance(abstract_i, ShapedArray)) and _int(abstract_i):
i = _normalize_index(i, x_shape[x_axis])
i = lax.convert_element_type(i, int32)
i = broadcast_to(i, tuple(gather_indices.shape[:-1]) + (1,))
gather_indices = concatenate((gather_indices, i), -1)
collapsed_slice_dims.append(x_axis)
gather_slice_shape.append(1)
start_index_map.append(x_axis)
x_axis += 1
# Handle np.newaxis (None)
elif i is None:
slice_shape.append(1)
newaxis_dims.append(y_axis)
y_axis += 1
# Handle slice(None)
elif _is_slice_none(i):
slice_shape.append(x_shape[x_axis])
gather_slice_shape.append(x_shape[x_axis])
offset_dims.append(collapsed_y_axis)
collapsed_y_axis += 1
y_axis += 1
x_axis += 1
# Handle slice index (only static, otherwise an error is raised)
elif isinstance(i, slice):
if not _all(elt is None or type(core.get_aval(elt)) is ConcreteArray
for elt in (i.start, i.stop, i.step)):
msg = ("Array slice indices must have static start/stop/step to be used "
"with Numpy indexing syntax. Try lax.dynamic_slice/"
"dynamic_update_slice instead.")
raise IndexError(msg)
start, limit, stride, needs_rev = _static_idx(i, x_shape[x_axis])
if needs_rev:
reversed_y_dims.append(collapsed_y_axis)
if stride == 1:
i = lax.convert_element_type(start, int32)
i = broadcast_to(i, tuple(gather_indices.shape[:-1]) + (1,))
gather_indices = concatenate((gather_indices, i), -1)
slice_shape.append(limit - start)
gather_slice_shape.append(limit - start)
offset_dims.append(collapsed_y_axis)
start_index_map.append(x_axis)
else:
i = arange(start, limit, stride, dtype=int32)
size = i.shape[0]
slice_shape.append(size)
gather_slice_shape.append(1)
gather_indices_shape = tuple(gather_indices.shape[:-1]) + (size,)
i = lax.broadcast_in_dim(
i, shape=gather_indices_shape + (1,),
broadcast_dimensions=(len(gather_indices_shape) - 1,))
gather_indices = lax.broadcast_in_dim(
gather_indices,
shape=gather_indices_shape + (len(start_index_map),),
broadcast_dimensions=(
tuple(range(len(gather_indices_shape) - 1)) +
(len(gather_indices_shape),)))
gather_indices = concatenate(
(gather_indices, i), len(gather_indices_shape))
start_index_map.append(x_axis)
collapsed_slice_dims.append(x_axis)
collapsed_y_axis += 1
y_axis += 1
x_axis += 1
else:
if abstract_i and not (issubdtype(abstract_i.dtype, integer) or
issubdtype(abstract_i.dtype, bool_)):
msg = ("Indexer must have integer or boolean type, got indexer "
"with type {} at position {}, indexer value {}")
raise TypeError(msg.format(abstract_i.dtype.name, idx_pos, i))
msg = "Indexing mode not yet supported. Open a feature request!\n{}"
raise IndexError(msg.format(idx))
dnums = lax.GatherDimensionNumbers(
offset_dims = tuple(offset_dims),
collapsed_slice_dims = tuple(sorted(collapsed_slice_dims)),
start_index_map = tuple(start_index_map)
)
return _Indexer(
slice_shape=slice_shape,
newaxis_dims=tuple(newaxis_dims),
gather_slice_shape=gather_slice_shape,
reversed_y_dims=reversed_y_dims,
dnums=dnums,
gather_indices=gather_indices)
def _should_unpack_list_index(x):
"""Helper for _eliminate_deprecated_list_indexing."""
return (isinstance(x, ndarray) and onp.ndim(x) != 0
or isinstance(x, Sequence)
or isinstance(x, slice) or x is Ellipsis or x is None)
def _eliminate_deprecated_list_indexing(idx):
# "Basic slicing is initiated if the selection object is a non-array,
# non-tuple sequence containing slice objects, [Ellipses, or newaxis
# objects]". Detects this case and canonicalizes to a tuple. This case is
# deprecated by NumPy and exists for backward compatibility.
if not isinstance(idx, tuple):
if isinstance(idx, Sequence) and not isinstance(idx, ndarray):
if _any(_should_unpack_list_index(i) for i in idx):
idx = tuple(idx)
else:
idx = (idx,)
else:
idx = (idx,)
return idx
def _expand_bool_indices(idx):
"""Converts concrete bool indexes into advanced integer indexes."""
out = []
for i in idx:
try:
abstract_i = core.get_aval(i)
except TypeError:
abstract_i = None
if (isinstance(abstract_i, ShapedArray) and issubdtype(abstract_i.dtype, bool_)
or isinstance(i, list) and _all(not _shape(e) and issubdtype(_dtype(e), bool_)
for e in i)):
if isinstance(i, list):
i = array(i)
abstract_i = core.get_aval(i)
if not type(abstract_i) is ConcreteArray:
msg = ("Array boolean indices must be static (e.g. no dependence on an "
"argument to a jit or vmap function).")
raise IndexError(msg)
else:
out.extend(onp.where(i))
else:
out.append(i)
return tuple(out)
def _is_slice_none(idx):
  """Return True if idx is equal to slice(None), False otherwise."""
  if isinstance(idx, slice):
    return idx.start is None and idx.stop is None and idx.step is None
  return False
# TODO(mattjj): clean up this logic
def _is_advanced_int_indexer(idx):
"""Returns True if idx should trigger int array indexing, False otherwise."""
# https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing
assert isinstance(idx, tuple)
if _all(onp.ndim(elt) == 0 for elt in idx):
return False
return _all(e is None or e is Ellipsis or isinstance(e, slice)
or _is_int_arraylike(e) for e in idx)
def _is_int_arraylike(x):
"""Returns True if x is array-like with integer dtype, False otherwise."""
return (isinstance(x, int) and not isinstance(x, bool)
or issubdtype(getattr(x, "dtype", None), onp.integer)
or isinstance(x, (list, tuple)) and _all(_is_int_arraylike(e) for e in x))
def _canonicalize_tuple_index(arr_ndim, idx):
"""Helper to remove Ellipsis and add in the implicit trailing slice(None)."""
len_without_none = _sum(1 for e in idx if e is not None and e is not Ellipsis)
if len_without_none > arr_ndim:
msg = "Too many indices for array: {} non-None/Ellipsis indices for dim {}."
raise IndexError(msg.format(len_without_none, arr_ndim))
ellipses = (i for i, elt in enumerate(idx) if elt is Ellipsis)
ellipsis_index = next(ellipses, None)
if ellipsis_index is not None:
if next(ellipses, None) is not None:
msg = "Multiple ellipses (...) not supported: {}."
raise IndexError(msg.format(list(map(type, idx))))
colons = (slice(None),) * (arr_ndim - len_without_none)
idx = idx[:ellipsis_index] + colons + idx[ellipsis_index + 1:]
elif len_without_none < arr_ndim:
colons = (slice(None),) * (arr_ndim - len_without_none)
idx = tuple(idx) + colons
return idx
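# Example (illustrative, not from the original source): for a rank-3 array,
# (0, Ellipsis) expands to (0, slice(None), slice(None)); a plain (0,) gains the same
# two trailing slice(None) entries, so every axis of x is accounted for.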
def _static_idx(idx, size):
"""Helper function to compute the static slice start/limit/stride values."""
assert isinstance(idx, slice)
start, stop, step = idx.indices(size)
if (step < 0 and stop >= start) or (step > 0 and start >= stop):
return 0, 0, 1, False # sliced to size zero
if step > 0:
return start, stop, step, False
else:
k = (start - stop - 1) % (-step)
return stop + k + 1, start + 1, -step, True
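# Example (illustrative, not from the original source): _static_idx(slice(None, None, -2), 5)
# returns (0, 5, 2, True): gather indices 0, 2, 4 with stride 2, then reverse the result,
# which reproduces x[::-2] == [x[4], x[2], x[0]].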
blackman = _wrap_numpy_nullary_function(onp.blackman)
bartlett = _wrap_numpy_nullary_function(onp.bartlett)
hamming = _wrap_numpy_nullary_function(onp.hamming)
hanning = _wrap_numpy_nullary_function(onp.hanning)
# TODO: lower `kaiser` via lax to allow non-constant beta values.
kaiser = _wrap_numpy_nullary_function(onp.kaiser)
def _gcd_cond_fn(xs):
x1, x2 = xs
return any(x2 != 0)
def _gcd_body_fn(xs):
x1, x2 = xs
x1, x2 = (where(x2 != 0, x2, x1),
where(x2 != 0, lax.rem(x1, x2), lax._const(x2, 0)))
return (where(x1 < x2, x2, x1), where(x1 < x2, x1, x2))
@_wraps(getattr(onp, "gcd", None))
def gcd(x1, x2):
if (not issubdtype(_dtype(x1), integer) or
not issubdtype(_dtype(x2), integer)):
raise ValueError("Arguments to gcd must be integers.")
x1, x2 = _promote_dtypes(lax.abs(x1), lax.abs(x2))
x1, x2 = broadcast_arrays(x1, x2)
gcd, _ = lax.while_loop(_gcd_cond_fn, _gcd_body_fn, (x1, x2))
return gcd
@_wraps(getattr(onp, "lcm", None))
def lcm(x1, x2):
x1, x2 = _promote_dtypes(x1, x2)
d = gcd(x1, x2)
return where(d == 0, lax._const(d, 0),
lax.div(lax.abs(multiply(x1, x2)), d))
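# Worked example (illustrative, not from the original source): gcd(12, 18) iterates
# (12, 18) -> (18, 12) -> (12, 6) -> (6, 0) under the Euclidean step above and returns 6;
# lcm(4, 6) then gives |4 * 6| / gcd(4, 6) = 12.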
@_wraps(onp.cov)
def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
aweights=None):
msg = ("jax.numpy.cov not implemented for nontrivial {}. "
"Open a feature request at https://github.com/google/jax/issues !")
if y is not None: raise NotImplementedError(msg.format('y'))
# These next two are actually implemented, just not tested.
if fweights is not None: raise NotImplementedError(msg.format('fweights'))
if aweights is not None: raise NotImplementedError(msg.format('aweights'))
if m.ndim > 2:
raise ValueError("m has more than 2 dimensions") # same as numpy error
X = array(m, ndmin=2, dtype=dtypes.canonicalize_dtype(result_type(m, float_)))
if not rowvar and X.shape[0] != 1:
X = X.T
if X.shape[0] == 0:
return onp.array([]).reshape(0, 0)
if ddof is None:
ddof = 1 if bias == 0 else 0
w = None
if fweights is not None:
if onp.ndim(fweights) > 1:
raise RuntimeError("cannot handle multidimensional fweights")
if onp.shape(fweights)[0] != X.shape[1]:
raise RuntimeError("incompatible numbers of samples and fweights")
w = asarray(fweights)
if aweights is not None:
if onp.ndim(aweights) > 1:
raise RuntimeError("cannot handle multidimensional aweights")
if onp.shape(aweights)[0] != X.shape[1]:
raise RuntimeError("incompatible numbers of samples and aweights")
w = aweights if w is None else w * aweights
avg, w_sum = average(X, axis=1, weights=w, returned=True)
w_sum = w_sum[0]
if w is None:
f = X.shape[1] - ddof
elif ddof == 0:
f = w_sum
elif aweights is None:
f = w_sum - ddof
else:
f = w_sum - ddof * sum(w * aweights) / w_sum
X = X - avg[:, None]
X_T = X.T if w is None else (X * w).T
return true_divide(dot(X, X_T.conj()), f).squeeze()
@_wraps(onp.corrcoef)
def corrcoef(x, y=None, rowvar=True, bias=None, ddof=None):
c = cov(x, y, rowvar)
if len(shape(c)) == 0:
# scalar - this should yield nan for values (nan/nan, inf/inf, 0/0), 1 otherwise
return divide(c, c)
d = diag(c)
stddev = sqrt(real(d))
c = divide(c, stddev[:,None])
c = divide(c, stddev[None,:])
real_part = clip(real(c), -1, 1)
if iscomplexobj(c):
complex_part = clip(imag(c), -1, 1)
c = lax.complex(real_part, complex_part)
else:
c = real_part
return c
@_wraps(getattr(onp, "quantile", None))
def quantile(a, q, axis=None, out=None, overwrite_input=False,
interpolation="linear", keepdims=False):
if overwrite_input or out is not None:
msg = ("jax.numpy.quantile does not support overwrite_input=True or "
"out != None")
raise ValueError(msg)
if interpolation != "linear":
raise NotImplementedError("Only interpolation='linear' is implemented")
return _quantile(a, q, axis, keepdims)
@partial(jit, static_argnums=(2, 3))
def _quantile(a, q, axis, keepdims):
a = asarray(a)
if axis is None:
a = ravel(a)
axis = 0
elif isinstance(axis, tuple):
raise NotImplementedError("Tuple values for axis are not implemented")
else:
axis = _canonicalize_axis(axis, ndim(a))
q_ndim = ndim(q)
if q_ndim > 1:
raise ValueError("q must be have rank <= 1, got shape {}".format(shape(q)))
q = asarray(q)
if not issubdtype(a.dtype, floating) or not issubdtype(q.dtype, floating):
msg = "q and a arguments to quantile must be of float type, got {} and {}"
raise TypeError(msg.format(a.dtype, q.dtype))
# Promote q to at least float32 for precise interpolation.
q = lax.convert_element_type(q, promote_types(q.dtype, float32))
a_shape = shape(a)
a = lax.sort(a, dimension=axis)
n = a_shape[axis]
q = lax.mul(q, _constant_like(q, n - 1))
low = lax.floor(q)
high = lax.add(low, _constant_like(low, 1))
high_weight = lax.sub(q, low)
low_weight = lax.sub(_constant_like(high_weight, 1), high_weight)
low = lax.clamp(_constant_like(low, 0), low, _constant_like(low, n - 1))
high = lax.clamp(_constant_like(high, 0), high, _constant_like(high, n - 1))
low = lax.convert_element_type(low, int64)
high = lax.convert_element_type(high, int64)
slice_sizes = list(a_shape)
slice_sizes[axis] = 1
dnums = lax.GatherDimensionNumbers(
offset_dims=tuple(range(
q_ndim,
len(a_shape) + q_ndim if keepdims else len(a_shape) + q_ndim - 1)),
collapsed_slice_dims=() if keepdims else (axis,),
start_index_map=(axis,))
low = low[..., None]
high = high[..., None]
low_value = lax.gather(a, low, dimension_numbers=dnums,
slice_sizes=slice_sizes)
high_value = lax.gather(a, high, dimension_numbers=dnums,
slice_sizes=slice_sizes)
if q_ndim == 1:
low_weight = lax.broadcast_in_dim(low_weight, low_value.shape,
broadcast_dimensions=(0,))
high_weight = lax.broadcast_in_dim(high_weight, high_value.shape,
broadcast_dimensions=(0,))
return lax.convert_element_type(
lax.add(lax.mul(low_value.astype(q.dtype), low_weight),
lax.mul(high_value.astype(q.dtype), high_weight)), a.dtype)
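# Worked example (illustrative, not from the original source): for a = [1., 2., 3., 4.]
# and q = 0.5, n = 4 so q * (n - 1) = 1.5; low = 1, high = 2, and both weights are 0.5,
# giving 0.5 * 2. + 0.5 * 3. = 2.5 -- the same value median() returns for this input.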
@_wraps(onp.percentile)
def percentile(a, q, axis=None, out=None, overwrite_input=False,
interpolation="linear", keepdims=False):
q = true_divide(asarray(q), float32(100.0))
return quantile(a, q, axis=axis, out=out, overwrite_input=overwrite_input,
interpolation=interpolation, keepdims=keepdims)
@_wraps(onp.median)
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
q = 0.5
return quantile(a, q, axis=axis, out=out, overwrite_input=overwrite_input,
keepdims=keepdims)
def _astype(arr, dtype):
lax._check_user_dtype_supported(dtype, "astype")
return lax.convert_element_type(arr, dtype)
### track unimplemented functions
def _not_implemented(fun):
@_wraps(fun)
def wrapped(*args, **kwargs):
msg = "Numpy function {} not yet implemented"
raise NotImplementedError(msg.format(fun))
return wrapped
# Register a NotImplementedError stub for every public NumPy function not implemented above.
for func in get_module_functions(onp):
if func.__name__ not in globals():
globals()[func.__name__] = _not_implemented(func)
### add method and operator overloads to arraylike classes
# We add operator overloads to DeviceArray and ShapedArray. These method and
# operator overloads mainly just forward calls to the corresponding lax_numpy
# functions, which can themselves handle instances from any of these classes.
def _swap_args(f):
return lambda x, y: f(y, x)
def _unimplemented_setitem(self, i, x):
msg = ("'{}' object does not support item assignment. JAX arrays are "
"immutable; perhaps you want jax.ops.index_update or "
"jax.ops.index_add instead?")
raise TypeError(msg.format(type(self)))
def _operator_round(number, ndigits=None):
out = round(number, decimals=ndigits or 0)
# If `ndigits` is None, for a builtin float round(7.5) returns an integer.
return out.astype(int_) if ndigits is None else out
_operators = {
"getitem": _rewriting_take,
"setitem": _unimplemented_setitem,
"neg": negative,
"pos": positive,
"eq": equal,
"ne": not_equal,
"lt": less,
"le": less_equal,
"gt": greater,
"ge": greater_equal,
"abs": abs,
"add": add,
"radd": add,
"sub": subtract,
"rsub": _swap_args(subtract),
"mul": multiply,
"rmul": multiply,
"div": divide,
"rdiv": _swap_args(divide),
"truediv": true_divide,
"rtruediv": _swap_args(true_divide),
"floordiv": floor_divide,
"rfloordiv": _swap_args(floor_divide),
"divmod": divmod,
"rdivmod": _swap_args(divmod),
"mod": mod,
"rmod": _swap_args(mod),
"pow": power,
"rpow": _swap_args(power),
"matmul": matmul,
"rmatmul": _swap_args(matmul),
"and": bitwise_and,
"rand": bitwise_and,
"or": bitwise_or,
"ror": bitwise_or,
"xor": bitwise_xor,
"rxor": bitwise_xor,
"invert": bitwise_not,
"lshift": left_shift,
"rshift": right_shift,
"round": _operator_round,
}
# These numpy.ndarray methods are just refs to an equivalent numpy function
_nondiff_methods = ["all", "any", "argmax", "argmin", "argpartition", "argsort",
"nonzero", "searchsorted", "round"]
_diff_methods = ["clip", "compress", "conj", "conjugate", "cumprod", "cumsum",
"diagonal", "dot", "max", "mean", "min", "prod", "ptp",
"ravel", "repeat", "sort", "squeeze", "std", "sum",
"swapaxes", "take", "tile", "trace", "transpose", "var"]
# Set up operator, method, and property forwarding on Tracer instances containing
# ShapedArray avals by following the forwarding conventions for Tracer.
# Forward operators using a single-underscore-prefix naming convention:
for operator_name, function in _operators.items():
setattr(ShapedArray, "_{}".format(operator_name), staticmethod(function))
# Forward methods and properties using core.aval_method and core.aval_property:
for method_name in _nondiff_methods + _diff_methods:
setattr(ShapedArray, method_name, core.aval_method(globals()[method_name]))
setattr(ShapedArray, "reshape", core.aval_method(_reshape_method))
setattr(ShapedArray, "flatten", core.aval_method(ravel))
setattr(ShapedArray, "T", core.aval_property(transpose))
setattr(ShapedArray, "real", core.aval_property(real))
setattr(ShapedArray, "imag", core.aval_property(imag))
setattr(ShapedArray, "astype", core.aval_method(_astype))
# Forward operators, methods, and properties on DeviceArray to lax_numpy
# functions (with no Tracers involved; this forwarding is direct)
for operator_name, function in _operators.items():
setattr(DeviceArray, "__{}__".format(operator_name), function)
for method_name in _nondiff_methods + _diff_methods:
setattr(DeviceArray, method_name, globals()[method_name])
setattr(DeviceArray, "reshape", _reshape_method)
setattr(DeviceArray, "flatten", ravel)
setattr(DeviceArray, "T", property(transpose))
setattr(DeviceArray, "real", property(real))
setattr(DeviceArray, "imag", property(imag))
setattr(DeviceArray, "astype", _astype)
# Extra methods that are handy
setattr(ShapedArray, "broadcast", core.aval_method(lax.broadcast))
setattr(ShapedArray, "broadcast_in_dim", core.aval_method(lax.broadcast_in_dim))
setattr(ShapedArray, "split", core.aval_method(split))
setattr(DeviceArray, "broadcast", lax.broadcast)
setattr(DeviceArray, "broadcast_in_dim", lax.broadcast_in_dim)
setattr(DeviceArray, "split", split)
@jit
def _unstack(x):
if x.ndim == 0:
raise ValueError("Argument to _unstack must be non-scalar")
return [lax.index_in_dim(x, i, keepdims=False) for i in range(x.shape[0])]
setattr(DeviceArray, "_unstack", _unstack)
| []
| []
| [
"JAX_NUMPY_RANK_PROMOTION"
]
| [] | ["JAX_NUMPY_RANK_PROMOTION"] | python | 1 | 0 | |
internal/buf/cmd/buf/command/generate/generate_test.go | // Copyright 2020-2021 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package generate
import (
"bytes"
"context"
"encoding/json"
"fmt"
"os"
"path/filepath"
"testing"
"github.com/bufbuild/buf/internal/buf/bufcli"
"github.com/bufbuild/buf/internal/buf/bufgen"
"github.com/bufbuild/buf/internal/buf/internal/buftesting"
"github.com/bufbuild/buf/internal/pkg/app/appcmd"
"github.com/bufbuild/buf/internal/pkg/app/appcmd/appcmdtesting"
"github.com/bufbuild/buf/internal/pkg/app/appflag"
"github.com/bufbuild/buf/internal/pkg/storage"
"github.com/bufbuild/buf/internal/pkg/storage/storagearchive"
"github.com/bufbuild/buf/internal/pkg/storage/storagemem"
"github.com/bufbuild/buf/internal/pkg/storage/storageos"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var buftestingDirPath = filepath.Join(
"..",
"..",
"..",
"..",
"internal",
"buftesting",
)
func TestCompareGeneratedStubsGoogleapisGo(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
t.Parallel()
googleapisDirPath := buftesting.GetGoogleapisDirPath(t, buftestingDirPath)
testCompareGeneratedStubs(t,
googleapisDirPath,
[]testPluginInfo{
{name: "go", opt: "Mgoogle/api/auth.proto=foo"},
},
)
}
func TestCompareGeneratedStubsGoogleapisGoZip(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
t.Parallel()
googleapisDirPath := buftesting.GetGoogleapisDirPath(t, buftestingDirPath)
testCompareGeneratedStubsArchive(t,
googleapisDirPath,
[]testPluginInfo{
{name: "go", opt: "Mgoogle/api/auth.proto=foo"},
},
false,
)
}
func TestCompareGeneratedStubsGoogleapisGoJar(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
t.Parallel()
googleapisDirPath := buftesting.GetGoogleapisDirPath(t, buftestingDirPath)
testCompareGeneratedStubsArchive(t,
googleapisDirPath,
[]testPluginInfo{
{name: "go", opt: "Mgoogle/api/auth.proto=foo"},
},
true,
)
}
func TestCompareGeneratedStubsGoogleapisRuby(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
t.Parallel()
googleapisDirPath := buftesting.GetGoogleapisDirPath(t, buftestingDirPath)
testCompareGeneratedStubs(t,
googleapisDirPath,
[]testPluginInfo{{name: "ruby"}},
)
}
func TestCompareInsertionPointOutput(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode")
}
t.Parallel()
insertionTestdataDirPath := filepath.Join("testdata", "insertion")
testCompareGeneratedStubs(t,
insertionTestdataDirPath,
[]testPluginInfo{
{name: "insertion-point-receiver"},
{name: "insertion-point-writer"},
},
)
}
func testCompareGeneratedStubs(
t *testing.T,
dirPath string,
plugins []testPluginInfo,
) {
filePaths := buftesting.GetProtocFilePaths(t, dirPath, 100)
actualProtocDir := t.TempDir()
bufGenDir := t.TempDir()
var actualProtocPluginFlags []string
for _, plugin := range plugins {
actualProtocPluginFlags = append(actualProtocPluginFlags, fmt.Sprintf("--%s_out=%s", plugin.name, actualProtocDir))
if plugin.opt != "" {
actualProtocPluginFlags = append(actualProtocPluginFlags, fmt.Sprintf("--%s_opt=%s", plugin.name, plugin.opt))
}
}
buftesting.RunActualProtoc(
t,
false,
false,
dirPath,
filePaths,
map[string]string{
"PATH": os.Getenv("PATH"),
},
nil,
actualProtocPluginFlags...,
)
genFlags := []string{
"--input",
dirPath,
"--template",
newExternalConfigV1Beta1String(t, plugins, bufGenDir),
}
for _, filePath := range filePaths {
genFlags = append(
genFlags,
"--path",
filePath,
)
}
appcmdtesting.RunCommandSuccess(
t,
func(name string) *appcmd.Command {
return NewCommand(
name,
appflag.NewBuilder(name),
bufcli.NopModuleResolverReaderProvider{},
)
},
func(string) map[string]string {
return map[string]string{
"PATH": os.Getenv("PATH"),
}
},
nil,
nil,
genFlags...,
)
storageosProvider := storageos.NewProvider(storageos.ProviderWithSymlinks())
actualReadWriteBucket, err := storageosProvider.NewReadWriteBucket(
actualProtocDir,
storageos.ReadWriteBucketWithSymlinksIfSupported(),
)
require.NoError(t, err)
bufReadWriteBucket, err := storageosProvider.NewReadWriteBucket(
bufGenDir,
storageos.ReadWriteBucketWithSymlinksIfSupported(),
)
require.NoError(t, err)
diff, err := storage.DiffBytes(
context.Background(),
actualReadWriteBucket,
bufReadWriteBucket,
)
require.NoError(t, err)
assert.Empty(t, string(diff))
}
func testCompareGeneratedStubsArchive(
t *testing.T,
dirPath string,
plugins []testPluginInfo,
useJar bool,
) {
fileExt := ".zip"
if useJar {
fileExt = ".jar"
}
filePaths := buftesting.GetProtocFilePaths(t, dirPath, 100)
tempDir := t.TempDir()
actualProtocFile := filepath.Join(tempDir, "actual-protoc"+fileExt)
bufGenFile := filepath.Join(tempDir, "buf-generate"+fileExt)
var actualProtocPluginFlags []string
for _, plugin := range plugins {
actualProtocPluginFlags = append(actualProtocPluginFlags, fmt.Sprintf("--%s_out=%s", plugin.name, actualProtocFile))
if plugin.opt != "" {
actualProtocPluginFlags = append(actualProtocPluginFlags, fmt.Sprintf("--%s_opt=%s", plugin.name, plugin.opt))
}
}
buftesting.RunActualProtoc(
t,
false,
false,
dirPath,
filePaths,
map[string]string{
"PATH": os.Getenv("PATH"),
},
nil,
actualProtocPluginFlags...,
)
genFlags := []string{
"--input",
dirPath,
"--template",
newExternalConfigV1Beta1String(t, plugins, bufGenFile),
}
for _, filePath := range filePaths {
genFlags = append(
genFlags,
"--path",
filePath,
)
}
appcmdtesting.RunCommandSuccess(
t,
func(name string) *appcmd.Command {
return NewCommand(
name,
appflag.NewBuilder(name),
bufcli.NopModuleResolverReaderProvider{},
)
},
func(string) map[string]string {
return map[string]string{
"PATH": os.Getenv("PATH"),
}
},
nil,
nil,
genFlags...,
)
actualData, err := os.ReadFile(actualProtocFile)
require.NoError(t, err)
actualReadBucketBuilder := storagemem.NewReadBucketBuilder()
err = storagearchive.Unzip(
context.Background(),
bytes.NewReader(actualData),
int64(len(actualData)),
actualReadBucketBuilder,
nil,
0,
)
require.NoError(t, err)
actualReadBucket, err := actualReadBucketBuilder.ToReadBucket()
require.NoError(t, err)
bufData, err := os.ReadFile(bufGenFile)
require.NoError(t, err)
bufReadBucketBuilder := storagemem.NewReadBucketBuilder()
err = storagearchive.Unzip(
context.Background(),
bytes.NewReader(bufData),
int64(len(bufData)),
bufReadBucketBuilder,
nil,
0,
)
require.NoError(t, err)
bufReadBucket, err := bufReadBucketBuilder.ToReadBucket()
require.NoError(t, err)
diff, err := storage.DiffBytes(
context.Background(),
actualReadBucket,
bufReadBucket,
)
require.NoError(t, err)
assert.Empty(t, string(diff))
}
type testPluginInfo struct {
name string
opt string
}
func newExternalConfigV1Beta1String(t *testing.T, plugins []testPluginInfo, out string) string {
externalConfig := bufgen.ExternalConfigV1Beta1{
Version: "v1beta1",
}
for _, plugin := range plugins {
externalConfig.Plugins = append(
externalConfig.Plugins,
bufgen.ExternalPluginConfigV1Beta1{
Name: plugin.name,
Out: out,
Opt: plugin.opt,
},
)
}
data, err := json.Marshal(externalConfig)
require.NoError(t, err)
return string(data)
}
| [
"\"PATH\"",
"\"PATH\"",
"\"PATH\"",
"\"PATH\""
]
| []
| [
"PATH"
]
| [] | ["PATH"] | go | 1 | 0 | |
commands.go | package docker
import (
"archive/tar"
"bufio"
"bytes"
"encoding/base64"
"encoding/json"
"errors"
"flag"
"fmt"
"github.com/dotcloud/docker/archive"
"github.com/dotcloud/docker/auth"
"github.com/dotcloud/docker/engine"
"github.com/dotcloud/docker/registry"
"github.com/dotcloud/docker/term"
"github.com/dotcloud/docker/utils"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/httputil"
"net/url"
"os"
"os/signal"
"path"
"reflect"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"syscall"
"text/tabwriter"
"text/template"
"time"
)
var (
GITCOMMIT string
VERSION string
)
var (
ErrConnectionRefused = errors.New("Can't connect to docker daemon. Is 'docker -d' running on this host?")
)
func (cli *DockerCli) getMethod(name string) (func(...string) error, bool) {
methodName := "Cmd" + strings.ToUpper(name[:1]) + strings.ToLower(name[1:])
method := reflect.ValueOf(cli).MethodByName(methodName)
if !method.IsValid() {
return nil, false
}
return method.Interface().(func(...string) error), true
}
func ParseCommands(proto, addr string, args ...string) error {
cli := NewDockerCli(os.Stdin, os.Stdout, os.Stderr, proto, addr)
if len(args) > 0 {
method, exists := cli.getMethod(args[0])
if !exists {
fmt.Println("Error: Command not found:", args[0])
return cli.CmdHelp(args[1:]...)
}
return method(args[1:]...)
}
return cli.CmdHelp(args...)
}
func (cli *DockerCli) CmdHelp(args ...string) error {
if len(args) > 0 {
method, exists := cli.getMethod(args[0])
if !exists {
fmt.Fprintf(cli.err, "Error: Command not found: %s\n", args[0])
} else {
method("--help")
return nil
}
}
help := fmt.Sprintf("Usage: docker [OPTIONS] COMMAND [arg...]\n -H=[unix://%s]: tcp://host:port to bind/connect to or unix://path/to/socket to use\n\nA self-sufficient runtime for linux containers.\n\nCommands:\n", DEFAULTUNIXSOCKET)
for _, command := range [][]string{
{"attach", "Attach to a running container"},
{"build", "Build a container from a Dockerfile"},
{"commit", "Create a new image from a container's changes"},
{"cp", "Copy files/folders from the containers filesystem to the host path"},
{"diff", "Inspect changes on a container's filesystem"},
{"events", "Get real time events from the server"},
{"export", "Stream the contents of a container as a tar archive"},
{"history", "Show the history of an image"},
{"images", "List images"},
{"import", "Create a new filesystem image from the contents of a tarball"},
{"info", "Display system-wide information"},
{"insert", "Insert a file in an image"},
{"inspect", "Return low-level information on a container"},
{"kill", "Kill a running container"},
{"load", "Load an image from a tar archive"},
{"login", "Register or Login to the docker registry server"},
{"logs", "Fetch the logs of a container"},
{"port", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT"},
{"ps", "List containers"},
{"pull", "Pull an image or a repository from the docker registry server"},
{"push", "Push an image or a repository to the docker registry server"},
{"restart", "Restart a running container"},
{"rm", "Remove one or more containers"},
{"rmi", "Remove one or more images"},
{"run", "Run a command in a new container"},
{"save", "Save an image to a tar archive"},
{"search", "Search for an image in the docker index"},
{"start", "Start a stopped container"},
{"stop", "Stop a running container"},
{"tag", "Tag an image into a repository"},
{"top", "Lookup the running processes of a container"},
{"version", "Show the docker version information"},
{"wait", "Block until a container stops, then print its exit code"},
} {
help += fmt.Sprintf(" %-10.10s%s\n", command[0], command[1])
}
fmt.Fprintf(cli.err, "%s\n", help)
return nil
}
func (cli *DockerCli) CmdInsert(args ...string) error {
cmd := cli.Subcmd("insert", "IMAGE URL PATH", "Insert a file from URL in the IMAGE at PATH")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 3 {
cmd.Usage()
return nil
}
v := url.Values{}
v.Set("url", cmd.Arg(1))
v.Set("path", cmd.Arg(2))
return cli.stream("POST", "/images/"+cmd.Arg(0)+"/insert?"+v.Encode(), nil, cli.out, nil)
}
// MkBuildContext returns an archive of an empty build context with the contents
// of `dockerfile` at the path ./Dockerfile.
func MkBuildContext(dockerfile string, files [][2]string) (archive.Archive, error) {
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
files = append(files, [2]string{"Dockerfile", dockerfile})
for _, file := range files {
name, content := file[0], file[1]
hdr := &tar.Header{
Name: name,
Size: int64(len(content)),
}
if err := tw.WriteHeader(hdr); err != nil {
return nil, err
}
if _, err := tw.Write([]byte(content)); err != nil {
return nil, err
}
}
if err := tw.Close(); err != nil {
return nil, err
}
return buf, nil
}
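// Illustrative sketch (not part of the original file): CmdBuild below uses this for
// `docker build -`, turning a Dockerfile read from stdin into an in-memory tar context
// containing a single ./Dockerfile entry, roughly:
//
//	context, err := MkBuildContext("FROM busybox\nRUN echo hi\n", nil)
//	// on success, context can be streamed as the body of the /build request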
func (cli *DockerCli) CmdBuild(args ...string) error {
cmd := cli.Subcmd("build", "[OPTIONS] PATH | URL | -", "Build a new container image from the source code at PATH")
tag := cmd.String("t", "", "Repository name (and optionally a tag) to be applied to the resulting image in case of success")
suppressOutput := cmd.Bool("q", false, "Suppress verbose build output")
noCache := cmd.Bool("no-cache", false, "Do not use cache when building the image")
rm := cmd.Bool("rm", false, "Remove intermediate containers after a successful build")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
var (
context archive.Archive
isRemote bool
err error
)
if cmd.Arg(0) == "-" {
// As a special case, 'docker build -' will build from an empty context with the
// contents of stdin as a Dockerfile
dockerfile, err := ioutil.ReadAll(cli.in)
if err != nil {
return err
}
context, err = MkBuildContext(string(dockerfile), nil)
} else if utils.IsURL(cmd.Arg(0)) || utils.IsGIT(cmd.Arg(0)) {
isRemote = true
} else {
if _, err := os.Stat(cmd.Arg(0)); err != nil {
return err
}
filename := path.Join(cmd.Arg(0), "Dockerfile")
if _, err = os.Stat(filename); os.IsNotExist(err) {
return fmt.Errorf("no Dockerfile found in %s", cmd.Arg(0))
}
context, err = archive.Tar(cmd.Arg(0), archive.Uncompressed)
}
var body io.Reader
// Setup an upload progress bar
// FIXME: ProgressReader shouldn't be this annoying to use
if context != nil {
sf := utils.NewStreamFormatter(false)
body = utils.ProgressReader(ioutil.NopCloser(context), 0, cli.err, sf, true, "", "Uploading context")
}
// Upload the build context
v := &url.Values{}
v.Set("t", *tag)
if *suppressOutput {
v.Set("q", "1")
}
if isRemote {
v.Set("remote", cmd.Arg(0))
}
if *noCache {
v.Set("nocache", "1")
}
if *rm {
v.Set("rm", "1")
}
headers := http.Header(make(map[string][]string))
buf, err := json.Marshal(cli.configFile)
if err != nil {
return err
}
headers.Add("X-Registry-Auth", base64.URLEncoding.EncodeToString(buf))
if context != nil {
headers.Set("Content-Type", "application/tar")
}
err = cli.stream("POST", fmt.Sprintf("/build?%s", v.Encode()), body, cli.out, headers)
if jerr, ok := err.(*utils.JSONError); ok {
return &utils.StatusError{Status: jerr.Message, StatusCode: jerr.Code}
}
return err
}
// 'docker login': login / register a user to registry service.
func (cli *DockerCli) CmdLogin(args ...string) error {
cmd := cli.Subcmd("login", "[OPTIONS] [SERVER]", "Register or Login to a docker registry server, if no server is specified \""+auth.IndexServerAddress()+"\" is the default.")
var username, password, email string
cmd.StringVar(&username, "u", "", "username")
cmd.StringVar(&password, "p", "", "password")
cmd.StringVar(&email, "e", "", "email")
err := cmd.Parse(args)
if err != nil {
return nil
}
serverAddress := auth.IndexServerAddress()
if len(cmd.Args()) > 0 {
serverAddress, err = registry.ExpandAndVerifyRegistryUrl(cmd.Arg(0))
if err != nil {
return err
}
fmt.Fprintf(cli.out, "Login against server at %s\n", serverAddress)
}
promptDefault := func(prompt string, configDefault string) {
if configDefault == "" {
fmt.Fprintf(cli.out, "%s: ", prompt)
} else {
fmt.Fprintf(cli.out, "%s (%s): ", prompt, configDefault)
}
}
readInput := func(in io.Reader, out io.Writer) string {
reader := bufio.NewReader(in)
line, _, err := reader.ReadLine()
if err != nil {
fmt.Fprintln(out, err.Error())
os.Exit(1)
}
return string(line)
}
cli.LoadConfigFile()
authconfig, ok := cli.configFile.Configs[serverAddress]
if !ok {
authconfig = auth.AuthConfig{}
}
if username == "" {
promptDefault("Username", authconfig.Username)
username = readInput(cli.in, cli.out)
if username == "" {
username = authconfig.Username
}
}
if username != authconfig.Username {
if password == "" {
oldState, _ := term.SaveState(cli.terminalFd)
fmt.Fprintf(cli.out, "Password: ")
term.DisableEcho(cli.terminalFd, oldState)
password = readInput(cli.in, cli.out)
fmt.Fprint(cli.out, "\n")
term.RestoreTerminal(cli.terminalFd, oldState)
if password == "" {
return fmt.Errorf("Error : Password Required")
}
}
if email == "" {
promptDefault("Email", authconfig.Email)
email = readInput(cli.in, cli.out)
if email == "" {
email = authconfig.Email
}
}
} else {
password = authconfig.Password
email = authconfig.Email
}
authconfig.Username = username
authconfig.Password = password
authconfig.Email = email
authconfig.ServerAddress = serverAddress
cli.configFile.Configs[serverAddress] = authconfig
body, statusCode, err := cli.call("POST", "/auth", cli.configFile.Configs[serverAddress])
if statusCode == 401 {
delete(cli.configFile.Configs, serverAddress)
auth.SaveConfig(cli.configFile)
return err
}
if err != nil {
return err
}
var out2 APIAuth
err = json.Unmarshal(body, &out2)
if err != nil {
cli.configFile, _ = auth.LoadConfig(os.Getenv("HOME"))
return err
}
auth.SaveConfig(cli.configFile)
if out2.Status != "" {
fmt.Fprintf(cli.out, "%s\n", out2.Status)
}
return nil
}
// 'docker wait': block until a container stops
func (cli *DockerCli) CmdWait(args ...string) error {
cmd := cli.Subcmd("wait", "CONTAINER [CONTAINER...]", "Block until a container stops, then print its exit code.")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
var encounteredError error
for _, name := range cmd.Args() {
status, err := waitForExit(cli, name)
if err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to wait one or more containers")
} else {
fmt.Fprintf(cli.out, "%d\n", status)
}
}
return encounteredError
}
// 'docker version': show version information
func (cli *DockerCli) CmdVersion(args ...string) error {
cmd := cli.Subcmd("version", "", "Show the docker version information.")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() > 0 {
cmd.Usage()
return nil
}
if VERSION != "" {
fmt.Fprintf(cli.out, "Client version: %s\n", VERSION)
}
fmt.Fprintf(cli.out, "Go version (client): %s\n", runtime.Version())
if GITCOMMIT != "" {
fmt.Fprintf(cli.out, "Git commit (client): %s\n", GITCOMMIT)
}
body, _, err := cli.call("GET", "/version", nil)
if err != nil {
return err
}
out := engine.NewOutput()
remoteVersion, err := out.AddEnv()
if err != nil {
utils.Errorf("Error reading remote version: %s\n", err)
return err
}
if _, err := out.Write(body); err != nil {
utils.Errorf("Error reading remote version: %s\n", err)
return err
}
out.Close()
fmt.Fprintf(cli.out, "Server version: %s\n", remoteVersion.Get("Version"))
fmt.Fprintf(cli.out, "Git commit (server): %s\n", remoteVersion.Get("GitCommit"))
fmt.Fprintf(cli.out, "Go version (server): %s\n", remoteVersion.Get("GoVersion"))
release := utils.GetReleaseVersion()
if release != "" {
fmt.Fprintf(cli.out, "Last stable version: %s", release)
if (VERSION != "" || remoteVersion.Exists("Version")) && (strings.Trim(VERSION, "-dev") != release || strings.Trim(remoteVersion.Get("Version"), "-dev") != release) {
fmt.Fprintf(cli.out, ", please update docker")
}
fmt.Fprintf(cli.out, "\n")
}
return nil
}
// 'docker info': display system-wide information.
func (cli *DockerCli) CmdInfo(args ...string) error {
cmd := cli.Subcmd("info", "", "Display system-wide information")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() > 0 {
cmd.Usage()
return nil
}
body, _, err := cli.call("GET", "/info", nil)
if err != nil {
return err
}
out := engine.NewOutput()
remoteInfo, err := out.AddEnv()
if err != nil {
return err
}
if _, err := out.Write(body); err != nil {
utils.Errorf("Error reading remote info: %s\n", err)
return err
}
out.Close()
fmt.Fprintf(cli.out, "Containers: %d\n", remoteInfo.GetInt("Containers"))
fmt.Fprintf(cli.out, "Images: %d\n", remoteInfo.GetInt("Images"))
fmt.Fprintf(cli.out, "Driver: %s\n", remoteInfo.Get("Driver"))
var driverStatus [][2]string
if err := remoteInfo.GetJson("DriverStatus", &driverStatus); err != nil {
return err
}
for _, pair := range driverStatus {
fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1])
}
if remoteInfo.GetBool("Debug") || os.Getenv("DEBUG") != "" {
fmt.Fprintf(cli.out, "Debug mode (server): %v\n", remoteInfo.GetBool("Debug"))
fmt.Fprintf(cli.out, "Debug mode (client): %v\n", os.Getenv("DEBUG") != "")
fmt.Fprintf(cli.out, "Fds: %d\n", remoteInfo.GetInt("NFd"))
fmt.Fprintf(cli.out, "Goroutines: %d\n", remoteInfo.GetInt("NGoroutines"))
fmt.Fprintf(cli.out, "LXC Version: %s\n", remoteInfo.Get("LXCVersion"))
fmt.Fprintf(cli.out, "EventsListeners: %d\n", remoteInfo.GetInt("NEventsListener"))
fmt.Fprintf(cli.out, "Kernel Version: %s\n", remoteInfo.Get("KernelVersion"))
if initSha1 := remoteInfo.Get("InitSha1"); initSha1 != "" {
fmt.Fprintf(cli.out, "Init SHA1: %s\n", initSha1)
}
if initPath := remoteInfo.Get("InitPath"); initPath != "" {
fmt.Fprintf(cli.out, "Init Path: %s\n", initPath)
}
}
if len(remoteInfo.GetList("IndexServerAddress")) != 0 {
cli.LoadConfigFile()
u := cli.configFile.Configs[remoteInfo.Get("IndexServerAddress")].Username
if len(u) > 0 {
fmt.Fprintf(cli.out, "Username: %v\n", u)
fmt.Fprintf(cli.out, "Registry: %v\n", remoteInfo.GetList("IndexServerAddress"))
}
}
if !remoteInfo.GetBool("MemoryLimit") {
fmt.Fprintf(cli.err, "WARNING: No memory limit support\n")
}
if !remoteInfo.GetBool("SwapLimit") {
fmt.Fprintf(cli.err, "WARNING: No swap limit support\n")
}
if !remoteInfo.GetBool("IPv4Forwarding") {
fmt.Fprintf(cli.err, "WARNING: IPv4 forwarding is disabled.\n")
}
return nil
}
func (cli *DockerCli) CmdStop(args ...string) error {
cmd := cli.Subcmd("stop", "[OPTIONS] CONTAINER [CONTAINER...]", "Stop a running container (Send SIGTERM, and then SIGKILL after grace period)")
nSeconds := cmd.Int("t", 10, "Number of seconds to wait for the container to stop before killing it.")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
v := url.Values{}
v.Set("t", strconv.Itoa(*nSeconds))
var encounteredError error
for _, name := range cmd.Args() {
_, _, err := cli.call("POST", "/containers/"+name+"/stop?"+v.Encode(), nil)
if err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to stop one or more containers")
} else {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
return encounteredError
}
func (cli *DockerCli) CmdRestart(args ...string) error {
cmd := cli.Subcmd("restart", "[OPTIONS] CONTAINER [CONTAINER...]", "Restart a running container")
nSeconds := cmd.Int("t", 10, "Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default=10")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
v := url.Values{}
v.Set("t", strconv.Itoa(*nSeconds))
var encounteredError error
for _, name := range cmd.Args() {
_, _, err := cli.call("POST", "/containers/"+name+"/restart?"+v.Encode(), nil)
if err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to restart one or more containers")
} else {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
return encounteredError
}
func (cli *DockerCli) forwardAllSignals(cid string) chan os.Signal {
sigc := make(chan os.Signal, 1)
utils.CatchAll(sigc)
go func() {
for s := range sigc {
if s == syscall.SIGCHLD {
continue
}
if _, _, err := cli.call("POST", fmt.Sprintf("/containers/%s/kill?signal=%d", cid, s), nil); err != nil {
utils.Debugf("Error sending signal: %s", err)
}
}
}()
return sigc
}
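// Typical use (as in CmdStart below, shown here as an illustrative sketch): relay the
// caller's signals to the container while attached, and stop catching them when done:
//
//	sigc := cli.forwardAllSignals(containerID)
//	defer utils.StopCatch(sigc)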
func (cli *DockerCli) CmdStart(args ...string) error {
cmd := cli.Subcmd("start", "CONTAINER [CONTAINER...]", "Restart a stopped container")
attach := cmd.Bool("a", false, "Attach container's stdout/stderr and forward all signals to the process")
openStdin := cmd.Bool("i", false, "Attach container's stdin")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
var cErr chan error
var tty bool
if *attach || *openStdin {
if cmd.NArg() > 1 {
return fmt.Errorf("Impossible to start and attach multiple containers at once.")
}
body, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil)
if err != nil {
return err
}
container := &Container{}
err = json.Unmarshal(body, container)
if err != nil {
return err
}
tty = container.Config.Tty
if !container.Config.Tty {
sigc := cli.forwardAllSignals(cmd.Arg(0))
defer utils.StopCatch(sigc)
}
var in io.ReadCloser
v := url.Values{}
v.Set("stream", "1")
if *openStdin && container.Config.OpenStdin {
v.Set("stdin", "1")
in = cli.in
}
v.Set("stdout", "1")
v.Set("stderr", "1")
cErr = utils.Go(func() error {
return cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, in, cli.out, cli.err, nil)
})
}
var encounteredError error
for _, name := range cmd.Args() {
_, _, err := cli.call("POST", "/containers/"+name+"/start", nil)
if err != nil {
if !*attach || !*openStdin {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to start one or more containers")
}
} else {
if !*attach || !*openStdin {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
}
if encounteredError != nil {
if *openStdin || *attach {
cli.in.Close()
<-cErr
}
return encounteredError
}
if *openStdin || *attach {
if tty && cli.isTerminal {
if err := cli.monitorTtySize(cmd.Arg(0)); err != nil {
utils.Errorf("Error monitoring TTY size: %s\n", err)
}
}
return <-cErr
}
return nil
}
func (cli *DockerCli) CmdInspect(args ...string) error {
cmd := cli.Subcmd("inspect", "CONTAINER|IMAGE [CONTAINER|IMAGE...]", "Return low-level information on a container/image")
tmplStr := cmd.String("format", "", "Format the output using the given go template.")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
var tmpl *template.Template
if *tmplStr != "" {
var err error
if tmpl, err = template.New("").Parse(*tmplStr); err != nil {
fmt.Fprintf(cli.err, "Template parsing error: %v\n", err)
return &utils.StatusError{StatusCode: 64,
Status: "Template parsing error: " + err.Error()}
}
}
indented := new(bytes.Buffer)
indented.WriteByte('[')
status := 0
for _, name := range cmd.Args() {
obj, _, err := cli.call("GET", "/containers/"+name+"/json", nil)
if err != nil {
obj, _, err = cli.call("GET", "/images/"+name+"/json", nil)
if err != nil {
if strings.Contains(err.Error(), "No such") {
fmt.Fprintf(cli.err, "Error: No such image or container: %s\n", name)
} else {
fmt.Fprintf(cli.err, "%s", err)
}
status = 1
continue
}
}
if tmpl == nil {
if err = json.Indent(indented, obj, "", " "); err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
status = 1
continue
}
} else {
// Has template, will render
var value interface{}
if err := json.Unmarshal(obj, &value); err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
status = 1
continue
}
if err := tmpl.Execute(cli.out, value); err != nil {
return err
}
cli.out.Write([]byte{'\n'})
}
indented.WriteString(",")
}
if indented.Len() > 1 {
// Remove trailing ','
indented.Truncate(indented.Len() - 1)
}
indented.WriteByte(']')
if tmpl == nil {
if _, err := io.Copy(cli.out, indented); err != nil {
return err
}
}
if status != 0 {
return &utils.StatusError{StatusCode: status}
}
return nil
}
func (cli *DockerCli) CmdTop(args ...string) error {
cmd := cli.Subcmd("top", "CONTAINER [ps OPTIONS]", "Lookup the running processes of a container")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() == 0 {
cmd.Usage()
return nil
}
val := url.Values{}
if cmd.NArg() > 1 {
val.Set("ps_args", strings.Join(cmd.Args()[1:], " "))
}
body, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/top?"+val.Encode(), nil)
if err != nil {
return err
}
procs := APITop{}
err = json.Unmarshal(body, &procs)
if err != nil {
return err
}
w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
fmt.Fprintln(w, strings.Join(procs.Titles, "\t"))
for _, proc := range procs.Processes {
fmt.Fprintln(w, strings.Join(proc, "\t"))
}
w.Flush()
return nil
}
func (cli *DockerCli) CmdPort(args ...string) error {
cmd := cli.Subcmd("port", "CONTAINER PRIVATE_PORT", "Lookup the public-facing port which is NAT-ed to PRIVATE_PORT")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 2 {
cmd.Usage()
return nil
}
port := cmd.Arg(1)
proto := "tcp"
parts := strings.SplitN(port, "/", 2)
if len(parts) == 2 && len(parts[1]) != 0 {
port = parts[0]
proto = parts[1]
}
body, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/json", nil)
if err != nil {
return err
}
var out Container
err = json.Unmarshal(body, &out)
if err != nil {
return err
}
if frontends, exists := out.NetworkSettings.Ports[Port(port+"/"+proto)]; exists && frontends != nil {
for _, frontend := range frontends {
fmt.Fprintf(cli.out, "%s:%s\n", frontend.HostIp, frontend.HostPort)
}
} else {
return fmt.Errorf("Error: No public port '%s' published for %s", cmd.Arg(1), cmd.Arg(0))
}
return nil
}
// 'docker rmi IMAGE' removes all images with the name IMAGE
func (cli *DockerCli) CmdRmi(args ...string) error {
cmd := cli.Subcmd("rmi", "IMAGE [IMAGE...]", "Remove one or more images")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
var encounteredError error
for _, name := range cmd.Args() {
body, _, err := cli.call("DELETE", "/images/"+name, nil)
if err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to remove one or more images")
} else {
var outs []APIRmi
err = json.Unmarshal(body, &outs)
if err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to remove one or more images")
continue
}
for _, out := range outs {
if out.Deleted != "" {
fmt.Fprintf(cli.out, "Deleted: %s\n", out.Deleted)
} else {
fmt.Fprintf(cli.out, "Untagged: %s\n", out.Untagged)
}
}
}
}
return encounteredError
}
func (cli *DockerCli) CmdHistory(args ...string) error {
cmd := cli.Subcmd("history", "[OPTIONS] IMAGE", "Show the history of an image")
quiet := cmd.Bool("q", false, "only show numeric IDs")
noTrunc := cmd.Bool("notrunc", false, "Don't truncate output")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
body, _, err := cli.call("GET", "/images/"+cmd.Arg(0)+"/history", nil)
if err != nil {
return err
}
var outs []APIHistory
err = json.Unmarshal(body, &outs)
if err != nil {
return err
}
w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
if !*quiet {
fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE")
}
for _, out := range outs {
if !*quiet {
if *noTrunc {
fmt.Fprintf(w, "%s\t", out.ID)
} else {
fmt.Fprintf(w, "%s\t", utils.TruncateID(out.ID))
}
fmt.Fprintf(w, "%s ago\t", utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.Created, 0))))
if *noTrunc {
fmt.Fprintf(w, "%s\t", out.CreatedBy)
} else {
fmt.Fprintf(w, "%s\t", utils.Trunc(out.CreatedBy, 45))
}
fmt.Fprintf(w, "%s\n", utils.HumanSize(out.Size))
} else {
if *noTrunc {
fmt.Fprintln(w, out.ID)
} else {
fmt.Fprintln(w, utils.TruncateID(out.ID))
}
}
}
w.Flush()
return nil
}
func (cli *DockerCli) CmdRm(args ...string) error {
cmd := cli.Subcmd("rm", "[OPTIONS] CONTAINER [CONTAINER...]", "Remove one or more containers")
v := cmd.Bool("v", false, "Remove the volumes associated to the container")
link := cmd.Bool("link", false, "Remove the specified link and not the underlying container")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
val := url.Values{}
if *v {
val.Set("v", "1")
}
if *link {
val.Set("link", "1")
}
var encounteredError error
for _, name := range cmd.Args() {
_, _, err := cli.call("DELETE", "/containers/"+name+"?"+val.Encode(), nil)
if err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to remove one or more containers")
} else {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
return encounteredError
}
// 'docker kill NAME' kills a running container
func (cli *DockerCli) CmdKill(args ...string) error {
cmd := cli.Subcmd("kill", "CONTAINER [CONTAINER...]", "Kill a running container (send SIGKILL)")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
var encounteredError error
for _, name := range cmd.Args() {
if _, _, err := cli.call("POST", "/containers/"+name+"/kill", nil); err != nil {
fmt.Fprintf(cli.err, "%s\n", err)
encounteredError = fmt.Errorf("Error: failed to kill one or more containers")
} else {
fmt.Fprintf(cli.out, "%s\n", name)
}
}
return encounteredError
}
func (cli *DockerCli) CmdImport(args ...string) error {
cmd := cli.Subcmd("import", "URL|- [REPOSITORY[:TAG]]", "Create an empty filesystem image and import the contents of the tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it.")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() < 1 {
cmd.Usage()
return nil
}
var src, repository, tag string
if cmd.NArg() == 3 {
fmt.Fprintf(cli.err, "[DEPRECATED] The format 'URL|- [REPOSITORY [TAG]]' as been deprecated. Please use URL|- [REPOSITORY[:TAG]]\n")
src, repository, tag = cmd.Arg(0), cmd.Arg(1), cmd.Arg(2)
} else {
src = cmd.Arg(0)
repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
}
v := url.Values{}
v.Set("repo", repository)
v.Set("tag", tag)
v.Set("fromSrc", src)
var in io.Reader
if src == "-" {
in = cli.in
}
return cli.stream("POST", "/images/create?"+v.Encode(), in, cli.out, nil)
}
func (cli *DockerCli) CmdPush(args ...string) error {
cmd := cli.Subcmd("push", "NAME", "Push an image or a repository to the registry")
if err := cmd.Parse(args); err != nil {
return nil
}
name := cmd.Arg(0)
if name == "" {
cmd.Usage()
return nil
}
cli.LoadConfigFile()
// Resolve the Repository name from fqn to endpoint + name
endpoint, _, err := registry.ResolveRepositoryName(name)
if err != nil {
return err
}
// Resolve the Auth config relevant for this server
authConfig := cli.configFile.ResolveAuthConfig(endpoint)
// If we're not using a custom registry, we know the restrictions
// applied to repository names and can warn the user in advance.
// Custom repositories can have different rules, and we must also
// allow pushing by image ID.
if len(strings.SplitN(name, "/", 2)) == 1 {
username := cli.configFile.Configs[auth.IndexServerAddress()].Username
if username == "" {
username = "<user>"
}
return fmt.Errorf("Impossible to push a \"root\" repository. Please rename your repository in <user>/<repo> (ex: %s/%s)", username, name)
}
v := url.Values{}
push := func(authConfig auth.AuthConfig) error {
buf, err := json.Marshal(authConfig)
if err != nil {
return err
}
registryAuthHeader := []string{
base64.URLEncoding.EncodeToString(buf),
}
return cli.stream("POST", "/images/"+name+"/push?"+v.Encode(), nil, cli.out, map[string][]string{
"X-Registry-Auth": registryAuthHeader,
})
}
if err := push(authConfig); err != nil {
if err.Error() == registry.ErrLoginRequired.Error() {
fmt.Fprintln(cli.out, "\nPlease login prior to push:")
if err := cli.CmdLogin(endpoint); err != nil {
return err
}
authConfig := cli.configFile.ResolveAuthConfig(endpoint)
return push(authConfig)
}
return err
}
return nil
}
func (cli *DockerCli) CmdPull(args ...string) error {
cmd := cli.Subcmd("pull", "NAME", "Pull an image or a repository from the registry")
tag := cmd.String("t", "", "Download tagged image in repository")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
remote, parsedTag := utils.ParseRepositoryTag(cmd.Arg(0))
if *tag == "" {
*tag = parsedTag
}
// Resolve the Repository name from fqn to endpoint + name
endpoint, _, err := registry.ResolveRepositoryName(remote)
if err != nil {
return err
}
cli.LoadConfigFile()
// Resolve the Auth config relevant for this server
authConfig := cli.configFile.ResolveAuthConfig(endpoint)
v := url.Values{}
v.Set("fromImage", remote)
v.Set("tag", *tag)
pull := func(authConfig auth.AuthConfig) error {
buf, err := json.Marshal(authConfig)
if err != nil {
return err
}
registryAuthHeader := []string{
base64.URLEncoding.EncodeToString(buf),
}
return cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.out, map[string][]string{
"X-Registry-Auth": registryAuthHeader,
})
}
if err := pull(authConfig); err != nil {
if err.Error() == registry.ErrLoginRequired.Error() {
fmt.Fprintln(cli.out, "\nPlease login prior to push:")
if err := cli.CmdLogin(endpoint); err != nil {
return err
}
authConfig := cli.configFile.ResolveAuthConfig(endpoint)
return pull(authConfig)
}
return err
}
return nil
}
func (cli *DockerCli) CmdImages(args ...string) error {
cmd := cli.Subcmd("images", "[OPTIONS] [NAME]", "List images")
quiet := cmd.Bool("q", false, "only show numeric IDs")
all := cmd.Bool("a", false, "show all images (by default filter out the intermediate images used to build)")
noTrunc := cmd.Bool("notrunc", false, "Don't truncate output")
flViz := cmd.Bool("viz", false, "output graph in graphviz format")
flTree := cmd.Bool("tree", false, "output graph in tree format")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() > 1 {
cmd.Usage()
return nil
}
filter := cmd.Arg(0)
if *flViz || *flTree {
body, _, err := cli.call("GET", "/images/json?all=1", nil)
if err != nil {
return err
}
var outs []APIImages
if err := json.Unmarshal(body, &outs); err != nil {
return err
}
var (
printNode func(cli *DockerCli, noTrunc bool, image APIImages, prefix string)
startImage APIImages
roots []APIImages
byParent = make(map[string][]APIImages)
)
for _, image := range outs {
if image.ParentId == "" {
roots = append(roots, image)
} else {
if children, exists := byParent[image.ParentId]; exists {
byParent[image.ParentId] = append(children, image)
} else {
byParent[image.ParentId] = []APIImages{image}
}
}
if filter != "" {
if filter == image.ID || filter == utils.TruncateID(image.ID) {
startImage = image
}
for _, repotag := range image.RepoTags {
if repotag == filter {
startImage = image
}
}
}
}
if *flViz {
fmt.Fprintf(cli.out, "digraph docker {\n")
printNode = (*DockerCli).printVizNode
} else {
printNode = (*DockerCli).printTreeNode
}
if startImage.ID != "" {
cli.WalkTree(*noTrunc, &[]APIImages{startImage}, byParent, "", printNode)
} else if filter == "" {
cli.WalkTree(*noTrunc, &roots, byParent, "", printNode)
}
if *flViz {
fmt.Fprintf(cli.out, " base [style=invisible]\n}\n")
}
} else {
v := url.Values{}
if cmd.NArg() == 1 {
v.Set("filter", filter)
}
if *all {
v.Set("all", "1")
}
body, _, err := cli.call("GET", "/images/json?"+v.Encode(), nil)
if err != nil {
return err
}
var outs []APIImages
err = json.Unmarshal(body, &outs)
if err != nil {
return err
}
w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
if !*quiet {
fmt.Fprintln(w, "REPOSITORY\tTAG\tIMAGE ID\tCREATED\tVIRTUAL SIZE")
}
for _, out := range outs {
for _, repotag := range out.RepoTags {
repo, tag := utils.ParseRepositoryTag(repotag)
if !*noTrunc {
out.ID = utils.TruncateID(out.ID)
}
if !*quiet {
fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\n", repo, tag, out.ID, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.Created, 0))), utils.HumanSize(out.VirtualSize))
} else {
fmt.Fprintln(w, out.ID)
}
}
}
if !*quiet {
w.Flush()
}
}
return nil
}
func (cli *DockerCli) WalkTree(noTrunc bool, images *[]APIImages, byParent map[string][]APIImages, prefix string, printNode func(cli *DockerCli, noTrunc bool, image APIImages, prefix string)) {
length := len(*images)
if length > 1 {
for index, image := range *images {
if index+1 == length {
printNode(cli, noTrunc, image, prefix+"└─")
if subimages, exists := byParent[image.ID]; exists {
cli.WalkTree(noTrunc, &subimages, byParent, prefix+" ", printNode)
}
} else {
printNode(cli, noTrunc, image, prefix+"├─")
if subimages, exists := byParent[image.ID]; exists {
cli.WalkTree(noTrunc, &subimages, byParent, prefix+"│ ", printNode)
}
}
}
} else {
for _, image := range *images {
printNode(cli, noTrunc, image, prefix+"└─")
if subimages, exists := byParent[image.ID]; exists {
cli.WalkTree(noTrunc, &subimages, byParent, prefix+" ", printNode)
}
}
}
}
func (cli *DockerCli) printVizNode(noTrunc bool, image APIImages, prefix string) {
var (
imageID string
parentID string
)
if noTrunc {
imageID = image.ID
parentID = image.ParentId
} else {
imageID = utils.TruncateID(image.ID)
parentID = utils.TruncateID(image.ParentId)
}
if image.ParentId == "" {
fmt.Fprintf(cli.out, " base -> \"%s\" [style=invis]\n", imageID)
} else {
fmt.Fprintf(cli.out, " \"%s\" -> \"%s\"\n", parentID, imageID)
}
if image.RepoTags[0] != "<none>:<none>" {
fmt.Fprintf(cli.out, " \"%s\" [label=\"%s\\n%s\",shape=box,fillcolor=\"paleturquoise\",style=\"filled,rounded\"];\n",
imageID, imageID, strings.Join(image.RepoTags, "\\n"))
}
}
func (cli *DockerCli) printTreeNode(noTrunc bool, image APIImages, prefix string) {
var imageID string
if noTrunc {
imageID = image.ID
} else {
imageID = utils.TruncateID(image.ID)
}
fmt.Fprintf(cli.out, "%s%s Virtual Size: %s", prefix, imageID, utils.HumanSize(image.VirtualSize))
if image.RepoTags[0] != "<none>:<none>" {
fmt.Fprintf(cli.out, " Tags: %s\n", strings.Join(image.RepoTags, ", "))
} else {
fmt.Fprint(cli.out, "\n")
}
}
func displayablePorts(ports []APIPort) string {
result := []string{}
for _, port := range ports {
if port.IP == "" {
result = append(result, fmt.Sprintf("%d/%s", port.PublicPort, port.Type))
} else {
result = append(result, fmt.Sprintf("%s:%d->%d/%s", port.IP, port.PublicPort, port.PrivatePort, port.Type))
}
}
sort.Strings(result)
return strings.Join(result, ", ")
}
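// A quick illustrative sketch with two hypothetical APIPort values:
// {IP: "", PrivatePort: 80, PublicPort: 80, Type: "tcp"} and
// {IP: "0.0.0.0", PrivatePort: 22, PublicPort: 49153, Type: "tcp"}
// would be rendered as "0.0.0.0:49153->22/tcp, 80/tcp" (entries are sorted
// lexically before being joined).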
func (cli *DockerCli) CmdPs(args ...string) error {
cmd := cli.Subcmd("ps", "[OPTIONS]", "List containers")
quiet := cmd.Bool("q", false, "Only display numeric IDs")
size := cmd.Bool("s", false, "Display sizes")
all := cmd.Bool("a", false, "Show all containers. Only running containers are shown by default.")
noTrunc := cmd.Bool("notrunc", false, "Don't truncate output")
nLatest := cmd.Bool("l", false, "Show only the latest created container, include non-running ones.")
since := cmd.String("sinceId", "", "Show only containers created since Id, include non-running ones.")
before := cmd.String("beforeId", "", "Show only container created before Id, include non-running ones.")
last := cmd.Int("n", -1, "Show n last created containers, include non-running ones.")
if err := cmd.Parse(args); err != nil {
return nil
}
v := url.Values{}
if *last == -1 && *nLatest {
*last = 1
}
if *all {
v.Set("all", "1")
}
if *last != -1 {
v.Set("limit", strconv.Itoa(*last))
}
if *since != "" {
v.Set("since", *since)
}
if *before != "" {
v.Set("before", *before)
}
if *size {
v.Set("size", "1")
}
body, _, err := cli.call("GET", "/containers/json?"+v.Encode(), nil)
if err != nil {
return err
}
var outs []APIContainers
err = json.Unmarshal(body, &outs)
if err != nil {
return err
}
w := tabwriter.NewWriter(cli.out, 20, 1, 3, ' ', 0)
if !*quiet {
fmt.Fprint(w, "CONTAINER ID\tIMAGE\tCOMMAND\tCREATED\tSTATUS\tPORTS\tNAMES")
if *size {
fmt.Fprintln(w, "\tSIZE")
} else {
fmt.Fprint(w, "\n")
}
}
for _, out := range outs {
if !*noTrunc {
out.ID = utils.TruncateID(out.ID)
}
// Remove the leading / from the names
for i := 0; i < len(out.Names); i++ {
out.Names[i] = out.Names[i][1:]
}
if !*quiet {
if !*noTrunc {
out.Command = utils.Trunc(out.Command, 20)
}
fmt.Fprintf(w, "%s\t%s\t%s\t%s ago\t%s\t%s\t%s\t", out.ID, out.Image, out.Command, utils.HumanDuration(time.Now().UTC().Sub(time.Unix(out.Created, 0))), out.Status, displayablePorts(out.Ports), strings.Join(out.Names, ","))
if *size {
if out.SizeRootFs > 0 {
fmt.Fprintf(w, "%s (virtual %s)\n", utils.HumanSize(out.SizeRw), utils.HumanSize(out.SizeRootFs))
} else {
fmt.Fprintf(w, "%s\n", utils.HumanSize(out.SizeRw))
}
} else {
fmt.Fprint(w, "\n")
}
} else {
fmt.Fprintln(w, out.ID)
}
}
if !*quiet {
w.Flush()
}
return nil
}
func (cli *DockerCli) CmdCommit(args ...string) error {
cmd := cli.Subcmd("commit", "[OPTIONS] CONTAINER [REPOSITORY[:TAG]]", "Create a new image from a container's changes")
flComment := cmd.String("m", "", "Commit message")
flAuthor := cmd.String("author", "", "Author (eg. \"John Hannibal Smith <[email protected]>\"")
flConfig := cmd.String("run", "", "Config automatically applied when the image is run. "+`(ex: -run='{"Cmd": ["cat", "/world"], "PortSpecs": ["22"]}')`)
if err := cmd.Parse(args); err != nil {
return nil
}
var name, repository, tag string
if cmd.NArg() == 3 {
fmt.Fprintf(cli.err, "[DEPRECATED] The format 'CONTAINER [REPOSITORY [TAG]]' as been deprecated. Please use CONTAINER [REPOSITORY[:TAG]]\n")
name, repository, tag = cmd.Arg(0), cmd.Arg(1), cmd.Arg(2)
} else {
name = cmd.Arg(0)
repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
}
if name == "" {
cmd.Usage()
return nil
}
v := url.Values{}
v.Set("container", name)
v.Set("repo", repository)
v.Set("tag", tag)
v.Set("comment", *flComment)
v.Set("author", *flAuthor)
var config *Config
if *flConfig != "" {
config = &Config{}
if err := json.Unmarshal([]byte(*flConfig), config); err != nil {
return err
}
}
body, _, err := cli.call("POST", "/commit?"+v.Encode(), config)
if err != nil {
return err
}
apiID := &APIID{}
err = json.Unmarshal(body, apiID)
if err != nil {
return err
}
fmt.Fprintf(cli.out, "%s\n", apiID.ID)
return nil
}
func (cli *DockerCli) CmdEvents(args ...string) error {
cmd := cli.Subcmd("events", "[OPTIONS]", "Get real time events from the server")
since := cmd.String("since", "", "Show previously created events and then stream.")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 0 {
cmd.Usage()
return nil
}
v := url.Values{}
if *since != "" {
loc := time.FixedZone(time.Now().Zone())
format := "2006-01-02 15:04:05 -0700 MST"
if len(*since) < len(format) {
format = format[:len(*since)]
}
if t, err := time.ParseInLocation(format, *since, loc); err == nil {
v.Set("since", strconv.FormatInt(t.Unix(), 10))
} else {
v.Set("since", *since)
}
}
if err := cli.stream("GET", "/events?"+v.Encode(), nil, cli.out, nil); err != nil {
return err
}
return nil
}
func (cli *DockerCli) CmdExport(args ...string) error {
cmd := cli.Subcmd("export", "CONTAINER", "Export the contents of a filesystem as a tar archive to STDOUT")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
if err := cli.stream("GET", "/containers/"+cmd.Arg(0)+"/export", nil, cli.out, nil); err != nil {
return err
}
return nil
}
func (cli *DockerCli) CmdDiff(args ...string) error {
cmd := cli.Subcmd("diff", "CONTAINER", "Inspect changes on a container's filesystem")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
body, _, err := cli.call("GET", "/containers/"+cmd.Arg(0)+"/changes", nil)
if err != nil {
return err
}
changes := []Change{}
err = json.Unmarshal(body, &changes)
if err != nil {
return err
}
for _, change := range changes {
fmt.Fprintf(cli.out, "%s\n", change.String())
}
return nil
}
func (cli *DockerCli) CmdLogs(args ...string) error {
cmd := cli.Subcmd("logs", "CONTAINER", "Fetch the logs of a container")
follow := cmd.Bool("f", false, "Follow log output")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
name := cmd.Arg(0)
body, _, err := cli.call("GET", "/containers/"+name+"/json", nil)
if err != nil {
return err
}
container := &Container{}
err = json.Unmarshal(body, container)
if err != nil {
return err
}
v := url.Values{}
v.Set("logs", "1")
v.Set("stdout", "1")
v.Set("stderr", "1")
if *follow && container.State.Running {
v.Set("stream", "1")
}
if err := cli.hijack("POST", "/containers/"+name+"/attach?"+v.Encode(), container.Config.Tty, nil, cli.out, cli.err, nil); err != nil {
return err
}
return nil
}
func (cli *DockerCli) CmdAttach(args ...string) error {
cmd := cli.Subcmd("attach", "[OPTIONS] CONTAINER", "Attach to a running container")
noStdin := cmd.Bool("nostdin", false, "Do not attach stdin")
proxy := cmd.Bool("sig-proxy", true, "Proxify all received signal to the process (even in non-tty mode)")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
name := cmd.Arg(0)
body, _, err := cli.call("GET", "/containers/"+name+"/json", nil)
if err != nil {
return err
}
container := &Container{}
err = json.Unmarshal(body, container)
if err != nil {
return err
}
if !container.State.IsRunning() {
return fmt.Errorf("Impossible to attach to a stopped container, start it first")
}
if container.Config.Tty && cli.isTerminal {
if err := cli.monitorTtySize(cmd.Arg(0)); err != nil {
utils.Debugf("Error monitoring TTY size: %s", err)
}
}
var in io.ReadCloser
v := url.Values{}
v.Set("stream", "1")
if !*noStdin && container.Config.OpenStdin {
v.Set("stdin", "1")
in = cli.in
}
v.Set("stdout", "1")
v.Set("stderr", "1")
if *proxy && !container.Config.Tty {
sigc := cli.forwardAllSignals(cmd.Arg(0))
defer utils.StopCatch(sigc)
}
if err := cli.hijack("POST", "/containers/"+cmd.Arg(0)+"/attach?"+v.Encode(), container.Config.Tty, in, cli.out, cli.err, nil); err != nil {
return err
}
_, status, err := getExitCode(cli, cmd.Arg(0))
if err != nil {
return err
}
if status != 0 {
return &utils.StatusError{StatusCode: status}
}
return nil
}
func (cli *DockerCli) CmdSearch(args ...string) error {
cmd := cli.Subcmd("search", "TERM", "Search the docker index for images")
noTrunc := cmd.Bool("notrunc", false, "Don't truncate output")
trusted := cmd.Bool("trusted", false, "Only show trusted builds")
stars := cmd.Int("stars", 0, "Only displays with at least xxx stars")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
v := url.Values{}
v.Set("term", cmd.Arg(0))
body, _, err := cli.call("GET", "/images/search?"+v.Encode(), nil)
if err != nil {
return err
}
outs := []registry.SearchResult{}
err = json.Unmarshal(body, &outs)
if err != nil {
return err
}
w := tabwriter.NewWriter(cli.out, 10, 1, 3, ' ', 0)
fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tTRUSTED\n")
for _, out := range outs {
if (*trusted && !out.IsTrusted) || (*stars > out.StarCount) {
continue
}
desc := strings.Replace(out.Description, "\n", " ", -1)
desc = strings.Replace(desc, "\r", " ", -1)
if !*noTrunc && len(desc) > 45 {
desc = utils.Trunc(desc, 42) + "..."
}
fmt.Fprintf(w, "%s\t%s\t%d\t", out.Name, desc, out.StarCount)
if out.IsOfficial {
fmt.Fprint(w, "[OK]")
}
fmt.Fprint(w, "\t")
if out.IsTrusted {
fmt.Fprint(w, "[OK]")
}
fmt.Fprint(w, "\n")
}
w.Flush()
return nil
}
// Ports type - Used to parse multiple -p flags
type ports []int
func (cli *DockerCli) CmdTag(args ...string) error {
cmd := cli.Subcmd("tag", "[OPTIONS] IMAGE REPOSITORY[:TAG]", "Tag an image into a repository")
force := cmd.Bool("f", false, "Force")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 2 && cmd.NArg() != 3 {
cmd.Usage()
return nil
}
var repository, tag string
if cmd.NArg() == 3 {
fmt.Fprintf(cli.err, "[DEPRECATED] The format 'IMAGE [REPOSITORY [TAG]]' as been deprecated. Please use IMAGE [REPOSITORY[:TAG]]\n")
repository, tag = cmd.Arg(1), cmd.Arg(2)
} else {
repository, tag = utils.ParseRepositoryTag(cmd.Arg(1))
}
v := url.Values{}
v.Set("repo", repository)
v.Set("tag", tag)
if *force {
v.Set("force", "1")
}
if _, _, err := cli.call("POST", "/images/"+cmd.Arg(0)+"/tag?"+v.Encode(), nil); err != nil {
return err
}
return nil
}
//FIXME Only used in tests
func ParseRun(args []string, capabilities *Capabilities) (*Config, *HostConfig, *flag.FlagSet, error) {
cmd := flag.NewFlagSet("run", flag.ContinueOnError)
cmd.SetOutput(ioutil.Discard)
cmd.Usage = nil
return parseRun(cmd, args, capabilities)
}
func parseRun(cmd *flag.FlagSet, args []string, capabilities *Capabilities) (*Config, *HostConfig, *flag.FlagSet, error) {
var (
// FIXME: use utils.ListOpts for attach and volumes?
flAttach = NewListOpts(ValidateAttach)
flVolumes = NewListOpts(ValidatePath)
flLinks = NewListOpts(ValidateLink)
flEnv = NewListOpts(ValidateEnv)
flPublish ListOpts
flExpose ListOpts
flDns ListOpts
flVolumesFrom ListOpts
flLxcOpts ListOpts
flAutoRemove = cmd.Bool("rm", false, "Automatically remove the container when it exits (incompatible with -d)")
flDetach = cmd.Bool("d", false, "Detached mode: Run container in the background, print new container id")
flNetwork = cmd.Bool("n", true, "Enable networking for this container")
flPrivileged = cmd.Bool("privileged", false, "Give extended privileges to this container")
flPublishAll = cmd.Bool("P", false, "Publish all exposed ports to the host interfaces")
flStdin = cmd.Bool("i", false, "Keep stdin open even if not attached")
flTty = cmd.Bool("t", false, "Allocate a pseudo-tty")
flContainerIDFile = cmd.String("cidfile", "", "Write the container ID to the file")
flEntrypoint = cmd.String("entrypoint", "", "Overwrite the default entrypoint of the image")
flHostname = cmd.String("h", "", "Container host name")
flMemoryString = cmd.String("m", "", "Memory limit (format: <number><optional unit>, where unit = b, k, m or g)")
flUser = cmd.String("u", "", "Username or UID")
flWorkingDir = cmd.String("w", "", "Working directory inside the container")
flCpuShares = cmd.Int64("c", 0, "CPU shares (relative weight)")
// For documentation purpose
_ = cmd.Bool("sig-proxy", true, "Proxify all received signal to the process (even in non-tty mode)")
_ = cmd.String("name", "", "Assign a name to the container")
)
cmd.Var(&flAttach, "a", "Attach to stdin, stdout or stderr.")
cmd.Var(&flVolumes, "v", "Bind mount a volume (e.g. from the host: -v /host:/container, from docker: -v /container)")
cmd.Var(&flLinks, "link", "Add link to another container (name:alias)")
cmd.Var(&flEnv, "e", "Set environment variables")
cmd.Var(&flPublish, "p", fmt.Sprintf("Publish a container's port to the host (format: %s) (use 'docker port' to see the actual mapping)", PortSpecTemplateFormat))
cmd.Var(&flExpose, "expose", "Expose a port from the container without publishing it to your host")
cmd.Var(&flDns, "dns", "Set custom dns servers")
cmd.Var(&flVolumesFrom, "volumes-from", "Mount volumes from the specified container(s)")
cmd.Var(&flLxcOpts, "lxc-conf", "Add custom lxc options -lxc-conf=\"lxc.cgroup.cpuset.cpus = 0,1\"")
if err := cmd.Parse(args); err != nil {
return nil, nil, cmd, err
}
// Check if the kernel supports memory limit cgroup.
if capabilities != nil && *flMemoryString != "" && !capabilities.MemoryLimit {
*flMemoryString = ""
}
// Validate input params
if *flDetach && flAttach.Len() > 0 {
return nil, nil, cmd, ErrConflictAttachDetach
}
if *flWorkingDir != "" && !path.IsAbs(*flWorkingDir) {
return nil, nil, cmd, ErrInvalidWorikingDirectory
}
if *flDetach && *flAutoRemove {
return nil, nil, cmd, ErrConflictDetachAutoRemove
}
// If neither -d nor -a is set, attach to everything by default
if flAttach.Len() == 0 && !*flDetach {
flAttach.Set("stdout")
flAttach.Set("stderr")
if *flStdin {
flAttach.Set("stdin")
}
}
var flMemory int64
if *flMemoryString != "" {
parsedMemory, err := utils.RAMInBytes(*flMemoryString)
if err != nil {
return nil, nil, cmd, err
}
flMemory = parsedMemory
}
var binds []string
// add any bind targets to the list of container volumes
for bind := range flVolumes.GetMap() {
if arr := strings.Split(bind, ":"); len(arr) > 1 {
if arr[0] == "/" {
return nil, nil, cmd, fmt.Errorf("Invalid bind mount: source can't be '/'")
}
dstDir := arr[1]
flVolumes.Set(dstDir)
binds = append(binds, bind)
flVolumes.Delete(bind)
} else if bind == "/" {
return nil, nil, cmd, fmt.Errorf("Invalid volume: path can't be '/'")
}
}
var (
parsedArgs = cmd.Args()
runCmd []string
entrypoint []string
image string
)
if len(parsedArgs) >= 1 {
image = cmd.Arg(0)
}
if len(parsedArgs) > 1 {
runCmd = parsedArgs[1:]
}
if *flEntrypoint != "" {
entrypoint = []string{*flEntrypoint}
}
lxcConf, err := parseLxcConfOpts(flLxcOpts)
if err != nil {
return nil, nil, cmd, err
}
var (
domainname string
hostname = *flHostname
parts = strings.SplitN(hostname, ".", 2)
)
if len(parts) > 1 {
hostname = parts[0]
domainname = parts[1]
}
ports, portBindings, err := parsePortSpecs(flPublish.GetAll())
if err != nil {
return nil, nil, cmd, err
}
// Merge in exposed ports to the map of published ports
for _, e := range flExpose.GetAll() {
if strings.Contains(e, ":") {
return nil, nil, cmd, fmt.Errorf("Invalid port format for -expose: %s", e)
}
p := NewPort(splitProtoPort(e))
if _, exists := ports[p]; !exists {
ports[p] = struct{}{}
}
}
config := &Config{
Hostname: hostname,
Domainname: domainname,
PortSpecs: nil, // Deprecated
ExposedPorts: ports,
User: *flUser,
Tty: *flTty,
NetworkDisabled: !*flNetwork,
OpenStdin: *flStdin,
Memory: flMemory,
CpuShares: *flCpuShares,
AttachStdin: flAttach.Get("stdin"),
AttachStdout: flAttach.Get("stdout"),
AttachStderr: flAttach.Get("stderr"),
Env: flEnv.GetAll(),
Cmd: runCmd,
Dns: flDns.GetAll(),
Image: image,
Volumes: flVolumes.GetMap(),
VolumesFrom: strings.Join(flVolumesFrom.GetAll(), ","),
Entrypoint: entrypoint,
WorkingDir: *flWorkingDir,
}
hostConfig := &HostConfig{
Binds: binds,
ContainerIDFile: *flContainerIDFile,
LxcConf: lxcConf,
Privileged: *flPrivileged,
PortBindings: portBindings,
Links: flLinks.GetAll(),
PublishAllPorts: *flPublishAll,
}
if capabilities != nil && flMemory > 0 && !capabilities.SwapLimit {
//fmt.Fprintf(stdout, "WARNING: Your kernel does not support swap limit capabilities. Limitation discarded.\n")
config.MemorySwap = -1
}
// When allocating stdin in attached mode, close stdin at client disconnect
if config.OpenStdin && config.AttachStdin {
config.StdinOnce = true
}
return config, hostConfig, cmd, nil
}
func (cli *DockerCli) CmdRun(args ...string) error {
config, hostConfig, cmd, err := parseRun(cli.Subcmd("run", "[OPTIONS] IMAGE [COMMAND] [ARG...]", "Run a command in a new container"), args, nil)
if err != nil {
return err
}
if config.Image == "" {
cmd.Usage()
return nil
}
// Retrieve relevant client-side config
var (
flName = cmd.Lookup("name")
flRm = cmd.Lookup("rm")
flSigProxy = cmd.Lookup("sig-proxy")
autoRemove, _ = strconv.ParseBool(flRm.Value.String())
sigProxy, _ = strconv.ParseBool(flSigProxy.Value.String())
)
// Disable sigProxy when a TTY is allocated
if config.Tty {
sigProxy = false
}
var containerIDFile io.WriteCloser
if len(hostConfig.ContainerIDFile) > 0 {
if _, err := os.Stat(hostConfig.ContainerIDFile); err == nil {
return fmt.Errorf("cid file found, make sure the other container isn't running or delete %s", hostConfig.ContainerIDFile)
}
if containerIDFile, err = os.Create(hostConfig.ContainerIDFile); err != nil {
return fmt.Errorf("failed to create the container ID file: %s", err)
}
defer containerIDFile.Close()
}
containerValues := url.Values{}
if name := flName.Value.String(); name != "" {
containerValues.Set("name", name)
}
// Create the container
body, statusCode, err := cli.call("POST", "/containers/create?"+containerValues.Encode(), config)
// If the image is not found, try to pull it and retry the create
if statusCode == 404 {
_, tag := utils.ParseRepositoryTag(config.Image)
if tag == "" {
tag = DEFAULTTAG
}
fmt.Fprintf(cli.err, "Unable to find image '%s' (tag: %s) locally\n", config.Image, tag)
v := url.Values{}
repos, tag := utils.ParseRepositoryTag(config.Image)
v.Set("fromImage", repos)
v.Set("tag", tag)
// Resolve the Repository name from fqn to endpoint + name
endpoint, _, err := registry.ResolveRepositoryName(repos)
if err != nil {
return err
}
// Load the auth config file, to be able to pull the image
cli.LoadConfigFile()
// Resolve the Auth config relevant for this server
authConfig := cli.configFile.ResolveAuthConfig(endpoint)
buf, err := json.Marshal(authConfig)
if err != nil {
return err
}
registryAuthHeader := []string{
base64.URLEncoding.EncodeToString(buf),
}
if err = cli.stream("POST", "/images/create?"+v.Encode(), nil, cli.err, map[string][]string{"X-Registry-Auth": registryAuthHeader}); err != nil {
return err
}
if body, _, err = cli.call("POST", "/containers/create?"+containerValues.Encode(), config); err != nil {
return err
}
} else if err != nil {
return err
}
var runResult APIRun
if err := json.Unmarshal(body, &runResult); err != nil {
return err
}
for _, warning := range runResult.Warnings {
fmt.Fprintf(cli.err, "WARNING: %s\n", warning)
}
if len(hostConfig.ContainerIDFile) > 0 {
if _, err = containerIDFile.Write([]byte(runResult.ID)); err != nil {
return fmt.Errorf("failed to write the container ID to the file: %s", err)
}
}
if sigProxy {
sigc := cli.forwardAllSignals(runResult.ID)
defer utils.StopCatch(sigc)
}
var (
waitDisplayId chan struct{}
errCh chan error
)
if !config.AttachStdout && !config.AttachStderr {
// Make this asynchronous in order to let the client write to stdin before having to read the ID
waitDisplayId = make(chan struct{})
go func() {
defer close(waitDisplayId)
fmt.Fprintf(cli.out, "%s\n", runResult.ID)
}()
}
// We need to instantiate the chan because the select needs it. It can
// be closed but can't be uninitialized.
hijacked := make(chan io.Closer)
// Block the return until the chan gets closed
defer func() {
utils.Debugf("End of CmdRun(), Waiting for hijack to finish.")
if _, ok := <-hijacked; ok {
utils.Errorf("Hijack did not finish (chan still open)")
}
}()
if config.AttachStdin || config.AttachStdout || config.AttachStderr {
var (
out, stderr io.Writer
in io.ReadCloser
v = url.Values{}
)
v.Set("stream", "1")
if config.AttachStdin {
v.Set("stdin", "1")
in = cli.in
}
if config.AttachStdout {
v.Set("stdout", "1")
out = cli.out
}
if config.AttachStderr {
v.Set("stderr", "1")
if config.Tty {
stderr = cli.out
} else {
stderr = cli.err
}
}
errCh = utils.Go(func() error {
return cli.hijack("POST", "/containers/"+runResult.ID+"/attach?"+v.Encode(), config.Tty, in, out, stderr, hijacked)
})
} else {
close(hijacked)
}
// Acknowledge the hijack before starting
select {
case closer := <-hijacked:
// Make sure that the hijacked connection gets closed when returning
// (this closes the hijack chan and frees the server's goroutines).
if closer != nil {
defer closer.Close()
}
case err := <-errCh:
if err != nil {
utils.Debugf("Error hijack: %s", err)
return err
}
}
// Start the container
if _, _, err = cli.call("POST", "/containers/"+runResult.ID+"/start", hostConfig); err != nil {
return err
}
if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && cli.isTerminal {
if err := cli.monitorTtySize(runResult.ID); err != nil {
utils.Errorf("Error monitoring TTY size: %s\n", err)
}
}
if errCh != nil {
if err := <-errCh; err != nil {
utils.Debugf("Error hijack: %s", err)
return err
}
}
// Detached mode: wait for the id to be displayed and return.
if !config.AttachStdout && !config.AttachStderr {
// Detached mode
<-waitDisplayId
return nil
}
var status int
// Attached mode
if autoRemove {
// Autoremove: wait for the container to finish, retrieve
// the exit code and remove the container
if _, _, err := cli.call("POST", "/containers/"+runResult.ID+"/wait", nil); err != nil {
return err
}
if _, status, err = getExitCode(cli, runResult.ID); err != nil {
return err
}
if _, _, err := cli.call("DELETE", "/containers/"+runResult.ID, nil); err != nil {
return err
}
} else {
// No Autoremove: Simply retrieve the exit code
if _, status, err = getExitCode(cli, runResult.ID); err != nil {
return err
}
}
if status != 0 {
return &utils.StatusError{StatusCode: status}
}
return nil
}
func (cli *DockerCli) CmdCp(args ...string) error {
cmd := cli.Subcmd("cp", "CONTAINER:PATH HOSTPATH", "Copy files/folders from the PATH to the HOSTPATH")
if err := cmd.Parse(args); err != nil {
return nil
}
if cmd.NArg() != 2 {
cmd.Usage()
return nil
}
var copyData APICopy
info := strings.Split(cmd.Arg(0), ":")
if len(info) != 2 {
return fmt.Errorf("Error: Path not specified")
}
copyData.Resource = info[1]
copyData.HostPath = cmd.Arg(1)
data, statusCode, err := cli.call("POST", "/containers/"+info[0]+"/copy", copyData)
if err != nil {
return err
}
if statusCode == 200 {
r := bytes.NewReader(data)
if err := archive.Untar(r, copyData.HostPath, nil); err != nil {
return err
}
}
return nil
}
func (cli *DockerCli) CmdSave(args ...string) error {
cmd := cli.Subcmd("save", "IMAGE", "Save an image to a tar archive (streamed to stdout)")
if err := cmd.Parse(args); err != nil {
return err
}
if cmd.NArg() != 1 {
cmd.Usage()
return nil
}
image := cmd.Arg(0)
if err := cli.stream("GET", "/images/"+image+"/get", nil, cli.out, nil); err != nil {
return err
}
return nil
}
func (cli *DockerCli) CmdLoad(args ...string) error {
cmd := cli.Subcmd("load", "SOURCE", "Load an image from a tar archive")
if err := cmd.Parse(args); err != nil {
return err
}
if cmd.NArg() != 0 {
cmd.Usage()
return nil
}
if err := cli.stream("POST", "/images/load", cli.in, cli.out, nil); err != nil {
return err
}
return nil
}
func (cli *DockerCli) call(method, path string, data interface{}) ([]byte, int, error) {
var params io.Reader
if data != nil {
buf, err := json.Marshal(data)
if err != nil {
return nil, -1, err
}
params = bytes.NewBuffer(buf)
}
// fixme: refactor client to support redirect
re := regexp.MustCompile("/+")
path = re.ReplaceAllString(path, "/")
req, err := http.NewRequest(method, fmt.Sprintf("/v%g%s", APIVERSION, path), params)
if err != nil {
return nil, -1, err
}
req.Header.Set("User-Agent", "Docker-Client/"+VERSION)
req.Host = cli.addr
if data != nil {
req.Header.Set("Content-Type", "application/json")
} else if method == "POST" {
req.Header.Set("Content-Type", "plain/text")
}
dial, err := net.Dial(cli.proto, cli.addr)
if err != nil {
if strings.Contains(err.Error(), "connection refused") {
return nil, -1, ErrConnectionRefused
}
return nil, -1, err
}
clientconn := httputil.NewClientConn(dial, nil)
resp, err := clientconn.Do(req)
defer clientconn.Close()
if err != nil {
if strings.Contains(err.Error(), "connection refused") {
return nil, -1, ErrConnectionRefused
}
return nil, -1, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, -1, err
}
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
if len(body) == 0 {
return nil, resp.StatusCode, fmt.Errorf("Error: %s", http.StatusText(resp.StatusCode))
}
return nil, resp.StatusCode, fmt.Errorf("Error: %s", bytes.TrimSpace(body))
}
return body, resp.StatusCode, nil
}
func (cli *DockerCli) stream(method, path string, in io.Reader, out io.Writer, headers map[string][]string) error {
if (method == "POST" || method == "PUT") && in == nil {
in = bytes.NewReader([]byte{})
}
// fixme: refactor client to support redirect
re := regexp.MustCompile("/+")
path = re.ReplaceAllString(path, "/")
req, err := http.NewRequest(method, fmt.Sprintf("/v%g%s", APIVERSION, path), in)
if err != nil {
return err
}
req.Header.Set("User-Agent", "Docker-Client/"+VERSION)
req.Host = cli.addr
if method == "POST" {
req.Header.Set("Content-Type", "plain/text")
}
if headers != nil {
for k, v := range headers {
req.Header[k] = v
}
}
dial, err := net.Dial(cli.proto, cli.addr)
if err != nil {
if strings.Contains(err.Error(), "connection refused") {
return fmt.Errorf("Can't connect to docker daemon. Is 'docker -d' running on this host?")
}
return err
}
clientconn := httputil.NewClientConn(dial, nil)
resp, err := clientconn.Do(req)
defer clientconn.Close()
if err != nil {
if strings.Contains(err.Error(), "connection refused") {
return fmt.Errorf("Can't connect to docker daemon. Is 'docker -d' running on this host?")
}
return err
}
defer resp.Body.Close()
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
if len(body) == 0 {
return fmt.Errorf("Error :%s", http.StatusText(resp.StatusCode))
}
return fmt.Errorf("Error: %s", bytes.TrimSpace(body))
}
if matchesContentType(resp.Header.Get("Content-Type"), "application/json") {
return utils.DisplayJSONMessagesStream(resp.Body, out, cli.terminalFd, cli.isTerminal)
}
if _, err := io.Copy(out, resp.Body); err != nil {
return err
}
return nil
}
func (cli *DockerCli) hijack(method, path string, setRawTerminal bool, in io.ReadCloser, stdout, stderr io.Writer, started chan io.Closer) error {
defer func() {
if started != nil {
close(started)
}
}()
// fixme: refactor client to support redirect
re := regexp.MustCompile("/+")
path = re.ReplaceAllString(path, "/")
req, err := http.NewRequest(method, fmt.Sprintf("/v%g%s", APIVERSION, path), nil)
if err != nil {
return err
}
req.Header.Set("User-Agent", "Docker-Client/"+VERSION)
req.Header.Set("Content-Type", "plain/text")
req.Host = cli.addr
dial, err := net.Dial(cli.proto, cli.addr)
if err != nil {
if strings.Contains(err.Error(), "connection refused") {
return fmt.Errorf("Can't connect to docker daemon. Is 'docker -d' running on this host?")
}
return err
}
clientconn := httputil.NewClientConn(dial, nil)
defer clientconn.Close()
// Server hijacks the connection, error 'connection closed' expected
clientconn.Do(req)
rwc, br := clientconn.Hijack()
defer rwc.Close()
if started != nil {
started <- rwc
}
var receiveStdout chan error
var oldState *term.State
if in != nil && setRawTerminal && cli.isTerminal && os.Getenv("NORAW") == "" {
oldState, err = term.SetRawTerminal(cli.terminalFd)
if err != nil {
return err
}
defer term.RestoreTerminal(cli.terminalFd, oldState)
}
if stdout != nil || stderr != nil {
receiveStdout = utils.Go(func() (err error) {
defer func() {
if in != nil {
if setRawTerminal && cli.isTerminal {
term.RestoreTerminal(cli.terminalFd, oldState)
}
in.Close()
}
}()
// When TTY is ON, use regular copy
if setRawTerminal {
_, err = io.Copy(stdout, br)
} else {
_, err = utils.StdCopy(stdout, stderr, br)
}
utils.Debugf("[hijack] End of stdout")
return err
})
}
sendStdin := utils.Go(func() error {
if in != nil {
io.Copy(rwc, in)
utils.Debugf("[hijack] End of stdin")
}
if tcpc, ok := rwc.(*net.TCPConn); ok {
if err := tcpc.CloseWrite(); err != nil {
utils.Errorf("Couldn't send EOF: %s\n", err)
}
} else if unixc, ok := rwc.(*net.UnixConn); ok {
if err := unixc.CloseWrite(); err != nil {
utils.Errorf("Couldn't send EOF: %s\n", err)
}
}
// Discard errors due to pipe interruption
return nil
})
if stdout != nil || stderr != nil {
if err := <-receiveStdout; err != nil {
utils.Errorf("Error receiveStdout: %s", err)
return err
}
}
if !cli.isTerminal {
if err := <-sendStdin; err != nil {
utils.Errorf("Error sendStdin: %s", err)
return err
}
}
return nil
}
func (cli *DockerCli) getTtySize() (int, int) {
if !cli.isTerminal {
return 0, 0
}
ws, err := term.GetWinsize(cli.terminalFd)
if err != nil {
utils.Errorf("Error getting size: %s", err)
if ws == nil {
return 0, 0
}
}
return int(ws.Height), int(ws.Width)
}
func (cli *DockerCli) resizeTty(id string) {
height, width := cli.getTtySize()
if height == 0 && width == 0 {
return
}
v := url.Values{}
v.Set("h", strconv.Itoa(height))
v.Set("w", strconv.Itoa(width))
if _, _, err := cli.call("POST", "/containers/"+id+"/resize?"+v.Encode(), nil); err != nil {
utils.Errorf("Error resize: %s", err)
}
}
func (cli *DockerCli) monitorTtySize(id string) error {
cli.resizeTty(id)
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, syscall.SIGWINCH)
go func() {
for range sigchan {
cli.resizeTty(id)
}
}()
return nil
}
func (cli *DockerCli) Subcmd(name, signature, description string) *flag.FlagSet {
flags := flag.NewFlagSet(name, flag.ContinueOnError)
flags.Usage = func() {
fmt.Fprintf(cli.err, "\nUsage: docker %s %s\n\n%s\n\n", name, signature, description)
flags.PrintDefaults()
os.Exit(2)
}
return flags
}
func (cli *DockerCli) LoadConfigFile() (err error) {
cli.configFile, err = auth.LoadConfig(os.Getenv("HOME"))
if err != nil {
fmt.Fprintf(cli.err, "WARNING: %s\n", err)
}
return err
}
func waitForExit(cli *DockerCli, containerId string) (int, error) {
body, _, err := cli.call("POST", "/containers/"+containerId+"/wait", nil)
if err != nil {
// If we can't connect, then the daemon probably died.
if err != ErrConnectionRefused {
return -1, err
}
return -1, nil
}
var out APIWait
if err := json.Unmarshal(body, &out); err != nil {
return -1, err
}
return out.StatusCode, nil
}
// getExitCode performs an inspect on the container. It returns
// the running state and the exit code.
func getExitCode(cli *DockerCli, containerId string) (bool, int, error) {
body, _, err := cli.call("GET", "/containers/"+containerId+"/json", nil)
if err != nil {
// If we can't connect, then the daemon probably died.
if err != ErrConnectionRefused {
return false, -1, err
}
return false, -1, nil
}
c := &Container{}
if err := json.Unmarshal(body, c); err != nil {
return false, -1, err
}
return c.State.IsRunning(), c.State.GetExitCode(), nil
}
func NewDockerCli(in io.ReadCloser, out, err io.Writer, proto, addr string) *DockerCli {
var (
isTerminal = false
terminalFd uintptr
)
if in != nil {
if file, ok := in.(*os.File); ok {
terminalFd = file.Fd()
isTerminal = term.IsTerminal(terminalFd)
}
}
if err == nil {
err = out
}
return &DockerCli{
proto: proto,
addr: addr,
in: in,
out: out,
err: err,
isTerminal: isTerminal,
terminalFd: terminalFd,
}
}
type DockerCli struct {
proto string
addr string
configFile *auth.ConfigFile
in io.ReadCloser
out io.Writer
err io.Writer
isTerminal bool
terminalFd uintptr
}
main.go
package main
import (
"bytes"
"context"
"crypto/sha256"
"database/sql"
"errors"
"hash"
"io"
"net/http"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/BurntSushi/toml"
_ "github.com/mattn/go-oci8"
"fmt"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prometheus/common/log"
"gopkg.in/alecthomas/kingpin.v2"
//Required for debugging
//_ "net/http/pprof"
)
var (
// Version will be set at build time.
Version = "0.0.0.dev"
listenAddress = kingpin.Flag("web.listen-address", "Address to listen on for web interface and telemetry. (env: LISTEN_ADDRESS)").Default(getEnv("LISTEN_ADDRESS", ":9161")).String()
metricPath = kingpin.Flag("web.telemetry-path", "Path under which to expose metrics. (env: TELEMETRY_PATH)").Default(getEnv("TELEMETRY_PATH", "/metrics")).String()
defaultFileMetrics = kingpin.Flag("default.metrics", "File with default metrics in a TOML file. (env: DEFAULT_METRICS)").Default(getEnv("DEFAULT_METRICS", "default-metrics.toml")).String()
customMetrics = kingpin.Flag("custom.metrics", "File that may contain various custom metrics in a TOML file. (env: CUSTOM_METRICS)").Default(getEnv("CUSTOM_METRICS", "")).String()
queryTimeout = kingpin.Flag("query.timeout", "Query timeout (in seconds). (env: QUERY_TIMEOUT)").Default(getEnv("QUERY_TIMEOUT", "5")).String()
maxIdleConns = kingpin.Flag("database.maxIdleConns", "Number of maximum idle connections in the connection pool. (env: DATABASE_MAXIDLECONNS)").Default(getEnv("DATABASE_MAXIDLECONNS", "0")).Int()
maxOpenConns = kingpin.Flag("database.maxOpenConns", "Number of maximum open connections in the connection pool. (env: DATABASE_MAXOPENCONNS)").Default(getEnv("DATABASE_MAXOPENCONNS", "10")).Int()
securedMetrics = kingpin.Flag("web.secured-metrics", "Expose metrics using https.").Default("false").Bool()
serverCert = kingpin.Flag("web.ssl-server-cert", "Path to the PEM encoded certificate").ExistingFile()
serverKey = kingpin.Flag("web.ssl-server-key", "Path to the PEM encoded key").ExistingFile()
)
// Metric name parts.
const (
namespace = "oracledb"
exporter = "exporter"
)
// Metrics object description
type Metric struct {
Context string
Labels []string
MetricsDesc map[string]string
MetricsType map[string]string
MetricsBuckets map[string]map[string]string
FieldToAppend string
Request string
IgnoreZeroResult bool
}
// Used to load multiple metrics from file
type Metrics struct {
Metric []Metric
}
// Metrics to scrape. Uses external files (default-metrics.toml and custom ones if provided)
var (
metricsToScrap Metrics
additionalMetrics Metrics
hashMap map[int][]byte
)
// Exporter collects Oracle DB metrics. It implements prometheus.Collector.
type Exporter struct {
dsn string
duration, error prometheus.Gauge
totalScrapes prometheus.Counter
scrapeErrors *prometheus.CounterVec
up prometheus.Gauge
db *sql.DB
}
// getEnv returns the value of an environment variable, or returns the provided fallback value
func getEnv(key, fallback string) string {
if value, ok := os.LookupEnv(key); ok {
return value
}
return fallback
}
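// A minimal usage sketch (hypothetical variable names): getEnv lets each
// kingpin flag default to an environment variable when one is set, e.g.
//
//	os.Setenv("QUERY_TIMEOUT", "30")
//	getEnv("QUERY_TIMEOUT", "5")   // returns "30"
//	getEnv("SOME_UNSET_VAR", "5")  // returns the fallback "5"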
func atoi(stringValue string) int {
intValue, err := strconv.Atoi(stringValue)
if err != nil {
log.Fatal("error while converting to int:", err)
panic(err)
}
return intValue
}
func connect(dsn string) *sql.DB {
log.Debugln("Launching connection: ", dsn)
db, err := sql.Open("oci8", dsn)
if err != nil {
log.Errorln("Error while connecting to", dsn)
panic(err)
}
log.Debugln("set max idle connections to ", *maxIdleConns)
db.SetMaxIdleConns(*maxIdleConns)
log.Debugln("set max open connections to ", *maxOpenConns)
db.SetMaxOpenConns(*maxOpenConns)
log.Debugln("Successfully connected to: ", dsn)
return db
}
// NewExporter returns a new Oracle DB exporter for the provided DSN.
func NewExporter(dsn string) *Exporter {
db := connect(dsn)
return &Exporter{
dsn: dsn,
duration: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: exporter,
Name: "last_scrape_duration_seconds",
Help: "Duration of the last scrape of metrics from Oracle DB.",
}),
totalScrapes: prometheus.NewCounter(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: exporter,
Name: "scrapes_total",
Help: "Total number of times Oracle DB was scraped for metrics.",
}),
scrapeErrors: prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: namespace,
Subsystem: exporter,
Name: "scrape_errors_total",
Help: "Total number of times an error occured scraping a Oracle database.",
}, []string{"collector"}),
error: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Subsystem: exporter,
Name: "last_scrape_error",
Help: "Whether the last scrape of metrics from Oracle DB resulted in an error (1 for error, 0 for success).",
}),
up: prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "up",
Help: "Whether the Oracle database server is up.",
}),
db: db,
}
}
// Describe describes all the metrics exported by the Oracle DB exporter.
func (e *Exporter) Describe(ch chan<- *prometheus.Desc) {
// We cannot know in advance what metrics the exporter will generate
// So we use the poor man's describe method: Run a collect
// and send the descriptors of all the collected metrics. The problem
// here is that we need to connect to the Oracle DB. If it is currently
// unavailable, the descriptors will be incomplete. Since this is a
// stand-alone exporter and not used as a library within other code
// implementing additional metrics, the worst that can happen is that we
// don't detect inconsistent metrics created by this exporter
// itself. Also, a change in the monitored Oracle instance may change the
// exported metrics during the runtime of the exporter.
metricCh := make(chan prometheus.Metric)
doneCh := make(chan struct{})
go func() {
for m := range metricCh {
ch <- m.Desc()
}
close(doneCh)
}()
e.Collect(metricCh)
close(metricCh)
<-doneCh
}
// Collect implements prometheus.Collector.
func (e *Exporter) Collect(ch chan<- prometheus.Metric) {
e.scrape(ch)
ch <- e.duration
ch <- e.totalScrapes
ch <- e.error
e.scrapeErrors.Collect(ch)
ch <- e.up
}
func (e *Exporter) scrape(ch chan<- prometheus.Metric) {
e.totalScrapes.Inc()
var err error
defer func(begun time.Time) {
e.duration.Set(time.Since(begun).Seconds())
if err == nil {
e.error.Set(0)
} else {
e.error.Set(1)
}
}(time.Now())
if err = e.db.Ping(); err != nil {
if strings.Contains(err.Error(), "sql: database is closed") {
log.Infoln("Reconnecting to DB")
e.db = connect(e.dsn)
}
}
if err = e.db.Ping(); err != nil {
log.Errorln("Error pinging oracle:", err)
//e.db.Close()
e.up.Set(0)
return
} else {
log.Debugln("Successfully pinged Oracle database: ")
e.up.Set(1)
}
if checkIfMetricsChanged() {
reloadMetrics()
}
wg := sync.WaitGroup{}
for _, metric := range metricsToScrap.Metric {
wg.Add(1)
metric := metric //https://golang.org/doc/faq#closures_and_goroutines
go func() {
defer wg.Done()
log.Debugln("About to scrape metric: ")
log.Debugln("- Metric MetricsDesc: ", metric.MetricsDesc)
log.Debugln("- Metric Context: ", metric.Context)
log.Debugln("- Metric MetricsType: ", metric.MetricsType)
log.Debugln("- Metric MetricsBuckets: ", metric.MetricsBuckets, "(Ignored unless Histogram type)")
log.Debugln("- Metric Labels: ", metric.Labels)
log.Debugln("- Metric FieldToAppend: ", metric.FieldToAppend)
log.Debugln("- Metric IgnoreZeroResult: ", metric.IgnoreZeroResult)
log.Debugln("- Metric Request: ", metric.Request)
if len(metric.Request) == 0 {
log.Errorln("Error scraping for ", metric.MetricsDesc, ". Did you forget to define request in your toml file?")
return
}
if len(metric.MetricsDesc) == 0 {
log.Errorln("Error scraping for query", metric.Request, ". Did you forget to define metricsdesc in your toml file?")
return
}
for column, metricType := range metric.MetricsType {
if metricType == "histogram" {
_, ok := metric.MetricsBuckets[column]
if !ok {
log.Errorln("Unable to find MetricsBuckets configuration key for metric. (metric=" + column + ")")
return
}
}
}
scrapeStart := time.Now()
if err = ScrapeMetric(e.db, ch, metric); err != nil {
log.Errorln("Error scraping for", metric.Context, "_", metric.MetricsDesc, ":", err)
e.scrapeErrors.WithLabelValues(metric.Context).Inc()
} else {
log.Debugln("Successfully scraped metric: ", metric.Context, metric.MetricsDesc, time.Since(scrapeStart))
}
}()
}
wg.Wait()
}
func GetMetricType(metricType string, metricsType map[string]string) prometheus.ValueType {
var strToPromType = map[string]prometheus.ValueType{
"gauge": prometheus.GaugeValue,
"counter": prometheus.CounterValue,
"histogram": prometheus.UntypedValue,
}
strType, ok := metricsType[strings.ToLower(metricType)]
if !ok {
return prometheus.GaugeValue
}
valueType, ok := strToPromType[strings.ToLower(strType)]
if !ok {
panic(errors.New("Error while getting prometheus type " + strings.ToLower(strType)))
}
return valueType
}
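// Illustrative example (assumed values): with
// metricsType = map[string]string{"value": "counter"}, calling
// GetMetricType("value", metricsType) yields prometheus.CounterValue, while a
// column missing from the map falls back to prometheus.GaugeValue. An unknown
// type string such as "summary" would panic.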
// ScrapeMetric is an interface method to call ScrapeGenericValues using Metric struct values
func ScrapeMetric(db *sql.DB, ch chan<- prometheus.Metric, metricDefinition Metric) error {
log.Debugln("Calling function ScrapeGenericValues()")
return ScrapeGenericValues(db, ch, metricDefinition.Context, metricDefinition.Labels,
metricDefinition.MetricsDesc, metricDefinition.MetricsType, metricDefinition.MetricsBuckets,
metricDefinition.FieldToAppend, metricDefinition.IgnoreZeroResult,
metricDefinition.Request)
}
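// Illustrative sketch of a metric definition (assumed example, not taken from
// the shipped metric files) that ScrapeMetric can consume:
//
//	Metric{
//		Context:     "sessions",
//		Labels:      []string{"status", "type"},
//		MetricsDesc: map[string]string{"value": "Number of sessions, by status and type."},
//		Request:     "SELECT status, type, COUNT(*) AS value FROM v$session GROUP BY status, type",
//	}
//
// This would emit a gauge named oracledb_sessions_value with status and type
// labels (gauge is the fallback when MetricsType is omitted).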
// ScrapeGenericValues is a generic method for retrieving metrics.
func ScrapeGenericValues(db *sql.DB, ch chan<- prometheus.Metric, context string, labels []string,
metricsDesc map[string]string, metricsType map[string]string, metricsBuckets map[string]map[string]string, fieldToAppend string, ignoreZeroResult bool, request string) error {
metricsCount := 0
genericParser := func(row map[string]string) error {
// Construct labels value
labelsValues := []string{}
for _, label := range labels {
labelsValues = append(labelsValues, row[label])
}
// Construct Prometheus values to send back
for metric, metricHelp := range metricsDesc {
value, err := strconv.ParseFloat(strings.TrimSpace(row[metric]), 64)
// If not a float, skip current metric
if err != nil {
log.Errorln("Unable to convert current value to float (metric=" + metric +
",metricHelp=" + metricHelp + ",value=<" + row[metric] + ">)")
continue
}
log.Debugln("Query result looks like: ", value)
// If the metric does not use a field's content in the metric name
if strings.Compare(fieldToAppend, "") == 0 {
desc := prometheus.NewDesc(
prometheus.BuildFQName(namespace, context, metric),
metricHelp,
labels, nil,
)
if metricsType[strings.ToLower(metric)] == "histogram" {
count, err := strconv.ParseUint(strings.TrimSpace(row["count"]), 10, 64)
if err != nil {
log.Errorln("Unable to convert count value to int (metric=" + metric +
",metricHelp=" + metricHelp + ",value=<" + row["count"] + ">)")
continue
}
buckets := make(map[float64]uint64)
for field, le := range metricsBuckets[metric] {
lelimit, err := strconv.ParseFloat(strings.TrimSpace(le), 64)
if err != nil {
log.Errorln("Unable to convert bucket limit value to float (metric=" + metric +
",metricHelp=" + metricHelp + ",bucketlimit=<" + le + ">)")
continue
}
counter, err := strconv.ParseUint(strings.TrimSpace(row[field]), 10, 64)
if err != nil {
log.Errorln("Unable to convert ", field, " value to int (metric="+metric+
",metricHelp="+metricHelp+",value=<"+row[field]+">)")
continue
}
buckets[lelimit] = counter
}
ch <- prometheus.MustNewConstHistogram(desc, count, value, buckets, labelsValues...)
} else {
ch <- prometheus.MustNewConstMetric(desc, GetMetricType(metric, metricsType), value, labelsValues...)
}
// Otherwise append the field's content to the metric name (no extra labels)
} else {
desc := prometheus.NewDesc(
prometheus.BuildFQName(namespace, context, cleanName(row[fieldToAppend])),
metricHelp,
nil, nil,
)
if metricsType[strings.ToLower(metric)] == "histogram" {
count, err := strconv.ParseUint(strings.TrimSpace(row["count"]), 10, 64)
if err != nil {
log.Errorln("Unable to convert count value to int (metric=" + metric +
",metricHelp=" + metricHelp + ",value=<" + row["count"] + ">)")
continue
}
buckets := make(map[float64]uint64)
for field, le := range metricsBuckets[metric] {
lelimit, err := strconv.ParseFloat(strings.TrimSpace(le), 64)
if err != nil {
log.Errorln("Unable to convert bucket limit value to float (metric=" + metric +
",metricHelp=" + metricHelp + ",bucketlimit=<" + le + ">)")
continue
}
counter, err := strconv.ParseUint(strings.TrimSpace(row[field]), 10, 64)
if err != nil {
log.Errorln("Unable to convert ", field, " value to int (metric="+metric+
",metricHelp="+metricHelp+",value=<"+row[field]+">)")
continue
}
buckets[lelimit] = counter
}
ch <- prometheus.MustNewConstHistogram(desc, count, value, buckets)
} else {
ch <- prometheus.MustNewConstMetric(desc, GetMetricType(metric, metricsType), value)
}
}
metricsCount++
}
return nil
}
err := GeneratePrometheusMetrics(db, genericParser, request)
log.Debugln("ScrapeGenericValues() - metricsCount: ", metricsCount)
if err != nil {
return err
}
if !ignoreZeroResult && metricsCount == 0 {
return errors.New("No metrics found while parsing")
}
return err
}
// GeneratePrometheusMetrics parses the SQL result and calls the parsing function for each row.
// Inspired by https://kylewbanks.com/blog/query-result-to-map-in-golang
func GeneratePrometheusMetrics(db *sql.DB, parse func(row map[string]string) error, query string) error {
// Add a timeout
timeout, err := strconv.Atoi(*queryTimeout)
if err != nil {
log.Fatal("error while converting timeout option value: ", err)
panic(err)
}
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeout)*time.Second)
defer cancel()
rows, err := db.QueryContext(ctx, query)
if ctx.Err() == context.DeadlineExceeded {
return errors.New("Oracle query timed out")
}
if err != nil {
return err
}
	defer rows.Close()
	cols, err := rows.Columns()
	if err != nil {
		return err
	}
for rows.Next() {
// Create a slice of interface{}'s to represent each column,
// and a second slice to contain pointers to each item in the columns slice.
columns := make([]interface{}, len(cols))
columnPointers := make([]interface{}, len(cols))
		for i := range columns {
columnPointers[i] = &columns[i]
}
// Scan the result into the column pointers...
if err := rows.Scan(columnPointers...); err != nil {
return err
}
// Create our map, and retrieve the value for each column from the pointers slice,
// storing it in the map with the name of the column as the key.
m := make(map[string]string)
for i, colName := range cols {
val := columnPointers[i].(*interface{})
m[strings.ToLower(colName)] = fmt.Sprintf("%v", *val)
}
// Call function to parse row
if err := parse(m); err != nil {
return err
}
}
return nil
}
// Oracle gives us some ugly names back. This function cleans things up for Prometheus.
func cleanName(s string) string {
s = strings.Replace(s, " ", "_", -1) // Remove spaces
s = strings.Replace(s, "(", "", -1) // Remove open parenthesis
s = strings.Replace(s, ")", "", -1) // Remove close parenthesis
s = strings.Replace(s, "/", "", -1) // Remove forward slashes
s = strings.Replace(s, "*", "", -1) // Remove asterisks
s = strings.ToLower(s)
return s
}
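// hashFile feeds the content of the file fn into the given hash.Hash.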
func hashFile(h hash.Hash, fn string) error {
f, err := os.Open(fn)
if err != nil {
return err
}
defer f.Close()
if _, err := io.Copy(h, f); err != nil {
return err
}
return nil
}
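// checkIfMetricsChanged hashes every custom metrics definition file and compares the
// result with the cached value in hashMap, reporting whether any file changed since
// the last check.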
func checkIfMetricsChanged() bool {
for i, _customMetrics := range strings.Split(*customMetrics, ",") {
if len(_customMetrics) == 0 {
continue
}
log.Debug("Checking modifications in following metrics definition file:", _customMetrics)
h := sha256.New()
if err := hashFile(h, _customMetrics); err != nil {
log.Errorln("Unable to get file hash", err)
return false
}
		// If any of the files has changed, reload the metrics
if !bytes.Equal(hashMap[i], h.Sum(nil)) {
log.Infoln(_customMetrics, "has been changed. Reloading metrics...")
hashMap[i] = h.Sum(nil)
return true
}
}
return false
}
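// reloadMetrics truncates the current metric list, reloads the default metrics file
// and then appends the metrics from every configured custom metrics file.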
func reloadMetrics() {
// Truncate metricsToScrap
metricsToScrap.Metric = []Metric{}
// Load default metrics
if _, err := toml.DecodeFile(*defaultFileMetrics, &metricsToScrap); err != nil {
log.Errorln(err)
panic(errors.New("Error while loading " + *defaultFileMetrics))
} else {
log.Infoln("Successfully loaded default metrics from: " + *defaultFileMetrics)
}
	// If custom metrics files are defined, load them too
if strings.Compare(*customMetrics, "") != 0 {
for _, _customMetrics := range strings.Split(*customMetrics, ",") {
if _, err := toml.DecodeFile(_customMetrics, &additionalMetrics); err != nil {
log.Errorln(err)
panic(errors.New("Error while loading " + _customMetrics))
} else {
log.Infoln("Successfully loaded custom metrics from: " + _customMetrics)
}
metricsToScrap.Metric = append(metricsToScrap.Metric, additionalMetrics.Metric...)
}
} else {
log.Infoln("No custom metrics defined.")
}
}
func main() {
log.AddFlags(kingpin.CommandLine)
kingpin.Version("oracledb_exporter " + Version)
kingpin.HelpFlag.Short('h')
kingpin.Parse()
log.Infoln("Starting oracledb_exporter " + Version)
dsn := os.Getenv("DATA_SOURCE_NAME")
// Load default and custom metrics
hashMap = make(map[int][]byte)
reloadMetrics()
exporter := NewExporter(dsn)
prometheus.MustRegister(exporter)
// See more info on https://github.com/prometheus/client_golang/blob/master/prometheus/promhttp/http.go#L269
opts := promhttp.HandlerOpts{
ErrorLog: log.NewErrorLogger(),
ErrorHandling: promhttp.ContinueOnError,
}
http.Handle(*metricPath, promhttp.HandlerFor(prometheus.DefaultGatherer, opts))
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("<html><head><title>Oracle DB Exporter " + Version + "</title></head><body><h1>Oracle DB Exporter " + Version + "</h1><p><a href='" + *metricPath + "'>Metrics</a></p></body></html>"))
})
if *securedMetrics {
if _, err := os.Stat(*serverCert); err != nil {
log.Fatal("Error loading certificate:", err)
panic(err)
}
if _, err := os.Stat(*serverKey); err != nil {
log.Fatal("Error loading key:", err)
panic(err)
}
log.Infoln("Listening TLS server on", *listenAddress)
if err := http.ListenAndServeTLS(*listenAddress, *serverCert, *serverKey, nil); err != nil {
log.Fatal("Failed to start the secure server:", err)
panic(err)
}
} else {
log.Infoln("Listening on", *listenAddress)
log.Fatal(http.ListenAndServe(*listenAddress, nil))
}
}
| ["\"DATA_SOURCE_NAME\""] | [] | ["DATA_SOURCE_NAME"] | [] | ["DATA_SOURCE_NAME"] | go | 1 | 0 | |
manage.py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'uofthacksemergencyfunds.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [] | [] | [] | [] | [] | python | 0 | 0 |
cord-music/player.py | import os
import asyncio
import async_timeout
import discord
from discord.ext import commands
from wavelink import Player
from .errors import InvalidLoopMode, NotEnoughSong, NothingIsPlaying
class DisPlayer(Player):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.queue = asyncio.Queue()
self.loop = "NONE" # CURRENT, PLAYLIST
self.bound_channel = None
self.track_provider = "yt"
async def destroy(self) -> None:
self.queue = None
await super().stop()
await super().disconnect()
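    # Plays the next queued track; if the queue stays empty for DISMUSIC_TIMEOUT
    # seconds (300 by default) the player is destroyed to free the voice connection.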
async def do_next(self) -> None:
if self.is_playing():
return
timeout = int(os.getenv("DISMUSIC_TIMEOUT", 300))
try:
with async_timeout.timeout(timeout):
track = await self.queue.get()
except asyncio.TimeoutError:
if not self.is_playing():
await self.destroy()
return
self._source = track
await self.play(track)
self.client.dispatch("dismusic_track_start", self, track)
await self.invoke_player()
async def set_loop(self, loop_type: str) -> None:
valid_types = ["NONE", "CURRENT", "PLAYLIST"]
if not self.is_playing():
raise NothingIsPlaying("Player is not playing any track. Can't loop")
if not loop_type:
if valid_types.index(self.loop) >= 2:
loop_type = "NONE"
else:
loop_type = valid_types[valid_types.index(self.loop) + 1]
if loop_type == "PLAYLIST" and len(self.queue._queue) < 1:
loop_type = "NONE"
if loop_type.upper() == "PLAYLIST" and len(self.queue._queue) < 1:
raise NotEnoughSong("There must be 2 songs in the queue in order to use the PLAYLIST loop")
if loop_type.upper() not in valid_types:
raise InvalidLoopMode("Loop type must be `NONE`, `CURRENT` or `PLAYLIST`.")
self.loop = loop_type.upper()
return self.loop
async def invoke_player(self, ctx: commands.Context = None) -> None:
track = self.source
if not track:
raise NothingIsPlaying("Player is not playing anything.")
embed = discord.Embed(title=track.title, url=track.uri, color=discord.Color.blurple())
embed.set_author(name=track.author, url=track.uri, icon_url=self.client.user.display_avatar.url)
try:
embed.set_thumbnail(url=track.thumb)
except AttributeError:
embed.set_thumbnail(
url="https://cdn.discordapp.com/attachments/776345413132877854/940540758442795028/unknown.png"
)
embed.add_field(
name="Length",
value=f"{int(track.length // 60)}:{int(track.length % 60)}",
)
embed.add_field(name="Looping", value=self.loop)
embed.add_field(name="Volume", value=self.volume)
next_song = ""
if self.loop == "CURRENT":
next_song = self.source.title
else:
if len(self.queue._queue) > 0:
next_song = self.queue._queue[0].title
if next_song:
embed.add_field(name="Next Song", value=next_song, inline=False)
if not ctx:
return await self.bound_channel.send(embed=embed)
await ctx.send(embed=embed)
| [] | [] | ["DISMUSIC_TIMEOUT"] | [] | ["DISMUSIC_TIMEOUT"] | python | 1 | 0 |
manage.py | #!/usr/bin/env python
import os
import sys
from pathlib import Path
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django # noqa
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
# This allows easy placement of apps within the interior
# opentimesheet directory.
current_path = Path(__file__).parent.resolve()
sys.path.append(str(current_path / "opentimesheet"))
execute_from_command_line(sys.argv)
| [] | [] | [] | [] | [] | python | 0 | 0 |
apply.py | #!/usr/bin/env python3
import argparse
import glob
import hashlib
import os.path
import re
import shutil
from pathlib import Path
from pathlib import PureWindowsPath
from subprocess import Popen, PIPE, call
from typing import Dict, Optional, List
class File(object):
def __init__(self, name, hash, split, skip):
self.name = name
self.hash = hash
self.split = split
self.skip = skip
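# Reads key=value pairs from a .env-style file (".env" by default) and merges them
# into a copy of os.environ; comment lines are skipped and surrounding quotes and
# whitespace are stripped from values.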
def get_env(path: Optional[str] = None) -> Dict[str, str]:
env = dict(os.environ)
if not path:
path = '.env'
with open(path, 'r') as f:
for line in f.readlines():
if not line.startswith('#'):
items = line.split('=')
env[items[0].strip(' ')] = items[1].strip('\n\t" ')
return env
def sha256(filename: str) -> str:
h = hashlib.sha256()
with open(filename, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
h.update(chunk)
return h.hexdigest()
def load_file_list(filename: str) -> Dict[str, File]:
files = dict()
    if os.path.isfile(filename):
with open(filename, 'r') as f:
for line in f.readlines():
line = line.strip()
skip = False
if line.startswith("#"):
skip = True
line = line.lstrip("# ")
items = line.split(',')
name = items[0].strip()
files[name] = File(name, items[1].strip(), int(items[2].strip()), skip)
return files
def update_file_list(filename: str, files: Dict[str, File], new_files: Dict[str, File]) -> (List[File], List[File], List[File]):
new_files_ = list()
modified_files = list()
removed_files = list()
for file in files.values():
if not os.path.isfile(file.name):
removed_files.append(file)
for file in removed_files:
del files[file.name]
for new_file in new_files.values():
file = files.get(new_file.name, None)
if not file:
files[new_file.name] = new_file
new_files_.append(new_file)
else:
if new_file.hash != file.hash:
file.hash = new_file.hash
modified_files.append(file)
if os.path.isfile(filename):
shutil.move(filename, filename + '.bak')
with open(filename, 'w') as f:
for file in files.values():
print(file.name, file.hash, str(file.split), sep=',', file=f)
return new_files_, modified_files, removed_files
def init_file_list(pathname: str) -> Dict[str, File]:
files = dict()
insert = dict()
for name in glob.glob(pathname, recursive=True):
name = PureWindowsPath(name).as_posix()
if name.endswith('_backfill.sql'):
continue
file = File(name, sha256(name), 0, False)
if Path(name).name.startswith("insert"):
insert[name] = file
else:
files[name] = file
files.update(insert)
return files
def check_file_list(pathname: str, files: Dict[str, File]) -> (List[File], List[File], List[File]):
new_files = list()
modified_files = list()
removed_files = list()
for file in files.values():
if not os.path.isfile(file.name):
removed_files.append(file)
for name in glob.glob(pathname, recursive=True):
name = PureWindowsPath(name).as_posix()
if name.endswith('_backfill.sql'):
continue
        file = files.get(name, None)
        if not file:
            new_files.append(File(name, sha256(name), 0, False))
            continue
        if file.skip:
            continue
        hash = sha256(name)
        if hash != file.hash:
            modified_files.append(File(name, hash, 0, False))
return new_files, modified_files, removed_files
def apply_patch(filename: str) -> int:
if os.path.isfile(filename):
return call(['git', 'apply', filename])
else:
return 0
def revert_patch(path: str) -> int:
return call(['git', 'checkout', '-f', path])
def prepare(filename: str, env: Dict[str, str]) -> int:
if os.path.isfile(filename):
return call(['psql', '-v', 'ON_ERROR_STOP=1', '-f', filename], env=env)
else:
return 0
def max_block_number(match) -> str:
arg = match.group(3)
if arg is None:
arg = "(" + match.group(2) + ")"
return "SELECT max_block_number_" + ("le" if match.group(1) == "<=" else "lt") + arg
def filter_max_block_expr(s: str) -> str:
return re.sub(r"SELECT\s+MAX\s*\(\s*number\s*\)\s+FROM\s+ethereum\.blocks\s+WHERE\s+time\s*(<|<=)\s*(?:('.+')|(\(.+\)))",
max_block_number, s, flags=re.IGNORECASE)
def fix_bytea(match):
arg = match.group(1)
if arg is None:
return "\\\\x" + match.group(2)
else:
return arg + "::BYTEA"
def filter_bytea_literals(s: str) -> str:
return re.sub(r"(?:('\\x[\da-f]{2,}')(?! *:: *BYTEA))|(?:\\\\([\da-f]{2,}))", fix_bytea, s, flags=re.IGNORECASE)
def apply_schema(files: List[File], env: Dict[str, str], backfill: bool = False):
for file in files:
if file.skip:
print("Skipping", file.name)
continue
print(file.name)
with Popen(['psql', '-v', 'ON_ERROR_STOP=1'], stdin=PIPE, stdout=PIPE, stderr=PIPE, text=True,
restore_signals=True, env=env) as psql:
with open(file.name, 'r') as f:
for i, line in enumerate(f.readlines(), start=1):
if backfill or file.split <= 0 or i < file.split:
line = filter_bytea_literals(line)
if backfill:
line = filter_max_block_expr(line)
print(line, file=psql.stdin, flush=True, end='')
psql.stdin.close()
for line in psql.stderr.readlines():
print(line, end='')
for line in psql.stdout.readlines():
print(line, end='')
def make_backfill_scripts(filename: str, files: List[File]):
with open(filename, 'w') as fl:
for file in files:
if file.split > 0:
path = Path(file.name)
name = path.with_stem(path.stem + '_backfill')
with open(file.name, 'r') as f, open(name, 'w') as bf:
print(name, file=fl)
for i, line in enumerate(f.readlines(), start=1):
if i >= file.split:
print(line, file=bf, end='')
def apply_backfill_scripts(filename: str, env: Dict[str, str]):
    with open(filename, 'r') as fl:
        for fn in fl.readlines():
fn = fn.strip()
if fn.startswith("#"):
print("Skipping", fn)
continue
print(fn)
with Popen(['psql', '-v', 'ON_ERROR_STOP=1'], stdin=PIPE, stdout=PIPE, stderr=PIPE, text=True,
restore_signals=True, env=env) as psql:
with open(fn, 'r') as f:
for i, line in enumerate(f.readlines(), start=1):
print(filter_max_block_expr(line), file=psql.stdin, flush=True, end='')
psql.stdin.close()
for line in psql.stderr.readlines():
print(line, end='')
for line in psql.stdout.readlines():
print(line, end='')
def print_updated_files(new_files: List[File], modified_files: List[File], removed_files: List[File]) -> bool:
has_updates = False
if len(new_files) > 0:
print("-- New files --")
for f in new_files:
print(f.name, f.hash, f.split, sep=',')
has_updates = True
if len(modified_files) > 0:
print("-- Modified files --")
for f in modified_files:
print(f.name, f.hash, f.split, sep=',')
has_updates = True
if len(removed_files) > 0:
print("-- Removed files --")
for f in removed_files:
print(f.name, f.hash, f.split, sep=',')
has_updates = True
return has_updates
def main(args) -> int:
if args.revert_patch:
revert_patch('ethereum')
return 0
if args.update_list:
files = load_file_list('scripts.csv')
new_files = init_file_list('ethereum/**/*.sql')
new_files_, modified_files, removed_files = update_file_list('scripts.csv', files, new_files)
print_updated_files(new_files_, modified_files, removed_files)
make_backfill_scripts('script-list.txt', files.values())
return 0
files = load_file_list('scripts.csv')
new_files, modified_files, removed_files = check_file_list('ethereum/**/*.sql', files)
if not args.skip_patch:
if print_updated_files(new_files, modified_files, removed_files):
print('Have to fix updates')
return -1
if len(files) == 0:
print('No scripts to apply')
return -1
if not args.skip_patch:
if apply_patch('patch.patch') != 0:
return -1
env = get_env()
if prepare('prepare.sql', env) != 0:
return -1
if args.apply_backfills:
apply_backfill_scripts('script-list.txt', env)
else:
apply_schema(files.values(), env, args.use_backfills)
return 0
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Script to apply Dune abstractions scripts in `ethereum` dir')
parser.add_argument('-u', '--update-list', dest='update_list', action='store_true', help='update script list `scripts.csv`', default=False)
parser.add_argument('-a', '--apply-backfills', dest='apply_backfills', action='store_true', help='apply backfill query files', default=False)
parser.add_argument('-b', '--use-backfills', dest='use_backfills', action='store_true', help='use backfill queries if exists', default=False)
parser.add_argument('-s', '--skip-patch', dest='skip_patch', action='store_true', help='skip patch applying', default=False)
parser.add_argument('-r', '--revert-patch', dest='revert_patch', action='store_true', help='revert patch modifications', default=False)
args = parser.parse_args()
print(args)
return_code = main(args)
if return_code != 0:
exit(return_code)
| [] | [] | [] | [] | [] | python | 0 | 0 |
tests/console-backend-service/internal/mockice/mockice.go | package mockice
import (
"fmt"
"log"
"os"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/dynamic"
)
var (
podPort int32 = 8080
svcPort int32 = 80
defaultImage string = "hudymi/mockice:0.1.1"
)
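// Start provisions a Mockice instance in the given namespace by creating a ConfigMap,
// a Pod and a Service with the supplied name. Resources created so far are removed
// again if a later step fails, and the in-cluster address of the service is returned.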
func Start(client dynamic.Interface, namespace, name string) (string, error) {
_, err := createConfigMap(client, namespace, name)
if err != nil {
return "", err
}
_, err = createPod(client, namespace, name)
if err != nil {
Stop(client, namespace, name)
return "", err
}
_, err = createService(client, namespace, name)
if err != nil {
Stop(client, namespace, name)
return "", err
}
return fmt.Sprintf("%s.%s.svc.cluster.local:%d", name, namespace, svcPort), nil
}
func Stop(client dynamic.Interface, namespace, name string) {
logOnDeleteError(deleteResource(client, "configmaps", namespace, name), "ConfigMap", namespace, name)
logOnDeleteError(deleteResource(client, "pods", namespace, name), "Pod", namespace, name)
logOnDeleteError(deleteResource(client, "services", namespace, name), "Service", namespace, name)
}
func logOnDeleteError(err error, kind, namespace, name string) {
if err != nil {
log.Println(fmt.Sprintf("Cannot delete %s %s/%s, because: %v", kind, namespace, name, err))
}
}
func ResourceURL(host string) string {
return fmt.Sprintf("http://%s/README.md", host)
}
func deleteResource(client dynamic.Interface, resource, namespace, name string) error {
groupVersion := schema.GroupVersionResource{Group: "", Version: "v1", Resource: resource}
return client.Resource(groupVersion).Namespace(namespace).Delete(name, nil)
}
func createConfigMap(client dynamic.Interface, namespace, name string) (*v1.ConfigMap, error) {
configMap := fixConfigMap(namespace, name)
resource := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"}
obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&configMap)
if err != nil {
return nil, errors.Wrap(err, "while converting ConfigMap to map[string]interface{}")
}
configMap = v1.ConfigMap{}
err = create(client, resource, namespace, obj, &configMap)
return &configMap, err
}
func createPod(client dynamic.Interface, namespace, name string) (*v1.Pod, error) {
pod, err := fixPod(namespace, name)
if err != nil {
return nil, err
}
resource := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&pod)
if err != nil {
return nil, errors.Wrap(err, "while converting Pod to map[string]interface{}")
}
pod = v1.Pod{}
err = create(client, resource, namespace, obj, &pod)
return &pod, err
}
func createService(client dynamic.Interface, namespace, name string) (*v1.Service, error) {
svc := fixService(namespace, name)
resource := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services"}
obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&svc)
if err != nil {
return nil, errors.Wrap(err, "while converting Service to map[string]interface{}")
}
svc = v1.Service{}
err = create(client, resource, namespace, obj, &svc)
return &svc, err
}
func create(client dynamic.Interface, resource schema.GroupVersionResource, namespace string, unstructuredMap map[string]interface{}, obj interface{}) error {
result, err := client.Resource(resource).Namespace(namespace).Create(&unstructured.Unstructured{Object: unstructuredMap}, metav1.CreateOptions{})
if err != nil {
return errors.Wrap(err, "while creating resource")
}
err = runtime.DefaultUnstructuredConverter.FromUnstructured(result.Object, obj)
if err != nil {
return errors.Wrap(err, "while converting Unstructured resource")
}
return nil
}
func getResources(memory, cpu string) (map[v1.ResourceName]resource.Quantity, error) {
memQ, err := resource.ParseQuantity(memory)
if err != nil {
return nil, err
}
cpuQ, err := resource.ParseQuantity(cpu)
if err != nil {
return nil, err
}
return map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: cpuQ,
v1.ResourceMemory: memQ,
}, nil
}
func fixPod(namespace, name string) (v1.Pod, error) {
image := os.Getenv("MOCKICE_IMAGE")
if image == "" {
image = defaultImage
}
requests, err := getResources("8Mi", "4m")
if err != nil {
return v1.Pod{}, err
}
limits, err := getResources("16Mi", "8m")
if err != nil {
return v1.Pod{}, err
}
return v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Annotations: map[string]string{"sidecar.istio.io/inject": "false"},
Labels: map[string]string{"owner": "console-backend-service-tests", "app": name},
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "config",
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{
Name: name,
}},
},
},
},
Containers: []v1.Container{
{
Name: "mockice",
Image: image,
ImagePullPolicy: v1.PullIfNotPresent,
Args: []string{"--verbose", "--config", "/app/config.yaml"},
VolumeMounts: []v1.VolumeMount{{
Name: "config",
MountPath: "/app/config.yaml",
ReadOnly: true,
SubPath: "config.yaml",
}},
Ports: []v1.ContainerPort{{
Name: "http",
ContainerPort: podPort,
Protocol: v1.ProtocolTCP,
}},
Resources: v1.ResourceRequirements{
Requests: requests,
Limits: limits,
},
},
},
},
}, nil
}
func fixConfigMap(namespace, name string) v1.ConfigMap {
return v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Labels: map[string]string{"owner": "console-backend-service-tests", "app": name},
},
Data: map[string]string{
"config.yaml": fmt.Sprintf(`
address: :%d
endpoints:
- name: README.md
defaultResponseCode: 200
defaultResponseContent: "# Test markdown"
defaultResponseContentType: text/markdown; charset=utf-8
`, podPort),
},
}
}
func fixService(namespace, name string) v1.Service {
return v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Annotations: map[string]string{"auth.istio.io/80": "NONE"},
Labels: map[string]string{"owner": "console-backend-service-tests", "app": name},
},
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{{
Port: svcPort,
TargetPort: intstr.IntOrString{IntVal: podPort},
Protocol: v1.ProtocolTCP,
Name: "http",
}},
Selector: map[string]string{"owner": "console-backend-service-tests", "app": name},
},
}
}
| ["\"MOCKICE_IMAGE\""] | [] | ["MOCKICE_IMAGE"] | [] | ["MOCKICE_IMAGE"] | go | 1 | 0 |
app/app/settings.py | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'h)@tlih+pp*%z+zngk)d99#s@__u*)&gs#z$mutsyqv$zj&1u%'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'core',
'user',
'recipe',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'HOST': os.environ.get('DB_HOST'),
'NAME': os.environ.get('DB_NAME'),
'USER': os.environ.get('DB_USER'),
'PASSWORD': os.environ.get('DB_PASS'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'core.User'
| [] | [] | ["DB_PASS", "DB_USER", "DB_NAME", "DB_HOST"] | [] | ["DB_PASS", "DB_USER", "DB_NAME", "DB_HOST"] | python | 4 | 0 |
daemon/daemon.go | // Package daemon exposes the functions that occur on the host server
// that the Docker daemon is running.
//
// In implementing the various functions of the daemon, there is often
// a method-specific struct for configuring the runtime behavior.
package daemon
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"sync"
"syscall"
"time"
"github.com/Sirupsen/logrus"
containerd "github.com/docker/containerd/api/grpc/types"
"github.com/docker/docker/api"
"github.com/docker/docker/container"
"github.com/docker/docker/daemon/events"
"github.com/docker/docker/daemon/exec"
"github.com/docker/engine-api/types"
containertypes "github.com/docker/engine-api/types/container"
"github.com/docker/libnetwork/cluster"
// register graph drivers
_ "github.com/docker/docker/daemon/graphdriver/register"
dmetadata "github.com/docker/docker/distribution/metadata"
"github.com/docker/docker/distribution/xfer"
"github.com/docker/docker/image"
"github.com/docker/docker/layer"
"github.com/docker/docker/libcontainerd"
"github.com/docker/docker/migrate/v1"
"github.com/docker/docker/pkg/fileutils"
"github.com/docker/docker/pkg/graphdb"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/progress"
"github.com/docker/docker/pkg/registrar"
"github.com/docker/docker/pkg/signal"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/docker/pkg/sysinfo"
"github.com/docker/docker/pkg/system"
"github.com/docker/docker/pkg/truncindex"
"github.com/docker/docker/reference"
"github.com/docker/docker/registry"
"github.com/docker/docker/runconfig"
"github.com/docker/docker/utils"
volumedrivers "github.com/docker/docker/volume/drivers"
"github.com/docker/docker/volume/local"
"github.com/docker/docker/volume/store"
"github.com/docker/libnetwork"
nwconfig "github.com/docker/libnetwork/config"
"github.com/docker/libtrust"
)
var (
// DefaultRuntimeBinary is the default runtime to be used by
// containerd if none is specified
DefaultRuntimeBinary = "docker-runc"
errSystemNotSupported = fmt.Errorf("The Docker daemon is not supported on this platform.")
)
// Daemon holds information about the Docker daemon.
type Daemon struct {
ID string
repository string
containers container.Store
execCommands *exec.Store
referenceStore reference.Store
downloadManager *xfer.LayerDownloadManager
uploadManager *xfer.LayerUploadManager
distributionMetadataStore dmetadata.Store
trustKey libtrust.PrivateKey
idIndex *truncindex.TruncIndex
configStore *Config
statsCollector *statsCollector
defaultLogConfig containertypes.LogConfig
RegistryService registry.Service
EventsService *events.Events
netController libnetwork.NetworkController
volumes *store.VolumeStore
discoveryWatcher discoveryReloader
root string
seccompEnabled bool
shutdown bool
uidMaps []idtools.IDMap
gidMaps []idtools.IDMap
layerStore layer.Store
imageStore image.Store
nameIndex *registrar.Registrar
linkIndex *linkIndex
containerd libcontainerd.Client
containerdRemote libcontainerd.Remote
defaultIsolation containertypes.Isolation // Default isolation mode on Windows
clusterProvider cluster.Provider
}
func (daemon *Daemon) restore() error {
var (
debug = utils.IsDebugEnabled()
currentDriver = daemon.GraphDriverName()
containers = make(map[string]*container.Container)
)
if !debug {
logrus.Info("Loading containers: start.")
}
dir, err := ioutil.ReadDir(daemon.repository)
if err != nil {
return err
}
containerCount := 0
for _, v := range dir {
id := v.Name()
container, err := daemon.load(id)
if !debug && logrus.GetLevel() == logrus.InfoLevel {
fmt.Print(".")
containerCount++
}
if err != nil {
logrus.Errorf("Failed to load container %v: %v", id, err)
continue
}
// Ignore the container if it does not support the current driver being used by the graph
if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver {
rwlayer, err := daemon.layerStore.GetRWLayer(container.ID)
if err != nil {
logrus.Errorf("Failed to load container mount %v: %v", id, err)
continue
}
container.RWLayer = rwlayer
logrus.Debugf("Loaded container %v", container.ID)
containers[container.ID] = container
} else {
logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
}
}
var migrateLegacyLinks bool
restartContainers := make(map[*container.Container]chan struct{})
activeSandboxes := make(map[string]interface{})
for _, c := range containers {
if err := daemon.registerName(c); err != nil {
logrus.Errorf("Failed to register container %s: %s", c.ID, err)
continue
}
if err := daemon.Register(c); err != nil {
logrus.Errorf("Failed to register container %s: %s", c.ID, err)
continue
}
// The LogConfig.Type is empty if the container was created before docker 1.12 with default log driver.
// We should rewrite it to use the daemon defaults.
// Fixes https://github.com/docker/docker/issues/22536
if c.HostConfig.LogConfig.Type == "" {
if err := daemon.mergeAndVerifyLogConfig(&c.HostConfig.LogConfig); err != nil {
logrus.Errorf("Failed to verify log config for container %s: %q", c.ID, err)
continue
}
}
}
var wg sync.WaitGroup
var mapLock sync.Mutex
for _, c := range containers {
wg.Add(1)
go func(c *container.Container) {
defer wg.Done()
rm := c.RestartManager(false)
if c.IsRunning() || c.IsPaused() {
if err := daemon.containerd.Restore(c.ID, libcontainerd.WithRestartManager(rm)); err != nil {
logrus.Errorf("Failed to restore with containerd: %q", err)
return
}
if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() {
options, err := daemon.buildSandboxOptions(c)
if err != nil {
logrus.Warnf("Failed build sandbox option to restore container %s: %v", c.ID, err)
}
mapLock.Lock()
activeSandboxes[c.NetworkSettings.SandboxID] = options
mapLock.Unlock()
}
}
// fixme: only if not running
// get list of containers we need to restart
if daemon.configStore.AutoRestart && !c.IsRunning() && !c.IsPaused() && c.ShouldRestart() {
mapLock.Lock()
restartContainers[c] = make(chan struct{})
mapLock.Unlock()
}
if c.RemovalInProgress {
// We probably crashed in the middle of a removal, reset
// the flag.
//
// We DO NOT remove the container here as we do not
// know if the user had requested for either the
// associated volumes, network links or both to also
// be removed. So we put the container in the "dead"
// state and leave further processing up to them.
logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID)
c.ResetRemovalInProgress()
c.SetDead()
c.ToDisk()
}
// if c.hostConfig.Links is nil (not just empty), then it is using the old sqlite links and needs to be migrated
if c.HostConfig != nil && c.HostConfig.Links == nil {
migrateLegacyLinks = true
}
}(c)
}
wg.Wait()
daemon.netController, err = daemon.initNetworkController(daemon.configStore, activeSandboxes)
if err != nil {
return fmt.Errorf("Error initializing network controller: %v", err)
}
// migrate any legacy links from sqlite
linkdbFile := filepath.Join(daemon.root, "linkgraph.db")
var legacyLinkDB *graphdb.Database
if migrateLegacyLinks {
legacyLinkDB, err = graphdb.NewSqliteConn(linkdbFile)
if err != nil {
return fmt.Errorf("error connecting to legacy link graph DB %s, container links may be lost: %v", linkdbFile, err)
}
defer legacyLinkDB.Close()
}
// Now that all the containers are registered, register the links
for _, c := range containers {
if migrateLegacyLinks {
if err := daemon.migrateLegacySqliteLinks(legacyLinkDB, c); err != nil {
return err
}
}
if err := daemon.registerLinks(c, c.HostConfig); err != nil {
logrus.Errorf("failed to register link for container %s: %v", c.ID, err)
}
}
group := sync.WaitGroup{}
for c, notifier := range restartContainers {
group.Add(1)
go func(c *container.Container, chNotify chan struct{}) {
defer group.Done()
logrus.Debugf("Starting container %s", c.ID)
// ignore errors here as this is a best effort to wait for children to be
// running before we try to start the container
children := daemon.children(c)
timeout := time.After(5 * time.Second)
for _, child := range children {
if notifier, exists := restartContainers[child]; exists {
select {
case <-notifier:
case <-timeout:
}
}
}
// Make sure networks are available before starting
daemon.waitForNetworks(c)
if err := daemon.containerStart(c); err != nil {
logrus.Errorf("Failed to start container %s: %s", c.ID, err)
}
close(chNotify)
}(c, notifier)
}
group.Wait()
// any containers that were started above would already have had this done,
// however we need to now prepare the mountpoints for the rest of the containers as well.
// This shouldn't cause any issue running on the containers that already had this run.
// This must be run after any containers with a restart policy so that containerized plugins
// can have a chance to be running before we try to initialize them.
for _, c := range containers {
// if the container has restart policy, do not
// prepare the mountpoints since it has been done on restarting.
		// This speeds up daemon start when a restarting container
		// has a volume and the volume driver is not available.
if _, ok := restartContainers[c]; ok {
continue
}
group.Add(1)
go func(c *container.Container) {
defer group.Done()
if err := daemon.prepareMountPoints(c); err != nil {
logrus.Error(err)
}
}(c)
}
group.Wait()
if !debug {
if logrus.GetLevel() == logrus.InfoLevel && containerCount > 0 {
fmt.Println()
}
logrus.Info("Loading containers: done.")
}
return nil
}
// waitForNetworks is used during daemon initialization when starting up containers
// It ensures that all of a container's networks are available before the daemon tries to start the container.
// In practice it just makes sure the discovery service is available for containers which use a network that require discovery.
func (daemon *Daemon) waitForNetworks(c *container.Container) {
if daemon.discoveryWatcher == nil {
return
}
// Make sure if the container has a network that requires discovery that the discovery service is available before starting
for netName := range c.NetworkSettings.Networks {
// If we get `ErrNoSuchNetwork` here, we can assume that it is due to discovery not being ready
// Most likely this is because the K/V store used for discovery is in a container and needs to be started
if _, err := daemon.netController.NetworkByName(netName); err != nil {
if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok {
continue
}
// use a longish timeout here due to some slowdowns in libnetwork if the k/v store is on anything other than --net=host
// FIXME: why is this slow???
logrus.Debugf("Container %s waiting for network to be ready", c.Name)
select {
case <-daemon.discoveryWatcher.ReadyCh():
case <-time.After(60 * time.Second):
}
return
}
}
}
func (daemon *Daemon) children(c *container.Container) map[string]*container.Container {
return daemon.linkIndex.children(c)
}
// parents returns the names of the parent containers of the container
// with the given name.
func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container {
return daemon.linkIndex.parents(c)
}
func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error {
fullName := path.Join(parent.Name, alias)
if err := daemon.nameIndex.Reserve(fullName, child.ID); err != nil {
if err == registrar.ErrNameReserved {
logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err)
return nil
}
return err
}
daemon.linkIndex.link(parent, child, fullName)
return nil
}
// SetClusterProvider sets a component for querying the current cluster state.
func (daemon *Daemon) SetClusterProvider(clusterProvider cluster.Provider) {
daemon.clusterProvider = clusterProvider
daemon.netController.SetClusterProvider(clusterProvider)
}
// IsSwarmCompatible verifies if the current daemon
// configuration is compatible with the swarm mode
func (daemon *Daemon) IsSwarmCompatible() error {
if daemon.configStore == nil {
return nil
}
return daemon.configStore.isSwarmCompatible()
}
// NewDaemon sets up everything for the daemon to be able to service
// requests from the webserver.
func NewDaemon(config *Config, registryService registry.Service, containerdRemote libcontainerd.Remote) (daemon *Daemon, err error) {
setDefaultMtu(config)
// Ensure that we have a correct root key limit for launching containers.
if err := ModifyRootKeyLimit(); err != nil {
logrus.Warnf("unable to modify root key limit, number of containers could be limitied by this quota: %v", err)
}
// Ensure we have compatible and valid configuration options
if err := verifyDaemonSettings(config); err != nil {
return nil, err
}
// Do we have a disabled network?
config.DisableBridge = isBridgeNetworkDisabled(config)
// Verify the platform is supported as a daemon
if !platformSupported {
return nil, errSystemNotSupported
}
// Validate platform-specific requirements
if err := checkSystem(); err != nil {
return nil, err
}
// set up SIGUSR1 handler on Unix-like systems, or a Win32 global event
// on Windows to dump Go routine stacks
setupDumpStackTrap()
uidMaps, gidMaps, err := setupRemappedRoot(config)
if err != nil {
return nil, err
}
rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
if err != nil {
return nil, err
}
// get the canonical path to the Docker root directory
var realRoot string
if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) {
realRoot = config.Root
} else {
realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root)
if err != nil {
return nil, fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err)
}
}
if err = setupDaemonRoot(config, realRoot, rootUID, rootGID); err != nil {
return nil, err
}
// set up the tmpDir to use a canonical path
tmp, err := tempDir(config.Root, rootUID, rootGID)
if err != nil {
return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err)
}
realTmp, err := fileutils.ReadSymlinkedDirectory(tmp)
if err != nil {
return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
}
os.Setenv("TMPDIR", realTmp)
d := &Daemon{configStore: config}
// Ensure the daemon is properly shutdown if there is a failure during
// initialization
defer func() {
if err != nil {
if err := d.Shutdown(); err != nil {
logrus.Error(err)
}
}
}()
// Set the default isolation mode (only applicable on Windows)
if err := d.setDefaultIsolation(); err != nil {
return nil, fmt.Errorf("error setting default isolation mode: %v", err)
}
logrus.Debugf("Using default logging driver %s", config.LogConfig.Type)
if err := configureMaxThreads(config); err != nil {
logrus.Warnf("Failed to configure golang's threads limit: %v", err)
}
installDefaultAppArmorProfile()
daemonRepo := filepath.Join(config.Root, "containers")
if err := idtools.MkdirAllAs(daemonRepo, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
return nil, err
}
driverName := os.Getenv("DOCKER_DRIVER")
if driverName == "" {
driverName = config.GraphDriver
}
d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{
StorePath: config.Root,
MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
GraphDriver: driverName,
GraphDriverOptions: config.GraphOptions,
UIDMaps: uidMaps,
GIDMaps: gidMaps,
})
if err != nil {
return nil, err
}
graphDriver := d.layerStore.DriverName()
imageRoot := filepath.Join(config.Root, "image", graphDriver)
// Configure and validate the kernels security support
if err := configureKernelSecuritySupport(config, graphDriver); err != nil {
return nil, err
}
logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads)
d.downloadManager = xfer.NewLayerDownloadManager(d.layerStore, *config.MaxConcurrentDownloads)
logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads)
d.uploadManager = xfer.NewLayerUploadManager(*config.MaxConcurrentUploads)
ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb"))
if err != nil {
return nil, err
}
d.imageStore, err = image.NewImageStore(ifs, d.layerStore)
if err != nil {
return nil, err
}
// Configure the volumes driver
volStore, err := d.configureVolumes(rootUID, rootGID)
if err != nil {
return nil, err
}
trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath)
if err != nil {
return nil, err
}
trustDir := filepath.Join(config.Root, "trust")
if err := system.MkdirAll(trustDir, 0700); err != nil {
return nil, err
}
distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution"))
if err != nil {
return nil, err
}
eventsService := events.New()
referenceStore, err := reference.NewReferenceStore(filepath.Join(imageRoot, "repositories.json"))
if err != nil {
return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err)
}
if err := restoreCustomImage(d.imageStore, d.layerStore, referenceStore); err != nil {
return nil, fmt.Errorf("Couldn't restore custom images: %s", err)
}
migrationStart := time.Now()
if err := v1.Migrate(config.Root, graphDriver, d.layerStore, d.imageStore, referenceStore, distributionMetadataStore); err != nil {
logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err)
}
logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds())
// Discovery is only enabled when the daemon is launched with an address to advertise. When
// initialized, the daemon is registered and we can store the discovery backend as its read-only
if err := d.initDiscovery(config); err != nil {
return nil, err
}
sysInfo := sysinfo.New(false)
// Check if Devices cgroup is mounted, it is hard requirement for container security,
// on Linux.
if runtime.GOOS == "linux" && !sysInfo.CgroupDevicesEnabled {
return nil, fmt.Errorf("Devices cgroup isn't mounted")
}
d.ID = trustKey.PublicKey().KeyID()
d.repository = daemonRepo
d.containers = container.NewMemoryStore()
d.execCommands = exec.NewStore()
d.referenceStore = referenceStore
d.distributionMetadataStore = distributionMetadataStore
d.trustKey = trustKey
d.idIndex = truncindex.NewTruncIndex([]string{})
d.statsCollector = d.newStatsCollector(1 * time.Second)
d.defaultLogConfig = containertypes.LogConfig{
Type: config.LogConfig.Type,
Config: config.LogConfig.Config,
}
d.RegistryService = registryService
d.EventsService = eventsService
d.volumes = volStore
d.root = config.Root
d.uidMaps = uidMaps
d.gidMaps = gidMaps
d.seccompEnabled = sysInfo.Seccomp
d.nameIndex = registrar.NewRegistrar()
d.linkIndex = newLinkIndex()
d.containerdRemote = containerdRemote
go d.execCommandGC()
d.containerd, err = containerdRemote.Client(d)
if err != nil {
return nil, err
}
if err := d.restore(); err != nil {
return nil, err
}
return d, nil
}
func (daemon *Daemon) shutdownContainer(c *container.Container) error {
// TODO(windows): Handle docker restart with paused containers
if c.IsPaused() {
// To terminate a process in freezer cgroup, we should send
// SIGTERM to this process then unfreeze it, and the process will
// force to terminate immediately.
logrus.Debugf("Found container %s is paused, sending SIGTERM before unpausing it", c.ID)
sig, ok := signal.SignalMap["TERM"]
if !ok {
return fmt.Errorf("System does not support SIGTERM")
}
if err := daemon.kill(c, int(sig)); err != nil {
return fmt.Errorf("sending SIGTERM to container %s with error: %v", c.ID, err)
}
if err := daemon.containerUnpause(c); err != nil {
return fmt.Errorf("Failed to unpause container %s with error: %v", c.ID, err)
}
if _, err := c.WaitStop(10 * time.Second); err != nil {
logrus.Debugf("container %s failed to exit in 10 seconds of SIGTERM, sending SIGKILL to force", c.ID)
sig, ok := signal.SignalMap["KILL"]
if !ok {
return fmt.Errorf("System does not support SIGKILL")
}
if err := daemon.kill(c, int(sig)); err != nil {
logrus.Errorf("Failed to SIGKILL container %s", c.ID)
}
c.WaitStop(-1 * time.Second)
return err
}
}
// If container failed to exit in 10 seconds of SIGTERM, then using the force
if err := daemon.containerStop(c, 10); err != nil {
return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err)
}
c.WaitStop(-1 * time.Second)
return nil
}
// Shutdown stops the daemon.
func (daemon *Daemon) Shutdown() error {
daemon.shutdown = true
// Keep mounts and networking running on daemon shutdown if
// we are to keep containers running and restore them.
if daemon.configStore.LiveRestore {
// check if there are any running containers, if none we should do some cleanup
if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil {
return nil
}
}
if daemon.containers != nil {
logrus.Debug("starting clean shutdown of all containers...")
daemon.containers.ApplyAll(func(c *container.Container) {
if !c.IsRunning() {
return
}
logrus.Debugf("stopping %s", c.ID)
if err := daemon.shutdownContainer(c); err != nil {
logrus.Errorf("Stop container error: %v", err)
return
}
if mountid, err := daemon.layerStore.GetMountID(c.ID); err == nil {
daemon.cleanupMountsByID(mountid)
}
logrus.Debugf("container stopped %s", c.ID)
})
}
// trigger libnetwork Stop only if it's initialized
if daemon.netController != nil {
daemon.netController.Stop()
}
if daemon.layerStore != nil {
if err := daemon.layerStore.Cleanup(); err != nil {
logrus.Errorf("Error during layer Store.Cleanup(): %v", err)
}
}
if err := daemon.cleanupMounts(); err != nil {
return err
}
return nil
}
// Mount sets container.BaseFS
// (is it not set coming in? why is it unset?)
func (daemon *Daemon) Mount(container *container.Container) error {
dir, err := container.RWLayer.Mount(container.GetMountLabel())
if err != nil {
return err
}
logrus.Debugf("container mounted via layerStore: %v", dir)
if container.BaseFS != dir {
// The mount path reported by the graph driver should always be trusted on Windows, since the
// volume path for a given mounted layer may change over time. This should only be an error
// on non-Windows operating systems.
if container.BaseFS != "" && runtime.GOOS != "windows" {
daemon.Unmount(container)
return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
daemon.GraphDriverName(), container.ID, container.BaseFS, dir)
}
}
container.BaseFS = dir // TODO: combine these fields
return nil
}
// Unmount unsets the container base filesystem
func (daemon *Daemon) Unmount(container *container.Container) error {
if err := container.RWLayer.Unmount(); err != nil {
logrus.Errorf("Error unmounting container %s: %s", container.ID, err)
return err
}
return nil
}
func writeDistributionProgress(cancelFunc func(), outStream io.Writer, progressChan <-chan progress.Progress) {
progressOutput := streamformatter.NewJSONStreamFormatter().NewProgressOutput(outStream, false)
operationCancelled := false
for prog := range progressChan {
if err := progressOutput.WriteProgress(prog); err != nil && !operationCancelled {
// don't log broken pipe errors as this is the normal case when a client aborts
if isBrokenPipe(err) {
logrus.Info("Pull session cancelled")
} else {
logrus.Errorf("error writing progress to client: %v", err)
}
cancelFunc()
operationCancelled = true
// Don't return, because we need to continue draining
// progressChan until it's closed to avoid a deadlock.
}
}
}
func isBrokenPipe(e error) bool {
if netErr, ok := e.(*net.OpError); ok {
e = netErr.Err
if sysErr, ok := netErr.Err.(*os.SyscallError); ok {
e = sysErr.Err
}
}
return e == syscall.EPIPE
}
// GraphDriverName returns the name of the graph driver used by the layer.Store
func (daemon *Daemon) GraphDriverName() string {
return daemon.layerStore.DriverName()
}
// GetUIDGIDMaps returns the current daemon's user namespace settings
// for the full uid and gid maps which will be applied to containers
// started in this instance.
func (daemon *Daemon) GetUIDGIDMaps() ([]idtools.IDMap, []idtools.IDMap) {
return daemon.uidMaps, daemon.gidMaps
}
// GetRemappedUIDGID returns the current daemon's uid and gid values
// if user namespaces are in use for this daemon instance. If not
// this function will return "real" root values of 0, 0.
func (daemon *Daemon) GetRemappedUIDGID() (int, int) {
uid, gid, _ := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps)
return uid, gid
}
// tempDir returns the default directory to use for temporary files.
func tempDir(rootDir string, rootUID, rootGID int) (string, error) {
var tmpDir string
if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" {
tmpDir = filepath.Join(rootDir, "tmp")
}
return tmpDir, idtools.MkdirAllAs(tmpDir, 0700, rootUID, rootGID)
}
func (daemon *Daemon) setupInitLayer(initPath string) error {
rootUID, rootGID := daemon.GetRemappedUIDGID()
return setupInitLayer(initPath, rootUID, rootGID)
}
func setDefaultMtu(config *Config) {
// do nothing if the config does not have the default 0 value.
if config.Mtu != 0 {
return
}
config.Mtu = defaultNetworkMtu
}
func (daemon *Daemon) configureVolumes(rootUID, rootGID int) (*store.VolumeStore, error) {
volumesDriver, err := local.New(daemon.configStore.Root, rootUID, rootGID)
if err != nil {
return nil, err
}
if !volumedrivers.Register(volumesDriver, volumesDriver.Name()) {
return nil, fmt.Errorf("local volume driver could not be registered")
}
return store.New(daemon.configStore.Root)
}
// IsShuttingDown tells whether the daemon is shutting down or not
func (daemon *Daemon) IsShuttingDown() bool {
return daemon.shutdown
}
// initDiscovery initializes the discovery watcher for this daemon.
func (daemon *Daemon) initDiscovery(config *Config) error {
advertise, err := parseClusterAdvertiseSettings(config.ClusterStore, config.ClusterAdvertise)
if err != nil {
if err == errDiscoveryDisabled {
return nil
}
return err
}
config.ClusterAdvertise = advertise
discoveryWatcher, err := initDiscovery(config.ClusterStore, config.ClusterAdvertise, config.ClusterOpts)
if err != nil {
return fmt.Errorf("discovery initialization failed (%v)", err)
}
daemon.discoveryWatcher = discoveryWatcher
return nil
}
// Reload reads configuration changes and modifies the
// daemon according to those changes.
// These are the settings that Reload changes:
// - Daemon labels.
// - Daemon debug log level.
// - Daemon max concurrent downloads
// - Daemon max concurrent uploads
// - Cluster discovery (reconfigure and restart).
// - Daemon live restore
func (daemon *Daemon) Reload(config *Config) error {
var err error
// used to hold reloaded changes
attributes := map[string]string{}
// We need defer here to ensure the lock is released as
// daemon.SystemInfo() will try to get it too
defer func() {
if err == nil {
daemon.LogDaemonEventWithAttributes("reload", attributes)
}
}()
daemon.configStore.reloadLock.Lock()
defer daemon.configStore.reloadLock.Unlock()
daemon.platformReload(config, &attributes)
if err = daemon.reloadClusterDiscovery(config); err != nil {
return err
}
if config.IsValueSet("labels") {
daemon.configStore.Labels = config.Labels
}
if config.IsValueSet("debug") {
daemon.configStore.Debug = config.Debug
}
if config.IsValueSet("live-restore") {
daemon.configStore.LiveRestore = config.LiveRestore
if err := daemon.containerdRemote.UpdateOptions(libcontainerd.WithLiveRestore(config.LiveRestore)); err != nil {
return err
}
}
// If no value is set for max-concurrent-downloads we assume it is the default value
// We always "reset" as the cost is lightweight and easy to maintain.
if config.IsValueSet("max-concurrent-downloads") && config.MaxConcurrentDownloads != nil {
*daemon.configStore.MaxConcurrentDownloads = *config.MaxConcurrentDownloads
} else {
maxConcurrentDownloads := defaultMaxConcurrentDownloads
daemon.configStore.MaxConcurrentDownloads = &maxConcurrentDownloads
}
logrus.Debugf("Reset Max Concurrent Downloads: %d", *daemon.configStore.MaxConcurrentDownloads)
if daemon.downloadManager != nil {
daemon.downloadManager.SetConcurrency(*daemon.configStore.MaxConcurrentDownloads)
}
// If no value is set for max-concurrent-upload we assume it is the default value
// We always "reset" as the cost is lightweight and easy to maintain.
if config.IsValueSet("max-concurrent-uploads") && config.MaxConcurrentUploads != nil {
*daemon.configStore.MaxConcurrentUploads = *config.MaxConcurrentUploads
} else {
maxConcurrentUploads := defaultMaxConcurrentUploads
daemon.configStore.MaxConcurrentUploads = &maxConcurrentUploads
}
logrus.Debugf("Reset Max Concurrent Uploads: %d", *daemon.configStore.MaxConcurrentUploads)
if daemon.uploadManager != nil {
daemon.uploadManager.SetConcurrency(*daemon.configStore.MaxConcurrentUploads)
}
// We emit daemon reload event here with updatable configurations
attributes["debug"] = fmt.Sprintf("%t", daemon.configStore.Debug)
attributes["cluster-store"] = daemon.configStore.ClusterStore
if daemon.configStore.ClusterOpts != nil {
opts, _ := json.Marshal(daemon.configStore.ClusterOpts)
attributes["cluster-store-opts"] = string(opts)
} else {
attributes["cluster-store-opts"] = "{}"
}
attributes["cluster-advertise"] = daemon.configStore.ClusterAdvertise
if daemon.configStore.Labels != nil {
labels, _ := json.Marshal(daemon.configStore.Labels)
attributes["labels"] = string(labels)
} else {
attributes["labels"] = "[]"
}
attributes["max-concurrent-downloads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentDownloads)
attributes["max-concurrent-uploads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentUploads)
return nil
}
func (daemon *Daemon) reloadClusterDiscovery(config *Config) error {
var err error
newAdvertise := daemon.configStore.ClusterAdvertise
newClusterStore := daemon.configStore.ClusterStore
if config.IsValueSet("cluster-advertise") {
if config.IsValueSet("cluster-store") {
newClusterStore = config.ClusterStore
}
newAdvertise, err = parseClusterAdvertiseSettings(newClusterStore, config.ClusterAdvertise)
if err != nil && err != errDiscoveryDisabled {
return err
}
}
if daemon.clusterProvider != nil {
if err := config.isSwarmCompatible(); err != nil {
return err
}
}
// check discovery modifications
if !modifiedDiscoverySettings(daemon.configStore, newAdvertise, newClusterStore, config.ClusterOpts) {
return nil
}
// enable discovery for the first time if it was not previously enabled
if daemon.discoveryWatcher == nil {
discoveryWatcher, err := initDiscovery(newClusterStore, newAdvertise, config.ClusterOpts)
if err != nil {
return fmt.Errorf("discovery initialization failed (%v)", err)
}
daemon.discoveryWatcher = discoveryWatcher
} else {
if err == errDiscoveryDisabled {
// disable discovery if it was previously enabled and it's disabled now
daemon.discoveryWatcher.Stop()
} else {
// reload discovery
if err = daemon.discoveryWatcher.Reload(config.ClusterStore, newAdvertise, config.ClusterOpts); err != nil {
return err
}
}
}
daemon.configStore.ClusterStore = newClusterStore
daemon.configStore.ClusterOpts = config.ClusterOpts
daemon.configStore.ClusterAdvertise = newAdvertise
if daemon.netController == nil {
return nil
}
netOptions, err := daemon.networkOptions(daemon.configStore, nil)
if err != nil {
logrus.Warnf("Failed to reload configuration with network controller: %v", err)
return nil
}
err = daemon.netController.ReloadConfiguration(netOptions...)
if err != nil {
logrus.Warnf("Failed to reload configuration with network controller: %v", err)
}
return nil
}
func isBridgeNetworkDisabled(config *Config) bool {
return config.bridgeConfig.Iface == disableNetworkBridge
}
func (daemon *Daemon) networkOptions(dconfig *Config, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) {
options := []nwconfig.Option{}
if dconfig == nil {
return options, nil
}
options = append(options, nwconfig.OptionDataDir(dconfig.Root))
dd := runconfig.DefaultDaemonNetworkMode()
dn := runconfig.DefaultDaemonNetworkMode().NetworkName()
options = append(options, nwconfig.OptionDefaultDriver(string(dd)))
options = append(options, nwconfig.OptionDefaultNetwork(dn))
if strings.TrimSpace(dconfig.ClusterStore) != "" {
kv := strings.Split(dconfig.ClusterStore, "://")
if len(kv) != 2 {
return nil, fmt.Errorf("kv store daemon config must be of the form KV-PROVIDER://KV-URL")
}
options = append(options, nwconfig.OptionKVProvider(kv[0]))
options = append(options, nwconfig.OptionKVProviderURL(kv[1]))
}
if len(dconfig.ClusterOpts) > 0 {
options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts))
}
if daemon.discoveryWatcher != nil {
options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher))
}
if dconfig.ClusterAdvertise != "" {
options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise))
}
options = append(options, nwconfig.OptionLabels(dconfig.Labels))
options = append(options, driverOptions(dconfig)...)
if daemon.configStore != nil && daemon.configStore.LiveRestore && len(activeSandboxes) != 0 {
options = append(options, nwconfig.OptionActiveSandboxes(activeSandboxes))
}
return options, nil
}
func copyBlkioEntry(entries []*containerd.BlkioStatsEntry) []types.BlkioStatEntry {
out := make([]types.BlkioStatEntry, len(entries))
for i, re := range entries {
out[i] = types.BlkioStatEntry{
Major: re.Major,
Minor: re.Minor,
Op: re.Op,
Value: re.Value,
}
}
return out
}
| [
"\"DOCKER_DRIVER\"",
"\"DOCKER_TMPDIR\""
]
| []
| [
"DOCKER_DRIVER",
"DOCKER_TMPDIR"
]
| [] | ["DOCKER_DRIVER", "DOCKER_TMPDIR"] | go | 2 | 0 | |
pkg/objstore/cos/cos.go | // Copyright (c) The Thanos Authors.
// Licensed under the Apache License 2.0.
package cos
import (
"context"
"fmt"
"io"
"math"
"math/rand"
"net/http"
"net/url"
"os"
"strings"
"testing"
"time"
"github.com/go-kit/log"
"github.com/pkg/errors"
"github.com/prometheus/common/model"
"github.com/tencentyun/cos-go-sdk-v5"
"gopkg.in/yaml.v2"
"github.com/thanos-io/thanos/pkg/exthttp"
"github.com/thanos-io/thanos/pkg/objstore"
"github.com/thanos-io/thanos/pkg/objstore/clientutil"
"github.com/thanos-io/thanos/pkg/runutil"
)
// dirDelim is the delimiter used to model a directory structure in an object store bucket.
const dirDelim = "/"
// Bucket implements the store.Bucket interface against COS-compatible (Tencent Cloud Object Storage) APIs.
type Bucket struct {
logger log.Logger
client *cos.Client
name string
}
// DefaultConfig is the default config for a COS client; by default it tunes `MaxIdleConnsPerHost`.
var DefaultConfig = Config{
HTTPConfig: HTTPConfig{
IdleConnTimeout: model.Duration(90 * time.Second),
ResponseHeaderTimeout: model.Duration(2 * time.Minute),
TLSHandshakeTimeout: model.Duration(10 * time.Second),
ExpectContinueTimeout: model.Duration(1 * time.Second),
MaxIdleConns: 100,
MaxIdleConnsPerHost: 100,
MaxConnsPerHost: 0,
},
}
// Config encapsulates the necessary config values to instantiate a COS client.
type Config struct {
Bucket string `yaml:"bucket"`
Region string `yaml:"region"`
AppId string `yaml:"app_id"`
Endpoint string `yaml:"endpoint"`
SecretKey string `yaml:"secret_key"`
SecretId string `yaml:"secret_id"`
HTTPConfig HTTPConfig `yaml:"http_config"`
}
// validate checks that the mandatory COS config options are set.
func (conf *Config) validate() error {
if conf.Endpoint != "" {
if _, err := url.Parse(conf.Endpoint); err != nil {
return errors.Wrap(err, "parse endpoint")
}
if conf.SecretId == "" ||
conf.SecretKey == "" {
return errors.New("secret_id or secret_key is empty")
}
return nil
}
if conf.Bucket == "" ||
conf.AppId == "" ||
conf.Region == "" ||
conf.SecretId == "" ||
conf.SecretKey == "" {
return errors.New("insufficient cos configuration information")
}
return nil
}
// parseConfig unmarshals a buffer into a Config with default HTTPConfig values.
func parseConfig(conf []byte) (Config, error) {
config := DefaultConfig
if err := yaml.Unmarshal(conf, &config); err != nil {
return Config{}, err
}
return config, nil
}
// HTTPConfig stores the http.Transport configuration for the cos client.
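//
// An illustrative http_config block as it would appear in the objstore YAML
// configuration (the values below are placeholders, not recommendations):
//
//	http_config:
//	  idle_conn_timeout: 90s
//	  response_header_timeout: 2m
//	  tls_handshake_timeout: 10s
//	  max_idle_conns_per_host: 100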
type HTTPConfig struct {
IdleConnTimeout model.Duration `yaml:"idle_conn_timeout"`
ResponseHeaderTimeout model.Duration `yaml:"response_header_timeout"`
TLSHandshakeTimeout model.Duration `yaml:"tls_handshake_timeout"`
ExpectContinueTimeout model.Duration `yaml:"expect_continue_timeout"`
MaxIdleConns int `yaml:"max_idle_conns"`
MaxIdleConnsPerHost int `yaml:"max_idle_conns_per_host"`
MaxConnsPerHost int `yaml:"max_conns_per_host"`
}
// DefaultTransport builds an http.Transport from the config.
func DefaultTransport(c HTTPConfig) *http.Transport {
transport := exthttp.NewTransport()
transport.IdleConnTimeout = time.Duration(c.IdleConnTimeout)
transport.ResponseHeaderTimeout = time.Duration(c.ResponseHeaderTimeout)
transport.TLSHandshakeTimeout = time.Duration(c.TLSHandshakeTimeout)
transport.ExpectContinueTimeout = time.Duration(c.ExpectContinueTimeout)
transport.MaxIdleConns = c.MaxIdleConns
transport.MaxIdleConnsPerHost = c.MaxIdleConnsPerHost
transport.MaxConnsPerHost = c.MaxConnsPerHost
return transport
}
// NewBucket returns a new Bucket using the provided cos configuration.
func NewBucket(logger log.Logger, conf []byte, component string) (*Bucket, error) {
if logger == nil {
logger = log.NewNopLogger()
}
config, err := parseConfig(conf)
if err != nil {
return nil, errors.Wrap(err, "parsing cos configuration")
}
return NewBucketWithConfig(logger, config, component)
}
// NewBucketWithConfig returns a new Bucket using the provided cos config values.
func NewBucketWithConfig(logger log.Logger, config Config, component string) (*Bucket, error) {
if err := config.validate(); err != nil {
return nil, errors.Wrap(err, "validate cos configuration")
}
var bucketURL *url.URL
var err error
if config.Endpoint != "" {
bucketURL, err = url.Parse(config.Endpoint)
if err != nil {
return nil, errors.Wrap(err, "parse endpoint")
}
} else {
bucketURL = cos.NewBucketURL(fmt.Sprintf("%s-%s", config.Bucket, config.AppId), config.Region, true)
}
b := &cos.BaseURL{BucketURL: bucketURL}
client := cos.NewClient(b, &http.Client{
Transport: &cos.AuthorizationTransport{
SecretID: config.SecretId,
SecretKey: config.SecretKey,
Transport: DefaultTransport(config.HTTPConfig),
},
})
bkt := &Bucket{
logger: logger,
client: client,
name: config.Bucket,
}
return bkt, nil
}
// Name returns the bucket name for COS.
func (b *Bucket) Name() string {
return b.name
}
// Attributes returns information about the specified object.
func (b *Bucket) Attributes(ctx context.Context, name string) (objstore.ObjectAttributes, error) {
resp, err := b.client.Object.Head(ctx, name, nil)
if err != nil {
return objstore.ObjectAttributes{}, err
}
size, err := clientutil.ParseContentLength(resp.Header)
if err != nil {
return objstore.ObjectAttributes{}, err
}
	// Tencent COS returns the Last-Modified header in RFC1123 format.
// see api doc for details: https://intl.cloud.tencent.com/document/product/436/7729
mod, err := clientutil.ParseLastModified(resp.Header, time.RFC1123)
if err != nil {
return objstore.ObjectAttributes{}, err
}
return objstore.ObjectAttributes{
Size: size,
LastModified: mod,
}, nil
}
var (
_ cos.FixedLengthReader = (*fixedLengthReader)(nil)
)
type fixedLengthReader struct {
io.Reader
size int64
}
func newFixedLengthReader(r io.Reader, size int64) io.Reader {
return fixedLengthReader{
Reader: io.LimitReader(r, size),
size: size,
}
}
// Size implements the cos.FixedLengthReader interface.
func (r fixedLengthReader) Size() int64 {
return r.size
}
// Upload the contents of the reader as an object into the bucket.
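//
// Readers that fit in a single part (128 MB, see partSize below) are uploaded
// with one Put call; larger readers go through the InitiateMultipartUpload /
// UploadPart / CompleteMultipartUpload sequence, aborting the multipart upload
// if any part fails.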
func (b *Bucket) Upload(ctx context.Context, name string, r io.Reader) error {
size, err := objstore.TryToGetSize(r)
if err != nil {
return errors.Wrapf(err, "getting size of %s", name)
}
// partSize 128MB.
const partSize = 1024 * 1024 * 128
partNums, lastSlice := int(math.Floor(float64(size)/partSize)), size%partSize
if partNums == 0 {
if _, err := b.client.Object.Put(ctx, name, r, nil); err != nil {
return errors.Wrapf(err, "Put object: %s", name)
}
return nil
}
// 1. init.
result, _, err := b.client.Object.InitiateMultipartUpload(ctx, name, nil)
if err != nil {
return errors.Wrapf(err, "InitiateMultipartUpload %s", name)
}
uploadEveryPart := func(partSize int64, part int, uploadID string) (string, error) {
r := newFixedLengthReader(r, partSize)
resp, err := b.client.Object.UploadPart(ctx, name, uploadID, part, r, &cos.ObjectUploadPartOptions{
ContentLength: partSize,
})
if err != nil {
if _, err := b.client.Object.AbortMultipartUpload(ctx, name, uploadID); err != nil {
return "", err
}
return "", err
}
etag := resp.Header.Get("ETag")
return etag, nil
}
optcom := &cos.CompleteMultipartUploadOptions{}
// 2. upload parts.
for part := 1; part <= partNums; part++ {
etag, err := uploadEveryPart(partSize, part, result.UploadID)
if err != nil {
return errors.Wrapf(err, "uploadPart %d, %s", part, name)
}
optcom.Parts = append(optcom.Parts, cos.Object{
PartNumber: part, ETag: etag},
)
}
// 3. upload last part.
if lastSlice != 0 {
part := partNums + 1
etag, err := uploadEveryPart(lastSlice, part, result.UploadID)
if err != nil {
return errors.Wrapf(err, "uploadPart %d, %s", part, name)
}
optcom.Parts = append(optcom.Parts, cos.Object{
PartNumber: part, ETag: etag},
)
}
// 4. complete.
if _, _, err := b.client.Object.CompleteMultipartUpload(ctx, name, result.UploadID, optcom); err != nil {
return errors.Wrapf(err, "CompleteMultipartUpload %s", name)
}
return nil
}
// Delete removes the object with the given name.
func (b *Bucket) Delete(ctx context.Context, name string) error {
if _, err := b.client.Object.Delete(ctx, name); err != nil {
return errors.Wrap(err, "delete cos object")
}
return nil
}
// Iter calls f for each entry in the given directory (non-recursive). The argument to f is the full
// object name including the prefix of the inspected directory.
func (b *Bucket) Iter(ctx context.Context, dir string, f func(string) error, options ...objstore.IterOption) error {
if dir != "" {
dir = strings.TrimSuffix(dir, dirDelim) + dirDelim
}
for object := range b.listObjects(ctx, dir, options...) {
if object.err != nil {
return object.err
}
if object.key == "" {
continue
}
if err := f(object.key); err != nil {
return err
}
}
return nil
}
func (b *Bucket) getRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) {
if name == "" {
return nil, errors.New("given object name should not empty")
}
opts := &cos.ObjectGetOptions{}
if length != -1 {
if err := setRange(opts, off, off+length-1); err != nil {
return nil, err
}
} else if off > 0 {
if err := setRange(opts, off, 0); err != nil {
return nil, err
}
}
resp, err := b.client.Object.Get(ctx, name, opts)
if err != nil {
return nil, err
}
if _, err := resp.Body.Read(nil); err != nil {
runutil.ExhaustCloseWithLogOnErr(b.logger, resp.Body, "cos get range obj close")
return nil, err
}
// Add size info into reader to pass it to Upload function.
r := objectSizerReadCloser{ReadCloser: resp.Body, size: resp.ContentLength}
return r, nil
}
type objectSizerReadCloser struct {
io.ReadCloser
size int64
}
// ObjectSize implements objstore.ObjectSizer.
func (o objectSizerReadCloser) ObjectSize() (int64, error) {
return o.size, nil
}
// Get returns a reader for the given object name.
func (b *Bucket) Get(ctx context.Context, name string) (io.ReadCloser, error) {
return b.getRange(ctx, name, 0, -1)
}
// GetRange returns a new range reader for the given object name and range.
func (b *Bucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) {
return b.getRange(ctx, name, off, length)
}
// Exists checks if the given object exists in the bucket.
func (b *Bucket) Exists(ctx context.Context, name string) (bool, error) {
if _, err := b.client.Object.Head(ctx, name, nil); err != nil {
if b.IsObjNotFoundErr(err) {
return false, nil
}
return false, errors.Wrap(err, "head cos object")
}
return true, nil
}
// IsObjNotFoundErr returns true if the error means that the object is not found. Relevant to Get operations.
func (b *Bucket) IsObjNotFoundErr(err error) bool {
switch tmpErr := errors.Cause(err).(type) {
case *cos.ErrorResponse:
if tmpErr.Code == "NoSuchKey" ||
(tmpErr.Response != nil && tmpErr.Response.StatusCode == http.StatusNotFound) {
return true
}
return false
default:
return false
}
}
func (b *Bucket) Close() error { return nil }
type objectInfo struct {
key string
err error
}
func (b *Bucket) listObjects(ctx context.Context, objectPrefix string, options ...objstore.IterOption) <-chan objectInfo {
objectsCh := make(chan objectInfo, 1)
// If recursive iteration is enabled we should pass an empty delimiter.
delimiter := dirDelim
if objstore.ApplyIterOptions(options...).Recursive {
delimiter = ""
}
go func(objectsCh chan<- objectInfo) {
defer close(objectsCh)
var marker string
for {
result, _, err := b.client.Bucket.Get(ctx, &cos.BucketGetOptions{
Prefix: objectPrefix,
MaxKeys: 1000,
Marker: marker,
Delimiter: delimiter,
})
if err != nil {
select {
case objectsCh <- objectInfo{
err: err,
}:
case <-ctx.Done():
}
return
}
for _, object := range result.Contents {
select {
case objectsCh <- objectInfo{
key: object.Key,
}:
case <-ctx.Done():
return
}
}
			// CommonPrefixes contains the keys grouped by the delimiter, i.e. the
			// "sub-directories" that share a common prefix under objectPrefix.
for _, obj := range result.CommonPrefixes {
select {
case objectsCh <- objectInfo{
key: obj,
}:
case <-ctx.Done():
return
}
}
if !result.IsTruncated {
return
}
marker = result.NextMarker
}
}(objectsCh)
return objectsCh
}
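// setRange encodes an offset pair as an HTTP Range header on the get options.
// Illustrative mappings: (start=0, end=-100) -> "bytes=-100" (suffix range),
// (start=100, end=0) -> "bytes=100-" (open-ended), (start=0, end=1023) ->
// "bytes=0-1023" (closed interval).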
func setRange(opts *cos.ObjectGetOptions, start, end int64) error {
if start == 0 && end < 0 {
opts.Range = fmt.Sprintf("bytes=%d", end)
} else if 0 < start && end == 0 {
opts.Range = fmt.Sprintf("bytes=%d-", start)
} else if 0 <= start && start <= end {
opts.Range = fmt.Sprintf("bytes=%d-%d", start, end)
} else {
return errors.Errorf("Invalid range specified: start=%d end=%d", start, end)
}
return nil
}
func configFromEnv() Config {
c := Config{
Bucket: os.Getenv("COS_BUCKET"),
AppId: os.Getenv("COS_APP_ID"),
Region: os.Getenv("COS_REGION"),
Endpoint: os.Getenv("COS_ENDPOINT"),
SecretId: os.Getenv("COS_SECRET_ID"),
SecretKey: os.Getenv("COS_SECRET_KEY"),
}
return c
}
// NewTestBucket creates a test bucket client, creating a temporary bucket before returning.
// The returned close function empties and deletes the bucket.
func NewTestBucket(t testing.TB) (objstore.Bucket, func(), error) {
c := configFromEnv()
if err := validateForTest(c); err != nil {
return nil, nil, err
}
if c.Bucket != "" {
if os.Getenv("THANOS_ALLOW_EXISTING_BUCKET_USE") == "" {
return nil, nil, errors.New("COS_BUCKET is defined. Normally this tests will create temporary bucket " +
"and delete it after test. Unset COS_BUCKET env variable to use default logic. If you really want to run " +
"tests against provided (NOT USED!) bucket, set THANOS_ALLOW_EXISTING_BUCKET_USE=true. WARNING: That bucket " +
"needs to be manually cleared. This means that it is only useful to run one test in a time. This is due " +
"to safety (accidentally pointing prod bucket for test) as well as COS not being fully strong consistent.")
}
bc, err := yaml.Marshal(c)
if err != nil {
return nil, nil, err
}
b, err := NewBucket(log.NewNopLogger(), bc, "thanos-e2e-test")
if err != nil {
return nil, nil, err
}
if err := b.Iter(context.Background(), "", func(f string) error {
return errors.Errorf("bucket %s is not empty", c.Bucket)
}); err != nil {
return nil, nil, errors.Wrapf(err, "cos check bucket %s", c.Bucket)
}
t.Log("WARNING. Reusing", c.Bucket, "COS bucket for COS tests. Manual cleanup afterwards is required")
return b, func() {}, nil
}
c.Bucket = createTemporaryTestBucketName(t)
bc, err := yaml.Marshal(c)
if err != nil {
return nil, nil, err
}
b, err := NewBucket(log.NewNopLogger(), bc, "thanos-e2e-test")
if err != nil {
return nil, nil, err
}
if _, err := b.client.Bucket.Put(context.Background(), nil); err != nil {
return nil, nil, err
}
t.Log("created temporary COS bucket for COS tests with name", c.Bucket)
return b, func() {
objstore.EmptyBucket(t, context.Background(), b)
if _, err := b.client.Bucket.Delete(context.Background()); err != nil {
t.Logf("deleting bucket %s failed: %s", c.Bucket, err)
}
}, nil
}
func validateForTest(conf Config) error {
if conf.Endpoint != "" {
if _, err := url.Parse(conf.Endpoint); err != nil {
return errors.Wrap(err, "parse endpoint")
}
if conf.SecretId == "" ||
conf.SecretKey == "" {
return errors.New("secret_id or secret_key is empty")
}
return nil
}
if conf.AppId == "" ||
conf.Region == "" ||
conf.SecretId == "" ||
conf.SecretKey == "" {
return errors.New("insufficient cos configuration information")
}
return nil
}
// createTemporaryTestBucketName creates a temporary COS bucket name for tests.
// Bucket Naming Conventions: https://intl.cloud.tencent.com/document/product/436/13312#overview
func createTemporaryTestBucketName(t testing.TB) string {
src := rand.New(rand.NewSource(time.Now().UnixNano()))
name := fmt.Sprintf("test_%x_%s", src.Int31(), strings.ToLower(t.Name()))
name = strings.NewReplacer("_", "-", "/", "-").Replace(name)
const maxLength = 50
if len(name) >= maxLength {
name = name[:maxLength]
}
return strings.TrimSuffix(name, "-")
}
| [
"\"COS_BUCKET\"",
"\"COS_APP_ID\"",
"\"COS_REGION\"",
"\"COS_ENDPOINT\"",
"\"COS_SECRET_ID\"",
"\"COS_SECRET_KEY\"",
"\"THANOS_ALLOW_EXISTING_BUCKET_USE\""
]
| []
| [
"COS_SECRET_ID",
"COS_BUCKET",
"COS_APP_ID",
"COS_SECRET_KEY",
"THANOS_ALLOW_EXISTING_BUCKET_USE",
"COS_ENDPOINT",
"COS_REGION"
]
| [] | ["COS_SECRET_ID", "COS_BUCKET", "COS_APP_ID", "COS_SECRET_KEY", "THANOS_ALLOW_EXISTING_BUCKET_USE", "COS_ENDPOINT", "COS_REGION"] | go | 7 | 0 | |
configuration-service/restapi/configure_configuration_service.go | // This file is safe to edit. Once it exists it will not be overwritten
package restapi
import (
"crypto/tls"
"fmt"
keptncommon "github.com/keptn/go-utils/pkg/lib/keptn"
"io/ioutil"
"net/http"
"os"
"os/exec"
"strings"
keptnapi "github.com/keptn/go-utils/pkg/api/utils"
"github.com/keptn/keptn/configuration-service/restapi/operations/event"
"github.com/keptn/keptn/configuration-service/restapi/operations/services"
errors "github.com/go-openapi/errors"
runtime "github.com/go-openapi/runtime"
handlers "github.com/keptn/keptn/configuration-service/handlers"
"github.com/keptn/keptn/configuration-service/restapi/operations"
"github.com/keptn/keptn/configuration-service/restapi/operations/project"
"github.com/keptn/keptn/configuration-service/restapi/operations/project_resource"
"github.com/keptn/keptn/configuration-service/restapi/operations/service"
"github.com/keptn/keptn/configuration-service/restapi/operations/service_default_resource"
"github.com/keptn/keptn/configuration-service/restapi/operations/service_resource"
"github.com/keptn/keptn/configuration-service/restapi/operations/stage"
"github.com/keptn/keptn/configuration-service/restapi/operations/stage_resource"
)
//go:generate swagger generate server --target ../../configuration-service --name ConfigurationService --spec ../swagger.yaml
func configureFlags(api *operations.ConfigurationServiceAPI) {
// api.CommandLineOptionsGroups = []swag.CommandLineOptionsGroup{ ... }
}
func configureAPI(api *operations.ConfigurationServiceAPI) http.Handler {
// configure the api here
api.ServeError = errors.ServeError
// Set your custom logger if needed. Default one is log.Printf
// Expected interface func(string, ...interface{})
//
// Example:
// api.Logger = log.Printf
api.JSONConsumer = runtime.JSONConsumer()
api.JSONProducer = runtime.JSONProducer()
api.ProjectDeleteProjectProjectNameHandler = project.DeleteProjectProjectNameHandlerFunc(handlers.DeleteProjectProjectNameHandlerFunc)
api.ProjectResourceDeleteProjectProjectNameResourceResourceURIHandler = project_resource.DeleteProjectProjectNameResourceResourceURIHandlerFunc(handlers.DeleteProjectProjectNameResourceResourceURIHandlerFunc)
api.ServiceDefaultResourceDeleteProjectProjectNameServiceServiceNameResourceResourceURIHandler = service_default_resource.DeleteProjectProjectNameServiceServiceNameResourceResourceURIHandlerFunc(handlers.DeleteProjectProjectNameServiceServiceNameResourceResourceURIHandlerFunc)
api.StageDeleteProjectProjectNameStageStageNameHandler = stage.DeleteProjectProjectNameStageStageNameHandlerFunc(handlers.DeleteProjectProjectNameStageStageNameHandlerFunc)
api.StageResourceDeleteProjectProjectNameStageStageNameResourceResourceURIHandler = stage_resource.DeleteProjectProjectNameStageStageNameResourceResourceURIHandlerFunc(handlers.DeleteProjectProjectNameStageStageNameResourceResourceURIHandlerFunc)
api.ServiceDeleteProjectProjectNameStageStageNameServiceServiceNameHandler = service.DeleteProjectProjectNameStageStageNameServiceServiceNameHandlerFunc(handlers.DeleteProjectProjectNameStageStageNameServiceServiceNameHandlerFunc)
api.ServiceResourceDeleteProjectProjectNameStageStageNameServiceServiceNameResourceResourceURIHandler = service_resource.DeleteProjectProjectNameStageStageNameServiceServiceNameResourceResourceURIHandlerFunc(handlers.DeleteProjectProjectNameStageStageNameServiceServiceNameResourceResourceURIHandlerFunc)
api.ProjectGetProjectHandler = project.GetProjectHandlerFunc(handlers.GetProjectHandlerFunc)
api.ProjectGetProjectProjectNameHandler = project.GetProjectProjectNameHandlerFunc(handlers.GetProjectProjectNameHandlerFunc)
api.ProjectResourceGetProjectProjectNameResourceHandler = project_resource.GetProjectProjectNameResourceHandlerFunc(handlers.GetProjectProjectNameResourceHandlerFunc)
api.ProjectResourceGetProjectProjectNameResourceResourceURIHandler = project_resource.GetProjectProjectNameResourceResourceURIHandlerFunc(handlers.GetProjectProjectNameResourceResourceURIHandlerFunc)
api.ServiceDefaultResourceGetProjectProjectNameServiceServiceNameResourceHandler = service_default_resource.GetProjectProjectNameServiceServiceNameResourceHandlerFunc(handlers.GetProjectProjectNameServiceServiceNameResourceHandlerFunc)
api.ServiceDefaultResourceGetProjectProjectNameServiceServiceNameResourceResourceURIHandler = service_default_resource.GetProjectProjectNameServiceServiceNameResourceResourceURIHandlerFunc(handlers.GetProjectProjectNameServiceServiceNameResourceResourceURIHandlerFunc)
api.StageGetProjectProjectNameStageHandler = stage.GetProjectProjectNameStageHandlerFunc(handlers.GetProjectProjectNameStageHandlerFunc)
api.StageGetProjectProjectNameStageStageNameHandler = stage.GetProjectProjectNameStageStageNameHandlerFunc(handlers.GetProjectProjectNameStageStageNameHandlerFunc)
api.StageResourceGetProjectProjectNameStageStageNameResourceHandler = stage_resource.GetProjectProjectNameStageStageNameResourceHandlerFunc(handlers.GetProjectProjectNameStageStageNameResourceHandlerFunc)
api.StageResourceGetProjectProjectNameStageStageNameResourceResourceURIHandler = stage_resource.GetProjectProjectNameStageStageNameResourceResourceURIHandlerFunc(handlers.GetProjectProjectNameStageStageNameResourceResourceURIHandlerFunc)
api.ServiceGetProjectProjectNameStageStageNameServiceHandler = service.GetProjectProjectNameStageStageNameServiceHandlerFunc(handlers.GetProjectProjectNameStageStageNameServiceHandlerFunc)
api.ServiceGetProjectProjectNameStageStageNameServiceServiceNameHandler = service.GetProjectProjectNameStageStageNameServiceServiceNameHandlerFunc(handlers.GetProjectProjectNameStageStageNameServiceServiceNameHandlerFunc)
api.ServiceResourceGetProjectProjectNameStageStageNameServiceServiceNameResourceHandler = service_resource.GetProjectProjectNameStageStageNameServiceServiceNameResourceHandlerFunc(handlers.GetProjectProjectNameStageStageNameServiceServiceNameResourceHandlerFunc)
api.ServiceResourceGetProjectProjectNameStageStageNameServiceServiceNameResourceResourceURIHandler = service_resource.GetProjectProjectNameStageStageNameServiceServiceNameResourceResourceURIHandlerFunc(handlers.GetProjectProjectNameStageStageNameServiceServiceNameResourceResourceURIHandlerFunc)
api.ProjectPostProjectHandler = project.PostProjectHandlerFunc(handlers.PostProjectHandlerFunc)
api.ProjectResourcePostProjectProjectNameResourceHandler = project_resource.PostProjectProjectNameResourceHandlerFunc(handlers.PostProjectProjectNameResourceHandlerFunc)
api.ServiceDefaultResourcePostProjectProjectNameServiceServiceNameResourceHandler = service_default_resource.PostProjectProjectNameServiceServiceNameResourceHandlerFunc(handlers.PostProjectProjectNameServiceServiceNameResourceHandlerFunc)
api.StagePostProjectProjectNameStageHandler = stage.PostProjectProjectNameStageHandlerFunc(handlers.PostProjectProjectNameStageHandlerFunc)
api.StageResourcePostProjectProjectNameStageStageNameResourceHandler = stage_resource.PostProjectProjectNameStageStageNameResourceHandlerFunc(handlers.PostProjectProjectNameStageStageNameResourceHandlerFunc)
api.ServicePostProjectProjectNameStageStageNameServiceHandler = service.PostProjectProjectNameStageStageNameServiceHandlerFunc(handlers.PostProjectProjectNameStageStageNameServiceHandlerFunc)
api.ServiceResourcePostProjectProjectNameStageStageNameServiceServiceNameResourceHandler = service_resource.PostProjectProjectNameStageStageNameServiceServiceNameResourceHandlerFunc(handlers.PostProjectProjectNameStageStageNameServiceServiceNameResourceHandlerFunc)
api.ProjectPutProjectProjectNameHandler = project.PutProjectProjectNameHandlerFunc(handlers.PutProjectProjectNameHandlerFunc)
api.ProjectResourcePutProjectProjectNameResourceHandler = project_resource.PutProjectProjectNameResourceHandlerFunc(handlers.PutProjectProjectNameResourceHandlerFunc)
api.ProjectResourcePutProjectProjectNameResourceResourceURIHandler = project_resource.PutProjectProjectNameResourceResourceURIHandlerFunc(handlers.PutProjectProjectNameResourceResourceURIHandlerFunc)
api.ServiceDefaultResourcePutProjectProjectNameServiceServiceNameResourceHandler = service_default_resource.PutProjectProjectNameServiceServiceNameResourceHandlerFunc(handlers.PutProjectProjectNameServiceServiceNameResourceHandlerFunc)
api.ServiceDefaultResourcePutProjectProjectNameServiceServiceNameResourceResourceURIHandler = service_default_resource.PutProjectProjectNameServiceServiceNameResourceResourceURIHandlerFunc(handlers.PutProjectProjectNameServiceServiceNameResourceResourceURIHandlerFunc)
api.StagePutProjectProjectNameStageStageNameHandler = stage.PutProjectProjectNameStageStageNameHandlerFunc(handlers.PutProjectProjectNameStageStageNameHandlerFunc)
api.StageResourcePutProjectProjectNameStageStageNameResourceHandler = stage_resource.PutProjectProjectNameStageStageNameResourceHandlerFunc(handlers.PutProjectProjectNameStageStageNameResourceHandlerFunc)
api.StageResourcePutProjectProjectNameStageStageNameResourceResourceURIHandler = stage_resource.PutProjectProjectNameStageStageNameResourceResourceURIHandlerFunc(handlers.PutProjectProjectNameStageStageNameResourceResourceURIHandlerFunc)
api.ServicePutProjectProjectNameStageStageNameServiceServiceNameHandler = service.PutProjectProjectNameStageStageNameServiceServiceNameHandlerFunc(handlers.PutProjectProjectNameStageStageNameServiceServiceNameHandlerFunc)
api.ServiceResourcePutProjectProjectNameStageStageNameServiceServiceNameResourceHandler = service_resource.PutProjectProjectNameStageStageNameServiceServiceNameResourceHandlerFunc(handlers.PutProjectProjectNameStageStageNameServiceServiceNameResourceHandlerFunc)
api.ServiceResourcePutProjectProjectNameStageStageNameServiceServiceNameResourceResourceURIHandler = service_resource.PutProjectProjectNameStageStageNameServiceServiceNameResourceResourceURIHandlerFunc(handlers.PutProjectProjectNameStageStageNameServiceServiceNameResourceResourceURIHandlerFunc)
api.EventHandleEventHandler = event.HandleEventHandlerFunc(handlers.HandleEventHandlerFunc)
api.ServicesGetServicesHandler = services.GetServicesHandlerFunc(handlers.GetServices)
api.ServicesGetServiceHandler = services.GetServiceHandlerFunc(handlers.GetService)
api.ServerShutdown = func() {}
return setupGlobalMiddleware(api.Serve(setupMiddlewares))
}
// The TLS configuration before HTTPS server starts.
func configureTLS(tlsConfig *tls.Config) {
// Make all necessary changes to the TLS configuration here.
}
// As soon as server is initialized but not run yet, this function will be called.
// If you need to modify a config, store server instance to stop it individually later, this is the place.
// This function can be called multiple times, depending on the number of serving schemes.
// scheme value will be set accordingly: "http", "https" or "unix"
func configureServer(s *http.Server, scheme, addr string) {
logger := keptncommon.NewLogger("", "", "configuration-service")
if os.Getenv("env") == "production" {
///////// initialize git ////////////
logger.Debug("Configuring git user.email")
cmd := exec.Command("git", "config", "--global", "user.email", "[email protected]")
_, err := cmd.Output()
if err != nil {
logger.Error("Could not configure git user.email: " + err.Error())
}
logger.Debug("Configuring git user.name")
cmd = exec.Command("git", "config", "--global", "user.name", "keptn")
_, err = cmd.Output()
if err != nil {
logger.Error("Could not configure git user.name: " + err.Error())
}
////////////////////////////////////
}
}
// The middleware configuration is for the handler executors. These do not apply to the swagger.json document.
// The middleware executes after routing but before authentication, binding and validation
func setupMiddlewares(handler http.Handler) http.Handler {
return handler
}
// The middleware configuration happens before anything, this middleware also applies to serving the swagger.json document.
// So this is a good place to plug in a panic handling middleware, logging and metrics
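//
// Illustrative effect (the prefix value is a placeholder): with
// PREFIX_PATH=/mgmt, the basePath in swagger-ui/swagger.yaml is rewritten from
// /api/configuration-service/v1 to /mgmt/api/configuration-service/v1; the
// health endpoint is always started on port 10999.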
func setupGlobalMiddleware(handler http.Handler) http.Handler {
prefixPath := os.Getenv("PREFIX_PATH")
if len(prefixPath) > 0 {
// Set the prefix-path in the swagger.yaml
input, err := ioutil.ReadFile("swagger-ui/swagger.yaml")
if err == nil {
editedSwagger := strings.Replace(string(input), "basePath: /api/configuration-service/v1",
"basePath: "+prefixPath+"/api/configuration-service/v1", -1)
err = ioutil.WriteFile("swagger-ui/swagger.yaml", []byte(editedSwagger), 0644)
if err != nil {
fmt.Println("Failed to write edited swagger.yaml")
}
} else {
fmt.Println("Failed to set basePath in swagger.yaml")
}
}
go keptnapi.RunHealthEndpoint("10999")
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Serving ./swagger-ui/
if strings.Index(r.URL.Path, "/swagger-ui/") == 0 {
http.StripPrefix("/swagger-ui/", http.FileServer(http.Dir("swagger-ui"))).ServeHTTP(w, r)
return
}
handler.ServeHTTP(w, r)
})
}
| [
"\"env\"",
"\"PREFIX_PATH\""
]
| []
| [
"PREFIX_PATH",
"env"
]
| [] | ["PREFIX_PATH", "env"] | go | 2 | 0 | |
purchasing_test/unit/other/test_notifications.py | # -*- coding: utf-8 -*-
import os
from unittest import TestCase
from mock import patch, Mock, MagicMock
from purchasing.notifications import Notification
class TestNotification(TestCase):
def setUp(self):
os.environ['CONFIG'] = 'purchasing.settings.TestConfig'
@patch('purchasing.notifications.render_template', return_value='a test')
def test_notification_initialization(self, render_template):
'''Test notifications properly initialize
'''
notification = Notification(
from_email='[email protected]', to_email='[email protected]', cc_email=[('[email protected]',), ('[email protected]',)]
)
self.assertEquals(notification.to_email, ['[email protected]'])
self.assertEquals(notification.from_email, '[email protected]')
self.assertEquals(notification.cc_email, ['[email protected]', '[email protected]'])
self.assertEquals(notification.subject, '')
self.assertEquals(notification.html_body, 'a test')
self.assertEquals(notification.txt_body, '')
self.assertEquals(notification.attachments, [])
@patch('purchasing.notifications.render_template', return_value='a test')
def test_notification_flatten(self, render_template):
'''Test notification kwarg flattener
'''
obj = MagicMock()
obj.__unicode__ = lambda x: 'quux'
notification = Notification(from_email='[email protected]', foo='bar', baz=['qux1', obj])
self.assertEquals(
{'foo': 'bar', 'baz': 'qux1; qux2'},
notification.convert_models(dict(foo='bar', baz=['qux1', 'qux2']))
)
@patch('purchasing.notifications.render_template', return_value='a test')
def test_notification_reshape(self, render_template):
'''Test notification recipient flattener
'''
notification = Notification(to_email='[email protected]', from_email='[email protected]')
test_recips = [('a',), ('multi',), ['nested', 'thing']]
self.assertEquals(
['a', 'multi', 'nested', 'thing'],
notification.flatten(test_recips)
)
test_recips_complex = ['a', ['b', ['c', 'd']], ['e']]
self.assertEquals(
['a', 'b', 'c', 'd', 'e'],
notification.flatten(test_recips_complex)
)
@patch('purchasing.notifications.current_app')
@patch('purchasing.notifications.render_template', return_value='a test')
def test_notification_build_multi(self, current_app, render_template):
'''Test multi-messages only have one recipient
'''
current_app.logger = Mock(info=Mock())
notification = Notification(to_email=['[email protected]', '[email protected]'], from_email='[email protected]')
# should build two messages on multi send
msgs = notification._build(multi=True)
self.assertTrue(len(msgs), 2)
for msg in msgs:
self.assertEquals(len(msg.recipients), 1)
@patch('purchasing.notifications.current_app')
@patch('purchasing.notifications.render_template', return_value='a test')
    def test_notification_build_single(self, current_app, render_template):
'''Test single build messages have multiple recipients
'''
current_app.logger = Mock(info=Mock())
notification = Notification(to_email=['[email protected]', '[email protected]'], from_email='[email protected]')
# should build two messages on multi send
msgs = notification._build(multi=False)
self.assertTrue(len(msgs), 1)
for msg in msgs:
self.assertEquals(len(msg.recipients), 2)
@patch('flask_mail.Mail.send')
@patch('purchasing.notifications.send_email.delay', return_value=True)
@patch('purchasing.notifications.render_template', return_value='a test')
def test_notification_send_multi(self, send, send_email, render_template):
'''Test multi builds multiple message objects
'''
notification = Notification(to_email=['[email protected]', '[email protected]'], from_email='[email protected]')
notification.build_msg = Mock()
notification.build_msg.return_value = []
# should build two messages on multi send
notification.send(multi=True)
self.assertTrue(notification.build_msg.called)
self.assertEquals(notification.build_msg.call_count, 2)
@patch('flask_mail.Mail.send')
@patch('purchasing.notifications.send_email.delay')
@patch('purchasing.notifications.render_template', return_value='a test')
def test_notification_send_single(self, send, send_email, render_template):
'''Test non-multi only builds one message even with multiple emails
'''
notification = Notification(to_email=['[email protected]', '[email protected]'], from_email='[email protected]')
notification.build_msg = Mock()
notification.build_msg.return_value = []
# should build two messages on multi send
notification.send(multi=False)
self.assertTrue(notification.build_msg.called)
self.assertEquals(notification.build_msg.call_count, 1)
| []
| []
| [
"CONFIG"
]
| [] | ["CONFIG"] | python | 1 | 0 | |
setup_s3_bucket.py | import os
from cloud.aws_service import AwsService
def main():
"""Execute script."""
region = os.environ.get('REGION', 'us-east-1')
s3_bucket = os.environ.get('S3_BUCKET', 'costmgmtacct1234')
aws = AwsService()
result = aws.create_bucket(s3_bucket, region)
if result:
print(f'S3 bucket {s3_bucket} was created.')
else:
print(f'Failed creating S3 bucket {s3_bucket}.')
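# Example invocation (illustrative; the bucket name and region are placeholders):
#   REGION=us-west-2 S3_BUCKET=my-cost-reports python setup_s3_bucket.py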
if __name__ == '__main__':
    main()
| []
| []
| [
"S3_BUCKET",
"REGION"
]
| [] | ["S3_BUCKET", "REGION"] | python | 2 | 0 | |
src/testcases/CWE643_Xpath_Injection/CWE643_Xpath_Injection__Environment_01.java | /* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE643_Xpath_Injection__Environment_01.java
Label Definition File: CWE643_Xpath_Injection.label.xml
Template File: sources-sinks-01.tmpl.java
*/
/*
* @description
* CWE: 643 Xpath Injection
* BadSource: Environment Read data from an environment variable
* GoodSource: A hardcoded string
* Sinks:
* GoodSink: validate input through StringEscapeUtils
* BadSink : user input is used without validate
* Flow Variant: 01 Baseline
*
* */
package testcases.CWE643_Xpath_Injection;
import testcasesupport.*;
import javax.servlet.http.*;
import javax.xml.xpath.*;
import org.xml.sax.InputSource;
import org.apache.commons.lang.StringEscapeUtils;
public class CWE643_Xpath_Injection__Environment_01 extends AbstractTestCase
{
public void bad() throws Throwable
{
String data;
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
data = System.getenv("ADD");
String xmlFile = null;
if(System.getProperty("os.name").toLowerCase().indexOf("win") >= 0)
{
/* running on Windows */
xmlFile = "\\src\\testcases\\CWE643_Xpath Injection\\CWE643_Xpath_Injection__Helper.xml";
}
else
{
/* running on non-Windows */
xmlFile = "./src/testcases/CWE643_Xpath Injection/CWE643_Xpath_Injection__Helper.xml";
}
if (data != null)
{
/* assume username||password as source */
String [] tokens = data.split("||");
if (tokens.length < 2)
{
return;
}
String username = tokens[0];
String password = tokens[1];
/* build xpath */
XPath xPath = XPathFactory.newInstance().newXPath();
InputSource inputXml = new InputSource(xmlFile);
/* INCIDENTAL: CWE180 Incorrect Behavior Order: Validate Before Canonicalize
* The user input should be canonicalized before validation. */
/* POTENTIAL FLAW: user input is used without validate */
String query = "//users/user[name/text()='" + username +
"' and pass/text()='" + password + "']" +
"/secret/text()";
String secret = (String)xPath.evaluate(query, inputXml, XPathConstants.STRING);
}
}
public void good() throws Throwable
{
goodG2B();
goodB2G();
}
/* goodG2B() - use goodsource and badsink */
private void goodG2B() throws Throwable
{
String data;
/* FIX: Use a hardcoded string */
data = "foo";
String xmlFile = null;
if(System.getProperty("os.name").toLowerCase().indexOf("win") >= 0)
{
/* running on Windows */
xmlFile = "\\src\\testcases\\CWE643_Xpath Injection\\CWE643_Xpath_Injection__Helper.xml";
}
else
{
/* running on non-Windows */
xmlFile = "./src/testcases/CWE643_Xpath Injection/CWE643_Xpath_Injection__Helper.xml";
}
if (data != null)
{
/* assume username||password as source */
String [] tokens = data.split("||");
if (tokens.length < 2)
{
return;
}
String username = tokens[0];
String password = tokens[1];
/* build xpath */
XPath xPath = XPathFactory.newInstance().newXPath();
InputSource inputXml = new InputSource(xmlFile);
/* INCIDENTAL: CWE180 Incorrect Behavior Order: Validate Before Canonicalize
* The user input should be canonicalized before validation. */
/* POTENTIAL FLAW: user input is used without validate */
String query = "//users/user[name/text()='" + username +
"' and pass/text()='" + password + "']" +
"/secret/text()";
String secret = (String)xPath.evaluate(query, inputXml, XPathConstants.STRING);
}
}
/* goodB2G() - use badsource and goodsink */
private void goodB2G() throws Throwable
{
String data;
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
data = System.getenv("ADD");
String xmlFile = null;
if(System.getProperty("os.name").toLowerCase().indexOf("win") >= 0)
{
/* running on Windows */
xmlFile = "\\src\\testcases\\CWE643_Xpath Injection\\CWE643_Xpath_Injection__Helper.xml";
}
else
{
/* running on non-Windows */
xmlFile = "./src/testcases/CWE643_Xpath Injection/CWE643_Xpath_Injection__Helper.xml";
}
if (data != null)
{
/* assume username||password as source */
String [] tokens = data.split("||");
if( tokens.length < 2 )
{
return;
}
/* FIX: validate input using StringEscapeUtils */
String username = StringEscapeUtils.escapeXml(tokens[0]);
String password = StringEscapeUtils.escapeXml(tokens[1]);
/* build xpath */
XPath xPath = XPathFactory.newInstance().newXPath();
InputSource inputXml = new InputSource(xmlFile);
String query = "//users/user[name/text()='" + username +
"' and pass/text()='" + password + "']" +
"/secret/text()";
String secret = (String)xPath.evaluate(query, inputXml, XPathConstants.STRING);
}
}
/* Below is the main(). It is only used when building this testcase on
* its own for testing or for building a binary to use in testing binary
* analysis tools. It is not used when compiling all the testcases as one
* application, which is how source code analysis tools are tested.
*/
public static void main(String[] args) throws ClassNotFoundException,
InstantiationException, IllegalAccessException
{
mainFromParent(args);
}
}
| [
"\"ADD\"",
"\"ADD\""
]
| []
| [
"ADD"
]
| [] | ["ADD"] | java | 1 | 0 | |
vendor/github.com/openshift/origin/pkg/cmd/util/docker/docker.go | package docker
import (
"os"
"time"
"k8s.io/kubernetes/pkg/kubelet/dockertools"
docker "github.com/fsouza/go-dockerclient"
"github.com/golang/glog"
"github.com/spf13/pflag"
)
// Helper contains all the valid config options for connecting to Docker from
// a command line.
type Helper struct {
}
// NewHelper creates a Helper object with the default values set. Use this
// to use consistent Docker client loading behavior from different contexts.
func NewHelper() *Helper {
return &Helper{}
}
// InstallFlags installs the Docker flag helper into a FlagSet with the default
// options and default values from the Helper object.
func (_ *Helper) InstallFlags(flags *pflag.FlagSet) {
}
// GetClient returns a valid Docker client, the address of the client, or an error
// if the client couldn't be created.
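//
// Illustrative usage from a caller (package qualifier omitted; the endpoint
// below is a placeholder — DOCKER_HOST is honoured when set, otherwise the
// local Unix socket is used):
//
//	// export DOCKER_HOST=tcp://127.0.0.1:2375
//	helper := NewHelper()
//	client, endpoint, err := helper.GetClient()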
func (_ *Helper) GetClient() (client *docker.Client, endpoint string, err error) {
client, err = docker.NewClientFromEnv()
if len(os.Getenv("DOCKER_HOST")) > 0 {
endpoint = os.Getenv("DOCKER_HOST")
} else {
endpoint = "unix:///var/run/docker.sock"
}
return
}
// GetKubeClient returns the Kubernetes Docker client.
func (_ *Helper) GetKubeClient(requestTimeout, imagePullProgressDeadline time.Duration) (*KubeDocker, string, error) {
var endpoint string
if len(os.Getenv("DOCKER_HOST")) > 0 {
endpoint = os.Getenv("DOCKER_HOST")
} else {
endpoint = "unix:///var/run/docker.sock"
}
client := dockertools.ConnectToDockerOrDie(endpoint, requestTimeout, imagePullProgressDeadline)
originClient := &KubeDocker{client}
return originClient, endpoint, nil
}
// GetClientOrExit returns a valid Docker client and the address of the client,
// or prints an error and exits.
func (h *Helper) GetClientOrExit() (*docker.Client, string) {
client, addr, err := h.GetClient()
if err != nil {
glog.Fatalf("ERROR: Couldn't connect to Docker at %s.\n%v\n.", addr, err)
}
return client, addr
}
// KubeDocker provides a wrapper to Kubernetes Docker interface
// This wrapper is compatible with OpenShift Docker interface.
type KubeDocker struct {
dockertools.DockerInterface
}
// Ping implements the DockerInterface Ping method.
func (c *KubeDocker) Ping() error {
client, err := docker.NewClientFromEnv()
if err != nil {
return err
}
return client.Ping()
}
| [
"\"DOCKER_HOST\"",
"\"DOCKER_HOST\"",
"\"DOCKER_HOST\"",
"\"DOCKER_HOST\""
]
| []
| [
"DOCKER_HOST"
]
| [] | ["DOCKER_HOST"] | go | 1 | 0 | |
alectryon/cli.py | # Copyright © 2019 Clément Pit-Claudel
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import inspect
import os
import os.path
import shutil
import sys
# Pipelines
# =========
def read_plain(_, fpath, fname):
if fname == "-":
return sys.stdin.read()
with open(fpath, encoding="utf-8") as f:
return f.read()
def read_json(_, fpath, fname):
from json import load
if fname == "-":
return load(sys.stdin)
with open(fpath, encoding="utf-8") as f:
return load(f)
def parse_coq_plain(contents):
return [contents]
def _catch_parsing_errors(fpath, k, *args):
from .literate import ParsingError
try:
return k(*args)
except ParsingError as e:
raise ValueError("{}:{}".format(fpath, e))
def coq_to_rst(coq, fpath, point, marker):
from .literate import coq2rst_marked
return _catch_parsing_errors(fpath, coq2rst_marked, coq, point, marker)
def rst_to_coq(coq, fpath, point, marker):
from .literate import rst2coq_marked
return _catch_parsing_errors(fpath, rst2coq_marked, coq, point, marker)
def annotate_chunks(chunks, sertop_args):
from .core import annotate
return annotate(chunks, sertop_args)
def register_docutils(v, sertop_args):
from .docutils import setup, AlectryonTransform
AlectryonTransform.SERTOP_ARGS = sertop_args
setup()
return v
def _gen_docutils_html(source, fpath,
webpage_style, include_banner, include_vernums,
html_assets, traceback, Parser, Reader):
from docutils.core import publish_string
from .docutils import HtmlTranslator, HtmlWriter
# The encoding/decoding dance below happens because setting output_encoding
# to "unicode" causes reST to generate a bad <meta> tag, and setting
# input_encoding to "unicode" breaks the ‘.. include’ directive.
html_assets.extend(HtmlTranslator.JS + HtmlTranslator.CSS)
settings_overrides = {
'traceback': traceback,
'embed_stylesheet': False,
'stylesheet_path': None,
'stylesheet_dirs': [],
'alectryon_banner': include_banner,
'alectryon_vernums': include_vernums,
'webpage_style': webpage_style,
'input_encoding': 'utf-8',
'output_encoding': 'utf-8'
}
parser = Parser()
return publish_string(
source=source.encode("utf-8"),
source_path=fpath, destination_path=None,
reader=Reader(parser), reader_name=None,
parser=parser, parser_name=None,
writer=HtmlWriter(), writer_name=None,
settings=None, settings_spec=None,
settings_overrides=settings_overrides, config_section=None,
enable_exit_status=True).decode("utf-8")
def gen_rstcoq_html(coq, fpath, webpage_style,
include_banner, include_vernums,
html_assets, traceback):
from .docutils import RSTCoqParser, RSTCoqStandaloneReader
return _gen_docutils_html(coq, fpath, webpage_style,
include_banner, include_vernums,
html_assets, traceback,
RSTCoqParser, RSTCoqStandaloneReader)
def gen_rst_html(rst, fpath, webpage_style,
include_banner, include_vernums,
html_assets, traceback):
from docutils.parsers.rst import Parser
from docutils.readers.standalone import Reader
return _gen_docutils_html(rst, fpath, webpage_style,
include_banner, include_vernums,
html_assets, traceback,
Parser, Reader)
def _docutils_cmdline(description, Reader, Parser):
import locale
locale.setlocale(locale.LC_ALL, '')
from docutils.core import publish_cmdline, default_description
from .docutils import setup, HtmlWriter
setup()
parser = Parser()
publish_cmdline(
reader=Reader(parser), parser=parser,
writer=HtmlWriter(),
settings_overrides={'stylesheet_path': None},
description=(description + default_description)
)
def _lint_docutils(source, fpath, Parser, traceback):
from io import StringIO
from docutils.utils import new_document
from docutils.frontend import OptionParser
from docutils.utils import Reporter
from .docutils import JsErrorPrinter
parser = Parser()
settings = OptionParser(components=(Parser,)).get_default_values()
settings.traceback = traceback
observer = JsErrorPrinter(StringIO(), settings)
document = new_document(fpath, settings)
document.reporter.report_level = 0 # Report all messages
document.reporter.halt_level = Reporter.SEVERE_LEVEL + 1 # Do not exit early
document.reporter.stream = False # Disable textual reporting
document.reporter.attach_observer(observer)
parser.parse(source, document)
return observer.stream.getvalue()
def lint_rstcoq(coq, fpath, traceback):
from .docutils import RSTCoqParser
return _lint_docutils(coq, fpath, RSTCoqParser, traceback)
def lint_rst(rst, fpath, traceback):
from docutils.parsers.rst import Parser
return _lint_docutils(rst, fpath, Parser, traceback)
def _scrub_fname(fname):
import re
return re.sub("[^-a-zA-Z0-9]", "-", fname)
def gen_html_snippets(annotated, include_vernums, fname):
from .html import HtmlGenerator
from .pygments import highlight_html
return HtmlGenerator(highlight_html, _scrub_fname(fname)).gen(annotated)
def gen_latex_snippets(annotated):
from .latex import LatexGenerator
from .pygments import highlight_latex
return LatexGenerator(highlight_latex).gen(annotated)
COQDOC_OPTIONS = ['--body-only', '--no-glob', '--no-index', '--no-externals',
'-s', '--html', '--stdout', '--utf8']
def _run_coqdoc(coq_snippets, coqdoc_bin=None):
"""Get the output of coqdoc on coq_code."""
from shutil import rmtree
from tempfile import mkstemp, mkdtemp
from subprocess import check_output
coqdoc_bin = coqdoc_bin or os.path.join(os.getenv("COQBIN", ""), "coqdoc")
dpath = mkdtemp(prefix="alectryon_coqdoc_")
fd, filename = mkstemp(prefix="alectryon_coqdoc_", suffix=".v", dir=dpath)
try:
for snippet in coq_snippets:
os.write(fd, snippet.encode("utf-8"))
os.write(fd, b"\n(* --- *)\n") # Separator to prevent fusing
os.close(fd)
coqdoc = [coqdoc_bin, *COQDOC_OPTIONS, "-d", dpath, filename]
return check_output(coqdoc, cwd=dpath, timeout=10).decode("utf-8")
finally:
rmtree(dpath)
def _gen_coqdoc_html(coqdoc_fragments):
from bs4 import BeautifulSoup
coqdoc_output = _run_coqdoc(fr.contents for fr in coqdoc_fragments)
soup = BeautifulSoup(coqdoc_output, "html.parser")
docs = soup.find_all(class_='doc')
if len(docs) != sum(1 for c in coqdoc_fragments if not c.special):
from pprint import pprint
print("Coqdoc mismatch:", file=sys.stderr)
        pprint(list(zip(coqdoc_fragments, docs)))
raise AssertionError()
return docs
def _gen_html_snippets_with_coqdoc(annotated, fname):
from dominate.util import raw
from .html import HtmlGenerator
from .pygments import highlight_html
from .transforms import isolate_coqdoc, default_transform, CoqdocFragment
writer = HtmlGenerator(highlight_html, _scrub_fname(fname))
parts = [part for fragments in annotated
for part in isolate_coqdoc(fragments)]
coqdoc = [part for part in parts
if isinstance(part, CoqdocFragment)]
coqdoc_html = iter(_gen_coqdoc_html(coqdoc))
for part in parts:
if isinstance(part, CoqdocFragment):
if not part.special:
yield [raw(str(next(coqdoc_html, None)))]
else:
fragments = default_transform(part.fragments)
yield writer.gen_fragments(fragments)
def gen_html_snippets_with_coqdoc(annotated, html_classes, fname):
html_classes.append("coqdoc")
# ‘return’ instead of ‘yield from’ to update html_classes eagerly
return _gen_html_snippets_with_coqdoc(annotated, fname)
def copy_assets(state, html_assets, copy_fn, output_directory):
from .html import copy_assets as cp
if copy_fn:
cp(output_directory, assets=html_assets, copy_fn=copy_fn)
return state
def dump_html_standalone(snippets, fname, webpage_style,
include_banner, include_vernums,
html_assets, html_classes):
from dominate import tags, document
from dominate.util import raw
from . import GENERATOR
from .core import SerAPI
from .pygments import HTML_FORMATTER
from .html import ASSETS, ADDITIONAL_HEADS, gen_banner, wrap_classes
doc = document(title=fname)
doc.set_attribute("class", "alectryon-standalone")
doc.head.add(tags.meta(charset="utf-8"))
doc.head.add(tags.meta(name="generator", content=GENERATOR))
for hd in ADDITIONAL_HEADS:
doc.head.add(raw(hd))
for css in ASSETS.ALECTRYON_CSS:
doc.head.add(tags.link(rel="stylesheet", href=css))
for link in (ASSETS.IBM_PLEX_CDN, ASSETS.FIRA_CODE_CDN):
doc.head.add(raw(link))
for js in ASSETS.ALECTRYON_JS:
doc.head.add(tags.script(src=js))
html_assets.extend(ASSETS.ALECTRYON_CSS)
html_assets.extend(ASSETS.ALECTRYON_JS)
pygments_css = HTML_FORMATTER.get_style_defs('.highlight')
doc.head.add(tags.style(pygments_css, type="text/css"))
cls = wrap_classes(webpage_style, *html_classes)
root = doc.body.add(tags.article(cls=cls))
if include_banner:
root.add(raw(gen_banner(SerAPI.version_info(), include_vernums)))
for snippet in snippets:
root.add(snippet)
return doc.render(pretty=False)
def prepare_json(obj):
from .json import json_of_annotated
return json_of_annotated(obj)
def dump_json(js):
from json import dumps
return dumps(js, indent=4)
def dump_html_snippets(snippets):
s = ""
for snippet in snippets:
s += snippet.render(pretty=True)
s += "<!-- alectryon-block-end -->\n"
return s
def dump_latex_snippets(snippets):
s = ""
for snippet in snippets:
s += str(snippet)
s += "\n%% alectryon-block-end\n"
return s
def strip_extension(fname):
for ext in EXTENSIONS:
if fname.endswith(ext):
return fname[:-len(ext)]
return fname
def write_output(ext, contents, fname, output, output_directory):
if output == "-" or (output is None and fname == "-"):
sys.stdout.write(contents)
else:
if not output:
output = os.path.join(output_directory, strip_extension(fname) + ext)
with open(output, mode="w", encoding="utf-8") as f:
f.write(contents)
def write_file(ext):
return lambda contents, fname, output, output_directory: \
write_output(ext, contents, fname, output, output_directory)
PIPELINES = {
'json': {
'json': (read_json, annotate_chunks,
prepare_json, dump_json, write_file(".io.json")),
'snippets-html': (read_json, annotate_chunks, gen_html_snippets,
dump_html_snippets, write_file(".snippets.html")),
'snippets-latex': (read_json, annotate_chunks, gen_latex_snippets,
dump_latex_snippets, write_file(".snippets.tex"))
},
'coq': {
'null': (read_plain, parse_coq_plain, annotate_chunks),
'webpage': (read_plain, parse_coq_plain, annotate_chunks,
gen_html_snippets, dump_html_standalone, copy_assets,
write_file(".v.html")),
'snippets-html': (read_plain, parse_coq_plain, annotate_chunks,
gen_html_snippets, dump_html_snippets,
write_file(".snippets.html")),
'snippets-latex': (read_plain, parse_coq_plain, annotate_chunks,
gen_latex_snippets, dump_latex_snippets,
write_file(".snippets.tex")),
'lint': (read_plain, register_docutils, lint_rstcoq,
write_file(".lint.json")),
'rst': (read_plain, coq_to_rst, write_file(".v.rst")),
'json': (read_plain, parse_coq_plain, annotate_chunks, prepare_json,
dump_json, write_file(".io.json"))
},
'coq+rst': {
'webpage': (read_plain, register_docutils, gen_rstcoq_html, copy_assets,
write_file(".html")),
'lint': (read_plain, register_docutils, lint_rstcoq,
write_file(".lint.json")),
'rst': (read_plain, coq_to_rst, write_file(".v.rst"))
},
'coqdoc': {
'webpage': (read_plain, parse_coq_plain, annotate_chunks,
gen_html_snippets_with_coqdoc, dump_html_standalone,
copy_assets, write_file(".html")),
},
'rst': {
'webpage': (read_plain, register_docutils, gen_rst_html, copy_assets,
write_file(".html")),
'lint': (read_plain, register_docutils, lint_rst,
write_file(".lint.json")),
'coq': (read_plain, rst_to_coq, write_file(".v")),
'coq+rst': (read_plain, rst_to_coq, write_file(".v"))
}
}
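# Each pipeline above is a tuple of steps that process_pipelines() applies left to
# right: the first step reads the input, intermediate steps transform the running
# state, and the last step (usually write_file(...)) persists the result.  For
# example, PIPELINES['coq']['json'] reads plain Coq, annotates it, converts the
# annotations to JSON-serialisable objects, dumps them, and writes "<input>.io.json".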
# CLI
# ===
EXTENSIONS = ['.v', '.json', '.v.rst', '.rst']
FRONTENDS_BY_EXTENSION = [
('.v', 'coq+rst'), ('.json', 'json'), ('.rst', 'rst')
]
BACKENDS_BY_EXTENSION = [
('.v', 'coq'), ('.json', 'json'), ('.rst', 'rst'),
('.lint.json', 'lint'),
('.snippets.html', 'snippets-html'),
('.snippets.tex', 'snippets-latex'),
('.v.html', 'webpage'), ('.html', 'webpage')
]
DEFAULT_BACKENDS = {
'json': 'json',
'coq': 'webpage',
'coqdoc': 'webpage',
'coq+rst': 'webpage',
'rst': 'webpage'
}
def infer_mode(fpath, kind, arg, table):
for (ext, mode) in table:
if fpath.endswith(ext):
return mode
MSG = """{}: Not sure what to do with {!r}.
Try passing {}?"""
raise argparse.ArgumentTypeError(MSG.format(kind, fpath, arg))
def infer_frontend(fpath):
return infer_mode(fpath, "input", "--frontend", FRONTENDS_BY_EXTENSION)
def infer_backend(frontend, out_fpath):
if out_fpath:
return infer_mode(out_fpath, "output", "--backend", BACKENDS_BY_EXTENSION)
return DEFAULT_BACKENDS[frontend]
def resolve_pipeline(fpath, args):
frontend = args.frontend or infer_frontend(fpath)
assert frontend in PIPELINES
supported_backends = PIPELINES[frontend]
backend = args.backend or infer_backend(frontend, args.output)
if backend not in supported_backends:
MSG = """argument --backend: Frontend {!r} does not support backend {!r}: \
expecting one of {}"""
raise argparse.ArgumentTypeError(MSG.format(
frontend, backend, ", ".join(map(repr, supported_backends))))
return supported_backends[backend]
COPY_FUNCTIONS = {
"copy": shutil.copy,
"symlink": os.symlink,
"hardlink": os.link,
"none": None
}
def post_process_arguments(parser, args):
if len(args.input) > 1 and args.output:
parser.error("argument --output: Not valid with multiple inputs")
if args.stdin_filename and "-" not in args.input:
parser.error("argument --stdin-filename: input must be '-'")
for dirpath in args.coq_args_I:
args.sertop_args.extend(("-I", dirpath))
for pair in args.coq_args_R:
args.sertop_args.extend(("-R", ",".join(pair)))
for pair in args.coq_args_Q:
args.sertop_args.extend(("-Q", ",".join(pair)))
# argparse applies ‘type’ before ‘choices’, so we do the conversion here
args.copy_fn = COPY_FUNCTIONS[args.copy_fn]
args.point, args.marker = args.mark_point
if args.point is not None:
try:
args.point = int(args.point)
except ValueError:
MSG = "argument --mark-point: Expecting a number, not {!r}"
parser.error(MSG.format(args.point))
args.html_assets = []
args.html_classes = []
args.pipelines = [(fpath, resolve_pipeline(fpath, args))
for fpath in args.input]
return args
def build_parser():
parser = argparse.ArgumentParser(description="""\
Annotate segments of Coq code with responses and goals.
Take input in Coq, reStructuredText, or JSON format \
and produce reStructuredText, HTML, or JSON output.""")
INPUT_HELP = "Configure the input."
out = parser.add_argument_group("Input arguments", INPUT_HELP)
INPUT_FILES_HELP = "Input files"
parser.add_argument("input", nargs="+", help=INPUT_FILES_HELP)
INPUT_STDIN_NAME_HELP = "Name of file passed on stdin, if any"
parser.add_argument("--stdin-filename", default=None,
help=INPUT_STDIN_NAME_HELP)
FRONTEND_HELP = "Choose a frontend. Defaults: "
FRONTEND_HELP += "; ".join("{!r} → {}".format(ext, frontend)
for ext, frontend in FRONTENDS_BY_EXTENSION)
FRONTEND_CHOICES = sorted(PIPELINES.keys())
out.add_argument("--frontend", default=None, choices=FRONTEND_CHOICES,
help=FRONTEND_HELP)
OUTPUT_HELP = "Configure the output."
out = parser.add_argument_group("Output arguments", OUTPUT_HELP)
BACKEND_HELP = "Choose a backend. Supported: "
BACKEND_HELP += "; ".join(
"{} → {{{}}}".format(frontend, ", ".join(sorted(backends)))
for frontend, backends in PIPELINES.items())
BACKEND_CHOICES = sorted(set(b for _, bs in PIPELINES.items() for b in bs))
out.add_argument("--backend", default=None, choices=BACKEND_CHOICES,
help=BACKEND_HELP)
OUT_FILE_HELP = "Set the output file (default: computed based on INPUT)."
parser.add_argument("-o", "--output", default=None,
help=OUT_FILE_HELP)
OUT_DIR_HELP = "Set the output directory (default: same as each INPUT)."
parser.add_argument("--output-directory", default=None,
help=OUT_DIR_HELP)
COPY_ASSETS_HELP = ("Chose the method to use to copy assets " +
"along the generated file(s) when creating webpages.")
parser.add_argument("--copy-assets", choices=list(COPY_FUNCTIONS.keys()),
default="copy", dest="copy_fn",
help=COPY_ASSETS_HELP)
CACHE_DIRECTORY_HELP = ("Cache Coq's output in DIRECTORY.")
parser.add_argument("--cache-directory", default=None, metavar="DIRECTORY",
help=CACHE_DIRECTORY_HELP)
NO_HEADER_HELP = "Do not insert a header with usage instructions in webpages."
parser.add_argument("--no-header", action='store_false',
dest="include_banner", default="True",
help=NO_HEADER_HELP)
NO_VERSION_NUMBERS = "Omit version numbers in meta tags and headers."
parser.add_argument("--no-version-numbers", action='store_false',
dest="include_vernums", default=True,
help=NO_VERSION_NUMBERS)
WEBPAGE_STYLE_HELP = "Choose a style for standalone webpages."
WEBPAGE_STYLE_CHOICES = ("centered", "floating", "windowed")
parser.add_argument("--webpage-style", default="centered",
choices=WEBPAGE_STYLE_CHOICES,
help=WEBPAGE_STYLE_HELP)
MARK_POINT_HELP = "Mark a point in the output with a given marker."
parser.add_argument("--mark-point", nargs=2, default=(None, None),
metavar=("POINT", "MARKER"),
help=MARK_POINT_HELP)
SUBP_HELP = "Pass arguments to the SerAPI process"
subp = parser.add_argument_group("Subprocess arguments", SUBP_HELP)
SERTOP_ARGS_HELP = "Pass a single argument to SerAPI (e.g. -Q dir,lib)."
subp.add_argument("--sertop-arg", dest="sertop_args",
action="append", default=[],
metavar="SERAPI_ARG",
help=SERTOP_ARGS_HELP)
I_HELP = "Pass -I DIR to the SerAPI subprocess."
subp.add_argument("-I", "--ml-include-path", dest="coq_args_I",
metavar="DIR", nargs=1, action="append",
default=[], help=I_HELP)
Q_HELP = "Pass -Q DIR COQDIR to the SerAPI subprocess."
subp.add_argument("-Q", "--load-path", dest="coq_args_Q",
metavar=("DIR", "COQDIR"), nargs=2, action="append",
default=[], help=Q_HELP)
R_HELP = "Pass -R DIR COQDIR to the SerAPI subprocess."
subp.add_argument("-R", "--rec-load-path", dest="coq_args_R",
metavar=("DIR", "COQDIR"), nargs=2, action="append",
default=[], help=R_HELP)
EXPECT_UNEXPECTED_HELP = "Ignore unexpected output from SerAPI"
parser.add_argument("--expect-unexpected", action="store_true",
default=False, help=EXPECT_UNEXPECTED_HELP)
DEBUG_HELP = "Print communications with SerAPI."
parser.add_argument("--debug", action="store_true",
default=False, help=DEBUG_HELP)
TRACEBACK_HELP = "Print error traces."
parser.add_argument("--traceback", action="store_true",
default=False, help=TRACEBACK_HELP)
return parser
def parse_arguments():
parser = build_parser()
return post_process_arguments(parser, parser.parse_args())
# Entry point
# ===========
def call_pipeline_step(step, state, ctx):
params = list(inspect.signature(step).parameters.keys())[1:]
return step(state, **{p: ctx[p] for p in params})
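# call_pipeline_step() fills every parameter after the first by name from the
# per-file context built below, so a step such as dump_html_standalone(snippets,
# fname, webpage_style, ...) receives the previous step's return value as its
# first argument and the remaining arguments straight from ctx.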
def build_context(fpath, args):
if fpath == "-":
fname, fpath = "-", (args.stdin_filename or "-")
else:
fname = os.path.basename(fpath)
ctx = {"fpath": fpath, "fname": fname, **vars(args)}
if args.output_directory is None:
if fname == "-":
ctx["output_directory"] = "."
else:
ctx["output_directory"] = os.path.dirname(os.path.abspath(fpath))
return ctx
def process_pipelines(args):
if args.debug:
from . import core
core.DEBUG = True
if args.cache_directory:
from . import docutils
docutils.CACHE_DIRECTORY = args.cache_directory
if args.expect_unexpected:
from . import core
core.SerAPI.EXPECT_UNEXPECTED = True
try:
for fpath, pipeline in args.pipelines:
state, ctx = None, build_context(fpath, args)
for step in pipeline:
state = call_pipeline_step(step, state, ctx)
except (ValueError, FileNotFoundError) as e:
if args.traceback:
raise e
print("Exiting early due to an error:", file=sys.stderr)
print(str(e), file=sys.stderr)
sys.exit(1)
def main():
args = parse_arguments()
process_pipelines(args)
# Alternative CLIs
# ================
def rstcoq2html():
from .docutils import RSTCoqStandaloneReader, RSTCoqParser
DESCRIPTION = 'Build an HTML document from an Alectryon Coq file.'
_docutils_cmdline(DESCRIPTION, RSTCoqStandaloneReader, RSTCoqParser)
def coqrst2html():
from docutils.parsers.rst import Parser
from docutils.readers.standalone import Reader
DESCRIPTION = 'Build an HTML document from an Alectryon reStructuredText file.'
_docutils_cmdline(DESCRIPTION, Reader, Parser)
| [] | [] | ["COQBIN"] | [] | ["COQBIN"] | python | 1 | 0 |
main.py | import os
from sanic import Sanic, response as res
from sanic.exceptions import NotFound
from sanic.websocket import ConnectionClosed
import json
from database import get_messages, post_message
# initiate the sanic app
app = Sanic('app')
# list of connected clients
clients = set()
# function that sends a websocket message to all connected clients
async def broadcast(message):
    # iterate over a copy of the clients set: removing an element from a set
    # while iterating over it would raise a RuntimeError
    for client in [*clients]:  # copy the set by unpacking it into a list
try:
await client.send(message)
except ConnectionClosed:
# remove client from list if disconnected
clients.remove(client)
@app.websocket('/ws')
async def websockets(req, ws):
# add connected client to list
clients.add(ws)
while True:
# wait to receive message from client
data = await ws.recv()
data = json.loads(data) # parse json
# save message to db
data['id'] = await post_message(data)
print(data)
data = json.dumps(data) # stringify dict
# broadcast message to all clients
await broadcast(data)
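# A minimal sketch of a client round trip (hypothetical test client; the exact
# message fields are defined by database.post_message(), which is not shown here):
#
#   import asyncio, json, websockets
#   async def send_one():
#       async with websockets.connect('ws://localhost:5000/ws') as ws:
#           await ws.send(json.dumps({'message': 'hello'}))
#           print(await ws.recv())  # broadcast back to this client with an added 'id'
#   # asyncio.run(send_one())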
@app.get('/rest/messages')
async def messages(req):
return res.json(await get_messages())
# enable frontend to be served from root
app.static('/', './dist')
@app.exception(NotFound)
async def ignore_404s(request, exception):
return await res.file('./dist/index.html')
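# Any route that is neither an API endpoint nor a static asset falls back to the
# SPA's index.html, so client-side routing keeps working on hard refreshes.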
# start the server with the PORT from an environment variable
if __name__ == '__main__':
app.run(host='0.0.0.0', port=int(os.environ.get("PORT", 5000)))
| [] | [] | ["PORT"] | [] | ["PORT"] | python | 1 | 0 |
main.go | /*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"flag"
"os"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
"k8s.io/client-go/rest"
"k8s.io/klog/v2"
"k8s.io/klog/v2/klogr"
ctrl "sigs.k8s.io/controller-runtime"
// +kubebuilder:scaffold:imports
baremetalv1alpha1 "github.com/metal3-io/baremetal-operator/apis/metal3.io/v1alpha1"
osconfigv1 "github.com/openshift/api/config/v1"
osclientset "github.com/openshift/client-go/config/clientset/versioned"
metal3iov1alpha1 "github.com/openshift/cluster-baremetal-operator/api/v1alpha1"
"github.com/openshift/cluster-baremetal-operator/controllers"
"github.com/openshift/cluster-baremetal-operator/provisioning"
"github.com/openshift/library-go/pkg/operator/events"
)
var (
scheme = runtime.NewScheme()
)
func init() {
utilruntime.Must(clientgoscheme.AddToScheme(scheme))
utilruntime.Must(metal3iov1alpha1.AddToScheme(scheme))
utilruntime.Must(osconfigv1.AddToScheme(scheme))
utilruntime.Must(baremetalv1alpha1.AddToScheme(scheme))
// +kubebuilder:scaffold:scheme
// The following is needed to read the Infrastructure CR
utilruntime.Must(osconfigv1.Install(scheme))
}
func main() {
var metricsAddr string
var enableLeaderElection bool
var imagesJSONFilename string
klog.InitFlags(nil)
flag.StringVar(&metricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.")
flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
flag.StringVar(&imagesJSONFilename, "images-json", "/etc/cluster-baremetal-operator/images/images.json",
"The location of the file containing the images to use for our operands.")
flag.Parse()
ctrl.SetLogger(klogr.New())
releaseVersion := os.Getenv("RELEASE_VERSION")
if releaseVersion == "" {
klog.Info("Environment variable RELEASE_VERSION not provided")
}
config := ctrl.GetConfigOrDie()
mgr, err := ctrl.NewManager(config, ctrl.Options{
Scheme: scheme,
MetricsBindAddress: metricsAddr,
Namespace: controllers.ComponentNamespace,
LeaderElection: enableLeaderElection,
Port: 9443,
CertDir: "/etc/cluster-baremetal-operator/tls",
})
if err != nil {
klog.ErrorS(err, "unable to start manager")
os.Exit(1)
}
osClient := osclientset.NewForConfigOrDie(rest.AddUserAgent(config, controllers.ComponentName))
kubeClient := kubernetes.NewForConfigOrDie(rest.AddUserAgent(config, controllers.ComponentName))
enabledFeatures, err := controllers.EnabledFeatures(context.Background(), osClient)
if err != nil {
klog.ErrorS(err, "unable to get enabled features")
os.Exit(1)
}
enableWebhook := provisioning.WebhookDependenciesReady(osClient)
if err = (&controllers.ProvisioningReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
OSClient: osClient,
KubeClient: kubeClient,
ReleaseVersion: releaseVersion,
ImagesFilename: imagesJSONFilename,
WebHookEnabled: enableWebhook,
EnabledFeatures: enabledFeatures,
}).SetupWithManager(mgr); err != nil {
klog.ErrorS(err, "unable to create controller", "controller", "Provisioning")
os.Exit(1)
}
if enableWebhook {
info := &provisioning.ProvisioningInfo{
Client: kubeClient,
EventRecorder: events.NewLoggingEventRecorder(controllers.ComponentName),
Namespace: controllers.ComponentNamespace,
}
if err = provisioning.EnableValidatingWebhook(info, mgr, enabledFeatures); err != nil {
klog.ErrorS(err, "problem enabling validating webhook")
os.Exit(1)
}
}
// +kubebuilder:scaffold:builder
klog.Info("starting manager")
if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
klog.ErrorS(err, "problem running manager")
os.Exit(1)
}
}
| ["\"RELEASE_VERSION\""] | [] | ["RELEASE_VERSION"] | [] | ["RELEASE_VERSION"] | go | 1 | 0 |
net/net_linux_test.go | package net
import (
"fmt"
"io/ioutil"
"os"
"strings"
"syscall"
"testing"
"github.com/Codehardt/gopsutil/internal/common"
"github.com/stretchr/testify/assert"
)
func TestIOCountersByFileParsing(t *testing.T) {
	// Prepare a temporary file, which will be read during the test
tmpfile, err := ioutil.TempFile("", "proc_dev_net")
defer os.Remove(tmpfile.Name()) // clean up
assert.Nil(t, err, "Temporary file creation failed: ", err)
cases := [4][2]string{
[2]string{"eth0: ", "eth1: "},
[2]string{"eth0:0: ", "eth1:0: "},
[2]string{"eth0:", "eth1:"},
[2]string{"eth0:0:", "eth1:0:"},
}
for _, testCase := range cases {
err = tmpfile.Truncate(0)
assert.Nil(t, err, "Temporary file truncating problem: ", err)
// Parse interface name for assertion
interface0 := strings.TrimSpace(testCase[0])
interface0 = interface0[:len(interface0)-1]
interface1 := strings.TrimSpace(testCase[1])
interface1 = interface1[:len(interface1)-1]
// Replace the interfaces from the test case
proc := []byte(fmt.Sprintf("Inter-| Receive | Transmit\n face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed\n %s1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16\n %s100 200 300 400 500 600 700 800 900 1000 1100 1200 1300 1400 1500 1600\n", testCase[0], testCase[1]))
// Write /proc/net/dev sample output
_, err = tmpfile.Write(proc)
assert.Nil(t, err, "Temporary file writing failed: ", err)
counters, err := IOCountersByFile(true, tmpfile.Name())
assert.Nil(t, err)
assert.NotEmpty(t, counters)
assert.Equal(t, 2, len(counters))
assert.Equal(t, interface0, counters[0].Name)
assert.Equal(t, 1, int(counters[0].BytesRecv))
assert.Equal(t, 2, int(counters[0].PacketsRecv))
assert.Equal(t, 3, int(counters[0].Errin))
assert.Equal(t, 4, int(counters[0].Dropin))
assert.Equal(t, 5, int(counters[0].Fifoin))
assert.Equal(t, 9, int(counters[0].BytesSent))
assert.Equal(t, 10, int(counters[0].PacketsSent))
assert.Equal(t, 11, int(counters[0].Errout))
assert.Equal(t, 12, int(counters[0].Dropout))
assert.Equal(t, 13, int(counters[0].Fifoout))
assert.Equal(t, interface1, counters[1].Name)
assert.Equal(t, 100, int(counters[1].BytesRecv))
assert.Equal(t, 200, int(counters[1].PacketsRecv))
assert.Equal(t, 300, int(counters[1].Errin))
assert.Equal(t, 400, int(counters[1].Dropin))
assert.Equal(t, 500, int(counters[1].Fifoin))
assert.Equal(t, 900, int(counters[1].BytesSent))
assert.Equal(t, 1000, int(counters[1].PacketsSent))
assert.Equal(t, 1100, int(counters[1].Errout))
assert.Equal(t, 1200, int(counters[1].Dropout))
assert.Equal(t, 1300, int(counters[1].Fifoout))
}
err = tmpfile.Close()
assert.Nil(t, err, "Temporary file closing failed: ", err)
}
func TestGetProcInodesAll(t *testing.T) {
if os.Getenv("CIRCLECI") == "true" {
t.Skip("Skip CI")
}
root := common.HostProc("")
v, err := getProcInodesAll(root, 0)
assert.Nil(t, err)
assert.NotEmpty(t, v)
}
func TestConnectionsMax(t *testing.T) {
if os.Getenv("CIRCLECI") == "true" {
t.Skip("Skip CI")
}
max := 10
v, err := ConnectionsMax("tcp", max)
assert.Nil(t, err)
assert.NotEmpty(t, v)
cxByPid := map[int32]int{}
for _, cx := range v {
if cx.Pid > 0 {
cxByPid[cx.Pid]++
}
}
for _, c := range cxByPid {
assert.True(t, c <= max)
}
}
type AddrTest struct {
IP string
Port int
Error bool
}
func TestDecodeAddress(t *testing.T) {
assert := assert.New(t)
addr := map[string]AddrTest{
"0500000A:0016": {
IP: "10.0.0.5",
Port: 22,
},
"0100007F:D1C2": {
IP: "127.0.0.1",
Port: 53698,
},
"11111:0035": {
Error: true,
},
"0100007F:BLAH": {
Error: true,
},
"0085002452100113070057A13F025401:0035": {
IP: "2400:8500:1301:1052:a157:7:154:23f",
Port: 53,
},
"00855210011307F025401:0035": {
Error: true,
},
}
for src, dst := range addr {
family := syscall.AF_INET
if len(src) > 13 {
family = syscall.AF_INET6
}
addr, err := decodeAddress(uint32(family), src)
if dst.Error {
assert.NotNil(err, src)
} else {
assert.Nil(err, src)
assert.Equal(dst.IP, addr.IP, src)
assert.Equal(dst.Port, int(addr.Port), src)
}
}
}
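// For reference: /proc/net/tcp stores the IPv4 address as 8 hex digits in
// little-endian byte order followed by a big-endian hex port, so "0500000A:0016"
// decodes to 10.0.0.5:22; IPv6 addresses are stored as four 4-byte little-endian
// words, which is why "0085002452100113..." above decodes to 2400:8500:1301:1052:....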
func TestReverse(t *testing.T) {
src := []byte{0x01, 0x02, 0x03}
assert.Equal(t, []byte{0x03, 0x02, 0x01}, Reverse(src))
}
func TestConntrackStatFileParsing(t *testing.T) {
tmpfile, err := ioutil.TempFile("", "proc_net_stat_conntrack")
defer os.Remove(tmpfile.Name())
assert.Nil(t, err, "Temporary file creation failed: ", err)
data := []byte(`
entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete search_restart
0000007b 00000000 00000000 00000000 000b115a 00000084 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 0000004a
0000007b 00000000 00000000 00000000 0007eee5 00000068 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000035
0000007b 00000000 00000000 00000000 0090346b 00000057 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000025
0000007b 00000000 00000000 00000000 0005920f 00000069 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000064
0000007b 00000000 00000000 00000000 000331ff 00000059 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 0000003b
0000007b 00000000 00000000 00000000 000314ea 00000066 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000054
0000007b 00000000 00000000 00000000 0002b270 00000055 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 0000003d
0000007b 00000000 00000000 00000000 0002f67d 00000057 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000042
`)
// Expected results
slist := NewConntrackStatList()
slist.Append(&ConntrackStat{
Entries: 123,
Searched: 0,
Found: 0,
New: 0,
Invalid: 725338,
Ignore: 132,
Delete: 0,
DeleteList: 0,
Insert: 0,
InsertFailed: 0,
Drop: 0,
EarlyDrop: 0,
IcmpError: 0,
ExpectNew: 0,
ExpectCreate: 0,
ExpectDelete: 0,
SearchRestart: 74,
})
slist.Append(&ConntrackStat{123, 0, 0, 0, 519909, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 53})
slist.Append(&ConntrackStat{123, 0, 0, 0, 9450603, 87, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 37})
slist.Append(&ConntrackStat{123, 0, 0, 0, 365071, 105, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 100})
slist.Append(&ConntrackStat{123, 0, 0, 0, 209407, 89, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 59})
slist.Append(&ConntrackStat{123, 0, 0, 0, 201962, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 84})
slist.Append(&ConntrackStat{123, 0, 0, 0, 176752, 85, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 61})
slist.Append(&ConntrackStat{123, 0, 0, 0, 194173, 87, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 66})
// Write data to tempfile
_, err = tmpfile.Write(data)
assert.Nil(t, err, "Temporary file writing failed: ", err)
// Function under test
stats, err := conntrackStatsFromFile(tmpfile.Name(), true)
assert.Equal(t, 8, len(stats), "Expected 8 results")
summary := &ConntrackStat{}
for i, exp := range slist.Items() {
st := stats[i]
assert.Equal(t, exp.Entries, st.Entries)
summary.Entries += st.Entries
assert.Equal(t, exp.Searched, st.Searched)
summary.Searched += st.Searched
assert.Equal(t, exp.Found, st.Found)
summary.Found += st.Found
assert.Equal(t, exp.New, st.New)
summary.New += st.New
assert.Equal(t, exp.Invalid, st.Invalid)
summary.Invalid += st.Invalid
assert.Equal(t, exp.Ignore, st.Ignore)
summary.Ignore += st.Ignore
assert.Equal(t, exp.Delete, st.Delete)
summary.Delete += st.Delete
assert.Equal(t, exp.DeleteList, st.DeleteList)
summary.DeleteList += st.DeleteList
assert.Equal(t, exp.Insert, st.Insert)
summary.Insert += st.Insert
assert.Equal(t, exp.InsertFailed, st.InsertFailed)
summary.InsertFailed += st.InsertFailed
assert.Equal(t, exp.Drop, st.Drop)
summary.Drop += st.Drop
assert.Equal(t, exp.EarlyDrop, st.EarlyDrop)
summary.EarlyDrop += st.EarlyDrop
assert.Equal(t, exp.IcmpError, st.IcmpError)
summary.IcmpError += st.IcmpError
assert.Equal(t, exp.ExpectNew, st.ExpectNew)
summary.ExpectNew += st.ExpectNew
assert.Equal(t, exp.ExpectCreate, st.ExpectCreate)
summary.ExpectCreate += st.ExpectCreate
assert.Equal(t, exp.ExpectDelete, st.ExpectDelete)
summary.ExpectDelete += st.ExpectDelete
assert.Equal(t, exp.SearchRestart, st.SearchRestart)
summary.SearchRestart += st.SearchRestart
}
// Test summary grouping
totals, err := conntrackStatsFromFile(tmpfile.Name(), false)
for i, st := range totals {
assert.Equal(t, summary.Entries, st.Entries)
assert.Equal(t, summary.Searched, st.Searched)
assert.Equal(t, summary.Found, st.Found)
assert.Equal(t, summary.New, st.New)
assert.Equal(t, summary.Invalid, st.Invalid)
assert.Equal(t, summary.Ignore, st.Ignore)
assert.Equal(t, summary.Delete, st.Delete)
assert.Equal(t, summary.DeleteList, st.DeleteList)
assert.Equal(t, summary.Insert, st.Insert)
assert.Equal(t, summary.InsertFailed, st.InsertFailed)
assert.Equal(t, summary.Drop, st.Drop)
assert.Equal(t, summary.EarlyDrop, st.EarlyDrop)
assert.Equal(t, summary.IcmpError, st.IcmpError)
assert.Equal(t, summary.ExpectNew, st.ExpectNew)
assert.Equal(t, summary.ExpectCreate, st.ExpectCreate)
assert.Equal(t, summary.ExpectDelete, st.ExpectDelete)
assert.Equal(t, summary.SearchRestart, st.SearchRestart)
assert.Equal(t, 0, i) // Should only have one element
}
}
| ["\"CIRCLECI\"", "\"CIRCLECI\""] | [] | ["CIRCLECI"] | [] | ["CIRCLECI"] | go | 1 | 0 |
python/pi_only/games_pi_only.py | # WS2812 LED Matrix Gamecontrol (Tetris, Snake, Pong)
# by M Oehler
# https://hackaday.io/project/11064-raspberry-pi-retro-gaming-led-display
# ported from
# Tetromino (a Tetris clone)
# By Al Sweigart [email protected]
# http://inventwithpython.com/pygame
# Released under a "Simplified BSD" license
import random, time, sys, os, pickle
from PIL import Image
# If Pi = False the script runs in simulation mode using pygame lib
PI = True
import pygame
from pygame.locals import *
if PI:
os.environ["SDL_VIDEODRIVER"] = "dummy" #dummy display for pygame joystick usage
import board
import neopixel
import subprocess
from luma.led_matrix.device import max7219
from luma.core.interface.serial import spi, noop
from luma.core.render import canvas
from luma.core.virtual import viewport
from luma.core.legacy import text, show_message
from luma.core.legacy.font import proportional, CP437_FONT, TINY_FONT, SINCLAIR_FONT, LCD_FONT
# only modify this two values for size adaption!
PIXEL_X=10
PIXEL_Y=20
SIZE= 20
FPS = 15
BOXSIZE = 20
WINDOWWIDTH = BOXSIZE * PIXEL_X
WINDOWHEIGHT = BOXSIZE * PIXEL_Y
BOARDWIDTH = PIXEL_X
BOARDHEIGHT = PIXEL_Y
BLANK = '.'
MOVESIDEWAYSFREQ = 0.15
MOVEDOWNFREQ = 0.15
FALLING_SPEED = 0.8
LED_BRIGHTNESS = 0.6
# R G B
WHITE = (255, 255, 255)
GRAY = (185, 185, 185)
BLACK = ( 0, 0, 0)
RED = (255, 0, 0)
LIGHTRED = (175, 20, 20)
GREEN = ( 0, 255, 0)
LIGHTGREEN = ( 20, 175, 20)
BLUE = ( 0, 0, 255)
LIGHTBLUE = ( 20, 20, 175)
YELLOW = (255, 255, 0)
LIGHTYELLOW = (175, 175, 20)
CYAN = ( 0, 255, 255)
MAGENTA = (255, 0, 255)
ORANGE = (255, 100, 0)
SCORES =(0,40,100,300,1200)
BORDERCOLOR = BLUE
BGCOLOR = BLACK
TEXTCOLOR = WHITE
TEXTSHADOWCOLOR = GRAY
COLORS = (BLUE,GREEN,RED,YELLOW,CYAN,MAGENTA,ORANGE)
LIGHTCOLORS = (LIGHTBLUE, LIGHTGREEN, LIGHTRED, LIGHTYELLOW)
#assert len(COLORS) == len(LIGHTCOLORS) # each color must have light color
TEMPLATEWIDTH = 5
TEMPLATEHEIGHT = 5
S_SHAPE_TEMPLATE = [['.....',
'.....',
'..OO.',
'.OO..',
'.....'],
['.....',
'..O..',
'..OO.',
'...O.',
'.....']]
Z_SHAPE_TEMPLATE = [['.....',
'.....',
'.OO..',
'..OO.',
'.....'],
['.....',
'..O..',
'.OO..',
'.O...',
'.....']]
I_SHAPE_TEMPLATE = [['..O..',
'..O..',
'..O..',
'..O..',
'.....'],
['.....',
'.....',
'OOOO.',
'.....',
'.....']]
O_SHAPE_TEMPLATE = [['.....',
'.....',
'.OO..',
'.OO..',
'.....']]
J_SHAPE_TEMPLATE = [['.....',
'.O...',
'.OOO.',
'.....',
'.....'],
['.....',
'..OO.',
'..O..',
'..O..',
'.....'],
['.....',
'.....',
'.OOO.',
'...O.',
'.....'],
['.....',
'..O..',
'..O..',
'.OO..',
'.....']]
L_SHAPE_TEMPLATE = [['.....',
'...O.',
'.OOO.',
'.....',
'.....'],
['.....',
'..O..',
'..O..',
'..OO.',
'.....'],
['.....',
'.....',
'.OOO.',
'.O...',
'.....'],
['.....',
'.OO..',
'..O..',
'..O..',
'.....']]
T_SHAPE_TEMPLATE = [['.....',
'..O..',
'.OOO.',
'.....',
'.....'],
['.....',
'..O..',
'..OO.',
'..O..',
'.....'],
['.....',
'.....',
'.OOO.',
'..O..',
'.....'],
['.....',
'..O..',
'.OO..',
'..O..',
'.....']]
PIECES = {'S': S_SHAPE_TEMPLATE,
'Z': Z_SHAPE_TEMPLATE,
'I': I_SHAPE_TEMPLATE,
'J': J_SHAPE_TEMPLATE,
'L': L_SHAPE_TEMPLATE,
'O': O_SHAPE_TEMPLATE,
'T': T_SHAPE_TEMPLATE}
PIECES_ORDER = {'S': 0,'Z': 1,'I': 2,'J': 3,'L': 4,'O': 5,'T': 6}
# snake constants #
UP = 'up'
DOWN = 'down'
LEFT = 'left'
RIGHT = 'right'
HEAD = 0 # syntactic sugar: index of the worm's head
# font clock #
clock_font = [
0x1F, 0x11, 0x1F,
0x00, 0x00, 0x1F,
0x1D, 0x15, 0x17,
0x15, 0x15, 0x1F,
0x07, 0x04, 0x1F,
0x17, 0x15, 0x1D,
0x1F, 0x15, 0x1D,
0x01, 0x01, 0x1F,
0x1F, 0x15, 0x1F,
0x17, 0x15, 0x1F]
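# Each digit in clock_font is stored as three column bytes of a 3x5 glyph: bit y of
# column byte x is set when pixel (x, y) of the digit is lit, and drawnumber() below
# tests those bits with the mask table (mask[y] == 1 << y).  For example the digit 1
# (0x00, 0x00, 0x1F) lights only its right-hand column.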
theTetrisFont = [
0x78,0x78,0x1E,0x1E, #S
0x1E,0x1E,0x78,0x78, #Z
0x00,0xFF,0xFF,0x00, #I
0x06,0x06,0x7E,0x7E, #J
0x7E,0x7E,0x06,0x06, #L
0x3C,0x3C,0x3C,0x3C, #O
0x7E,0x7E,0x18,0x18, #T
]
if PI:
serial = spi(port=0, device=0, gpio=noop())
device = max7219(serial, cascaded=4, blocks_arranged_in_reverse_order=True)
pixel_pin = board.D18
# The number of NeoPixels
num_pixels = PIXEL_X*PIXEL_Y
ORDER = neopixel.GRB
pixels = neopixel.NeoPixel(pixel_pin, num_pixels, brightness=LED_BRIGHTNESS, auto_write=False,pixel_order=ORDER)
# key server for controller #
QKEYDOWN=0
QKEYUP=1
JKEY_X=3
JKEY_Y=4
JKEY_A=0
JKEY_B=1
JKEY_R=7
JKEY_L=6
JKEY_SEL=10
JKEY_START=11
mykeys = {
K_1: JKEY_A,
K_2: JKEY_B,
K_3: JKEY_Y,
K_4: JKEY_X,
K_x: JKEY_SEL,
K_s: JKEY_START
}
mask = bytearray([1,2,4,8,16,32,64,128])
# main #
def main():
global FPSCLOCK, DISPLAYSURF, BASICFONT, BIGFONT
    global a1_counter, RUNNING
a1_counter=0
RUNNING=True
joystick_detected=False
joystick_cnt=0
if not PI:
pygame.init()
FPSCLOCK = pygame.time.Clock()
DISPLAYSURF = pygame.display.set_mode((PIXEL_X*SIZE, PIXEL_Y*SIZE))
BASICFONT = pygame.font.Font('freesansbold.ttf', 18)
BIGFONT = pygame.font.Font('freesansbold.ttf', 100)
pygame.display.set_caption('Pi Games')
DISPLAYSURF.fill(BGCOLOR)
pygame.display.update()
drawImage('pi.bmp')
time.sleep(2)
else:
device.contrast(200)
pygame.init()
drawImage('/home/pi/pi.bmp')
pygame.joystick.init()
while joystick_detected==False:
show_message(device,"Waiting for controller...",fill="white", font=proportional(CP437_FONT), scroll_delay=0.01)
pygame.joystick.quit()
pygame.joystick.init()
try:
joystick = pygame.joystick.Joystick(0) # create a joystick instance
joystick.init() # init instance
# print("Initialized joystick: {}".format(joystick.get_name()))
joystick_detected = True
except pygame.error:
print("no joystick found.")
joystick_detected = False
clearScreen()
drawClock(1)
if PI:
show_message(device,"Let's play",fill="white", font=proportional(CP437_FONT))
while True:
clearScreen()
#drawSymbols()
if PI:
drawImage('/home/pi/select.bmp')
else:
drawImage('select.bmp')
updateScreen()
if not PI:
checkForQuit()
#check if joystick is still connected
if PI:
if joystick_cnt==50:
joystick_cnt=0
pygame.joystick.quit()
pygame.joystick.init()
try:
joystick = pygame.joystick.Joystick(0) # create a joystick instance
joystick.init() # init instance
# print("Initialized joystick: {}".format(joystick.get_name()))
joystick_detected = True
except pygame.error:
print("no joystick found.")
joystick_detected = False
else:
joystick_cnt+=1
pygame.event.pump()
for event in pygame.event.get():
# print("event detected {}".format(event))
if event.type == pygame.JOYBUTTONDOWN or event.type == KEYDOWN:
if event.type == pygame.JOYBUTTONDOWN:
myevent = event.button
else:
if event.key in mykeys:
myevent = mykeys[event.key]
else:
myevent = -1
if (myevent == JKEY_B):
drawClock(1)
if (myevent == JKEY_A):
runPongGame()
if (myevent == JKEY_X):
runTetrisGame()
if (myevent == JKEY_Y):
runSnakeGame()
if (myevent == JKEY_START):
shutdownScreen()
if event.type == pygame.QUIT: # get all the QUIT events
terminate() # terminate if any QUIT events are present
time.sleep(.1)
terminate()
# gaming main routines #
def runPongGame():
down = 0
up = 1
left = 0
right = 1
lowerbarx = PIXEL_X//2
upperbarx = PIXEL_X//2
score1 = 0
score2 = 0
ballx = PIXEL_X//2
bally = PIXEL_Y//2
directiony = down
directionx = left
movingRightUpper = False
movingLeftUpper = False
movingRightLower = False
movingLeftLower = False
restart=False
lastLowerMoveSidewaysTime = time.time()
lastUpperMoveSidewaysTime = time.time()
while True: # main game loop
pygame.event.pump()
for event in pygame.event.get():
if event.type == pygame.JOYAXISMOTION:
axis = event.axis
val = round(event.value)
if (axis == 0 and val == -1):
movingLeftLower = True
movingRightLower = False
if (axis == 0 and val == 1):
movingLeftLower = False
movingRightLower = True
if (val == 0):
movingLeftLower = False
movingRightLower = False
if event.type == pygame.JOYBUTTONDOWN:
# print("Joystick button pressed: {}".format(event.button))
if (event.button == JKEY_A):
movingLeftUpper = True
movingRightUpper = False
if (event.button == JKEY_B):
movingLeftUpper = False
movingRightUpper = True
if (event.button == JKEY_SEL):
# quit game
return
if event.type == pygame.JOYBUTTONUP:
movingLeftUpper = False
movingRightUpper = False
if event.type == pygame.KEYDOWN:
if(event.key==K_LEFT):
movingLeftLower = True
movingRightLower = False
if(event.key==K_RIGHT):
movingLeftLower = False
movingRightLower = True
if(event.key==K_1):
movingLeftUpper = True
movingRightUpper = False
if(event.key==K_2):
movingLeftUpper = False
movingRightUpper = True
if(event.key==K_s):
return
if event.type == pygame.KEYUP:
movingLeftLower = False
movingRightLower = False
movingLeftUpper = False
movingRightUpper = False
if (movingLeftLower) and time.time() - lastLowerMoveSidewaysTime > MOVESIDEWAYSFREQ:
if lowerbarx >1:
lowerbarx-=1;
lastLowerMoveSidewaysTime = time.time()
if (movingRightLower) and time.time() - lastLowerMoveSidewaysTime > MOVESIDEWAYSFREQ:
if lowerbarx <PIXEL_X-2:
lowerbarx+=1;
lastLowerMoveSidewaysTime = time.time()
if (movingLeftUpper) and time.time() - lastUpperMoveSidewaysTime > MOVESIDEWAYSFREQ:
if upperbarx >1:
upperbarx-=1;
lastUpperMoveSidewaysTime = time.time()
if (movingRightUpper) and time.time() - lastUpperMoveSidewaysTime > MOVESIDEWAYSFREQ:
if upperbarx <PIXEL_X-2:
upperbarx+=1;
lastUpperMoveSidewaysTime = time.time()
if not PI:
checkForQuit()
if (directiony == up):
if (bally>1):
bally-=1
else:
if (abs(ballx-upperbarx)<2):
directiony = down
if (ballx==upperbarx+1):
if (directionx==left):
directionx=right
if (ballx==upperbarx-1):
if (directionx==right):
directionx=left
elif ((ballx-upperbarx==2) and (directionx==left)):
directionx=right
directiony = down
elif ((ballx-upperbarx==-2) and (directionx==right)):
directionx=left
directiony = down
else:
bally-=1
score1+=1
restart = True
else:
if (bally<PIXEL_Y-2):
bally+=1
else:
if (abs(ballx-lowerbarx)<2):
directiony = up
if (ballx==lowerbarx+1):
if (directionx==left):
directionx=right
if (ballx==lowerbarx-1):
if (directionx==right):
directionx=left
elif ((ballx-lowerbarx==2) and (directionx==left)):
directionx=right
directiony = up
elif ((ballx-lowerbarx==-2) and (directionx==right)):
directionx=left
directiony = up
else:
bally+=1
score2+=1
restart = True
if (directionx == left):
if (ballx>0):
if (ballx==1):
ballx-=1
else:
ballx-=random.randint(1,2)
else:
directionx = right
ballx+=1
if(directiony == up):
if(bally>2):
bally-=1
if(directiony == down):
if(bally<PIXEL_Y-2):
bally+=1
else:
if (ballx<PIXEL_X-1):
if (ballx==8):
ballx+=1
else:
ballx+=random.randint(1,2)
else:
directionx = left
ballx-=random.randint(1,2)
if(directiony == up):
if(bally>3):
bally-=random.randint(0,2)
if(directiony == down):
if(bally<PIXEL_Y-3):
bally+=random.randint(0,2)
clearScreen()
drawBall(ballx,bally)
drawBar(upperbarx,0)
drawBar(lowerbarx,PIXEL_Y-1)
twoscoreText(score1,score2)
updateScreen()
if (score1 == 9) or (score2 == 9):
time.sleep(3)
return
if restart:
time.sleep(1)
ballx=PIXEL_X//2
bally=PIXEL_Y//2
if directiony==down:
directiony = up
else:
directiony = down
restart=False
else:
time.sleep(.1)
def runSnakeGame():
# Set a random start point.
startx = random.randint(2, BOARDWIDTH-2 )
starty = random.randint(2, BOARDHEIGHT -2 )
wormCoords = [{'x': startx, 'y': starty},
{'x': startx - 1, 'y': starty},
{'x': startx - 2, 'y': starty}]
direction = RIGHT
score = 0
if os.path.isfile('/home/pi/hs_snake.p')==True:
try:
highscore = pickle.load(open("/home/pi/hs_snake.p","rb"))
except EOFError:
highscore = 0
else:
highscore=0
if PI:
show_message(device,"Snake Highscore: " + str(highscore),fill="white", font=proportional(CP437_FONT), scroll_delay=0.01)
# Start the apple in a random place.
apple = getRandomLocation(wormCoords)
while True: # main game loop
olddirection = direction
pygame.event.pump()
for event in pygame.event.get():
if event.type == pygame.JOYAXISMOTION:
if (olddirection== direction): #only one direction change per step
axis = event.axis
val = round(event.value)
if (axis == 0 and val == -1):
if direction != RIGHT:
direction = LEFT
if (axis == 0 and val == 1):
if direction != LEFT:
direction = RIGHT
if (axis == 1 and val == 1):
if direction != UP:
direction = DOWN
if (axis == 1 and val == -1):
if direction != DOWN:
direction = UP
if event.type == pygame.KEYDOWN:
if (event.key==K_LEFT):
if direction != RIGHT:
direction = LEFT
if (event.key==K_RIGHT):
if direction != LEFT:
direction = RIGHT
if (event.key==K_DOWN):
if direction != UP:
direction = DOWN
if (event.key==K_UP):
if direction != DOWN:
direction = UP
if (event.key == JKEY_SEL):
#quit game
return
if event.type == pygame.JOYBUTTONDOWN:
if (event.button==JKEY_SEL):
# quit game
return
# check if the worm has hit itself or the edge
if wormCoords[HEAD]['x'] == -1 or wormCoords[HEAD]['x'] == BOARDWIDTH or wormCoords[HEAD]['y'] == -1 or wormCoords[HEAD]['y'] == BOARDHEIGHT:
time.sleep(1.5)
if score > highscore:
highscore = score
if PI:
pickle.dump(highscore, open("/home/pi/hs_snake.p", "wb"))
show_message(device,"New Highscore !!!",fill="white", font=proportional(CP437_FONT), scroll_delay=0.01)
return # game over
for wormBody in wormCoords[1:]:
if wormBody['x'] == wormCoords[HEAD]['x'] and wormBody['y'] == wormCoords[HEAD]['y']:
time.sleep(1.5)
if score > highscore:
highscore = score
if PI:
pickle.dump(highscore, open("/home/pi/hs_snake.p", "wb"))
show_message(device,"New Highscore !!!",fill="white", font=proportional(CP437_FONT), scroll_delay=0.01)
return # game over
# check if worm has eaten an apple
if wormCoords[HEAD]['x'] == apple['x'] and wormCoords[HEAD]['y'] == apple['y']:
# don't remove worm's tail segment
score += 1
apple = getRandomLocation(wormCoords) # set a new apple somewhere
else:
del wormCoords[-1] # remove worm's tail segment
# move the worm by adding a segment in the direction it is moving
if direction == UP:
if wormCoords[HEAD]['y'] == 0 :
newHead = {'x': wormCoords[HEAD]['x'], 'y': BOARDHEIGHT-1}
else:
newHead = {'x': wormCoords[HEAD]['x'], 'y': wormCoords[HEAD]['y'] - 1}
elif direction == DOWN:
if wormCoords[HEAD]['y'] == BOARDHEIGHT-1 :
newHead = {'x': wormCoords[HEAD]['x'], 'y': 0}
else:
newHead = {'x': wormCoords[HEAD]['x'], 'y': wormCoords[HEAD]['y'] + 1}
elif direction == LEFT:
if wormCoords[HEAD]['x'] == 0 :
newHead = {'x': BOARDWIDTH -1, 'y': wormCoords[HEAD]['y'] }
else:
newHead = {'x': wormCoords[HEAD]['x'] - 1, 'y': wormCoords[HEAD]['y']}
elif direction == RIGHT:
if wormCoords[HEAD]['x'] == BOARDWIDTH-1:
newHead = {'x': 0, 'y': wormCoords[HEAD]['y']}
else:
newHead = {'x': wormCoords[HEAD]['x'] + 1, 'y': wormCoords[HEAD]['y']}
if not PI:
checkForQuit()
wormCoords.insert(0, newHead)
clearScreen()
drawWorm(wormCoords)
drawApple(apple)
scoreText(score)
updateScreen()
time.sleep(.15)
def runTetrisGame():
    # setup variables for the start of the game
#if PI:
#device.contrast(255)
#device.show()
board = getBlankBoard()
lastMoveDownTime = time.time()
lastMoveSidewaysTime = time.time()
lastFallTime = time.time()
movingDown = False # note: there is no movingUp variable
movingLeft = False
movingRight = False
score = 0
oldscore = -1
oldpiece = 10
lines = 0
level, fallFreq = calculateLevelAndFallFreq(lines)
if os.path.isfile('/home/pi/hs_tetris.p')==True:
try:
highscore = pickle.load(open("/home/pi/hs_tetris.p","rb"))
except EOFError:
highscore = 0
else:
highscore=0
if PI:
show_message(device,"Tetris Highscore: " + str(highscore),fill="white", font=proportional(CP437_FONT), scroll_delay=0.01)
fallingPiece = getNewPiece()
nextPiece = getNewPiece()
while True: # game loop
if fallingPiece == None:
# No falling piece in play, so start a new piece at the top
fallingPiece = nextPiece
nextPiece = getNewPiece()
lastFallTime = time.time() # reset lastFallTime
if not isValidPosition(board, fallingPiece):
time.sleep(2)
if score > highscore:
highscore = score
if PI:
pickle.dump(highscore, open("/home/pi/hs_tetris.p", "wb"))
show_message(device,"New Highscore !!!",fill="white", font=proportional(CP437_FONT), scroll_delay=0.01)
return # can't fit a new piece on the board, so game over
if not PI:
checkForQuit()
pygame.event.pump()
for event in pygame.event.get():
# print("event detected {}".format(event))
if event.type == pygame.JOYAXISMOTION:
axis = event.axis
val = round(event.value)
if (axis == 0 and val == 0):
# no motion or down motion
movingLeft = movingRight = False
if (axis == 1 and val == 0) :
movingDown = False
if (axis==0 and val== -1) and isValidPosition(board, fallingPiece, adjX=-1):
fallingPiece['x'] -= 1
movingLeft = True
movingRight = False
lastMoveSidewaysTime = time.time()
if (axis == 0 and val== 1) and isValidPosition(board, fallingPiece, adjX=1):
fallingPiece['x'] += 1
movingLeft = False
movingRight = True
lastMoveSidewaysTime = time.time()
if (axis==1 and val == 1):
movingDown = True
if isValidPosition(board, fallingPiece, adjY=1):
fallingPiece['y'] += 1
lastMoveDownTime = time.time()
if (axis==1 and val == -1):
fallingPiece['rotation'] = (fallingPiece['rotation'] + 1) % len(PIECES[fallingPiece['shape']])
if not isValidPosition(board, fallingPiece):
fallingPiece['rotation'] = (fallingPiece['rotation'] - 1) % len(PIECES[fallingPiece['shape']])
if event.type == pygame.KEYDOWN:
if (event.key==K_LEFT) and isValidPosition(board, fallingPiece, adjX=-1):
fallingPiece['x'] -= 1
movingLeft = True
movingRight = False
lastMoveSidewaysTime = time.time()
if (event.key==K_RIGHT) and isValidPosition(board, fallingPiece, adjX=1):
fallingPiece['x'] += 1
movingLeft = False
movingRight = True
lastMoveSidewaysTime = time.time()
if (event.key==K_DOWN):
movingDown = True
if isValidPosition(board, fallingPiece, adjY=1):
fallingPiece['y'] += 1
lastMoveDownTime = time.time()
if (event.key==K_UP):
fallingPiece['rotation'] = (fallingPiece['rotation'] + 1) % len(PIECES[fallingPiece['shape']])
if not isValidPosition(board, fallingPiece):
fallingPiece['rotation'] = (fallingPiece['rotation'] - 1) % len(PIECES[fallingPiece['shape']])
if (event.key == K_3):
fallingPiece['rotation'] = (fallingPiece['rotation'] -1) % len(PIECES[fallingPiece['shape']])
if not isValidPosition(board, fallingPiece):
fallingPiece['rotation'] = (fallingPiece['rotation'] + 1) % len(PIECES[fallingPiece['shape']])
if (event.key == K_4):
movingDown = False
movingLeft = False
movingRight = False
for i in range(1, BOARDHEIGHT):
if not isValidPosition(board, fallingPiece, adjY=i):
break
score+=i #TODO: more digits on numbercounter, more scores
fallingPiece['y'] += i - 1
if event.type == pygame.KEYUP:
movingDown = False
movingLeft = False
movingRight = False
if event.type == pygame.JOYBUTTONDOWN:
# print("Joystick button pressed: {}".format(event.button))
if (event.button == JKEY_A):
fallingPiece['rotation'] = (fallingPiece['rotation'] -1) % len(PIECES[fallingPiece['shape']])
if not isValidPosition(board, fallingPiece):
fallingPiece['rotation'] = (fallingPiece['rotation'] + 1) % len(PIECES[fallingPiece['shape']])
if (event.button == JKEY_Y):
movingDown = False
movingLeft = False
movingRight = False
for i in range(1, BOARDHEIGHT):
if not isValidPosition(board, fallingPiece, adjY=i):
break
score+=i #TODO: more digits on numbercounter, more scores
fallingPiece['y'] += i - 1
# return
# handle moving the piece because of user input
if (movingLeft or movingRight) and time.time() - lastMoveSidewaysTime > MOVESIDEWAYSFREQ:
if movingLeft and isValidPosition(board, fallingPiece, adjX=-1):
fallingPiece['x'] -= 1
elif movingRight and isValidPosition(board, fallingPiece, adjX=1):
fallingPiece['x'] += 1
lastMoveSidewaysTime = time.time()
if movingDown and time.time() - lastMoveDownTime > MOVEDOWNFREQ and isValidPosition(board, fallingPiece, adjY=1):
fallingPiece['y'] += 1
lastMoveDownTime = time.time()
# let the piece fall if it is time to fall
if time.time() - lastFallTime > fallFreq:
# see if the piece has landed
if not isValidPosition(board, fallingPiece, adjY=1):
# falling piece has landed, set it on the board
addToBoard(board, fallingPiece)
remLine = removeCompleteLines(board)
# count lines for level calculation
lines += remLine
# more lines, more points per line
score += SCORES[remLine]*level
level, fallFreq = calculateLevelAndFallFreq(lines)
fallingPiece = None
else:
# piece did not land, just move the piece down
fallingPiece['y'] += 1
lastFallTime = time.time()
# drawing everything on the screen
clearScreen()
drawBoard(board)
#scoreText(score)
if score>oldscore:
scoreTetris(score,level,PIECES_ORDER.get(nextPiece['shape']))
oldscore = score
if oldpiece!=PIECES_ORDER.get(nextPiece['shape']):
scoreTetris(score,level,PIECES_ORDER.get(nextPiece['shape']))
oldpiece=PIECES_ORDER.get(nextPiece['shape'])
#drawStatus(score, level)
#drawNextPiece(nextPiece)
if fallingPiece != None:
drawPiece(fallingPiece)
updateScreen()
#FPSCLOCK.tick(FPS)
time.sleep(.05)
def drawClock(color):
joystick_cnt=0
if PI:
device.clear();
device.show();
hour = time.localtime().tm_hour
minute= time.localtime().tm_min
second= time.localtime().tm_sec
while True:
pygame.event.pump()
for event in pygame.event.get(): # User did something
# print("event detected {}".format(event))
# Possible joystick actions: JOYAXISMOTION JOYBALLMOTION JOYBUTTONDOWN JOYBUTTONUP JOYHATMOTION
if event.type == pygame.JOYBUTTONDOWN or event.type == KEYDOWN:
if event.type == pygame.JOYBUTTONDOWN:
myevent = event.button
else:
if event.key in mykeys:
myevent = mykeys[event.key]
else:
myevent = -1
# print("Joystick button pressed: {}".format(event.button))
if (myevent==JKEY_X):
# print("exiting clock")
clearScreen()
updateScreen()
return
if (myevent == JKEY_A):
color = color + 1
if (color > (len(COLORS) - 1)):
color = 0
if event.type == pygame.QUIT: # get all the QUIT events
terminate() # terminate if any QUIT events are present
#check if joystick is still connected
if PI:
if joystick_cnt==25:
joystick_cnt=0
pygame.joystick.quit()
pygame.joystick.init()
try:
joystick = pygame.joystick.Joystick(0) # create a joystick instance
joystick.init() # init instance
# print("Initialized joystick: {}".format(joystick.get_name()))
#joystick_detected = True
except pygame.error:
print("no joystick found.")
#joystick_detected = False
else:
joystick_cnt+=1
ltime = time.localtime()
hour = ltime.tm_hour
minute= ltime.tm_min
second= ltime.tm_sec
clearScreen()
drawnumber(int(hour/10),2,1,color)
drawnumber(int(hour%10),6,1,color)
drawnumber(int(minute/10),2,8,color)
drawnumber(int(minute%10),6,8,color)
drawnumber(int(second/10),2,15,color)
drawnumber(int(second%10),6,15,color)
updateScreen()
time.sleep(.2)
def shutdownScreen():
if PI:
device.clear();
device.show();
drawImage('/home/pi/shutdown.bmp')
show_message(device,"Press Select to shutdown!",fill="white", font=proportional(CP437_FONT), scroll_delay=0.01)
else:
drawImage('shutdown.bmp')
while True:
pygame.event.pump()
for event in pygame.event.get(): # User did something
# print("event detected {}".format(event))
# Possible joystick actions: JOYAXISMOTION JOYBALLMOTION JOYBUTTONDOWN JOYBUTTONUP JOYHATMOTION
if event.type == pygame.JOYBUTTONDOWN or event.type == KEYDOWN:
if event.type == pygame.JOYBUTTONDOWN:
myevent = event.button
else:
if event.key in mykeys:
myevent = mykeys[event.key]
else:
myevent = -1
# print("Joystick button pressed: {}".format(event.button))
if (myevent!=JKEY_SEL):
# print("exiting clock")
clearScreen()
updateScreen()
return
else:
if not PI:
terminate()
else:
clearScreen()
updateScreen()
show_message(device,"Shutdown...",fill="white", font=proportional(CP437_FONT), scroll_delay=0.01)
subprocess.Popen(['shutdown','-h','now'])
#call("sudo nohup shutdown -h now", shell=True)
terminate()
if event.type == pygame.QUIT: # get all the QUIT events
terminate() # terminate if any QUIT events are present
updateScreen()
time.sleep(.2)
def drawImage(filename):
im = Image.open(filename)
for row in range(0,BOARDHEIGHT):
for col in range(0,BOARDWIDTH):
r,g,b = im.getpixel((col,row))
drawPixelRgb(col,row,r,g,b)
updateScreen()
def drawHalfImage(filename,offset):
im = Image.open(filename)
if offset>10:
offset = 10
for row in range(0,10):
for col in range(0,10):
r,g,b = im.getpixel((col,row))
drawPixelRgb(col,row+offset,r,g,b)
# drawing #
def clearScreen():
if PI:
pixels.fill((0,0,0))
else:
DISPLAYSURF.fill(BGCOLOR)
def updateScreen():
if PI:
pixels.show()
else:
pygame.display.update()
def drawPixel(x,y,color):
if color == BLANK:
return
if PI:
try:
if (x>=0 and y>=0 and color >=0):
if x%2==1:
pixels[x*PIXEL_Y+y] = COLORS[color]
else:
pixels[x*PIXEL_Y+(PIXEL_Y-1-y)] = COLORS[color]
except:
print(str(x) + ' --- ' + str(y))
else:
pygame.draw.rect(DISPLAYSURF, COLORS[color], (x*SIZE+1, y*SIZE+1, SIZE-2, SIZE-2))
def drawPixelRgb(x,y,r,g,b):
if PI:
if (x>=0 and y>=0):
if x%2==1:
pixels[x*PIXEL_Y+y] = (r,g,b)
else:
pixels[x*PIXEL_Y+(PIXEL_Y-1-y)] = (r,g,b)
else:
pygame.draw.rect(DISPLAYSURF, (r,g,b), (x*SIZE+1, y*SIZE+1, SIZE-2, SIZE-2))
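# Note: the x % 2 branch in drawPixel/drawPixelRgb maps the 2-D coordinates onto the
# serpentine (zig-zag) LED strip: odd columns index y directly while even columns use
# the mirrored index PIXEL_Y-1-y, because every other physical column of the matrix
# runs in the opposite direction.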
def drawnumber(number,offsetx,offsety,color):
for x in range(0,3):
for y in range(0,5):
if clock_font[3*number + x]&mask[y]:
drawPixel(offsetx+x,offsety+y,color)
def drawnumberMAX7219(number,offsetx,offsety,draw1):
for x in range(0,3):
for y in range(0,5):
if clock_font[3*number+2- x]&mask[y]:
drawScorePixel(offsetx+x,offsety+y,1,draw1)
elif clock_font[3*number+2- x]&mask[y]:
drawScorePixel(offsetx+x,offsety+y,0,draw1)
def drawTetrisMAX7219(piece,offsetx,offsety,draw1):
for x in range(0,4):
for y in range(0,8):
if theTetrisFont[4*piece + x]&mask[y]:
drawScorePixel(offsetx+x,offsety+y,1,draw1)
elif theTetrisFont[4*piece + x]&mask[y]:
drawScorePixel(offsetx+x,offsety+y,0,draw1)
def drawScorePixel(x,y,on,draw):
if PI:
draw.point((31-x,y), fill= "white")
#time.sleep(.01)
else:
pygame.draw.rect(DISPLAYSURF, COLORS[2], (64-2*x, 410+2*y,2,2))
def makeTextObjs(text, font, color):
surf = font.render(text, True, color)
return surf, surf.get_rect()
def scrollText(text):
if PI:
show_message(device,text,fill="white", font=proportional(CP437_FONT))
else:
titleSurf, titleRect = makeTextObjs(str(text), BASICFONT, TEXTCOLOR)
titleRect.center = (int(WINDOWWIDTH / 2) - 3, int(WINDOWHEIGHT / 2) - 3)
DISPLAYSURF.blit(titleSurf, titleRect)
def scoreText(score):
_score=score
if _score>999:
_score = 999
if PI:
with canvas(device) as draw:
for i in range(0,3):
text(draw, ((3-i)*8, 0), str(_score%10), fill="white")
_score //=10
else:
titleSurf, titleRect = makeTextObjs(str(_score), BASICFONT, TEXTCOLOR)
titleRect.center = (int(WINDOWWIDTH / 2) - 3, int(WINDOWHEIGHT / 2) - 3)
DISPLAYSURF.blit(titleSurf, titleRect)
def scoreTetris(score,level,nextpiece):
#if PI:
#device.clear()
_score=score
if _score>999999:
_score = 999999
if PI:
# one point per level
with canvas(device) as draw1:
for i in range(0,level):
drawScorePixel(i*2,7,1,draw1)
# score as 6 digit value
for i in range(0,6):
drawnumberMAX7219(_score%10,i*4,0,draw1)
_score //=10
# draw next piece
drawTetrisMAX7219(nextpiece,27,0,draw1)
if PI:
device.show()
def twoscoreText(score1,score2):
_score1=score1
_score2=score2
if _score1>9:
_score1 = 9
if _score2>9:
_score2 = 9
if PI:
with canvas(device) as draw:
text(draw, (0, 0), str(_score1), fill="white")
text(draw, (8, 0), ":", fill="white")
text(draw, (16, 0), str(_score2), fill="white")
text(draw, (24, 0), " ", fill="white")
else:
titleSurf, titleRect = makeTextObjs(str(_score1)+':'+str(_score2), BASICFONT, TEXTCOLOR)
titleRect.center = (int(WINDOWWIDTH / 2) - 3, int(WINDOWHEIGHT / 2) - 3)
DISPLAYSURF.blit(titleSurf, titleRect)
# program flow #
def terminate():
    global RUNNING  # without this, the assignment below would only bind a local name
    RUNNING = False
    pygame.quit()
    exit()
def checkForQuit():
for event in pygame.event.get(QUIT): # get all the QUIT events
terminate() # terminate if any QUIT events are present
for event in pygame.event.get(KEYUP): # get all the KEYUP events
if event.key == K_ESCAPE:
terminate() # terminate if the KEYUP event was for the Esc key
pygame.event.post(event) # put the other KEYUP event objects back
# tetris subroutines #
def calculateLevelAndFallFreq(lines):
# Based on the score, return the level the player is on and
# how many seconds pass until a falling piece falls one space.
level = int(lines / 10) + 1
# limit level to 10
if level >10:
level = 10
fallFreq = FALLING_SPEED - (level * 0.05)
if fallFreq <= 0.05:
fallFreq = 0.05
return level, fallFreq
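# With the level capped at 10, fallFreq ranges from 0.75 s at level 1 down to
# 0.3 s at level 10, so the 0.05 s lower bound above acts only as a safety net.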
def getNewPiece():
# return a random new piece in a random rotation and color
shape = random.choice(list(PIECES.keys()))
newPiece = {'shape': shape,
'rotation': random.randint(0, len(PIECES[shape]) - 1),
'x': int(BOARDWIDTH / 2) - int(TEMPLATEWIDTH / 2),
'y': -2, # start it above the board (i.e. less than 0)
'color': PIECES_ORDER.get(shape)}
return newPiece
def addToBoard(board, piece):
# fill in the board based on piece's location, shape, and rotation
for x in range(TEMPLATEWIDTH):
for y in range(TEMPLATEHEIGHT):
if PIECES[piece['shape']][piece['rotation']][y][x] != BLANK:
board[x + piece['x']][y + piece['y']] = piece['color']
def isOnBoard(x, y):
return x >= 0 and x < BOARDWIDTH and y < BOARDHEIGHT
def isValidPosition(board, piece, adjX=0, adjY=0):
# Return True if the piece is within the board and not colliding
for x in range(TEMPLATEWIDTH):
for y in range(TEMPLATEHEIGHT):
isAboveBoard = y + piece['y'] + adjY < 0
if isAboveBoard or PIECES[piece['shape']][piece['rotation']][y][x] == BLANK:
continue
if not isOnBoard(x + piece['x'] + adjX, y + piece['y'] + adjY):
return False
if board[x + piece['x'] + adjX][y + piece['y'] + adjY] != BLANK:
return False
return True
def isCompleteLine(board, y):
# Return True if the line filled with boxes with no gaps.
for x in range(BOARDWIDTH):
if board[x][y] == BLANK:
return False
return True
def removeCompleteLines(board):
# Remove any completed lines on the board, move everything above them down, and return the number of complete lines.
numLinesRemoved = 0
y = BOARDHEIGHT - 1 # start y at the bottom of the board
while y >= 0:
if isCompleteLine(board, y):
# Remove the line and pull boxes down by one line.
for pullDownY in range(y, 0, -1):
for x in range(BOARDWIDTH):
board[x][pullDownY] = board[x][pullDownY-1]
# Set very top line to blank.
for x in range(BOARDWIDTH):
board[x][0] = BLANK
numLinesRemoved += 1
# Note on the next iteration of the loop, y is the same.
# This is so that if the line that was pulled down is also
# complete, it will be removed.
else:
y -= 1 # move on to check next row up
return numLinesRemoved
def drawBoard(matrix):
for i in range(0,BOARDWIDTH):
for j in range(0,BOARDHEIGHT):
drawPixel(i,j,matrix[i][j])
def getBlankBoard():
# create and return a new blank board data structure
board = []
for i in range(BOARDWIDTH):
board.append([BLANK] * BOARDHEIGHT)
return board
def drawPiece(piece, pixelx=None, pixely=None):
shapeToDraw = PIECES[piece['shape']][piece['rotation']]
if pixelx == None and pixely == None:
# if pixelx & pixely hasn't been specified, use the location stored in the piece data structure
pixelx=piece['x']
pixely=piece['y']
# draw each of the boxes that make up the piece
for x in range(TEMPLATEWIDTH):
for y in range(TEMPLATEHEIGHT):
if shapeToDraw[y][x] != BLANK:
drawPixel( pixelx+ x , pixely+y,piece['color'])
# snake subroutines #
def getRandomLocation(wormCoords):
while True:
x = random.randint(0, BOARDWIDTH - 1)
y = random.randint(0, BOARDHEIGHT - 1)
if {'x': x, 'y': y} in wormCoords:
print('no apples on worm')
else:
break
return {'x': x, 'y': y}
def drawWorm(wormCoords):
for coord in wormCoords:
x = coord['x']
y = coord['y']
drawPixel(x,y,1)
def drawApple(coord):
x = coord['x']
y = coord['y']
drawPixel(x,y,2)
# pong subroutines #
def drawBar(x,y):
drawPixel(x-1,y,1)
drawPixel(x,y,1)
drawPixel(x+1,y,1)
def drawBall(x,y):
drawPixel(x,y,0)
if __name__ == '__main__':
main()
| [] | [] | ["SDL_VIDEODRIVER"] | [] | ["SDL_VIDEODRIVER"] | python | 1 | 0 |