filename (string, 4-198 chars) | content (string, 25-939k chars) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 class) | constargjson (string, 2-3.9k chars) | lang (string, 3 classes) | constargcount (float64, 0-129, nullable) | variableargcount (float64, always 0, nullable) | sentence (string, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
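Each row below pairs a source file (filename, content, lang) with the environment-variable names read in that file (environment, constarg, constargjson, and the two count columns). As a minimal sketch of how a single row could be consumed, assuming each row is available as a plain dict keyed by the column names above (the `row` literal is a hypothetical stand-in for a loaded record, with values copied from the first row below):

```python
import json

# Hypothetical row dict mirroring the columns above; only the fields used
# below are filled in, with values taken from the connect/mongodb.go row.
row = {
    "filename": "connect/mongodb.go",
    "lang": "go",
    "constargjson": '["MONGO_URL"]',
    "constargcount": 1.0,
    "variableargcount": 0.0,
}

# constargjson is a JSON-encoded string; decode it back into a list
# and check it against the reported count.
const_args = json.loads(row["constargjson"])
assert len(const_args) == int(row["constargcount"])

print(row["filename"], row["lang"], const_args)
```

In the rows shown here, constargjson carries the same names as constarg, only serialized as a single JSON string, so it round-trips through json.loads.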
connect/mongodb.go
|
package connect
import (
"context"
"log"
"os"
"time"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/options"
)
var (
client *mongo.Client
)
// Mongo returns a context.CancelFunc and connects to the Mongo Database
func Mongo() context.CancelFunc {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
var err error
client, err = mongo.NewClient(options.Client().ApplyURI(os.Getenv("MONGO_URL")))
if err != nil {
log.Fatalln("Unable to create mongo client! Error:", err.Error())
return nil
}
err = client.Connect(ctx)
if err != nil {
log.Fatalln("Unable to connect to mongo! Error:", err.Error())
return nil
}
log.Println("Database Successfully Connected")
return cancel
}
// Collection returns a Collection type to the specific database
func Collection(db, col string) *mongo.Collection {
return client.Database(db).Collection(col)
}
|
[
"\"MONGO_URL\""
] |
[] |
[
"MONGO_URL"
] |
[]
|
["MONGO_URL"]
|
go
| 1 | 0 | |
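In the row above, the single os.Getenv("MONGO_URL") call yields the quoted literal "\"MONGO_URL\"" in environment and the bare name MONGO_URL in constarg/constargjson, with constargcount 1 and variableargcount 0. Below is a minimal sketch of how such constant (string-literal) arguments could be recovered from Go source with a regular expression; it is an illustration under that assumption, not the pipeline actually used to build this dataset.

```python
import re

# Match os.Getenv("...") calls whose argument is a string literal
# (a "constant" argument in the sense of the constarg column).
GETENV_CONST = re.compile(r'os\.Getenv\(\s*"([^"]+)"\s*\)')

# Hypothetical one-line excerpt from the Go file in the row above.
go_source = 'client, err = mongo.NewClient(options.Client().ApplyURI(os.Getenv("MONGO_URL")))'

const_args = GETENV_CONST.findall(go_source)
print(const_args)       # ['MONGO_URL']
print(len(const_args))  # 1, matching constargcount for this row
```

A call whose argument is not a string literal would presumably land in variablearg/variableargjson instead, but no such call appears in the rows shown here (variableargcount is 0 throughout).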
test/init_test.go
|
package test
import (
"net/http"
"os"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/chyroc/lark"
)
type App struct {
AppID string
AppSecret string
CustomURL string
CustomSecret string
}
func (r *App) Ins() *lark.Lark {
if IsInCI() {
return lark.New(
lark.WithAppCredential(r.AppID, r.AppSecret),
lark.WithTimeout(time.Second*20),
)
}
return lark.New(
lark.WithAppCredential(r.AppID, r.AppSecret),
lark.WithTimeout(time.Second*20),
lark.WithLogger(lark.NewLoggerStdout(), lark.LogLevelTrace),
)
}
func (r *App) CustomBot() *lark.Lark {
return lark.New(
lark.WithCustomBot(r.CustomURL, r.CustomSecret),
lark.WithTimeout(time.Second*20),
// lark.WithLogger(lark.NewLoggerStdout(), lark.LogLevelDebug),
)
}
type Helpdesk struct {
AppID string
AppSecret string
ID string
Token string
}
func (r *Helpdesk) Ins() *lark.Lark {
return lark.New(
lark.WithAppCredential(r.AppID, r.AppSecret),
lark.WithHelpdeskCredential(r.ID, r.Token),
lark.WithTimeout(time.Second*20),
)
}
var HelpdeskAllPermission = Helpdesk{
AppID: os.Getenv("LARK_APP_ALL_PERMISSION_APP_ID"),
AppSecret: os.Getenv("LARK_APP_ALL_PERMISSION_APP_SECRET"),
ID: os.Getenv("LARK_HELPDESK_ALL_PERMISSION_ID"),
Token: os.Getenv("LARK_HELPDESK_ALL_PERMISSION_TOKEN"),
}
type User struct {
UserID string
OpenID string
Name string
AccessToken map[string]string
RefreshToken map[string]string
}
func (r User) OneAccessToken() string {
for _, v := range r.AccessToken {
return v
}
return ""
}
type Chat struct {
ChatID string
Name string
}
var AppNoPermission = App{
AppID: os.Getenv("LARK_APP_NO_PERMISSION_APP_ID"),
AppSecret: os.Getenv("LARK_APP_NO_PERMISSION_APP_SECRET"),
}
var AppAllPermission = App{
AppID: os.Getenv("LARK_APP_ALL_PERMISSION_APP_ID"),
AppSecret: os.Getenv("LARK_APP_ALL_PERMISSION_APP_SECRET"),
}
var AppCustomBotNoValid = App{
CustomURL: os.Getenv("LARK_APP_CUSTOM_BOT_NO_VALID_WEBHOOK_URL"),
}
var AppCustomBotCheckCanSendWord = App{
CustomURL: os.Getenv("LARK_APP_CUSTOM_BOT_CHECK_CAN_SEND_WEBHOOK_URL"),
}
var AppCustomBotCheckSign = App{
CustomURL: os.Getenv("LARK_APP_CUSTOM_BOT_CHECK_SIGN_WEBHOOK_URL"),
CustomSecret: os.Getenv("LARK_APP_CUSTOM_BOT_CHECK_SIGN_SIGN"),
}
var UserAdmin = User{
UserID: os.Getenv("LARK_USER_ADMIN_USER_ID"),
OpenID: os.Getenv("LARK_USER_ADMIN_OPEN_ID"),
Name: os.Getenv("LARK_USER_ADMIN_NAME"),
AccessToken: map[string]string{
os.Getenv("LARK_APP_ALL_PERMISSION_APP_ID"): os.Getenv("LARK_ACCESS_TOKEN_ALL_PERMISSION_APP"),
},
RefreshToken: map[string]string{
os.Getenv("LARK_APP_ALL_PERMISSION_APP_ID"): os.Getenv("LARK_REFRESH_TOKEN_ALL_PERMISSION_APP"),
},
}
// This is the public chat; the three characters 「群公共」 must be set
var ChatContainALLPermissionApp = Chat{
ChatID: os.Getenv("LARK_CHAT_CONTAINS_APP_PERMISSION_APP_CHAT_ID"),
Name: "包含「lark-sdk」的群",
}
var ChatNotContainALLPermissionApp = Chat{
ChatID: os.Getenv("LARK_CHAT_NOT_CONTAINS_APP_PERMISSION_APP_CHAT_ID"),
Name: "不包含「lark-sdk」的群",
}
var ChatForSendMessage = Chat{
ChatID: os.Getenv("LARK_CHAT_FOR_SEND_MSG_CHAT_ID"),
Name: "for-send-message",
}
type File struct {
Key string
}
var File1 = File{
Key: os.Getenv("LARK_FILE_KEY_TEST_FILE_1_PNG"), // this is the file ./test/file_1.png
}
var File2 = File{
Key: os.Getenv("LARK_FILE_KEY_TEST_FILE_2_DOC"), // ./test/file_2.docx
}
type Message struct {
MessageID string
ChatID string
}
var MessageAdminSendTextInChatContainAllPermissionApp = Message{
MessageID: os.Getenv("LARK_MESSAGE_ADMIN_SEND_TEST_IN_CHAT_CONTAINS_ALL_PERMISSION_APP"),
ChatID: os.Getenv("LARK_CHAT_CONTAINS_APP_PERMISSION_APP_CHAT_ID"),
}
var MessageAdminSendImageInChatContainAllPermissionApp = Message{
MessageID: os.Getenv("LARK_MESSAGE_ADMIN_SEND_IMAGE_IN_CHAT_CONTAINS_ALL_PERMISSION_APP"),
ChatID: os.Getenv("LARK_CHAT_CONTAINS_APP_PERMISSION_APP_CHAT_ID"),
}
var MessageAllPermissionAppSendTextInChatContainAllPermissionApp = Message{
MessageID: os.Getenv("LARK_MESSAGE_ALL_PERMISSION_APP_SEND_TEXT_IN_CHAT_CONTAINS_ALL_PERMISSION_APP"),
ChatID: os.Getenv("LARK_CHAT_CONTAINS_APP_PERMISSION_APP_CHAT_ID"),
}
type Approval struct {
Code string `json:"code"`
}
var ApprovalALLField = Approval{
Code: os.Getenv("LARK_APPROVAL_ALL_FIELD"),
}
func Test_Config(t *testing.T) {
as := assert.New(t)
as.NotEmpty(AppNoPermission.AppID)
as.NotEmpty(AppNoPermission.AppSecret)
as.NotEmpty(AppAllPermission.AppID)
as.NotEmpty(AppAllPermission.AppSecret)
as.NotEmpty(UserAdmin.UserID)
as.NotEmpty(UserAdmin.OpenID)
as.NotEmpty(ChatContainALLPermissionApp.ChatID)
as.NotEmpty(ChatNotContainALLPermissionApp.ChatID)
as.NotEmpty(ChatForSendMessage.ChatID)
as.NotEmpty(File1.Key)
as.NotEmpty(File2.Key)
as.NotEmpty(MessageAdminSendTextInChatContainAllPermissionApp.ChatID)
as.NotEmpty(MessageAdminSendTextInChatContainAllPermissionApp.MessageID)
as.NotEmpty(MessageAdminSendImageInChatContainAllPermissionApp.ChatID)
as.NotEmpty(MessageAdminSendImageInChatContainAllPermissionApp.MessageID)
as.NotEmpty(MessageAllPermissionAppSendTextInChatContainAllPermissionApp.ChatID)
as.NotEmpty(MessageAllPermissionAppSendTextInChatContainAllPermissionApp.MessageID)
}
type fakeHTTPWriter struct {
header http.Header
code int
lock sync.Mutex
data []byte
}
func newFakeHTTPWriter() *fakeHTTPWriter {
return &fakeHTTPWriter{
header: map[string][]string{},
}
}
func (r *fakeHTTPWriter) Header() http.Header {
return r.header
}
func (r *fakeHTTPWriter) Write(bytes []byte) (int, error) {
r.lock.Lock()
defer r.lock.Unlock()
r.data = append(r.data, bytes...)
return len(bytes), nil
}
func (r *fakeHTTPWriter) WriteHeader(statusCode int) {
r.code = statusCode
}
func (r *fakeHTTPWriter) str() string {
return string(r.data)
}
|
[
"\"LARK_APP_ALL_PERMISSION_APP_ID\"",
"\"LARK_APP_ALL_PERMISSION_APP_SECRET\"",
"\"LARK_HELPDESK_ALL_PERMISSION_ID\"",
"\"LARK_HELPDESK_ALL_PERMISSION_TOKEN\"",
"\"LARK_APP_NO_PERMISSION_APP_ID\"",
"\"LARK_APP_NO_PERMISSION_APP_SECRET\"",
"\"LARK_APP_ALL_PERMISSION_APP_ID\"",
"\"LARK_APP_ALL_PERMISSION_APP_SECRET\"",
"\"LARK_APP_CUSTOM_BOT_NO_VALID_WEBHOOK_URL\"",
"\"LARK_APP_CUSTOM_BOT_CHECK_CAN_SEND_WEBHOOK_URL\"",
"\"LARK_APP_CUSTOM_BOT_CHECK_SIGN_WEBHOOK_URL\"",
"\"LARK_APP_CUSTOM_BOT_CHECK_SIGN_SIGN\"",
"\"LARK_USER_ADMIN_USER_ID\"",
"\"LARK_USER_ADMIN_OPEN_ID\"",
"\"LARK_USER_ADMIN_NAME\"",
"\"LARK_APP_ALL_PERMISSION_APP_ID\"",
"\"LARK_ACCESS_TOKEN_ALL_PERMISSION_APP\"",
"\"LARK_APP_ALL_PERMISSION_APP_ID\"",
"\"LARK_REFRESH_TOKEN_ALL_PERMISSION_APP\"",
"\"LARK_CHAT_CONTAINS_APP_PERMISSION_APP_CHAT_ID\"",
"\"LARK_CHAT_NOT_CONTAINS_APP_PERMISSION_APP_CHAT_ID\"",
"\"LARK_CHAT_FOR_SEND_MSG_CHAT_ID\"",
"\"LARK_FILE_KEY_TEST_FILE_1_PNG\"",
"\"LARK_FILE_KEY_TEST_FILE_2_DOC\"",
"\"LARK_MESSAGE_ADMIN_SEND_TEST_IN_CHAT_CONTAINS_ALL_PERMISSION_APP\"",
"\"LARK_CHAT_CONTAINS_APP_PERMISSION_APP_CHAT_ID\"",
"\"LARK_MESSAGE_ADMIN_SEND_IMAGE_IN_CHAT_CONTAINS_ALL_PERMISSION_APP\"",
"\"LARK_CHAT_CONTAINS_APP_PERMISSION_APP_CHAT_ID\"",
"\"LARK_MESSAGE_ALL_PERMISSION_APP_SEND_TEXT_IN_CHAT_CONTAINS_ALL_PERMISSION_APP\"",
"\"LARK_CHAT_CONTAINS_APP_PERMISSION_APP_CHAT_ID\"",
"\"LARK_APPROVAL_ALL_FIELD\""
] |
[] |
[
"LARK_MESSAGE_ADMIN_SEND_TEST_IN_CHAT_CONTAINS_ALL_PERMISSION_APP",
"LARK_APP_ALL_PERMISSION_APP_SECRET",
"LARK_APP_CUSTOM_BOT_CHECK_SIGN_SIGN",
"LARK_APP_NO_PERMISSION_APP_SECRET",
"LARK_HELPDESK_ALL_PERMISSION_TOKEN",
"LARK_APP_CUSTOM_BOT_CHECK_SIGN_WEBHOOK_URL",
"LARK_USER_ADMIN_USER_ID",
"LARK_FILE_KEY_TEST_FILE_2_DOC",
"LARK_APPROVAL_ALL_FIELD",
"LARK_MESSAGE_ADMIN_SEND_IMAGE_IN_CHAT_CONTAINS_ALL_PERMISSION_APP",
"LARK_APP_ALL_PERMISSION_APP_ID",
"LARK_USER_ADMIN_NAME",
"LARK_CHAT_NOT_CONTAINS_APP_PERMISSION_APP_CHAT_ID",
"LARK_FILE_KEY_TEST_FILE_1_PNG",
"LARK_APP_CUSTOM_BOT_NO_VALID_WEBHOOK_URL",
"LARK_HELPDESK_ALL_PERMISSION_ID",
"LARK_MESSAGE_ALL_PERMISSION_APP_SEND_TEXT_IN_CHAT_CONTAINS_ALL_PERMISSION_APP",
"LARK_APP_NO_PERMISSION_APP_ID",
"LARK_USER_ADMIN_OPEN_ID",
"LARK_CHAT_CONTAINS_APP_PERMISSION_APP_CHAT_ID",
"LARK_CHAT_FOR_SEND_MSG_CHAT_ID",
"LARK_ACCESS_TOKEN_ALL_PERMISSION_APP",
"LARK_REFRESH_TOKEN_ALL_PERMISSION_APP",
"LARK_APP_CUSTOM_BOT_CHECK_CAN_SEND_WEBHOOK_URL"
] |
[]
|
["LARK_MESSAGE_ADMIN_SEND_TEST_IN_CHAT_CONTAINS_ALL_PERMISSION_APP", "LARK_APP_ALL_PERMISSION_APP_SECRET", "LARK_APP_CUSTOM_BOT_CHECK_SIGN_SIGN", "LARK_APP_NO_PERMISSION_APP_SECRET", "LARK_HELPDESK_ALL_PERMISSION_TOKEN", "LARK_APP_CUSTOM_BOT_CHECK_SIGN_WEBHOOK_URL", "LARK_USER_ADMIN_USER_ID", "LARK_FILE_KEY_TEST_FILE_2_DOC", "LARK_APPROVAL_ALL_FIELD", "LARK_MESSAGE_ADMIN_SEND_IMAGE_IN_CHAT_CONTAINS_ALL_PERMISSION_APP", "LARK_APP_ALL_PERMISSION_APP_ID", "LARK_USER_ADMIN_NAME", "LARK_CHAT_NOT_CONTAINS_APP_PERMISSION_APP_CHAT_ID", "LARK_FILE_KEY_TEST_FILE_1_PNG", "LARK_APP_CUSTOM_BOT_NO_VALID_WEBHOOK_URL", "LARK_HELPDESK_ALL_PERMISSION_ID", "LARK_MESSAGE_ALL_PERMISSION_APP_SEND_TEXT_IN_CHAT_CONTAINS_ALL_PERMISSION_APP", "LARK_APP_NO_PERMISSION_APP_ID", "LARK_USER_ADMIN_OPEN_ID", "LARK_CHAT_CONTAINS_APP_PERMISSION_APP_CHAT_ID", "LARK_CHAT_FOR_SEND_MSG_CHAT_ID", "LARK_ACCESS_TOKEN_ALL_PERMISSION_APP", "LARK_REFRESH_TOKEN_ALL_PERMISSION_APP", "LARK_APP_CUSTOM_BOT_CHECK_CAN_SEND_WEBHOOK_URL"]
|
go
| 24 | 0 | |
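In the row above, environment lists one quoted entry per os.Getenv call site (31 entries, with names such as "LARK_APP_ALL_PERMISSION_APP_ID" repeated once per use), while constarg/constargjson hold the 24 distinct names and constargcount is 24. The entries in constarg also do not follow source order, so the lists are best treated as unordered sets of names. A small sketch of that relationship, using a truncated, hypothetical stand-in for the full 31-entry list:

```python
# Truncated stand-in for the environment column above: raw quoted literals,
# one entry per call site, so the same name can appear more than once.
environment = [
    '"LARK_APP_ALL_PERMISSION_APP_ID"',
    '"LARK_APP_ALL_PERMISSION_APP_SECRET"',
    '"LARK_APP_ALL_PERMISSION_APP_ID"',  # same variable read at another call site
]

# constarg-style view: strip the surrounding quotes and de-duplicate.
distinct_names = set(entry.strip('"') for entry in environment)

print(sorted(distinct_names))  # ['LARK_APP_ALL_PERMISSION_APP_ID', 'LARK_APP_ALL_PERMISSION_APP_SECRET']
print(len(distinct_names))     # 2 distinct names from 3 call sites
```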
isimip_publisher/tests/test_commands.py
|
import os
import shutil
from pathlib import Path
import pytest
from dotenv import load_dotenv
from isimip_publisher.utils.database import (Dataset, Resource,
init_database_session)
@pytest.fixture(scope='session')
def setup():
load_dotenv(Path().cwd() / '.env')
base_dir = Path(__file__).parent.parent.parent
test_dir = base_dir / 'testing'
shutil.rmtree(test_dir / 'work', ignore_errors=True)
shutil.rmtree(test_dir / 'public', ignore_errors=True)
shutil.rmtree(test_dir / 'archive', ignore_errors=True)
session = init_database_session(os.getenv('DATABASE'))
for resource in session.query(Resource):
session.delete(resource)
session.commit()
for dataset in session.query(Dataset):
for file in dataset.files:
session.delete(file)
session.delete(dataset)
session.commit()
def test_empty(setup, script_runner):
response = script_runner.run('isimip-publisher')
assert not response.stderr
assert response.returncode == 0
def test_help(setup, script_runner):
response = script_runner.run('isimip-publisher', '--help')
assert response.success, response.stderr
assert response.stdout
assert not response.stderr
def test_list_remote(setup, script_runner):
response = script_runner.run('isimip-publisher', 'list_remote', 'round/product/sector')
assert response.success, response.stderr
assert response.stdout
assert not response.stderr
assert len(response.stdout.splitlines()) == 12
def test_match_remote(setup, script_runner):
response = script_runner.run('isimip-publisher', 'match_remote', 'round/product/sector')
assert response.success, response.stderr
assert not response.stderr
assert len(response.stdout.splitlines()) == 12
def test_fetch_files(setup, script_runner):
response = script_runner.run('isimip-publisher', 'fetch_files', 'round/product/sector')
assert response.success, response.stderr
assert not response.stdout
assert response.stderr.strip().startswith('fetch_files')
def test_list_local(setup, script_runner):
response = script_runner.run('isimip-publisher', 'list_local', 'round/product/sector')
assert response.success, response.stderr
assert response.stdout
assert not response.stderr
assert len(response.stdout.splitlines()) == 12
def test_match_local(setup, script_runner):
response = script_runner.run('isimip-publisher', 'match_local', 'round/product/sector')
assert response.success, response.stderr
assert not response.stderr
assert len(response.stdout.splitlines()) == 12
def test_write_jsons(setup, script_runner):
response = script_runner.run('isimip-publisher', 'write_jsons', 'round/product/sector')
assert response.success, response.stderr
assert not response.stdout
assert response.stderr.strip().startswith('write_jsons')
def test_insert_datasets(setup, script_runner):
response = script_runner.run('isimip-publisher', 'insert_datasets', 'round/product/sector')
assert response.success, response.stderr
assert not response.stdout
assert response.stderr.strip().startswith('insert_datasets')
def test_publish_datasets(setup, script_runner):
response = script_runner.run('isimip-publisher', 'publish_datasets', 'round/product/sector')
assert response.success, response.stderr
assert not response.stdout
assert response.stderr.strip().startswith('publish_datasets')
def test_list_public(setup, script_runner):
response = script_runner.run('isimip-publisher', 'list_public', 'round/product/sector')
assert response.success, response.stderr
assert response.stdout
assert not response.stderr
assert len(response.stdout.splitlines()) == 12
def test_match_public(setup, script_runner):
response = script_runner.run('isimip-publisher', 'match_public', 'round/product/sector')
assert response.success, response.stderr
assert not response.stderr
assert len(response.stdout.splitlines()) == 12
def test_link_files(setup, script_runner):
response = script_runner.run('isimip-publisher', 'link_files', 'round/product/sector/model', 'round/product/sector2/model')
assert response.success, response.stderr
assert not response.stdout
assert response.stderr.strip().startswith('link_files')
def test_link_datasets(setup, script_runner):
response = script_runner.run('isimip-publisher', 'link_datasets', 'round/product/sector/model', 'round/product/sector2/model')
assert response.success, response.stderr
assert not response.stdout
assert response.stderr.strip().startswith('link_datasets')
def test_insert_doi(setup, script_runner):
response = script_runner.run('isimip-publisher', 'insert_doi', 'testing/resources/test.json')
assert response.success, response.stderr
assert not response.stdout
assert not response.stderr
def test_update_doi(setup, script_runner):
response = script_runner.run('isimip-publisher', 'update_doi', 'testing/resources/test1.json')
assert response.success, response.stderr
assert not response.stdout
assert not response.stderr
def test_update_index(setup, script_runner):
response = script_runner.run('isimip-publisher', 'update_index', 'round/product/sector/model')
assert response.success, response.stderr
assert not response.stdout
assert not response.stderr
def test_check(setup, script_runner):
response = script_runner.run('isimip-publisher', 'check', 'round/product/sector/model')
assert response.success, response.stderr
assert not response.stdout
assert not response.stderr
def test_archive_datasets(setup, script_runner):
response = script_runner.run('isimip-publisher', 'archive_datasets', 'round/product/sector/model2')
assert response.success, response.stderr
assert not response.stdout
assert response.stderr.strip().startswith('archive_datasets')
def test_clean(setup, script_runner):
response = script_runner.run('isimip-publisher', 'clean', 'round/product/sector/model')
assert response.success, response.stderr
assert not response.stdout
assert not response.stderr
|
[] |
[] |
[
"DATABASE"
] |
[]
|
["DATABASE"]
|
python
| 1 | 0 | |
go/test/endtoend/recovery/unshardedrecovery/recovery.go
|
/*
Copyright 2020 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package unshardedrecovery
import (
"context"
"flag"
"fmt"
"os"
"os/exec"
"path"
"testing"
"vitess.io/vitess/go/test/endtoend/recovery"
"vitess.io/vitess/go/test/endtoend/sharding/initialsharding"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/vtgate/vtgateconn"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/test/endtoend/cluster"
)
var (
primary *cluster.Vttablet
replica1 *cluster.Vttablet
replica2 *cluster.Vttablet
replica3 *cluster.Vttablet
localCluster *cluster.LocalProcessCluster
newInitDBFile string
cell = cluster.DefaultCell
hostname = "localhost"
keyspaceName = "ks"
dbPassword = "VtDbaPass"
shardKsName = fmt.Sprintf("%s/%s", keyspaceName, shardName)
dbCredentialFile string
shardName = "0"
commonTabletArg = []string{
"-vreplication_healthcheck_topology_refresh", "1s",
"-vreplication_healthcheck_retry_delay", "1s",
"-vreplication_retry_delay", "1s",
"-degraded_threshold", "5s",
"-lock_tables_timeout", "5s",
"-watch_replication_stream",
"-serving_state_grace_period", "1s"}
recoveryKS1 = "recovery_ks1"
recoveryKS2 = "recovery_ks2"
vtInsertTest = `create table vt_insert_test (
id bigint auto_increment,
msg varchar(64),
primary key (id)
) Engine=InnoDB`
vSchema = `{
"tables": {
"vt_insert_test": {}
}
}`
)
// TestMainImpl creates a cluster for unsharded recovery testing.
func TestMainImpl(m *testing.M) {
defer cluster.PanicHandler(nil)
flag.Parse()
exitCode, err := func() (int, error) {
localCluster = cluster.NewCluster(cell, hostname)
defer localCluster.Teardown()
// Start topo server
err := localCluster.StartTopo()
if err != nil {
return 1, err
}
// Start keyspace
keyspace := &cluster.Keyspace{
Name: keyspaceName,
}
localCluster.Keyspaces = append(localCluster.Keyspaces, *keyspace)
dbCredentialFile = initialsharding.WriteDbCredentialToTmp(localCluster.TmpDirectory)
initDb, _ := os.ReadFile(path.Join(os.Getenv("VTROOT"), "/config/init_db.sql"))
sql := string(initDb)
newInitDBFile = path.Join(localCluster.TmpDirectory, "init_db_with_passwords.sql")
sql = sql + initialsharding.GetPasswordUpdateSQL(localCluster)
// https://github.com/vitessio/vitess/issues/8315
oldAlterTableMode := `
SET GLOBAL old_alter_table = ON;
`
sql = sql + oldAlterTableMode
os.WriteFile(newInitDBFile, []byte(sql), 0666)
extraArgs := []string{"-db-credentials-file", dbCredentialFile}
commonTabletArg = append(commonTabletArg, "-db-credentials-file", dbCredentialFile)
shard := cluster.Shard{
Name: shardName,
}
var mysqlProcs []*exec.Cmd
for i := 0; i < 4; i++ {
tabletType := "replica"
if i == 0 {
tabletType = "primary"
}
tablet := localCluster.NewVttabletInstance(tabletType, 0, cell)
tablet.VttabletProcess = localCluster.VtprocessInstanceFromVttablet(tablet, shard.Name, keyspaceName)
tablet.VttabletProcess.DbPassword = dbPassword
tablet.VttabletProcess.ExtraArgs = commonTabletArg
if recovery.UseXb {
tablet.VttabletProcess.ExtraArgs = append(tablet.VttabletProcess.ExtraArgs, recovery.XbArgs...)
}
tablet.VttabletProcess.SupportsBackup = true
tablet.VttabletProcess.EnableSemiSync = true
tablet.MysqlctlProcess = *cluster.MysqlCtlProcessInstance(tablet.TabletUID, tablet.MySQLPort, localCluster.TmpDirectory)
tablet.MysqlctlProcess.InitDBFile = newInitDBFile
tablet.MysqlctlProcess.ExtraArgs = extraArgs
proc, err := tablet.MysqlctlProcess.StartProcess()
if err != nil {
return 1, err
}
mysqlProcs = append(mysqlProcs, proc)
shard.Vttablets = append(shard.Vttablets, tablet)
}
for _, proc := range mysqlProcs {
if err := proc.Wait(); err != nil {
return 1, err
}
}
primary = shard.Vttablets[0]
replica1 = shard.Vttablets[1]
replica2 = shard.Vttablets[2]
replica3 = shard.Vttablets[3]
for _, tablet := range []cluster.Vttablet{*primary, *replica1} {
if err := tablet.VttabletProcess.Setup(); err != nil {
return 1, err
}
}
if err := localCluster.VtctlclientProcess.InitShardPrimary(keyspaceName, shard.Name, cell, primary.TabletUID); err != nil {
return 1, err
}
return m.Run(), nil
}()
if err != nil {
log.Error(err.Error())
os.Exit(1)
} else {
os.Exit(exitCode)
}
}
// TestRecoveryImpl does the following:
// - create a shard with primary and replica1 only
// - run InitShardPrimary
// - insert some data
// - take a backup
// - insert more data on the primary
// - take another backup
// - create a recovery keyspace after first backup
// - bring up tablet_replica2 in the new keyspace
// - check that new tablet does not have data created after backup1
// - create second recovery keyspace after second backup
// - bring up tablet_replica3 in second keyspace
// - check that new tablet has data created after backup1 but not data created after backup2
// - check that vtgate queries work correctly
func TestRecoveryImpl(t *testing.T) {
defer cluster.PanicHandler(t)
defer tabletsTeardown()
verifyInitialReplication(t)
err := localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias)
assert.NoError(t, err)
backups := listBackups(t)
require.Equal(t, len(backups), 1)
assert.Contains(t, backups[0], replica1.Alias)
_, err = primary.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test2')", keyspaceName, true)
assert.NoError(t, err)
cluster.VerifyRowsInTablet(t, replica1, keyspaceName, 2)
err = localCluster.VtctlclientProcess.ApplyVSchema(keyspaceName, vSchema)
assert.NoError(t, err)
output, err := localCluster.VtctlclientProcess.ExecuteCommandWithOutput("GetVSchema", keyspaceName)
assert.NoError(t, err)
assert.Contains(t, output, "vt_insert_test")
recovery.RestoreTablet(t, localCluster, replica2, recoveryKS1, "0", keyspaceName, commonTabletArg)
output, err = localCluster.VtctlclientProcess.ExecuteCommandWithOutput("GetSrvVSchema", cell)
assert.NoError(t, err)
assert.Contains(t, output, keyspaceName)
assert.Contains(t, output, recoveryKS1)
err = localCluster.VtctlclientProcess.ExecuteCommand("GetSrvKeyspace", cell, keyspaceName)
assert.NoError(t, err)
output, err = localCluster.VtctlclientProcess.ExecuteCommandWithOutput("GetVSchema", recoveryKS1)
assert.NoError(t, err)
assert.Contains(t, output, "vt_insert_test")
cluster.VerifyRowsInTablet(t, replica2, keyspaceName, 1)
cluster.VerifyLocalMetadata(t, replica2, recoveryKS1, shardName, cell)
// update the original row in primary
_, err = primary.VttabletProcess.QueryTablet("update vt_insert_test set msg = 'msgx1' where id = 1", keyspaceName, true)
assert.NoError(t, err)
//verify that primary has new value
qr, err := primary.VttabletProcess.QueryTablet("select msg from vt_insert_test where id = 1", keyspaceName, true)
assert.NoError(t, err)
assert.Equal(t, "msgx1", fmt.Sprintf("%s", qr.Rows[0][0].ToBytes()))
//verify that restored replica has old value
qr, err = replica2.VttabletProcess.QueryTablet("select msg from vt_insert_test where id = 1", keyspaceName, true)
assert.NoError(t, err)
assert.Equal(t, "test1", fmt.Sprintf("%s", qr.Rows[0][0].ToBytes()))
err = localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias)
assert.NoError(t, err)
_, err = primary.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test3')", keyspaceName, true)
assert.NoError(t, err)
cluster.VerifyRowsInTablet(t, replica1, keyspaceName, 3)
recovery.RestoreTablet(t, localCluster, replica3, recoveryKS2, "0", keyspaceName, commonTabletArg)
output, err = localCluster.VtctlclientProcess.ExecuteCommandWithOutput("GetVSchema", recoveryKS2)
assert.NoError(t, err)
assert.Contains(t, output, "vt_insert_test")
cluster.VerifyRowsInTablet(t, replica3, keyspaceName, 2)
// update the original row in primary
_, err = primary.VttabletProcess.QueryTablet("update vt_insert_test set msg = 'msgx2' where id = 1", keyspaceName, true)
assert.NoError(t, err)
//verify that primary has new value
qr, err = primary.VttabletProcess.QueryTablet("select msg from vt_insert_test where id = 1", keyspaceName, true)
assert.NoError(t, err)
assert.Equal(t, "msgx2", fmt.Sprintf("%s", qr.Rows[0][0].ToBytes()))
//verify that restored replica has old value
qr, err = replica3.VttabletProcess.QueryTablet("select msg from vt_insert_test where id = 1", keyspaceName, true)
assert.NoError(t, err)
assert.Equal(t, "msgx1", fmt.Sprintf("%s", qr.Rows[0][0].ToBytes()))
vtgateInstance := localCluster.NewVtgateInstance()
vtgateInstance.TabletTypesToWait = "REPLICA"
err = vtgateInstance.Setup()
localCluster.VtgateGrpcPort = vtgateInstance.GrpcPort
assert.NoError(t, err)
defer vtgateInstance.TearDown()
err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.primary", keyspaceName, shardName), 1)
assert.NoError(t, err)
err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", keyspaceName, shardName), 1)
assert.NoError(t, err)
err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", recoveryKS1, shardName), 1)
assert.NoError(t, err)
err = vtgateInstance.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.replica", recoveryKS2, shardName), 1)
assert.NoError(t, err)
// Build vtgate grpc connection
grpcAddress := fmt.Sprintf("%s:%d", localCluster.Hostname, localCluster.VtgateGrpcPort)
vtgateConn, err := vtgateconn.Dial(context.Background(), grpcAddress)
assert.NoError(t, err)
defer vtgateConn.Close()
session := vtgateConn.Session("@replica", nil)
//check that vtgate doesn't route queries to new tablet
recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(3)")
recovery.VerifyQueriesUsingVtgate(t, session, "select msg from vt_insert_test where id = 1", `VARCHAR("msgx2")`)
recovery.VerifyQueriesUsingVtgate(t, session, fmt.Sprintf("select count(*) from %s.vt_insert_test", recoveryKS1), "INT64(1)")
recovery.VerifyQueriesUsingVtgate(t, session, fmt.Sprintf("select msg from %s.vt_insert_test where id = 1", recoveryKS1), `VARCHAR("test1")`)
recovery.VerifyQueriesUsingVtgate(t, session, fmt.Sprintf("select count(*) from %s.vt_insert_test", recoveryKS2), "INT64(2)")
recovery.VerifyQueriesUsingVtgate(t, session, fmt.Sprintf("select msg from %s.vt_insert_test where id = 1", recoveryKS2), `VARCHAR("msgx1")`)
// check that new keyspace is accessible with 'use ks'
cluster.ExecuteQueriesUsingVtgate(t, session, "use "+recoveryKS1+"@replica")
recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(1)")
cluster.ExecuteQueriesUsingVtgate(t, session, "use "+recoveryKS2+"@replica")
recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(2)")
// check that new tablet is accessible with use `ks:shard`
cluster.ExecuteQueriesUsingVtgate(t, session, "use `"+recoveryKS1+":0@replica`")
recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(1)")
cluster.ExecuteQueriesUsingVtgate(t, session, "use `"+recoveryKS2+":0@replica`")
recovery.VerifyQueriesUsingVtgate(t, session, "select count(*) from vt_insert_test", "INT64(2)")
}
// verifyInitialReplication creates the schema on the primary, inserts some data into the primary, and verifies the same data on the replica.
func verifyInitialReplication(t *testing.T) {
_, err := primary.VttabletProcess.QueryTablet(vtInsertTest, keyspaceName, true)
assert.NoError(t, err)
_, err = primary.VttabletProcess.QueryTablet("insert into vt_insert_test (msg) values ('test1')", keyspaceName, true)
assert.NoError(t, err)
cluster.VerifyRowsInTablet(t, replica1, keyspaceName, 1)
}
func listBackups(t *testing.T) []string {
output, err := localCluster.ListBackups(shardKsName)
assert.NoError(t, err)
return output
}
func tabletsTeardown() {
var mysqlProcs []*exec.Cmd
for _, tablet := range []*cluster.Vttablet{primary, replica1, replica2, replica3} {
proc, _ := tablet.MysqlctlProcess.StopProcess()
mysqlProcs = append(mysqlProcs, proc)
tablet.VttabletProcess.TearDown()
}
for _, proc := range mysqlProcs {
proc.Wait()
}
}
|
[
"\"VTROOT\""
] |
[] |
[
"VTROOT"
] |
[]
|
["VTROOT"]
|
go
| 1 | 0 | |
examples/service/api/recording/page/recording_page_example.go
|
package main
import (
"log"
"os"
"github.com/RJPearson94/twilio-sdk-go"
v2010 "github.com/RJPearson94/twilio-sdk-go/service/api/v2010"
"github.com/RJPearson94/twilio-sdk-go/service/api/v2010/account/recordings"
"github.com/RJPearson94/twilio-sdk-go/session/credentials"
)
var apiClient *v2010.V2010
func init() {
creds, err := credentials.New(credentials.Account{
Sid: os.Getenv("TWILIO_ACCOUNT_SID"),
AuthToken: os.Getenv("TWILIO_AUTH_TOKEN"),
})
if err != nil {
log.Panicf("%s", err.Error())
}
apiClient = twilio.NewWithCredentials(creds).API.V2010
}
func main() {
resp, err := apiClient.
Account("ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX").
Recordings.
Page(&recordings.RecordingsPageOptions{})
if err != nil {
log.Panicf("%s", err.Error())
}
log.Printf("%v recording(s) found on page", len(resp.Recordings))
}
|
[
"\"TWILIO_ACCOUNT_SID\"",
"\"TWILIO_AUTH_TOKEN\""
] |
[] |
[
"TWILIO_AUTH_TOKEN",
"TWILIO_ACCOUNT_SID"
] |
[]
|
["TWILIO_AUTH_TOKEN", "TWILIO_ACCOUNT_SID"]
|
go
| 2 | 0 | |
airflow/providers/google/marketing_platform/example_dags/example_display_video.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that shows how to use DisplayVideo.
"""
import os
from typing import Dict
from airflow import models
from airflow.providers.google.cloud.transfers.gcs_to_bigquery import GCSToBigQueryOperator
from airflow.providers.google.marketing_platform.hooks.display_video import GoogleDisplayVideo360Hook
from airflow.providers.google.marketing_platform.operators.display_video import (
GoogleDisplayVideo360CreateReportOperator, GoogleDisplayVideo360CreateSDFDownloadTaskOperator,
GoogleDisplayVideo360DeleteReportOperator, GoogleDisplayVideo360DownloadLineItemsOperator,
GoogleDisplayVideo360DownloadReportOperator, GoogleDisplayVideo360RunReportOperator,
GoogleDisplayVideo360SDFtoGCSOperator, GoogleDisplayVideo360UploadLineItemsOperator,
)
from airflow.providers.google.marketing_platform.sensors.display_video import (
GoogleDisplayVideo360GetSDFDownloadOperationSensor, GoogleDisplayVideo360ReportSensor,
)
from airflow.utils import dates
# [START howto_display_video_env_variables]
BUCKET = os.environ.get("GMP_DISPLAY_VIDEO_BUCKET", "gs://test-display-video-bucket")
ADVERTISER_ID = os.environ.get("GMP_ADVERTISER_ID", 1234567)
OBJECT_NAME = os.environ.get("GMP_OBJECT_NAME", "files/report.csv")
PATH_TO_UPLOAD_FILE = os.environ.get("GCP_GCS_PATH_TO_UPLOAD_FILE", "test-gcs-example.txt")
PATH_TO_SAVED_FILE = os.environ.get("GCP_GCS_PATH_TO_SAVED_FILE", "test-gcs-example-download.txt")
BUCKET_FILE_LOCATION = PATH_TO_UPLOAD_FILE.rpartition("/")[-1]
SDF_VERSION = os.environ.get("GMP_SDF_VERSION", "SDF_VERSION_5_1")
BQ_DATA_SET = os.environ.get("GMP_BQ_DATA_SET", "airflow_test")
GMP_PARTNER_ID = os.environ.get("GMP_PARTNER_ID", 123)
ENTITY_TYPE = os.environ.get("GMP_ENTITY_TYPE", "LineItem")
ERF_SOURCE_OBJECT = GoogleDisplayVideo360Hook.erf_uri(GMP_PARTNER_ID, ENTITY_TYPE)
REPORT = {
"kind": "doubleclickbidmanager#query",
"metadata": {
"title": "Polidea Test Report",
"dataRange": "LAST_7_DAYS",
"format": "CSV",
"sendNotification": False,
},
"params": {
"type": "TYPE_GENERAL",
"groupBys": ["FILTER_DATE", "FILTER_PARTNER"],
"filters": [{"type": "FILTER_PARTNER", "value": 1486931}],
"metrics": ["METRIC_IMPRESSIONS", "METRIC_CLICKS"],
"includeInviteData": True,
},
"schedule": {"frequency": "ONE_TIME"},
}
PARAMS = {"dataRange": "LAST_14_DAYS", "timezoneCode": "America/New_York"}
CREATE_SDF_DOWNLOAD_TASK_BODY_REQUEST: Dict = {
"version": SDF_VERSION,
"advertiserId": ADVERTISER_ID,
"inventorySourceFilter": {"inventorySourceIds": []},
}
DOWNLOAD_LINE_ITEMS_REQUEST: Dict = {
"filterType": ADVERTISER_ID,
"format": "CSV",
"fileSpec": "EWF"}
# [END howto_display_video_env_variables]
with models.DAG(
"example_display_video",
schedule_interval=None, # Override to match your needs,
start_date=dates.days_ago(1)
) as dag:
# [START howto_google_display_video_createquery_report_operator]
create_report = GoogleDisplayVideo360CreateReportOperator(
body=REPORT, task_id="create_report"
)
report_id = "{{ task_instance.xcom_pull('create_report', key='report_id') }}"
# [END howto_google_display_video_createquery_report_operator]
# [START howto_google_display_video_runquery_report_operator]
run_report = GoogleDisplayVideo360RunReportOperator(
report_id=report_id, params=PARAMS, task_id="run_report"
)
# [END howto_google_display_video_runquery_report_operator]
# [START howto_google_display_video_wait_report_operator]
wait_for_report = GoogleDisplayVideo360ReportSensor(
task_id="wait_for_report", report_id=report_id
)
# [END howto_google_display_video_wait_report_operator]
# [START howto_google_display_video_getquery_report_operator]
get_report = GoogleDisplayVideo360DownloadReportOperator(
report_id=report_id,
task_id="get_report",
bucket_name=BUCKET,
report_name="test1.csv",
)
# [END howto_google_display_video_getquery_report_operator]
# [START howto_google_display_video_deletequery_report_operator]
delete_report = GoogleDisplayVideo360DeleteReportOperator(
report_id=report_id, task_id="delete_report"
)
# [END howto_google_display_video_deletequery_report_operator]
# [START howto_google_display_video_upload_multiple_entity_read_files_to_big_query]
upload_erf_to_bq = GCSToBigQueryOperator(
task_id='upload_erf_to_bq',
bucket=BUCKET,
source_objects=ERF_SOURCE_OBJECT,
destination_project_dataset_table=f"{BQ_DATA_SET}.gcs_to_bq_table",
write_disposition='WRITE_TRUNCATE',
dag=dag)
# [END howto_google_display_video_upload_multiple_entity_read_files_to_big_query]
# [START howto_google_display_video_download_line_items_operator]
download_line_items = GoogleDisplayVideo360DownloadLineItemsOperator(
task_id="download_line_items",
request_body=DOWNLOAD_LINE_ITEMS_REQUEST,
bucket_name=BUCKET,
object_name=OBJECT_NAME,
gzip=False,
)
# [END howto_google_display_video_download_line_items_operator]
# [START howto_google_display_video_upload_line_items_operator]
upload_line_items = GoogleDisplayVideo360UploadLineItemsOperator(
task_id="upload_line_items",
bucket_name=BUCKET,
object_name=BUCKET_FILE_LOCATION,
)
# [END howto_google_display_video_upload_line_items_operator]
# [START howto_google_display_video_create_sdf_download_task_operator]
create_sdf_download_task = GoogleDisplayVideo360CreateSDFDownloadTaskOperator(
task_id="create_sdf_download_task", body_request=CREATE_SDF_DOWNLOAD_TASK_BODY_REQUEST
)
operation_name = '{{ task_instance.xcom_pull("create_sdf_download_task")["name"] }}'
# [END howto_google_display_video_create_sdf_download_task_operator]
# [START howto_google_display_video_wait_for_operation_sensor]
wait_for_operation = GoogleDisplayVideo360GetSDFDownloadOperationSensor(
task_id="wait_for_operation", operation_name=operation_name,
)
# [END howto_google_display_video_wait_for_operation_sensor]
# [START howto_google_display_video_save_sdf_in_gcs_operator]
save_sdf_in_gcs = GoogleDisplayVideo360SDFtoGCSOperator(
task_id="save_sdf_in_gcs",
operation_name=operation_name,
bucket_name=BUCKET,
object_name=BUCKET_FILE_LOCATION,
gzip=False,
)
# [END howto_google_display_video_save_sdf_in_gcs_operator]
# [START howto_google_display_video_gcs_to_big_query_operator]
upload_sdf_to_big_query = GCSToBigQueryOperator(
task_id="upload_sdf_to_big_query",
bucket=BUCKET,
source_objects=['{{ task_instance.xcom_pull("upload_sdf_to_bigquery")}}'],
destination_project_dataset_table=f"{BQ_DATA_SET}.gcs_to_bq_table",
schema_fields=[
{"name": "name", "type": "STRING", "mode": "NULLABLE"},
{"name": "post_abbr", "type": "STRING", "mode": "NULLABLE"},
],
write_disposition="WRITE_TRUNCATE",
dag=dag,
)
# [END howto_google_display_video_gcs_to_big_query_operator]
create_report >> run_report >> wait_for_report >> get_report >> delete_report
create_sdf_download_task >> wait_for_operation >> save_sdf_in_gcs >> upload_sdf_to_big_query
|
[] |
[] |
[
"GMP_BQ_DATA_SET",
"GMP_ENTITY_TYPE",
"GCP_GCS_PATH_TO_SAVED_FILE",
"GMP_DISPLAY_VIDEO_BUCKET",
"GMP_ADVERTISER_ID",
"GMP_OBJECT_NAME",
"GCP_GCS_PATH_TO_UPLOAD_FILE",
"GMP_SDF_VERSION",
"GMP_PARTNER_ID"
] |
[]
|
["GMP_BQ_DATA_SET", "GMP_ENTITY_TYPE", "GCP_GCS_PATH_TO_SAVED_FILE", "GMP_DISPLAY_VIDEO_BUCKET", "GMP_ADVERTISER_ID", "GMP_OBJECT_NAME", "GCP_GCS_PATH_TO_UPLOAD_FILE", "GMP_SDF_VERSION", "GMP_PARTNER_ID"]
|
python
| 9 | 0 | |
app.py
|
import os
import config
import logging
#print("DEBUG: {}".format(os.environ['DEBUG']))
#print("SQLALCHEMY_DATABASE_URI: {}".format(os.environ['SQLALCHEMY_DATABASE_URI']))
# Get the application instance
connex_app = config.connex_app
# connect logging between gunicorn and Flask
#gunicorn_logger = logging.getLogger("gunicorn.error")
gunicorn_logger = logging.getLogger("gunicorn.info")
connex_app.app.logger.handlers = gunicorn_logger.handlers
connex_app.app.logger.setLevel(gunicorn_logger.level)
# Read the swagger.yml file to configure the endpoints
connex_app.add_api('houston_service.yml', strict_validation=True)
if __name__ == "__main__":
connex_app.run(port=3000, debug=os.environ['DEBUG'])
|
[] |
[] |
[
"SQLALCHEMY_DATABASE_URI",
"DEBUG"
] |
[]
|
["SQLALCHEMY_DATABASE_URI", "DEBUG"]
|
python
| 2 | 0 | |
config.py
|
import os
class Config:
SECRET_KEY = os.environ.get("SECRET_KEY") or "hard to guess string"
TEMPORAL_FOLDER = os.environ.get("TEMPORAL_FOLER") or "tmp"
TEMPLATES_AUTORELOAD = True
class DevelopmentConfig(Config):
DEBUG = True
class ProductionConfig(Config):
DEBUG = False
config = {
"development": DevelopmentConfig,
"production": ProductionConfig,
"default": DevelopmentConfig
}
|
[] |
[] |
[
"SECRET_KEY",
"TEMPORAL_FOLER"
] |
[]
|
["SECRET_KEY", "TEMPORAL_FOLER"]
|
python
| 2 | 0 | |
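The Python rows above read environment variables through three different forms: os.getenv('DATABASE') in the isimip test, os.environ['DEBUG'] in app.py, and os.environ.get(...) with and without defaults in config.py and the Airflow example, yet each name lands in constarg the same way. The sketch below collects names from those three access forms; like the earlier snippets, it is only an illustration of how such columns could be produced, not the dataset's actual tooling.

```python
import re

# String-literal arguments to the three access patterns seen in the rows above:
# os.getenv("NAME"), os.environ["NAME"], os.environ.get("NAME", default).
PATTERNS = [
    re.compile(r"os\.getenv\(\s*['\"]([^'\"]+)['\"]"),
    re.compile(r"os\.environ\[\s*['\"]([^'\"]+)['\"]\s*\]"),
    re.compile(r"os\.environ\.get\(\s*['\"]([^'\"]+)['\"]"),
]

# Hypothetical excerpt combining one line from each of the Python rows above.
py_source = """
session = init_database_session(os.getenv('DATABASE'))
connex_app.run(port=3000, debug=os.environ['DEBUG'])
SECRET_KEY = os.environ.get("SECRET_KEY") or "hard to guess string"
"""

names = sorted({name for pattern in PATTERNS for name in pattern.findall(py_source)})
print(names)  # ['DATABASE', 'DEBUG', 'SECRET_KEY']
```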
test/e2e/e2e_test.go
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"context"
"fmt"
"math"
"os"
"sort"
"strings"
"testing"
"time"
v1 "k8s.io/api/core/v1"
schedulingv1 "k8s.io/api/scheduling/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
coreinformers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
v1qos "k8s.io/kubectl/pkg/util/qos"
"sigs.k8s.io/descheduler/cmd/descheduler/app/options"
deschedulerapi "sigs.k8s.io/descheduler/pkg/api"
"sigs.k8s.io/descheduler/pkg/descheduler"
"sigs.k8s.io/descheduler/pkg/descheduler/client"
"sigs.k8s.io/descheduler/pkg/descheduler/evictions"
eutils "sigs.k8s.io/descheduler/pkg/descheduler/evictions/utils"
nodeutil "sigs.k8s.io/descheduler/pkg/descheduler/node"
podutil "sigs.k8s.io/descheduler/pkg/descheduler/pod"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies"
"sigs.k8s.io/descheduler/pkg/descheduler/strategies/nodeutilization"
"sigs.k8s.io/descheduler/pkg/utils"
)
func MakePodSpec(priorityClassName string, gracePeriod *int64) v1.PodSpec {
return v1.PodSpec{
Containers: []v1.Container{{
Name: "pause",
ImagePullPolicy: "Never",
Image: "kubernetes/pause",
Ports: []v1.ContainerPort{{ContainerPort: 80}},
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("200Mi"),
},
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("100Mi"),
},
},
}},
PriorityClassName: priorityClassName,
TerminationGracePeriodSeconds: gracePeriod,
}
}
// RcByNameContainer returns a ReplicationController with the specified name and container
func RcByNameContainer(name, namespace string, replicas int32, labels map[string]string, gracePeriod *int64, priorityClassName string) *v1.ReplicationController {
zeroGracePeriod := int64(0)
// Add "name": name to the labels, overwriting if it exists.
labels["name"] = name
if gracePeriod == nil {
gracePeriod = &zeroGracePeriod
}
return &v1.ReplicationController{
TypeMeta: metav1.TypeMeta{
Kind: "ReplicationController",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: v1.ReplicationControllerSpec{
Replicas: func(i int32) *int32 { return &i }(replicas),
Selector: map[string]string{
"name": name,
},
Template: &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: labels,
},
Spec: MakePodSpec(priorityClassName, gracePeriod),
},
},
}
}
func initializeClient(t *testing.T) (clientset.Interface, coreinformers.NodeInformer, podutil.GetPodsAssignedToNodeFunc, chan struct{}) {
clientSet, err := client.CreateClient(os.Getenv("KUBECONFIG"))
if err != nil {
t.Errorf("Error during client creation with %v", err)
}
stopChannel := make(chan struct{})
sharedInformerFactory := informers.NewSharedInformerFactory(clientSet, 0)
nodeInformer := sharedInformerFactory.Core().V1().Nodes()
podInformer := sharedInformerFactory.Core().V1().Pods()
getPodsAssignedToNode, err := podutil.BuildGetPodsAssignedToNodeFunc(podInformer)
if err != nil {
t.Errorf("build get pods assigned to node function error: %v", err)
}
sharedInformerFactory.Start(stopChannel)
sharedInformerFactory.WaitForCacheSync(stopChannel)
waitForNodesReady(context.Background(), t, clientSet, nodeInformer)
return clientSet, nodeInformer, getPodsAssignedToNode, stopChannel
}
func waitForNodesReady(ctx context.Context, t *testing.T, clientSet clientset.Interface, nodeInformer coreinformers.NodeInformer) {
if err := wait.PollImmediate(5*time.Second, 30*time.Second, func() (bool, error) {
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
return false, err
}
readyNodes, err := nodeutil.ReadyNodes(ctx, clientSet, nodeInformer, "")
if err != nil {
return false, err
}
if len(nodeList.Items) != len(readyNodes) {
t.Logf("%v/%v nodes are ready. Waiting for all nodes to be ready...", len(readyNodes), len(nodeList.Items))
return false, nil
}
return true, nil
}); err != nil {
t.Fatalf("Error waiting for nodes to be ready: %v", err)
}
}
func runPodLifetimeStrategy(
ctx context.Context,
t *testing.T,
clientset clientset.Interface,
nodeInformer coreinformers.NodeInformer,
namespaces *deschedulerapi.Namespaces,
priorityClass string,
priority *int32,
evictCritical bool,
labelSelector *metav1.LabelSelector,
getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc,
) {
// Run descheduler.
evictionPolicyGroupVersion, err := eutils.SupportEviction(clientset)
if err != nil || len(evictionPolicyGroupVersion) == 0 {
t.Fatalf("%v", err)
}
nodes, err := nodeutil.ReadyNodes(ctx, clientset, nodeInformer, "")
if err != nil {
t.Fatalf("%v", err)
}
maxPodLifeTimeSeconds := uint(1)
strategies.PodLifeTime(
ctx,
clientset,
deschedulerapi.DeschedulerStrategy{
Enabled: true,
Params: &deschedulerapi.StrategyParameters{
PodLifeTime: &deschedulerapi.PodLifeTime{MaxPodLifeTimeSeconds: &maxPodLifeTimeSeconds},
Namespaces: namespaces,
ThresholdPriority: priority,
ThresholdPriorityClassName: priorityClass,
LabelSelector: labelSelector,
},
},
nodes,
evictions.NewPodEvictor(
clientset,
evictionPolicyGroupVersion,
false,
nil,
nil,
nodes,
getPodsAssignedToNode,
false,
evictCritical,
false,
false,
false,
),
getPodsAssignedToNode,
)
}
func getPodNames(pods []v1.Pod) []string {
names := []string{}
for _, pod := range pods {
names = append(names, pod.Name)
}
return names
}
func intersectStrings(lista, listb []string) []string {
commonNames := []string{}
for _, stra := range lista {
for _, strb := range listb {
if stra == strb {
commonNames = append(commonNames, stra)
break
}
}
}
return commonNames
}
func TestLowNodeUtilization(t *testing.T) {
ctx := context.Background()
clientSet, _, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
t.Errorf("Error listing node with %v", err)
}
nodes, workerNodes := splitNodesAndWorkerNodes(nodeList.Items)
t.Log("Creating testing namespace")
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
t.Fatalf("Unable to create ns %v", testNamespace.Name)
}
defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
// Make all worker nodes resource balanced
cleanUp, err := createBalancedPodForNodes(t, ctx, clientSet, testNamespace.Name, workerNodes, 0.5)
if err != nil {
t.Fatalf("Unable to create load balancing pods: %v", err)
}
defer cleanUp()
t.Log("Creating pods each consuming 10% of node's allocatable")
nodeCpu := workerNodes[0].Status.Allocatable[v1.ResourceCPU]
tenthOfCpu := int64(float64((&nodeCpu).MilliValue()) * 0.1)
t.Log("Creating pods all bound to a single node")
for i := 0; i < 4; i++ {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("lnu-pod-%v", i),
Namespace: testNamespace.Name,
Labels: map[string]string{"test": "node-utilization", "name": "test-rc-node-utilization"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "pause",
ImagePullPolicy: "Never",
Image: "kubernetes/pause",
Ports: []v1.ContainerPort{{ContainerPort: 80}},
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(tenthOfCpu, resource.DecimalSI),
},
Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(tenthOfCpu, resource.DecimalSI),
},
},
}},
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchFields: []v1.NodeSelectorRequirement{
{Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{workerNodes[0].Name}},
},
},
},
},
},
},
},
}
t.Logf("Creating pod %v in %v namespace for node %v", pod.Name, pod.Namespace, workerNodes[0].Name)
_, err := clientSet.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
t.Logf("Error creating LNU pods: %v", err)
if err = clientSet.CoreV1().Pods(pod.Namespace).DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(labels.Set(map[string]string{"test": "node-utilization", "name": "test-rc-node-utilization"})).String(),
}); err != nil {
t.Fatalf("Unable to delete LNU pods: %v", err)
}
return
}
}
t.Log("Creating RC with 4 replicas owning the created pods")
rc := RcByNameContainer("test-rc-node-utilization", testNamespace.Name, int32(4), map[string]string{"test": "node-utilization"}, nil, "")
if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, rc, metav1.CreateOptions{}); err != nil {
t.Fatalf("Error creating RC %v", err)
}
defer deleteRC(ctx, t, clientSet, rc)
waitForRCPodsRunning(ctx, t, clientSet, rc)
// Run LowNodeUtilization strategy
podEvictor := initPodEvictorOrFail(t, clientSet, getPodsAssignedToNode, nodes)
podFilter, err := podutil.NewOptions().WithFilter(podEvictor.Evictable().IsEvictable).BuildFilterFunc()
if err != nil {
t.Errorf("Error initializing pod filter function, %v", err)
}
podsOnMostUtilizedNode, err := podutil.ListPodsOnANode(workerNodes[0].Name, getPodsAssignedToNode, podFilter)
if err != nil {
t.Errorf("Error listing pods on a node %v", err)
}
podsBefore := len(podsOnMostUtilizedNode)
t.Log("Running LowNodeUtilization strategy")
nodeutilization.LowNodeUtilization(
ctx,
clientSet,
deschedulerapi.DeschedulerStrategy{
Enabled: true,
Params: &deschedulerapi.StrategyParameters{
NodeResourceUtilizationThresholds: &deschedulerapi.NodeResourceUtilizationThresholds{
Thresholds: deschedulerapi.ResourceThresholds{
v1.ResourceCPU: 70,
},
TargetThresholds: deschedulerapi.ResourceThresholds{
v1.ResourceCPU: 80,
},
},
},
},
workerNodes,
podEvictor,
getPodsAssignedToNode,
)
waitForTerminatingPodsToDisappear(ctx, t, clientSet, rc.Namespace)
podFilter, err = podutil.NewOptions().WithFilter(podEvictor.Evictable().IsEvictable).BuildFilterFunc()
if err != nil {
t.Errorf("Error initializing pod filter function, %v", err)
}
podsOnMostUtilizedNode, err = podutil.ListPodsOnANode(workerNodes[0].Name, getPodsAssignedToNode, podFilter)
if err != nil {
t.Errorf("Error listing pods on a node %v", err)
}
podsAfter := len(podsOnMostUtilizedNode)
if podsAfter >= podsBefore {
t.Fatalf("No pod has been evicted from %v node", workerNodes[0].Name)
}
t.Logf("Number of pods on node %v changed from %v to %v", workerNodes[0].Name, podsBefore, podsAfter)
}
// TODO(jchaloup): add testcases for two included/excluded namespaces
func TestNamespaceConstraintsInclude(t *testing.T) {
ctx := context.Background()
clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
t.Fatalf("Unable to create ns %v", testNamespace.Name)
}
defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
rc := RcByNameContainer("test-rc-podlifetime", testNamespace.Name, 5, map[string]string{"test": "podlifetime-include"}, nil, "")
if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, rc, metav1.CreateOptions{}); err != nil {
t.Errorf("Error creating deployment %v", err)
}
defer deleteRC(ctx, t, clientSet, rc)
// wait for a while so all the pods are at least few seconds older
time.Sleep(5 * time.Second)
// it's assumed all new pods are named differently from currently running -> no name collision
podList, err := clientSet.CoreV1().Pods(rc.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rc.Spec.Template.Labels).String()})
if err != nil {
t.Fatalf("Unable to list pods: %v", err)
}
if len(podList.Items) != 5 {
t.Fatalf("Expected 5 replicas, got %v instead", len(podList.Items))
}
initialPodNames := getPodNames(podList.Items)
sort.Strings(initialPodNames)
t.Logf("Existing pods: %v", initialPodNames)
t.Logf("set the strategy to delete pods from %v namespace", rc.Namespace)
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, &deschedulerapi.Namespaces{
Include: []string{rc.Namespace},
}, "", nil, false, nil, getPodsAssignedToNode)
// All pods are supposed to be deleted, wait until all the old pods are deleted
if err := wait.PollImmediate(time.Second, 20*time.Second, func() (bool, error) {
podList, err := clientSet.CoreV1().Pods(rc.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rc.Spec.Template.Labels).String()})
if err != nil {
return false, nil
}
includePodNames := getPodNames(podList.Items)
// validate all pod were deleted
if len(intersectStrings(initialPodNames, includePodNames)) > 0 {
t.Logf("Waiting until %v pods get deleted", intersectStrings(initialPodNames, includePodNames))
// check if there's at least one pod not in Terminating state
for _, pod := range podList.Items {
// In case podList contains newly created pods
if len(intersectStrings(initialPodNames, []string{pod.Name})) == 0 {
continue
}
if pod.DeletionTimestamp == nil {
t.Logf("Pod %v not in terminating state", pod.Name)
return false, nil
}
}
t.Logf("All %v pods are terminating", intersectStrings(initialPodNames, includePodNames))
}
return true, nil
}); err != nil {
t.Fatalf("Error waiting for pods to be deleted: %v", err)
}
}
func TestNamespaceConstraintsExclude(t *testing.T) {
ctx := context.Background()
clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
t.Fatalf("Unable to create ns %v", testNamespace.Name)
}
defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
rc := RcByNameContainer("test-rc-podlifetime", testNamespace.Name, 5, map[string]string{"test": "podlifetime-exclude"}, nil, "")
if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, rc, metav1.CreateOptions{}); err != nil {
t.Errorf("Error creating deployment %v", err)
}
defer deleteRC(ctx, t, clientSet, rc)
// wait for a while so all the pods are at least few seconds older
time.Sleep(5 * time.Second)
// it's assumed all new pods are named differently from currently running -> no name collision
podList, err := clientSet.CoreV1().Pods(rc.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rc.Spec.Template.Labels).String()})
if err != nil {
t.Fatalf("Unable to list pods: %v", err)
}
if len(podList.Items) != 5 {
t.Fatalf("Expected 5 replicas, got %v instead", len(podList.Items))
}
initialPodNames := getPodNames(podList.Items)
sort.Strings(initialPodNames)
t.Logf("Existing pods: %v", initialPodNames)
t.Logf("set the strategy to delete pods from namespaces except the %v namespace", rc.Namespace)
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, &deschedulerapi.Namespaces{
Exclude: []string{rc.Namespace},
}, "", nil, false, nil, getPodsAssignedToNode)
t.Logf("Waiting 10s")
time.Sleep(10 * time.Second)
podList, err = clientSet.CoreV1().Pods(rc.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rc.Spec.Template.Labels).String()})
if err != nil {
t.Fatalf("Unable to list pods after running strategy: %v", err)
}
excludePodNames := getPodNames(podList.Items)
sort.Strings(excludePodNames)
t.Logf("Existing pods: %v", excludePodNames)
// validate no pods were deleted
if len(intersectStrings(initialPodNames, excludePodNames)) != 5 {
t.Fatalf("None of %v pods are expected to be deleted", initialPodNames)
}
}
func TestEvictSystemCriticalPriority(t *testing.T) {
testEvictSystemCritical(t, false)
}
func TestEvictSystemCriticalPriorityClass(t *testing.T) {
testEvictSystemCritical(t, true)
}
func testEvictSystemCritical(t *testing.T, isPriorityClass bool) {
highPriority := int32(1000)
lowPriority := int32(500)
ctx := context.Background()
clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
t.Fatalf("Unable to create ns %v", testNamespace.Name)
}
defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
// create two priority classes
highPriorityClass := &schedulingv1.PriorityClass{
ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name()) + "-highpriority"},
Value: highPriority,
}
if _, err := clientSet.SchedulingV1().PriorityClasses().Create(ctx, highPriorityClass, metav1.CreateOptions{}); err != nil {
t.Fatalf("Error creating priorityclass %s: %v", highPriorityClass.Name, err)
}
defer clientSet.SchedulingV1().PriorityClasses().Delete(ctx, highPriorityClass.Name, metav1.DeleteOptions{})
lowPriorityClass := &schedulingv1.PriorityClass{
ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name()) + "-lowpriority"},
Value: lowPriority,
}
if _, err := clientSet.SchedulingV1().PriorityClasses().Create(ctx, lowPriorityClass, metav1.CreateOptions{}); err != nil {
t.Fatalf("Error creating priorityclass %s: %v", lowPriorityClass.Name, err)
}
defer clientSet.SchedulingV1().PriorityClasses().Delete(ctx, lowPriorityClass.Name, metav1.DeleteOptions{})
// Create a replication controller with the "system-node-critical" priority class (this gives the pods a priority of 2000001000)
rcCriticalPriority := RcByNameContainer("test-rc-podlifetime-criticalpriority", testNamespace.Name, 3,
map[string]string{"test": "podlifetime-criticalpriority"}, nil, "system-node-critical")
if _, err := clientSet.CoreV1().ReplicationControllers(rcCriticalPriority.Namespace).Create(ctx, rcCriticalPriority, metav1.CreateOptions{}); err != nil {
t.Errorf("Error creating rc %s: %v", rcCriticalPriority.Name, err)
}
defer deleteRC(ctx, t, clientSet, rcCriticalPriority)
// create two RCs with different priority classes in the same namespace
rcHighPriority := RcByNameContainer("test-rc-podlifetime-highpriority", testNamespace.Name, 3,
map[string]string{"test": "podlifetime-highpriority"}, nil, highPriorityClass.Name)
if _, err := clientSet.CoreV1().ReplicationControllers(rcHighPriority.Namespace).Create(ctx, rcHighPriority, metav1.CreateOptions{}); err != nil {
t.Errorf("Error creating rc %s: %v", rcHighPriority.Name, err)
}
defer deleteRC(ctx, t, clientSet, rcHighPriority)
rcLowPriority := RcByNameContainer("test-rc-podlifetime-lowpriority", testNamespace.Name, 3,
map[string]string{"test": "podlifetime-lowpriority"}, nil, lowPriorityClass.Name)
if _, err := clientSet.CoreV1().ReplicationControllers(rcLowPriority.Namespace).Create(ctx, rcLowPriority, metav1.CreateOptions{}); err != nil {
t.Errorf("Error creating rc %s: %v", rcLowPriority.Name, err)
}
defer deleteRC(ctx, t, clientSet, rcLowPriority)
// wait for a while so all the pods are at least few seconds older
time.Sleep(5 * time.Second)
// it's assumed all new pods are named differently from currently running -> no name collision
podListCriticalPriority, err := clientSet.CoreV1().Pods(rcCriticalPriority.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rcCriticalPriority.Spec.Template.Labels).String()})
if err != nil {
t.Fatalf("Unable to list pods: %v", err)
}
podListHighPriority, err := clientSet.CoreV1().Pods(rcHighPriority.Namespace).List(
ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rcHighPriority.Spec.Template.Labels).String()})
if err != nil {
t.Fatalf("Unable to list pods: %v", err)
}
podListLowPriority, err := clientSet.CoreV1().Pods(rcLowPriority.Namespace).List(
ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rcLowPriority.Spec.Template.Labels).String()})
if err != nil {
t.Fatalf("Unable to list pods: %v", err)
}
if (len(podListHighPriority.Items) + len(podListLowPriority.Items) + len(podListCriticalPriority.Items)) != 9 {
t.Fatalf("Expected 9 replicas, got %v instead", len(podListHighPriority.Items)+len(podListLowPriority.Items)+len(podListCriticalPriority.Items))
}
initialPodNames := append(getPodNames(podListHighPriority.Items), getPodNames(podListLowPriority.Items)...)
initialPodNames = append(initialPodNames, getPodNames(podListCriticalPriority.Items)...)
sort.Strings(initialPodNames)
t.Logf("Existing pods: %v", initialPodNames)
if isPriorityClass {
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, highPriorityClass.Name, nil, true, nil, getPodsAssignedToNode)
} else {
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", &highPriority, true, nil, getPodsAssignedToNode)
}
// All pods are supposed to be deleted, wait until all pods in the test namespace are terminating
t.Logf("All pods in the test namespace, no matter their priority (including system-node-critical and system-cluster-critical), will be deleted")
if err := wait.PollImmediate(time.Second, 30*time.Second, func() (bool, error) {
podList, err := clientSet.CoreV1().Pods(testNamespace.Name).List(ctx, metav1.ListOptions{})
if err != nil {
return false, nil
}
currentPodNames := getPodNames(podList.Items)
// validate all pod were deleted
if len(intersectStrings(initialPodNames, currentPodNames)) > 0 {
t.Logf("Waiting until %v pods get deleted", intersectStrings(initialPodNames, currentPodNames))
// check if there's at least one pod not in Terminating state
for _, pod := range podList.Items {
// In case podList contains newly created pods
if len(intersectStrings(initialPodNames, []string{pod.Name})) == 0 {
continue
}
if pod.DeletionTimestamp == nil {
t.Logf("Pod %v not in terminating state", pod.Name)
return false, nil
}
}
t.Logf("All %v pods are terminating", intersectStrings(initialPodNames, currentPodNames))
}
return true, nil
}); err != nil {
t.Fatalf("Error waiting for pods to be deleted: %v", err)
}
}
func TestThresholdPriority(t *testing.T) {
testPriority(t, false)
}
func TestThresholdPriorityClass(t *testing.T) {
testPriority(t, true)
}
func testPriority(t *testing.T, isPriorityClass bool) {
highPriority := int32(1000)
lowPriority := int32(500)
ctx := context.Background()
clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
t.Fatalf("Unable to create ns %v", testNamespace.Name)
}
defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
// create two priority classes
highPriorityClass := &schedulingv1.PriorityClass{
ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name()) + "-highpriority"},
Value: highPriority,
}
if _, err := clientSet.SchedulingV1().PriorityClasses().Create(ctx, highPriorityClass, metav1.CreateOptions{}); err != nil {
t.Fatalf("Error creating priorityclass %s: %v", highPriorityClass.Name, err)
}
defer clientSet.SchedulingV1().PriorityClasses().Delete(ctx, highPriorityClass.Name, metav1.DeleteOptions{})
lowPriorityClass := &schedulingv1.PriorityClass{
ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name()) + "-lowpriority"},
Value: lowPriority,
}
if _, err := clientSet.SchedulingV1().PriorityClasses().Create(ctx, lowPriorityClass, metav1.CreateOptions{}); err != nil {
t.Fatalf("Error creating priorityclass %s: %v", lowPriorityClass.Name, err)
}
defer clientSet.SchedulingV1().PriorityClasses().Delete(ctx, lowPriorityClass.Name, metav1.DeleteOptions{})
// create two RCs with different priority classes in the same namespace
rcHighPriority := RcByNameContainer("test-rc-podlifetime-highpriority", testNamespace.Name, 5,
map[string]string{"test": "podlifetime-highpriority"}, nil, highPriorityClass.Name)
if _, err := clientSet.CoreV1().ReplicationControllers(rcHighPriority.Namespace).Create(ctx, rcHighPriority, metav1.CreateOptions{}); err != nil {
t.Errorf("Error creating rc %s: %v", rcHighPriority.Name, err)
}
defer deleteRC(ctx, t, clientSet, rcHighPriority)
rcLowPriority := RcByNameContainer("test-rc-podlifetime-lowpriority", testNamespace.Name, 5,
map[string]string{"test": "podlifetime-lowpriority"}, nil, lowPriorityClass.Name)
if _, err := clientSet.CoreV1().ReplicationControllers(rcLowPriority.Namespace).Create(ctx, rcLowPriority, metav1.CreateOptions{}); err != nil {
t.Errorf("Error creating rc %s: %v", rcLowPriority.Name, err)
}
defer deleteRC(ctx, t, clientSet, rcLowPriority)
// wait for a while so all the pods are at least few seconds older
time.Sleep(5 * time.Second)
// it's assumed all new pods are named differently from currently running -> no name collision
podListHighPriority, err := clientSet.CoreV1().Pods(rcHighPriority.Namespace).List(
ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rcHighPriority.Spec.Template.Labels).String()})
if err != nil {
t.Fatalf("Unable to list pods: %v", err)
}
podListLowPriority, err := clientSet.CoreV1().Pods(rcLowPriority.Namespace).List(
ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rcLowPriority.Spec.Template.Labels).String()})
if err != nil {
t.Fatalf("Unable to list pods: %v", err)
}
if len(podListHighPriority.Items)+len(podListLowPriority.Items) != 10 {
t.Fatalf("Expected 10 replicas, got %v instead", len(podListHighPriority.Items)+len(podListLowPriority.Items))
}
expectReservePodNames := getPodNames(podListHighPriority.Items)
expectEvictPodNames := getPodNames(podListLowPriority.Items)
sort.Strings(expectReservePodNames)
sort.Strings(expectEvictPodNames)
t.Logf("Pods not expected to be evicted: %v, pods expected to be evicted: %v", expectReservePodNames, expectEvictPodNames)
if isPriorityClass {
t.Logf("set the strategy to delete pods with priority lower than priority class %s", highPriorityClass.Name)
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, highPriorityClass.Name, nil, false, nil, getPodsAssignedToNode)
} else {
t.Logf("set the strategy to delete pods with priority lower than %d", highPriority)
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", &highPriority, false, nil, getPodsAssignedToNode)
}
t.Logf("Waiting 10s")
time.Sleep(10 * time.Second)
// check if all pods with high priority class are not evicted
podListHighPriority, err = clientSet.CoreV1().Pods(rcHighPriority.Namespace).List(
ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rcHighPriority.Spec.Template.Labels).String()})
if err != nil {
t.Fatalf("Unable to list pods after running strategy: %v", err)
}
excludePodNames := getPodNames(podListHighPriority.Items)
sort.Strings(excludePodNames)
t.Logf("Existing high priority pods: %v", excludePodNames)
// validate no pods were deleted
if len(intersectStrings(expectReservePodNames, excludePodNames)) != 5 {
t.Fatalf("None of %v high priority pods are expected to be deleted", expectReservePodNames)
}
// check if all pods with low priority class are evicted
if err := wait.PollImmediate(time.Second, 30*time.Second, func() (bool, error) {
podListLowPriority, err := clientSet.CoreV1().Pods(rcLowPriority.Namespace).List(
ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rcLowPriority.Spec.Template.Labels).String()})
if err != nil {
return false, nil
}
includePodNames := getPodNames(podListLowPriority.Items)
// validate all pod were deleted
if len(intersectStrings(expectEvictPodNames, includePodNames)) > 0 {
t.Logf("Waiting until %v low priority pods get deleted", intersectStrings(expectEvictPodNames, includePodNames))
// check if there's at least one pod not in Terminating state
for _, pod := range podListLowPriority.Items {
// In case podList contains newly created pods
if len(intersectStrings(expectEvictPodNames, []string{pod.Name})) == 0 {
continue
}
if pod.DeletionTimestamp == nil {
t.Logf("Pod %v not in terminating state", pod.Name)
return false, nil
}
}
t.Logf("All %v pods are terminating", intersectStrings(expectEvictPodNames, includePodNames))
}
return true, nil
}); err != nil {
t.Fatalf("Error waiting for pods to be deleted: %v", err)
}
}
func TestPodLabelSelector(t *testing.T) {
ctx := context.Background()
clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
t.Fatalf("Unable to create ns %v", testNamespace.Name)
}
defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
// create two replicationControllers with different labels
rcEvict := RcByNameContainer("test-rc-podlifetime-evict", testNamespace.Name, 5, map[string]string{"test": "podlifetime-evict"}, nil, "")
if _, err := clientSet.CoreV1().ReplicationControllers(rcEvict.Namespace).Create(ctx, rcEvict, metav1.CreateOptions{}); err != nil {
t.Errorf("Error creating rc %v", err)
}
defer deleteRC(ctx, t, clientSet, rcEvict)
rcReserve := RcByNameContainer("test-rc-podlifetime-reserve", testNamespace.Name, 5, map[string]string{"test": "podlifetime-reserve"}, nil, "")
if _, err := clientSet.CoreV1().ReplicationControllers(rcReserve.Namespace).Create(ctx, rcReserve, metav1.CreateOptions{}); err != nil {
t.Errorf("Error creating rc %v", err)
}
defer deleteRC(ctx, t, clientSet, rcReserve)
// wait for a while so all the pods are at least few seconds older
time.Sleep(5 * time.Second)
// it's assumed all new pods are named differently from currently running -> no name collision
podListEvict, err := clientSet.CoreV1().Pods(rcEvict.Namespace).List(
ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rcEvict.Spec.Template.Labels).String()})
if err != nil {
t.Fatalf("Unable to list pods: %v", err)
}
podListReserve, err := clientSet.CoreV1().Pods(rcReserve.Namespace).List(
ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rcReserve.Spec.Template.Labels).String()})
if err != nil {
t.Fatalf("Unable to list pods: %v", err)
}
if len(podListEvict.Items)+len(podListReserve.Items) != 10 {
t.Fatalf("Expected 10 replicas, got %v instead", len(podListEvict.Items)+len(podListReserve.Items))
}
expectReservePodNames := getPodNames(podListReserve.Items)
expectEvictPodNames := getPodNames(podListEvict.Items)
sort.Strings(expectReservePodNames)
sort.Strings(expectEvictPodNames)
t.Logf("Pods not expected to be evicted: %v, pods expected to be evicted: %v", expectReservePodNames, expectEvictPodNames)
t.Logf("set the strategy to delete pods with label test:podlifetime-evict")
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", nil, false, &metav1.LabelSelector{MatchLabels: map[string]string{"test": "podlifetime-evict"}}, getPodsAssignedToNode)
t.Logf("Waiting 10s")
time.Sleep(10 * time.Second)
// check if all pods without target label are not evicted
podListReserve, err = clientSet.CoreV1().Pods(rcReserve.Namespace).List(
ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rcReserve.Spec.Template.Labels).String()})
if err != nil {
t.Fatalf("Unable to list pods after running strategy: %v", err)
}
reservedPodNames := getPodNames(podListReserve.Items)
sort.Strings(reservedPodNames)
t.Logf("Existing reserved pods: %v", reservedPodNames)
// validate no pods were deleted
if len(intersectStrings(expectReservePodNames, reservedPodNames)) != 5 {
t.Fatalf("None of %v unevictable pods are expected to be deleted", expectReservePodNames)
}
// check if all selected pods are evicted
if err := wait.PollImmediate(time.Second, 30*time.Second, func() (bool, error) {
podListEvict, err := clientSet.CoreV1().Pods(rcEvict.Namespace).List(
ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rcEvict.Spec.Template.Labels).String()})
if err != nil {
return false, nil
}
newPodNames := getPodNames(podListEvict.Items)
// validate all pod were deleted
if len(intersectStrings(expectEvictPodNames, newPodNames)) > 0 {
t.Logf("Waiting until %v selected pods get deleted", intersectStrings(expectEvictPodNames, newPodNames))
// check if there's at least one pod not in Terminating state
for _, pod := range podListEvict.Items {
// In case podList contains newly created pods
if len(intersectStrings(expectEvictPodNames, []string{pod.Name})) == 0 {
continue
}
if pod.DeletionTimestamp == nil {
t.Logf("Pod %v not in terminating state", pod.Name)
return false, nil
}
}
t.Logf("All %v pods are terminating", intersectStrings(expectEvictPodNames, newPodNames))
}
return true, nil
}); err != nil {
t.Fatalf("Error waiting for pods to be deleted: %v", err)
}
}
func TestEvictAnnotation(t *testing.T) {
ctx := context.Background()
clientSet, nodeInformer, getPodsAssignedToNode, stopCh := initializeClient(t)
defer close(stopCh)
testNamespace := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "e2e-" + strings.ToLower(t.Name())}}
if _, err := clientSet.CoreV1().Namespaces().Create(ctx, testNamespace, metav1.CreateOptions{}); err != nil {
t.Fatalf("Unable to create ns %v", testNamespace.Name)
}
defer clientSet.CoreV1().Namespaces().Delete(ctx, testNamespace.Name, metav1.DeleteOptions{})
t.Log("Create RC with pods with local storage which require descheduler.alpha.kubernetes.io/evict annotation to be set for eviction")
rc := RcByNameContainer("test-rc-evict-annotation", testNamespace.Name, int32(5), map[string]string{"test": "annotation"}, nil, "")
rc.Spec.Template.Annotations = map[string]string{"descheduler.alpha.kubernetes.io/evict": "true"}
rc.Spec.Template.Spec.Volumes = []v1.Volume{
{
Name: "sample",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{
SizeLimit: resource.NewQuantity(int64(10), resource.BinarySI),
},
},
},
}
if _, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Create(ctx, rc, metav1.CreateOptions{}); err != nil {
t.Errorf("Error creating deployment %v", err)
}
defer deleteRC(ctx, t, clientSet, rc)
t.Logf("Waiting 10s to make pods 10s old")
time.Sleep(10 * time.Second)
podList, err := clientSet.CoreV1().Pods(rc.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rc.Spec.Template.Labels).String()})
if err != nil {
t.Fatalf("Unable to list pods: %v", err)
}
if len(podList.Items) != 5 {
t.Fatalf("Expected 5 replicas, got %v instead", len(podList.Items))
}
initialPodNames := getPodNames(podList.Items)
sort.Strings(initialPodNames)
t.Logf("Existing pods: %v", initialPodNames)
t.Log("Running PodLifetime strategy")
runPodLifetimeStrategy(ctx, t, clientSet, nodeInformer, nil, "", nil, false, nil, getPodsAssignedToNode)
if err := wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) {
podList, err = clientSet.CoreV1().Pods(rc.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rc.Spec.Template.Labels).String()})
if err != nil {
return false, fmt.Errorf("Unable to list pods after running strategy: %v", err)
}
excludePodNames := getPodNames(podList.Items)
sort.Strings(excludePodNames)
t.Logf("Existing pods: %v", excludePodNames)
// validate no pods were deleted
if len(intersectStrings(initialPodNames, excludePodNames)) > 0 {
t.Logf("Not every pods was evicted")
return false, nil
}
return true, nil
}); err != nil {
t.Fatalf("Error waiting for pods to be deleted: %v", err)
}
}
func TestDeschedulingInterval(t *testing.T) {
ctx := context.Background()
clientSet, err := client.CreateClient(os.Getenv("KUBECONFIG"))
if err != nil {
t.Errorf("Error during client creation with %v", err)
}
// By default, the DeschedulingInterval param should be set to 0, meaning Descheduler only runs once then exits
s, err := options.NewDeschedulerServer()
if err != nil {
t.Fatalf("Unable to initialize server: %v", err)
}
s.Client = clientSet
deschedulerPolicy := &deschedulerapi.DeschedulerPolicy{}
c := make(chan bool, 1)
go func() {
evictionPolicyGroupVersion, err := eutils.SupportEviction(s.Client)
if err != nil || len(evictionPolicyGroupVersion) == 0 {
t.Errorf("Error when checking support for eviction: %v", err)
}
if err := descheduler.RunDeschedulerStrategies(ctx, s, deschedulerPolicy, evictionPolicyGroupVersion); err != nil {
t.Errorf("Error running descheduler strategies: %+v", err)
}
c <- true
}()
select {
case <-c:
// successfully returned
case <-time.After(3 * time.Minute):
t.Errorf("descheduler.Run timed out even without descheduling-interval set")
}
}
func waitForRCPodsRunning(ctx context.Context, t *testing.T, clientSet clientset.Interface, rc *v1.ReplicationController) {
if err := wait.PollImmediate(5*time.Second, 30*time.Second, func() (bool, error) {
podList, err := clientSet.CoreV1().Pods(rc.Namespace).List(ctx, metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(labels.Set(rc.Spec.Template.ObjectMeta.Labels)).String(),
})
if err != nil {
return false, err
}
if len(podList.Items) != int(*rc.Spec.Replicas) {
t.Logf("Waiting for %v pods to be created, got %v instead", *rc.Spec.Replicas, len(podList.Items))
return false, nil
}
for _, pod := range podList.Items {
if pod.Status.Phase != v1.PodRunning {
t.Logf("Pod %v not running yet, is %v instead", pod.Name, pod.Status.Phase)
return false, nil
}
}
return true, nil
}); err != nil {
t.Fatalf("Error waiting for pods running: %v", err)
}
}
func waitForTerminatingPodsToDisappear(ctx context.Context, t *testing.T, clientSet clientset.Interface, namespace string) {
if err := wait.PollImmediate(5*time.Second, 30*time.Second, func() (bool, error) {
podList, err := clientSet.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{})
if err != nil {
return false, err
}
for _, pod := range podList.Items {
if pod.DeletionTimestamp != nil {
t.Logf("Pod %v still terminating", pod.Name)
return false, nil
}
}
return true, nil
}); err != nil {
t.Fatalf("Error waiting for terminating pods to disappear: %v", err)
}
}
func deleteRC(ctx context.Context, t *testing.T, clientSet clientset.Interface, rc *v1.ReplicationController) {
	// Scale the replication controller down to 0 replicas first so its pods are
	// removed before the controller itself is deleted.
	rcdeepcopy := rc.DeepCopy()
	rcdeepcopy.Spec.Replicas = func(i int32) *int32 { return &i }(0) // pointer to a literal 0
if _, err := clientSet.CoreV1().ReplicationControllers(rcdeepcopy.Namespace).Update(ctx, rcdeepcopy, metav1.UpdateOptions{}); err != nil {
t.Fatalf("Error updating replica controller %v", err)
}
// wait 30 seconds until all pods are deleted
if err := wait.PollImmediate(5*time.Second, 30*time.Second, func() (bool, error) {
scale, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).GetScale(ctx, rc.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
return scale.Spec.Replicas == 0, nil
}); err != nil {
t.Fatalf("Error deleting rc pods %v", err)
}
if err := wait.PollImmediate(5*time.Second, time.Minute, func() (bool, error) {
podList, _ := clientSet.CoreV1().Pods(rc.Namespace).List(ctx, metav1.ListOptions{LabelSelector: labels.SelectorFromSet(rc.Spec.Template.Labels).String()})
t.Logf("Waiting for %v RC pods to disappear, still %v remaining", rc.Name, len(podList.Items))
if len(podList.Items) > 0 {
return false, nil
}
return true, nil
}); err != nil {
t.Fatalf("Error waiting for rc pods to disappear: %v", err)
}
if err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Delete(ctx, rc.Name, metav1.DeleteOptions{}); err != nil {
t.Fatalf("Error deleting rc %v", err)
}
if err := wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
_, err := clientSet.CoreV1().ReplicationControllers(rc.Namespace).Get(ctx, rc.Name, metav1.GetOptions{})
if err != nil && strings.Contains(err.Error(), "not found") {
return true, nil
}
return false, nil
}); err != nil {
t.Fatalf("Error deleting rc %v", err)
}
}
var balancePodLabel = map[string]string{"podname": "priority-balanced-memory"}
// track the minimum memory limit based on the cri-o minimum; pods cannot set a limit lower than this
// see: https://github.com/cri-o/cri-o/blob/29805b13e9a43d9d22628553db337ce1c1bec0a8/internal/config/cgmgr/cgmgr.go#L23
// see: https://bugzilla.redhat.com/show_bug.cgi?id=1595256
var crioMinMemLimit = 12 * 1024 * 1024
var podRequestedResource = &v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("100Mi"),
v1.ResourceCPU: resource.MustParse("100m"),
},
Requests: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("100Mi"),
v1.ResourceCPU: resource.MustParse("100m"),
},
}
// createBalancedPodForNodes creates a pod per node that asks for enough resources to make all nodes have the same mem/cpu usage ratio.
// TODO(jchaloup): The function is updated version of what under https://github.com/kubernetes/kubernetes/blob/84483a5/test/e2e/scheduling/priorities.go#L478.
// Import it once the function is moved under k8s.io/components-helper repo and modified to work for both priority and predicates cases.
func createBalancedPodForNodes(
t *testing.T,
ctx context.Context,
cs clientset.Interface,
ns string,
nodes []*v1.Node,
ratio float64,
) (func(), error) {
cleanUp := func() {
// Delete all remaining pods
err := cs.CoreV1().Pods(ns).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(labels.Set(balancePodLabel)).String(),
})
if err != nil {
t.Logf("Failed to delete memory balanced pods: %v.", err)
} else {
err := wait.PollImmediate(2*time.Second, time.Minute, func() (bool, error) {
podList, err := cs.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(labels.Set(balancePodLabel)).String(),
})
if err != nil {
t.Logf("Failed to list memory balanced pods: %v.", err)
return false, nil
}
if len(podList.Items) > 0 {
return false, nil
}
return true, nil
})
if err != nil {
t.Logf("Failed to wait until all memory balanced pods are deleted: %v.", err)
}
}
}
	// Find the max fraction across nodes: if a node is already above the requested
	// ratio, use that node's value; otherwise fall back to the ratio parameter.
var maxCPUFraction, maxMemFraction float64 = ratio, ratio
cpuFractionMap := make(map[string]float64)
memFractionMap := make(map[string]float64)
for _, node := range nodes {
cpuFraction, memFraction, _, _ := computeCPUMemFraction(t, cs, node, podRequestedResource)
cpuFractionMap[node.Name] = cpuFraction
memFractionMap[node.Name] = memFraction
if cpuFraction > maxCPUFraction {
maxCPUFraction = cpuFraction
}
if memFraction > maxMemFraction {
maxMemFraction = memFraction
}
}
	// Use the max so every node ends up with the same cpu/mem usage ratio.
ratio = math.Max(maxCPUFraction, maxMemFraction)
for _, node := range nodes {
memAllocatable, found := node.Status.Allocatable[v1.ResourceMemory]
if !found {
t.Fatalf("Failed to get node's Allocatable[v1.ResourceMemory]")
}
memAllocatableVal := memAllocatable.Value()
cpuAllocatable, found := node.Status.Allocatable[v1.ResourceCPU]
if !found {
t.Fatalf("Failed to get node's Allocatable[v1.ResourceCPU]")
}
cpuAllocatableMil := cpuAllocatable.MilliValue()
needCreateResource := v1.ResourceList{}
cpuFraction := cpuFractionMap[node.Name]
memFraction := memFractionMap[node.Name]
needCreateResource[v1.ResourceCPU] = *resource.NewMilliQuantity(int64((ratio-cpuFraction)*float64(cpuAllocatableMil)), resource.DecimalSI)
// add crioMinMemLimit to ensure that all pods are setting at least that much for a limit, while keeping the same ratios
needCreateResource[v1.ResourceMemory] = *resource.NewQuantity(int64((ratio-memFraction)*float64(memAllocatableVal)+float64(crioMinMemLimit)), resource.BinarySI)
gracePeriod := int64(1)
// Don't set OwnerReferences to avoid pod eviction
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "filler-pod-" + string(uuid.NewUUID()),
Namespace: ns,
Labels: balancePodLabel,
},
Spec: v1.PodSpec{
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchFields: []v1.NodeSelectorRequirement{
{Key: "metadata.name", Operator: v1.NodeSelectorOpIn, Values: []string{node.Name}},
},
},
},
},
},
},
Containers: []v1.Container{
{
Name: "pause",
Image: "kubernetes/pause",
Resources: v1.ResourceRequirements{
Limits: needCreateResource,
Requests: needCreateResource,
},
},
},
// PriorityClassName: conf.PriorityClassName,
TerminationGracePeriodSeconds: &gracePeriod,
},
}
t.Logf("Creating pod %v in %v namespace for node %v", pod.Name, pod.Namespace, node.Name)
_, err := cs.CoreV1().Pods(pod.Namespace).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
t.Logf("Error creating filler pod: %v", err)
return cleanUp, err
}
waitForPodRunning(ctx, t, cs, pod)
}
for _, node := range nodes {
t.Log("Compute Cpu, Mem Fraction after create balanced pods.")
computeCPUMemFraction(t, cs, node, podRequestedResource)
}
return cleanUp, nil
}
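// fillerPodMemoryBytes is an illustrative sketch only (it is not called by any
// test): it mirrors the per-node memory arithmetic createBalancedPodForNodes
// uses for its filler pods. With assumed numbers (target ratio 0.5, current
// memFraction 0.2, 8Gi allocatable) a filler pod would request roughly
// 0.3*8Gi plus crioMinMemLimit.
func fillerPodMemoryBytes(ratio, memFraction float64, memAllocatableVal int64) int64 {
	return int64((ratio-memFraction)*float64(memAllocatableVal) + float64(crioMinMemLimit))
}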
func computeCPUMemFraction(t *testing.T, cs clientset.Interface, node *v1.Node, resourceReq *v1.ResourceRequirements) (float64, float64, int64, int64) {
t.Logf("ComputeCPUMemFraction for node: %v", node.Name)
totalRequestedCPUResource := resourceReq.Requests.Cpu().MilliValue()
totalRequestedMemResource := resourceReq.Requests.Memory().Value()
allpods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{})
if err != nil {
t.Fatalf("Expect error of invalid, got : %v", err)
}
for _, pod := range allpods.Items {
if pod.Spec.NodeName == node.Name {
req, _ := utils.PodRequestsAndLimits(&pod)
if _, ok := req[v1.ResourceCPU]; !ok {
req[v1.ResourceCPU] = *resource.NewMilliQuantity(0, resource.DecimalSI)
}
if _, ok := req[v1.ResourceMemory]; !ok {
req[v1.ResourceMemory] = *resource.NewQuantity(0, resource.BinarySI)
}
cpuRequest := req[v1.ResourceCPU]
memoryRequest := req[v1.ResourceMemory]
t.Logf("Pod for on the node: %v, Cpu: %v, Mem: %v", pod.Name, (&cpuRequest).MilliValue(), (&memoryRequest).Value())
			// Ignore best effort pods while computing fractions as they won't be taken into account by the scheduler.
if v1qos.GetPodQOS(&pod) == v1.PodQOSBestEffort {
continue
}
totalRequestedCPUResource += (&cpuRequest).MilliValue()
totalRequestedMemResource += (&memoryRequest).Value()
}
}
cpuAllocatable, found := node.Status.Allocatable[v1.ResourceCPU]
if !found {
t.Fatalf("Failed to get node's Allocatable[v1.ResourceCPU]")
}
cpuAllocatableMil := cpuAllocatable.MilliValue()
floatOne := float64(1)
cpuFraction := float64(totalRequestedCPUResource) / float64(cpuAllocatableMil)
if cpuFraction > floatOne {
cpuFraction = floatOne
}
memAllocatable, found := node.Status.Allocatable[v1.ResourceMemory]
if !found {
t.Fatalf("Failed to get node's Allocatable[v1.ResourceMemory]")
}
memAllocatableVal := memAllocatable.Value()
memFraction := float64(totalRequestedMemResource) / float64(memAllocatableVal)
if memFraction > floatOne {
memFraction = floatOne
}
t.Logf("Node: %v, totalRequestedCPUResource: %v, cpuAllocatableMil: %v, cpuFraction: %v", node.Name, totalRequestedCPUResource, cpuAllocatableMil, cpuFraction)
t.Logf("Node: %v, totalRequestedMemResource: %v, memAllocatableVal: %v, memFraction: %v", node.Name, totalRequestedMemResource, memAllocatableVal, memFraction)
return cpuFraction, memFraction, cpuAllocatableMil, memAllocatableVal
}
func waitForPodRunning(ctx context.Context, t *testing.T, clientSet clientset.Interface, pod *v1.Pod) {
if err := wait.PollImmediate(5*time.Second, 30*time.Second, func() (bool, error) {
podItem, err := clientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
if podItem.Status.Phase != v1.PodRunning {
t.Logf("Pod %v not running yet, is %v instead", podItem.Name, podItem.Status.Phase)
return false, nil
}
return true, nil
}); err != nil {
t.Fatalf("Error waiting for pod running: %v", err)
}
}
func waitForPodsRunning(ctx context.Context, t *testing.T, clientSet clientset.Interface, labelMap map[string]string, desireRunningPodNum int, namespace string) {
if err := wait.PollImmediate(10*time.Second, 60*time.Second, func() (bool, error) {
podList, err := clientSet.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{
LabelSelector: labels.SelectorFromSet(labelMap).String(),
})
if err != nil {
return false, err
}
if len(podList.Items) != desireRunningPodNum {
t.Logf("Waiting for %v pods to be running, got %v instead", desireRunningPodNum, len(podList.Items))
return false, nil
}
for _, pod := range podList.Items {
if pod.Status.Phase != v1.PodRunning {
t.Logf("Pod %v not running yet, is %v instead", pod.Name, pod.Status.Phase)
return false, nil
}
}
return true, nil
}); err != nil {
t.Fatalf("Error waiting for pods running: %v", err)
}
}
func splitNodesAndWorkerNodes(nodes []v1.Node) ([]*v1.Node, []*v1.Node) {
var allNodes []*v1.Node
var workerNodes []*v1.Node
for i := range nodes {
node := nodes[i]
allNodes = append(allNodes, &node)
if _, exists := node.Labels["node-role.kubernetes.io/control-plane"]; !exists {
workerNodes = append(workerNodes, &node)
}
}
return allNodes, workerNodes
}
func initPodEvictorOrFail(t *testing.T, clientSet clientset.Interface, getPodsAssignedToNode podutil.GetPodsAssignedToNodeFunc, nodes []*v1.Node) *evictions.PodEvictor {
evictionPolicyGroupVersion, err := eutils.SupportEviction(clientSet)
if err != nil || len(evictionPolicyGroupVersion) == 0 {
t.Fatalf("Error creating eviction policy group: %v", err)
}
return evictions.NewPodEvictor(
clientSet,
evictionPolicyGroupVersion,
false,
nil,
nil,
nodes,
getPodsAssignedToNode,
true,
false,
false,
false,
false,
)
}
|
[
"\"KUBECONFIG\"",
"\"KUBECONFIG\""
] |
[] |
[
"KUBECONFIG"
] |
[]
|
["KUBECONFIG"]
|
go
| 1 | 0 | |
kivyeExemplosDocumentacao/linguagem-kivy/Acessando-Widgets-definidos-com-a-linguagem-kv.py
|
import os
os.environ['KIVY_GL_BACKEND'] = 'angle_sdl2'
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.screenmanager import Screen
from kivy.lang import Builder
from kivy.properties import ObjectProperty
Builder.load_file('view/exemplo1.kv')
class MyWidget(Screen):
pass
class MyFirstWidget(BoxLayout):
    txt_inpt = ObjectProperty(None)  # REMEMBER: using ObjectProperty is considered good practice
    def check_status(self, btn):
        print('button state is: {state}'.format(state=btn.state))
        print('text input text is: {txt}'.format(txt=self.txt_inpt))
def hulk_smash(self):
self.ids.hulk.text = "hulk: puny god!"
self.ids["loki"].text = "loki: >_<!!!" # alternative syntax
class Exemplo1(App):
def build(self):
return MyFirstWidget()
if __name__=='__main__':
Exemplo1().run()
|
[] |
[] |
[
"KIVY_GL_BACKEND"
] |
[]
|
["KIVY_GL_BACKEND"]
|
python
| 1 | 0 | |
tests/test_pocs.py
|
import os
import threading
import time
import pytest
import requests
from astropy import units as u
from panoptes.pocs import hardware
from panoptes.pocs.core import POCS
from panoptes.pocs.observatory import Observatory
from panoptes.utils.config.client import set_config
from panoptes.utils.serializers import to_json, to_yaml
from panoptes.pocs.mount import create_mount_simulator
from panoptes.pocs.dome import create_dome_simulator
from panoptes.pocs.camera import create_cameras_from_config
from panoptes.pocs.scheduler import create_scheduler_from_config
from panoptes.pocs.utils.location import create_location_from_config
def reset_conf(config_host, config_port):
url = f'http://{config_host}:{config_port}/reset-config'
response = requests.post(url,
data=to_json({'reset': True}),
headers={'Content-Type': 'application/json'}
)
assert response.ok
@pytest.fixture(scope='function')
def cameras():
return create_cameras_from_config()
@pytest.fixture(scope='function')
def mount():
return create_mount_simulator()
@pytest.fixture(scope='function')
def pocstime_night():
return "2020-01-01 08:00:00"
@pytest.fixture(scope='function')
def pocstime_day():
return "2020-01-01 22:00:00"
@pytest.fixture(scope='function')
def site_details():
return create_location_from_config()
@pytest.fixture(scope='function')
def scheduler(site_details):
return create_scheduler_from_config(observer=site_details['observer'])
@pytest.fixture(scope='function')
def observatory(cameras, mount, site_details, scheduler):
"""Return a valid Observatory instance with a specific config."""
obs = Observatory(scheduler=scheduler, simulator=['power', 'weather'])
for cam_name, cam in cameras.items():
obs.add_camera(cam_name, cam)
obs.set_mount(mount)
return obs
@pytest.fixture(scope='function')
def dome():
set_config('dome', {
'brand': 'Simulacrum',
'driver': 'simulator',
})
return create_dome_simulator()
@pytest.fixture(scope='function')
def pocs(observatory, config_host, config_port):
os.environ['POCSTIME'] = '2020-01-01 08:00:00'
pocs = POCS(observatory, run_once=True, simulators=['power'])
yield pocs
pocs.power_down()
reset_conf(config_host, config_port)
@pytest.fixture(scope='function')
def pocs_with_dome(pocs, dome):
# Add dome to config
os.environ['POCSTIME'] = '2020-01-01 08:00:00'
pocs.observatory.set_dome(dome)
yield pocs
pocs.power_down()
# An observation that is valid at night
@pytest.fixture(scope='module')
def valid_observation():
return {
'name': 'TEST TARGET',
'position': '100.00 deg +00.887 deg',
'priority': '100',
'exptime': 2,
'min_nexp': 2,
'exp_set_size': 2,
}
# An observation that is valid during the day
@pytest.fixture(scope='module')
def valid_observation_day():
return {
'name': 'TEST TARGET',
'position': '300.00 deg +70.887 deg',
'priority': '100',
'exptime': 2,
'min_nexp': 2,
'exp_set_size': 2,
}
def test_observatory_cannot_observe(pocs):
scheduler = pocs.observatory.scheduler
pocs.observatory.scheduler = None
assert pocs.initialize() is False
pocs.observatory.scheduler = scheduler
assert pocs.initialize()
assert pocs.is_initialized
# Make sure we can do it twice.
assert pocs.initialize()
assert pocs.is_initialized
def test_simple_simulator(pocs, caplog):
assert isinstance(pocs, POCS)
pocs.set_config('simulator', 'all')
assert pocs.is_initialized is not True
# Not initialized returns false and gives warning.
assert pocs.run() is False
log_record = caplog.records[-1]
assert log_record.message == 'POCS not initialized' and log_record.levelname == "WARNING"
pocs.initialize()
assert pocs.is_initialized
pocs.state = 'parking'
pocs.next_state = 'parking'
assert pocs._lookup_trigger() == 'set_park'
pocs.state = 'foo'
assert pocs._lookup_trigger() == 'parking'
assert pocs.is_safe()
def test_is_weather_and_dark_simulator(pocs, pocstime_night, pocstime_day):
pocs.initialize()
# Night simulator
pocs.set_config('simulator', 'all')
os.environ['POCSTIME'] = pocstime_night # is dark
assert pocs.is_dark() is True
os.environ['POCSTIME'] = pocstime_day # is day
assert pocs.is_dark() is True
# No night simulator
pocs.set_config('simulator', hardware.get_all_names(without=['night']))
os.environ['POCSTIME'] = pocstime_night # is dark
assert pocs.is_dark() is True
os.environ['POCSTIME'] = pocstime_day # is day
assert pocs.is_dark() is False
pocs.set_config('simulator', ['camera', 'mount', 'weather', 'night'])
assert pocs.is_weather_safe() is True
def test_is_weather_safe_no_simulator(pocs):
pocs.initialize()
pocs.set_config('simulator', hardware.get_all_names(without=['weather']))
# Set a specific time
os.environ['POCSTIME'] = '2020-01-01 18:00:00'
# Insert a dummy weather record
pocs.db.insert_current('weather', {'safe': True})
assert pocs.is_weather_safe() is True
    # Set a time 301 seconds later so the previous weather reading is stale
os.environ['POCSTIME'] = '2020-01-01 18:05:01'
assert pocs.is_weather_safe() is False
def test_no_ac_power(pocs):
# Simulator makes AC power safe
assert pocs.has_ac_power() is True
# Remove 'power' from simulator
pocs.set_config('simulator', hardware.get_all_names(without=['power']))
pocs.initialize()
# With simulator removed the power should fail
assert pocs.has_ac_power() is False
for v in [True, 12.4, 0., False]:
has_power = bool(v)
# Add a fake power entry in data base
pocs.db.insert_current('power', {'main': v})
# Check for safe entry in database
assert pocs.has_ac_power() == has_power
assert pocs.is_safe() == has_power
# Check for stale entry in database
assert pocs.has_ac_power(stale=0.1) is False
# But double check it still matches longer entry
assert pocs.has_ac_power() == has_power
# Remove entry and try again
pocs.db.clear_current('power')
assert pocs.has_ac_power() is False
def test_power_down_while_running(pocs):
assert pocs.connected is True
assert not pocs.observatory.has_dome
pocs.initialize()
pocs.get_ready()
assert pocs.state == 'ready'
pocs.power_down()
assert pocs.observatory.mount.is_parked
assert pocs.connected is False
def test_power_down_dome_while_running(pocs_with_dome):
pocs = pocs_with_dome
assert pocs.connected is True
assert pocs.observatory.has_dome
assert not pocs.observatory.dome.is_connected
pocs.initialize()
assert pocs.observatory.dome.is_connected
pocs.get_ready()
assert pocs.state == 'ready'
pocs.power_down()
assert pocs.observatory.mount.is_parked
assert pocs.connected is False
assert not pocs.observatory.dome.is_connected
def test_run_no_targets_and_exit(pocs):
os.environ['POCSTIME'] = '2020-01-01 19:00:00'
pocs.set_config('simulator', 'all')
pocs.state = 'sleeping'
pocs.initialize()
pocs.observatory.scheduler.clear_available_observations()
assert pocs.is_initialized is True
pocs.run(exit_when_done=True, run_once=True)
assert pocs.state == 'sleeping'
def test_pocs_park_to_ready_with_observations(pocs):
# We don't want to run_once here
pocs.run_once = False
assert pocs.is_safe() is True
assert pocs.state == 'sleeping'
pocs.next_state = 'ready'
assert pocs.initialize()
assert pocs.goto_next_state()
assert pocs.state == 'ready'
assert pocs.goto_next_state()
assert pocs.state == 'scheduling'
assert pocs.observatory.current_observation is not None
# Manually set to parking
pocs.next_state = 'parking'
assert pocs.goto_next_state()
assert pocs.state == 'parking'
assert pocs.observatory.current_observation is None
assert pocs.observatory.mount.is_parked
assert pocs.goto_next_state()
assert pocs.state == 'parked'
# Should be safe and still have valid observations so next state should be ready
assert pocs.goto_next_state()
assert pocs.state == 'ready'
pocs.power_down()
assert pocs.connected is False
def test_pocs_park_to_ready_without_observations(pocs):
os.environ['POCSTIME'] = '2020-01-01 08:00:00'
pocs.logger.warning(f'Inserting safe weather reading')
pocs.db.insert_current('weather', {'safe': True})
assert pocs.is_safe() is True
assert pocs.state == 'sleeping'
pocs.next_state = 'ready'
assert pocs.initialize()
pocs.logger.warning(f'Moving to ready')
assert pocs.goto_next_state()
assert pocs.state == 'ready'
pocs.logger.warning(f'Moving to scheduling')
assert pocs.goto_next_state()
assert pocs.observatory.current_observation is not None
pocs.next_state = 'parking'
pocs.logger.warning(f'Moving to parking')
assert pocs.goto_next_state()
assert pocs.state == 'parking'
assert pocs.observatory.current_observation is None
assert pocs.observatory.mount.is_parked
# No valid obs
pocs.observatory.scheduler.clear_available_observations()
pocs.interrupted = True
assert pocs.goto_next_state()
assert pocs.state == 'parked'
pocs.power_down()
assert pocs.connected is False
assert pocs.is_safe() is False
def test_run_wait_until_safe(observatory, valid_observation_day, pocstime_day, pocstime_night):
os.environ['POCSTIME'] = pocstime_day
    # Remove the night simulator, else it would always be considered dark.
observatory.set_config('simulator', hardware.get_all_names(without=['night']))
pocs = POCS(observatory)
pocs.set_config('wait_delay', 5) # Check safety every 5 seconds.
pocs.observatory.scheduler.clear_available_observations()
pocs.observatory.scheduler.add_observation(valid_observation_day)
assert pocs.connected is True
assert pocs.is_initialized is False
pocs.initialize()
pocs.logger.info('Starting observatory run')
    # Not dark yet; the unit is connected but not yet set up.
assert not pocs.is_dark()
assert pocs.is_initialized
assert pocs.connected
assert pocs.do_states
assert pocs.next_state is None
pocs.set_config('wait_delay', 1)
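    # Pattern used here and in the following tests: POCS.run() blocks, so it is
    # started in a daemon thread; the main test thread then drives the simulated
    # conditions (POCSTIME, weather records) and finally stops the state machine
    # by flipping the pocs.DO_STATES config flag on the config server.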
def start_pocs():
# Start running, BLOCKING.
pocs.logger.info(f'start_pocs ENTER')
pocs.run(run_once=True, exit_when_done=True)
# After done running.
assert pocs.is_weather_safe() is True
pocs.power_down()
observatory.logger.info('start_pocs EXIT')
pocs_thread = threading.Thread(target=start_pocs, daemon=True)
pocs_thread.start()
assert pocs.is_safe(park_if_not_safe=False) is False
# Wait to pretend we're waiting for horizon
time.sleep(5)
os.environ['POCSTIME'] = pocstime_night
assert pocs.is_dark()
pocs.logger.warning(f'Waiting to get to slewing state...')
while pocs.next_state != 'slewing':
time.sleep(1)
pocs.logger.warning(f'Stopping states via pocs.DO_STATES')
observatory.set_config('pocs.DO_STATES', False)
observatory.logger.warning(f'Waiting on pocs_thread')
pocs_thread.join(timeout=300)
assert pocs_thread.is_alive() is False
def test_unsafe_park(observatory, valid_observation, pocstime_night):
os.environ['POCSTIME'] = pocstime_night
    # Remove the night and weather simulators, else conditions would always be considered safe.
observatory.set_config('simulator', hardware.get_all_names(without=['night', 'weather']))
pocs = POCS(observatory)
pocs.set_config('wait_delay', 5) # Check safety every 5 seconds.
pocs.observatory.scheduler.clear_available_observations()
pocs.observatory.scheduler.add_observation(valid_observation)
observatory.logger.warning(f'Inserting safe weather reading')
observatory.db.insert_current('weather', {'safe': True})
assert pocs.connected is True
assert pocs.is_initialized is False
pocs.initialize()
pocs.logger.info('Starting observatory run')
    # Weather is currently safe and the unit is connected but not yet set up.
assert pocs.is_safe()
assert pocs.is_initialized
assert pocs.connected
assert pocs.do_states
assert pocs.next_state is None
pocs.set_config('wait_delay', 1)
def start_pocs():
# Start running, BLOCKING.
pocs.logger.info(f'start_pocs ENTER')
pocs.run(run_once=True, exit_when_done=True)
# After done running.
assert pocs.is_weather_safe() is False
pocs.power_down()
observatory.logger.info('start_pocs EXIT')
pocs_thread = threading.Thread(target=start_pocs, daemon=True)
pocs_thread.start()
# Insert bad weather report while slewing
pocs.logger.info(f'Waiting to get to slewing state...')
while pocs.state != "slewing":
pass
pocs.logger.info("Inserting bad weather record.")
observatory.db.insert_current('weather', {'safe': False})
# No longer safe, so should transition to parking
pocs.logger.info(f'Waiting to get to parked state...')
while True:
if pocs.state in ['parking', 'parked']:
break
assert pocs.state in ["slewing", "parking", "parked"] # Should be one of these states
time.sleep(0.5)
pocs.logger.warning(f'Stopping states via pocs.DO_STATES')
observatory.set_config('pocs.DO_STATES', False)
observatory.logger.warning(f'Waiting on pocs_thread')
pocs_thread.join(timeout=300)
assert pocs_thread.is_alive() is False
def test_run_power_down_interrupt(observatory,
valid_observation,
pocstime_night
):
os.environ['POCSTIME'] = pocstime_night
observatory.logger.info('start_pocs ENTER')
    # Use all simulators so conditions are always considered safe.
observatory.set_config('simulator', 'all')
pocs = POCS(observatory)
pocs.set_config('wait_delay', 5) # Check safety every 5 seconds.
pocs.observatory.scheduler.clear_available_observations()
pocs.observatory.scheduler.add_observation(valid_observation)
pocs.initialize()
pocs.logger.info('Starting observatory run')
    # The unit is connected but not yet set up.
assert pocs.connected
assert pocs.do_states
assert pocs.is_initialized
assert pocs.next_state is None
def start_pocs():
observatory.logger.info('start_pocs ENTER')
pocs.run(exit_when_done=True, run_once=True)
pocs.power_down()
observatory.logger.info('start_pocs EXIT')
pocs_thread = threading.Thread(target=start_pocs, daemon=True)
pocs_thread.start()
while pocs.next_state != 'slewing':
pocs.logger.debug(
f'Waiting to get to slewing state. Currently next_state={pocs.next_state}')
time.sleep(1)
pocs.logger.warning(f'Stopping states via pocs.DO_STATES')
observatory.set_config('pocs.DO_STATES', False)
observatory.logger.debug(f'Waiting on pocs_thread')
pocs_thread.join(timeout=300)
assert pocs_thread.is_alive() is False
def test_custom_state_file(observatory, temp_file, config_host, config_port):
state_table = POCS.load_state_table()
assert isinstance(state_table, dict)
with open(temp_file, 'w') as f:
f.write(to_yaml(state_table))
file_path = os.path.abspath(temp_file)
pocs = POCS(observatory, state_machine_file=file_path, run_once=True, simulators=['power'])
pocs.initialize()
pocs.power_down()
reset_conf(config_host, config_port)
def test_free_space(pocs, caplog):
assert pocs.has_free_space()
assert pocs.has_free_space(required_space=999 * u.terabyte) is False
assert 'No disk space' in caplog.records[-1].message
assert caplog.records[-1].levelname == 'ERROR'
def test_run_complete(pocs, valid_observation):
os.environ['POCSTIME'] = '2020-01-01 08:00:00'
pocs.set_config('simulator', 'all')
pocs.observatory.scheduler.clear_available_observations()
pocs.observatory.scheduler.add_observation(valid_observation)
pocs.initialize()
assert pocs.is_initialized is True
pocs.run(exit_when_done=True, run_once=True)
assert pocs.state == 'sleeping'
pocs.power_down()
|
[] |
[] |
[
"POCSTIME"
] |
[]
|
["POCSTIME"]
|
python
| 1 | 0 | |
gamershub/wsgi.py
|
"""
WSGI config for gamershub project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.staging")
application = get_wsgi_application() # pylint: disable=C0103
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
adapters/handlers/rest/configure_api.go
|
// _ _
// __ _____ __ ___ ___ __ _| |_ ___
// \ \ /\ / / _ \/ _` \ \ / / |/ _` | __/ _ \
// \ V V / __/ (_| |\ V /| | (_| | || __/
// \_/\_/ \___|\__,_| \_/ |_|\__,_|\__\___|
//
// Copyright © 2016 - 2019 SeMI Holding B.V. (registered @ Dutch Chamber of Commerce no 75221632). All rights reserved.
// LICENSE WEAVIATE OPEN SOURCE: https://www.semi.technology/playbook/playbook/contract-weaviate-OSS.html
// LICENSE WEAVIATE ENTERPRISE: https://www.semi.technology/playbook/contract-weaviate-enterprise.html
// CONCEPT: Bob van Luijt (@bobvanluijt)
// CONTACT: [email protected]
//
package rest
import (
"context"
"net/http"
"net/url"
"os"
"time"
"github.com/coreos/etcd/clientv3"
"github.com/elastic/go-elasticsearch/v5"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
"github.com/semi-technologies/weaviate/adapters/clients/contextionary"
"github.com/semi-technologies/weaviate/adapters/handlers/rest/operations"
"github.com/semi-technologies/weaviate/adapters/handlers/rest/state"
"github.com/semi-technologies/weaviate/adapters/locks"
"github.com/semi-technologies/weaviate/adapters/repos/esvector"
"github.com/semi-technologies/weaviate/adapters/repos/etcd"
"github.com/semi-technologies/weaviate/entities/models"
"github.com/semi-technologies/weaviate/entities/search"
"github.com/semi-technologies/weaviate/usecases/classification"
"github.com/semi-technologies/weaviate/usecases/config"
"github.com/semi-technologies/weaviate/usecases/kinds"
"github.com/semi-technologies/weaviate/usecases/network/common/peers"
schemaUC "github.com/semi-technologies/weaviate/usecases/schema"
"github.com/semi-technologies/weaviate/usecases/schema/migrate"
"github.com/semi-technologies/weaviate/usecases/telemetry"
"github.com/semi-technologies/weaviate/usecases/traverser"
libvectorizer "github.com/semi-technologies/weaviate/usecases/vectorizer"
"github.com/sirupsen/logrus"
)
func makeConfigureServer(appState *state.State) func(*http.Server, string, string) {
return func(s *http.Server, scheme, addr string) {
// Add properties to the config
appState.ServerConfig.Hostname = addr
appState.ServerConfig.Scheme = scheme
}
}
type vectorRepo interface {
kinds.BatchVectorRepo
traverser.VectorSearcher
classification.VectorRepo
SetSchemaGetter(schemaUC.SchemaGetter)
InitCacheIndexing(int, time.Duration, time.Duration)
WaitForStartup(time.Duration) error
}
type vectorizer interface {
kinds.Vectorizer
traverser.CorpiVectorizer
SetIndexChecker(libvectorizer.IndexCheck)
}
type explorer interface {
GetClass(ctx context.Context, params traverser.GetParams) ([]interface{}, error)
Concepts(ctx context.Context, params traverser.ExploreParams) ([]search.Result, error)
}
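// configureAPI wires the concrete esvector repo, the contextionary-based
// vectorizer and the traverser explorer (via the small local interfaces
// declared above) into the schema, kinds, traverser and classification use
// cases, registers the REST handlers and returns the configured handler chain.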
func configureAPI(api *operations.WeaviateAPI) http.Handler {
appState, etcdClient, esClient := startupRoutine()
api.ServeError = errors.ServeError
api.JSONConsumer = runtime.JSONConsumer()
api.OidcAuth = func(token string, scopes []string) (*models.Principal, error) {
return appState.OIDC.ValidateAndExtract(token, scopes)
}
api.Logger = func(msg string, args ...interface{}) {
appState.Logger.WithField("action", "restapi_management").Infof(msg, args...)
}
var vectorRepo vectorRepo
var vectorMigrator migrate.Migrator
var vectorizer vectorizer
var migrator migrate.Migrator
var explorer explorer
repo := esvector.NewRepo(esClient, appState.Logger, nil,
appState.ServerConfig.Config.VectorIndex.DenormalizationDepth)
vectorMigrator = esvector.NewMigrator(repo)
vectorRepo = repo
migrator = vectorMigrator
vectorizer = libvectorizer.New(appState.Contextionary, nil)
explorer = traverser.NewExplorer(repo, vectorizer, libvectorizer.NormalizedDistance)
schemaRepo := etcd.NewSchemaRepo(etcdClient)
classifierRepo := etcd.NewClassificationRepo(etcdClient)
schemaManager, err := schemaUC.NewManager(migrator, schemaRepo,
appState.Locks, appState.Network, appState.Logger, appState.Contextionary, appState.Authorizer, appState.StopwordDetector)
if err != nil {
appState.Logger.
WithField("action", "startup").WithError(err).
Fatal("could not initialize schema manager")
os.Exit(1)
}
vectorRepo.SetSchemaGetter(schemaManager)
vectorizer.SetIndexChecker(schemaManager)
err = vectorRepo.WaitForStartup(2 * time.Minute)
if err != nil {
		appState.Logger.
			WithField("action", "startup").WithError(err).
			Fatal("esvector didn't start up")
os.Exit(1)
}
vectorRepo.InitCacheIndexing(
appState.ServerConfig.Config.VectorIndex.CacheCycleBulkSize,
time.Duration(appState.ServerConfig.Config.VectorIndex.CacheCycleIdleWaitTime)*time.Millisecond,
time.Duration(appState.ServerConfig.Config.VectorIndex.CacheCycleBusyWaitTime)*time.Millisecond,
)
kindsManager := kinds.NewManager(appState.Locks,
schemaManager, appState.Network, appState.ServerConfig, appState.Logger,
appState.Authorizer, vectorizer, vectorRepo)
batchKindsManager := kinds.NewBatchManager(vectorRepo, vectorizer, appState.Locks,
schemaManager, appState.Network, appState.ServerConfig, appState.Logger,
appState.Authorizer)
vectorInspector := libvectorizer.NewInspector(appState.Contextionary)
kindsTraverser := traverser.NewTraverser(appState.ServerConfig, appState.Locks,
appState.Logger, appState.Authorizer, vectorizer,
vectorRepo, explorer, schemaManager)
classifier := classification.New(schemaManager, classifierRepo, vectorRepo, appState.Authorizer)
updateSchemaCallback := makeUpdateSchemaCall(appState.Logger, appState, kindsTraverser)
schemaManager.RegisterSchemaUpdateCallback(updateSchemaCallback)
// manually update schema once
schema := schemaManager.GetSchemaSkipAuth()
updateSchemaCallback(schema)
appState.Network.RegisterUpdatePeerCallback(func(peers peers.Peers) {
schemaManager.TriggerSchemaUpdateCallbacks()
})
appState.Network.RegisterSchemaGetter(schemaManager)
setupSchemaHandlers(api, appState.TelemetryLogger, schemaManager)
setupKindHandlers(api, appState.TelemetryLogger, kindsManager)
setupKindBatchHandlers(api, appState.TelemetryLogger, batchKindsManager)
setupC11yHandlers(api, appState.TelemetryLogger, vectorInspector, appState.Contextionary)
setupGraphQLHandlers(api, appState.TelemetryLogger, appState)
setupMiscHandlers(api, appState.TelemetryLogger, appState.ServerConfig, appState.Network, schemaManager, appState.Contextionary)
setupClassificationHandlers(api, appState.TelemetryLogger, classifier)
api.ServerShutdown = func() {}
configureServer = makeConfigureServer(appState)
setupMiddlewares := makeSetupMiddlewares(appState)
setupGlobalMiddleware := makeSetupGlobalMiddleware(appState)
return setupGlobalMiddleware(api.Serve(setupMiddlewares))
}
// TODO: Split up and don't write into global variables. Instead return an appState
func startupRoutine() (*state.State, *clientv3.Client, *elasticsearch.Client) {
appState := &state.State{}
// context for the startup procedure. (So far the only subcommand respecting
// the context is the schema initialization, as this uses the etcd client
// requiring context. Nevertheless it would make sense to have everything
// that goes on in here pay attention to the context, so we can have a
// "startup in x seconds or fail")
ctx := context.Background()
	// The timeout is arbitrary; we have to adjust it as we go along if we
	// realize it is too big/small.
ctx, cancel := context.WithTimeout(ctx, 120*time.Second)
defer cancel()
logger := logger()
appState.Logger = logger
logger.WithField("action", "startup").WithField("startup_time_left", timeTillDeadline(ctx)).
Debug("created startup context, nothing done so far")
// Load the config using the flags
serverConfig := &config.WeaviateConfig{}
appState.ServerConfig = serverConfig
err := serverConfig.LoadConfig(connectorOptionGroup, logger)
if err != nil {
logger.WithField("action", "startup").WithError(err).Error("could not load config")
logger.Exit(1)
}
logger.WithField("action", "startup").WithField("startup_time_left", timeTillDeadline(ctx)).
Debug("config loaded")
appState.OIDC = configureOIDC(appState)
appState.AnonymousAccess = configureAnonymousAccess(appState)
appState.Authorizer = configureAuthorizer(appState)
logger.WithField("action", "startup").WithField("startup_time_left", timeTillDeadline(ctx)).
Debug("configured OIDC and anonymous access client")
appState.Network = connectToNetwork(logger, appState.ServerConfig.Config)
logger.WithField("action", "startup").WithField("startup_time_left", timeTillDeadline(ctx)).
Debug("network configured")
logger.WithField("action", "startup").WithField("startup_time_left", timeTillDeadline(ctx)).
Debug("created db connector")
// parse config store URL
configURL := serverConfig.Config.ConfigurationStorage.URL
configStore, err := url.Parse(configURL)
if err != nil || configURL == "" {
logger.WithField("action", "startup").WithField("url", configURL).
WithError(err).Error("cannot parse config store URL")
logger.Exit(1)
}
// Construct a distributed lock
etcdClient, err := clientv3.New(clientv3.Config{Endpoints: []string{configStore.String()}})
if err != nil {
logger.WithField("action", "startup").
WithError(err).Error("cannot construct distributed lock with etcd")
logger.Exit(1)
}
logger.WithField("action", "startup").WithField("startup_time_left", timeTillDeadline(ctx)).
Debug("created etcd client")
esClient, err := elasticsearch.NewClient(elasticsearch.Config{
Addresses: []string{serverConfig.Config.VectorIndex.URL},
})
if err != nil {
logger.WithField("action", "startup").
WithError(err).Error("cannot create es client for vector index")
logger.Exit(1)
}
logger.WithField("action", "startup").WithField("startup_time_left", timeTillDeadline(ctx)).
Debug("created es client for vector index")
appState.TelemetryLogger = configureTelemetry(appState, etcdClient, logger)
// new lock
etcdLock, err := locks.NewEtcdLock(etcdClient, "/weaviate/schema-connector-rw-lock", logger)
if err != nil {
logger.WithField("action", "startup").
WithError(err).Error("cannot create etcd-based lock")
logger.Exit(1)
}
appState.Locks = etcdLock
// appState.Locks = &dummyLock{}
logger.WithField("action", "startup").WithField("startup_time_left", timeTillDeadline(ctx)).
Debug("created etcd session")
// END remove
logger.WithField("action", "startup").WithField("startup_time_left", timeTillDeadline(ctx)).
Debug("initialized schema")
logger.WithField("action", "startup").WithField("startup_time_left", timeTillDeadline(ctx)).
Debug("initialized stopword detector")
c11y, err := contextionary.NewClient(appState.ServerConfig.Config.Contextionary.URL)
if err != nil {
logger.WithField("action", "startup").
WithError(err).Error("cannot create c11y client")
logger.Exit(1)
}
appState.StopwordDetector = c11y
appState.Contextionary = c11y
return appState, etcdClient, esClient
}
func configureTelemetry(appState *state.State, etcdClient *clientv3.Client,
logger logrus.FieldLogger) *telemetry.RequestsLog {
	// Extract the configuration values needed for telemetry logging
mainLog := telemetry.NewLog()
loggingInterval := appState.ServerConfig.Config.Telemetry.Interval
loggingURL := appState.ServerConfig.Config.Telemetry.RemoteURL
loggingDisabled := appState.ServerConfig.Config.Telemetry.Disabled
loggingDebug := appState.ServerConfig.Config.Debug
if loggingURL == "" {
loggingURL = telemetry.DefaultURL
}
if loggingInterval == 0 {
loggingInterval = telemetry.DefaultInterval
}
// Propagate the peer name (if any), debug toggle and the enabled toggle to the requestsLog
if appState.ServerConfig.Config.Network != nil {
mainLog.PeerName = appState.ServerConfig.Config.Network.PeerName
}
mainLog.Debug = loggingDebug
mainLog.Disabled = loggingDisabled
// Initialize a non-expiring context for the reporter
reportingContext := context.Background()
// Initialize the reporter
reporter := telemetry.NewReporter(reportingContext, mainLog, loggingInterval, loggingURL, loggingDisabled, loggingDebug, etcdClient, logger)
// Start reporting
go func() {
reporter.Start()
}()
return mainLog
}
// logger does not parse the regular config object, as logging needs to be
// configured before the configuration is even loaded/parsed. We are thus
// "manually" reading the desired env vars and set reasonable defaults if they
// are not set.
//
// Defaults to log level info and json format
func logger() *logrus.Logger {
logger := logrus.New()
if os.Getenv("LOG_FORMAT") != "text" {
logger.SetFormatter(&logrus.JSONFormatter{})
}
if os.Getenv("LOG_LEVEL") == "debug" {
logger.SetLevel(logrus.DebugLevel)
} else {
logger.SetLevel(logrus.InfoLevel)
}
return logger
}
type dummyLock struct{}
func (d *dummyLock) LockConnector() (func() error, error) {
return func() error { return nil }, nil
}
func (d *dummyLock) LockSchema() (func() error, error) {
return func() error { return nil }, nil
}
|
[
"\"LOG_FORMAT\"",
"\"LOG_LEVEL\""
] |
[] |
[
"LOG_FORMAT",
"LOG_LEVEL"
] |
[]
|
["LOG_FORMAT", "LOG_LEVEL"]
|
go
| 2 | 0 | |
actstream/runtests/manage.py
|
#!/usr/bin/env python
# http://ericholscher.com/blog/2009/jun/29/enable-setuppy-test-your-django-apps/
# http://www.travisswicegood.com/2010/01/17/django-virtualenv-pip-and-fabric/
# http://code.djangoproject.com/svn/django/trunk/tests/runtests.py
# https://github.com/tomchristie/django-rest-framework/blob/master/rest_framework/runtests/runtests.py
import os
import sys
import warnings
warnings.filterwarnings("ignore")
# fix sys.path so we don't need to set up PYTHONPATH
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../..")))
os.environ['DJANGO_SETTINGS_MODULE'] = 'actstream.runtests.settings'
engine = os.environ.get('DATABASE_ENGINE', 'django.db.backends.sqlite3')
if engine.startswith('mysql'):
engine = 'django.db.backends.mysql'
elif engine.startswith('postgre'):
engine = 'django.db.backends.postgresql_psycopg2'
else:
engine = 'django.db.backends.sqlite3'
try:
import django
except SyntaxError:
sys.stderr.write('Unable to import django (older python version)\n')
exit(0)
PYPY = hasattr(sys, 'pypy_version_info')
version = sys.version_info[:2]
PY3 = version[0] == 3
if PYPY and engine.endswith('psycopg2') and bytes != str:
sys.stderr.write('PyPy3 does not have a psycopg implementation\n')
exit(0)
if PY3 and django.VERSION[:2] >= (1, 9) and version <= (3, 3):
sys.stderr.write('Django>=1.9 does not support Python<=3.3\n')
exit(0)
if PY3 and django.VERSION[:2] <= (1, 8) and version >= (3, 5):
sys.stderr.write('Django<=1.8 does not support Python>=3.5\n')
exit(0)
if PY3 and django.VERSION[:2] == (1, 8) and version <= (3, 3):
sys.stderr.write('Django 1.8 does not support Python<=3.3\n')
exit(0)
if django.VERSION[:2] <= (1, 4) and PY3:
sys.stderr.write('Django<=1.4 does not support Python3\n')
exit(0)
if version == (2, 6) and django.VERSION[:2] >= (1, 7):
sys.stderr.write('Django>=1.7 does not support Python2.6\n')
exit(0)
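# Write the normalized backend path back into the DATABASE_ENGINE environment variable.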
os.environ['DATABASE_ENGINE'] = engine
try:
from psycopg2cffi import compat
compat.register()
except ImportError:
pass
try:
import pymysql
pymysql.install_as_MySQLdb()
except ImportError:
pass
try:
django.setup()
except AttributeError:
pass
if __name__ == '__main__':
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[] |
[] |
[
"DATABASE_ENGINE",
"DJANGO_SETTINGS_MODULE"
] |
[]
|
["DATABASE_ENGINE", "DJANGO_SETTINGS_MODULE"]
|
python
| 2 | 0 | |
support/historyarchive/archive_test.go
|
// Copyright 2016 DiamNet Development Foundation and contributors. Licensed
// under the Apache License, Version 2.0. See the COPYING file at the root
// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
package historyarchive
import (
"bytes"
"crypto/rand"
"crypto/sha256"
"fmt"
"io/ioutil"
"math/big"
"os"
"testing"
"github.com/diamnet/go/xdr"
"github.com/stretchr/testify/assert"
)
func GetTestS3Archive() *Archive {
mx := big.NewInt(0xffffffff)
r, e := rand.Int(rand.Reader, mx)
if e != nil {
panic(e)
}
return MustConnect(fmt.Sprintf("s3://history-stg.diamnet.org/dev/archivist/test-%s", r),
ConnectOptions{S3Region: "eu-west-1"})
}
func GetTestMockArchive() *Archive {
return MustConnect("mock://test", ConnectOptions{})
}
var tmpdirs []string
func GetTestFileArchive() *Archive {
d, e := ioutil.TempDir("/tmp", "archivist")
if e != nil {
panic(e)
}
if tmpdirs == nil {
tmpdirs = []string{d}
} else {
tmpdirs = append(tmpdirs, d)
}
return MustConnect("file://"+d, ConnectOptions{})
}
func cleanup() {
for _, d := range tmpdirs {
os.RemoveAll(d)
}
}
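// GetTestArchive returns a file-, S3-, or mock-backed archive depending on
// the ARCHIVIST_TEST_TYPE environment variable.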
func GetTestArchive() *Archive {
ty := os.Getenv("ARCHIVIST_TEST_TYPE")
if ty == "file" {
return GetTestFileArchive()
} else if ty == "s3" {
return GetTestS3Archive()
} else {
return GetTestMockArchive()
}
}
func (arch *Archive) AddRandomBucket() (Hash, error) {
var h Hash
buf := make([]byte, 1024)
_, e := rand.Read(buf)
if e != nil {
return h, e
}
h = sha256.Sum256(buf)
pth := BucketPath(h)
e = arch.backend.PutFile(pth, ioutil.NopCloser(bytes.NewReader(buf)))
return h, e
}
func (arch *Archive) AddRandomCheckpointFile(cat string, chk uint32) error {
buf := make([]byte, 1024)
_, e := rand.Read(buf)
if e != nil {
return e
}
pth := CategoryCheckpointPath(cat, chk)
return arch.backend.PutFile(pth, ioutil.NopCloser(bytes.NewReader(buf)))
}
func (arch *Archive) AddRandomCheckpoint(chk uint32) error {
opts := &CommandOptions{Force: true}
for _, cat := range Categories() {
if cat == "history" {
var has HistoryArchiveState
has.CurrentLedger = chk
for i := 0; i < NumLevels; i++ {
curr, e := arch.AddRandomBucket()
if e != nil {
return e
}
snap, e := arch.AddRandomBucket()
if e != nil {
return e
}
next, e := arch.AddRandomBucket()
if e != nil {
return e
}
has.CurrentBuckets[i].Curr = curr.String()
has.CurrentBuckets[i].Snap = snap.String()
has.CurrentBuckets[i].Next.Output = next.String()
}
arch.PutCheckpointHAS(chk, has, opts)
arch.PutRootHAS(has, opts)
} else {
arch.AddRandomCheckpointFile(cat, chk)
}
}
return nil
}
func (arch *Archive) PopulateRandomRange(rng Range) error {
for chk := range rng.Checkpoints() {
if e := arch.AddRandomCheckpoint(chk); e != nil {
return e
}
}
return nil
}
func testRange() Range {
return Range{Low: 63, High: 0x3bf}
}
func testOptions() *CommandOptions {
return &CommandOptions{Range: testRange(), Concurrency: 16}
}
func GetRandomPopulatedArchive() *Archive {
a := GetTestArchive()
a.PopulateRandomRange(testRange())
return a
}
func TestScan(t *testing.T) {
defer cleanup()
opts := testOptions()
GetRandomPopulatedArchive().Scan(opts)
}
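// countMissing rescans the archive and returns the total number of missing
// checkpoint files and buckets.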
func countMissing(arch *Archive, opts *CommandOptions) int {
n := 0
arch.Scan(opts)
for _, missing := range arch.CheckCheckpointFilesMissing(opts) {
n += len(missing)
}
n += len(arch.CheckBucketsMissing())
return n
}
func TestMirror(t *testing.T) {
defer cleanup()
opts := testOptions()
src := GetRandomPopulatedArchive()
dst := GetTestArchive()
Mirror(src, dst, opts)
assert.Equal(t, 0, countMissing(dst, opts))
}
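// copyFile copies a single checkpoint file of the given category from src to dst.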
func copyFile(category string, checkpoint uint32, src *Archive, dst *Archive) {
pth := CategoryCheckpointPath(category, checkpoint)
rdr, err := src.backend.GetFile(pth)
if err != nil {
panic(err)
}
if err = dst.backend.PutFile(pth, rdr); err != nil {
panic(err)
}
}
func TestMirrorThenRepair(t *testing.T) {
defer cleanup()
opts := testOptions()
src := GetRandomPopulatedArchive()
dst := GetTestArchive()
Mirror(src, dst, opts)
assert.Equal(t, 0, countMissing(dst, opts))
bad := opts.Range.Low + uint32(opts.Range.Size()/2)
src.AddRandomCheckpoint(bad)
copyFile("history", bad, src, dst)
assert.NotEqual(t, 0, countMissing(dst, opts))
Repair(src, dst, opts)
assert.Equal(t, 0, countMissing(dst, opts))
}
func TestDryRunNoRepair(t *testing.T) {
defer cleanup()
opts := testOptions()
src := GetRandomPopulatedArchive()
dst := GetTestArchive()
Mirror(src, dst, opts)
assert.Equal(t, 0, countMissing(dst, opts))
bad := opts.Range.Low + uint32(opts.Range.Size()/2)
src.AddRandomCheckpoint(bad)
copyFile("history", bad, src, dst)
assert.NotEqual(t, 0, countMissing(dst, opts))
opts.DryRun = true
Repair(src, dst, opts)
assert.NotEqual(t, 0, countMissing(dst, opts))
}
func TestXdrDecode(t *testing.T) {
xdrbytes := []byte{
0, 0, 0, 0, // entry type 0, liveentry
0, 32, 223, 100, // lastmodified 2154340
0, 0, 0, 0, // entry type 0, account
0, 0, 0, 0, // key type 0
23, 140, 68, 253, // ed25519 key (32 bytes)
184, 162, 186, 195,
118, 239, 158, 210,
100, 241, 174, 254,
108, 110, 165, 140,
75, 76, 83, 141,
104, 212, 227, 80,
1, 214, 157, 7,
0, 0, 0, 29, // 64bit balance: 125339976000
46, 216, 65, 64,
0, 0, 129, 170, // 64bit seqnum: 142567144423475
0, 0, 0, 51,
0, 0, 0, 1, // numsubentries: 1
0, 0, 0, 1, // inflationdest type, populated
0, 0, 0, 0, // key type 0
87, 240, 19, 71, // ed25519 key (32 bytes)
52, 91, 9, 62,
213, 239, 178, 85,
161, 119, 108, 251,
168, 90, 76, 116,
12, 48, 134, 248,
115, 255, 117, 50,
19, 18, 170, 203,
0, 0, 0, 0, // flags
0, 0, 0, 19, // homedomain: 19 bytes + 1 null padding
99, 101, 110, 116, // "centaurus.xcoins.de"
97, 117, 114, 117,
115, 46, 120, 99,
111, 105, 110, 115,
46, 100, 101, 0,
1, 0, 0, 0, // thresholds
0, 0, 0, 0, // signers (null)
0, 0, 0, 0, // entry.account.ext.v: 0
0, 0, 0, 0, // entry.ext.v: 0
}
assert.Equal(t, len(xdrbytes), 152)
var tmp xdr.BucketEntry
n, err := xdr.Unmarshal(bytes.NewReader(xdrbytes[:]), &tmp)
fmt.Printf("Decoded %d bytes\n", n)
if err != nil {
panic(err)
}
assert.Equal(t, len(xdrbytes), n)
var out bytes.Buffer
n, err = xdr.Marshal(&out, &tmp)
fmt.Printf("Encoded %d bytes\n", n)
if err != nil {
panic(err)
}
assert.Equal(t, out.Len(), n)
assert.Equal(t, out.Bytes(), xdrbytes)
}
|
[
"\"ARCHIVIST_TEST_TYPE\""
] |
[] |
[
"ARCHIVIST_TEST_TYPE"
] |
[]
|
["ARCHIVIST_TEST_TYPE"]
|
go
| 1 | 0 | |
calico_cni/ipam.py
|
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
import os
import sys
from netaddr import IPNetwork
from pycalico.ipam import IPAMClient
from util import configure_logging, print_cni_error
from constants import *
# Logging config.
LOG_FILENAME = "ipam.log"
_log = logging.getLogger("calico_cni")
class IpamPlugin(object):
def __init__(self, config, environment):
self.config = config
"""
Dictionary representation of the config passed via stdin.
"""
self.env = environment
"""
Current environment (e.g os.environ)
"""
self.command = None
"""
Command indicating which action to take - one of "ADD" or "DEL".
"""
self.container_id = None
"""
Identifier for the container for which we are performing IPAM.
"""
self.datastore_client = IPAMClient()
"""
Access to the datastore client. Relies on ETCD_AUTHORITY environment
variable being set by the calling plugin.
"""
# Validate the given config and environment and set fields
# using the given config and environment.
self._parse_config()
def execute(self):
"""
Assigns or releases IP addresses for the specified container.
:return:
"""
if self.command == "ADD":
# Assign an IP address for this container.
_log.info("Assigning address to container %s", self.container_id)
ipv4, ipv6 = self._assign_address(handle_id=self.container_id)
# Output the response and exit successfully.
print json.dumps({"ip4": {"ip": str(ipv4.cidr),},
"ip6": {"ip": str(ipv6.cidr),},})
else:
# Release any IP addresses for this container.
assert self.command == CNI_CMD_DELETE, \
"Invalid command: %s" % self.command
# Release IPs using the container_id as the handle.
_log.info("Releasing addresses on container %s",
self.container_id)
try:
self.datastore_client.release_ip_by_handle(handle_id=self.container_id)
except KeyError:
_log.warning("No IPs assigned to container_id %s",
self.container_id)
def _assign_address(self, handle_id, ipv4_pool=None, ipv6_pool=None):
"""
Assigns an IPv4 and IPv6 address within the given pools.
If no pools are given, they will be automatically chosen.
:return: A tuple of (IPv4, IPv6) address assigned.
"""
ipv4 = IPNetwork("0.0.0.0")
ipv6 = IPNetwork("::")
pool = (ipv4_pool, ipv6_pool)
try:
ipv4_addrs, ipv6_addrs = self.datastore_client.auto_assign_ips(
num_v4=1, num_v6=1, handle_id=handle_id, attributes=None,
pool=pool
)
_log.debug("Allocated ip4s: %s, ip6s: %s", ipv4_addrs, ipv6_addrs)
except RuntimeError as err:
_log.error("Cannot auto assign IPAddress: %s", err.message)
_exit_on_error(code=ERR_CODE_FAILED_ASSIGNMENT,
message="Failed to assign IP address",
details=err.message)
else:
try:
ipv4 = ipv4_addrs[0]
except IndexError:
_log.error("No IPv4 address returned, exiting")
_exit_on_error(code=ERR_CODE_FAILED_ASSIGNMENT,
message="No IPv4 addresses available in pool",
details = "")
try:
ipv6 = ipv6_addrs[0]
except IndexError:
_log.error("No IPv6 address returned, exiting")
_exit_on_error(code=ERR_CODE_FAILED_ASSIGNMENT,
message="No IPv6 addresses available in pool",
details="")
_log.info("Assigned IPv4: %s, IPv6: %s", ipv4, ipv6)
return IPNetwork(ipv4), IPNetwork(ipv6)
def _parse_config(self):
"""
Validates that the plugins environment and given config contain
the required values.
"""
_log.debug('Environment: %s', json.dumps(self.env, indent=2))
_log.debug('Network config: %s', json.dumps(self.config, indent=2))
# Check the given environment contains the required fields.
try:
self.command = self.env[CNI_COMMAND_ENV]
except KeyError:
_exit_on_error(code=ERR_CODE_INVALID_ARGUMENT,
message="Arguments Invalid",
details="CNI_COMMAND not found in environment")
else:
# If the command is present, make sure it is valid.
if self.command not in [CNI_CMD_ADD, CNI_CMD_DELETE]:
_exit_on_error(code=ERR_CODE_INVALID_ARGUMENT,
message="Arguments Invalid",
details="Invalid command '%s'" % self.command)
try:
self.container_id = self.env[CNI_CONTAINERID_ENV]
except KeyError:
_exit_on_error(code=ERR_CODE_INVALID_ARGUMENT,
message="Arguments Invalid",
details="CNI_CONTAINERID not found in environment")
def _exit_on_error(code, message, details=""):
"""
Return failure information to the calling plugin as specified in the CNI spec and exit.
:param code: Error code to return (int)
:param message: Short error message to return.
:param details: Detailed error message to return.
:return:
"""
print_cni_error(code, message, details)
_log.debug("Exiting with rc=%s", code)
sys.exit(code)
def main():
# Read config file from stdin.
_log.debug("Reading config from stdin")
conf_raw = ''.join(sys.stdin.readlines()).replace('\n', '')
config = json.loads(conf_raw)
# Get the log level from the config file, default to INFO.
log_level = config.get(LOG_LEVEL_KEY, "INFO").upper()
# Setup logger. We log to file and to stderr based on the
# log level provided in the network configuration file.
configure_logging(_log, LOG_FILENAME,
log_level=log_level,
stderr_level=logging.INFO)
# Get copy of environment.
env = os.environ.copy()
# Create plugin instance.
plugin = IpamPlugin(config, env)
try:
# Execute IPAM.
plugin.execute()
except Exception, e:
_log.exception("Unhandled exception")
_exit_on_error(ERR_CODE_UNHANDLED,
message="Unhandled Exception",
details=e.message)
if __name__ == '__main__': # pragma: no cover
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
docs/conf.py
|
# -*- coding: utf-8 -*-
#
# Documentation build configuration file, created by karr_lab_build_utils.
#
import datetime
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosectionlabel',
'sphinx.ext.coverage',
'sphinx.ext.imgconverter',
'sphinx.ext.linkcode',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx_fontawesome',
'sphinxcontrib.addmetahtml',
'sphinxcontrib.bibtex',
'sphinxcontrib.googleanalytics',
'sphinxcontrib.spelling',
'sphinxprettysearchresults',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'karr_lab_aws_manager'
copyright = u'{}, Karr Lab'.format(datetime.datetime.now().year)
author = u'Karr Lab'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import re
filename = os.path.join(os.path.dirname(__file__), "..", "karr_lab_aws_manager", "_version.py")
if os.path.isfile(filename):
verstrline = open(filename, "rt").read()
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(VSRE, verstrline, re.M)
if mo:
version = mo.group(1)
else:
version = None
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- section/figure/table numbering options -------------------------------
numfig = True
numfig_format = {
'figure': 'Figure %s',
'table': 'Table %s',
'code-block': 'Listing %s',
'section': 'Section %s',
}
# -- image converter options ----------------------------------------------
image_converter_args = [
'-density', '150',
'-quality', '00',
]
# -- linkcode options -----------------------------------------------------
def linkcode_resolve(domain, info):
if domain != 'py':
return None
if not info['module']:
return None
rel_filename = info['module'].replace('.', '/')
if os.path.isfile(os.path.join(os.path.dirname(__file__), '..', rel_filename + '.py')):
return "https://github.com/KarrLab/karr_lab_aws_manager/blob/master/{}.py".format(rel_filename)
else:
return "https://github.com/KarrLab/karr_lab_aws_manager/blob/master/{}/__init__.py".format(rel_filename)
# -- napoleon options -----------------------------------------------------
napoleon_google_docstring = True
napoleon_numpy_docstring = False
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
# -- Options for HTML output ----------------------------------------------
import sphinx_rtd_theme
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'karr_lab_aws_manager v0.0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'karr_lab_aws_manager-doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'karr_lab_aws_manager.tex', u'karr_lab_aws_manager documentation',
u'Karr Lab', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'karr_lab_aws_manager', u'karr_lab_aws_manager documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'karr_lab_aws_manager', u'karr_lab_aws_manager documentation',
author, 'karr_lab_aws_manager', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
# -- Google analytics ID --------------------------------------------------
googleanalytics_id = 'UA-86340737-1'
# -- if RTD, redirect to https://docs.karrlab.org ------------------------
addmetahtml_content = '<meta http-equiv="refresh" content="0; url=https://docs.karrlab.org/karr_lab_aws_manager" />'
addmetahtml_enabled = os.getenv('READTHEDOCS', '') == 'True'
|
[] |
[] |
[
"READTHEDOCS"
] |
[]
|
["READTHEDOCS"]
|
python
| 1 | 0 | |
class5/collateral/config_rm_user.py
|
import os
from getpass import getpass
import yaml
from netmiko import ConnectHandler
def load_devices(device_file="lab_devices.yml"):
device_dict = {}
with open(device_file) as f:
device_dict = yaml.safe_load(f)
return device_dict
if __name__ == "__main__":
# Code so automated tests will run properly
# Check for environment variable, if that fails, use getpass().
password = (
os.getenv("NETMIKO_PASSWORD") if os.getenv("NETMIKO_PASSWORD") else getpass()
)
device_dict = load_devices()
cisco3 = device_dict["cisco3"]
for device in (cisco3,):
device["password"] = password
net_connect = ConnectHandler(**device)
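        # Removing the user may trigger a confirmation prompt, which is answered below.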
cmd = "no username my_user"
output = net_connect.config_mode()
output += net_connect.send_command_timing(
cmd, strip_prompt=False, strip_command=False
)
if "confirm" in output:
output += net_connect.send_command_timing(
"y", strip_prompt=False, strip_command=False
)
output += net_connect.exit_config_mode()
output += net_connect.save_config()
print(output)
net_connect.disconnect()
|
[] |
[] |
[
"NETMIKO_PASSWORD"
] |
[]
|
["NETMIKO_PASSWORD"]
|
python
| 1 | 0 | |
mamase/settings/production.py
|
from .base import *
import os
env = os.environ.copy()
SECRET_KEY = env['SECRET_KEY']
DEBUG = False
TEMPLATE_DEBUG = False
try:
from .local import *
except ImportError:
pass
# Parse database configuration from $DATABASE_URL
import dj_database_url
DATABASES = {'default': dj_database_url.config()}
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
#STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
STATICFILES_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
COMPRESS_OFFLINE = True
COMPRESS_CSS_FILTERS = [
'compressor.filters.css_default.CssAbsoluteFilter',
'compressor.filters.cssmin.CSSMinFilter',
]
COMPRESS_CSS_HASHING_METHOD = 'content'
AWS_QUERYSTRING_AUTH = False
AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
AWS_STORAGE_BUCKET_NAME = os.environ['S3_BUCKET_NAME']
AWS_PRELOAD_METADATA = False
AWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME
AWS_HEADERS = {
'Access-Control-Allow-Origin': '*'
}
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
MEDIAFILES_LOCATION = 'media'
MEDIA_URL = 'https://mamase.s3.amazonaws.com/media/'
AWS_S3_CUSTOM_DOMAIN = '%s.s3.amazonaws.com' % AWS_STORAGE_BUCKET_NAME
WAGTAILSEARCH_BACKENDS = {
'default': {
'BACKEND': 'wagtail.wagtailsearch.backends.elasticsearch.ElasticSearch',
'URLS': [os.environ.get('SEARCHBOX_URL')],
'INDEX': 'mamase',
'TIMEOUT': 5,
}
}
SEND_BROKEN_LINK_EMAILS = True
MANAGERS = ADMINS
# Use Redis for both the Celery result backend and the broker (two birds with one stone).
#BROKER_URL = os.environ.get('CLOUDAMQP_URL')
CELERY_RESULT_BACKEND = os.environ['REDIS_URL']
BROKER_URL = os.environ['REDIS_URL']
STATICFILES_LOCATION = 'static'
STATICFILES_STORAGE = 'custom_storages.StaticStorage'
STATIC_URL = "https://%s/%s/" % (AWS_S3_CUSTOM_DOMAIN, STATICFILES_LOCATION)
COMPRESS_STORAGE = STATICFILES_STORAGE
|
[] |
[] |
[
"AWS_SECRET_ACCESS_KEY",
"S3_BUCKET_NAME",
"CLOUDAMQP_URL",
"AWS_ACCESS_KEY_ID",
"SEARCHBOX_URL",
"REDIS_URL"
] |
[]
|
["AWS_SECRET_ACCESS_KEY", "S3_BUCKET_NAME", "CLOUDAMQP_URL", "AWS_ACCESS_KEY_ID", "SEARCHBOX_URL", "REDIS_URL"]
|
python
| 6 | 0 | |
oviewer/action.go
|
package oviewer
import (
"fmt"
"log"
"os"
"os/exec"
"runtime"
"strconv"
"strings"
"time"
)
// toggleWrapMode toggles wrapMode each time it is called.
func (root *Root) toggleWrapMode() {
root.Doc.WrapMode = !root.Doc.WrapMode
root.Doc.x = 0
root.setMessagef("Set WrapMode %t", root.Doc.WrapMode)
}
// toggleColumnMode toggles ColumnMode each time it is called.
func (root *Root) toggleColumnMode() {
root.Doc.ColumnMode = !root.Doc.ColumnMode
root.setMessagef("Set ColumnMode %t", root.Doc.ColumnMode)
}
// toggleAlternateRows toggles the AlternateRows each time it is called.
func (root *Root) toggleAlternateRows() {
root.Doc.AlternateRows = !root.Doc.AlternateRows
root.setMessagef("Set AlternateRows %t", root.Doc.AlternateRows)
}
// toggleLineNumMode toggles LineNumMode every time it is called.
func (root *Root) toggleLineNumMode() {
root.Doc.LineNumMode = !root.Doc.LineNumMode
root.ViewSync()
root.setMessagef("Set LineNumMode %t", root.Doc.LineNumMode)
}
// toggleFollowMode toggles follow mode.
func (root *Root) toggleFollowMode() {
root.Doc.FollowMode = !root.Doc.FollowMode
}
// toggleFollowAll toggles follow all mode.
func (root *Root) toggleFollowAll() {
root.General.FollowAll = !root.General.FollowAll
}
// closeFile close the file.
func (root *Root) closeFile() {
if root.screenMode != Docs {
return
}
if root.Doc.checkClose() {
root.setMessage("already closed")
return
}
if err := root.Doc.close(); err != nil {
log.Printf("closeFile: %s", err)
}
root.setMessagef("close file %s", root.Doc.FileName)
log.Printf("close file %s", root.Doc.FileName)
}
// reload reload a current document.
func (root *Root) reload(m *Document) {
if m.preventReload {
root.setMessagef("cannot reload: %s", m.FileName)
return
}
if err := m.reload(); err != nil {
log.Printf("cannot reload: %s", err)
return
}
root.releaseEventBuffer()
// Reserve time to read.
time.Sleep(100 * time.Millisecond)
}
// toggleWatch toggles watch mode.
func (root *Root) toggleWatch() {
root.Doc.WatchMode = !root.Doc.WatchMode
if root.Doc.WatchMode {
root.watchStart()
}
}
// watchStart starts watch mode.
func (root *Root) watchStart() {
m := root.Doc
m.WatchInterval = max(m.WatchInterval, 1)
if m.ticker != nil {
log.Println("watch stop")
m.ticker.Stop()
}
log.Printf("watch start at interval %d", m.WatchInterval)
m.ticker = time.NewTicker(time.Duration(m.WatchInterval) * time.Second)
go func() {
for {
<-m.ticker.C
if m.WatchMode {
ev := &eventReload{}
ev.SetEventNow()
ev.m = m
err := root.Screen.PostEvent(ev)
if err != nil {
log.Println(err)
}
} else {
log.Println("watch stop")
m.ticker.Stop()
return
}
}
}()
}
// goLine will move to the specified line.
func (root *Root) goLine(input string) {
if !strings.Contains(input, ".") {
// Line number only.
lN, err := strconv.Atoi(input)
if err != nil {
root.setMessage(ErrInvalidNumber.Error())
return
}
lN = root.moveLine(lN - 1)
root.setMessagef("Moved to line %d", lN+1)
return
}
// Line number and number of wrapping lines.
inputs := strings.Split(input, ".")
lN, err := strconv.Atoi(inputs[0])
if err != nil {
root.setMessage(ErrInvalidNumber.Error())
return
}
nTh, err := strconv.Atoi(inputs[1])
if err != nil {
root.setMessage(ErrInvalidNumber.Error())
return
}
lN, nTh = root.moveLineNth(lN-1, nTh)
root.setMessagef("Moved to line %d.%d", lN+1, nTh)
}
// goLineNumber moves to the specified line number.
func (root *Root) goLineNumber(ln int) {
ln = root.moveLine(ln - root.Doc.firstLine())
root.setMessagef("Moved to line %d", ln+1)
}
// markNext moves to the next mark.
func (root *Root) markNext() {
if len(root.Doc.marked) == 0 {
return
}
if len(root.Doc.marked) > root.Doc.markedPoint+1 {
root.Doc.markedPoint++
} else {
root.Doc.markedPoint = 0
}
root.goLineNumber(root.Doc.marked[root.Doc.markedPoint])
}
// markPrev moves to the previous mark.
func (root *Root) markPrev() {
if len(root.Doc.marked) == 0 {
return
}
if root.Doc.markedPoint > 0 {
root.Doc.markedPoint--
} else {
root.Doc.markedPoint = len(root.Doc.marked) - 1
}
root.goLineNumber(root.Doc.marked[root.Doc.markedPoint])
}
// addMark marks the current line number.
func (root *Root) addMark() {
c := min(root.Doc.topLN+root.Doc.firstLine(), root.Doc.endNum)
root.Doc.marked = removeInt(root.Doc.marked, c)
root.Doc.marked = append(root.Doc.marked, c)
root.setMessagef("Marked to line %d", c-root.Doc.firstLine()+1)
}
// removeMark removes the current line number from the mark.
func (root *Root) removeMark() {
c := root.Doc.topLN + root.Doc.firstLine()
marked := removeInt(root.Doc.marked, c)
if len(root.Doc.marked) == len(marked) {
root.setMessagef("Not marked line %d", c-root.Doc.firstLine()+1)
return
}
root.Doc.marked = marked
root.setMessagef("Remove the mark at line %d", c-root.Doc.firstLine()+1)
}
// removeAllMark removes all marks.
func (root *Root) removeAllMark() {
root.Doc.marked = nil
root.Doc.markedPoint = 0
root.setMessage("Remove all marks")
}
// setHeader sets the number of lines in the header.
func (root *Root) setHeader(input string) {
num, err := strconv.Atoi(input)
if err != nil {
root.setMessagef("Set header: %s", ErrInvalidNumber.Error())
return
}
if num < 0 || num > root.vHight-1 {
root.setMessagef("Set header %d: %s", num, ErrOutOfRange.Error())
return
}
if root.Doc.Header == num {
return
}
root.Doc.Header = num
root.setMessagef("Set header lines %d", num)
root.Doc.ClearCache()
}
// setSkipLines sets the number of lines to skip.
func (root *Root) setSkipLines(input string) {
num, err := strconv.Atoi(input)
if err != nil {
root.setMessagef("Set skip line: %s", ErrInvalidNumber.Error())
return
}
if num < 0 || num > root.vHight-1 {
root.setMessagef("Set skip line: %s", ErrOutOfRange.Error())
return
}
if root.Doc.SkipLines == num {
return
}
root.Doc.SkipLines = num
root.setMessagef("Set skip lines %d", num)
root.Doc.ClearCache()
}
// suspend suspends the current screen display and runs the shell.
// It will return when you exit the shell.
func (root *Root) suspend() {
log.Println("Suspend")
if err := root.Screen.Suspend(); err != nil {
log.Println(err)
return
}
fmt.Println("suspended ov")
shell := os.Getenv("SHELL")
if shell == "" {
if runtime.GOOS == "windows" {
shell = "CMD.EXE"
} else {
shell = "/bin/sh"
}
}
c := exec.Command(shell, "-l")
c.Stdin = os.Stdin
c.Stdout = os.Stdout
c.Stderr = os.Stderr
if err := c.Run(); err != nil {
log.Println(err)
}
fmt.Println("resume ov")
if err := root.Screen.Resume(); err != nil {
log.Println(err)
}
log.Println("Resume")
}
// toggleMouse toggles mouse control.
// When disabled, the mouse is controlled on the terminal side.
func (root *Root) toggleMouse() {
root.Config.DisableMouse = !root.Config.DisableMouse
if root.Config.DisableMouse {
root.Screen.DisableMouse()
root.setMessage("Disable Mouse")
} else {
root.Screen.EnableMouse()
root.setMessage("Enable Mouse")
}
}
// setViewMode switches to the preset display mode.
// Set header lines and columnMode together.
func (root *Root) setViewMode(input string) {
c, ok := root.Config.Mode[input]
if !ok {
if input != "general" {
root.setMessagef("%s mode not found", input)
return
}
c = root.General
}
root.Doc.general = c
root.Doc.ClearCache()
root.ViewSync()
root.setMessagef("Set mode %s", input)
}
// setDelimiter sets the delimiter string.
func (root *Root) setDelimiter(input string) {
root.Doc.ColumnDelimiter = input
root.setMessagef("Set delimiter %s", input)
}
// setTabWidth sets the tab width.
func (root *Root) setTabWidth(input string) {
width, err := strconv.Atoi(input)
if err != nil {
root.setMessage(ErrInvalidNumber.Error())
return
}
if root.Doc.TabWidth == width {
return
}
root.Doc.TabWidth = width
root.setMessagef("Set tab width %d", width)
root.Doc.ClearCache()
}
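// setWatchInterval sets the watch mode interval in seconds; an interval of 0
// disables watch mode.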
func (root *Root) setWatchInterval(input string) {
interval, err := strconv.Atoi(input)
if err != nil {
root.setMessage(ErrInvalidNumber.Error())
return
}
if root.Doc.WatchInterval == interval {
return
}
root.Doc.WatchInterval = interval
if root.Doc.WatchInterval == 0 {
root.Doc.WatchMode = false
return
}
root.Doc.WatchMode = true
root.watchStart()
root.setMessagef("Set watch interval %d", interval)
}
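// setWriteBA parses a "before:after" input, sets BeforeWriteOriginal and
// AfterWriteOriginal, and quits with IsWriteOriginal enabled.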
func (root *Root) setWriteBA(input string) {
ba := strings.Split(input, ":")
bstr := ba[0]
if bstr == "" {
bstr = "0"
}
before, err := strconv.Atoi(bstr)
if err != nil {
root.setMessage(ErrInvalidNumber.Error())
return
}
root.BeforeWriteOriginal = before
if len(ba) > 1 {
astr := ba[1]
if astr == "" {
astr = "0"
}
after, err := strconv.Atoi(astr)
if err != nil {
root.setMessage(ErrInvalidNumber.Error())
return
}
root.AfterWriteOriginal = after
}
root.debugMessage(fmt.Sprintf("Before:After:%d:%d", root.BeforeWriteOriginal, root.AfterWriteOriginal))
root.IsWriteOriginal = true
root.Quit()
}
// resize is a wrapper function that calls viewSync.
func (root *Root) resize() {
root.ViewSync()
}
// ViewSync redraws the whole thing.
func (root *Root) ViewSync() {
root.resetSelect()
root.prepareStartX()
root.prepareView()
root.Screen.Sync()
}
// TailSync move to tail and sync.
func (root *Root) TailSync() {
root.moveBottom()
root.ViewSync()
}
// prepareStartX prepares startX.
func (root *Root) prepareStartX() {
root.startX = 0
if root.Doc.LineNumMode {
root.startX = len(fmt.Sprintf("%d", root.Doc.BufEndNum())) + 1
}
}
// updateEndNum updates the last line number.
func (root *Root) updateEndNum() {
root.debugMessage(fmt.Sprintf("Update EndNum:%d", root.Doc.BufEndNum()))
root.prepareStartX()
root.drawStatus()
root.Screen.Sync()
}
|
[
"\"SHELL\""
] |
[] |
[
"SHELL"
] |
[]
|
["SHELL"]
|
go
| 1 | 0 | |
fairseq_cli/interactive.py
|
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Translate raw text with a trained model. Batches data on-the-fly.
"""
import ast
import fileinput
import logging
import math
import os
import sys
import time
from argparse import Namespace
from collections import namedtuple
import numpy as np
import torch
from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils
from fairseq.dataclass.configs import FairseqConfig
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.token_generation_constraints import pack_constraints, unpack_constraints
from fairseq_cli.generate import get_symbols_to_strip_from_output
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger("fairseq_cli.interactive")
Batch = namedtuple("Batch", "ids src_tokens src_lengths constraints")
Translation = namedtuple("Translation", "src_str hypos pos_scores alignments")
def buffered_read(input, buffer_size):
buffer = []
with fileinput.input(files=[input], openhook=fileinput.hook_encoded("utf-8")) as h:
for src_str in h:
buffer.append(src_str.strip())
if len(buffer) >= buffer_size:
yield buffer
buffer = []
if len(buffer) > 0:
yield buffer
def make_batches(lines, cfg, task, max_positions, encode_fn):
def encode_fn_target(x):
return encode_fn(x)
if cfg.generation.constraints:
        # Strip (tab-delimited) constraints, if present, from input lines,
# store them in batch_constraints
batch_constraints = [list() for _ in lines]
for i, line in enumerate(lines):
if "\t" in line:
lines[i], *batch_constraints[i] = line.split("\t")
# Convert each List[str] to List[Tensor]
for i, constraint_list in enumerate(batch_constraints):
batch_constraints[i] = [
task.target_dictionary.encode_line(
encode_fn_target(constraint),
append_eos=False,
add_if_not_exist=False,
)
for constraint in constraint_list
]
if cfg.generation.constraints:
constraints_tensor = pack_constraints(batch_constraints)
else:
constraints_tensor = None
tokens, lengths = task.get_interactive_tokens_and_lengths(lines, encode_fn)
itr = task.get_batch_iterator(
dataset=task.build_dataset_for_inference(
tokens, lengths, constraints=constraints_tensor
),
max_tokens=cfg.dataset.max_tokens,
max_sentences=cfg.dataset.batch_size,
max_positions=max_positions,
ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test,
).next_epoch_itr(shuffle=False)
for batch in itr:
ids = batch["id"]
src_tokens = batch["net_input"]["src_tokens"]
src_lengths = batch["net_input"]["src_lengths"]
constraints = batch.get("constraints", None)
yield Batch(
ids=ids,
src_tokens=src_tokens,
src_lengths=src_lengths,
constraints=constraints,
)
def main(cfg: FairseqConfig):
if isinstance(cfg, Namespace):
cfg = convert_namespace_to_omegaconf(cfg)
start_time = time.time()
total_translate_time = 0
utils.import_user_module(cfg.common)
if cfg.interactive.buffer_size < 1:
cfg.interactive.buffer_size = 1
if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None:
cfg.dataset.batch_size = 1
assert (
not cfg.generation.sampling or cfg.generation.nbest == cfg.generation.beam
), "--sampling requires --nbest to be equal to --beam"
assert (
not cfg.dataset.batch_size
or cfg.dataset.batch_size <= cfg.interactive.buffer_size
), "--batch-size cannot be larger than --buffer-size"
logger.info(cfg)
# Fix seed for stochastic decoding
if cfg.common.seed is not None and not cfg.generation.no_seed_provided:
np.random.seed(cfg.common.seed)
utils.set_torch_seed(cfg.common.seed)
use_cuda = torch.cuda.is_available() and not cfg.common.cpu
# Setup task, e.g., translation
task = tasks.setup_task(cfg.task)
# Load ensemble
overrides = ast.literal_eval(cfg.common_eval.model_overrides)
logger.info("loading model(s) from {}".format(cfg.common_eval.path))
models, _model_args = checkpoint_utils.load_model_ensemble(
utils.split_paths(cfg.common_eval.path),
arg_overrides=overrides,
task=task,
suffix=cfg.checkpoint.checkpoint_suffix,
strict=(cfg.checkpoint.checkpoint_shard_count == 1),
num_shards=cfg.checkpoint.checkpoint_shard_count,
)
# Set dictionaries
src_dict = task.source_dictionary
tgt_dict = task.target_dictionary
# Optimize ensemble for generation
for model in models:
if model is None:
continue
if cfg.common.fp16:
model.half()
if use_cuda and not cfg.distributed_training.pipeline_model_parallel:
model.cuda()
model.prepare_for_inference_(cfg)
# Initialize generator
generator = task.build_generator(models, cfg.generation)
# Handle tokenization and BPE
tokenizer = task.build_tokenizer(cfg.tokenizer)
bpe = task.build_bpe(cfg.bpe)
def encode_fn(x):
if tokenizer is not None:
x = tokenizer.encode(x)
if bpe is not None:
x = bpe.encode(x)
return x
def decode_fn(x):
if bpe is not None:
x = bpe.decode(x)
if tokenizer is not None:
x = tokenizer.decode(x)
return x
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
align_dict = utils.load_align_dict(cfg.generation.replace_unk)
max_positions = utils.resolve_max_positions(
task.max_positions(), *[model.max_positions() for model in models]
)
if cfg.generation.constraints:
logger.warning(
"NOTE: Constrained decoding currently assumes a shared subword vocabulary."
)
if cfg.interactive.buffer_size > 1:
logger.info("Sentence buffer size: %s", cfg.interactive.buffer_size)
logger.info("NOTE: hypothesis and token scores are output in base 2")
logger.info("Type the input sentence and press return:")
start_id = 0
for inputs in buffered_read(cfg.interactive.input, cfg.interactive.buffer_size):
results = []
for batch in make_batches(inputs, cfg, task, max_positions, encode_fn):
bsz = batch.src_tokens.size(0)
src_tokens = batch.src_tokens
src_lengths = batch.src_lengths
constraints = batch.constraints
if use_cuda:
src_tokens = src_tokens.cuda()
src_lengths = src_lengths.cuda()
if constraints is not None:
constraints = constraints.cuda()
sample = {
"net_input": {
"src_tokens": src_tokens,
"src_lengths": src_lengths,
},
}
translate_start_time = time.time()
translations = task.inference_step(
generator, models, sample, constraints=constraints
)
translate_time = time.time() - translate_start_time
total_translate_time += translate_time
list_constraints = [[] for _ in range(bsz)]
if cfg.generation.constraints:
list_constraints = [unpack_constraints(c) for c in constraints]
for i, (id, hypos) in enumerate(zip(batch.ids.tolist(), translations)):
src_tokens_i = utils.strip_pad(src_tokens[i], tgt_dict.pad())
constraints = list_constraints[i]
results.append(
(
start_id + id,
src_tokens_i,
hypos,
{
"constraints": constraints,
"time": translate_time / len(translations),
},
)
)
# sort output to match input order
for id_, src_tokens, hypos, info in sorted(results, key=lambda x: x[0]):
src_str = ""
if src_dict is not None:
src_str = src_dict.string(src_tokens, cfg.common_eval.post_process)
print("S-{}\t{}".format(id_, src_str))
print("W-{}\t{:.3f}\tseconds".format(id_, info["time"]))
for constraint in info["constraints"]:
print(
"C-{}\t{}".format(
id_,
tgt_dict.string(constraint, cfg.common_eval.post_process),
)
)
# Process top predictions
for hypo in hypos[: min(len(hypos), cfg.generation.nbest)]:
hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
hypo_tokens=hypo["tokens"].int().cpu(),
src_str=src_str,
alignment=hypo["alignment"],
align_dict=align_dict,
tgt_dict=tgt_dict,
remove_bpe=cfg.common_eval.post_process,
extra_symbols_to_ignore=get_symbols_to_strip_from_output(generator),
)
detok_hypo_str = decode_fn(hypo_str)
score = hypo["score"] / math.log(2) # convert to base 2
# original hypothesis (after tokenization and BPE)
print("H-{}\t{}\t{}".format(id_, score, hypo_str))
# detokenized hypothesis
print("D-{}\t{}\t{}".format(id_, score, detok_hypo_str))
print(
"P-{}\t{}".format(
id_,
" ".join(
map(
lambda x: "{:.4f}".format(x),
# convert from base e to base 2
hypo["positional_scores"].div_(math.log(2)).tolist(),
)
),
)
)
if cfg.generation.print_alignment:
alignment_str = " ".join(
["{}-{}".format(src, tgt) for src, tgt in alignment]
)
print("A-{}\t{}".format(id_, alignment_str))
# update running id_ counter
start_id += len(inputs)
logger.info(
"Total time: {:.3f} seconds; translation time: {:.3f}".format(
time.time() - start_time, total_translate_time
)
)
def cli_main():
parser = options.get_interactive_generation_parser()
args = options.parse_args_and_arch(parser)
distributed_utils.call_main(convert_namespace_to_omegaconf(args), main)
if __name__ == "__main__":
cli_main()
|
[] |
[] |
[
"LOGLEVEL"
] |
[]
|
["LOGLEVEL"]
|
python
| 1 | 0 | |
google-api-go-generator/gen.go
|
// Copyright 2011 Google LLC. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"bytes"
"encoding/json"
"errors"
"flag"
"fmt"
"go/format"
"io"
"io/ioutil"
"log"
"net/http"
"net/url"
"os"
"os/exec"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"time"
"unicode"
"google.golang.org/api/google-api-go-generator/internal/disco"
"google.golang.org/api/internal/version"
)
const (
googleDiscoveryURL = "https://www.googleapis.com/discovery/v1/apis"
)
var (
apiToGenerate = flag.String("api", "*", "The API ID to generate, like 'tasks:v1'. A value of '*' means all.")
useCache = flag.Bool("cache", true, "Use cache of discovered Google API discovery documents.")
genDir = flag.String("gendir", defaultGenDir(), "Directory to use to write out generated Go files")
build = flag.Bool("build", false, "Compile generated packages.")
install = flag.Bool("install", false, "Install generated packages.")
apisURL = flag.String("discoveryurl", googleDiscoveryURL, "URL to root discovery document")
publicOnly = flag.Bool("publiconly", true, "Only build public, released APIs. Only applicable for Google employees.")
jsonFile = flag.String("api_json_file", "", "If non-empty, the path to a local file on disk containing the API to generate. Exclusive with setting --api.")
output = flag.String("output", "", "(optional) Path to source output file. If not specified, the API name and version are used to construct an output path (e.g. tasks/v1).")
apiPackageBase = flag.String("api_pkg_base", "google.golang.org/api", "Go package prefix to use for all generated APIs.")
baseURL = flag.String("base_url", "", "(optional) Override the default service API URL. If empty, the service's root URL will be used.")
headerPath = flag.String("header_path", "", "If non-empty, prepend the contents of this file to generated services.")
gensupportPkg = flag.String("gensupport_pkg", "google.golang.org/api/internal/gensupport", "Go package path of the 'api/internal/gensupport' support package.")
googleapiPkg = flag.String("googleapi_pkg", "google.golang.org/api/googleapi", "Go package path of the 'api/googleapi' support package.")
optionPkg = flag.String("option_pkg", "google.golang.org/api/option", "Go package path of the 'api/option' support package.")
internalOptionPkg = flag.String("internaloption_pkg", "google.golang.org/api/option/internaloption", "Go package path of the 'api/option/internaloption' support package.")
htransportPkg = flag.String("htransport_pkg", "google.golang.org/api/transport/http", "Go package path of the 'api/transport/http' support package.")
copyrightYear = flag.String("copyright_year", fmt.Sprintf("%d", time.Now().Year()), "Year for copyright.")
serviceTypes = []string{"Service", "APIService"}
)
// API represents an API to generate, as well as its state while it's
// generating.
type API struct {
// Fields needed before generating code, to select and find the APIs
// to generate.
// These fields usually come from the "directory item" JSON objects
// that are provided by the googleDiscoveryURL. We unmarshal a directory
// item directly into this struct.
ID string `json:"id"`
Name string `json:"name"`
Version string `json:"version"`
DiscoveryLink string `json:"discoveryRestUrl"` // absolute
doc *disco.Document
// TODO(jba): remove m when we've fully converted to using disco.
m map[string]interface{}
forceJSON []byte // if non-nil, the JSON schema file. else fetched.
usedNames namePool
schemas map[string]*Schema // apiName -> schema
responseTypes map[string]bool
p func(format string, args ...interface{}) // print raw
pn func(format string, args ...interface{}) // print with newline
}
func (a *API) sortedSchemaNames() (names []string) {
for name := range a.schemas {
names = append(names, name)
}
sort.Strings(names)
return
}
func (a *API) Schema(name string) *Schema {
return a.schemas[name]
}
type generateError struct {
api *API
error error
}
func (e *generateError) Error() string {
return fmt.Sprintf("API %s failed to generate code: %v", e.api.ID, e.error)
}
type compileError struct {
api *API
output string
}
func (e *compileError) Error() string {
return fmt.Sprintf("API %s failed to compile:\n%v", e.api.ID, e.output)
}
func main() {
flag.Parse()
if *install {
*build = true
}
var (
apiIds = []string{}
matches = []*API{}
errors = []error{}
)
for _, api := range getAPIs() {
apiIds = append(apiIds, api.ID)
if !api.want() {
continue
}
matches = append(matches, api)
log.Printf("Generating API %s", api.ID)
err := api.WriteGeneratedCode()
if err != nil && err != errNoDoc {
errors = append(errors, &generateError{api, err})
continue
}
if *build && err == nil {
var args []string
if *install {
args = append(args, "install")
} else {
args = append(args, "build")
}
args = append(args, api.Target())
out, err := exec.Command("go", args...).CombinedOutput()
if err != nil {
errors = append(errors, &compileError{api, string(out)})
}
}
}
if len(matches) == 0 {
log.Fatalf("No APIs matched %q; options are %v", *apiToGenerate, apiIds)
}
if len(errors) > 0 {
log.Printf("%d API(s) failed to generate or compile:", len(errors))
for _, ce := range errors {
log.Println(ce.Error())
}
os.Exit(1)
}
}
func (a *API) want() bool {
if *jsonFile != "" {
// Return true early, before calling a.JSONFile()
// which will require a GOPATH be set. This is for
// integration with Google's build system genrules
// where there is no GOPATH.
return true
}
// Skip this API if we're in cached mode and the files don't exist on disk.
if *useCache {
if _, err := os.Stat(a.JSONFile()); os.IsNotExist(err) {
return false
}
}
return *apiToGenerate == "*" || *apiToGenerate == a.ID
}
func getAPIs() []*API {
if *jsonFile != "" {
return getAPIsFromFile()
}
var bytes []byte
var source string
apiListFile := filepath.Join(genDirRoot(), "api-list.json")
if *useCache {
if !*publicOnly {
log.Fatalf("-cache=true not compatible with -publiconly=false")
}
var err error
bytes, err = ioutil.ReadFile(apiListFile)
if err != nil {
log.Fatal(err)
}
source = apiListFile
} else {
bytes = slurpURL(*apisURL)
if *publicOnly {
if err := writeFile(apiListFile, bytes); err != nil {
log.Fatal(err)
}
}
source = *apisURL
}
apis, err := unmarshalAPIs(bytes)
if err != nil {
log.Fatalf("error decoding JSON in %s: %v", source, err)
}
if !*publicOnly && *apiToGenerate != "*" {
apis = append(apis, apiFromID(*apiToGenerate))
}
return apis
}
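// unmarshalAPIs decodes a discovery directory document of the form
// {"items": [{"id": ..., "name": ..., "version": ..., "discoveryRestUrl": ...}, ...]}
// into a slice of APIs.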
func unmarshalAPIs(bytes []byte) ([]*API, error) {
var itemObj struct{ Items []*API }
if err := json.Unmarshal(bytes, &itemObj); err != nil {
return nil, err
}
return itemObj.Items, nil
}
func apiFromID(apiID string) *API {
parts := strings.Split(apiID, ":")
if len(parts) != 2 {
log.Fatalf("malformed API name: %q", apiID)
}
return &API{
ID: apiID,
Name: parts[0],
Version: parts[1],
}
}
// getAPIsFromFile handles the case of generating exactly one API
// from the flag given in --api_json_file
func getAPIsFromFile() []*API {
if *apiToGenerate != "*" {
log.Fatalf("Can't set --api with --api_json_file.")
}
if !*publicOnly {
log.Fatalf("Can't set --publiconly with --api_json_file.")
}
a, err := apiFromFile(*jsonFile)
if err != nil {
log.Fatal(err)
}
return []*API{a}
}
func apiFromFile(file string) (*API, error) {
jsonBytes, err := ioutil.ReadFile(file)
if err != nil {
return nil, fmt.Errorf("Error reading %s: %v", file, err)
}
doc, err := disco.NewDocument(jsonBytes)
if err != nil {
return nil, fmt.Errorf("reading document from %q: %v", file, err)
}
a := &API{
ID: doc.ID,
Name: doc.Name,
Version: doc.Version,
forceJSON: jsonBytes,
doc: doc,
}
return a, nil
}
func writeFile(file string, contents []byte) error {
// Don't write it if the contents are identical.
existing, err := ioutil.ReadFile(file)
if err == nil && (bytes.Equal(existing, contents) || basicallyEqual(existing, contents)) {
return nil
}
outdir := filepath.Dir(file)
if err = os.MkdirAll(outdir, 0755); err != nil {
return fmt.Errorf("failed to Mkdir %s: %v", outdir, err)
}
return ioutil.WriteFile(file, contents, 0644)
}
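// ignoreLines matches discovery-doc lines such as
//	"etag": "\"abc123\"",
//	"revision": "20200101",
// so that refreshes which only bump those volatile fields are treated as unchanged.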
var ignoreLines = regexp.MustCompile(`(?m)^\s+"(?:etag|revision)": ".+\n`)
// basicallyEqual reports whether a and b are equal except for boring
// differences like ETag updates.
func basicallyEqual(a, b []byte) bool {
return ignoreLines.Match(a) && ignoreLines.Match(b) &&
bytes.Equal(ignoreLines.ReplaceAll(a, nil), ignoreLines.ReplaceAll(b, nil))
}
func slurpURL(urlStr string) []byte {
if *useCache {
log.Fatalf("Invalid use of slurpURL in cached mode for URL %s", urlStr)
}
req, err := http.NewRequest("GET", urlStr, nil)
if err != nil {
log.Fatal(err)
}
if *publicOnly {
req.Header.Add("X-User-IP", "0.0.0.0") // hack
}
res, err := http.DefaultClient.Do(req)
if err != nil {
log.Fatalf("Error fetching URL %s: %v", urlStr, err)
}
if res.StatusCode >= 300 {
log.Printf("WARNING: URL %s served status code %d", urlStr, res.StatusCode)
return nil
}
bs, err := ioutil.ReadAll(res.Body)
if err != nil {
log.Fatalf("Error reading body of URL %s: %v", urlStr, err)
}
return bs
}
func panicf(format string, args ...interface{}) {
panic(fmt.Sprintf(format, args...))
}
// namePool keeps track of used names and assigns free ones based on a
// preferred name
type namePool struct {
m map[string]bool // lazily initialized
}
// oddVersionRE matches unusual API names like directory_v1.
var oddVersionRE = regexp.MustCompile(`^(.+)_(v[\d\.]+)$`)
// renameVersion conditionally rewrites the provided version such
// that the final path component of the import path doesn't look
// like a Go identifier. This keeps the consistency that import paths
// for the generated Go packages look like:
// google.golang.org/api/NAME/v<version>
// and have package NAME.
// See https://github.com/google/google-api-go-client/issues/78
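// For example, "alpha" becomes "v0.alpha" and "admin_directory_v1" becomes
// "admin_directory/v1", while a plain "v3" is returned unchanged.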
func renameVersion(version string) string {
if version == "alpha" || version == "beta" {
return "v0." + version
}
if m := oddVersionRE.FindStringSubmatch(version); m != nil {
return m[1] + "/" + m[2]
}
return version
}
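// Get returns preferred if it has not been handed out yet; otherwise it
// returns the first free name formed by appending an increasing integer
// suffix, e.g. "Foo", then "Foo1", "Foo2", and so on.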
func (p *namePool) Get(preferred string) string {
if p.m == nil {
p.m = make(map[string]bool)
}
name := preferred
tries := 0
for p.m[name] {
tries++
name = fmt.Sprintf("%s%d", preferred, tries)
}
p.m[name] = true
return name
}
func genDirRoot() string {
if *genDir == "" {
log.Fatalf("-gendir option must be set.")
}
return *genDir
}
func defaultGenDir() string {
// TODO(cbro): consider using $CWD
paths := filepath.SplitList(os.Getenv("GOPATH"))
if len(paths) == 0 {
return ""
}
return filepath.Join(paths[0], "src", "google.golang.org", "api")
}
func (a *API) SourceDir() string {
return filepath.Join(genDirRoot(), a.Package(), renameVersion(a.Version))
}
func (a *API) DiscoveryURL() string {
if a.DiscoveryLink == "" {
log.Fatalf("API %s has no DiscoveryLink", a.ID)
}
return a.DiscoveryLink
}
func (a *API) Package() string {
return strings.ToLower(a.Name)
}
func (a *API) Target() string {
return fmt.Sprintf("%s/%s/%s", *apiPackageBase, a.Package(), renameVersion(a.Version))
}
// ServiceType returns the name of the type to use for the root API struct
// (typically "Service").
func (a *API) ServiceType() string {
if a.Name == "monitoring" && a.Version == "v3" {
// HACK(deklerk) monitoring:v3 should always call its overall
// service struct "Service", even though there is a "Service" in its
// schema (we re-map it to MService later).
return "Service"
}
switch a.Name {
case "appengine", "content": // retained for historical compatibility.
return "APIService"
default:
for _, t := range serviceTypes {
if _, ok := a.schemas[t]; !ok {
return t
}
}
panic("all service types are used, please consider introducing a new type to serviceTypes.")
}
}
// GetName returns a free top-level function/type identifier in the package.
// It tries to return your preferred match if it's free.
func (a *API) GetName(preferred string) string {
return a.usedNames.Get(preferred)
}
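// apiBaseURL returns the absolute base URL for API calls. It prefers the
// -base_url flag (joined with the doc's base path), then the document's root
// URL joined with its service path, and finally the discovery URL joined with
// the base path.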
func (a *API) apiBaseURL() string {
var base, rel string
switch {
case *baseURL != "":
base, rel = *baseURL, a.doc.BasePath
case a.doc.RootURL != "":
base, rel = a.doc.RootURL, a.doc.ServicePath
default:
base, rel = *apisURL, a.doc.BasePath
}
return resolveRelative(base, rel)
}
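// needsDataWrapper reports whether the API declares the "dataWrapper" feature,
// in which case request and response bodies are wrapped in a {"data": ...} envelope.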
func (a *API) needsDataWrapper() bool {
for _, feature := range a.doc.Features {
if feature == "dataWrapper" {
return true
}
}
return false
}
func (a *API) jsonBytes() []byte {
if a.forceJSON == nil {
var slurp []byte
var err error
if *useCache {
slurp, err = ioutil.ReadFile(a.JSONFile())
if err != nil {
log.Fatal(err)
}
} else {
slurp = slurpURL(a.DiscoveryURL())
if slurp != nil {
// Make sure that keys are sorted by re-marshalling.
d := make(map[string]interface{})
if err := json.Unmarshal(slurp, &d); err != nil {
log.Fatal(err)
}
slurp, err = json.MarshalIndent(d, "", " ")
if err != nil {
log.Fatal(err)
}
}
}
a.forceJSON = slurp
}
return a.forceJSON
}
func (a *API) JSONFile() string {
return filepath.Join(a.SourceDir(), a.Package()+"-api.json")
}
var errNoDoc = errors.New("could not read discovery doc")
// WriteGeneratedCode generates code for a.
// It returns errNoDoc if we couldn't read the discovery doc.
func (a *API) WriteGeneratedCode() error {
genfilename := *output
jsonBytes := a.jsonBytes()
// Skip generation if we don't have the discovery doc.
if jsonBytes == nil {
// No message here, because slurpURL printed one.
return errNoDoc
}
if genfilename == "" {
if err := writeFile(a.JSONFile(), jsonBytes); err != nil {
return err
}
outdir := a.SourceDir()
err := os.MkdirAll(outdir, 0755)
if err != nil {
return fmt.Errorf("failed to Mkdir %s: %v", outdir, err)
}
pkg := a.Package()
genfilename = filepath.Join(outdir, pkg+"-gen.go")
}
code, err := a.GenerateCode()
errw := writeFile(genfilename, code)
if err == nil {
err = errw
}
if err != nil {
return err
}
return nil
}
var docsLink string
func (a *API) GenerateCode() ([]byte, error) {
pkg := a.Package()
jsonBytes := a.jsonBytes()
var err error
if a.doc == nil {
a.doc, err = disco.NewDocument(jsonBytes)
if err != nil {
return nil, err
}
}
// Buffer the output in memory, for gofmt'ing later.
var buf bytes.Buffer
a.p = func(format string, args ...interface{}) {
_, err := fmt.Fprintf(&buf, format, args...)
if err != nil {
panic(err)
}
}
a.pn = func(format string, args ...interface{}) {
a.p(format+"\n", args...)
}
wf := func(path string) error {
f, err := os.Open(path)
if err != nil {
return err
}
defer f.Close()
_, err = io.Copy(&buf, f)
return err
}
p, pn := a.p, a.pn
if *headerPath != "" {
if err := wf(*headerPath); err != nil {
return nil, err
}
}
pn(`// Copyright %s Google LLC.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated file. DO NOT EDIT.
`, *copyrightYear)
pn("// Package %s provides access to the %s.", pkg, a.doc.Title)
if r := replacementPackage[pkg]; r != "" {
pn("//")
pn("// This package is DEPRECATED. Use package %s instead.", r)
}
docsLink = a.doc.DocumentationLink
if docsLink != "" {
pn("//")
pn("// For product documentation, see: %s", docsLink)
}
pn("//")
pn("// Creating a client")
pn("//")
pn("// Usage example:")
pn("//")
pn("// import %q", a.Target())
pn("// ...")
pn("// ctx := context.Background()")
pn("// %sService, err := %s.NewService(ctx)", pkg, pkg)
pn("//")
pn("// In this example, Google Application Default Credentials are used for authentication.")
pn("//")
pn("// For information on how to create and obtain Application Default Credentials, see https://developers.google.com/identity/protocols/application-default-credentials.")
pn("//")
pn("// Other authentication options")
pn("//")
if len(a.doc.Auth.OAuth2Scopes) > 1 {
pn(`// By default, all available scopes (see "Constants") are used to authenticate. To restrict scopes, use option.WithScopes:`)
pn("//")
// NOTE: the first scope tends to be the broadest. Use the last one to demonstrate restriction.
pn("// %sService, err := %s.NewService(ctx, option.WithScopes(%s.%s))", pkg, pkg, pkg, scopeIdentifier(a.doc.Auth.OAuth2Scopes[len(a.doc.Auth.OAuth2Scopes)-1]))
pn("//")
}
pn("// To use an API key for authentication (note: some APIs do not support API keys), use option.WithAPIKey:")
pn("//")
pn(`// %sService, err := %s.NewService(ctx, option.WithAPIKey("AIza..."))`, pkg, pkg)
pn("//")
pn("// To use an OAuth token (e.g., a user token obtained via a three-legged OAuth flow), use option.WithTokenSource:")
pn("//")
pn("// config := &oauth2.Config{...}")
pn("// // ...")
pn("// token, err := config.Exchange(ctx, ...)")
pn("// %sService, err := %s.NewService(ctx, option.WithTokenSource(config.TokenSource(ctx, token)))", pkg, pkg)
pn("//")
pn("// See https://godoc.org/google.golang.org/api/option/ for details on options.")
pn("package %s // import %q", pkg, a.Target())
p("\n")
pn("import (")
for _, imp := range []string{
"bytes",
"context",
"encoding/json",
"errors",
"fmt",
"io",
"net/http",
"net/url",
"strconv",
"strings",
} {
pn(" %q", imp)
}
pn("")
for _, imp := range []struct {
pkg string
lname string
}{
{*gensupportPkg, "gensupport"},
{*googleapiPkg, "googleapi"},
{*optionPkg, "option"},
{*internalOptionPkg, "internaloption"},
{*htransportPkg, "htransport"},
} {
pn(" %s %q", imp.lname, imp.pkg)
}
pn(")")
pn("\n// Always reference these packages, just in case the auto-generated code")
pn("// below doesn't.")
pn("var _ = bytes.NewBuffer")
pn("var _ = strconv.Itoa")
pn("var _ = fmt.Sprintf")
pn("var _ = json.NewDecoder")
pn("var _ = io.Copy")
pn("var _ = url.Parse")
pn("var _ = gensupport.MarshalJSON")
pn("var _ = googleapi.Version")
pn("var _ = errors.New")
pn("var _ = strings.Replace")
pn("var _ = context.Canceled")
pn("var _ = internaloption.WithDefaultEndpoint")
pn("")
pn("const apiId = %q", a.doc.ID)
pn("const apiName = %q", a.doc.Name)
pn("const apiVersion = %q", a.doc.Version)
pn("const basePath = %q", a.apiBaseURL())
a.generateScopeConstants()
a.PopulateSchemas()
service := a.ServiceType()
// Reserve names (ignore return value; we're the first caller).
a.GetName("New")
a.GetName(service)
pn("// NewService creates a new %s.", service)
pn("func NewService(ctx context.Context, opts ...option.ClientOption) (*%s, error) {", service)
if len(a.doc.Auth.OAuth2Scopes) != 0 {
pn("scopesOption := option.WithScopes(")
for _, scope := range a.doc.Auth.OAuth2Scopes {
pn("%q,", scope.ID)
}
pn(")")
pn("// NOTE: prepend, so we don't override user-specified scopes.")
pn("opts = append([]option.ClientOption{scopesOption}, opts...)")
}
pn("opts = append(opts, internaloption.WithDefaultEndpoint(basePath))")
pn("client, endpoint, err := htransport.NewClient(ctx, opts...)")
pn("if err != nil { return nil, err }")
pn("s, err := New(client)")
pn("if err != nil { return nil, err }")
pn(`if endpoint != "" { s.BasePath = endpoint }`)
pn("return s, nil")
pn("}\n")
pn("// New creates a new %s. It uses the provided http.Client for requests.", service)
pn("//")
pn("// Deprecated: please use NewService instead.")
pn("// To provide a custom HTTP client, use option.WithHTTPClient.")
pn("// If you are using google.golang.org/api/googleapis/transport.APIKey, use option.WithAPIKey with NewService instead.")
pn("func New(client *http.Client) (*%s, error) {", service)
pn("if client == nil { return nil, errors.New(\"client is nil\") }")
pn("s := &%s{client: client, BasePath: basePath}", service)
for _, res := range a.doc.Resources { // add top level resources.
pn("s.%s = New%s(s)", resourceGoField(res, nil), resourceGoType(res))
}
pn("return s, nil")
pn("}")
pn("\ntype %s struct {", service)
pn(" client *http.Client")
pn(" BasePath string // API endpoint base URL")
pn(" UserAgent string // optional additional User-Agent fragment")
for _, res := range a.doc.Resources {
pn("\n\t%s\t*%s", resourceGoField(res, nil), resourceGoType(res))
}
pn("}")
pn("\nfunc (s *%s) userAgent() string {", service)
pn(` if s.UserAgent == "" { return googleapi.UserAgent }`)
pn(` return googleapi.UserAgent + " " + s.UserAgent`)
pn("}\n")
for _, res := range a.doc.Resources {
a.generateResource(res)
}
a.responseTypes = make(map[string]bool)
for _, meth := range a.APIMethods() {
meth.cacheResponseTypes(a)
}
for _, res := range a.doc.Resources {
a.cacheResourceResponseTypes(res)
}
for _, name := range a.sortedSchemaNames() {
a.schemas[name].writeSchemaCode(a)
}
for _, meth := range a.APIMethods() {
meth.generateCode()
}
for _, res := range a.doc.Resources {
a.generateResourceMethods(res)
}
clean, err := format.Source(buf.Bytes())
if err != nil {
return buf.Bytes(), err
}
return clean, nil
}
func (a *API) generateScopeConstants() {
scopes := a.doc.Auth.OAuth2Scopes
if len(scopes) == 0 {
return
}
a.pn("// OAuth2 scopes used by this API.")
a.pn("const (")
n := 0
for _, scope := range scopes {
if n > 0 {
a.p("\n")
}
n++
ident := scopeIdentifier(scope)
if scope.Description != "" {
a.p("%s", asComment("\t", scope.Description))
}
a.pn("\t%s = %q", ident, scope.ID)
}
a.p(")\n\n")
}
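// scopeIdentifier converts an OAuth2 scope URL into a Go constant name:
// "openid" maps to "OpenIDScope", and scopes under
// "https://www.googleapis.com/auth/" map to the capitalized remainder plus a
// "Scope" suffix.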
func scopeIdentifier(s disco.Scope) string {
if s.ID == "openid" {
return "OpenIDScope"
}
urlStr := s.ID
const prefix = "https://www.googleapis.com/auth/"
if !strings.HasPrefix(urlStr, prefix) {
const https = "https://"
if !strings.HasPrefix(urlStr, https) {
log.Fatalf("Unexpected oauth2 scope %q doesn't start with %q", urlStr, https)
}
ident := validGoIdentifer(depunct(urlStr[len(https):], true)) + "Scope"
return ident
}
ident := validGoIdentifer(initialCap(urlStr[len(prefix):])) + "Scope"
return ident
}
// Schema is a disco.Schema that has been bestowed an identifier, whether by
// having an "id" field at the top of the schema or with an
// automatically generated one in populateSubSchemas.
//
// TODO: While sub-types shouldn't need to be promoted to schemas,
// API.GenerateCode iterates over API.schemas to figure out what
// top-level Go types to write. These should be separate concerns.
type Schema struct {
api *API
typ *disco.Schema
apiName string // the native API-defined name of this type
goName string // lazily populated by GoName
goReturnType string // lazily populated by GoReturnType
props []*Property
}
type Property struct {
s *Schema // the containing Schema
p *disco.Property
assignedGoName string
}
func (p *Property) Type() *disco.Schema {
return p.p.Schema
}
func (p *Property) GoName() string {
return initialCap(p.p.Name)
}
func (p *Property) Default() string {
return p.p.Schema.Default
}
func (p *Property) Description() string {
return p.p.Schema.Description
}
func (p *Property) Enum() ([]string, bool) {
typ := p.p.Schema
if typ.Enums != nil {
return typ.Enums, true
}
// Check if this has an array of string enums.
if typ.ItemSchema != nil {
if enums := typ.ItemSchema.Enums; enums != nil && typ.ItemSchema.Type == "string" {
return enums, true
}
}
return nil, false
}
func (p *Property) EnumDescriptions() []string {
if desc := p.p.Schema.EnumDescriptions; desc != nil {
return desc
}
// Check if this has an array of string enum descriptions.
if items := p.p.Schema.ItemSchema; items != nil {
if desc := items.EnumDescriptions; desc != nil {
return desc
}
}
return nil
}
func (p *Property) Pattern() (string, bool) {
return p.p.Schema.Pattern, (p.p.Schema.Pattern != "")
}
func (p *Property) TypeAsGo() string {
return p.s.api.typeAsGo(p.Type(), false)
}
// A FieldName uniquely identifies a field within a Schema struct for an API.
type fieldName struct {
api string // The ID of an API.
schema string // The Go name of a Schema struct.
field string // The Go name of a field.
}
// pointerFields is a list of fields that should use a pointer type.
// This makes it possible to distinguish between a field being unset vs having
// an empty value.
var pointerFields = []fieldName{
{api: "androidpublisher:v1.1", schema: "InappPurchase", field: "PurchaseType"},
{api: "androidpublisher:v2", schema: "ProductPurchase", field: "PurchaseType"},
{api: "androidpublisher:v3", schema: "ProductPurchase", field: "PurchaseType"},
{api: "androidpublisher:v2", schema: "SubscriptionPurchase", field: "CancelReason"},
{api: "androidpublisher:v2", schema: "SubscriptionPurchase", field: "PaymentState"},
{api: "androidpublisher:v2", schema: "SubscriptionPurchase", field: "PurchaseType"},
{api: "androidpublisher:v3", schema: "SubscriptionPurchase", field: "PurchaseType"},
{api: "cloudmonitoring:v2beta2", schema: "Point", field: "BoolValue"},
{api: "cloudmonitoring:v2beta2", schema: "Point", field: "DoubleValue"},
{api: "cloudmonitoring:v2beta2", schema: "Point", field: "Int64Value"},
{api: "cloudmonitoring:v2beta2", schema: "Point", field: "StringValue"},
{api: "compute:alpha", schema: "Scheduling", field: "AutomaticRestart"},
{api: "compute:beta", schema: "MetadataItems", field: "Value"},
{api: "compute:beta", schema: "Scheduling", field: "AutomaticRestart"},
{api: "compute:v1", schema: "MetadataItems", field: "Value"},
{api: "compute:v1", schema: "Scheduling", field: "AutomaticRestart"},
{api: "content:v2", schema: "AccountUser", field: "Admin"},
{api: "datastore:v1beta2", schema: "Property", field: "BlobKeyValue"},
{api: "datastore:v1beta2", schema: "Property", field: "BlobValue"},
{api: "datastore:v1beta2", schema: "Property", field: "BooleanValue"},
{api: "datastore:v1beta2", schema: "Property", field: "DateTimeValue"},
{api: "datastore:v1beta2", schema: "Property", field: "DoubleValue"},
{api: "datastore:v1beta2", schema: "Property", field: "Indexed"},
{api: "datastore:v1beta2", schema: "Property", field: "IntegerValue"},
{api: "datastore:v1beta2", schema: "Property", field: "StringValue"},
{api: "datastore:v1beta3", schema: "Value", field: "BlobValue"},
{api: "datastore:v1beta3", schema: "Value", field: "BooleanValue"},
{api: "datastore:v1beta3", schema: "Value", field: "DoubleValue"},
{api: "datastore:v1beta3", schema: "Value", field: "IntegerValue"},
{api: "datastore:v1beta3", schema: "Value", field: "StringValue"},
{api: "datastore:v1beta3", schema: "Value", field: "TimestampValue"},
{api: "genomics:v1beta2", schema: "Dataset", field: "IsPublic"},
{api: "monitoring:v3", schema: "TypedValue", field: "BoolValue"},
{api: "monitoring:v3", schema: "TypedValue", field: "DoubleValue"},
{api: "monitoring:v3", schema: "TypedValue", field: "Int64Value"},
{api: "monitoring:v3", schema: "TypedValue", field: "StringValue"},
{api: "servicecontrol:v1", schema: "MetricValue", field: "BoolValue"},
{api: "servicecontrol:v1", schema: "MetricValue", field: "DoubleValue"},
{api: "servicecontrol:v1", schema: "MetricValue", field: "Int64Value"},
{api: "servicecontrol:v1", schema: "MetricValue", field: "StringValue"},
{api: "sqladmin:v1beta4", schema: "Settings", field: "StorageAutoResize"},
{api: "storage:v1", schema: "BucketLifecycleRuleCondition", field: "IsLive"},
{api: "storage:v1beta2", schema: "BucketLifecycleRuleCondition", field: "IsLive"},
{api: "tasks:v1", schema: "Task", field: "Completed"},
{api: "youtube:v3", schema: "ChannelSectionSnippet", field: "Position"},
{api: "youtube:v3", schema: "MonitorStreamInfo", field: "EnableMonitorStream"},
}
// forcePointerType reports whether p should be represented as a pointer type in its parent schema struct.
func (p *Property) forcePointerType() bool {
if p.UnfortunateDefault() {
return true
}
name := fieldName{api: p.s.api.ID, schema: p.s.GoName(), field: p.GoName()}
for _, pf := range pointerFields {
if pf == name {
return true
}
}
return false
}
// UnfortunateDefault reports whether p may be set to a zero value, but has a non-zero default.
func (p *Property) UnfortunateDefault() bool {
switch p.TypeAsGo() {
default:
return false
case "bool":
return p.Default() == "true"
case "string":
if p.Default() == "" {
return false
}
// String fields are considered to "allow" a zero value if either:
// (a) they are an enum, and one of the permitted enum values is the empty string, or
// (b) they have a validation pattern which matches the empty string.
pattern, hasPat := p.Pattern()
enum, hasEnum := p.Enum()
if hasPat && hasEnum {
log.Printf("Encountered enum property which also has a pattern: %#v", p)
return false // don't know how to handle this, so ignore.
}
return (hasPat && emptyPattern(pattern)) ||
(hasEnum && emptyEnum(enum))
case "float64", "int64", "uint64", "int32", "uint32":
if p.Default() == "" {
return false
}
if f, err := strconv.ParseFloat(p.Default(), 64); err == nil {
return f != 0.0
}
// The default value has an unexpected form. Whatever it is, it's non-zero.
return true
}
}
// emptyPattern reports whether a pattern matches the empty string.
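// For example, "^(true|false)?$" matches the empty string, while "^(true|false)$" does not.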
func emptyPattern(pattern string) bool {
if re, err := regexp.Compile(pattern); err == nil {
return re.MatchString("")
}
log.Printf("Encountered bad pattern: %s", pattern)
return false
}
// emptyEnum reports whether a property enum list contains the empty string.
func emptyEnum(enum []string) bool {
for _, val := range enum {
if val == "" {
return true
}
}
return false
}
func (a *API) typeAsGo(s *disco.Schema, elidePointers bool) string {
switch s.Kind {
case disco.SimpleKind:
return mustSimpleTypeConvert(s.Type, s.Format)
case disco.ArrayKind:
as := s.ElementSchema()
if as.Type == "string" {
switch as.Format {
case "int64":
return "googleapi.Int64s"
case "uint64":
return "googleapi.Uint64s"
case "int32":
return "googleapi.Int32s"
case "uint32":
return "googleapi.Uint32s"
case "float64":
return "googleapi.Float64s"
}
}
return "[]" + a.typeAsGo(as, elidePointers)
case disco.ReferenceKind:
rs := s.RefSchema
if rs.Kind == disco.SimpleKind {
// Simple top-level schemas get named types (see writeSchemaCode).
// Use the name instead of using the equivalent simple Go type.
return a.schemaNamed(rs.Name).GoName()
}
return a.typeAsGo(rs, elidePointers)
case disco.MapKind:
es := s.ElementSchema()
if es.Type == "string" {
// If the element schema has a type "string", it's going to be
// transmitted as a string, and the Go map type must reflect that.
// This is true even if the format is, say, "int64". When type =
// "string" and format = "int64" at top level, we can use the json
// "string" tag option to unmarshal the string to an int64, but
// inside a map we can't.
return "map[string]string"
}
// Due to historical baggage (maps used to be a separate code path),
// the element types of maps never have pointers in them. From this
// level down, elide pointers in types.
return "map[string]" + a.typeAsGo(es, true)
case disco.AnyStructKind:
return "googleapi.RawMessage"
case disco.StructKind:
tls := a.schemaNamed(s.Name)
if elidePointers || s.Variant != nil {
return tls.GoName()
}
return "*" + tls.GoName()
default:
panic(fmt.Sprintf("unhandled typeAsGo for %+v", s))
}
}
func (a *API) schemaNamed(name string) *Schema {
s := a.schemas[name]
if s == nil {
panicf("no top-level schema named %q", name)
}
return s
}
func (s *Schema) properties() []*Property {
if s.props != nil {
return s.props
}
if s.typ.Kind != disco.StructKind {
panic("called properties on non-object schema")
}
for _, p := range s.typ.Properties {
s.props = append(s.props, &Property{
s: s,
p: p,
})
}
return s.props
}
func (s *Schema) HasContentType() bool {
for _, p := range s.properties() {
if p.GoName() == "ContentType" && p.TypeAsGo() == "string" {
return true
}
}
return false
}
func (s *Schema) populateSubSchemas() (outerr error) {
defer func() {
r := recover()
if r == nil {
return
}
outerr = fmt.Errorf("%v", r)
}()
addSubStruct := func(subApiName string, t *disco.Schema) {
if s.api.schemas[subApiName] != nil {
panic("dup schema apiName: " + subApiName)
}
if t.Name != "" {
panic("subtype already has name: " + t.Name)
}
t.Name = subApiName
subs := &Schema{
api: s.api,
typ: t,
apiName: subApiName,
}
s.api.schemas[subApiName] = subs
err := subs.populateSubSchemas()
if err != nil {
panicf("in sub-struct %q: %v", subApiName, err)
}
}
switch s.typ.Kind {
case disco.StructKind:
for _, p := range s.properties() {
subApiName := fmt.Sprintf("%s.%s", s.apiName, p.p.Name)
switch p.Type().Kind {
case disco.SimpleKind, disco.ReferenceKind, disco.AnyStructKind:
// Do nothing.
case disco.MapKind:
mt := p.Type().ElementSchema()
if mt.Kind == disco.SimpleKind || mt.Kind == disco.ReferenceKind {
continue
}
addSubStruct(subApiName, mt)
case disco.ArrayKind:
at := p.Type().ElementSchema()
if at.Kind == disco.SimpleKind || at.Kind == disco.ReferenceKind {
continue
}
addSubStruct(subApiName, at)
case disco.StructKind:
addSubStruct(subApiName, p.Type())
default:
panicf("Unknown type for %q: %v", subApiName, p.Type())
}
}
case disco.ArrayKind:
subApiName := fmt.Sprintf("%s.Item", s.apiName)
switch at := s.typ.ElementSchema(); at.Kind {
case disco.SimpleKind, disco.ReferenceKind, disco.AnyStructKind:
// Do nothing.
case disco.MapKind:
mt := at.ElementSchema()
if k := mt.Kind; k != disco.SimpleKind && k != disco.ReferenceKind {
addSubStruct(subApiName, mt)
}
case disco.ArrayKind:
at := at.ElementSchema()
if k := at.Kind; k != disco.SimpleKind && k != disco.ReferenceKind {
addSubStruct(subApiName, at)
}
case disco.StructKind:
addSubStruct(subApiName, at)
default:
panicf("Unknown array type for %q: %v", subApiName, at)
}
case disco.AnyStructKind, disco.MapKind, disco.SimpleKind, disco.ReferenceKind:
// Do nothing.
default:
fmt.Fprintf(os.Stderr, "in populateSubSchemas, schema is: %v", s.typ)
panicf("populateSubSchemas: unsupported type for schema %q", s.apiName)
panic("unreachable")
}
return nil
}
// GoName returns (or creates and returns) the bare Go name
// of the apiName, making sure that it's a proper Go identifier
// and doesn't conflict with an existing name.
func (s *Schema) GoName() string {
if s.goName == "" {
if s.typ.Kind == disco.MapKind {
s.goName = s.api.typeAsGo(s.typ, false)
} else {
base := initialCap(s.apiName)
// HACK(deklerk) Re-maps monitoring's Service field to MService so
// that the overall struct for this API can keep its name "Service".
// This takes care of "Service" the initial "goName" for "Service"
// refs.
if s.api.Name == "monitoring" && base == "Service" {
base = "MService"
}
s.goName = s.api.GetName(base)
if base == "Service" && s.goName != "Service" {
// Detect the case where a resource is going to clash with the
// root service object.
panicf("Clash on name Service")
}
}
}
return s.goName
}
// GoReturnType returns the Go type to use as the return type.
// If a type is a struct, it will return *StructType,
// for a map it will return map[string]ValueType,
// for (not yet supported) slices it will return []ValueType.
func (s *Schema) GoReturnType() string {
if s.goReturnType == "" {
if s.typ.Kind == disco.MapKind {
s.goReturnType = s.GoName()
} else {
s.goReturnType = "*" + s.GoName()
}
}
return s.goReturnType
}
func (s *Schema) writeSchemaCode(api *API) {
switch s.typ.Kind {
case disco.SimpleKind:
apitype := s.typ.Type
typ := mustSimpleTypeConvert(apitype, s.typ.Format)
s.api.pn("\ntype %s %s", s.GoName(), typ)
case disco.StructKind:
s.writeSchemaStruct(api)
case disco.MapKind, disco.AnyStructKind:
// Do nothing.
case disco.ArrayKind:
log.Printf("TODO writeSchemaCode for arrays for %s", s.GoName())
default:
fmt.Fprintf(os.Stderr, "in writeSchemaCode, schema is: %+v", s.typ)
panicf("writeSchemaCode: unsupported type for schema %q", s.apiName)
}
}
func (s *Schema) writeVariant(api *API, v *disco.Variant) {
s.api.p("\ntype %s map[string]interface{}\n\n", s.GoName())
// Write out the "Type" method that identifies the variant type.
s.api.pn("func (t %s) Type() string {", s.GoName())
s.api.pn(" return googleapi.VariantType(t)")
s.api.p("}\n\n")
// Write out helper methods to convert each possible variant.
for _, m := range v.Map {
if m.TypeValue == "" && m.Ref == "" {
log.Printf("TODO variant %s ref %s not yet supported.", m.TypeValue, m.Ref)
continue
}
s.api.pn("func (t %s) %s() (r %s, ok bool) {", s.GoName(), initialCap(m.TypeValue), m.Ref)
s.api.pn(" if t.Type() != %q {", initialCap(m.TypeValue))
s.api.pn(" return r, false")
s.api.pn(" }")
s.api.pn(" ok = googleapi.ConvertVariant(map[string]interface{}(t), &r)")
s.api.pn(" return r, ok")
s.api.p("}\n\n")
}
}
func (s *Schema) Description() string {
return s.typ.Description
}
func (s *Schema) writeSchemaStruct(api *API) {
if v := s.typ.Variant; v != nil {
s.writeVariant(api, v)
return
}
s.api.p("\n")
des := s.Description()
if des != "" {
s.api.p("%s", asComment("", fmt.Sprintf("%s: %s", s.GoName(), des)))
}
s.api.pn("type %s struct {", s.GoName())
np := new(namePool)
forceSendName := np.Get("ForceSendFields")
nullFieldsName := np.Get("NullFields")
if s.isResponseType() {
np.Get("ServerResponse") // reserve the name
}
firstFieldName := "" // used to store a struct field name for use in documentation.
for i, p := range s.properties() {
if i > 0 {
s.api.p("\n")
}
pname := np.Get(p.GoName())
if pname[0] == '@' {
// HACK(cbro): ignore JSON-LD special fields until we can figure out
// the correct Go representation for them.
continue
}
p.assignedGoName = pname
des := p.Description()
if des != "" {
s.api.p("%s", asComment("\t", fmt.Sprintf("%s: %s", pname, des)))
}
addFieldValueComments(s.api.p, p, "\t", des != "")
var extraOpt string
if p.Type().IsIntAsString() {
extraOpt += ",string"
}
typ := p.TypeAsGo()
if p.forcePointerType() {
typ = "*" + typ
}
s.api.pn(" %s %s `json:\"%s,omitempty%s\"`", pname, typ, p.p.Name, extraOpt)
if firstFieldName == "" {
firstFieldName = pname
}
}
if s.isResponseType() {
if firstFieldName != "" {
s.api.p("\n")
}
s.api.p("%s", asComment("\t", "ServerResponse contains the HTTP response code and headers from the server."))
s.api.pn(" googleapi.ServerResponse `json:\"-\"`")
}
if firstFieldName == "" {
// There were no fields in the struct, so there is no point
// adding any custom JSON marshaling code.
s.api.pn("}")
return
}
commentFmtStr := "%s is a list of field names (e.g. %q) to " +
"unconditionally include in API requests. By default, fields " +
"with empty values are omitted from API requests. However, " +
"any non-pointer, non-interface field appearing in %s will " +
"be sent to the server regardless of whether the field is " +
"empty or not. This may be used to include empty fields in " +
"Patch requests."
comment := fmt.Sprintf(commentFmtStr, forceSendName, firstFieldName, forceSendName)
s.api.p("\n")
s.api.p("%s", asComment("\t", comment))
s.api.pn("\t%s []string `json:\"-\"`", forceSendName)
commentFmtStr = "%s is a list of field names (e.g. %q) to " +
"include in API requests with the JSON null value. " +
"By default, fields with empty values are omitted from API requests. However, " +
"any field with an empty value appearing in %s will be sent to the server as null. " +
"It is an error if a field in this list has a non-empty value. This may be used to " +
"include null fields in Patch requests."
comment = fmt.Sprintf(commentFmtStr, nullFieldsName, firstFieldName, nullFieldsName)
s.api.p("\n")
s.api.p("%s", asComment("\t", comment))
s.api.pn("\t%s []string `json:\"-\"`", nullFieldsName)
s.api.pn("}")
s.writeSchemaMarshal(forceSendName, nullFieldsName)
s.writeSchemaUnmarshal()
}
// writeSchemaMarshal writes a custom MarshalJSON function for s, which allows
// fields to be explicitly transmitted by listing them in the field identified
// by forceSendFieldName, and allows fields to be transmitted with the null value
// by listing them in the field identified by nullFieldsName.
func (s *Schema) writeSchemaMarshal(forceSendFieldName, nullFieldsName string) {
s.api.pn("func (s *%s) MarshalJSON() ([]byte, error) {", s.GoName())
s.api.pn("\ttype NoMethod %s", s.GoName())
// pass schema as methodless type to prevent subsequent calls to MarshalJSON from recursing indefinitely.
s.api.pn("\traw := NoMethod(*s)")
s.api.pn("\treturn gensupport.MarshalJSON(raw, s.%s, s.%s)", forceSendFieldName, nullFieldsName)
s.api.pn("}")
}
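// writeSchemaUnmarshal writes a custom UnmarshalJSON for schemas with "number"
// (float64) properties, shadowing each with gensupport.JSONFloat64 so that
// special values the server may encode as strings (e.g. "NaN", "Infinity")
// decode correctly.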
func (s *Schema) writeSchemaUnmarshal() {
var floatProps []*Property
for _, p := range s.properties() {
if p.p.Schema.Type == "number" {
floatProps = append(floatProps, p)
}
}
if len(floatProps) == 0 {
return
}
pn := s.api.pn
pn("\nfunc (s *%s) UnmarshalJSON(data []byte) error {", s.GoName())
pn(" type NoMethod %s", s.GoName()) // avoid infinite recursion
pn(" var s1 struct {")
// Hide the float64 fields of the schema with fields that correctly
// unmarshal special values.
for _, p := range floatProps {
typ := "gensupport.JSONFloat64"
if p.forcePointerType() {
typ = "*" + typ
}
pn("%s %s `json:\"%s\"`", p.assignedGoName, typ, p.p.Name)
}
pn(" *NoMethod") // embed the schema
pn(" }")
// Set the schema value into the wrapper so its other fields are unmarshaled.
pn(" s1.NoMethod = (*NoMethod)(s)")
pn(" if err := json.Unmarshal(data, &s1); err != nil {")
pn(" return err")
pn(" }")
// Copy each shadowing field into the field it shadows.
for _, p := range floatProps {
n := p.assignedGoName
if p.forcePointerType() {
pn("if s1.%s != nil { s.%s = (*float64)(s1.%s) }", n, n, n)
} else {
pn("s.%s = float64(s1.%s)", n, n)
}
}
pn(" return nil")
pn("}")
}
// isResponseType returns true for all types that are used as a response.
func (s *Schema) isResponseType() bool {
return s.api.responseTypes["*"+s.goName]
}
// PopulateSchemas reads all the API types ("schemas") from the JSON file
// and converts them to *Schema instances, returning an identically
// keyed map, additionally containing subresources. For instance,
//
// A resource "Foo" of type "object" with a property "bar", also of type
// "object" (an anonymous sub-resource), will get a synthetic API name
// of "Foo.bar".
//
// A resource "Foo" of type "array" with an "items" of type "object"
// will get a synthetic API name of "Foo.Item".
func (a *API) PopulateSchemas() {
if a.schemas != nil {
panic("")
}
a.schemas = make(map[string]*Schema)
for name, ds := range a.doc.Schemas {
s := &Schema{
api: a,
apiName: name,
typ: ds,
}
a.schemas[name] = s
err := s.populateSubSchemas()
if err != nil {
panicf("Error populating schema with API name %q: %v", name, err)
}
}
}
func (a *API) generateResource(r *disco.Resource) {
pn := a.pn
t := resourceGoType(r)
pn(fmt.Sprintf("func New%s(s *%s) *%s {", t, a.ServiceType(), t))
pn("rs := &%s{s : s}", t)
for _, res := range r.Resources {
pn("rs.%s = New%s(s)", resourceGoField(res, r), resourceGoType(res))
}
pn("return rs")
pn("}")
pn("\ntype %s struct {", t)
pn(" s *%s", a.ServiceType())
for _, res := range r.Resources {
pn("\n\t%s\t*%s", resourceGoField(res, r), resourceGoType(res))
}
pn("}")
for _, res := range r.Resources {
a.generateResource(res)
}
}
func (a *API) cacheResourceResponseTypes(r *disco.Resource) {
for _, meth := range a.resourceMethods(r) {
meth.cacheResponseTypes(a)
}
for _, res := range r.Resources {
a.cacheResourceResponseTypes(res)
}
}
func (a *API) generateResourceMethods(r *disco.Resource) {
for _, meth := range a.resourceMethods(r) {
meth.generateCode()
}
for _, res := range r.Resources {
a.generateResourceMethods(res)
}
}
func resourceGoField(r, parent *disco.Resource) string {
// Avoid conflicts with method names.
und := ""
if parent != nil {
for _, m := range parent.Methods {
if m.Name == r.Name {
und = "_"
break
}
}
}
// Note: initialCap(r.Name + "_") doesn't work because initialCap calls depunct.
return initialCap(r.Name) + und
}
func resourceGoType(r *disco.Resource) string {
return initialCap(r.FullName + "Service")
}
func (a *API) resourceMethods(r *disco.Resource) []*Method {
ms := []*Method{}
for _, m := range r.Methods {
ms = append(ms, &Method{
api: a,
r: r,
m: m,
})
}
return ms
}
type Method struct {
api *API
r *disco.Resource // or nil if a API-level (top-level) method
m *disco.Method
params []*Param // all Params, of each type, lazily set by first call of Params method.
}
func (m *Method) Id() string {
return m.m.ID
}
func (m *Method) responseType() *Schema {
return m.api.schemas[m.m.Response.RefSchema.Name]
}
func (m *Method) supportsMediaUpload() bool {
return m.m.MediaUpload != nil
}
func (m *Method) mediaUploadPath() string {
return m.m.MediaUpload.Protocols["simple"].Path
}
func (m *Method) supportsMediaDownload() bool {
if m.supportsMediaUpload() {
// storage.objects.insert claims support for download in
// addition to upload but attempting to do so fails.
// This situation doesn't apply to any other methods.
return false
}
return m.m.SupportsMediaDownload
}
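// supportsPaging reports whether the generated Pages helper can be emitted
// for m. On success it returns the page-token generator and the Go name of
// the response field that carries the next page token.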
func (m *Method) supportsPaging() (*pageTokenGenerator, string, bool) {
ptg := m.pageTokenGenerator()
if ptg == nil {
return nil, "", false
}
// Check that the response type has the next page token.
s := m.responseType()
if s == nil || s.typ.Kind != disco.StructKind {
return nil, "", false
}
for _, prop := range s.properties() {
if isPageTokenName(prop.p.Name) && prop.Type().Type == "string" {
return ptg, prop.GoName(), true
}
}
return nil, "", false
}
type pageTokenGenerator struct {
isParam bool // is the page token a URL parameter?
name string // param or request field name
requestName string // empty for URL param
}
func (p *pageTokenGenerator) genGet() string {
if p.isParam {
return fmt.Sprintf("c.urlParams_.Get(%q)", p.name)
}
return fmt.Sprintf("c.%s.%s", p.requestName, p.name)
}
func (p *pageTokenGenerator) genSet(valueExpr string) string {
if p.isParam {
return fmt.Sprintf("c.%s(%s)", initialCap(p.name), valueExpr)
}
return fmt.Sprintf("c.%s.%s = %s", p.requestName, p.name, valueExpr)
}
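// genDeferBody returns an expression, suitable for use in a defer statement,
// that restores the page token to the value it had before Pages began
// iterating.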
func (p *pageTokenGenerator) genDeferBody() string {
if p.isParam {
return p.genSet(p.genGet())
}
return fmt.Sprintf("func (pt string) { %s }(%s)", p.genSet("pt"), p.genGet())
}
// pageTokenGenerator returns a pageTokenGenerator that will generate code to
// get/set the page token for a subsequent page in the context of the generated
// Pages method. It returns nil if there is no page token.
func (m *Method) pageTokenGenerator() *pageTokenGenerator {
matches := m.grepParams(func(p *Param) bool { return isPageTokenName(p.p.Name) })
switch len(matches) {
case 1:
if matches[0].p.Required {
// The page token is a required parameter (e.g. because there is
// a separate API call to start an iteration), and so the relevant
// call factory method takes the page token instead.
return nil
}
n := matches[0].p.Name
return &pageTokenGenerator{true, n, ""}
case 0: // No URL parameter, but maybe a request field.
if m.m.Request == nil {
return nil
}
rs := m.m.Request
if rs.RefSchema != nil {
rs = rs.RefSchema
}
for _, p := range rs.Properties {
if isPageTokenName(p.Name) {
return &pageTokenGenerator{false, initialCap(p.Name), validGoIdentifer(strings.ToLower(rs.Name))}
}
}
return nil
default:
panicf("too many page token parameters for method %s", m.m.Name)
return nil
}
}
func isPageTokenName(s string) bool {
return s == "pageToken" || s == "nextPageToken"
}
func (m *Method) Params() []*Param {
if m.params == nil {
for _, p := range m.m.Parameters {
m.params = append(m.params, &Param{
method: m,
p: p,
})
}
}
return m.params
}
func (m *Method) grepParams(f func(*Param) bool) []*Param {
matches := make([]*Param, 0)
for _, param := range m.Params() {
if f(param) {
matches = append(matches, param)
}
}
return matches
}
func (m *Method) NamedParam(name string) *Param {
matches := m.grepParams(func(p *Param) bool {
return p.p.Name == name
})
if len(matches) < 1 {
log.Panicf("failed to find named parameter %q", name)
}
if len(matches) > 1 {
log.Panicf("found multiple parameters for parameter name %q", name)
}
return matches[0]
}
func (m *Method) OptParams() []*Param {
return m.grepParams(func(p *Param) bool {
return !p.p.Required
})
}
func (meth *Method) cacheResponseTypes(api *API) {
if retType := responseType(api, meth.m); retType != "" && strings.HasPrefix(retType, "*") {
api.responseTypes[retType] = true
}
}
// convertMultiParams builds a []string temp variable from a slice
// of non-strings and returns the name of the temp variable.
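// For a parameter named "ids" it emits, roughly:
//
//	var ids_ []string
//	for _, v := range ids {
//		ids_ = append(ids_, fmt.Sprint(v))
//	}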
func convertMultiParams(a *API, param string) string {
a.pn(" var %v_ []string", param)
a.pn(" for _, v := range %v {", param)
a.pn(" %v_ = append(%v_, fmt.Sprint(v))", param, param)
a.pn(" }")
return param + "_"
}
func (meth *Method) generateCode() {
res := meth.r // may be nil if a top-level method
a := meth.api
p, pn := a.p, a.pn
pn("\n// method id %q:", meth.Id())
retType := responseType(a, meth.m)
if meth.IsRawResponse() {
retType = "*http.Response"
}
retTypeComma := retType
if retTypeComma != "" {
retTypeComma += ", "
}
args := meth.NewArguments()
methodName := initialCap(meth.m.Name)
prefix := ""
if res != nil {
prefix = initialCap(res.FullName)
}
callName := a.GetName(prefix + methodName + "Call")
pn("\ntype %s struct {", callName)
pn(" s *%s", a.ServiceType())
for _, arg := range args.l {
if arg.location != "query" {
pn(" %s %s", arg.goname, arg.gotype)
}
}
pn(" urlParams_ gensupport.URLParams")
httpMethod := meth.m.HTTPMethod
if httpMethod == "GET" {
pn(" ifNoneMatch_ string")
}
if meth.supportsMediaUpload() {
pn(" mediaInfo_ *gensupport.MediaInfo")
}
pn(" ctx_ context.Context")
pn(" header_ http.Header")
pn("}")
p("\n%s", asComment("", methodName+": "+meth.m.Description))
if res != nil {
if url := canonicalDocsURL[fmt.Sprintf("%v%v/%v", docsLink, res.Name, meth.m.Name)]; url != "" {
pn("// For details, see %v", url)
}
}
var servicePtr string
if res == nil {
pn("func (s *Service) %s(%s) *%s {", methodName, args, callName)
servicePtr = "s"
} else {
pn("func (r *%s) %s(%s) *%s {", resourceGoType(res), methodName, args, callName)
servicePtr = "r.s"
}
pn(" c := &%s{s: %s, urlParams_: make(gensupport.URLParams)}", callName, servicePtr)
for _, arg := range args.l {
// TODO(gmlewis): clean up and consolidate this section.
// See: https://code-review.googlesource.com/#/c/3520/18/google-api-go-generator/gen.go
if arg.location == "query" {
switch arg.gotype {
case "[]string":
pn(" c.urlParams_.SetMulti(%q, append([]string{}, %v...))", arg.apiname, arg.goname)
case "string":
pn(" c.urlParams_.Set(%q, %v)", arg.apiname, arg.goname)
default:
if strings.HasPrefix(arg.gotype, "[]") {
tmpVar := convertMultiParams(a, arg.goname)
pn(" c.urlParams_.SetMulti(%q, %v)", arg.apiname, tmpVar)
} else {
pn(" c.urlParams_.Set(%q, fmt.Sprint(%v))", arg.apiname, arg.goname)
}
}
continue
}
if arg.gotype == "[]string" {
pn(" c.%s = append([]string{}, %s...)", arg.goname, arg.goname) // Make a copy of the []string.
continue
}
pn(" c.%s = %s", arg.goname, arg.goname)
}
pn(" return c")
pn("}")
for _, opt := range meth.OptParams() {
if opt.p.Location != "query" {
panicf("optional parameter has unsupported location %q", opt.p.Location)
}
setter := initialCap(opt.p.Name)
des := opt.p.Description
des = strings.Replace(des, "Optional.", "", 1)
des = strings.TrimSpace(des)
p("\n%s", asComment("", fmt.Sprintf("%s sets the optional parameter %q: %s", setter, opt.p.Name, des)))
addFieldValueComments(p, opt, "", true)
np := new(namePool)
np.Get("c") // take the receiver's name
paramName := np.Get(validGoIdentifer(opt.p.Name))
typePrefix := ""
if opt.p.Repeated {
typePrefix = "..."
}
pn("func (c *%s) %s(%s %s%s) *%s {", callName, setter, paramName, typePrefix, opt.GoType(), callName)
if opt.p.Repeated {
if opt.GoType() == "string" {
pn("c.urlParams_.SetMulti(%q, append([]string{}, %v...))", opt.p.Name, paramName)
} else {
tmpVar := convertMultiParams(a, paramName)
pn(" c.urlParams_.SetMulti(%q, %v)", opt.p.Name, tmpVar)
}
} else {
if opt.GoType() == "string" {
pn("c.urlParams_.Set(%q, %v)", opt.p.Name, paramName)
} else {
pn("c.urlParams_.Set(%q, fmt.Sprint(%v))", opt.p.Name, paramName)
}
}
pn("return c")
pn("}")
}
if meth.supportsMediaUpload() {
comment := "Media specifies the media to upload in one or more chunks. " +
"The chunk size may be controlled by supplying a MediaOption generated by googleapi.ChunkSize. " +
"The chunk size defaults to googleapi.DefaultUploadChunkSize." +
"The Content-Type header used in the upload request will be determined by sniffing the contents of r, " +
"unless a MediaOption generated by googleapi.ContentType is supplied." +
"\nAt most one of Media and ResumableMedia may be set."
// TODO(mcgreevy): Ensure that r is always closed before Do returns, and document this.
// See comments on https://code-review.googlesource.com/#/c/3970/
p("\n%s", asComment("", comment))
pn("func (c *%s) Media(r io.Reader, options ...googleapi.MediaOption) *%s {", callName, callName)
// We check if the body arg, if any, has a content type and apply it here.
// In practice, this only happens for the storage API today.
// TODO(djd): check if we can cope with the developer setting the body's Content-Type field
// after they've made this call.
if ba := args.bodyArg(); ba != nil {
if ba.schema.HasContentType() {
pn(" if ct := c.%s.ContentType; ct != \"\" {", ba.goname)
pn(" options = append([]googleapi.MediaOption{googleapi.ContentType(ct)}, options...)")
pn(" }")
}
}
pn(" c.mediaInfo_ = gensupport.NewInfoFromMedia(r, options)")
pn(" return c")
pn("}")
comment = "ResumableMedia specifies the media to upload in chunks and can be canceled with ctx. " +
"\n\nDeprecated: use Media instead." +
"\n\nAt most one of Media and ResumableMedia may be set. " +
`mediaType identifies the MIME media type of the upload, such as "image/png". ` +
`If mediaType is "", it will be auto-detected. ` +
`The provided ctx will supersede any context previously provided to ` +
`the Context method.`
p("\n%s", asComment("", comment))
pn("func (c *%s) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *%s {", callName, callName)
pn(" c.ctx_ = ctx")
pn(" c.mediaInfo_ = gensupport.NewInfoFromResumableMedia(r, size, mediaType)")
pn(" return c")
pn("}")
comment = "ProgressUpdater provides a callback function that will be called after every chunk. " +
"It should be a low-latency function in order to not slow down the upload operation. " +
"This should only be called when using ResumableMedia (as opposed to Media)."
p("\n%s", asComment("", comment))
pn("func (c *%s) ProgressUpdater(pu googleapi.ProgressUpdater) *%s {", callName, callName)
pn(`c.mediaInfo_.SetProgressUpdater(pu)`)
pn("return c")
pn("}")
}
comment := "Fields allows partial responses to be retrieved. " +
"See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse " +
"for more information."
p("\n%s", asComment("", comment))
pn("func (c *%s) Fields(s ...googleapi.Field) *%s {", callName, callName)
pn(`c.urlParams_.Set("fields", googleapi.CombineFields(s))`)
pn("return c")
pn("}")
if httpMethod == "GET" {
// Note that non-GET responses are excluded from supporting If-None-Match.
// See https://github.com/google/google-api-go-client/issues/107 for more info.
comment := "IfNoneMatch sets the optional parameter which makes the operation fail if " +
"the object's ETag matches the given value. This is useful for getting updates " +
"only after the object has changed since the last request. " +
"Use googleapi.IsNotModified to check whether the response error from Do " +
"is the result of In-None-Match."
p("\n%s", asComment("", comment))
pn("func (c *%s) IfNoneMatch(entityTag string) *%s {", callName, callName)
pn(" c.ifNoneMatch_ = entityTag")
pn(" return c")
pn("}")
}
doMethod := "Do method"
if meth.supportsMediaDownload() {
doMethod = "Do and Download methods"
}
commentFmtStr := "Context sets the context to be used in this call's %s. " +
"Any pending HTTP request will be aborted if the provided context is canceled."
comment = fmt.Sprintf(commentFmtStr, doMethod)
p("\n%s", asComment("", comment))
if meth.supportsMediaUpload() {
comment = "This context will supersede any context previously provided to " +
"the ResumableMedia method."
p("%s", asComment("", comment))
}
pn("func (c *%s) Context(ctx context.Context) *%s {", callName, callName)
pn(`c.ctx_ = ctx`)
pn("return c")
pn("}")
comment = "Header returns an http.Header that can be modified by the caller to add " +
"HTTP headers to the request."
p("\n%s", asComment("", comment))
pn("func (c *%s) Header() http.Header {", callName)
pn(" if c.header_ == nil {")
pn(" c.header_ = make(http.Header)")
pn(" }")
pn(" return c.header_")
pn("}")
pn("\nfunc (c *%s) doRequest(alt string) (*http.Response, error) {", callName)
pn(`reqHeaders := make(http.Header)`)
pn(`reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/%s")`, version.Repo)
pn("for k, v := range c.header_ {")
pn(" reqHeaders[k] = v")
pn("}")
pn(`reqHeaders.Set("User-Agent",c.s.userAgent())`)
if httpMethod == "GET" {
pn(`if c.ifNoneMatch_ != "" {`)
pn(` reqHeaders.Set("If-None-Match", c.ifNoneMatch_)`)
pn("}")
}
pn("var body io.Reader = nil")
if meth.IsRawRequest() {
pn("body = c.body_")
} else {
if ba := args.bodyArg(); ba != nil && httpMethod != "GET" {
if meth.m.ID == "ml.projects.predict" {
// TODO(cbro): move ML API to rawHTTP (it will be a breaking change)
// Skip JSONReader for APIs that require clients to pass in JSON already.
pn("body = strings.NewReader(c.%s.HttpBody.Data)", ba.goname)
} else {
style := "WithoutDataWrapper"
if a.needsDataWrapper() {
style = "WithDataWrapper"
}
pn("body, err := googleapi.%s.JSONReader(c.%s)", style, ba.goname)
pn("if err != nil { return nil, err }")
}
pn(`reqHeaders.Set("Content-Type", "application/json")`)
}
pn(`c.urlParams_.Set("alt", alt)`)
pn(`c.urlParams_.Set("prettyPrint", "false")`)
}
pn("urls := googleapi.ResolveRelative(c.s.BasePath, %q)", meth.m.Path)
if meth.supportsMediaUpload() {
pn("if c.mediaInfo_ != nil {")
pn(" urls = googleapi.ResolveRelative(c.s.BasePath, %q)", meth.mediaUploadPath())
pn(` c.urlParams_.Set("uploadType", c.mediaInfo_.UploadType())`)
pn("}")
pn("if body == nil {")
pn(" body = new(bytes.Buffer)")
pn(` reqHeaders.Set("Content-Type", "application/json")`)
pn("}")
pn("body, getBody, cleanup := c.mediaInfo_.UploadRequest(reqHeaders, body)")
pn("defer cleanup()")
}
pn(`urls += "?" + c.urlParams_.Encode()`)
pn("req, err := http.NewRequest(%q, urls, body)", httpMethod)
pn("if err != nil { return nil, err }")
pn("req.Header = reqHeaders")
if meth.supportsMediaUpload() {
pn("req.GetBody = getBody")
}
// Replace param values after NewRequest to avoid reencoding them.
// E.g. Cloud Storage API requires '%2F' in entity param to be kept, but url.Parse replaces it with '/'.
argsForLocation := args.forLocation("path")
if len(argsForLocation) > 0 {
pn(`googleapi.Expand(req.URL, map[string]string{`)
for _, arg := range argsForLocation {
pn(`"%s": %s,`, arg.apiname, arg.exprAsString("c."))
}
pn(`})`)
}
pn("return gensupport.SendRequest(c.ctx_, c.s.client, req)")
pn("}")
if meth.supportsMediaDownload() {
pn("\n// Download fetches the API endpoint's \"media\" value, instead of the normal")
pn("// API response value. If the returned error is nil, the Response is guaranteed to")
pn("// have a 2xx status code. Callers must close the Response.Body as usual.")
pn("func (c *%s) Download(opts ...googleapi.CallOption) (*http.Response, error) {", callName)
pn(`gensupport.SetOptions(c.urlParams_, opts...)`)
pn(`res, err := c.doRequest("media")`)
pn("if err != nil { return nil, err }")
pn("if err := googleapi.CheckMediaResponse(res); err != nil {")
pn("res.Body.Close()")
pn("return nil, err")
pn("}")
pn("return res, nil")
pn("}")
}
mapRetType := strings.HasPrefix(retTypeComma, "map[")
pn("\n// Do executes the %q call.", meth.m.ID)
if retTypeComma != "" && !mapRetType && !meth.IsRawResponse() {
commentFmtStr := "Exactly one of %v or error will be non-nil. " +
"Any non-2xx status code is an error. " +
"Response headers are in either %v.ServerResponse.Header " +
"or (if a response was returned at all) in error.(*googleapi.Error).Header. " +
"Use googleapi.IsNotModified to check whether the returned error was because " +
"http.StatusNotModified was returned."
comment := fmt.Sprintf(commentFmtStr, retType, retType)
p("%s", asComment("", comment))
}
pn("func (c *%s) Do(opts ...googleapi.CallOption) (%serror) {", callName, retTypeComma)
nilRet := ""
if retTypeComma != "" {
nilRet = "nil, "
}
pn(`gensupport.SetOptions(c.urlParams_, opts...)`)
if meth.IsRawResponse() {
pn(`return c.doRequest("")`)
} else {
pn(`res, err := c.doRequest("json")`)
if retTypeComma != "" && !mapRetType {
pn("if res != nil && res.StatusCode == http.StatusNotModified {")
pn(" if res.Body != nil { res.Body.Close() }")
pn(" return nil, &googleapi.Error{")
pn(" Code: res.StatusCode,")
pn(" Header: res.Header,")
pn(" }")
pn("}")
}
pn("if err != nil { return %serr }", nilRet)
pn("defer googleapi.CloseBody(res)")
pn("if err := googleapi.CheckResponse(res); err != nil { return %serr }", nilRet)
if meth.supportsMediaUpload() {
pn(`rx := c.mediaInfo_.ResumableUpload(res.Header.Get("Location"))`)
pn("if rx != nil {")
pn(" rx.Client = c.s.client")
pn(" rx.UserAgent = c.s.userAgent()")
pn(" ctx := c.ctx_")
pn(" if ctx == nil {")
// TODO(mcgreevy): Require context when calling Media, or Do.
pn(" ctx = context.TODO()")
pn(" }")
pn(" res, err = rx.Upload(ctx)")
pn(" if err != nil { return %serr }", nilRet)
pn(" defer res.Body.Close()")
pn(" if err := googleapi.CheckResponse(res); err != nil { return %serr }", nilRet)
pn("}")
}
if retTypeComma == "" {
pn("return nil")
} else {
if mapRetType {
pn("var ret %s", responseType(a, meth.m))
} else {
pn("ret := &%s{", responseTypeLiteral(a, meth.m))
pn(" ServerResponse: googleapi.ServerResponse{")
pn(" Header: res.Header,")
pn(" HTTPStatusCode: res.StatusCode,")
pn(" },")
pn("}")
}
if a.needsDataWrapper() {
pn("target := &struct {")
pn(" Data %s `json:\"data\"`", responseType(a, meth.m))
pn("}{ret}")
} else {
pn("target := &ret")
}
if meth.m.ID == "ml.projects.predict" {
pn("var b bytes.Buffer")
pn("if _, err := io.Copy(&b, res.Body); err != nil { return nil, err }")
pn("if err := res.Body.Close(); err != nil { return nil, err }")
pn("if err := json.NewDecoder(bytes.NewReader(b.Bytes())).Decode(target); err != nil { return nil, err }")
pn("ret.Data = b.String()")
} else {
pn("if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err }")
}
pn("return ret, nil")
}
}
bs, err := json.MarshalIndent(meth.m.JSONMap, "\t// ", " ")
if err != nil {
panic(err)
}
pn("// %s\n", string(bs))
pn("}")
if ptg, rname, ok := meth.supportsPaging(); ok {
// We can assume retType is non-empty.
pn("")
pn("// Pages invokes f for each page of results.")
pn("// A non-nil error returned from f will halt the iteration.")
pn("// The provided context supersedes any context provided to the Context method.")
pn("func (c *%s) Pages(ctx context.Context, f func(%s) error) error {", callName, retType)
pn(" c.ctx_ = ctx")
pn(` defer %s // reset paging to original point`, ptg.genDeferBody())
pn(" for {")
pn(" x, err := c.Do()")
pn(" if err != nil { return err }")
pn(" if err := f(x); err != nil { return err }")
pn(` if x.%s == "" { return nil }`, rname)
pn(ptg.genSet("x." + rname))
pn(" }")
pn("}")
}
}
// A Field provides methods that describe the characteristics of a Param or Property.
type Field interface {
Default() string
Enum() ([]string, bool)
EnumDescriptions() []string
UnfortunateDefault() bool
}
type Param struct {
method *Method
p *disco.Parameter
callFieldName string // empty means to use the default
}
func (p *Param) Default() string {
return p.p.Default
}
func (p *Param) Enum() ([]string, bool) {
if e := p.p.Enums; e != nil {
return e, true
}
return nil, false
}
func (p *Param) EnumDescriptions() []string {
return p.p.EnumDescriptions
}
func (p *Param) UnfortunateDefault() bool {
// We do not do anything special for Params with unfortunate defaults.
return false
}
func (p *Param) GoType() string {
typ, format := p.p.Type, p.p.Format
if typ == "string" && strings.Contains(format, "int") && p.p.Location != "query" {
panic("unexpected int parameter encoded as string, not in query: " + p.p.Name)
}
t, ok := simpleTypeConvert(typ, format)
if !ok {
panic("failed to convert parameter type " + fmt.Sprintf("type=%q, format=%q", typ, format))
}
return t
}
// goCallFieldName returns the name of this parameter's field in a
// method's "Call" struct.
func (p *Param) goCallFieldName() string {
if p.callFieldName != "" {
return p.callFieldName
}
return validGoIdentifer(p.p.Name)
}
// APIMethods returns top-level ("API-level") methods. They don't have an associated resource.
func (a *API) APIMethods() []*Method {
meths := []*Method{}
for _, m := range a.doc.Methods {
meths = append(meths, &Method{
api: a,
r: nil, // to be explicit
m: m,
})
}
return meths
}
func resolveRelative(basestr, relstr string) string {
u, err := url.Parse(basestr)
if err != nil {
panicf("Error parsing base URL %q: %v", basestr, err)
}
rel, err := url.Parse(relstr)
if err != nil {
panicf("Error parsing relative URL %q: %v", relstr, err)
}
u = u.ResolveReference(rel)
return u.String()
}
func (meth *Method) IsRawRequest() bool {
if meth.m.Request == nil {
return false
}
// TODO(cbro): enable across other APIs.
if meth.api.Name != "healthcare" {
return false
}
return meth.m.Request.Ref == "HttpBody"
}
func (meth *Method) IsRawResponse() bool {
if meth.m.Response == nil {
return false
}
if meth.IsRawRequest() {
// always match raw requests with raw responses.
return true
}
// TODO(cbro): enable across other APIs.
if meth.api.Name != "healthcare" {
return false
}
return meth.m.Response.Ref == "HttpBody"
}
func (meth *Method) NewArguments() *arguments {
args := &arguments{
method: meth,
m: make(map[string]*argument),
}
pnames := meth.m.ParameterOrder
if len(pnames) == 0 {
// No parameterOrder; collect required parameters and sort by name.
for _, reqParam := range meth.grepParams(func(p *Param) bool { return p.p.Required }) {
pnames = append(pnames, reqParam.p.Name)
}
sort.Strings(pnames)
}
for _, pname := range pnames {
arg := meth.NewArg(pname, meth.NamedParam(pname))
args.AddArg(arg)
}
if rs := meth.m.Request; rs != nil {
if meth.IsRawRequest() {
args.AddArg(&argument{
goname: "body_",
gotype: "io.Reader",
})
} else {
args.AddArg(meth.NewBodyArg(rs))
}
}
return args
}
func (meth *Method) NewBodyArg(ds *disco.Schema) *argument {
s := meth.api.schemaNamed(ds.RefSchema.Name)
return &argument{
goname: validGoIdentifer(strings.ToLower(ds.Ref)),
apiname: "REQUEST",
gotype: "*" + s.GoName(),
apitype: ds.Ref,
location: "body",
schema: s,
}
}
func (meth *Method) NewArg(apiname string, p *Param) *argument {
apitype := p.p.Type
des := p.p.Description
goname := validGoIdentifer(apiname) // but might be changed later, if conflicts
if strings.Contains(des, "identifier") && !strings.HasSuffix(strings.ToLower(goname), "id") {
goname += "id" // yay
p.callFieldName = goname
}
gotype := mustSimpleTypeConvert(apitype, p.p.Format)
if p.p.Repeated {
gotype = "[]" + gotype
}
return &argument{
apiname: apiname,
apitype: apitype,
goname: goname,
gotype: gotype,
location: p.p.Location,
}
}
type argument struct {
method *Method
schema *Schema // Set if location == "body".
apiname, apitype string
goname, gotype string
location string // "path", "query", "body"
}
func (a *argument) String() string {
return a.goname + " " + a.gotype
}
func (a *argument) exprAsString(prefix string) string {
switch a.gotype {
case "[]string":
log.Printf("TODO(bradfitz): only including the first parameter in path query.")
return prefix + a.goname + `[0]`
case "string":
return prefix + a.goname
case "integer", "int64":
return "strconv.FormatInt(" + prefix + a.goname + ", 10)"
case "uint64":
return "strconv.FormatUint(" + prefix + a.goname + ", 10)"
case "bool":
return "strconv.FormatBool(" + prefix + a.goname + ")"
}
log.Panicf("unknown type: apitype=%q, gotype=%q", a.apitype, a.gotype)
return ""
}
// arguments are the arguments that a method takes
type arguments struct {
l []*argument
m map[string]*argument
method *Method
}
func (args *arguments) forLocation(loc string) []*argument {
matches := make([]*argument, 0)
for _, arg := range args.l {
if arg.location == loc {
matches = append(matches, arg)
}
}
return matches
}
func (args *arguments) bodyArg() *argument {
for _, arg := range args.l {
if arg.location == "body" {
return arg
}
}
return nil
}
func (args *arguments) AddArg(arg *argument) {
n := 1
oname := arg.goname
for {
_, present := args.m[arg.goname]
if !present {
args.m[arg.goname] = arg
args.l = append(args.l, arg)
return
}
n++
arg.goname = fmt.Sprintf("%s%d", oname, n)
}
}
func (a *arguments) String() string {
var buf bytes.Buffer
for i, arg := range a.l {
if i != 0 {
buf.Write([]byte(", "))
}
buf.Write([]byte(arg.String()))
}
return buf.String()
}
var urlRE = regexp.MustCompile(`^http\S+$`)
func asComment(pfx, c string) string {
var buf bytes.Buffer
const maxLen = 70
r := strings.NewReplacer(
"\n", "\n"+pfx+"// ",
"`\"", `"`,
"\"`", `"`,
)
for len(c) > 0 {
line := c
if len(line) < maxLen {
fmt.Fprintf(&buf, "%s// %s\n", pfx, r.Replace(line))
break
}
// Don't break URLs.
if !urlRE.MatchString(line[:maxLen]) {
line = line[:maxLen]
}
si := strings.LastIndex(line, " ")
if nl := strings.Index(line, "\n"); nl != -1 && nl < si {
si = nl
}
if si != -1 {
line = line[:si]
}
fmt.Fprintf(&buf, "%s// %s\n", pfx, r.Replace(line))
c = c[len(line):]
if si != -1 {
c = c[1:]
}
}
return buf.String()
}
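// Illustrative sketch (not part of the generator): for short input the helper
// above emits a single prefixed comment line,
//
//	asComment("\t", "Possible values:")  // -> "\t// Possible values:\n"
//
// while input longer than 70 characters is wrapped onto further "// " lines,
// breaking at the last space (or an earlier newline) and avoiding splits
// inside URLs.
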
func simpleTypeConvert(apiType, format string) (gotype string, ok bool) {
// From http://tools.ietf.org/html/draft-zyp-json-schema-03#section-5.1
switch apiType {
case "boolean":
gotype = "bool"
case "string":
gotype = "string"
switch format {
case "int64", "uint64", "int32", "uint32":
gotype = format
}
case "number":
gotype = "float64"
case "integer":
gotype = "int64"
case "any":
gotype = "interface{}"
}
return gotype, gotype != ""
}
func mustSimpleTypeConvert(apiType, format string) string {
if gotype, ok := simpleTypeConvert(apiType, format); ok {
return gotype
}
panic(fmt.Sprintf("failed to simpleTypeConvert(%q, %q)", apiType, format))
}
func responseType(api *API, m *disco.Method) string {
if m.Response == nil {
return ""
}
ref := m.Response.Ref
if ref != "" {
if s := api.schemas[ref]; s != nil {
return s.GoReturnType()
}
return "*" + ref
}
return ""
}
// Strips the leading '*' from a type name so that it can be used to create a literal.
func responseTypeLiteral(api *API, m *disco.Method) string {
v := responseType(api, m)
if strings.HasPrefix(v, "*") {
return v[1:]
}
return v
}
// initialCap returns the identifier with a leading capital letter.
// It also maps "foo-bar" to "FooBar".
func initialCap(ident string) string {
if ident == "" {
panic("blank identifier")
}
return depunct(ident, true)
}
func validGoIdentifer(ident string) string {
id := depunct(ident, false)
switch id {
case "break", "default", "func", "interface", "select",
"case", "defer", "go", "map", "struct",
"chan", "else", "goto", "package", "switch",
"const", "fallthrough", "if", "range", "type",
"continue", "for", "import", "return", "var":
return id + "_"
}
return id
}
// depunct removes '-', '.', '$', '/', '_' from identifiers, making the
// following character uppercase. Multiple '_' are preserved.
func depunct(ident string, needCap bool) string {
var buf bytes.Buffer
preserve_ := false
for i, c := range ident {
if c == '_' {
if preserve_ || strings.HasPrefix(ident[i:], "__") {
preserve_ = true
} else {
needCap = true
continue
}
} else {
preserve_ = false
}
if c == '-' || c == '.' || c == '$' || c == '/' {
needCap = true
continue
}
if needCap {
c = unicode.ToUpper(c)
needCap = false
}
buf.WriteByte(byte(c))
}
return buf.String()
}
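// Illustrative sketch (not part of the generator): expected outputs of the
// identifier helpers above for a few hypothetical inputs.
//
//	initialCap("foo-bar")            // "FooBar"  ('-' dropped, next rune upcased)
//	initialCap("foo_bar")            // "FooBar"  (a single '_' is dropped)
//	depunct("foo__bar", true)        // "Foo__bar" (runs of '_' are preserved)
//	validGoIdentifer("type")         // "type_"   (Go keywords get a trailing '_')
//	validGoIdentifer("max-results")  // "maxResults"
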
func addFieldValueComments(p func(format string, args ...interface{}), field Field, indent string, blankLine bool) {
var lines []string
if enum, ok := field.Enum(); ok {
desc := field.EnumDescriptions()
lines = append(lines, asComment(indent, "Possible values:"))
defval := field.Default()
for i, v := range enum {
more := ""
if v == defval {
more = " (default)"
}
if len(desc) > i && desc[i] != "" {
more = more + " - " + desc[i]
}
lines = append(lines, asComment(indent, ` "`+v+`"`+more))
}
} else if field.UnfortunateDefault() {
lines = append(lines, asComment("\t", fmt.Sprintf("Default: %s", field.Default())))
}
if blankLine && len(lines) > 0 {
p(indent + "//\n")
}
for _, l := range lines {
p("%s", l)
}
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
metricbeat/module/mysql/testing.go
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package mysql
import (
"os"
"github.com/go-sql-driver/mysql"
)
// Helper functions for testing used in the mysql MetricSets.
// GetMySQLEnvDSN returns the MySQL server DSN to use for testing. It
// reads the value from the MYSQL_DSN environment variable and returns
// root@tcp(127.0.0.1:3306)/ if it is not set.
func GetMySQLEnvDSN() string {
dsn := os.Getenv("MYSQL_DSN")
if len(dsn) == 0 {
c := mysql.NewConfig()
c.Net = "tcp"
c.Addr = "127.0.0.1:3306"
c.User = "root"
c.Passwd = "test"
dsn = c.FormatDSN()
}
return dsn
}
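// Hypothetical usage sketch (not part of the original file): a MetricSet test
// in this module would typically feed the DSN into its test configuration; the
// "status" metricset shown here is only an assumed example.
//
//	config := map[string]interface{}{
//		"module":     "mysql",
//		"metricsets": []string{"status"},
//		"hosts":      []string{GetMySQLEnvDSN()},
//	}
//
// Pointing the suite at a non-default server is then just a matter of
// exporting MYSQL_DSN, e.g. MYSQL_DSN="root:test@tcp(10.0.0.5:3306)/" go test ./...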
|
[
"\"MYSQL_DSN\""
] |
[] |
[
"MYSQL_DSN"
] |
[]
|
["MYSQL_DSN"]
|
go
| 1 | 0 | |
pymsa/util/tool.py
|
import os
import subprocess
from abc import abstractmethod, ABC
from pathlib import Path
class Tool(ABC):
def __init__(self, exe: str, full_name: str, exe_path: str):
self.exe = exe
self.full_name = full_name
self.exe_path = exe_path
def run(self, parameters: dict):
if self._exe_exists():
command = self._create_command(parameters)
return self.run_command(command)
else:
            raise Exception('{0} executable could not be found on path {1}'.format(self.exe, self.exe_path))
@abstractmethod
def run_command(self, command):
pass
def _create_command(self, parameters: dict) -> str:
return self.exe + ''.join(' {} {} '.format(key, val) for key, val in parameters.items())
def _exe_exists(self) -> bool:
return Path(self.exe_path).is_file()
class StrikeEx(Tool):
def __init__(self, exe_path: str = '/usr/local/bin/strike'):
super(StrikeEx, self).__init__('strike', 'Single structure induced evaluation', exe_path)
def run_command(self, command) -> float:
bytess = subprocess.check_output(command, shell=True, env=os.environ.copy())
return float("".join(map(chr, bytess)).split('\n')[-2])
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
envconfig.go
|
// Copyright (c) 2013 Kelsey Hightower. All rights reserved.
// Use of this source code is governed by the MIT License that can be found in
// the LICENSE file.
package envconfig
import (
"encoding"
"errors"
"fmt"
"os"
"reflect"
"regexp"
"strconv"
"strings"
"time"
)
// ErrInvalidSpecification indicates that a specification is of the wrong type.
var ErrInvalidSpecification = errors.New("specification must be a struct pointer")
var gatherRegexp = regexp.MustCompile("([^A-Z]+|[A-Z][^A-Z]+|[A-Z]+)")
// A ParseError occurs when an environment variable cannot be converted to
// the type required by a struct field during assignment.
type ParseError struct {
KeyName string
FieldName string
TypeName string
Value string
Err error
}
// Decoder has the same semantics as Setter, but takes higher precedence.
// It is provided for historical compatibility.
type Decoder interface {
Decode(value string) error
}
// Setter is implemented by types that can self-deserialize values.
// Any type that implements flag.Value also implements Setter.
type Setter interface {
Set(value string) error
}
func (e *ParseError) Error() string {
return fmt.Sprintf("envconfig.Process: assigning %[1]s to %[2]s: converting '%[3]s' to type %[4]s. details: %[5]s", e.KeyName, e.FieldName, e.Value, e.TypeName, e.Err)
}
// varInfo maintains information about the configuration variable
type varInfo struct {
Name string
Alt []string
Key string
Field reflect.Value
Tags reflect.StructTag
}
// GatherInfo gathers information about the specified struct
func gatherInfo(prefix string, spec interface{}) ([]varInfo, error) {
s := reflect.ValueOf(spec)
if s.Kind() != reflect.Ptr {
return nil, ErrInvalidSpecification
}
s = s.Elem()
if s.Kind() != reflect.Struct {
return nil, ErrInvalidSpecification
}
typeOfSpec := s.Type()
	// over-allocate an info array; we will extend it later if needed
infos := make([]varInfo, 0, s.NumField())
for i := 0; i < s.NumField(); i++ {
f := s.Field(i)
ftype := typeOfSpec.Field(i)
if !f.CanSet() || isTrue(ftype.Tag.Get("ignored")) {
continue
}
for f.Kind() == reflect.Ptr {
if f.IsNil() {
if f.Type().Elem().Kind() != reflect.Struct {
// nil pointer to a non-struct: leave it alone
break
}
// nil pointer to struct: create a zero instance
f.Set(reflect.New(f.Type().Elem()))
}
f = f.Elem()
}
// Capture information about the config variable
info := varInfo{
Name: ftype.Name,
Field: f,
Tags: ftype.Tag,
Alt: generateAlternatives(strings.ToUpper(ftype.Tag.Get("envconfig")), ftype.Name),
}
// Default to the field name as the env var name (will be upcased)
info.Key = info.Name
// Best effort to un-pick camel casing as separate words
if isTrue(ftype.Tag.Get("split_words")) {
words := gatherRegexp.FindAllStringSubmatch(ftype.Name, -1)
if len(words) > 0 {
var name []string
for _, words := range words {
name = append(name, words[0])
}
info.Key = strings.Join(name, "_")
}
}
if info.Alt[0] != "" {
info.Key = info.Alt[0]
} else {
info.Alt = generateAlternatives(strings.ToUpper(info.Key), ftype.Name)
}
if prefix != "" {
info.Key = fmt.Sprintf("%s_%s", prefix, info.Key)
info.Alt = generateAlternatives(strings.ToUpper(info.Key), ftype.Name)
}
info.Key = strings.ToUpper(info.Key)
infos = append(infos, info)
if f.Kind() == reflect.Struct {
// honor Decode if present
if decoderFrom(f) == nil && setterFrom(f) == nil && textUnmarshaler(f) == nil && binaryUnmarshaler(f) == nil {
innerPrefix := prefix
if !ftype.Anonymous {
innerPrefix = info.Key
}
embeddedPtr := f.Addr().Interface()
embeddedInfos, err := gatherInfo(innerPrefix, embeddedPtr)
if err != nil {
return nil, err
}
infos = append(infos[:len(infos)-1], embeddedInfos...)
continue
}
}
}
return infos, nil
}
func generateAlternatives(matrice, name string) []string {
alts := []string{matrice}
split := strings.Split(matrice, "_")
for i := 1; i < len(split); i++ {
alt := strings.Join(split[i:], "_")
if alt == name {
break
}
alts = append(alts, alt)
}
return alts
}
// CheckDisallowed checks that no environment variables with the prefix are set
// that we don't know how to parse or don't want to parse. This is likely only
// meaningful with a non-empty prefix.
func CheckDisallowed(prefix string, spec interface{}) error {
infos, err := gatherInfo(prefix, spec)
if err != nil {
return err
}
vars := make(map[string]struct{})
for _, info := range infos {
vars[info.Key] = struct{}{}
}
if prefix != "" {
prefix = strings.ToUpper(prefix) + "_"
}
for _, env := range os.Environ() {
if !strings.HasPrefix(env, prefix) {
continue
}
v := strings.SplitN(env, "=", 2)[0]
if _, found := vars[v]; !found {
return fmt.Errorf("unknown environment variable %s", v)
}
}
return nil
}
// Process populates the specified struct based on environment variables
func Process(prefix string, spec interface{}) error {
infos, err := gatherInfo(prefix, spec)
for _, info := range infos {
// `os.Getenv` cannot differentiate between an explicitly set empty value
// and an unset value. `os.LookupEnv` is preferred to `syscall.Getenv`,
// but it is only available in go1.5 or newer. We're using Go build tags
// here to use os.LookupEnv for >=go1.5
value, ok := lookupEnv(info.Key)
if !ok {
for _, alt := range info.Alt {
value, ok = lookupEnv(alt)
if ok {
break
}
}
}
def := info.Tags.Get("default")
if def != "" && !ok {
value = def
}
req := info.Tags.Get("required")
if !ok && def == "" {
if isTrue(req) {
return fmt.Errorf("required key %s missing value", info.Key)
}
continue
}
err = processField(value, info.Field)
if err != nil {
return &ParseError{
KeyName: info.Key,
FieldName: info.Name,
TypeName: info.Field.Type().String(),
Value: value,
Err: err,
}
}
}
return err
}
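// Usage sketch (not part of this file): a typical specification struct and
// call. The "myapp" prefix and the field names are examples only.
//
//	type Specification struct {
//		Debug     bool `default:"false"`
//		Port      int  `required:"true"`
//		RateLimit int  `split_words:"true"`
//	}
//
//	var s Specification
//	if err := envconfig.Process("myapp", &s); err != nil {
//		log.Fatal(err)
//	}
//
// Port is then read from MYAPP_PORT (falling back to PORT via the generated
// alternatives) and RateLimit from MYAPP_RATE_LIMIT.
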
// MustProcess is the same as Process but panics if an error occurs
func MustProcess(prefix string, spec interface{}) {
if err := Process(prefix, spec); err != nil {
panic(err)
}
}
func processField(value string, field reflect.Value) error {
typ := field.Type()
decoder := decoderFrom(field)
if decoder != nil {
return decoder.Decode(value)
}
// look for Set method if Decode not defined
setter := setterFrom(field)
if setter != nil {
return setter.Set(value)
}
if t := textUnmarshaler(field); t != nil {
return t.UnmarshalText([]byte(value))
}
if b := binaryUnmarshaler(field); b != nil {
return b.UnmarshalBinary([]byte(value))
}
if typ.Kind() == reflect.Ptr {
typ = typ.Elem()
if field.IsNil() {
field.Set(reflect.New(typ))
}
field = field.Elem()
}
switch typ.Kind() {
case reflect.String:
field.SetString(value)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
var (
val int64
err error
)
if field.Kind() == reflect.Int64 && typ.PkgPath() == "time" && typ.Name() == "Duration" {
var d time.Duration
d, err = time.ParseDuration(value)
val = int64(d)
} else {
val, err = strconv.ParseInt(value, 0, typ.Bits())
}
if err != nil {
return err
}
field.SetInt(val)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
val, err := strconv.ParseUint(value, 0, typ.Bits())
if err != nil {
return err
}
field.SetUint(val)
case reflect.Bool:
val, err := strconv.ParseBool(value)
if err != nil {
return err
}
field.SetBool(val)
case reflect.Float32, reflect.Float64:
val, err := strconv.ParseFloat(value, typ.Bits())
if err != nil {
return err
}
field.SetFloat(val)
case reflect.Slice:
vals := strings.Split(value, ",")
sl := reflect.MakeSlice(typ, len(vals), len(vals))
for i, val := range vals {
err := processField(val, sl.Index(i))
if err != nil {
return err
}
}
field.Set(sl)
case reflect.Map:
mp := reflect.MakeMap(typ)
if len(strings.TrimSpace(value)) != 0 {
pairs := strings.Split(value, ",")
for _, pair := range pairs {
kvpair := strings.Split(pair, ":")
if len(kvpair) != 2 {
return fmt.Errorf("invalid map item: %q", pair)
}
k := reflect.New(typ.Key()).Elem()
err := processField(kvpair[0], k)
if err != nil {
return err
}
v := reflect.New(typ.Elem()).Elem()
err = processField(kvpair[1], v)
if err != nil {
return err
}
mp.SetMapIndex(k, v)
}
}
field.Set(mp)
}
return nil
}
func interfaceFrom(field reflect.Value, fn func(interface{}, *bool)) {
// it may be impossible for a struct field to fail this check
if !field.CanInterface() {
return
}
var ok bool
fn(field.Interface(), &ok)
if !ok && field.CanAddr() {
fn(field.Addr().Interface(), &ok)
}
}
func decoderFrom(field reflect.Value) (d Decoder) {
interfaceFrom(field, func(v interface{}, ok *bool) { d, *ok = v.(Decoder) })
return d
}
func setterFrom(field reflect.Value) (s Setter) {
interfaceFrom(field, func(v interface{}, ok *bool) { s, *ok = v.(Setter) })
return s
}
func textUnmarshaler(field reflect.Value) (t encoding.TextUnmarshaler) {
interfaceFrom(field, func(v interface{}, ok *bool) { t, *ok = v.(encoding.TextUnmarshaler) })
return t
}
func binaryUnmarshaler(field reflect.Value) (b encoding.BinaryUnmarshaler) {
interfaceFrom(field, func(v interface{}, ok *bool) { b, *ok = v.(encoding.BinaryUnmarshaler) })
return b
}
func isTrue(s string) bool {
b, _ := strconv.ParseBool(s)
return b
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| 0 | 0 | |
pkg/utils/string/strings.go
|
package stringutils
import (
"unicode"
"github.com/gojek/stevedore/pkg/utils/map"
)
// Contains checks if the given string array contains given string
func Contains(items []string, input string) bool {
for _, item := range items {
if item == input {
return true
}
}
return false
}
// Expand replaces ${var} in the string based on the mapping function. Unlike
// os.Expand, the mapping function also receives a bool that is true when the
// variable is embedded in surrounding non-space text (interpolated) rather
// than standing alone, so callers can treat the two cases differently.
func Expand(s string, mapping func(string, bool) string) string {
var buf []byte
// ${} is all ASCII, so bytes are fine for this operation.
i := 0
for j := 0; j < len(s); j++ {
		if j+2 < len(s) && s[j] == '$' && s[j+1] == '{' { // check bounds before indexing s[j+1]
if buf == nil {
buf = make([]byte, 0, 2*len(s))
}
buf = append(buf, s[i:j]...)
name, w := getShellName(s[j+1:])
if name == "" && w > 0 {
// Encountered invalid syntax; eat the
// characters.
} else if name == "" {
// Valid syntax, but $ was not followed by a
// name. Leave the dollar character untouched.
buf = append(buf, s[j])
} else {
if (j != 0 && !unicode.IsSpace(rune(s[j-1]))) || (j+w+1 < len(s) && !unicode.IsSpace(rune(s[j+w+1]))) {
// interpolated variable
buf = append(buf, mapping(name, true)...)
} else {
buf = append(buf, mapping(name, false)...)
}
}
j += w
i = j + 1
}
}
if buf == nil {
return s
}
return string(buf) + s[i:]
}
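// Usage sketch (not part of this file): the mapping callback receives the
// variable name plus a bool reporting whether the variable is glued to
// surrounding non-space text, letting callers treat interpolated values
// differently. With the hypothetical mapping below,
//
//	out := Expand("img: repo/${TAG} env: ${ENV}", func(name string, interpolated bool) string {
//		if interpolated {
//			return "v1" // embedded in a larger token
//		}
//		return "production" // standalone occurrence
//	})
//
// out becomes "img: repo/v1 env: production".
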
// getShellName returns the name that begins the string and the number of bytes
// consumed to extract it. If the name is enclosed in {}, it's part of a ${}
// expansion and two more bytes are needed than the length of the name.
func getShellName(s string) (string, int) {
switch {
case s[0] == '{':
if len(s) > 2 && isShellSpecialVar(s[1]) && s[2] == '}' {
return s[1:2], 3
}
// Scan to closing brace
for i := 1; i < len(s); i++ {
if s[i] == '}' {
if i == 1 {
return "", 2 // Bad syntax; eat "${}"
}
return s[1:i], i + 1
}
}
return "", 1 // Bad syntax; eat "${"
case isShellSpecialVar(s[0]):
return s[0:1], 1
}
// Scan alphanumerics.
var i int
for i = 0; i < len(s) && isAlphaNum(s[i]); i++ {
}
return s[:i], i
}
// isShellSpecialVar reports whether the character identifies a special
// shell variable such as $*.
func isShellSpecialVar(c uint8) bool {
switch c {
case '*', '#', '$', '@', '!', '?', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
return true
}
return false
}
// isAlphaNum reports whether the byte is an ASCII letter, number, or underscore
func isAlphaNum(c uint8) bool {
return c == '_' || '0' <= c && c <= '9' || 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z'
}
// Unique removes the duplicates in the given array
func Unique(items []string) []string {
itemsMap := map[string]interface{}{}
for _, item := range items {
itemsMap[item] = true
}
return maputils.Keys(itemsMap)
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| 0 | 0 | |
vendor/github.com/openshift/installer/pkg/types/validation/installconfig.go
|
package validation
import (
"fmt"
"net"
"net/url"
"os"
"regexp"
"sort"
"strconv"
"strings"
dockerref "github.com/containers/image/docker/reference"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ssh"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation/field"
operv1 "github.com/openshift/api/operator/v1"
"github.com/openshift/installer/pkg/ipnet"
"github.com/openshift/installer/pkg/types"
"github.com/openshift/installer/pkg/types/alibabacloud"
alibabacloudvalidation "github.com/openshift/installer/pkg/types/alibabacloud/validation"
"github.com/openshift/installer/pkg/types/aws"
awsvalidation "github.com/openshift/installer/pkg/types/aws/validation"
"github.com/openshift/installer/pkg/types/azure"
azurevalidation "github.com/openshift/installer/pkg/types/azure/validation"
"github.com/openshift/installer/pkg/types/baremetal"
baremetalvalidation "github.com/openshift/installer/pkg/types/baremetal/validation"
"github.com/openshift/installer/pkg/types/gcp"
gcpvalidation "github.com/openshift/installer/pkg/types/gcp/validation"
"github.com/openshift/installer/pkg/types/ibmcloud"
ibmcloudvalidation "github.com/openshift/installer/pkg/types/ibmcloud/validation"
"github.com/openshift/installer/pkg/types/libvirt"
libvirtvalidation "github.com/openshift/installer/pkg/types/libvirt/validation"
"github.com/openshift/installer/pkg/types/openstack"
openstackvalidation "github.com/openshift/installer/pkg/types/openstack/validation"
"github.com/openshift/installer/pkg/types/ovirt"
ovirtvalidation "github.com/openshift/installer/pkg/types/ovirt/validation"
"github.com/openshift/installer/pkg/types/vsphere"
vspherevalidation "github.com/openshift/installer/pkg/types/vsphere/validation"
"github.com/openshift/installer/pkg/validate"
)
const (
masterPoolName = "master"
)
// list of known plugins that require hostPrefix to be set
var pluginsUsingHostPrefix = sets.NewString(string(operv1.NetworkTypeOpenShiftSDN), string(operv1.NetworkTypeOVNKubernetes))
// ValidateInstallConfig checks that the specified install config is valid.
func ValidateInstallConfig(c *types.InstallConfig) field.ErrorList {
allErrs := field.ErrorList{}
if c.TypeMeta.APIVersion == "" {
return field.ErrorList{field.Required(field.NewPath("apiVersion"), "install-config version required")}
}
switch v := c.APIVersion; v {
case types.InstallConfigVersion:
// Current version
default:
return field.ErrorList{field.Invalid(field.NewPath("apiVersion"), c.TypeMeta.APIVersion, fmt.Sprintf("install-config version must be %q", types.InstallConfigVersion))}
}
if c.SSHKey != "" {
if c.FIPS == true {
allErrs = append(allErrs, validateFIPSconfig(c)...)
} else {
if err := validate.SSHPublicKey(c.SSHKey); err != nil {
allErrs = append(allErrs, field.Invalid(field.NewPath("sshKey"), c.SSHKey, err.Error()))
}
}
}
if c.AdditionalTrustBundle != "" {
if err := validate.CABundle(c.AdditionalTrustBundle); err != nil {
allErrs = append(allErrs, field.Invalid(field.NewPath("additionalTrustBundle"), c.AdditionalTrustBundle, err.Error()))
}
}
nameErr := validate.ClusterName(c.ObjectMeta.Name)
if c.Platform.GCP != nil || c.Platform.Azure != nil {
nameErr = validate.ClusterName1035(c.ObjectMeta.Name)
}
if c.Platform.Ovirt != nil {
		// FIX-ME: As soon as bz#1915122 gets resolved, remove the 14-character limit on the cluster name
nameErr = validate.ClusterNameMaxLength(c.ObjectMeta.Name, 14)
}
if nameErr != nil {
allErrs = append(allErrs, field.Invalid(field.NewPath("metadata", "name"), c.ObjectMeta.Name, nameErr.Error()))
}
baseDomainErr := validate.DomainName(c.BaseDomain, true)
if baseDomainErr != nil {
allErrs = append(allErrs, field.Invalid(field.NewPath("baseDomain"), c.BaseDomain, baseDomainErr.Error()))
}
if nameErr == nil && baseDomainErr == nil {
clusterDomain := c.ClusterDomain()
if err := validate.DomainName(clusterDomain, true); err != nil {
allErrs = append(allErrs, field.Invalid(field.NewPath("baseDomain"), clusterDomain, err.Error()))
}
}
if c.Networking != nil {
allErrs = append(allErrs, validateNetworking(c.Networking, field.NewPath("networking"))...)
allErrs = append(allErrs, validateNetworkingIPVersion(c.Networking, &c.Platform)...)
allErrs = append(allErrs, validateNetworkingForPlatform(c.Networking, &c.Platform, field.NewPath("networking"))...)
} else {
allErrs = append(allErrs, field.Required(field.NewPath("networking"), "networking is required"))
}
allErrs = append(allErrs, validatePlatform(&c.Platform, field.NewPath("platform"), c.Networking, c)...)
if c.ControlPlane != nil {
allErrs = append(allErrs, validateControlPlane(&c.Platform, c.ControlPlane, field.NewPath("controlPlane"))...)
} else {
allErrs = append(allErrs, field.Required(field.NewPath("controlPlane"), "controlPlane is required"))
}
allErrs = append(allErrs, validateCompute(&c.Platform, c.ControlPlane, c.Compute, field.NewPath("compute"))...)
if err := validate.ImagePullSecret(c.PullSecret); err != nil {
allErrs = append(allErrs, field.Invalid(field.NewPath("pullSecret"), c.PullSecret, err.Error()))
}
if c.Proxy != nil {
allErrs = append(allErrs, validateProxy(c.Proxy, c, field.NewPath("proxy"))...)
}
allErrs = append(allErrs, validateImageContentSources(c.ImageContentSources, field.NewPath("imageContentSources"))...)
if _, ok := validPublishingStrategies[c.Publish]; !ok {
allErrs = append(allErrs, field.NotSupported(field.NewPath("publish"), c.Publish, validPublishingStrategyValues))
}
allErrs = append(allErrs, validateCloudCredentialsMode(c.CredentialsMode, field.NewPath("credentialsMode"), c.Platform)...)
if c.Publish == types.InternalPublishingStrategy {
switch platformName := c.Platform.Name(); platformName {
case aws.Name, azure.Name, gcp.Name, alibabacloud.Name:
default:
allErrs = append(allErrs, field.Invalid(field.NewPath("publish"), c.Publish, fmt.Sprintf("Internal publish strategy is not supported on %q platform", platformName)))
}
}
return allErrs
}
// ipAddressType indicates the address types provided for a given field
type ipAddressType struct {
IPv4 bool
IPv6 bool
Primary corev1.IPFamily
}
// ipAddressTypeByField is a map of field path to ipAddressType
type ipAddressTypeByField map[string]ipAddressType
// ipNetByField is a map of field path to the IPNets
type ipNetByField map[string][]ipnet.IPNet
// inferIPVersionFromInstallConfig infers the user's desired ip version from the networking config.
// Presence field names match the field path of the struct within the Networking type. This function
// assumes a valid install config.
func inferIPVersionFromInstallConfig(n *types.Networking) (hasIPv4, hasIPv6 bool, presence ipAddressTypeByField, addresses ipNetByField) {
if n == nil {
return
}
addresses = make(ipNetByField)
for _, network := range n.MachineNetwork {
addresses["machineNetwork"] = append(addresses["machineNetwork"], network.CIDR)
}
for _, network := range n.ServiceNetwork {
addresses["serviceNetwork"] = append(addresses["serviceNetwork"], network)
}
for _, network := range n.ClusterNetwork {
addresses["clusterNetwork"] = append(addresses["clusterNetwork"], network.CIDR)
}
presence = make(ipAddressTypeByField)
for k, ipnets := range addresses {
for i, ipnet := range ipnets {
has := presence[k]
if ipnet.IP.To4() != nil {
has.IPv4 = true
if i == 0 {
has.Primary = corev1.IPv4Protocol
}
if k == "serviceNetwork" {
hasIPv4 = true
}
} else {
has.IPv6 = true
if i == 0 {
has.Primary = corev1.IPv6Protocol
}
if k == "serviceNetwork" {
hasIPv6 = true
}
}
presence[k] = has
}
}
return
}
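// Illustrative sketch (not part of this file): for a hypothetical dual-stack
// config whose lists put IPv4 first, e.g.
//
//	machineNetwork: 10.0.0.0/16, fd00::/48
//	serviceNetwork: 172.30.0.0/16, fd02::/112
//
// the function above reports hasIPv4 and hasIPv6 as true (both are driven by
// serviceNetwork), and presence["machineNetwork"] and presence["serviceNetwork"]
// each have IPv4 and IPv6 set with Primary == corev1.IPv4Protocol, because the
// IPv4 entry comes first in each list.
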
func ipnetworksToStrings(networks []ipnet.IPNet) []string {
var diag []string
for _, sn := range networks {
diag = append(diag, sn.String())
}
return diag
}
// validateNetworkingIPVersion checks parameters for consistency when the user
// requests single-stack IPv6 or dual-stack modes.
func validateNetworkingIPVersion(n *types.Networking, p *types.Platform) field.ErrorList {
var allErrs field.ErrorList
hasIPv4, hasIPv6, presence, addresses := inferIPVersionFromInstallConfig(n)
switch {
case hasIPv4 && hasIPv6:
if n.NetworkType == string(operv1.NetworkTypeOpenShiftSDN) {
allErrs = append(allErrs, field.Invalid(field.NewPath("networking", "networkType"), n.NetworkType, "dual-stack IPv4/IPv6 is not supported for this networking plugin"))
}
if len(n.ServiceNetwork) != 2 {
allErrs = append(allErrs, field.Invalid(field.NewPath("networking", "serviceNetwork"), strings.Join(ipnetworksToStrings(n.ServiceNetwork), ", "), "when installing dual-stack IPv4/IPv6 you must provide two service networks, one for each IP address type"))
}
experimentalDualStackEnabled, _ := strconv.ParseBool(os.Getenv("OPENSHIFT_INSTALL_EXPERIMENTAL_DUAL_STACK"))
switch {
case p.Azure != nil && experimentalDualStackEnabled:
logrus.Warnf("Using experimental Azure dual-stack support")
case p.BareMetal != nil:
apiVIPIPFamily := corev1.IPv6Protocol
if net.ParseIP(p.BareMetal.APIVIP).To4() != nil {
apiVIPIPFamily = corev1.IPv4Protocol
}
if apiVIPIPFamily != presence["machineNetwork"].Primary {
allErrs = append(allErrs, field.Invalid(field.NewPath("networking", "baremetal", "apiVIP"), p.BareMetal.APIVIP, "VIP for the API must be of the same IP family with machine network's primary IP Family for dual-stack IPv4/IPv6"))
}
ingressVIPIPFamily := corev1.IPv6Protocol
if net.ParseIP(p.BareMetal.IngressVIP).To4() != nil {
ingressVIPIPFamily = corev1.IPv4Protocol
}
if ingressVIPIPFamily != presence["machineNetwork"].Primary {
allErrs = append(allErrs, field.Invalid(field.NewPath("networking", "baremetal", "ingressVIP"), p.BareMetal.IngressVIP, "VIP for the Ingress must be of the same IP family with machine network's primary IP Family for dual-stack IPv4/IPv6"))
}
case p.None != nil:
default:
allErrs = append(allErrs, field.Invalid(field.NewPath("networking"), "DualStack", "dual-stack IPv4/IPv6 is not supported for this platform, specify only one type of address"))
}
for k, v := range presence {
switch {
case v.IPv4 && !v.IPv6:
allErrs = append(allErrs, field.Invalid(field.NewPath("networking", k), strings.Join(ipnetworksToStrings(addresses[k]), ", "), "dual-stack IPv4/IPv6 requires an IPv6 network in this list"))
case !v.IPv4 && v.IPv6:
allErrs = append(allErrs, field.Invalid(field.NewPath("networking", k), strings.Join(ipnetworksToStrings(addresses[k]), ", "), "dual-stack IPv4/IPv6 requires an IPv4 network in this list"))
}
// FIXME: we should allow either all-networks-IPv4Primary or
// all-networks-IPv6Primary, but the latter currently causes
// confusing install failures, so block it.
if v.IPv4 && v.IPv6 && v.Primary != corev1.IPv4Protocol {
allErrs = append(allErrs, field.Invalid(field.NewPath("networking", k), strings.Join(ipnetworksToStrings(addresses[k]), ", "), "IPv4 addresses must be listed before IPv6 addresses"))
}
}
case hasIPv6:
if n.NetworkType == string(operv1.NetworkTypeOpenShiftSDN) {
allErrs = append(allErrs, field.Invalid(field.NewPath("networking", "networkType"), n.NetworkType, "IPv6 is not supported for this networking plugin"))
}
switch {
case p.BareMetal != nil:
case p.None != nil:
case p.Azure != nil && p.Azure.CloudName == azure.StackCloud:
allErrs = append(allErrs, field.Invalid(field.NewPath("networking"), "IPv6", "Azure Stack does not support IPv6"))
default:
allErrs = append(allErrs, field.Invalid(field.NewPath("networking"), "IPv6", "single-stack IPv6 is not supported for this platform"))
}
case hasIPv4:
if len(n.ServiceNetwork) > 1 {
allErrs = append(allErrs, field.Invalid(field.NewPath("networking", "serviceNetwork"), strings.Join(ipnetworksToStrings(n.ServiceNetwork), ", "), "only one service network can be specified"))
}
default:
// we should have a validation error for no specified machineNetwork, serviceNetwork, or clusterNetwork
}
return allErrs
}
func validateNetworking(n *types.Networking, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if n.NetworkType == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("networkType"), "network provider type required"))
}
if len(n.MachineNetwork) > 0 {
for i, network := range n.MachineNetwork {
if err := validate.SubnetCIDR(&network.CIDR.IPNet); err != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("machineNetwork").Index(i), network.CIDR.String(), err.Error()))
}
for j, subNetwork := range n.MachineNetwork[0:i] {
if validate.DoCIDRsOverlap(&network.CIDR.IPNet, &subNetwork.CIDR.IPNet) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("machineNetwork").Index(i), network.CIDR.String(), fmt.Sprintf("machine network must not overlap with machine network %d", j)))
}
}
}
} else {
allErrs = append(allErrs, field.Required(fldPath.Child("machineNetwork"), "at least one machine network is required"))
}
for i, sn := range n.ServiceNetwork {
if err := validate.ServiceSubnetCIDR(&sn.IPNet); err != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("serviceNetwork").Index(i), sn.String(), err.Error()))
}
for _, network := range n.MachineNetwork {
if validate.DoCIDRsOverlap(&sn.IPNet, &network.CIDR.IPNet) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("serviceNetwork").Index(i), sn.String(), "service network must not overlap with any of the machine networks"))
}
}
for j, snn := range n.ServiceNetwork[0:i] {
if validate.DoCIDRsOverlap(&sn.IPNet, &snn.IPNet) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("serviceNetwork").Index(i), sn.String(), fmt.Sprintf("service network must not overlap with service network %d", j)))
}
}
}
if len(n.ServiceNetwork) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("serviceNetwork"), "a service network is required"))
}
for i, cn := range n.ClusterNetwork {
allErrs = append(allErrs, validateClusterNetwork(n, &cn, i, fldPath.Child("clusterNetwork").Index(i))...)
}
if len(n.ClusterNetwork) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("clusterNetwork"), "cluster network required"))
}
return allErrs
}
func validateNetworkingForPlatform(n *types.Networking, platform *types.Platform, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
switch {
case platform.Libvirt != nil:
errMsg := "overlaps with default Docker Bridge subnet"
for idx, mn := range n.MachineNetwork {
if validate.DoCIDRsOverlap(&mn.CIDR.IPNet, validate.DockerBridgeCIDR) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("machineNewtork").Index(idx), mn.CIDR.String(), errMsg))
}
}
for idx, sn := range n.ServiceNetwork {
if validate.DoCIDRsOverlap(&sn.IPNet, validate.DockerBridgeCIDR) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("serviceNetwork").Index(idx), sn.String(), errMsg))
}
}
for idx, cn := range n.ClusterNetwork {
if validate.DoCIDRsOverlap(&cn.CIDR.IPNet, validate.DockerBridgeCIDR) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("clusterNetwork").Index(idx), cn.CIDR.String(), errMsg))
}
}
default:
warningMsgFmt := "%s: %s overlaps with default Docker Bridge subnet"
for idx, mn := range n.MachineNetwork {
if validate.DoCIDRsOverlap(&mn.CIDR.IPNet, validate.DockerBridgeCIDR) {
logrus.Warnf(warningMsgFmt, fldPath.Child("machineNetwork").Index(idx), mn.CIDR.String())
}
}
for idx, sn := range n.ServiceNetwork {
if validate.DoCIDRsOverlap(&sn.IPNet, validate.DockerBridgeCIDR) {
logrus.Warnf(warningMsgFmt, fldPath.Child("serviceNetwork").Index(idx), sn.String())
}
}
for idx, cn := range n.ClusterNetwork {
if validate.DoCIDRsOverlap(&cn.CIDR.IPNet, validate.DockerBridgeCIDR) {
logrus.Warnf(warningMsgFmt, fldPath.Child("clusterNetwork").Index(idx), cn.CIDR.String())
}
}
}
return allErrs
}
func validateClusterNetwork(n *types.Networking, cn *types.ClusterNetworkEntry, idx int, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if err := validate.SubnetCIDR(&cn.CIDR.IPNet); err != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("cidr"), cn.CIDR.IPNet.String(), err.Error()))
}
for _, network := range n.MachineNetwork {
if validate.DoCIDRsOverlap(&cn.CIDR.IPNet, &network.CIDR.IPNet) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("cidr"), cn.CIDR.String(), "cluster network must not overlap with any of the machine networks"))
}
}
for i, sn := range n.ServiceNetwork {
if validate.DoCIDRsOverlap(&cn.CIDR.IPNet, &sn.IPNet) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("cidr"), cn.CIDR.String(), fmt.Sprintf("cluster network must not overlap with service network %d", i)))
}
}
for i, acn := range n.ClusterNetwork[0:idx] {
if validate.DoCIDRsOverlap(&cn.CIDR.IPNet, &acn.CIDR.IPNet) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("cidr"), cn.CIDR.String(), fmt.Sprintf("cluster network must not overlap with cluster network %d", i)))
}
}
if cn.HostPrefix < 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("hostPrefix"), cn.HostPrefix, "hostPrefix must be positive"))
}
// ignore hostPrefix if the plugin does not use it and has it unset
if pluginsUsingHostPrefix.Has(n.NetworkType) || (cn.HostPrefix != 0) {
if ones, bits := cn.CIDR.Mask.Size(); cn.HostPrefix < int32(ones) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("hostPrefix"), cn.HostPrefix, "cluster network host subnetwork prefix must not be larger size than CIDR "+cn.CIDR.String()))
} else if bits == 128 && cn.HostPrefix != 64 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("hostPrefix"), cn.HostPrefix, "cluster network host subnetwork prefix must be 64 for IPv6 networks"))
}
}
return allErrs
}
func validateControlPlane(platform *types.Platform, pool *types.MachinePool, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if pool.Name != masterPoolName {
allErrs = append(allErrs, field.NotSupported(fldPath.Child("name"), pool.Name, []string{masterPoolName}))
}
if pool.Replicas != nil && *pool.Replicas == 0 {
allErrs = append(allErrs, field.Invalid(fldPath.Child("replicas"), pool.Replicas, "number of control plane replicas must be positive"))
}
allErrs = append(allErrs, ValidateMachinePool(platform, pool, fldPath)...)
return allErrs
}
func validateCompute(platform *types.Platform, control *types.MachinePool, pools []types.MachinePool, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
poolNames := map[string]bool{}
for i, p := range pools {
poolFldPath := fldPath.Index(i)
if p.Name != "worker" {
allErrs = append(allErrs, field.NotSupported(poolFldPath.Child("name"), p.Name, []string{"worker"}))
}
if poolNames[p.Name] {
allErrs = append(allErrs, field.Duplicate(poolFldPath.Child("name"), p.Name))
}
poolNames[p.Name] = true
if control != nil && control.Architecture != p.Architecture {
allErrs = append(allErrs, field.Invalid(poolFldPath.Child("architecture"), p.Architecture, "heteregeneous multi-arch is not supported; compute pool architecture must match control plane"))
}
allErrs = append(allErrs, ValidateMachinePool(platform, &p, poolFldPath)...)
}
return allErrs
}
func validatePlatform(platform *types.Platform, fldPath *field.Path, network *types.Networking, c *types.InstallConfig) field.ErrorList {
allErrs := field.ErrorList{}
activePlatform := platform.Name()
platforms := make([]string, len(types.PlatformNames))
copy(platforms, types.PlatformNames)
platforms = append(platforms, types.HiddenPlatformNames...)
sort.Strings(platforms)
i := sort.SearchStrings(platforms, activePlatform)
if i == len(platforms) || platforms[i] != activePlatform {
allErrs = append(allErrs, field.Invalid(fldPath, activePlatform, fmt.Sprintf("must specify one of the platforms (%s)", strings.Join(platforms, ", "))))
}
validate := func(n string, value interface{}, validation func(*field.Path) field.ErrorList) {
if n != activePlatform {
allErrs = append(allErrs, field.Invalid(fldPath, activePlatform, fmt.Sprintf("must only specify a single type of platform; cannot use both %q and %q", activePlatform, n)))
}
allErrs = append(allErrs, validation(fldPath.Child(n))...)
}
if platform.AlibabaCloud != nil {
validate(alibabacloud.Name, platform.AlibabaCloud, func(f *field.Path) field.ErrorList {
return alibabacloudvalidation.ValidatePlatform(platform.AlibabaCloud, network, f)
})
}
if platform.AWS != nil {
validate(aws.Name, platform.AWS, func(f *field.Path) field.ErrorList { return awsvalidation.ValidatePlatform(platform.AWS, f) })
}
if platform.Azure != nil {
validate(azure.Name, platform.Azure, func(f *field.Path) field.ErrorList {
return azurevalidation.ValidatePlatform(platform.Azure, c.Publish, f)
})
}
if platform.GCP != nil {
validate(gcp.Name, platform.GCP, func(f *field.Path) field.ErrorList { return gcpvalidation.ValidatePlatform(platform.GCP, f) })
}
if platform.IBMCloud != nil {
validate(ibmcloud.Name, platform.IBMCloud, func(f *field.Path) field.ErrorList { return ibmcloudvalidation.ValidatePlatform(platform.IBMCloud, f) })
}
if platform.Libvirt != nil {
validate(libvirt.Name, platform.Libvirt, func(f *field.Path) field.ErrorList { return libvirtvalidation.ValidatePlatform(platform.Libvirt, f) })
}
if platform.OpenStack != nil {
validate(openstack.Name, platform.OpenStack, func(f *field.Path) field.ErrorList {
return openstackvalidation.ValidatePlatform(platform.OpenStack, network, f, c)
})
}
if platform.VSphere != nil {
validate(vsphere.Name, platform.VSphere, func(f *field.Path) field.ErrorList { return vspherevalidation.ValidatePlatform(platform.VSphere, f) })
}
if platform.BareMetal != nil {
validate(baremetal.Name, platform.BareMetal, func(f *field.Path) field.ErrorList {
return baremetalvalidation.ValidatePlatform(platform.BareMetal, network, f, c)
})
}
if platform.Ovirt != nil {
validate(ovirt.Name, platform.Ovirt, func(f *field.Path) field.ErrorList {
return ovirtvalidation.ValidatePlatform(platform.Ovirt, f)
})
}
return allErrs
}
func validateProxy(p *types.Proxy, c *types.InstallConfig, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if p.HTTPProxy == "" && p.HTTPSProxy == "" {
allErrs = append(allErrs, field.Required(fldPath, "must include httpProxy or httpsProxy"))
}
if p.HTTPProxy != "" {
allErrs = append(allErrs, validateURI(p.HTTPProxy, fldPath.Child("httpProxy"), []string{"http"})...)
if c.Networking != nil {
allErrs = append(allErrs, validateIPProxy(p.HTTPProxy, c.Networking, fldPath.Child("httpProxy"))...)
}
}
if p.HTTPSProxy != "" {
allErrs = append(allErrs, validateURI(p.HTTPSProxy, fldPath.Child("httpsProxy"), []string{"http", "https"})...)
if c.Networking != nil {
allErrs = append(allErrs, validateIPProxy(p.HTTPSProxy, c.Networking, fldPath.Child("httpsProxy"))...)
}
}
if p.NoProxy != "" && p.NoProxy != "*" {
if strings.Contains(p.NoProxy, " ") {
allErrs = append(allErrs, field.Invalid(fldPath.Child("noProxy"), p.NoProxy, fmt.Sprintf("noProxy must not have spaces")))
}
for idx, v := range strings.Split(p.NoProxy, ",") {
v = strings.TrimSpace(v)
errDomain := validate.NoProxyDomainName(v)
_, _, errCIDR := net.ParseCIDR(v)
if errDomain != nil && errCIDR != nil {
allErrs = append(allErrs, field.Invalid(fldPath.Child("noProxy"), p.NoProxy, fmt.Sprintf(
"each element of noProxy must be a CIDR or domain without wildcard characters, which is violated by element %d %q", idx, v)))
}
}
}
return allErrs
}
func validateImageContentSources(groups []types.ImageContentSource, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
for gidx, group := range groups {
groupf := fldPath.Index(gidx)
if err := validateNamedRepository(group.Source); err != nil {
allErrs = append(allErrs, field.Invalid(groupf.Child("source"), group.Source, err.Error()))
}
for midx, mirror := range group.Mirrors {
if err := validateNamedRepository(mirror); err != nil {
allErrs = append(allErrs, field.Invalid(groupf.Child("mirrors").Index(midx), mirror, err.Error()))
continue
}
}
}
return allErrs
}
func validateNamedRepository(r string) error {
ref, err := dockerref.ParseNamed(r)
if err != nil {
// If a mirror name is provided without the named reference,
// then the name is not considered canonical and will cause
// an error. e.g. registry.lab.redhat.com:5000 will result
// in an error. Instead we will check whether the input is
// a valid hostname as a workaround.
if err == dockerref.ErrNameNotCanonical {
// If the hostname string contains a port, lets attempt
// to split them
host, _, err := net.SplitHostPort(r)
if err != nil {
host = r
}
if err = validate.Host(host); err != nil {
return errors.Wrap(err, "the repository provided is invalid")
}
return nil
}
return errors.Wrap(err, "failed to parse")
}
if !dockerref.IsNameOnly(ref) {
return errors.New("must be repository--not reference")
}
return nil
}
var (
validPublishingStrategies = map[types.PublishingStrategy]struct{}{
types.ExternalPublishingStrategy: {},
types.InternalPublishingStrategy: {},
}
validPublishingStrategyValues = func() []string {
v := make([]string, 0, len(validPublishingStrategies))
for m := range validPublishingStrategies {
v = append(v, string(m))
}
sort.Strings(v)
return v
}()
)
func validateCloudCredentialsMode(mode types.CredentialsMode, fldPath *field.Path, platform types.Platform) field.ErrorList {
if mode == "" {
return nil
}
allErrs := field.ErrorList{}
allowedAzureModes := []types.CredentialsMode{types.PassthroughCredentialsMode, types.ManualCredentialsMode}
if platform.Azure != nil && platform.Azure.CloudName == azure.StackCloud {
allowedAzureModes = []types.CredentialsMode{types.ManualCredentialsMode}
}
// validPlatformCredentialsModes is a map from the platform name to a slice of credentials modes that are valid
// for the platform. If a platform name is not in the map, then the credentials mode cannot be set for that platform.
validPlatformCredentialsModes := map[string][]types.CredentialsMode{
alibabacloud.Name: {types.ManualCredentialsMode},
aws.Name: {types.MintCredentialsMode, types.PassthroughCredentialsMode, types.ManualCredentialsMode},
azure.Name: allowedAzureModes,
gcp.Name: {types.MintCredentialsMode, types.PassthroughCredentialsMode, types.ManualCredentialsMode},
ibmcloud.Name: {types.ManualCredentialsMode},
}
if validModes, ok := validPlatformCredentialsModes[platform.Name()]; ok {
validModesSet := sets.NewString()
for _, m := range validModes {
validModesSet.Insert(string(m))
}
if !validModesSet.Has(string(mode)) {
allErrs = append(allErrs, field.NotSupported(fldPath, mode, validModesSet.List()))
}
} else {
allErrs = append(allErrs, field.Invalid(fldPath, mode, fmt.Sprintf("cannot be set when using the %q platform", platform.Name())))
}
return allErrs
}
// validateURI checks if the given url is of the right format. It also checks if the scheme of the uri
// provided is within the list of accepted schema provided as part of the input.
func validateURI(uri string, fldPath *field.Path, schemes []string) field.ErrorList {
parsed, err := url.ParseRequestURI(uri)
if err != nil {
return field.ErrorList{field.Invalid(fldPath, uri, err.Error())}
}
for _, scheme := range schemes {
if scheme == parsed.Scheme {
return nil
}
}
return field.ErrorList{field.NotSupported(fldPath, parsed.Scheme, schemes)}
}
// validateIPProxy checks if the given proxy string is an IP and if so checks the service and
// cluster networks and returns error if the IP belongs in them. Returns nil if the proxy is
// not an IP address.
func validateIPProxy(proxy string, n *types.Networking, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
parsed, err := url.ParseRequestURI(proxy)
if err != nil {
return allErrs
}
proxyIP := net.ParseIP(parsed.Hostname())
if proxyIP == nil {
return nil
}
for _, network := range n.ClusterNetwork {
if network.CIDR.Contains(proxyIP) {
allErrs = append(allErrs, field.Invalid(fldPath, proxy, "proxy value is part of the cluster networks"))
break
}
}
for _, network := range n.ServiceNetwork {
if network.Contains(proxyIP) {
allErrs = append(allErrs, field.Invalid(fldPath, proxy, "proxy value is part of the service networks"))
break
}
}
return allErrs
}
// validateFIPSconfig checks if the current install-config is compatible with FIPS standards
// and returns an error if it's not the case. As of this writing, only rsa or ecdsa algorithms are supported
// for ssh keys on FIPS.
func validateFIPSconfig(c *types.InstallConfig) field.ErrorList {
allErrs := field.ErrorList{}
sshParsedKey, _, _, _, err := ssh.ParseAuthorizedKey([]byte(c.SSHKey))
if err != nil {
allErrs = append(allErrs, field.Invalid(field.NewPath("sshKey"), c.SSHKey, fmt.Sprintf("Fatal error trying to parse configured public key: %s", err)))
} else {
sshKeyType := sshParsedKey.Type()
re := regexp.MustCompile(`^ecdsa-sha2-nistp\d{3}$|^ssh-rsa$`)
if !re.MatchString(sshKeyType) {
allErrs = append(allErrs, field.Invalid(field.NewPath("sshKey"), c.SSHKey, fmt.Sprintf("SSH key type %s unavailable when FIPS is enabled. Please use rsa or ecdsa.", sshKeyType)))
}
}
return allErrs
}
|
[
"\"OPENSHIFT_INSTALL_EXPERIMENTAL_DUAL_STACK\""
] |
[] |
[
"OPENSHIFT_INSTALL_EXPERIMENTAL_DUAL_STACK"
] |
[]
|
["OPENSHIFT_INSTALL_EXPERIMENTAL_DUAL_STACK"]
|
go
| 1 | 0 | |
meetings/management/commands/check_examination.py
|
import logging
import os
import sys
from bilibili_api.user import get_videos_g
from django.core.management.base import BaseCommand
from meetings.models import Record
from obs import ObsClient
logger = logging.getLogger('log')
class Command(BaseCommand):
def handle(self, *args, **options):
        uid = int(os.getenv('BILI_UID', '') or 0)  # avoid int('') crashing when BILI_UID is unset
if not uid:
logger.error('uid is required')
sys.exit(1)
videos = get_videos_g(uid=uid)
        # bvids of all videos that passed review on Bilibili
        bvs = [x['bvid'] for x in videos]
        logger.info('bvids of all approved Bilibili videos: {}'.format(bvs))
        logger.info('number of approved Bilibili videos: {}'.format(len(bvs)))
access_key_id = os.getenv('ACCESS_KEY_ID', '')
secret_access_key = os.getenv('SECRET_ACCESS_KEY', '')
endpoint = os.getenv('OBS_ENDPOINT', '')
bucketName = os.getenv('OBS_BUCKETNAME', '')
if not access_key_id or not secret_access_key or not endpoint or not bucketName:
            logger.error('missing required arguments for ObsClient')
sys.exit(1)
obs_client = ObsClient(access_key_id=access_key_id,
secret_access_key=secret_access_key,
server='https://{}'.format(endpoint))
bili_mids = [int(x.mid) for x in Record.objects.filter(platform='bilibili', url__isnull=True)]
        logger.info('mids of all meetings not yet uploaded to Bilibili: {}'.format(bili_mids))
all_bili_mids = [int(x.mid) for x in Record.objects.filter(platform='bilibili')]
for mid in all_bili_mids:
obs_record = Record.objects.get(mid=mid, platform='obs')
url = obs_record.url
object_key = url.split('/', 3)[-1]
            # fetch the object's metadata
metadata = obs_client.getObjectMetadata(bucketName, object_key)
metadata_dict = {x: y for x, y in metadata['header']}
if 'bvid' not in metadata_dict.keys():
                logger.info('meeting {}: not uploaded to Bilibili, skipping'.format(mid))
else:
                logger.info('meeting {}: bvid is {}'.format(mid, metadata_dict['bvid']))
if metadata_dict['bvid'] not in bvs:
                    logger.info('meeting {}: uploaded to Bilibili but not yet approved'.format(mid))
else:
bili_url = 'https://www.bilibili.com/{}'.format(metadata_dict['bvid'])
Record.objects.filter(mid=mid, platform='bilibili').update(url=bili_url)
                    logger.info('meeting {}: approved on Bilibili, refreshing playback URL'.format(mid))
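# Hypothetical invocation sketch (not part of the original file): as a Django
# management command this is normally run with the required environment
# variables exported first; every value below is a placeholder.
#
#   export BILI_UID=123456
#   export ACCESS_KEY_ID=xxx SECRET_ACCESS_KEY=yyy
#   export OBS_ENDPOINT=obs.example.com OBS_BUCKETNAME=meeting-videos
#   python manage.py check_examination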
|
[] |
[] |
[
"OBS_ENDPOINT",
"BILI_UID",
"SECRET_ACCESS_KEY",
"OBS_BUCKETNAME",
"ACCESS_KEY_ID"
] |
[]
|
["OBS_ENDPOINT", "BILI_UID", "SECRET_ACCESS_KEY", "OBS_BUCKETNAME", "ACCESS_KEY_ID"]
|
python
| 5 | 0 | |
test-features.py
|
#!/usr/bin/env python
# Copyright (c) 2014 Alain Martin
import argparse
import ast
import os
import re
import subprocess
import sys
import tempfile
FEATURE_EXT = '.feature'
REPO_ROOT = os.path.dirname(os.path.abspath(__file__))
def compiler_arg_choices():
compilers_dir = os.path.join(REPO_ROOT, 'compilers')
return [os.path.basename(file_name)
for file_name in os.listdir(compilers_dir)]
def parse_args():
def existing_dir_or_file(path):
if not os.path.exists(path):
message = 'No such file or directory %s' % os.path.abspath(path)
raise argparse.ArgumentTypeError(message)
return path
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('-c', '--compiler',
required=True,
choices=compiler_arg_choices())
arg_parser.add_argument('input_path',
type=existing_dir_or_file,
nargs='?',
default=os.path.curdir)
return arg_parser.parse_args(sys.argv[1:])
class Compiler(object):
@staticmethod
def from_file(compiler_file_path):
with open(compiler_file_path, 'r') as compiler_file:
settings = ast.literal_eval(compiler_file.read())
return Compiler(
settings['exe'], settings['options'], settings['env'])
def __init__(self, exe, options, env):
self.exe = exe
self.options = options
self.env = env
def call_env(self):
call_env = os.environ.copy()
for key in self.env:
if key not in call_env:
call_env[key] = ''
for path in self.env[key]:
call_env[key] += os.pathsep + path
return call_env
def compile(self, source_file_path):
compiler_cmd = [self.exe, source_file_path] + self.options
call_env = self.call_env()
return_code = 0
output = ''
try:
output = subprocess.check_output(
compiler_cmd, env=call_env, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as error:
return_code = error.returncode
output = error.output
return return_code, output
class Status(object):
ERROR = 'ERROR'
PASSED = 'PASSED'
FAILED = 'FAILED'
def get_return_type(result):
if result is None:
return 'DNC'
if result in ('true', 'false'):
return 'Boolean'
if result.isdigit():
return 'Integer'
if result.startswith('TypeList<'):
return 'TypeList'
if len(result) == 1:
return 'Type'
class Feature(object):
def __init__(self, line, name, has_arguments, return_type):
self.line = line
self.name = name
self.has_arguments = has_arguments is not None
self.return_type = return_type
@staticmethod
def from_declaration(line):
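# A feature declaration has the form "Feature<Args> -> ReturnType";
# the angle-bracket argument list is optional.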
feature_declaration_regex = re.compile(r'^(.+?)(?:<(.*)>)? -> (.+)$')
match = feature_declaration_regex.search(line)
if match:
name, has_arguments, return_type = match.groups()
return Feature(line, name, has_arguments, return_type)
def run_test(self, feature_test, compiler):
if (self.name != feature_test.feature_name
or self.has_arguments != (feature_test.arguments is not None)
or (self.return_type != get_return_type(feature_test.result)
and feature_test.result is not None)):
print '[ %-6s ] %s\ndoes not match %s' % (
'ERROR', feature_test.line, self.line)
return Status.ERROR
return feature_test.run(self, compiler)
test_code_skeleton = '''
#include "KL/TypeList.hpp"
#include <type_traits>
using A = {A};
using B = {B};
using C = {C};
using namespace KL;
class Test{feature_name}
{{
void test()
{{
{result_type} Result = TypeList<{pack}>::{feature_name}{arguments};
static_assert({assertion}, "!");
}}
}};
'''
def get_result_type(return_type):
if return_type in ('Boolean', 'Integer'):
return 'const auto'
if return_type in ('Type', 'TypeList'):
return 'using'
def get_assertion(return_type, result):
if result is None:
return 'true'
if return_type in ('Boolean', 'Integer'):
return '%s == Result' % result
if return_type in ('TypeList', 'Type'):
return 'std::is_same<%s, Result>::value' % result
class FeatureTest(object):
def __init__(self, line, feature_name, pack, arguments, result):
self.line = line
self.feature_name = feature_name
self.pack = pack
self.arguments = arguments
self.result = result
@staticmethod
def from_declaration(line):
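# A feature test has the form "TypeList<pack>::Feature<Args> == result"
# or "TypeList<pack>::Feature<Args> NOT COMPILE".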
feature_test_declaration_regex = re.compile(
r'^TypeList<(.*)>::(.+?)(?:<(.*)>)?'
r' (?:NOT COMPILE|== (.+))$')
match = feature_test_declaration_regex.search(line)
if match:
pack, feature_name, arguments, result = match.groups()
return FeatureTest(line, feature_name, pack, arguments, result)
def run(self, feature, compiler):
arguments = ''
if feature.has_arguments:
arguments += '<' + self.arguments + '>'
if feature.return_type in ('Boolean', 'Integer'):
arguments += '::value'
test_code = test_code_skeleton.format(
feature_name=feature.name,
result_type=get_result_type(feature.return_type),
pack=self.pack,
arguments=arguments,
assertion=get_assertion(feature.return_type, self.result),
A='void',
B='bool',
C='char',
)
temp_file_descriptor = None
temp_file_path = None
temp_file = None
return_code = None
try:
temp_file_descriptor, temp_file_path = tempfile.mkstemp(
suffix='.cpp')
temp_file = os.fdopen(temp_file_descriptor, 'w')
temp_file.write(test_code)
temp_file.close()
return_code, output = compiler.compile(temp_file_path)
finally:
if temp_file:
temp_file.close()
elif temp_file_descriptor:
os.close(temp_file_descriptor)
if temp_file_path:
os.remove(temp_file_path)
if return_code is not None:
if (return_code == 0) == (self.result is not None):
print '[ %-6s ] %s' % ('PASS', self.line)
return Status.PASSED
else:
print '[ %-6s ] %s' % ('FAIL!', self.line)
print output
return Status.FAILED
return Status.ERROR
def test_feature_file(feature_file_path, compiler):
feature = None
status = []
with open(feature_file_path, 'r') as feature_file:
for line in feature_file:
if not line.isspace():
line = line.rstrip()
if not feature:
feature = Feature.from_declaration(line)
if feature:
print '[--------] %s' % feature.line
else:
print 'Failed to parse feature "%s" in %s' % (
line, feature_file_path)
return [Status.ERROR]
else:
test = FeatureTest.from_declaration(line)
if test:
status.append(feature.run_test(test, compiler))
else:
print 'Failed to parse feature test "%s" in %s' % (
line, feature_file_path)
status.append(Status.ERROR)
print ('[--------] %s passed' % status.count(Status.PASSED)
+ ', %s failed' % status.count(Status.FAILED)
+ ', %s errored\n' % status.count(Status.ERROR))
return status
def find_feature_files(path):
if os.path.isfile(path) and os.path.splitext(path)[1] == FEATURE_EXT:
yield path
return
for root, _, file_names in os.walk(path):
for file_name in file_names:
file_path = os.path.join(root, file_name)
if os.path.splitext(file_path)[1] == FEATURE_EXT:
yield file_path
def test_features(compiler, input_path):
compiler_file_path = os.path.join(REPO_ROOT, 'compilers', compiler)
compiler = Compiler.from_file(compiler_file_path)
feature_files = find_feature_files(input_path)
status = []
for feature_file_path in feature_files:
status += test_feature_file(feature_file_path, compiler)
print '[ TOTAL ] %s error%s, %s failed test%s, %s passed test%s' % (
status.count(Status.ERROR), 's'[status.count(Status.ERROR) == 1:],
status.count(Status.FAILED), 's'[status.count(Status.FAILED) == 1:],
status.count(Status.PASSED), 's'[status.count(Status.PASSED) == 1:])
return 1 if Status.ERROR in status else status.count(Status.FAILED)
if __name__ == '__main__':
sys.exit(test_features(**vars(parse_args())))
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
repository/badger/db.go
|
package badger
import (
"fmt"
"os"
"runtime"
badger "github.com/dgraph-io/badger/v3"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
type DB struct {
DB *badger.DB
userCounter prometheus.Counter
}
func NewDB() (*DB, func(), error) {
return newDB(os.Getenv("BADGER_DIR"), "webshell")
}
func newDB(dir string, prometheusNameSpace string) (*DB, func(), error) {
db, err := badger.Open(badger.DefaultOptions(dir))
if err != nil {
return nil, nil, fmt.Errorf("failed to open db file: %w", err)
}
userCounter := promauto.NewCounter(prometheus.CounterOpts{
Namespace: prometheusNameSpace,
Name: "user_counter",
})
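// Count the entries already stored so the user counter starts at the number
// of users persisted before this process started.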
err = db.View(func(txn *badger.Txn) error {
opts := badger.DefaultIteratorOptions
opts.PrefetchSize = runtime.NumCPU()
it := txn.NewIterator(opts)
defer it.Close()
for it.Rewind(); it.Valid(); it.Next() {
userCounter.Inc()
}
return nil
})
if err != nil {
return nil, nil, fmt.Errorf("failed in transaction: %w", err)
}
return &DB{
DB: db,
userCounter: userCounter,
}, func() { db.Close() }, nil
}
|
[
"\"BADGER_DIR\""
] |
[] |
[
"BADGER_DIR"
] |
[]
|
["BADGER_DIR"]
|
go
| 1 | 0 | |
internal/envvars/env.go
|
package envvars
import "os"
// ForceVerify indicates that all verifications should be performed, even if
// e.g. SkipBuggyChecks() says otherwise.
var ForceVerify bool = os.Getenv("THEMA_FORCEVERIFY") == "1"
|
[
"\"THEMA_FORCEVERIFY\""
] |
[] |
[
"THEMA_FORCEVERIFY"
] |
[]
|
["THEMA_FORCEVERIFY"]
|
go
| 1 | 0 | |
install-poetry.py
|
"""
This script will install Poetry and its dependencies.
It does, in order:
- Downloads the virtualenv package to a temporary directory and adds it to sys.path.
- Creates a virtual environment in the correct OS data dir which will be
- `%APPDATA%\\pypoetry` on Windows
- ~/Library/Application Support/pypoetry on MacOS
- `${XDG_DATA_HOME}/pypoetry` (or `~/.local/share/pypoetry` if it's not set) on UNIX systems
- In `${POETRY_HOME}` if it's set.
- Installs the latest or given version of Poetry inside this virtual environment.
- Installs a `poetry` script in the Python user directory (or `${POETRY_HOME}/bin` if `POETRY_HOME` is set).
"""
import argparse
import json
import os
import re
import shutil
import site
import subprocess
import sys
import tempfile
from contextlib import closing
from contextlib import contextmanager
from functools import cmp_to_key
from io import UnsupportedOperation
from pathlib import Path
from typing import Optional
from urllib.request import Request
from urllib.request import urlopen
SHELL = os.getenv("SHELL", "")
WINDOWS = sys.platform.startswith("win") or (sys.platform == "cli" and os.name == "nt")
MACOS = sys.platform == "darwin"
FOREGROUND_COLORS = {
"black": 30,
"red": 31,
"green": 32,
"yellow": 33,
"blue": 34,
"magenta": 35,
"cyan": 36,
"white": 37,
}
BACKGROUND_COLORS = {
"black": 40,
"red": 41,
"green": 42,
"yellow": 43,
"blue": 44,
"magenta": 45,
"cyan": 46,
"white": 47,
}
OPTIONS = {"bold": 1, "underscore": 4, "blink": 5, "reverse": 7, "conceal": 8}
def style(fg, bg, options):
codes = []
if fg:
codes.append(FOREGROUND_COLORS[fg])
if bg:
codes.append(BACKGROUND_COLORS[bg])
if options:
if not isinstance(options, (list, tuple)):
options = [options]
for option in options:
codes.append(OPTIONS[option])
return "\033[{}m".format(";".join(map(str, codes)))
STYLES = {
"info": style("cyan", None, None),
"comment": style("yellow", None, None),
"success": style("green", None, None),
"error": style("red", None, None),
"warning": style("yellow", None, None),
"b": style(None, None, ("bold",)),
}
def is_decorated():
if WINDOWS:
return (
os.getenv("ANSICON") is not None
or "ON" == os.getenv("ConEmuANSI")
or "xterm" == os.getenv("Term")
)
if not hasattr(sys.stdout, "fileno"):
return False
try:
return os.isatty(sys.stdout.fileno())
except UnsupportedOperation:
return False
def is_interactive():
if not hasattr(sys.stdin, "fileno"):
return False
try:
return os.isatty(sys.stdin.fileno())
except UnsupportedOperation:
return False
def colorize(style, text):
if not is_decorated():
return text
return "{}{}\033[0m".format(STYLES[style], text)
def string_to_bool(value):
value = value.lower()
return value in {"true", "1", "y", "yes"}
def data_dir(version: Optional[str] = None) -> Path:
if os.getenv("POETRY_HOME"):
return Path(os.getenv("POETRY_HOME")).expanduser()
if WINDOWS:
const = "CSIDL_APPDATA"
path = os.path.normpath(_get_win_folder(const))
path = os.path.join(path, "pypoetry")
elif MACOS:
path = os.path.expanduser("~/Library/Application Support/pypoetry")
else:
path = os.getenv("XDG_DATA_HOME", os.path.expanduser("~/.local/share"))
path = os.path.join(path, "pypoetry")
if version:
path = os.path.join(path, version)
return Path(path)
def bin_dir(version: Optional[str] = None) -> Path:
if os.getenv("POETRY_HOME"):
return Path(os.getenv("POETRY_HOME"), "bin").expanduser()
user_base = site.getuserbase()
if WINDOWS:
bin_dir = os.path.join(user_base, "Scripts")
else:
bin_dir = os.path.join(user_base, "bin")
return Path(bin_dir)
def _get_win_folder_from_registry(csidl_name):
import winreg as _winreg
shell_folder_name = {
"CSIDL_APPDATA": "AppData",
"CSIDL_COMMON_APPDATA": "Common AppData",
"CSIDL_LOCAL_APPDATA": "Local AppData",
}[csidl_name]
key = _winreg.OpenKey(
_winreg.HKEY_CURRENT_USER,
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders",
)
directory, _ = _winreg.QueryValueEx(key, shell_folder_name)
return directory
def _get_win_folder_with_ctypes(csidl_name):
import ctypes
csidl_const = {
"CSIDL_APPDATA": 26,
"CSIDL_COMMON_APPDATA": 35,
"CSIDL_LOCAL_APPDATA": 28,
}[csidl_name]
buf = ctypes.create_unicode_buffer(1024)
ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
# Downgrade to short path name if have highbit chars. See
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
has_high_char = False
for c in buf:
if ord(c) > 255:
has_high_char = True
break
if has_high_char:
buf2 = ctypes.create_unicode_buffer(1024)
if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
buf = buf2
return buf.value
if WINDOWS:
try:
from ctypes import windll # noqa
_get_win_folder = _get_win_folder_with_ctypes
except ImportError:
_get_win_folder = _get_win_folder_from_registry
@contextmanager
def temporary_directory(*args, **kwargs):
try:
from tempfile import TemporaryDirectory
except ImportError:
name = tempfile.mkdtemp(*args, **kwargs)
yield name
shutil.rmtree(name)
else:
with TemporaryDirectory(*args, **kwargs) as name:
yield name
PRE_MESSAGE = """# Welcome to {poetry}!
This will download and install the latest version of {poetry},
a dependency and package manager for Python.
It will add the `poetry` command to {poetry}'s bin directory, located at:
{poetry_home_bin}
You can uninstall at any time by executing this script with the --uninstall option,
and these changes will be reverted.
"""
POST_MESSAGE = """{poetry} ({version}) is installed now. Great!
You can test that everything is set up by executing:
`{test_command}`
"""
POST_MESSAGE_NOT_IN_PATH = """{poetry} ({version}) is installed now. Great!
To get started you need {poetry}'s bin directory ({poetry_home_bin}) in your `PATH`
environment variable.
{configure_message}
Alternatively, you can call {poetry} explicitly with `{poetry_executable}`.
You can test that everything is set up by executing:
`{test_command}`
"""
POST_MESSAGE_CONFIGURE_UNIX = """
Add `export PATH="{poetry_home_bin}:$PATH"` to your shell configuration file.
"""
POST_MESSAGE_CONFIGURE_FISH = """
You can execute `set -U fish_user_paths {poetry_home_bin} $fish_user_paths`
"""
POST_MESSAGE_CONFIGURE_WINDOWS = """"""
class Cursor:
def __init__(self) -> None:
self._output = sys.stdout
def move_up(self, lines: int = 1) -> "Cursor":
self._output.write("\x1b[{}A".format(lines))
return self
def move_down(self, lines: int = 1) -> "Cursor":
self._output.write("\x1b[{}B".format(lines))
return self
def move_right(self, columns: int = 1) -> "Cursor":
self._output.write("\x1b[{}C".format(columns))
return self
def move_left(self, columns: int = 1) -> "Cursor":
self._output.write("\x1b[{}D".format(columns))
return self
def move_to_column(self, column: int) -> "Cursor":
self._output.write("\x1b[{}G".format(column))
return self
def move_to_position(self, column: int, row: int) -> "Cursor":
self._output.write("\x1b[{};{}H".format(row + 1, column))
return self
def save_position(self) -> "Cursor":
self._output.write("\x1b7")
return self
def restore_position(self) -> "Cursor":
self._output.write("\x1b8")
return self
def hide(self) -> "Cursor":
self._output.write("\x1b[?25l")
return self
def show(self) -> "Cursor":
self._output.write("\x1b[?25h\x1b[?0c")
return self
def clear_line(self) -> "Cursor":
"""
Clears all the output from the current line.
"""
self._output.write("\x1b[2K")
return self
def clear_line_after(self) -> "Cursor":
"""
Clears all the output from the current line after the current position.
"""
self._output.write("\x1b[K")
return self
def clear_output(self) -> "Cursor":
"""
Clears all the output from the cursors' current position
to the end of the screen.
"""
self._output.write("\x1b[0J")
return self
def clear_screen(self) -> "Cursor":
"""
Clears the entire screen.
"""
self._output.write("\x1b[2J")
return self
class Installer:
METADATA_URL = "https://pypi.org/pypi/poetry/json"
VERSION_REGEX = re.compile(
r"v?(\d+)(?:\.(\d+))?(?:\.(\d+))?(?:\.(\d+))?"
"("
"[._-]?"
r"(?:(stable|beta|b|rc|RC|alpha|a|patch|pl|p)((?:[.-]?\d+)*)?)?"
"([.-]?dev)?"
")?"
r"(?:\+[^\s]+)?"
)
def __init__(
self,
version: Optional[str] = None,
preview: bool = False,
force: bool = False,
accept_all: bool = False,
git: Optional[str] = None,
path: Optional[str] = None,
) -> None:
self._version = version
self._preview = preview
self._force = force
self._accept_all = accept_all
self._git = git
self._path = path
self._data_dir = data_dir()
self._bin_dir = bin_dir()
self._cursor = Cursor()
def allows_prereleases(self) -> bool:
return self._preview
def run(self) -> int:
if self._git:
version = self._git
elif self._path:
version = self._path
else:
version, current_version = self.get_version()
if version is None:
return 0
self.display_pre_message()
self.ensure_directories()
def _is_self_upgrade_supported(x):
mx = self.VERSION_REGEX.match(x)
if mx is None:
# the version is not semver, perhaps scm or file, we assume upgrade is supported
return True
vx = tuple(int(p) for p in mx.groups()[:3]) + (mx.group(5),)
return vx >= (1, 1, 7)
if version and not _is_self_upgrade_supported(version):
self._write(
colorize(
"warning",
f"You are installing {version}. When using the current installer, this version does not support "
f"updating using the 'self update' command. Please use 1.1.7 or later.",
)
)
if not self._accept_all:
continue_install = input("Do you want to continue? ([y]/n) ") or "y"
if continue_install.lower() in {"n", "no"}:
return 0
try:
self.install(version)
except subprocess.CalledProcessError as e:
print(
colorize("error", f"\nAn error has occurred: {e}\n{e.stdout.decode()}")
)
return e.returncode
self._write("")
self.display_post_message(version)
return 0
def install(self, version, upgrade=False):
"""
Installs Poetry in $POETRY_HOME.
"""
self._write(
"Installing {} ({})".format(
colorize("info", "Poetry"), colorize("info", version)
)
)
env_path = self.make_env(version)
self.install_poetry(version, env_path)
self.make_bin(version)
self._overwrite(
"Installing {} ({}): {}".format(
colorize("info", "Poetry"),
colorize("b", version),
colorize("success", "Done"),
)
)
self._data_dir.joinpath("VERSION").write_text(version)
return 0
def uninstall(self) -> int:
if not self._data_dir.exists():
self._write(
"{} is not currently installed.".format(colorize("info", "Poetry"))
)
return 1
version = None
if self._data_dir.joinpath("VERSION").exists():
version = self._data_dir.joinpath("VERSION").read_text().strip()
if version:
self._write(
"Removing {} ({})".format(
colorize("info", "Poetry"), colorize("b", version)
)
)
else:
self._write("Removing {}".format(colorize("info", "Poetry")))
shutil.rmtree(str(self._data_dir))
for script in ["poetry", "poetry.bat"]:
if self._bin_dir.joinpath(script).exists():
self._bin_dir.joinpath(script).unlink()
return 0
def make_env(self, version: str) -> Path:
self._overwrite(
"Installing {} ({}): {}".format(
colorize("info", "Poetry"),
colorize("b", version),
colorize("comment", "Creating environment"),
)
)
env_path = self._data_dir.joinpath("venv")
with temporary_directory() as tmp_dir:
subprocess.call(
[sys.executable, "-m", "pip", "install", "virtualenv", "-t", tmp_dir],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
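# Make the freshly downloaded virtualenv package importable and use it to
# create Poetry's dedicated virtual environment.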
sys.path.insert(0, tmp_dir)
import virtualenv
virtualenv.cli_run([str(env_path), "--clear"])
# We add a special file so that Poetry can detect
# its own virtual environment
env_path.joinpath("poetry_env").touch()
return env_path
def make_bin(self, version: str) -> None:
self._overwrite(
"Installing {} ({}): {}".format(
colorize("info", "Poetry"),
colorize("b", version),
colorize("comment", "Creating script"),
)
)
self._bin_dir.mkdir(parents=True, exist_ok=True)
script = "poetry"
target_script = "venv/bin/poetry"
if WINDOWS:
script = "poetry.exe"
target_script = "venv/Scripts/poetry.exe"
if self._bin_dir.joinpath(script).exists():
self._bin_dir.joinpath(script).unlink()
try:
self._bin_dir.joinpath(script).symlink_to(
self._data_dir.joinpath(target_script)
)
except OSError:
# This can happen if the user
# does not have the correct permission on Windows
shutil.copy(
self._data_dir.joinpath(target_script), self._bin_dir.joinpath(script)
)
def install_poetry(self, version: str, env_path: Path) -> None:
self._overwrite(
"Installing {} ({}): {}".format(
colorize("info", "Poetry"),
colorize("b", version),
colorize("comment", "Installing Poetry"),
)
)
if WINDOWS:
python = env_path.joinpath("Scripts/python.exe")
else:
python = env_path.joinpath("bin/python")
if self._git:
specification = "git+" + version
elif self._path:
specification = version
else:
specification = f"poetry=={version}"
subprocess.run(
[str(python), "-m", "pip", "install", specification],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
check=True,
)
def display_pre_message(self) -> None:
kwargs = {
"poetry": colorize("info", "Poetry"),
"poetry_home_bin": colorize("comment", self._bin_dir),
}
self._write(PRE_MESSAGE.format(**kwargs))
def display_post_message(self, version: str) -> None:
if WINDOWS:
return self.display_post_message_windows(version)
if SHELL == "fish":
return self.display_post_message_fish(version)
return self.display_post_message_unix(version)
def display_post_message_windows(self, version: str) -> None:
path = self.get_windows_path_var()
message = POST_MESSAGE_NOT_IN_PATH
if path and str(self._bin_dir) in path:
message = POST_MESSAGE
self._write(
message.format(
poetry=colorize("info", "Poetry"),
version=colorize("b", version),
poetry_home_bin=colorize("comment", self._bin_dir),
poetry_executable=colorize("b", self._bin_dir.joinpath("poetry")),
configure_message=POST_MESSAGE_CONFIGURE_WINDOWS.format(
poetry_home_bin=colorize("comment", self._bin_dir)
),
test_command=colorize("b", "poetry --version"),
)
)
def get_windows_path_var(self) -> Optional[str]:
import winreg
with winreg.ConnectRegistry(None, winreg.HKEY_CURRENT_USER) as root:
with winreg.OpenKey(root, "Environment", 0, winreg.KEY_ALL_ACCESS) as key:
path, _ = winreg.QueryValueEx(key, "PATH")
return path
def display_post_message_fish(self, version: str) -> None:
fish_user_paths = subprocess.check_output(
["fish", "-c", "echo $fish_user_paths"]
).decode("utf-8")
message = POST_MESSAGE_NOT_IN_PATH
if fish_user_paths and str(self._bin_dir) in fish_user_paths:
message = POST_MESSAGE
self._write(
message.format(
poetry=colorize("info", "Poetry"),
version=colorize("b", version),
poetry_home_bin=colorize("comment", self._bin_dir),
poetry_executable=colorize("b", self._bin_dir.joinpath("poetry")),
configure_message=POST_MESSAGE_CONFIGURE_FISH.format(
poetry_home_bin=colorize("comment", self._bin_dir)
),
test_command=colorize("b", "poetry --version"),
)
)
def display_post_message_unix(self, version: str) -> None:
paths = os.getenv("PATH", "").split(":")
message = POST_MESSAGE_NOT_IN_PATH
if paths and str(self._bin_dir) in paths:
message = POST_MESSAGE
self._write(
message.format(
poetry=colorize("info", "Poetry"),
version=colorize("b", version),
poetry_home_bin=colorize("comment", self._bin_dir),
poetry_executable=colorize("b", self._bin_dir.joinpath("poetry")),
configure_message=POST_MESSAGE_CONFIGURE_UNIX.format(
poetry_home_bin=colorize("comment", self._bin_dir)
),
test_command=colorize("b", "poetry --version"),
)
)
def ensure_directories(self) -> None:
self._data_dir.mkdir(parents=True, exist_ok=True)
self._bin_dir.mkdir(parents=True, exist_ok=True)
def get_version(self):
current_version = None
if self._data_dir.joinpath("VERSION").exists():
current_version = self._data_dir.joinpath("VERSION").read_text().strip()
self._write(colorize("info", "Retrieving Poetry metadata"))
metadata = json.loads(self._get(self.METADATA_URL).decode())
def _compare_versions(x, y):
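# Order two version strings by their numeric (major, minor, patch) tuple
# plus the pre-release component captured by VERSION_REGEX.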
mx = self.VERSION_REGEX.match(x)
my = self.VERSION_REGEX.match(y)
vx = tuple(int(p) for p in mx.groups()[:3]) + (mx.group(5),)
vy = tuple(int(p) for p in my.groups()[:3]) + (my.group(5),)
if vx < vy:
return -1
elif vx > vy:
return 1
return 0
self._write("")
releases = sorted(
metadata["releases"].keys(), key=cmp_to_key(_compare_versions)
)
if self._version and self._version not in releases:
self._write(
colorize("error", "Version {} does not exist.".format(self._version))
)
return None, None
version = self._version
if not version:
for release in reversed(releases):
m = self.VERSION_REGEX.match(release)
if m.group(5) and not self.allows_prereleases():
continue
version = release
break
if current_version == version and not self._force:
self._write(
"The latest version ({}) is already installed.".format(
colorize("b", version)
)
)
return None, current_version
return version, current_version
def _write(self, line) -> None:
sys.stdout.write(line + "\n")
def _overwrite(self, line) -> None:
if not is_decorated():
return self._write(line)
self._cursor.move_up()
self._cursor.clear_line()
self._write(line)
def _get(self, url):
request = Request(url, headers={"User-Agent": "Python Poetry"})
with closing(urlopen(request)) as r:
return r.read()
def main():
parser = argparse.ArgumentParser(
description="Installs the latest (or given) version of poetry"
)
parser.add_argument(
"-p",
"--preview",
help="install preview version",
dest="preview",
action="store_true",
default=False,
)
parser.add_argument("--version", help="install named version", dest="version")
parser.add_argument(
"-f",
"--force",
help="install on top of existing version",
dest="force",
action="store_true",
default=False,
)
parser.add_argument(
"-y",
"--yes",
help="accept all prompts",
dest="accept_all",
action="store_true",
default=False,
)
parser.add_argument(
"--uninstall",
help="uninstall poetry",
dest="uninstall",
action="store_true",
default=False,
)
parser.add_argument(
"--path",
dest="path",
action="store",
help=(
"Install from a given path (file or directory) instead of "
"fetching the latest version of Poetry available online."
),
)
parser.add_argument(
"--git",
dest="git",
action="store",
help=(
"Install from a git repository instead of fetching the latest version "
"of Poetry available online."
),
)
args = parser.parse_args()
installer = Installer(
version=args.version or os.getenv("POETRY_VERSION"),
preview=args.preview or string_to_bool(os.getenv("POETRY_PREVIEW", "0")),
force=args.force,
accept_all=args.accept_all
or string_to_bool(os.getenv("POETRY_ACCEPT", "0"))
or not is_interactive(),
path=args.path,
git=args.git,
)
if args.uninstall or string_to_bool(os.getenv("POETRY_UNINSTALL", "0")):
return installer.uninstall()
return installer.run()
if __name__ == "__main__":
sys.exit(main())
|
[] |
[] |
[
"XDG_DATA_HOME",
"POETRY_VERSION",
"ANSICON",
"SHELL",
"ConEmuANSI",
"Term",
"POETRY_PREVIEW",
"POETRY_ACCEPT",
"PATH",
"POETRY_HOME",
"POETRY_UNINSTALL"
] |
[]
|
["XDG_DATA_HOME", "POETRY_VERSION", "ANSICON", "SHELL", "ConEmuANSI", "Term", "POETRY_PREVIEW", "POETRY_ACCEPT", "PATH", "POETRY_HOME", "POETRY_UNINSTALL"]
|
python
| 11 | 0 | |
likelihoods/Planck2018_highTTTEEE/__init__.py
|
import os
import clik
import numpy as np
### Some important variables ###
clik_root = os.environ.get('PLANCK_2018_DATA')
if clik_root is None:
raise ValueError('The environment variable PLANCK_2018_DATA is not set.')
### Planck 2018 high ells TT, TE, EE
hell_TTTEEE = clik.clik(clik_root + '/hi_l/plik/plik_rd12_HM_v22b_TTTEEE.clik')
hell_TTTEEE_pars = hell_TTTEEE.get_extra_parameter_names()
def get_loglike(class_input, likes_input, class_run):
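# clik expects the lensed spectra concatenated as TT, EE, TE (each up to
# l=2508), followed by the nuisance parameters; the factor 1e12 * T_cmb()**2
# converts the dimensionless C_l's returned by CLASS to muK^2.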
args = np.concatenate((
class_run.lensed_cl()['tt'][:2509] * 1e12 * class_run.T_cmb()**2.,
class_run.lensed_cl()['ee'][:2509] * 1e12 * class_run.T_cmb()**2.,
class_run.lensed_cl()['te'][:2509] * 1e12 * class_run.T_cmb()**2.,
np.array([likes_input[par] for par in hell_TTTEEE_pars])
))
return hell_TTTEEE(args)
|
[] |
[] |
[
"PLANCK_2018_DATA"
] |
[]
|
["PLANCK_2018_DATA"]
|
python
| 1 | 0 | |
pkg/anago/stage.go
|
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package anago
import (
"fmt"
"os"
"path/filepath"
"regexp"
"strings"
"time"
"github.com/blang/semver"
intoto "github.com/in-toto/in-toto-golang/in_toto"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"k8s.io/release/pkg/build"
"k8s.io/release/pkg/changelog"
"k8s.io/release/pkg/gcp/gcb"
"k8s.io/release/pkg/release"
"sigs.k8s.io/bom/pkg/provenance"
"sigs.k8s.io/bom/pkg/spdx"
"sigs.k8s.io/release-sdk/git"
"sigs.k8s.io/release-utils/log"
)
// stageClient is a client for staging releases.
//counterfeiter:generate . stageClient
type stageClient interface {
// Submit can be used to submit a Google Cloud Build (GCB) job.
Submit(stream bool) error
// InitState initializes the default internal state.
InitState()
// InitLogFile sets up the log file target.
InitLogFile() error
// Validate if the provided `ReleaseOptions` are correctly set.
ValidateOptions() error
// CheckPrerequisites verifies that a valid GITHUB_TOKEN environment
// variable is set. It also checks for the existence and version of
// required packages and if the correct Google Cloud project is set. A
// basic hardware check will ensure that enough disk space is available,
// too.
CheckPrerequisites() error
// CheckReleaseBranchState discovers if the provided release branch has to
// be created.
CheckReleaseBranchState() error
// GenerateReleaseVersion discovers the next versions to be released.
GenerateReleaseVersion() error
// PrepareWorkspace verifies that the working directory is in the desired
// state. This means that the build directory is cleaned up and the checked
// out repository is in a clean state.
PrepareWorkspace() error
// TagRepository creates all necessary git objects by tagging the
// repository for the provided `versions` the main version `versionPrime`
// and the `parentBranch`.
TagRepository() error
// Build runs 'make cross-in-a-container' by using the latest kubecross
// container image. This step also build all necessary release tarballs.
Build() error
// GenerateChangelog builds the CHANGELOG-x.y.md file and commits it
// into the local repository.
GenerateChangelog() error
// VerifyArtifacts performs verification of the generated artifacts
VerifyArtifacts() error
// GenerateBillOfMaterials generates the SBOM documents for the Kubernetes
// source code and the release artifacts.
GenerateBillOfMaterials() error
// StageArtifacts copies the build artifacts to a Google Cloud Bucket.
StageArtifacts() error
}
// DefaultStage is the default staging implementation used in production.
type DefaultStage struct {
impl stageImpl
options *StageOptions
state *StageState
}
// NewDefaultStage creates a new defaultStage instance.
func NewDefaultStage(options *StageOptions) *DefaultStage {
return &DefaultStage{&defaultStageImpl{}, options, nil}
}
// SetImpl can be used to set the internal stage implementation.
func (d *DefaultStage) SetImpl(impl stageImpl) {
d.impl = impl
}
// SetState fixes the current state. Mainly used for passing
// arbitrary values during testing
func (d *DefaultStage) SetState(state *StageState) {
d.state = state
}
// State returns the internal state.
func (d *DefaultStage) State() *StageState {
return d.state
}
// defaultStageImpl is the default internal stage client implementation.
type defaultStageImpl struct{}
// stageImpl is the implementation of the stage client.
//counterfeiter:generate . stageImpl
type stageImpl interface {
Submit(options *gcb.Options) error
ToFile(fileName string) error
CheckPrerequisites() error
BranchNeedsCreation(
branch, releaseType string, buildVersion semver.Version,
) (bool, error)
PrepareWorkspaceStage(noMock bool) error
GenerateReleaseVersion(
releaseType, version, branch string, branchFromMaster bool,
) (*release.Versions, error)
OpenRepo(repoPath string) (*git.Repo, error)
RevParse(repo *git.Repo, rev string) (string, error)
RevParseTag(repo *git.Repo, rev string) (string, error)
Checkout(repo *git.Repo, rev string, args ...string) error
CurrentBranch(repo *git.Repo) (string, error)
CommitEmpty(repo *git.Repo, msg string) error
Tag(repo *git.Repo, name, message string) error
Merge(repo *git.Repo, rev string) error
CheckReleaseBucket(options *build.Options) error
DockerHubLogin() error
MakeCross(version string) error
GenerateChangelog(options *changelog.Options) error
StageLocalSourceTree(
options *build.Options, workDir, buildVersion string,
) error
DeleteLocalSourceTarball(*build.Options, string) error
StageLocalArtifacts(options *build.Options) error
PushReleaseArtifacts(
options *build.Options, srcPath, gcsPath string,
) error
PushContainerImages(options *build.Options) error
GenerateVersionArtifactsBOM(string) error
GenerateSourceTreeBOM(options *spdx.DocGenerateOptions) (*spdx.Document, error)
WriteSourceBOM(spdxDoc *spdx.Document, version string) error
ListBinaries(version string) ([]struct{ Path, Platform, Arch string }, error)
ListImageArchives(string) ([]string, error)
ListTarballs(version string) ([]string, error)
BuildBaseArtifactsSBOM(*spdx.DocGenerateOptions) (*spdx.Document, error)
AddBinariesToSBOM(*spdx.Document, string) error
AddTarfilesToSBOM(*spdx.Document, string) error
VerifyArtifacts([]string) error
GenerateAttestation(*StageState, *StageOptions) (*provenance.Statement, error)
PushAttestation(*provenance.Statement, *StageOptions) error
GetProvenanceSubjects(*StageOptions, string) ([]intoto.Subject, error)
GetOutputDirSubjects(*StageOptions, string, string) ([]intoto.Subject, error)
}
func (d *defaultStageImpl) Submit(options *gcb.Options) error {
return gcb.New(options).Submit()
}
func (d *defaultStageImpl) ToFile(fileName string) error {
return log.ToFile(fileName)
}
func (d *defaultStageImpl) CheckPrerequisites() error {
return release.NewPrerequisitesChecker().Run(workspaceDir)
}
func (d *defaultStageImpl) BranchNeedsCreation(
branch, releaseType string, buildVersion semver.Version,
) (bool, error) {
return release.NewBranchChecker().NeedsCreation(
branch, releaseType, buildVersion,
)
}
func (d *defaultStageImpl) PrepareWorkspaceStage(noMock bool) error {
if err := release.PrepareWorkspaceStage(gitRoot, noMock); err != nil {
return err
}
return os.Chdir(gitRoot)
}
func (d *defaultStageImpl) GenerateReleaseVersion(
releaseType, version, branch string, branchFromMaster bool,
) (*release.Versions, error) {
return release.GenerateReleaseVersion(
releaseType, version, branch, branchFromMaster,
)
}
func (d *defaultStageImpl) OpenRepo(repoPath string) (*git.Repo, error) {
return git.OpenRepo(repoPath)
}
func (d *defaultStageImpl) RevParse(repo *git.Repo, rev string) (string, error) {
return repo.RevParse(rev)
}
func (d *defaultStageImpl) RevParseTag(repo *git.Repo, rev string) (string, error) {
return repo.RevParseTag(rev)
}
func (d *defaultStageImpl) Checkout(repo *git.Repo, rev string, args ...string) error {
return repo.Checkout(rev, args...)
}
func (d *defaultStageImpl) CurrentBranch(repo *git.Repo) (string, error) {
return repo.CurrentBranch()
}
func (d *defaultStageImpl) CommitEmpty(repo *git.Repo, msg string) error {
return repo.CommitEmpty(msg)
}
func (d *defaultStageImpl) Tag(repo *git.Repo, name, message string) error {
return repo.Tag(name, message)
}
func (d *defaultStageImpl) Merge(repo *git.Repo, rev string) error {
return repo.Merge(rev)
}
func (d *defaultStageImpl) MakeCross(version string) error {
return build.NewMake().MakeCross(version)
}
func (d *defaultStageImpl) DockerHubLogin() error {
return release.DockerHubLogin()
}
func (d *defaultStageImpl) GenerateChangelog(options *changelog.Options) error {
return changelog.New(options).Run()
}
func (d *defaultStageImpl) CheckReleaseBucket(
options *build.Options,
) error {
return build.NewInstance(options).CheckReleaseBucket()
}
func (d *defaultStageImpl) StageLocalSourceTree(
options *build.Options, workDir, buildVersion string,
) error {
return build.NewInstance(options).StageLocalSourceTree(workDir, buildVersion)
}
func (d *defaultStageImpl) DeleteLocalSourceTarball(options *build.Options, workDir string) error {
return build.NewInstance(options).DeleteLocalSourceTarball(workDir)
}
func (d *defaultStageImpl) StageLocalArtifacts(
options *build.Options,
) error {
return build.NewInstance(options).StageLocalArtifacts()
}
func (d *defaultStageImpl) PushReleaseArtifacts(
options *build.Options, srcPath, gcsPath string,
) error {
return build.NewInstance(options).PushReleaseArtifacts(srcPath, gcsPath)
}
func (d *defaultStageImpl) PushContainerImages(
options *build.Options,
) error {
return build.NewInstance(options).PushContainerImages()
}
func (d *DefaultStage) Submit(stream bool) error {
options := gcb.NewDefaultOptions()
options.Stream = stream
options.Stage = true
options.NoMock = d.options.NoMock
options.Branch = d.options.ReleaseBranch
options.ReleaseType = d.options.ReleaseType
return d.impl.Submit(options)
}
// ListBinaries returns a list of all the binaries obtained
// from the build with platform and arch details
func (d *defaultStageImpl) ListBinaries(version string) (list []struct{ Path, Platform, Arch string }, err error) {
return release.ListBuildBinaries(gitRoot, version)
}
// ListImageArchives returns a list of the image archives produced
// for the specified version
func (d *defaultStageImpl) ListImageArchives(version string) ([]string, error) {
return release.ListBuildImages(gitRoot, version)
}
// ListTarballs returns the produced tarballs produced for this version
func (d *defaultStageImpl) ListTarballs(version string) ([]string, error) {
return release.ListBuildTarballs(gitRoot, version)
}
// VerifyArtifacts check the artifacts produced are correct
func (d *defaultStageImpl) VerifyArtifacts(versions []string) error {
// Create a new artifact checker to verify the consistency of
// the produced artifacts.
checker := release.NewArtifactCheckerWithOptions(
&release.ArtifactCheckerOptions{
GitRoot: gitRoot,
Versions: versions,
},
)
// Ensure binaries are of the correct architecture
if err := checker.CheckBinaryArchitectures(); err != nil {
return errors.Wrap(err, "checking binary architectures")
}
return nil
}
func (d *DefaultStage) InitLogFile() error {
logrus.SetFormatter(
&logrus.TextFormatter{FullTimestamp: true, ForceColors: true},
)
logFile := filepath.Join(os.TempDir(), "stage.log")
if err := d.impl.ToFile(logFile); err != nil {
return errors.Wrap(err, "setup log file")
}
d.state.logFile = logFile
logrus.Infof("Additionally logging to file %s", d.state.logFile)
return nil
}
func (d *DefaultStage) InitState() {
d.state = &StageState{DefaultState()}
}
func (d *DefaultStage) ValidateOptions() error {
if err := d.options.Validate(d.state.State); err != nil {
return errors.Wrap(err, "validating options")
}
return nil
}
func (d *DefaultStage) CheckPrerequisites() error {
return d.impl.CheckPrerequisites()
}
func (d *DefaultStage) CheckReleaseBranchState() error {
createReleaseBranch, err := d.impl.BranchNeedsCreation(
d.options.ReleaseBranch,
d.options.ReleaseType,
d.state.semverBuildVersion,
)
if err != nil {
return errors.Wrap(err, "check if release branch needs creation")
}
d.state.createReleaseBranch = createReleaseBranch
return nil
}
func (d *DefaultStage) GenerateReleaseVersion() error {
versions, err := d.impl.GenerateReleaseVersion(
d.options.ReleaseType,
d.options.BuildVersion,
d.options.ReleaseBranch,
d.state.createReleaseBranch,
)
if err != nil {
return errors.Wrap(err, "generating release versions for stage")
}
// Set the versions on the state
d.state.versions = versions
return nil
}
func (d *DefaultStage) PrepareWorkspace() error {
if err := d.impl.PrepareWorkspaceStage(d.options.NoMock); err != nil {
return errors.Wrap(err, "prepare workspace")
}
return nil
}
func (d *DefaultStage) TagRepository() error {
repo, err := d.impl.OpenRepo(gitRoot)
if err != nil {
return errors.Wrap(err, "open Kubernetes repository")
}
for _, version := range d.state.versions.Ordered() {
logrus.Infof("Preparing version %s", version)
// Ensure that the tag does not already exist
if _, err := d.impl.RevParseTag(repo, version); err == nil {
return errors.Errorf("tag %s already exists", version)
}
// Usually the build version contains a commit we can reference. If
// not, because the build version is exactly a tag, then we fallback to
// that tag.
commit := d.options.BuildVersion
if len(d.state.semverBuildVersion.Build) > 0 {
commit = d.state.semverBuildVersion.Build[0]
}
if d.state.createReleaseBranch {
logrus.Infof("Creating release branch %s", d.options.ReleaseBranch)
if version == d.state.versions.Prime() {
logrus.Infof("Version %s is the prime version", version)
logrus.Infof(
"Creating release branch %s from commit %s",
d.options.ReleaseBranch, commit,
)
if err := d.impl.Checkout(
repo, "-b", d.options.ReleaseBranch, commit,
); err != nil {
return errors.Wrap(err, "create new release branch")
}
} else {
logrus.Infof(
"Version %s is not the prime, checking out %s branch",
version, git.DefaultBranch,
)
if err := d.impl.Checkout(repo, git.DefaultBranch); err != nil {
return errors.Wrapf(err, "checkout %s branch", git.DefaultBranch)
}
}
} else {
logrus.Infof("Checking out branch %s", d.options.ReleaseBranch)
if err := d.impl.Checkout(repo, d.options.ReleaseBranch); err != nil {
return errors.Wrapf(err, "checking out branch %s", d.options.ReleaseBranch)
}
}
// `branch == ""` in case we checked out a commit directly, which is
// then in detached head state.
branch, err := d.impl.CurrentBranch(repo)
if err != nil {
return errors.Wrap(err, "get current branch")
}
if branch != "" {
logrus.Infof("Current branch is %s", branch)
}
// For release branches, we create an empty release commit to avoid
// potential ambiguous 'git describe' logic between the official
// release, 'x.y.z' and the next beta of that release branch,
// 'x.y.(z+1)-beta.0'.
//
// We avoid doing this empty release commit on 'master', as:
// - there is a potential for branch conflicts as upstream/master
// moves ahead
// - we're checking out a git ref, as opposed to a branch, which
// means the tag will detached from 'upstream/master'
//
// A side-effect of the tag being detached from 'master' is the primary
// build job (ci-kubernetes-build) will build as the previous alpha,
// instead of the assumed tag. This causes the next anago run against
// 'master' to fail due to an old build version.
//
// Example: 'v1.18.0-alpha.2.663+df908c3aad70be'
// (should instead be:
// 'v1.18.0-alpha.3.<commits-since-tag>+<commit-ish>')
//
// ref:
// - https://github.com/kubernetes/release/issues/1020
// - https://github.com/kubernetes/release/pull/1030
// - https://github.com/kubernetes/release/issues/1080
// - https://github.com/kubernetes/kubernetes/pull/88074
// When tagging a release branch, always create an empty commit:
if strings.HasPrefix(branch, "release-") {
logrus.Infof("Creating empty release commit for tag %s", version)
if err := d.impl.CommitEmpty(
repo,
fmt.Sprintf("Release commit for Kubernetes %s", version),
); err != nil {
return errors.Wrap(err, "create empty release commit")
}
}
// If we are on master/main we do not create an empty commit,
// but we detach the head at the specified commit to avoid having
// commits merged between the BuildVersion commit and the tag:
if branch != "" && !strings.HasPrefix(branch, "release-") {
logrus.Infof("Detaching HEAD at commit %s to create tag %s", commit, version)
if err := d.impl.Checkout(repo, commit); err != nil {
return errors.Wrap(err, "checkout release commit")
}
}
// If a custom ref is provided, try to merge it into the release
// branch.
ref := release.GetK8sRef()
if ref != release.DefaultK8sRef {
logrus.Infof("Merging custom ref: %s", ref)
if err := d.impl.Merge(repo, git.Remotify(ref)); err != nil {
return errors.Wrap(err, "merge k8s ref")
}
}
// Tag the repository:
logrus.Infof("Tagging version %s", version)
if err := d.impl.Tag(
repo,
version,
fmt.Sprintf(
"Kubernetes %s release %s", d.options.ReleaseType, version,
),
); err != nil {
return errors.Wrap(err, "tag version")
}
// if we are working on master/main at this point, we are in
// detached HEAD state. So we checkout the branch again.
// The next stage (build) will checkout the branch it needs, but
// let's not end this step with a detached HEAD
if branch != "" && !strings.HasPrefix(branch, "release-") {
logrus.Infof("Checking out %s to reattach HEAD", d.options.ReleaseBranch)
if err := d.impl.Checkout(repo, d.options.ReleaseBranch); err != nil {
return errors.Wrapf(err, "checking out branch %s", d.options.ReleaseBranch)
}
}
}
return nil
}
func (d *DefaultStage) Build() error {
// Log in to Docker Hub to avoid getting rate limited
if err := d.impl.DockerHubLogin(); err != nil {
return errors.Wrap(err, "logging into Docker Hub")
}
// Call MakeCross for each of the versions we are building
for _, version := range d.state.versions.Ordered() {
if err := d.impl.MakeCross(version); err != nil {
return errors.Wrap(err, "build artifacts")
}
}
return nil
}
// VerifyArtifacts checks the artifacts to ensure they are correct
func (d *DefaultStage) VerifyArtifacts() error {
return d.impl.VerifyArtifacts(d.state.versions.Ordered())
}
func (d *DefaultStage) GenerateChangelog() error {
branch := d.options.ReleaseBranch
if d.state.createReleaseBranch {
branch = git.DefaultBranch
}
return d.impl.GenerateChangelog(&changelog.Options{
RepoPath: gitRoot,
Tag: d.state.versions.Prime(),
Branch: branch,
Bucket: d.options.Bucket(),
HTMLFile: releaseNotesHTMLFile,
JSONFile: releaseNotesJSONFile,
Dependencies: true,
CloneCVEMaps: true,
Tars: filepath.Join(
gitRoot,
fmt.Sprintf("%s-%s", release.BuildDir, d.state.versions.Prime()),
release.ReleaseTarsPath,
),
})
}
// AddBinariesToSBOM reads the produced "naked" binaries and adds them to the sbom
func (d *defaultStageImpl) AddBinariesToSBOM(sbom *spdx.Document, version string) error {
binaries, err := d.ListBinaries(version)
if err != nil {
return errors.Wrapf(err, "Getting binaries list for %s", version)
}
// Add the binaries, taking care of their docs
for _, bin := range binaries {
file := spdx.NewFile()
if err := file.ReadSourceFile(bin.Path); err != nil {
return errors.Wrapf(err, "reading binary sourcefile from %s", bin.Path)
}
file.Name = filepath.Join("bin", bin.Platform, bin.Arch, filepath.Base(bin.Path))
file.FileName = file.Name
file.LicenseConcluded = LicenseIdentifier
if err := sbom.AddFile(file); err != nil {
return errors.Wrap(err, "adding file to artifacts sbom")
}
file.AddRelationship(&spdx.Relationship{
FullRender: false,
PeerReference: "SPDXRef-Package-kubernetes",
PeerExtReference: fmt.Sprintf("kubernetes-%s", version),
Comment: "Source code",
Type: spdx.GENERATED_FROM,
})
}
return nil
}
// AddImagesToSBOM reads the image archives from disk and adds them to the sbom
func (d *defaultStageImpl) AddTarfilesToSBOM(sbom *spdx.Document, version string) error {
tarballs, err := d.ListTarballs(version)
if err != nil {
return errors.Wrapf(err, "listing release tarballs for %s", version)
}
// Once the initial doc is generated, add the tarfiles
for _, tar := range tarballs {
file := spdx.NewFile()
if err := file.ReadSourceFile(tar); err != nil {
return errors.Wrapf(err, "reading tarball sourcefile from %s", tar)
}
file.Name = filepath.Base(tar)
file.LicenseConcluded = LicenseIdentifier
file.FileName = filepath.Base(tar)
if err := sbom.AddFile(file); err != nil {
return errors.Wrap(err, "adding file to artifacts sbom")
}
file.AddRelationship(&spdx.Relationship{
FullRender: false,
PeerReference: "SPDXRef-Package-kubernetes",
PeerExtReference: fmt.Sprintf("kubernetes-%s", version),
Comment: "Source code",
Type: spdx.GENERATED_FROM,
})
}
return nil
}
func (d *defaultStageImpl) BuildBaseArtifactsSBOM(options *spdx.DocGenerateOptions) (*spdx.Document, error) {
logrus.Info("Generating release artifacts SBOM")
return spdx.NewDocBuilder().Generate(options)
}
func (d *defaultStageImpl) GenerateVersionArtifactsBOM(version string) error {
images, err := d.ListImageArchives(version)
if err != nil {
return errors.Wrap(err, "getting artifacts list")
}
// Build the base artifacts sbom. We only pass it the images for
// now as the binaries and tarballs need more processing
doc, err := d.BuildBaseArtifactsSBOM(&spdx.DocGenerateOptions{
Name: fmt.Sprintf("Kubernetes Release %s", version),
AnalyseLayers: false,
OnlyDirectDeps: false,
License: LicenseIdentifier,
Namespace: fmt.Sprintf("https://sbom.k8s.io/%s/release", version),
ScanLicenses: false,
Tarballs: images,
OutputFile: filepath.Join(),
})
if err != nil {
return errors.Wrapf(err, "generating base artifacts sbom for %s", version)
}
// Add the binaries and tarballs
if err := d.AddBinariesToSBOM(doc, version); err != nil {
return errors.Wrapf(err, "adding binaries to %s SBOM", version)
}
if err := d.AddTarfilesToSBOM(doc, version); err != nil {
return errors.Wrapf(err, "adding tarballs to %s SBOM", version)
}
// Reference the source code SBOM as external document
extRef := spdx.ExternalDocumentRef{
ID: fmt.Sprintf("kubernetes-%s", version),
URI: fmt.Sprintf("https://sbom.k8s.io/%s/source", version),
}
if err := extRef.ReadSourceFile(
filepath.Join(os.TempDir(), fmt.Sprintf("source-bom-%s.spdx", version)),
); err != nil {
return errors.Wrap(err, "reading the source file as external reference")
}
doc.ExternalDocRefs = append(doc.ExternalDocRefs, extRef)
// Stamp all packages. We do this here because at this point the document includes both the images and the files added above.
for _, pkg := range doc.Packages {
pkg.AddRelationship(&spdx.Relationship{
FullRender: false,
PeerReference: "SPDXRef-Package-kubernetes",
PeerExtReference: fmt.Sprintf("kubernetes-%s", version),
Comment: "Source code",
Type: spdx.GENERATED_FROM,
})
}
// Write the Release Artifacts SBOM to disk
if err := doc.Write(filepath.Join(os.TempDir(), fmt.Sprintf("release-bom-%s.spdx", version))); err != nil {
return errors.Wrapf(err, "writing artifacts SBOM for %s", version)
}
return nil
}
func (d *defaultStageImpl) GenerateSourceTreeBOM(
options *spdx.DocGenerateOptions,
) (*spdx.Document, error) {
logrus.Info("Generating Kubernetes source SBOM file")
doc, err := spdx.NewDocBuilder().Generate(options)
return doc, errors.Wrap(err, "Generating kubernetes source code SBOM")
}
// WriteSourceBOM takes a source code SBOM and writes it into a file, updating
// its Namespace to match the final destination
func (d *defaultStageImpl) WriteSourceBOM(
spdxDoc *spdx.Document, version string,
) error {
spdxDoc.Namespace = fmt.Sprintf("https://sbom.k8s.io/%s/source", version)
spdxDoc.Name = fmt.Sprintf("kubernetes-%s", version)
return errors.Wrap(
spdxDoc.Write(filepath.Join(os.TempDir(), fmt.Sprintf("source-bom-%s.spdx", version))),
"writing the source code SBOM",
)
}
func (d *DefaultStage) GenerateBillOfMaterials() error {
// For the Kubernetes source, we only generate the SBOM once as both
// versions are cut from the same point in the git history. The
// resulting SPDX document will be customized for each version
// in WriteSourceBOM() before writing the actual files.
spdxDOC, err := d.impl.GenerateSourceTreeBOM(&spdx.DocGenerateOptions{
ProcessGoModules: true,
License: LicenseIdentifier,
OutputFile: "/tmp/kubernetes-source.spdx",
Namespace: "https://sbom.k8s.io/REPLACE/source", // This one gets replaced when writing to disk
ScanLicenses: true,
Directories: []string{gitRoot},
})
if err != nil {
return errors.Wrap(err, "generating the kubernetes source SBOM")
}
// We generate an artifacts sbom for each of the versions
// we are building
for _, version := range d.state.versions.Ordered() {
// Render the common source SBOM for this version
if err := d.impl.WriteSourceBOM(spdxDOC, version); err != nil {
return errors.Wrapf(err, "writing SBOM for version %s", version)
}
// Render the artifacts SBOM for version
if err := d.impl.GenerateVersionArtifactsBOM(version); err != nil {
return errors.Wrapf(err, "generating SBOM for version %s", version)
}
}
return nil
}
func (d *DefaultStage) StageArtifacts() error {
// Generate the in-toto attestation, preloaded with the current run data
statement, err := d.impl.GenerateAttestation(d.state, d.options)
if err != nil {
return errors.Wrap(err, "generating the provenance attestation")
}
// Initialize the push options we will use
pushBuildOptions := &build.Options{
Bucket: d.options.Bucket(),
Registry: d.options.ContainerRegistry(),
AllowDup: true,
ValidateRemoteImageDigests: true,
}
if err := d.impl.CheckReleaseBucket(pushBuildOptions); err != nil {
return errors.Wrap(err, "check release bucket access")
}
// Stage the local source tree
if err := d.impl.StageLocalSourceTree(
pushBuildOptions,
workspaceDir,
d.options.BuildVersion,
); err != nil {
return errors.Wrap(err, "staging local source tree")
}
// Add the sources tarball to the attestation
subjects, err := d.impl.GetProvenanceSubjects(
d.options, filepath.Join(workspaceDir, release.SourcesTar),
)
if err != nil {
return errors.Wrap(err, "adding sources tarball to provenance attestation")
}
statement.Subject = append(statement.Subject, subjects...)
for _, version := range d.state.versions.Ordered() {
logrus.Infof("Staging artifacts for version %s", version)
buildDir := filepath.Join(
gitRoot, fmt.Sprintf("%s-%s", release.BuildDir, version),
)
// Set the version-specific option for the push
pushBuildOptions.Version = version
pushBuildOptions.BuildDir = buildDir
// Stage local artifacts and write checksums
if err := d.impl.StageLocalArtifacts(pushBuildOptions); err != nil {
return errors.Wrap(err, "staging local artifacts")
}
gcsPath := filepath.Join(
d.options.Bucket(), release.StagePath, d.options.BuildVersion, version,
)
// Push gcs-stage to GCS
if err := d.impl.PushReleaseArtifacts(
pushBuildOptions,
filepath.Join(buildDir, release.GCSStagePath, version),
filepath.Join(gcsPath, release.GCSStagePath, version),
); err != nil {
return errors.Wrap(err, "pushing release artifacts")
}
// Push container release-images to GCS
if err := d.impl.PushReleaseArtifacts(
pushBuildOptions,
filepath.Join(buildDir, release.ImagesPath),
filepath.Join(gcsPath, release.ImagesPath),
); err != nil {
return errors.Wrap(err, "pushing release artifacts")
}
// Push container images into registry
if err := d.impl.PushContainerImages(pushBuildOptions); err != nil {
return errors.Wrap(err, "pushing container images")
}
// Add artifacts to the attestation, this should get both release-images
// and gcs-stage directories in one call.
subjects, err = d.impl.GetOutputDirSubjects(
d.options, filepath.Join(buildDir), version,
)
if err != nil {
return errors.Wrapf(err, "adding provenance of release-images for version %s", version)
}
statement.Subject = append(statement.Subject, subjects...)
}
// Push the attestation metadata file to the bucket
if err := d.impl.PushAttestation(statement, d.options); err != nil {
return errors.Wrap(err, "writing provenance metadata to disk")
}
// Delete the local source tarball
if err := d.impl.DeleteLocalSourceTarball(pushBuildOptions, workspaceDir); err != nil {
return errors.Wrap(err, "delete source tarball")
}
args := ""
if d.options.NoMock {
args += " --nomock"
}
if d.options.ReleaseType != DefaultOptions().ReleaseType {
args += " --type=" + d.options.ReleaseType
}
if d.options.ReleaseBranch != DefaultOptions().ReleaseBranch {
args += " --branch=" + d.options.ReleaseBranch
}
args += " --build-version=" + d.options.BuildVersion
logrus.Infof(
"To release this staged build, run:\n\n$ krel release%s", args,
)
return nil
}
// GenerateAttestation creates a provenance attestation with its predicate
// preloaded with the current krel run information
func (d *defaultStageImpl) GenerateAttestation(state *StageState, options *StageOptions) (attestation *provenance.Statement, err error) {
// Build the arguments RawMessage:
arguments := map[string]string{
"--type=": options.ReleaseType,
"--branch=": options.ReleaseBranch,
"--build-version=": options.BuildVersion,
}
if options.NoMock {
arguments["--nomock"] = "true"
}
// Fetch the last commit:
repo, err := git.OpenRepo(gitRoot)
if err != nil {
return nil, errors.Wrap(err, "opening repository to check commit hash")
}
// TODO: When this PR merges and the commit is part of a release:
// https://github.com/kubernetes-sigs/release-sdk/pull/6
// and k/release is bumped, replace the commit logic with this line:
// commitSHA, err := repo.LastCommitSha()
logData, err := repo.ShowLastCommit()
if err != nil {
return nil, errors.Wrap(err, "getting last commit data")
}
re := regexp.MustCompile(`commit\s+([a-f0-9]{40})`)
matches := re.FindStringSubmatch(logData)
if len(matches) < 2 {
return nil, errors.New("unable to find last commit sha in git output")
}
// Use only the captured SHA, not the full "commit <sha>" match.
commitSHA := matches[1]
// Create the predicate to populate it with the current
// run metadata:
p := provenance.NewSLSAPredicate()
// TODO: In regular runs, this will insert "master", we should
// record the git sha of the commit in k/release we are using.
p.Builder.ID = fmt.Sprintf(
"pkg:github/%s/%s@%s", os.Getenv("TOOL_ORG"),
os.Getenv("TOOL_REPO"), os.Getenv("TOOL_REF"),
)
// Some of these fields have yet to be checked to assign the
// correct values to them
// This is commented as the in-toto go port does not have it
// p.Metadata.BuildInvocationID: os.Getenv("BUILD_ID"),
p.Metadata.Completeness.Arguments = true // The arguments are complete as we know them from GCB
p.Metadata.Completeness.Materials = true // The materials are complete as we only use the github repo
startTime := state.startTime.UTC()
endTime := time.Now().UTC()
p.Metadata.BuildStartedOn = &startTime
p.Metadata.BuildFinishedOn = &endTime
p.Recipe.Type = "https://cloudbuild.googleapis.com/CloudBuildYaml@v1"
p.Recipe.EntryPoint = "https://github.com/kubernetes/release/blob/master/gcb/stage/cloudbuild.yaml"
p.Recipe.Arguments = arguments
p.AddMaterial("git+https://github.com/kubernetes/kubernetes", intoto.DigestSet{"sha1": commitSHA})
// Create the new attestation and attach the predicate
attestation = provenance.NewSLSAStatement()
attestation.Predicate = p
return attestation, nil
}
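// Illustrative sketch (the environment values here are assumptions, not taken from
// a real build): with TOOL_ORG=kubernetes, TOOL_REPO=release and TOOL_REF=master the
// builder ID composed above resolves as
//
//	id := fmt.Sprintf("pkg:github/%s/%s@%s",
//		os.Getenv("TOOL_ORG"), os.Getenv("TOOL_REPO"), os.Getenv("TOOL_REF"))
//	// -> "pkg:github/kubernetes/release@master"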
// PushAttestation writes the provenance metadata to the staging location in
// the Google Cloud Bucket.
func (d *defaultStageImpl) PushAttestation(attestation *provenance.Statement, options *StageOptions) (err error) {
gcsPath := filepath.Join(options.Bucket(), release.StagePath, options.BuildVersion)
// Create a temporary file:
f, err := os.CreateTemp("", "provenance-")
if err != nil {
return errors.Wrap(err, "creating temp file for provenance metadata")
}
// Write the provenance statement to disk:
if err := attestation.Write(f.Name()); err != nil {
return errors.Wrap(err, "writing provenance attestation to disk")
}
// TODO for SLSA2: Sign the attestation
// Upload the metadata file to the staging bucket
pushBuildOptions := &build.Options{
Bucket: options.Bucket(),
AllowDup: true,
}
if err := d.CheckReleaseBucket(pushBuildOptions); err != nil {
return errors.Wrap(err, "check release bucket access")
}
// Push the provenance file to GCS
return errors.Wrap(
d.PushReleaseArtifacts(pushBuildOptions, f.Name(), filepath.Join(gcsPath, release.ProvenanceFilename)),
"pushing provenance manifest",
)
}
// GetOutputDirSubjects reads the built artifacts and returns them
// as intoto subjects. All paths are translated to their final path in the bucket
func (d *defaultStageImpl) GetOutputDirSubjects(
options *StageOptions, path, version string) ([]intoto.Subject, error) {
return release.NewProvenanceReader(&release.ProvenanceReaderOptions{
Bucket: options.Bucket(),
BuildVersion: options.BuildVersion,
WorkspaceDir: workspaceDir,
}).GetBuildSubjects(path, version)
}
// GetProvenanceSubjects returns artifacts as intoto subjects, normalized to
// the staging bucket location
func (d *defaultStageImpl) GetProvenanceSubjects(
options *StageOptions, path string) ([]intoto.Subject, error) {
return release.NewProvenanceReader(&release.ProvenanceReaderOptions{
Bucket: options.Bucket(),
BuildVersion: options.BuildVersion,
WorkspaceDir: workspaceDir,
}).GetStagingSubjects(path)
}
|
[
"\"TOOL_ORG\"",
"\"TOOL_REPO\"",
"\"TOOL_REF\"",
"\"BUILD_ID\""
] |
[] |
[
"BUILD_ID",
"TOOL_REPO",
"TOOL_ORG",
"TOOL_REF"
] |
[]
|
["BUILD_ID", "TOOL_REPO", "TOOL_ORG", "TOOL_REF"]
|
go
| 4 | 0 | |
alad/run.py
|
import time
import numpy as np
import tensorflow as tf
#import tf_cnnvis
import logging
import importlib
import sys
import os
from utils.adapt_data import batch_fill
from utils.evaluations import save_results, heatmap
from utils.constants import IMAGES_DATASETS
FREQ_PRINT = 200 # print frequency image tensorboard [20]
FREQ_EV = 1
PATIENCE = 10
def get_getter(ema):  # returns a getter that reads moving-average variables, suitable for semi-supervised learning (cf. Salimans et al.)
def ema_getter(getter, name, *args, **kwargs):
var = getter(name, *args, **kwargs)
ema_var = ema.average(var)
return ema_var if ema_var else var
return ema_getter
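# Illustrative usage sketch (mirrors the calls made further down, shown here only
# to clarify the helper): the returned getter is handed to the network builders so
# that inference reuses the EMA (moving-average) weights, e.g.
#   z_gen_ema = enc(x_pl, is_training=is_training_pl,
#                   getter=get_getter(enc_ema), reuse=True)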
def display_parameters(batch_size, starting_lr, ema_decay, degree, label,
allow_zz, score_method, do_spectral_norm):
"""See parameters
"""
print('Batch size: ', batch_size)
print('Starting learning rate: ', starting_lr)
print('EMA Decay: ', ema_decay)
print('Degree for L norms: ', degree)
print('Anomalous label: ', label)
print('Score method: ', score_method)
print('Discriminator zz enabled: ', allow_zz)
print('Spectral Norm enabled: ', do_spectral_norm)
def display_progression_epoch(j, id_max):
"""See epoch progression
"""
batch_progression = int((j / id_max) * 100)
sys.stdout.write(str(batch_progression) + ' % epoch' + chr(13))
sys.stdout.flush()
def create_logdir(dataset, label, rd,
allow_zz, score_method, do_spectral_norm):
""" Directory to save training logs, weights, biases, etc."""
model = 'alad_sn{}_dzz{}'.format(do_spectral_norm, allow_zz)
return "train_logs/{}/{}/dzzenabled{}/{}/label{}/" \
"rd{}".format(dataset, model, allow_zz,
score_method, label, rd)
def train_and_test(dataset, nb_epochs, degree, random_seed, label,
allow_zz, enable_sm, score_method,
enable_early_stop, do_spectral_norm):
""" Runs the AliCE on the specified dataset
Note:
Saves summaries on tensorboard. To display them, please use cmd line
tensorboard --logdir=model.training_logdir() --port=number
Args:
dataset (str): name of the dataset
nb_epochs (int): number of epochs
degree (int): degree of the norm in the feature matching
random_seed (int): trying different seeds for averaging the results
label (int): label which is normal for image experiments
allow_zz (bool): allow the d_zz discriminator or not for ablation study
enable_sm (bool): allow TF summaries for monitoring the training
score_method (str): which metric to use for the ablation study
enable_early_stop (bool): allow early stopping for determining the number of epochs
do_spectral_norm (bool): allow spectral norm or not for ablation study
"""
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
logger = logging.getLogger("ALAD.run.{}.{}".format(
dataset, label))
# Import model and data
network = importlib.import_module('alad.{}_utilities'.format(dataset))
data = importlib.import_module("data.{}".format(dataset))
# Parameters
starting_lr = network.learning_rate
batch_size = network.batch_size
latent_dim = network.latent_dim
ema_decay = 0.999
global_step = tf.Variable(0, name='global_step', trainable=False)
# Placeholders
x_pl = tf.placeholder(tf.float32, shape=data.get_shape_input(),
name="input_x")
z_pl = tf.placeholder(tf.float32, shape=[None, latent_dim],
name="input_z")
is_training_pl = tf.placeholder(tf.bool, [], name='is_training_pl')
learning_rate = tf.placeholder(tf.float32, shape=(), name="lr_pl")
# Data
# labels have already been converted: 0 = normal data, 1 = anomalous data
logger.info('Data loading...')
trainx, trainy = data.get_train(label)
if enable_early_stop: validx, validy = data.get_valid(label)
trainx_copy = trainx.copy()
testx, testy = data.get_test(label)
rng = np.random.RandomState(random_seed)
nr_batches_train = int(trainx.shape[0] / batch_size)
nr_batches_test = int(testx.shape[0] / batch_size)
logger.info('Building graph...')
logger.warn("ALAD is training with the following parameters:")
display_parameters(batch_size, starting_lr, ema_decay, degree, label,
allow_zz, score_method, do_spectral_norm)
gen = network.decoder
enc = network.encoder
dis_xz = network.discriminator_xz
dis_xx = network.discriminator_xx
dis_zz = network.discriminator_zz
with tf.variable_scope('encoder_model'):
z_gen = enc(x_pl, is_training=is_training_pl,
do_spectral_norm=do_spectral_norm)
with tf.variable_scope('generator_model'):
x_gen = gen(z_pl, is_training=is_training_pl)
rec_x = gen(z_gen, is_training=is_training_pl, reuse=True)
with tf.variable_scope('encoder_model'):
rec_z = enc(x_gen, is_training=is_training_pl, reuse=True,
do_spectral_norm=do_spectral_norm)
with tf.variable_scope('discriminator_model_xz'):
l_encoder, inter_layer_inp_xz = dis_xz(x_pl, z_gen,
is_training=is_training_pl,
do_spectral_norm=do_spectral_norm)
l_generator, inter_layer_rct_xz = dis_xz(x_gen, z_pl,
is_training=is_training_pl,
reuse=True,
do_spectral_norm=do_spectral_norm)
with tf.variable_scope('discriminator_model_xx'):
x_logit_real, inter_layer_inp_xx = dis_xx(x_pl, x_pl,
is_training=is_training_pl,
do_spectral_norm=do_spectral_norm)
x_logit_fake, inter_layer_rct_xx = dis_xx(x_pl, rec_x, is_training=is_training_pl,
reuse=True, do_spectral_norm=do_spectral_norm)
with tf.variable_scope('discriminator_model_zz'):
z_logit_real, _ = dis_zz(z_pl, z_pl, is_training=is_training_pl,
do_spectral_norm=do_spectral_norm)
z_logit_fake, _ = dis_zz(z_pl, rec_z, is_training=is_training_pl,
reuse=True, do_spectral_norm=do_spectral_norm)
with tf.name_scope('loss_functions'):
# discriminator xz
loss_dis_enc = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.ones_like(l_encoder),logits=l_encoder))
loss_dis_gen = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.zeros_like(l_generator),logits=l_generator))
dis_loss_xz = loss_dis_gen + loss_dis_enc
# discriminator xx
x_real_dis = tf.nn.sigmoid_cross_entropy_with_logits(
logits=x_logit_real, labels=tf.ones_like(x_logit_real))
x_fake_dis = tf.nn.sigmoid_cross_entropy_with_logits(
logits=x_logit_fake, labels=tf.zeros_like(x_logit_fake))
dis_loss_xx = tf.reduce_mean(x_real_dis + x_fake_dis)
# discriminator zz
z_real_dis = tf.nn.sigmoid_cross_entropy_with_logits(
logits=z_logit_real, labels=tf.ones_like(z_logit_real))
z_fake_dis = tf.nn.sigmoid_cross_entropy_with_logits(
logits=z_logit_fake, labels=tf.zeros_like(z_logit_fake))
dis_loss_zz = tf.reduce_mean(z_real_dis + z_fake_dis)
loss_discriminator = dis_loss_xz + dis_loss_xx + dis_loss_zz if \
allow_zz else dis_loss_xz + dis_loss_xx
# generator and encoder
gen_loss_xz = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.ones_like(l_generator),logits=l_generator))
enc_loss_xz = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.zeros_like(l_encoder), logits=l_encoder))
x_real_gen = tf.nn.sigmoid_cross_entropy_with_logits(
logits=x_logit_real, labels=tf.zeros_like(x_logit_real))
x_fake_gen = tf.nn.sigmoid_cross_entropy_with_logits(
logits=x_logit_fake, labels=tf.ones_like(x_logit_fake))
z_real_gen = tf.nn.sigmoid_cross_entropy_with_logits(
logits=z_logit_real, labels=tf.zeros_like(z_logit_real))
z_fake_gen = tf.nn.sigmoid_cross_entropy_with_logits(
logits=z_logit_fake, labels=tf.ones_like(z_logit_fake))
cost_x = tf.reduce_mean(x_real_gen + x_fake_gen)
cost_z = tf.reduce_mean(z_real_gen + z_fake_gen)
cycle_consistency_loss = cost_x + cost_z if allow_zz else cost_x
loss_generator = gen_loss_xz + cycle_consistency_loss
loss_encoder = enc_loss_xz + cycle_consistency_loss
with tf.name_scope('optimizers'):
# control op dependencies for batch norm and trainable variables
tvars = tf.trainable_variables()
dxzvars = [var for var in tvars if 'discriminator_model_xz' in var.name]
dxxvars = [var for var in tvars if 'discriminator_model_xx' in var.name]
dzzvars = [var for var in tvars if 'discriminator_model_zz' in var.name]
gvars = [var for var in tvars if 'generator_model' in var.name]
evars = [var for var in tvars if 'encoder_model' in var.name]
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
update_ops_gen = [x for x in update_ops if ('generator_model' in x.name)]
update_ops_enc = [x for x in update_ops if ('encoder_model' in x.name)]
update_ops_dis_xz = [x for x in update_ops if
('discriminator_model_xz' in x.name)]
update_ops_dis_xx = [x for x in update_ops if
('discriminator_model_xx' in x.name)]
update_ops_dis_zz = [x for x in update_ops if
('discriminator_model_zz' in x.name)]
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate,
beta1=0.5)
with tf.control_dependencies(update_ops_gen):
gen_op = optimizer.minimize(loss_generator, var_list=gvars,
global_step=global_step)
with tf.control_dependencies(update_ops_enc):
enc_op = optimizer.minimize(loss_encoder, var_list=evars)
with tf.control_dependencies(update_ops_dis_xz):
dis_op_xz = optimizer.minimize(dis_loss_xz, var_list=dxzvars)
with tf.control_dependencies(update_ops_dis_xx):
dis_op_xx = optimizer.minimize(dis_loss_xx, var_list=dxxvars)
with tf.control_dependencies(update_ops_dis_zz):
dis_op_zz = optimizer.minimize(dis_loss_zz, var_list=dzzvars)
# Exponential Moving Average for inference
def train_op_with_ema_dependency(vars, op):
ema = tf.train.ExponentialMovingAverage(decay=ema_decay)
maintain_averages_op = ema.apply(vars)
with tf.control_dependencies([op]):
train_op = tf.group(maintain_averages_op)
return train_op, ema
train_gen_op, gen_ema = train_op_with_ema_dependency(gvars, gen_op)
train_enc_op, enc_ema = train_op_with_ema_dependency(evars, enc_op)
train_dis_op_xz, xz_ema = train_op_with_ema_dependency(dxzvars,
dis_op_xz)
train_dis_op_xx, xx_ema = train_op_with_ema_dependency(dxxvars,
dis_op_xx)
train_dis_op_zz, zz_ema = train_op_with_ema_dependency(dzzvars,
dis_op_zz)
with tf.variable_scope('encoder_model'):
z_gen_ema = enc(x_pl, is_training=is_training_pl,
getter=get_getter(enc_ema), reuse=True,
do_spectral_norm=do_spectral_norm)
with tf.variable_scope('generator_model'):
rec_x_ema = gen(z_gen_ema, is_training=is_training_pl,
getter=get_getter(gen_ema), reuse=True)
x_gen_ema = gen(z_pl, is_training=is_training_pl,
getter=get_getter(gen_ema), reuse=True)
with tf.variable_scope('discriminator_model_xx'):
l_encoder_emaxx, inter_layer_inp_emaxx = dis_xx(x_pl, x_pl,
is_training=is_training_pl,
getter=get_getter(xx_ema),
reuse=True,
do_spectral_norm=do_spectral_norm)
l_generator_emaxx, inter_layer_rct_emaxx = dis_xx(x_pl, rec_x_ema,
is_training=is_training_pl,
getter=get_getter(
xx_ema),
reuse=True,
do_spectral_norm=do_spectral_norm)
with tf.name_scope('Testing'):
with tf.variable_scope('Scores'):
score_ch = tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.ones_like(l_generator_emaxx),
logits=l_generator_emaxx)
score_ch = tf.squeeze(score_ch)
rec = x_pl - rec_x_ema
rec = tf.contrib.layers.flatten(rec)
score_l1 = tf.norm(rec, ord=1, axis=1,
keep_dims=False, name='d_loss')
score_l1 = tf.squeeze(score_l1)
rec = x_pl - rec_x_ema
rec = tf.contrib.layers.flatten(rec)
score_l2 = tf.norm(rec, ord=2, axis=1,
keep_dims=False, name='d_loss')
score_l2 = tf.squeeze(score_l2)
inter_layer_inp, inter_layer_rct = inter_layer_inp_emaxx, \
inter_layer_rct_emaxx
fm = inter_layer_inp - inter_layer_rct
fm = tf.contrib.layers.flatten(fm)
score_fm = tf.norm(fm, ord=degree, axis=1,
keep_dims=False, name='d_loss')
score_fm = tf.squeeze(score_fm)
if enable_early_stop:
rec_error_valid = tf.reduce_mean(score_fm)
if enable_sm:
with tf.name_scope('summary'):
with tf.name_scope('dis_summary'):
tf.summary.scalar('loss_discriminator', loss_discriminator, ['dis'])
tf.summary.scalar('loss_dis_encoder', loss_dis_enc, ['dis'])
tf.summary.scalar('loss_dis_gen', loss_dis_gen, ['dis'])
tf.summary.scalar('loss_dis_xz', dis_loss_xz, ['dis'])
tf.summary.scalar('loss_dis_xx', dis_loss_xx, ['dis'])
if allow_zz:
tf.summary.scalar('loss_dis_zz', dis_loss_zz, ['dis'])
with tf.name_scope('gen_summary'):
tf.summary.scalar('loss_generator', loss_generator, ['gen'])
tf.summary.scalar('loss_encoder', loss_encoder, ['gen'])
tf.summary.scalar('loss_encgen_dxx', cost_x, ['gen'])
if allow_zz:
tf.summary.scalar('loss_encgen_dzz', cost_z, ['gen'])
if enable_early_stop:
with tf.name_scope('validation_summary'):
tf.summary.scalar('valid', rec_error_valid, ['v'])
with tf.name_scope('img_summary'):
heatmap_pl_latent = tf.placeholder(tf.float32,
shape=(1, 480, 640, 3),
name="heatmap_pl_latent")
sum_op_latent = tf.summary.image('heatmap_latent', heatmap_pl_latent)
if dataset in IMAGES_DATASETS:
with tf.name_scope('image_summary'):
tf.summary.image('reconstruct', rec_x, 8, ['image'])
tf.summary.image('input_images', x_pl, 8, ['image'])
else:
heatmap_pl_rec = tf.placeholder(tf.float32, shape=(1, 480, 640, 3),
name="heatmap_pl_rec")
with tf.name_scope('image_summary'):
tf.summary.image('heatmap_rec', heatmap_pl_rec, 1, ['image'])
sum_op_dis = tf.summary.merge_all('dis')
sum_op_gen = tf.summary.merge_all('gen')
sum_op = tf.summary.merge([sum_op_dis, sum_op_gen])
sum_op_im = tf.summary.merge_all('image')
sum_op_valid = tf.summary.merge_all('v')
logdir = create_logdir(dataset, label, random_seed, allow_zz, score_method,
do_spectral_norm)
saver = tf.train.Saver(max_to_keep=2)
save_model_secs = None if enable_early_stop else 20
sv = tf.train.Supervisor(logdir=logdir, save_summaries_secs=None, saver=saver, save_model_secs=save_model_secs)
logger.info('Start training...')
with sv.managed_session(config=config) as sess:
step = sess.run(global_step)
logger.info('Initialization done at epoch {}'.format(step / nr_batches_train))
writer = tf.summary.FileWriter(logdir, sess.graph)
train_batch = 0
epoch = 0
best_valid_loss = 0
request_stop = False
while not sv.should_stop() and epoch < nb_epochs:
lr = starting_lr
begin = time.time()
# construct randomly permuted minibatches
trainx = trainx[rng.permutation(trainx.shape[0])] # shuffling dataset
trainx_copy = trainx_copy[rng.permutation(trainx.shape[0])]
train_loss_dis_xz, train_loss_dis_xx, train_loss_dis_zz, \
train_loss_dis, train_loss_gen, train_loss_enc = [0, 0, 0, 0, 0, 0]
# Training
for t in range(nr_batches_train):
display_progression_epoch(t, nr_batches_train)
ran_from = t * batch_size
ran_to = (t + 1) * batch_size
# train discriminator
feed_dict = {x_pl: trainx[ran_from:ran_to],
z_pl: np.random.normal(size=[batch_size, latent_dim]),
is_training_pl: True,
learning_rate:lr}
_, _, _, ld, ldxz, ldxx, ldzz, step = sess.run([train_dis_op_xz,
train_dis_op_xx,
train_dis_op_zz,
loss_discriminator,
dis_loss_xz,
dis_loss_xx,
dis_loss_zz,
global_step],
feed_dict=feed_dict)
train_loss_dis += ld
train_loss_dis_xz += ldxz
train_loss_dis_xx += ldxx
train_loss_dis_zz += ldzz
# train generator and encoder
feed_dict = {x_pl: trainx_copy[ran_from:ran_to],
z_pl: np.random.normal(size=[batch_size, latent_dim]),
is_training_pl: True,
learning_rate:lr}
_,_, le, lg = sess.run([train_gen_op,
train_enc_op,
loss_encoder,
loss_generator],
feed_dict=feed_dict)
train_loss_gen += lg
train_loss_enc += le
if enable_sm:
sm = sess.run(sum_op, feed_dict=feed_dict)
writer.add_summary(sm, step)
if t % FREQ_PRINT == 0 and dataset in IMAGES_DATASETS: # inspect reconstruction
t = np.random.randint(0, trainx.shape[0]-batch_size)
ran_from = t
ran_to = t + batch_size
feed_dict = {x_pl: trainx[ran_from:ran_to],
z_pl: np.random.normal(
size=[batch_size, latent_dim]),
is_training_pl: False}
sm = sess.run(sum_op_im, feed_dict=feed_dict)
writer.add_summary(sm, step)#train_batch)
train_batch += 1
train_loss_gen /= nr_batches_train
train_loss_enc /= nr_batches_train
train_loss_dis /= nr_batches_train
train_loss_dis_xz /= nr_batches_train
train_loss_dis_xx /= nr_batches_train
train_loss_dis_zz /= nr_batches_train
logger.info('Epoch terminated')
if allow_zz:
print("Epoch %d | time = %ds | loss gen = %.4f | loss enc = %.4f | "
"loss dis = %.4f | loss dis xz = %.4f | loss dis xx = %.4f | "
"loss dis zz = %.4f"
% (epoch, time.time() - begin, train_loss_gen,
train_loss_enc, train_loss_dis, train_loss_dis_xz,
train_loss_dis_xx, train_loss_dis_zz))
else:
print("Epoch %d | time = %ds | loss gen = %.4f | loss enc = %.4f | "
"loss dis = %.4f | loss dis xz = %.4f | loss dis xx = %.4f | "
% (epoch, time.time() - begin, train_loss_gen,
train_loss_enc, train_loss_dis, train_loss_dis_xz,
train_loss_dis_xx))
##EARLY STOPPING
if (epoch + 1) % FREQ_EV == 0 and enable_early_stop:
valid_loss = 0
feed_dict = {x_pl: validx,
z_pl: np.random.normal(size=[validx.shape[0], latent_dim]),
is_training_pl: False}
vl, lat = sess.run([rec_error_valid, rec_z], feed_dict=feed_dict)
valid_loss += vl
if enable_sm:
sm = sess.run(sum_op_valid, feed_dict=feed_dict)
writer.add_summary(sm, step) # train_batch)
logger.info('Validation: valid loss {:.4f}'.format(valid_loss))
if (valid_loss < best_valid_loss or epoch == FREQ_EV-1):
best_valid_loss = valid_loss
logger.info("Best model - valid loss = {:.4f} - saving...".format(best_valid_loss))
sv.saver.save(sess, logdir+'/model.ckpt', global_step=step)
nb_without_improvements = 0
else:
nb_without_improvements += FREQ_EV
if nb_without_improvements > PATIENCE:
sv.request_stop()
logger.warning(
"Early stopping at epoch {} with weights from epoch {}".format(
epoch, epoch - nb_without_improvements))
epoch += 1
sv.saver.save(sess, logdir+'/model.ckpt', global_step=step)
logger.warning('Testing evaluation...')
scores_ch = []
scores_l1 = []
scores_l2 = []
scores_fm = []
inference_time = []
# Create scores
for t in range(nr_batches_test):
# construct randomly permuted minibatches
ran_from = t * batch_size
ran_to = (t + 1) * batch_size
begin_test_time_batch = time.time()
feed_dict = {x_pl: testx[ran_from:ran_to],
z_pl: np.random.normal(size=[batch_size, latent_dim]),
is_training_pl:False}
scores_ch += sess.run(score_ch, feed_dict=feed_dict).tolist()
scores_l1 += sess.run(score_l1, feed_dict=feed_dict).tolist()
scores_l2 += sess.run(score_l2, feed_dict=feed_dict).tolist()
scores_fm += sess.run(score_fm, feed_dict=feed_dict).tolist()
inference_time.append(time.time() - begin_test_time_batch)
inference_time = np.mean(inference_time)
logger.info('Testing : mean inference time is %.4f' % (inference_time))
if testx.shape[0] % batch_size != 0:
batch, size = batch_fill(testx, batch_size)
feed_dict = {x_pl: batch,
z_pl: np.random.normal(size=[batch_size, latent_dim]),
is_training_pl: False}
bscores_ch = sess.run(score_ch,feed_dict=feed_dict).tolist()
bscores_l1 = sess.run(score_l1,feed_dict=feed_dict).tolist()
bscores_l2 = sess.run(score_l2,feed_dict=feed_dict).tolist()
bscores_fm = sess.run(score_fm,feed_dict=feed_dict).tolist()
scores_ch += bscores_ch[:size]
scores_l1 += bscores_l1[:size]
scores_l2 += bscores_l2[:size]
scores_fm += bscores_fm[:size]
model = 'alad_sn{}_dzz{}'.format(do_spectral_norm, allow_zz)
save_results(scores_ch, testy, model, dataset, 'ch',
'dzzenabled{}'.format(allow_zz), label, random_seed, step)
save_results(scores_l1, testy, model, dataset, 'l1',
'dzzenabled{}'.format(allow_zz), label, random_seed, step)
save_results(scores_l2, testy, model, dataset, 'l2',
'dzzenabled{}'.format(allow_zz), label, random_seed, step)
save_results(scores_fm, testy, model, dataset, 'fm',
'dzzenabled{}'.format(allow_zz), label, random_seed, step)
def run(args):
""" Runs the training process"""
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
with tf.Graph().as_default():
# Set the graph level seed
tf.set_random_seed(args.rd)
train_and_test(args.dataset, args.nb_epochs, args.d, args.rd, args.label,
args.enable_dzz, args.enable_sm, args.m,
args.enable_early_stop, args.sn)
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
talksapp/main.go
|
// Copyright 2013 The Go Authors. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd.
// Package talksapp implements the talks.godoc.org server.
package talksapp
import (
"bytes"
"context"
"errors"
"fmt"
"html/template"
"io"
"net/http"
"os"
"path"
"time"
"google.golang.org/appengine"
"google.golang.org/appengine/log"
"google.golang.org/appengine/memcache"
"google.golang.org/appengine/urlfetch"
"github.com/golang/gddo/gosrc"
"github.com/golang/gddo/httputil"
"golang.org/x/tools/present"
)
var (
presentTemplates = map[string]*template.Template{
".article": parsePresentTemplate("article.tmpl"),
".slide": parsePresentTemplate("slides.tmpl"),
}
homeArticle = loadHomeArticle()
contactEmail = "[email protected]"
// used for mocking in tests
getPresentation = gosrc.GetPresentation
playCompileURL = "https://play.golang.org/compile"
githubToken = os.Getenv("GITHUB_TOKEN")
githubClientID = os.Getenv("GITHUB_CLIENT_ID")
githubClientSecret = os.Getenv("GITHUB_CLIENT_SECRET")
)
func init() {
http.Handle("/", handlerFunc(serveRoot))
http.Handle("/compile", handlerFunc(serveCompile))
http.Handle("/bot.html", handlerFunc(serveBot))
present.PlayEnabled = true
if s := os.Getenv("CONTACT_EMAIL"); s != "" {
contactEmail = s
}
if !appengine.IsDevAppServer() {
if githubToken == "" && githubClientID == "" && githubClientSecret == "" {
panic("missing GitHub metadata, follow the instructions on README.md")
}
if githubToken != "" && (githubClientID != "" || githubClientSecret != "") {
panic("GitHub token and client secret given, follow the instructions on README.md")
}
}
}
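// Illustrative deployment note (placeholder values): outside the dev server the
// checks above expect either a personal token or an OAuth client pair, never both:
//
//	GITHUB_TOKEN=<token>                                  # token only
//	GITHUB_CLIENT_ID=<id> GITHUB_CLIENT_SECRET=<secret>   # client pair only
//
// Supplying neither, or mixing the two forms, trips the panics in init.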
func playable(c present.Code) bool {
return present.PlayEnabled && c.Play && c.Ext == ".go"
}
func parsePresentTemplate(name string) *template.Template {
t := present.Template()
t = t.Funcs(template.FuncMap{"playable": playable})
if _, err := t.ParseFiles("present/templates/"+name, "present/templates/action.tmpl"); err != nil {
panic(err)
}
t = t.Lookup("root")
if t == nil {
panic("root template not found for " + name)
}
return t
}
func loadHomeArticle() []byte {
const fname = "assets/home.article"
f, err := os.Open(fname)
if err != nil {
panic(err)
}
defer f.Close()
doc, err := present.Parse(f, fname, 0)
if err != nil {
panic(err)
}
var buf bytes.Buffer
if err := renderPresentation(&buf, fname, doc); err != nil {
panic(err)
}
return buf.Bytes()
}
func renderPresentation(w io.Writer, fname string, doc *present.Doc) error {
t := presentTemplates[path.Ext(fname)]
if t == nil {
return errors.New("unknown template extension")
}
data := struct {
*present.Doc
Template *template.Template
PlayEnabled bool
NotesEnabled bool
}{doc, t, true, true}
return t.Execute(w, &data)
}
type presFileNotFoundError string
func (s presFileNotFoundError) Error() string { return fmt.Sprintf("File %s not found.", string(s)) }
func writeHTMLHeader(w http.ResponseWriter, status int) {
w.Header().Set("Content-Type", "text/html; charset=utf-8")
w.WriteHeader(status)
}
func writeTextHeader(w http.ResponseWriter, status int) {
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
w.WriteHeader(status)
}
func httpClient(r *http.Request) *http.Client {
ctx, _ := context.WithTimeout(appengine.NewContext(r), 10*time.Second)
return &http.Client{
Transport: &httputil.AuthTransport{
GithubToken: githubToken,
GithubClientID: githubClientID,
GithubClientSecret: githubClientSecret,
Base: &urlfetch.Transport{Context: ctx},
UserAgent: fmt.Sprintf("%s (+http://%s/-/bot)", appengine.AppID(ctx), r.Host),
},
}
}
type handlerFunc func(http.ResponseWriter, *http.Request) error
func (f handlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) {
ctx := appengine.NewContext(r)
err := f(w, r)
if err == nil {
return
} else if gosrc.IsNotFound(err) {
writeTextHeader(w, 400)
io.WriteString(w, "Not Found.")
} else if e, ok := err.(*gosrc.RemoteError); ok {
writeTextHeader(w, 500)
fmt.Fprintf(w, "Error accessing %s.\n%v", e.Host, e)
log.Infof(ctx, "Remote error %s: %v", e.Host, e)
} else if e, ok := err.(presFileNotFoundError); ok {
writeTextHeader(w, 200)
io.WriteString(w, e.Error())
} else if err != nil {
writeTextHeader(w, 500)
io.WriteString(w, "Internal server error.")
log.Errorf(ctx, "Internal error %v", err)
}
}
func serveRoot(w http.ResponseWriter, r *http.Request) error {
switch {
case r.Method != "GET" && r.Method != "HEAD":
writeTextHeader(w, 405)
_, err := io.WriteString(w, "Method not supported.")
return err
case r.URL.Path == "/":
writeHTMLHeader(w, 200)
_, err := w.Write(homeArticle)
return err
default:
return servePresentation(w, r)
}
}
func servePresentation(w http.ResponseWriter, r *http.Request) error {
ctx := appengine.NewContext(r)
importPath := r.URL.Path[1:]
item, err := memcache.Get(ctx, importPath)
if err == nil {
writeHTMLHeader(w, 200)
w.Write(item.Value)
return nil
} else if err != memcache.ErrCacheMiss {
log.Errorf(ctx, "Could not get item from Memcache: %v", err)
}
log.Infof(ctx, "Fetching presentation %s.", importPath)
pres, err := getPresentation(ctx, httpClient(r), importPath)
if err != nil {
return err
}
parser := &present.Context{
ReadFile: func(name string) ([]byte, error) {
if p, ok := pres.Files[name]; ok {
return p, nil
}
return nil, presFileNotFoundError(name)
},
}
doc, err := parser.Parse(bytes.NewReader(pres.Files[pres.Filename]), pres.Filename, 0)
if err != nil {
return err
}
var buf bytes.Buffer
if err := renderPresentation(&buf, importPath, doc); err != nil {
return err
}
if err := memcache.Add(ctx, &memcache.Item{
Key: importPath,
Value: buf.Bytes(),
Expiration: time.Hour,
}); err != nil {
log.Errorf(ctx, "Could not cache presentation %s: %v", importPath, err)
return nil
}
writeHTMLHeader(w, 200)
_, err = w.Write(buf.Bytes())
return err
}
func serveCompile(w http.ResponseWriter, r *http.Request) error {
client := urlfetch.Client(appengine.NewContext(r))
if err := r.ParseForm(); err != nil {
return err
}
resp, err := client.PostForm(playCompileURL, r.Form)
if err != nil {
return err
}
defer resp.Body.Close()
w.Header().Set("Content-Type", resp.Header.Get("Content-Type"))
_, err = io.Copy(w, resp.Body)
return err
}
func serveBot(w http.ResponseWriter, r *http.Request) error {
ctx := appengine.NewContext(r)
writeTextHeader(w, 200)
_, err := fmt.Fprintf(w, "Contact %s for help with the %s bot.", contactEmail, appengine.AppID(ctx))
return err
}
|
[
"\"GITHUB_TOKEN\"",
"\"GITHUB_CLIENT_ID\"",
"\"GITHUB_CLIENT_SECRET\"",
"\"CONTACT_EMAIL\""
] |
[] |
[
"GITHUB_CLIENT_ID",
"CONTACT_EMAIL",
"GITHUB_TOKEN",
"GITHUB_CLIENT_SECRET"
] |
[]
|
["GITHUB_CLIENT_ID", "CONTACT_EMAIL", "GITHUB_TOKEN", "GITHUB_CLIENT_SECRET"]
|
go
| 4 | 0 | |
pkg/kubectl/cmd/util/factory_builder.go
|
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// this file contains factories with no other dependencies
package util
import (
"os"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/client-go/dynamic"
scaleclient "k8s.io/client-go/scale"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/kubectl/plugins"
"k8s.io/kubernetes/pkg/kubectl/resource"
)
type ring2Factory struct {
clientAccessFactory ClientAccessFactory
objectMappingFactory ObjectMappingFactory
}
func NewBuilderFactory(clientAccessFactory ClientAccessFactory, objectMappingFactory ObjectMappingFactory) BuilderFactory {
f := &ring2Factory{
clientAccessFactory: clientAccessFactory,
objectMappingFactory: objectMappingFactory,
}
return f
}
// NewBuilder returns a new resource builder for structured api objects.
func (f *ring2Factory) NewBuilder() *resource.Builder {
clientMapperFunc := resource.ClientMapperFunc(f.objectMappingFactory.ClientForMapping)
mapper, typer := f.objectMappingFactory.Object()
unstructuredClientMapperFunc := resource.ClientMapperFunc(f.objectMappingFactory.UnstructuredClientForMapping)
categoryExpander := f.objectMappingFactory.CategoryExpander()
return resource.NewBuilder(
&resource.Mapper{
RESTMapper: mapper,
ObjectTyper: typer,
ClientMapper: clientMapperFunc,
Decoder: InternalVersionDecoder(),
},
&resource.Mapper{
RESTMapper: mapper,
ObjectTyper: typer,
ClientMapper: unstructuredClientMapperFunc,
Decoder: unstructured.UnstructuredJSONScheme,
},
categoryExpander,
)
}
// PluginLoader loads plugins from a path set by the KUBECTL_PLUGINS_PATH env var.
// If this env var is not set, it defaults to
// "~/.kube/plugins", plus
// "./kubectl/plugins" directory under the "data dir" directory specified by the XDG
// system directory structure spec for the given platform.
func (f *ring2Factory) PluginLoader() plugins.PluginLoader {
if len(os.Getenv("KUBECTL_PLUGINS_PATH")) > 0 {
return plugins.KubectlPluginsPathPluginLoader()
}
return plugins.TolerantMultiPluginLoader{
plugins.XDGDataDirsPluginLoader(),
plugins.UserDirPluginLoader(),
}
}
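// Illustrative example (paths are placeholders): when KUBECTL_PLUGINS_PATH is set,
// only that loader is consulted, e.g.
//
//	KUBECTL_PLUGINS_PATH=$HOME/.kube/plugins:/opt/kubectl-plugins kubectl plugin <name>
//
// otherwise plugins are discovered via the XDG data dirs and user dir loaders above.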
func (f *ring2Factory) PluginRunner() plugins.PluginRunner {
return &plugins.ExecPluginRunner{}
}
func (f *ring2Factory) ScaleClient() (scaleclient.ScalesGetter, error) {
discoClient, err := f.clientAccessFactory.DiscoveryClient()
if err != nil {
return nil, err
}
restClient, err := f.clientAccessFactory.RESTClient()
if err != nil {
return nil, err
}
resolver := scaleclient.NewDiscoveryScaleKindResolver(discoClient)
mapper, _ := f.objectMappingFactory.Object()
return scaleclient.New(restClient, mapper, dynamic.LegacyAPIPathResolverFunc, resolver), nil
}
func (f *ring2Factory) Scaler(mapping *meta.RESTMapping) (kubectl.Scaler, error) {
clientset, err := f.clientAccessFactory.ClientSet()
if err != nil {
return nil, err
}
scalesGetter, err := f.ScaleClient()
if err != nil {
return nil, err
}
gvk := mapping.GroupVersionKind.GroupVersion().WithResource(mapping.Resource)
return kubectl.ScalerFor(mapping.GroupVersionKind.GroupKind(), clientset.Batch(), scalesGetter, gvk.GroupResource()), nil
}
func (f *ring2Factory) Reaper(mapping *meta.RESTMapping) (kubectl.Reaper, error) {
clientset, clientsetErr := f.clientAccessFactory.ClientSet()
if clientsetErr != nil {
return nil, clientsetErr
}
scaler, err := f.ScaleClient()
if err != nil {
return nil, err
}
reaper, reaperErr := kubectl.ReaperFor(mapping.GroupVersionKind.GroupKind(), clientset, scaler)
if kubectl.IsNoSuchReaperError(reaperErr) {
return nil, reaperErr
}
return reaper, reaperErr
}
|
[
"\"KUBECTL_PLUGINS_PATH\""
] |
[] |
[
"KUBECTL_PLUGINS_PATH"
] |
[]
|
["KUBECTL_PLUGINS_PATH"]
|
go
| 1 | 0 | |
fsl_sub/cmdline.py
|
#!/usr/bin/python
# fsl_sub python module
# Copyright (c) 2018-2021 University of Oxford (Duncan Mortimer)
import argparse
import getpass
import logging
import os
import socket
import sys
import traceback
from ruamel.yaml import YAML
from fsl_sub import (
submit,
report,
delete_job,
)
from fsl_sub.config import (
read_config,
method_config,
coprocessor_config,
has_queues,
has_coprocessor,
uses_projects,
)
from fsl_sub.config import example_config as e_conf
import fsl_sub.consts
from fsl_sub.coprocessors import (
coproc_info,
coproc_classes,
)
from fsl_sub.exceptions import (
ArgumentError,
CommandError,
BadConfiguration,
BadSubmission,
GridOutputError,
InstallError,
NoFsl,
NoModule,
NotAFslDir,
PackageError,
UpdateError,
CONFIG_ERROR,
SUBMISSION_ERROR,
RUNNER_ERROR,
)
from fsl_sub.shell_modules import (
get_modules,
find_module_cmd,
)
from fsl_sub.parallel import (
parallel_envs,
process_pe_def,
)
from fsl_sub.projects import (
get_project_env,
)
from fsl_sub.utils import (
available_plugins,
blank_none,
conda_check_update,
conda_find_packages,
conda_install,
conda_update,
file_is_image,
find_fsldir,
load_plugins,
minutes_to_human,
titlize_key,
user_input,
yaml_repr_none,
)
from fsl_sub.version import VERSION
class MyArgParseFormatter(
argparse.ArgumentDefaultsHelpFormatter,
argparse.RawDescriptionHelpFormatter):
pass
def build_parser(
config=None, cp_info=None,
plugin_name=None, plugin_version=None):
'''Parse the command line, returns a dict keyed on option'''
logger = logging.getLogger(__name__)
if config is None:
config = read_config()
if cp_info is None:
cp_info = coproc_info()
if has_queues():
ll_envs = parallel_envs(config['queues'])
else:
ll_envs = None
# Build the epilog...
epilog = ''
mconf = method_config(config['method'])
if mconf['queues']:
epilog += '''
Queues:
There are several batch queues configured on the cluster:
'''
q_defs = []
for qname, q in config['queues'].items():
q_defs.append((qname, q))
q_defs.sort(key=lambda x: x[0])
for qname, q in q_defs:
pad = " " * 10
if q.get('slot_size', None) is not None:
qss = "{0}{1}B per slot; ".format(
q['slot_size'],
fsl_sub.consts.RAMUNITS)
else:
qss = ''
epilog += (
"{qname}:\n{q_pad}{timelimit} max run time; {qss} "
"{q[max_size]}{rmu}B maximum\n".format(
qname=qname,
q_pad=pad,
timelimit=minutes_to_human(q['time']),
q=q,
qss=qss,
rmu=fsl_sub.consts.RAMUNITS
))
if 'copros' in q:
cp_str = ''
for cp, cpdef in q['copros'].items():
if cp_str != '':
cp_str += '; '
cp_str += cp
if 'classes' in cpdef:
cp_str += " ({0})".format(','.join(cpdef['classes']))
epilog += (
pad + "Coprocessors available: "
+ cp_str + '\n'
)
if 'parallel_envs' in q:
epilog += (
pad + "Parallel environments available: "
+ "; ".join(q['parallel_envs']) + '\n'
)
if 'map_ram' in q and q['map_ram']:
epilog += (
pad + "Supports splitting into multiple slots." + '\n'
)
epilog += '\n'
if cp_info['available']:
epilog += "Co-processors available:"
for cp in cp_info['available']:
epilog += '\n' + cp + '\n'
try:
cp_def = coprocessor_config(cp)
except BadConfiguration:
continue
if find_module_cmd():
if cp_def['uses_modules']:
epilog += " Available toolkits:" + '\n'
try:
module_list = get_modules(cp_def['module_parent'])
except NoModule as e:
raise BadConfiguration from e
epilog += " " + ', '.join(module_list) + '\n'
cp_classes = coproc_classes(cp)
if cp_classes:
epilog += (
" Co-processor classes available: " + '\n'
)
for cpclass in cp_classes:
epilog += (
" " + ": ".join(
(cpclass, cp_def['class_types'][cpclass]['doc']))
+ '\n'
)
logger.debug(epilog)
parser = argparse.ArgumentParser(
prog="fsl_sub",
formatter_class=MyArgParseFormatter,
description='FSL cluster submission.',
epilog=epilog,
)
single_g = parser.add_argument_group(
'Simple Tasks',
'Options for submitting individual tasks.'
)
basic_g = parser.add_argument_group(
'Basic options',
'Options that specify individual and array tasks.'
)
array_g = parser.add_argument_group(
'Array Tasks',
'Options for submitting and controlling array tasks.'
)
advanced_g = parser.add_argument_group(
'Advanced',
'Advanced queueing options not typically required.')
email_g = parser.add_argument_group(
'Emailing',
'Email notification options.')
copro_g = parser.add_argument_group(
'Co-processors',
'Options for requesting co-processors, e.g. GPUs')
query_g = parser.add_argument_group(
'Query configuration',
'Options for checking on fsl_sub capabilities'
)
if 'architecture' in mconf and mconf['architecture']:
advanced_g.add_argument(
'-a', '--arch',
action='append',
default=None,
help="Architecture [e.g., lx-amd64].")
else:
advanced_g.add_argument(
'-a', '--arch',
action='append',
default=None,
help="Architectures not available.")
if cp_info['available']:
copro_g.add_argument(
'-c', '--coprocessor',
default=None,
choices=cp_info['available'],
help="Request a co-processor, further details below.")
copro_g.add_argument(
'--coprocessor_multi',
default=1,
help="Request multiple co-processors for a job. This may take "
"the form of simple number or a complex definition of devices. "
"See your cluster documentation for details."
)
else:
copro_g.add_argument(
'-c', '--coprocessor',
default=None,
help="No co-processor configured - ignored.")
copro_g.add_argument(
'--coprocessor_multi',
default=1,
help="No co-processor configured - ignored"
)
if cp_info['classes']:
copro_g.add_argument(
'--coprocessor_class',
default=None,
choices=cp_info['classes'],
help="Request a specific co-processor hardware class. "
"Details of which classes are available for each co-processor "
"are below."
)
copro_g.add_argument(
'--coprocessor_class_strict',
action='store_true',
help="If set will only allow running on this class. "
"The default is to use this class and all more capable devices."
)
else:
copro_g.add_argument(
'--coprocessor_class',
default=None,
help="No co-processor classes configured - ignored."
)
copro_g.add_argument(
'--coprocessor_class_strict',
action='store_true',
help="No co-processor classes configured - ignored."
)
if cp_info['toolkits']:
copro_g.add_argument(
'--coprocessor_toolkit',
default=None,
choices=cp_info['toolkits'],
help="Request a specific version of the co-processor software "
"tools. Will default to the latest version available. "
"If you wish to use the toolkit defined in your current "
" environment, give the value '-1' to this argument."
)
else:
copro_g.add_argument(
'--coprocessor_toolkit',
default=None,
help="No co-processor toolkits configured - ignored."
)
advanced_g.add_argument(
'--debug',
action='store_true',
help=argparse.SUPPRESS
)
if 'script_conf' in mconf and mconf['script_conf']:
advanced_g.add_argument(
'-F', '--usescript',
action='store_true',
help="Use flags embedded in scripts to set queuing options - "
"all other options ignored."
)
else:
advanced_g.add_argument(
'-F', '--usescript',
action='store_true',
help="Use flags embedded in scripts to set queuing options - "
"not supported"
)
basic_g.add_argument(
'-j', '--jobhold',
default=None,
help="Place a hold on this task until specified job id has "
"completed."
)
basic_g.add_argument(
'--not_requeueable',
action='store_true',
help="Job cannot be requeued in the event of a node failure"
)
if 'array_holds' in mconf and mconf['array_holds']:
array_g.add_argument(
'--array_hold',
default=None,
help="Place a parallel hold on the specified array task. Each"
"sub-task is held until the equivalent sub-task in the"
"parent array task completes."
)
else:
array_g.add_argument(
'--array_hold',
default=None,
help="Not supported - will be converted to simple job hold"
)
basic_g.add_argument(
'-l', '--logdir',
default=None,
help="Where to output logfiles."
)
if mconf['mail_support']:
email_g.add_argument(
'-m', '--mailoptions',
default=None,
help="Specify job mail options, see your queuing software for "
"details."
)
email_g.add_argument(
'-M', '--mailto',
default="{username}@{hostname}".format(
username=getpass.getuser(),
hostname=socket.gethostname()
),
metavar="EMAIL_ADDRESS",
help="Who to email."
)
else:
email_g.add_argument(
'-m', '--mailoptions',
default=None,
help="Not supported - will be ignored"
)
email_g.add_argument(
'-M', '--mailto',
default="{username}@{hostname}".format(
username=getpass.getuser(),
hostname=socket.gethostname()
),
help="Not supported - will be ignored"
)
basic_g.add_argument(
'-n', '--novalidation',
action='store_true',
help="Don't check for presence of script/binary in your search"
"path (use where the software is only available on the "
"compute node)."
)
basic_g.add_argument(
'-N', '--name',
default=None,
help="Specify jobname as it will appear on queue. If not specified "
"then the job name will be the name of the script/binary submitted."
)
if 'job_priorities' in mconf and mconf['job_priorities']:
min = mconf['min_priority']
max = mconf['max_priority']
if min > max:
min = max
max = mconf['min_priority']
advanced_g.add_argument(
'-p', '--priority',
default=None,
type=int,
metavar="-".join((
str(min),
str(max)
)),
choices=range(min, max),
help="Specify a lower job priority (where supported)."
"Takes a negative integer."
)
else:
advanced_g.add_argument(
'-p', '--priority',
default=None,
type=int,
help="Not supported on this platform."
)
if has_queues():
basic_g.add_argument(
'-q', '--queue',
default=None,
help="Select a particular queue - see below for details. "
"Instead of choosing a queue try to specify the time required."
)
else:
basic_g.add_argument(
'-q', '--queue',
default=None,
help="Not relevant when not running in a cluster environment"
)
advanced_g.add_argument(
'-r', '--resource',
default=None,
action='append',
help="Pass a resource request or constraint string through to the job "
"scheduler. See your scheduler's instructions for details."
)
advanced_g.add_argument(
'--delete_job',
default=None,
type=int,
help="Deletes a queued/running job."
)
basic_g.add_argument(
'-R', '--jobram',
default=None,
type=int,
metavar=fsl_sub.consts.RAMUNITS + 'B',
help="Max total RAM required for job (integer in "
+ fsl_sub.consts.RAMUNITS + "B). "
"This is very important if your job requires more "
"than the queue slot memory limit as then your job can be "
"split over multiple slots automatically - see autoslotsbyram."
)
if ll_envs is not None:
advanced_g.add_argument(
'-s', '--parallelenv',
default=None,
metavar="PARALLELENV,THREADS",
help="Takes a comma-separated argument <pename>,<threads>."
"Submit a multi-threaded (or resource limited) task - requires a "
"parallel environment (<pename>) to be configured on the "
"requested queues. <threads> specifies the number of "
"threads/hosts required. e.g. '{pe_name},2'.\n"
"Some schedulers only support the threads part so specify "
"'threads' as a <pename>.".format(
pe_name=ll_envs[0])
)
else:
advanced_g.add_argument(
'-s', '--parallelenv',
default=None,
metavar="PARALLELENV,THREADS",
help="No parallel environments configured"
)
array_g.add_argument(
'-t', '--array_task',
default=None,
help="Specify a task file of commands to execute in parallel."
)
array_g.add_argument(
'--array_native',
default=None,
help="Binary/Script will handle array task internally. "
"Mutually exclusive with --array_task. Requires "
"an argument n[-m[:s]] which provides number of tasks (n) or "
"start (n), end (m) and increment of sub-task ID between sub-"
"tasks (s). Binary/script can use FSLSUB_JOBID_VAR, "
"FSLSUB_ARRAYTASKID_VAR, FSLSUB_ARRAYSTARTID_VAR, "
"FSLSUB_ARRAYENDID_VAR, FSLSUB_ARRAYSTEPSIZE_VAR, "
"FSLSUB_ARRAYCOUNT_VAR environment variables to identify the "
"environment variables that are set by the cluster manager to "
"identify the sub-task that is running."
)
advanced_g.add_argument(
'-x', '--array_limit',
default=None,
type=int,
metavar="NUMBER",
help="Specify the maximum number of parallel job sub-tasks to run "
"concurrently."
)
advanced_g.add_argument(
'--keep_jobscript',
action="store_true",
help="Whether to create and save a job submission script that records "
"the submission and command arguments. This will produce a file "
"'wrapper_<jobid>.sh' (jobid is the process ID of fsl_sub if using the "
"built-in shell backend and the file will be stored in the current directory "
"or the log directory (if specified)). In the case of a queue backend this "
"file can be submitted with the -F option."
)
query_g.add_argument(
'--has_coprocessor',
default=None,
metavar='COPROCESSOR_NAME',
help="fsl_sub returns with exit code of 0 if specified coprocessor is configured. "
"Exits with a return code of 1 if the coprocessor is not configured/availble. "
)
query_g.add_argument(
'--has_queues',
action="store_true",
help="fsl_sub returns with exit code of 0 if there is a compute cluster with queues "
"configured. "
"Exits with a return code of 1 if we are using the shell plugin. "
)
if has_queues():
advanced_g.add_argument(
'--export',
action='append',
default=[],
help="Job will inherit this environment variable. Repeat this option "
"for as many variables as you require or configure your ~/.fsl_sub.yml "
"file:\nexport_vars:\n - MYENVVAR\n - ANOTHERENVVAR\n"
"If you need to change the value of an environment variable then this is "
"achieved by providing:\n--export=VARNAME=NEWVALUE\n."
)
if uses_projects():
advanced_g.add_argument(
'--project',
default=None,
help="Request job is run against the specified project/account"
)
else:
advanced_g.add_argument(
'--project',
default=None,
help="Projects not used"
)
basic_g.add_argument(
'-S', '--noramsplit',
action='store_true',
help="Disable the automatic requesting of multiple threads "
"sufficient to allow your job to run within the RAM constraints."
)
else:
advanced_g.add_argument(
'--project',
default=None,
type=str,
help="Not relevant when not running in a cluster environment"
)
basic_g.add_argument(
'-S', '--noramsplit',
action='store_true',
help="Not relevant when not running in a cluster environment"
)
basic_g.add_argument(
'-T', '--jobtime',
default=None,
type=int,
metavar="MINUTES",
help="Estimated job length in minutes, used to automatically choose "
"the queue name."
)
query_g.add_argument(
'--show_config',
action="store_true",
help="Display the configuration currently in force"
)
parser.add_argument(
'-v', '--verbose',
action='store_true',
help="Verbose mode."
)
version_string = '%(prog)s ' + VERSION
if plugin_name is not None:
version_string += " ({0} {1})".format(
plugin_name, plugin_version
)
parser.add_argument(
'-V', '--version',
action='version',
version=version_string)
parser.add_argument(
'-z', '--fileisimage',
default=None,
metavar='file',
help="If <file> already exists and is an MRI image file, do nothing "
"and exit."
)
single_g.add_argument(
'args',
nargs=argparse.REMAINDER,
default=None,
help="Program (and arguments) to submit to queue "
"(not used with array tasks).")
return parser
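# Illustrative invocation (queue name and numbers are placeholders, not defaults):
# a typical submission built from the options defined above could look like
#   fsl_sub -q short.q -R 16 -T 90 -N my_job ./my_script.sh arg1
# i.e. queue, RAM, estimated run time in minutes and a job name, followed by the
# command and its arguments.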
def example_config_parser(parser_class=argparse.ArgumentParser):
'''Parse the command line, returns a dict keyed on option'''
logger = logging.getLogger(__name__)
plug_ins = available_plugins()
if len(plug_ins) == 1:
default = plug_ins[0]
else:
default = plug_ins[-1]
logger.debug("plugins found:")
logger.debug(plug_ins)
parser = parser_class(
prog="fsl_sub_config",
description='FSL cluster submission configuration examples.',
)
parser.add_argument(
'plugin',
choices=plug_ins,
nargs='?',
default=default,
help="Output an example fsl_sub configuration which may be "
"customised for your system."
)
return parser
def report_parser(parser_class=argparse.ArgumentParser):
'''Parse the command line, returns a dict keyed on option'''
parser = parser_class(
prog="fsl_sub_report",
description='FSL cluster job reporting.',
)
parser.add_argument(
'job_id',
type=int,
help="Report job details for this job ID."
)
parser.add_argument(
'--subjob_id',
type=int,
help="Report job details for this subjob ID's only."
)
parser.add_argument(
'--parseable',
action="store_true",
help="Include all output '|' separated"
)
return parser
class LogFormatter(logging.Formatter):
default_fmt = logging.Formatter('%(levelname)s:%(name)s: %(message)s')
info_fmt = logging.Formatter('%(message)s')
def format(self, record):
if record.levelno >= logging.INFO:
return self.info_fmt.format(record)
else:
return self.default_fmt.format(record)
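# Illustrative behaviour (example messages only): records at INFO and above are
# printed bare, anything below keeps the level and logger name, e.g.
#   logger.info("Submitted")        -> "Submitted"
#   logger.debug("parsing options") -> "DEBUG:fsl_sub: parsing options"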
def example_config(args=None):
lhdr = logging.StreamHandler()
fmt = LogFormatter()
lhdr.setFormatter(fmt)
logger = logging.getLogger('fsl_sub')
logger.addHandler(lhdr)
example_parser = example_config_parser()
options = example_parser.parse_args(args=args)
yaml = YAML()
yaml.indent(mapping=2, sequence=4, offset=2)
yaml.compact(seq_seq=False, seq_map=False)
yaml_config = e_conf(options.plugin)
yaml.representer.add_representer(type(None), yaml_repr_none)
yaml.dump(yaml_config, sys.stdout)
def report_cmd(args=None):
lhdr = logging.StreamHandler()
fmt = LogFormatter()
lhdr.setFormatter(fmt)
logger = logging.getLogger('fsl_sub')
plugin_logger = logging.getLogger('fsl_sub.plugins')
logger.addHandler(lhdr)
plugin_logger.addHandler(lhdr)
cmd_parser = report_parser()
options = cmd_parser.parse_args(args=args)
try:
job_details = report(options.job_id, options.subjob_id)
except BadConfiguration as e:
cmd_parser.error("Bad configuration: " + str(e))
order = [
'id', 'name',
'script', 'arguments',
'submission_time', 'parents',
'children', 'job_directory', 'tasks',
]
task_order = [
'status', 'start_time',
'end_time', 'sub_time',
'utime', 'stime',
'exit_status', 'error_messages',
'maxmemory'
]
if job_details is None:
cmd_parser.error(
"Unrecognised job id " + str(options.job_id))
if not options.parseable:
if 'fake' in job_details:
print("No queuing software configured")
return
print("Job Details\n===========")
for key in order:
try:
detail = job_details[key]
except KeyError:
continue
if key != 'tasks':
if detail is None:
continue
print("{0}: ".format(titlize_key(key)), end='')
if key == 'submission_time':
print("{0}".format(
detail.strftime('%d/%m/%Y %H:%M:%S')))
elif detail is not None:
print("{0}".format(
str(detail)
))
else:
sub_tasks = False
if len(detail.items()) > 1:
sub_tasks = True
print("Tasks...")
else:
print("Task...")
for task, task_info in detail.items():
if sub_tasks:
print("Array ID: " + str(task))
for task_key in task_order:
try:
task_detail = task_info[task_key]
except KeyError:
continue
if task_detail is None:
continue
if task_key == 'status':
print(
"Job state: " + fsl_sub.consts.REPORTING[
task_detail])
else:
print("{}: ".format(titlize_key(task_key)), end='')
if task_key in ['utime', 'stime', ]:
print("{0}s".format(task_detail))
elif task_key in ['maxmemory']:
print("{0}MB".format(task_detail))
elif task_key in [
'sub_time', 'start_time', 'end_time']:
print(task_detail.strftime(
'%d/%m/%Y %H:%M:%S'))
elif isinstance(task_detail, (list, tuple)):
print(', '.join([str(a) for a in task_detail]))
else:
print(str(task_detail))
else:
for sub_task, td in job_details['tasks'].items():
line = []
line.append(job_details['id'])
line.append(sub_task)
for key in order:
if key == 'id':
continue
if key == 'submission_time':
line.append(
job_details[key].strftime('%d/%m/%Y %H:%M:%S'))
else:
line.append(blank_none(job_details[key]))
for key in task_order:
if key == 'status':
print(
"Job state: " + fsl_sub.consts.REPORTING[
td[key]])
else:
print("{0}: ".format(titlize_key(td)), end='')
if key in ['utime', 'stime', ]:
print("{0}s".format(blank_none(td)))
elif key in ['maxmemory']:
print("{0}MB".format(blank_none(td)))
elif key in ['sub_time', 'start_time', 'end_time']:
print(td[key].strftime(
'%d/%m/%Y %H:%M:%S'))
else:
print(blank_none(td[key]))
print('|'.join(line))
def main(args=None):
lhdr = logging.StreamHandler()
fmt = LogFormatter()
lhdr.setFormatter(fmt)
logger = logging.getLogger('fsl_sub')
logger.addHandler(lhdr)
try:
config = read_config()
cp_info = coproc_info()
except BadConfiguration as e:
logger.error("Error in fsl_sub configuration - " + str(e))
sys.exit(CONFIG_ERROR)
PLUGINS = load_plugins()
grid_module = 'fsl_sub_plugin_' + config['method']
if grid_module not in PLUGINS:
raise BadConfiguration(
"{} not a supported method".format(config['method']))
try:
plugin_version = PLUGINS[grid_module].plugin_version
except AttributeError as e:
raise BadConfiguration(
"Failed to load plugin " + grid_module
+ " ({0})".format(str(e))
)
cmd_parser = build_parser(
config, cp_info, plugin_name=grid_module,
plugin_version=plugin_version())
options = vars(cmd_parser.parse_args(args=args))
if options['show_config']:
yaml = YAML()
yaml.indent(mapping=2, sequence=4, offset=2)
yaml.compact(seq_seq=False, seq_map=False)
yaml.representer.add_representer(type(None), yaml_repr_none)
yaml.dump(config, sys.stdout)
sys.exit(0)
if options['has_coprocessor'] is not None:
has_copro = has_coprocessor(options['has_coprocessor'])
if has_copro:
print("Yes")
sys.exit(0)
else:
print("No")
sys.exit(1)
if options['has_queues']:
has_qs = has_queues()
if has_qs:
print("Yes")
sys.exit(0)
else:
print("No")
sys.exit(1)
if options['delete_job'] is not None:
output, failed = delete_job(options['delete_job'])
if failed:
print(output, file=sys.stderr)
else:
print(output)
sys.exit(failed)
if not cp_info['available']:
options['coprocessor'] = None
options['coprocessor_class'] = None
options['coprocessor_class_strict'] = False
options['coprocessor_toolkits'] = None
options['coprocessor_multi'] = 1
else:
if not cp_info['classes']:
options['coprocessor_class'] = None
options['coprocessor_class_strict'] = False
if not cp_info['toolkits']:
options['coprocessor_toolkits'] = None
if options['verbose']:
logger.setLevel(logging.INFO)
if options['debug']:
logger.setLevel(logging.DEBUG)
os.environ['FSLSUB_DEBUG'] = '1'
if options['array_task'] and options['args']:
cmd_parser.error(
"Individual and array tasks are mutually exclusive."
)
if options['fileisimage']:
logger.debug("Check file is image requested")
try:
if file_is_image(options['fileisimage']):
logger.info("File is an image")
sys.exit(0)
except NoFsl as e:
logger.warning(
"No FSL found - " + str(e)
+ " assuming is image")
sys.exit(0)
except CommandError as e:
cmd_parser.error(str(e))
if options['parallelenv'] and config['method'] != 'shell':
try:
pe_name, threads = process_pe_def(
options['parallelenv'], config['queues'])
except ArgumentError as e:
cmd_parser.error(str(e))
else:
pe_name, threads = (None, 1, )
# If not already set, set FSLSUB_PARALLEL to 0 - shell plugin
# will use this to know it may decide freely the number of threads
if 'FSLSUB_PARALLEL' not in os.environ.keys():
os.environ['FSLSUB_PARALLEL'] = '0'
if options['array_task'] is not None:
if options['array_native']:
cmd_parser.error(
"Array task files mutually exclusive with"
" array native mode.")
array_task = True
command = options['array_task']
elif options['array_native'] is None:
array_task = False
if (
options['array_hold'] is not None
or options['array_limit'] is not None):
cmd_parser.error(
"Array controls not applicable to non-array tasks")
command = options['args']
else:
array_task = True
command = options['args']
if not command:
cmd_parser.error("No command or array task file provided")
for hold_spec in ['jobhold', 'array_hold']:
if options[hold_spec]:
options[hold_spec] = options[hold_spec].split(',')
if 'mailoptions' not in options:
options['mailoptions'] = None
if 'mailto' not in options:
options['mailto'] = None
if uses_projects():
project = get_project_env(options['project'])
else:
project = None
try:
exports = options['export']
except KeyError:
exports = []
keep_jobscript = options['keep_jobscript']
try:
job_id = submit(
command,
architecture=options['arch'],
array_hold=options['array_hold'],
array_limit=options['array_limit'],
array_specifier=options['array_native'],
array_task=array_task,
coprocessor=options['coprocessor'],
coprocessor_toolkit=options['coprocessor_toolkit'],
coprocessor_class=options['coprocessor_class'],
coprocessor_class_strict=options['coprocessor_class_strict'],
coprocessor_multi=options['coprocessor_multi'],
name=options['name'],
parallel_env=pe_name,
queue=options['queue'],
threads=threads,
jobhold=options['jobhold'],
jobram=options['jobram'],
jobtime=options['jobtime'],
logdir=options['logdir'],
mail_on=options['mailoptions'],
mailto=options['mailto'],
priority=options['priority'],
ramsplit=not options['noramsplit'],
resources=options['resource'],
usescript=options['usescript'],
validate_command=not options['novalidation'],
requeueable=not options['not_requeueable'],
as_tuple=False,
project=project,
export_vars=exports,
keep_jobscript=keep_jobscript
)
except BadSubmission as e:
cmd_parser.exit(
message="Error submitting job - " + str(e) + '\n',
status=SUBMISSION_ERROR)
except GridOutputError as e:
cmd_parser.exit(
message="Error submitting job - output from submission "
"not understood. " + str(e) + '\n',
status=RUNNER_ERROR)
except Exception as e:
traceback.print_exc(file=sys.stderr)
cmd_parser.error("Unexpected error: " + str(e) + '\n')
print(job_id)
def update_parser(parser_class=argparse.ArgumentParser):
'''Parse the command line, returns a dict keyed on option'''
logger = logging.getLogger(__name__)
logger.debug("updater: parsing arguments")
parser = parser_class(
prog="fsl_sub_update",
usage="Check for fsl_sub updates",
description="Check online for fsl_sub updates"
)
parser.add_argument(
'--check', '-c', help="Check for updates", action="store_true"
)
parser.add_argument(
'--yes', '-y', help="Update without confirmation", action="store_true"
)
parser.add_argument(
'--test_local', '-t', help=argparse.SUPPRESS
)
return parser
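# Illustrative CLI usage (sketch): `fsl_sub_update --check` reports available
# updates without installing; without `--check` you are prompted to install them.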
def update(args=None):
lhdr = logging.StreamHandler()
fmt = LogFormatter()
lhdr.setFormatter(fmt)
logger = logging.getLogger('fsl_sub')
logger.addHandler(lhdr)
options = update_parser().parse_args(args=args)
try:
fsldir = find_fsldir()
except NotAFslDir:
sys.exit("FSL not found - use conda update/pip install --upgrade to update when installed outside of FSL")
# Check for updates
try:
updates = conda_check_update(fsldir=fsldir)
if updates is None:
print("No updates available")
sys.exit(0)
print("Available updates:")
for u, v in updates.items():
print("{0} ({1} -> {2})".format(
u, v['old_version'], v['version']
))
except Exception as e:
sys.exit(
"Unable to check for updates! ({0})".format(
str(e)))
if not options.check:
if not options.yes:
answer = user_input('Install pending updates? ')
if answer.strip().lower() not in ['y', 'yes', ]:
sys.exit('Aborted')
try:
updated = conda_update(fsldir=fsldir)
print("{0} updated.".format(", ".join(updated)))
except UpdateError as e:
sys.exit(
"Unable to update! ({0})".format(
str(e)
)
)
def instplugin_parser(parser_class=argparse.ArgumentParser):
'''Parse the command line, returns a dict keyed on option'''
logger = logging.getLogger(__name__)
logger.debug("plugin installer: parsing arguments")
parser = parser_class(
prog="fsl_sub_install_plugin",
usage="Download and install fsl_sub plugins",
description="Simplify the installation of cluster backends for fsl_sub"
)
parser.add_argument(
'--list', '-l', help="List available plugins", action="store_true"
)
parser.add_argument(
'--install', '-i', help="Install requested plugin", default=None
)
parser.add_argument(
'--test_local', '-t', help=argparse.SUPPRESS
)
return parser
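# Illustrative CLI usage (sketch): `fsl_sub_install_plugin --list` prints the
# available backend plugins; `fsl_sub_install_plugin --install <plugin>` installs
# the named one (the plugin name is whatever conda_find_packages reported).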
def install_plugin(args=None):
lhdr = logging.StreamHandler()
fmt = LogFormatter()
lhdr.setFormatter(fmt)
logger = logging.getLogger('fsl_sub')
logger.addHandler(lhdr)
inst_parser = instplugin_parser()
options = inst_parser.parse_args(args=args)
fsldir = find_fsldir()
try:
fsl_sub_plugins = conda_find_packages(
'fsl_sub_plugin_*', fsldir=fsldir)
except PackageError as e:
sys.exit(str(e))
if options.list or options.install is None:
print('Available plugins:')
for index, plugin in enumerate(fsl_sub_plugins):
if not options.list:
print("{0}: {1}".format(index + 1, plugin))
else:
print(plugin)
if options.list:
sys.exit(0)
else:
try:
plugin_index = int(user_input("Which backend? "))
conda_pkg = fsl_sub_plugins[plugin_index - 1]
except (ValueError, IndexError, ):
sys.exit("Invalid plugin number")
# Install
if options.install:
if options.install in fsl_sub_plugins:
conda_pkg = options.install
else:
sys.exit("Unrecognised plugin")
try:
conda_install(conda_pkg)
except InstallError as e:
sys.exit(
"Unable to install requested plugin! ({0})".format(
str(e)))
print("Plugin {0} installed". format(conda_pkg))
print(
"""You can generate an example config file with:
fsl_sub_config {plugin}
The configuration file can be copied to {fsldir_etc_fslconf}, naming it
fsl_sub.yml, or put in your home folder, naming it .fsl_sub.yml.
A copy in your home folder will override the file in
{fsldir_etc_fslconf}. Finally, the environment variable FSLSUB_CONF
can be set to point at the configuration file; this will override all
other files.""".format(
plugin=conda_pkg.replace('fsl_sub_plugin_', ''),
fsldir_etc_fslconf=os.path.join(fsldir, 'etc', 'fslconf'))
)
|
[] |
[] |
[
"FSLSUB_DEBUG",
"FSLSUB_PARALLEL"
] |
[]
|
["FSLSUB_DEBUG", "FSLSUB_PARALLEL"]
|
python
| 2 | 0 | |
file_project/asgi.py
|
"""
ASGI config for file_project project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'file_project.settings')
application = get_asgi_application()
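# Illustrative only (not part of the generated template): this callable can be
# served by any ASGI server, e.g. `uvicorn file_project.asgi:application`.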
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
rasa/cli/x.py
|
import argparse
import asyncio
import importlib.util
import logging
from multiprocessing.process import BaseProcess
from multiprocessing import get_context
from packaging import version
import os
import signal
import sys
import traceback
from typing import Iterable, List, Optional, Text, Tuple
import aiohttp
from rasa.exceptions import MissingDependencyException
import ruamel.yaml as yaml
from rasa import telemetry
from rasa.cli import SubParsersAction
from rasa.cli.arguments import x as arguments
import rasa.cli.utils
from rasa.constants import (
DEFAULT_LOG_LEVEL_RASA_X,
DEFAULT_RASA_PORT,
DEFAULT_RASA_X_PORT,
)
from rasa.shared.constants import (
DEFAULT_CONFIG_PATH,
DEFAULT_CREDENTIALS_PATH,
DEFAULT_DOMAIN_PATH,
DEFAULT_ENDPOINTS_PATH,
DOCS_BASE_URL_RASA_X,
)
from rasa.core.utils import AvailableEndpoints
from rasa.shared.exceptions import RasaXTermsError
import rasa.shared.utils.cli
import rasa.shared.utils.io
import rasa.utils.common
from rasa.utils.endpoints import EndpointConfig
import rasa.utils.io
logger = logging.getLogger(__name__)
DEFAULT_EVENTS_DB = "events.db"
def add_subparser(
subparsers: SubParsersAction, parents: List[argparse.ArgumentParser]
) -> None:
"""Add all rasa x parsers.
Args:
subparsers: subparser we are going to attach to
parents: Parent parsers, needed to ensure tree structure in argparse
"""
x_parser_args = {
"parents": parents,
"conflict_handler": "resolve",
"formatter_class": argparse.ArgumentDefaultsHelpFormatter,
}
if is_rasa_x_installed():
# we'll only show the help msg for the command if Rasa X is actually installed
x_parser_args["help"] = "Starts the Rasa X interface."
shell_parser = subparsers.add_parser("x", **x_parser_args)
shell_parser.set_defaults(func=rasa_x)
arguments.set_x_arguments(shell_parser)
def _rasa_service(
args: argparse.Namespace,
endpoints: AvailableEndpoints,
rasa_x_url: Optional[Text] = None,
credentials_path: Optional[Text] = None,
) -> None:
"""Starts the Rasa application."""
from rasa.core.run import serve_application
# needs separate logging configuration as it is started in its own process
rasa.utils.common.configure_logging_and_warnings(args.loglevel)
rasa.utils.io.configure_colored_logging(args.loglevel)
if not credentials_path:
credentials_path = _prepare_credentials_for_rasa_x(
args.credentials, rasa_x_url=rasa_x_url
)
serve_application(
endpoints=endpoints,
port=args.port,
credentials=credentials_path,
cors=args.cors,
auth_token=args.auth_token,
enable_api=True,
jwt_secret=args.jwt_secret,
jwt_method=args.jwt_method,
ssl_certificate=args.ssl_certificate,
ssl_keyfile=args.ssl_keyfile,
ssl_ca_file=args.ssl_ca_file,
ssl_password=args.ssl_password,
)
def _prepare_credentials_for_rasa_x(
credentials_path: Optional[Text], rasa_x_url: Optional[Text] = None
) -> Text:
credentials_path = str(
rasa.cli.utils.get_validated_path(
credentials_path, "credentials", DEFAULT_CREDENTIALS_PATH, True
)
)
if credentials_path:
credentials = rasa.shared.utils.io.read_config_file(credentials_path)
else:
credentials = {}
    # this makes sure Rasa X is properly configured no matter what
if rasa_x_url:
credentials["rasa"] = {"url": rasa_x_url}
dumped_credentials = yaml.dump(credentials, default_flow_style=False)
tmp_credentials = rasa.utils.io.create_temporary_file(dumped_credentials, "yml")
return tmp_credentials
def _overwrite_endpoints_for_local_x(
endpoints: AvailableEndpoints, rasa_x_token: Text, rasa_x_url: Text
) -> None:
endpoints.model = _get_model_endpoint(endpoints.model, rasa_x_token, rasa_x_url)
endpoints.event_broker = _get_event_broker_endpoint(endpoints.event_broker)
def _get_model_endpoint(
model_endpoint: Optional[EndpointConfig], rasa_x_token: Text, rasa_x_url: Text
) -> EndpointConfig:
# If you change that, please run a test with Rasa X and speak to the bot
default_rasax_model_server_url = f"{rasa_x_url}/models/tags/production"
model_endpoint = model_endpoint or EndpointConfig()
    # If endpoints.yml already contains a model url, log that we are
    # ignoring it in favour of the default Rasa X model server url.
custom_url = model_endpoint.url
if custom_url and custom_url != default_rasax_model_server_url:
logger.info(
f"Ignoring url '{custom_url}' from 'endpoints.yml' and using "
f"'{default_rasax_model_server_url}' instead."
)
custom_wait_time_pulls = model_endpoint.kwargs.get("wait_time_between_pulls")
return EndpointConfig(
default_rasax_model_server_url,
token=rasa_x_token,
wait_time_between_pulls=custom_wait_time_pulls or 2,
)
def _get_event_broker_endpoint(
event_broker_endpoint: Optional[EndpointConfig],
) -> EndpointConfig:
import questionary
default_event_broker_endpoint = EndpointConfig(
type="sql", dialect="sqlite", db=DEFAULT_EVENTS_DB
)
if not event_broker_endpoint:
return default_event_broker_endpoint
elif not _is_correct_event_broker(event_broker_endpoint):
rasa.shared.utils.cli.print_error(
f"Rasa X currently only supports a SQLite event broker with path "
f"'{DEFAULT_EVENTS_DB}' when running locally. You can deploy Rasa X "
f"with Docker ({DOCS_BASE_URL_RASA_X}/installation-and-setup/"
f"docker-compose-quick-install/) if you want to use other event broker "
f"configurations."
)
continue_with_default_event_broker = questionary.confirm(
"Do you want to continue with the default SQLite event broker?"
).ask()
if not continue_with_default_event_broker:
sys.exit(0)
return default_event_broker_endpoint
else:
return event_broker_endpoint
def _is_correct_event_broker(event_broker: EndpointConfig) -> bool:
return all(
[
event_broker.type == "sql",
event_broker.kwargs.get("dialect", "").lower() == "sqlite",
event_broker.kwargs.get("db") == DEFAULT_EVENTS_DB,
]
)
def start_rasa_for_local_rasa_x(
args: argparse.Namespace, rasa_x_token: Text
) -> BaseProcess:
"""Starts the Rasa X API with Rasa as a background process."""
credentials_path, endpoints_path = _get_credentials_and_endpoints_paths(args)
endpoints = AvailableEndpoints.read_endpoints(endpoints_path)
rasa_x_url = f"http://localhost:{args.rasa_x_port}/api"
_overwrite_endpoints_for_local_x(endpoints, rasa_x_token, rasa_x_url)
vars(args).update(
dict(
nlu_model=None,
cors="*",
auth_token=args.auth_token,
enable_api=True,
endpoints=endpoints,
)
)
ctx = get_context("spawn")
p = ctx.Process(
target=_rasa_service,
args=(args, endpoints, rasa_x_url, credentials_path),
daemon=True,
)
p.start()
return p
def is_rasa_x_installed() -> bool:
"""Check if Rasa X is installed."""
# we could also do something like checking if `import rasax` works,
# the issue with that is that it actually does import the package and this
# takes some time that we don't want to spend when booting the CLI
return importlib.util.find_spec("rasax") is not None
def generate_rasa_x_token(length: int = 16) -> Text:
"""Generate a hexadecimal secret token used to access the Rasa X API.
A new token is generated on every `rasa x` command.
"""
from secrets import token_hex
return token_hex(length)
def _configure_logging(args: argparse.Namespace) -> None:
from rasa.core.utils import configure_file_logging
from rasa.utils.common import configure_logging_and_warnings
log_level = args.loglevel or DEFAULT_LOG_LEVEL_RASA_X
if isinstance(log_level, str):
log_level = logging.getLevelName(log_level)
logging.basicConfig(level=log_level)
rasa.utils.io.configure_colored_logging(args.loglevel)
configure_logging_and_warnings(
log_level, warn_only_once=False, filter_repeated_logs=False
)
configure_file_logging(logging.root, args.log_file, False)
logging.getLogger("werkzeug").setLevel(logging.WARNING)
logging.getLogger("engineio").setLevel(logging.WARNING)
logging.getLogger("pika").setLevel(logging.WARNING)
logging.getLogger("socketio").setLevel(logging.ERROR)
if not log_level == logging.DEBUG:
logging.getLogger().setLevel(logging.WARNING)
logging.getLogger("py.warnings").setLevel(logging.ERROR)
def is_rasa_project_setup(args: argparse.Namespace, project_path: Text) -> bool:
"""Checks if `project_path` contains a valid Rasa Open Source project.
Args:
args: Command-line arguments.
project_path: Path to the possible Rasa Open Source project.
Returns:
`True` if `project_path` is a valid Rasa Open Source project, `False` otherwise.
"""
config_path = _get_config_path(args)
domain_path = _get_domain_path(args)
mandatory_files = [config_path, domain_path]
for f in mandatory_files:
if not os.path.exists(os.path.join(project_path, f)):
return False
return True
def _validate_rasa_x_start(args: argparse.Namespace, project_path: Text) -> None:
if not is_rasa_x_installed():
rasa.shared.utils.cli.print_error_and_exit(
"Rasa X is not installed. The `rasa x` "
"command requires an installation of Rasa X. "
"Instructions on how to install Rasa X can be found here: "
"https://rasa.com/docs/rasa-x/."
)
if args.port == args.rasa_x_port:
rasa.shared.utils.cli.print_error_and_exit(
"The port for Rasa X '{}' and the port of the Rasa server '{}' are the "
"same. We need two different ports, one to run Rasa X (e.g. delivering the "
"UI) and another one to run a normal Rasa server.\nPlease specify two "
"different ports using the arguments '--port' and '--rasa-x-port'.".format(
args.rasa_x_port, args.port
)
)
if not is_rasa_project_setup(args, project_path):
rasa.shared.utils.cli.print_error_and_exit(
"This directory is not a valid Rasa project. Use 'rasa init' "
"to create a new Rasa project or switch to a valid Rasa project "
"directory (see "
"https://rasa.com/docs/rasa/command-line-interface#rasa-init)."
)
domain_path = _get_domain_path(args)
_validate_domain(os.path.join(project_path, domain_path))
if args.data and not os.path.exists(args.data):
        rasa.shared.utils.cli.print_warning(
            "The provided data path ('{}') does not exist. Rasa X will start "
            "without any training data.".format(args.data)
)
def _validate_domain(domain_path: Text) -> None:
from rasa.shared.core.domain import Domain, InvalidDomain
try:
Domain.load(domain_path)
except InvalidDomain as e:
rasa.shared.utils.cli.print_error_and_exit(
"The provided domain file could not be loaded. " "Error: {}".format(e)
)
def rasa_x(args: argparse.Namespace) -> None:
from rasa.cli.utils import signal_handler
signal.signal(signal.SIGINT, signal_handler)
_configure_logging(args)
if version.parse(rasa.version.__version__) >= version.parse("3.0.0"):
rasa.shared.utils.io.raise_warning(
f"Your version of rasa '{rasa.version.__version__}' is currently "
f"not supported by Rasa X. Running `rasa x` CLI command with rasa "
f"version higher or equal to 3.0.0 will result in errors.",
UserWarning,
)
if args.production:
run_in_production(args)
else:
run_locally(args)
async def _pull_runtime_config_from_server(
config_endpoint: Optional[Text],
attempts: int = 60,
wait_time_between_pulls: float = 5,
keys: Iterable[Text] = ("endpoints", "credentials"),
) -> Optional[List[Text]]:
"""Pull runtime config from `config_endpoint`.
Returns a list of paths to yaml dumps, each containing the contents of one of
`keys`.
"""
while attempts:
try:
async with aiohttp.ClientSession() as session:
async with session.get(config_endpoint) as resp:
if resp.status == 200:
rjs = await resp.json()
try:
return [
rasa.utils.io.create_temporary_file(rjs[k])
for k in keys
]
except KeyError as e:
rasa.shared.utils.cli.print_error_and_exit(
"Failed to find key '{}' in runtime config. "
"Exiting.".format(e)
)
else:
logger.debug(
"Failed to get a proper response from remote "
"server. Status Code: {}. Response: '{}'"
"".format(resp.status, await resp.text())
)
except aiohttp.ClientError as e:
logger.debug(f"Failed to connect to server. Retrying. {e}")
await asyncio.sleep(wait_time_between_pulls)
attempts -= 1
rasa.shared.utils.cli.print_error_and_exit(
"Could not fetch runtime config from server at '{}'. "
"Exiting.".format(config_endpoint)
)
def run_in_production(args: argparse.Namespace) -> None:
from rasa.shared.utils.cli import print_success
print_success("Starting Rasa X in production mode... 🚀")
credentials_path, endpoints_path = _get_credentials_and_endpoints_paths(args)
endpoints = AvailableEndpoints.read_endpoints(endpoints_path)
_rasa_service(args, endpoints, None, credentials_path)
def _get_config_path(args: argparse.Namespace) -> Optional[Text]:
config_path = rasa.cli.utils.get_validated_path(
args.config, "config", DEFAULT_CONFIG_PATH
)
return str(config_path)
def _get_domain_path(args: argparse.Namespace) -> Optional[Text]:
domain_path = rasa.cli.utils.get_validated_path(
args.domain, "domain", DEFAULT_DOMAIN_PATH
)
return str(domain_path)
def _get_credentials_and_endpoints_paths(
args: argparse.Namespace,
) -> Tuple[Optional[Text], Optional[Text]]:
config_endpoint = args.config_endpoint
if config_endpoint:
endpoints_config_path, credentials_path = asyncio.run(
_pull_runtime_config_from_server(config_endpoint)
)
else:
endpoints_config_path = rasa.cli.utils.get_validated_path(
args.endpoints, "endpoints", DEFAULT_ENDPOINTS_PATH, True
)
credentials_path = None
return credentials_path, endpoints_config_path
def _prevent_failure_if_git_is_not_available() -> None:
"""Rasa X uses the `git` package, which will fail to import if git is not available.
Git isn't needed locally, which means we can silence this error to allow
users to use local mode even if git is not available on their machine.
Fixes regression https://github.com/RasaHQ/rasa/issues/7140
"""
if os.environ.get("GIT_PYTHON_REFRESH") is None:
os.environ["GIT_PYTHON_REFRESH"] = "quiet"
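# The function above is equivalent to exporting GIT_PYTHON_REFRESH=quiet in the
# shell before starting Rasa X (illustrative; only applied when the variable is unset).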
def run_locally(args: argparse.Namespace) -> None:
"""Run a Rasa X instance locally.
Args:
args: commandline arguments
"""
_prevent_failure_if_git_is_not_available()
try:
# noinspection PyUnresolvedReferences
from rasax.community import local
except ModuleNotFoundError:
raise MissingDependencyException(
f"Rasa X does not seem to be installed, but it is needed for this "
f"CLI command. You can find more information on how to install Rasa X "
f"in local mode in the documentation: "
f"{DOCS_BASE_URL_RASA_X}/installation-and-setup/install/local-mode"
)
args.rasa_x_port = args.rasa_x_port or DEFAULT_RASA_X_PORT
args.port = args.port or DEFAULT_RASA_PORT
project_path = "."
_validate_rasa_x_start(args, project_path)
rasa_x_token = generate_rasa_x_token()
process = start_rasa_for_local_rasa_x(args, rasa_x_token=rasa_x_token)
config_path = _get_config_path(args)
domain_path = _get_domain_path(args)
telemetry.track_rasa_x_local()
# noinspection PyBroadException
try:
local.main(
args,
project_path,
args.data,
token=rasa_x_token,
config_path=config_path,
domain_path=domain_path,
)
except RasaXTermsError:
# User didn't accept the Rasa X terms.
pass
except Exception:
print(traceback.format_exc())
rasa.shared.utils.cli.print_error(
"Sorry, something went wrong (see error above). Make sure to start "
"Rasa X with valid data and valid domain and config files. Please, "
"also check any warnings that popped up.\nIf you need help fixing "
"the issue visit our forum: https://forum.rasa.com/."
)
finally:
process.terminate()
|
[] |
[] |
[
"GIT_PYTHON_REFRESH"
] |
[]
|
["GIT_PYTHON_REFRESH"]
|
python
| 1 | 0 | |
crystaldb/db.py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# ***********************************************************************
# Author: Zhichang Fu
# Created Time: 2018-08-25 11:08:53
# Version: 1.0.3 changed by 2018-12-09
# 1. Add support for combining two tables with a join operator.
# ***********************************************************************
from __future__ import print_function
import os
import time
import datetime
import re
from DBUtils.PooledDB import PooledDB
from .utils import (threadeddict, safestr, safeunicode, storage, iterbetter,
add_space)
from .exception import UnknownParamstyle, _ItplError
from .compat import string_types, numeric_types, PY2, iteritems
from .config import TOKEN, OP, JOIN
try:
from urllib import parse as urlparse
from urllib.parse import unquote
except ImportError:
import urlparse
from urllib import unquote
try:
import ast
except ImportError:
ast = None
__all__ = [
"DB",
"Operator",
"SQLQuery",
"MySQLDB",
"Select",
"Update",
"Insert",
"Delete",
"Table",
"Transaction",
]
tokenprog = re.compile(TOKEN)
def sqlify(obj):
"""
converts `obj` to its proper SQL version
:example:
>>> sqlify(None)
'NULL'
>>> sqlify(True)
"'t'"
>>> sqlify(3)
'3'
"""
# because `1 == True and hash(1) == hash(True)`
# we have to do this the hard way...
if obj is None:
return 'NULL'
elif obj is True:
return "'t'"
elif obj is False:
return "'f'"
elif isinstance(obj, numeric_types):
return str(obj)
elif isinstance(obj, datetime.datetime):
return repr(obj.isoformat())
else:
if PY2 and isinstance(obj, unicode):
# Strings are always UTF8 in Py3
obj = obj.encode('utf8')
return repr(obj)
def sqllist(lst):
"""
Converts the arguments for use in something like a WHERE clause.
:example:
>>> sqllist(['a', 'b'])
'a, b'
>>> sqllist('a')
'a'
"""
if isinstance(lst, string_types):
return lst
else:
return ', '.join(lst)
def sqlwhere(data, grouping=' AND '):
"""
Converts a two-tuple (key, value) iterable `data` to an SQL WHERE clause `SQLQuery`.
:example:
>>> sqlwhere((('cust_id', 2), ('order_id',3)))
<sql: 'cust_id = 2 AND order_id = 3'>
>>> sqlwhere((('order_id', 3), ('cust_id', 2)), grouping=', ')
<sql: 'order_id = 3, cust_id = 2'>
>>> sqlwhere((('a', 'a'), ('b', 'b'))).query()
'a = %s AND b = %s'
"""
return SQLQuery.join([k + ' = ' + sqlparam(v) for k, v in data], grouping)
class SQLParam(object):
"""
Parameter in SQLQuery.
:example:
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam("joe")])
>>> q
<sql: "SELECT * FROM test WHERE name='joe'">
>>> q.query()
'SELECT * FROM test WHERE name=%s'
>>> q.values()
['joe']
"""
__slots__ = ["value"]
def __init__(self, value):
self.value = value
def get_marker(self, paramstyle='pyformat'):
if paramstyle == 'qmark':
return '?'
elif paramstyle == 'numeric':
return ':1'
elif paramstyle is None or paramstyle in ['format', 'pyformat']:
return '%s'
raise UnknownParamstyle(paramstyle)
def sqlquery(self):
return SQLQuery([self])
def __add__(self, other):
return self.sqlquery() + other
def __radd__(self, other):
return other + self.sqlquery()
def __str__(self):
return str(self.value)
def __eq__(self, other):
return isinstance(other, SQLParam) and other.value == self.value
def __repr__(self):
return '<param: %s>' % repr(self.value)
sqlparam = SQLParam
class SQLQuery(object):
"""
You can pass this sort of thing as a clause in any db function.
Otherwise, you can pass a dictionary to the keyword argument `vars`
and the function will call reparam for you.
Internally, consists of `items`, which is a list of strings and
SQLParams, which get concatenated to produce the actual query.
"""
__slots__ = ["items"]
# tested in sqlquote's docstring
def __init__(self, items=None):
"""Creates a new SQLQuery.
:param items: which is a list of strings and SQLParams,
which get concatenated to produce the actual query.
:example:
>>> SQLQuery("x")
<sql: 'x'>
>>> q = SQLQuery(['SELECT * FROM ', 'test', ' WHERE x=', SQLParam(1)])
>>> q
<sql: 'SELECT * FROM test WHERE x=1'>
>>> q.query(), q.values()
('SELECT * FROM test WHERE x=%s', [1])
>>> SQLQuery(SQLParam(1))
<sql: '1'>
"""
if items is None:
self.items = []
elif isinstance(items, list):
self.items = items
elif isinstance(items, SQLParam):
self.items = [items]
elif isinstance(items, SQLQuery):
self.items = list(items.items)
else:
self.items = [items]
# Take care of SQLLiterals
for i, item in enumerate(self.items):
if isinstance(item, SQLParam) and isinstance(
item.value, SQLLiteral):
self.items[i] = item.value.v
def append(self, value):
self.items.append(value)
def __add__(self, other):
if isinstance(other, string_types):
items = [other]
elif isinstance(other, SQLQuery):
items = other.items
else:
return NotImplemented
return SQLQuery(self.items + items)
def __radd__(self, other):
if isinstance(other, string_types):
items = [other]
elif isinstance(other, SQLQuery):
items = other.items
else:
return NotImplemented
return SQLQuery(items + self.items)
def __iadd__(self, other):
if isinstance(other, (string_types, SQLParam)):
self.items.append(other)
elif isinstance(other, SQLQuery):
self.items.extend(other.items)
else:
return NotImplemented
return self
def __len__(self):
return len(self.query())
def __eq__(self, other):
return isinstance(other, SQLQuery) and other.items == self.items
def query(self, paramstyle=None):
"""
Returns the query part of the sql query.
:example:
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
>>> q.query()
'SELECT * FROM test WHERE name=%s'
>>> q.query(paramstyle='qmark')
'SELECT * FROM test WHERE name=?'
"""
s = []
for x in self.items:
if isinstance(x, SQLParam):
x = x.get_marker(paramstyle)
s.append(safestr(x))
else:
x = safestr(x)
# automatically escape % characters in the query
                # For backward compatibility, ignore escaping when the query already looks escaped
if paramstyle in ['format', 'pyformat']:
if '%' in x and '%%' not in x:
x = x.replace('%', '%%')
s.append(x)
return "".join(s)
def values(self):
"""
Returns the values of the parameters used in the sql query.
:example:
>>> q = SQLQuery(["SELECT * FROM test WHERE name=", SQLParam('joe')])
>>> q.values()
['joe']
"""
return [i.value for i in self.items if isinstance(i, SQLParam)]
def join(items, sep=' ', prefix=None, suffix=None, target=None):
"""
Joins multiple queries.
:param target: if target argument is provided, the items are appended to target
instead of creating a new SQLQuery.
:example:
>>> SQLQuery.join(['a', 'b'], ', ')
<sql: 'a, b'>
        Optionally, prefix and suffix arguments can be provided.
>>> SQLQuery.join(['a', 'b'], ', ', prefix='(', suffix=')')
<sql: '(a, b)'>
"""
if target is None:
target = SQLQuery()
target_items = target.items
if prefix:
target_items.append(prefix)
for i, item in enumerate(items):
if i != 0 and sep != "":
target_items.append(sep)
if isinstance(item, SQLQuery):
target_items.extend(item.items)
elif item == "": # joins with empty strings
continue
else:
target_items.append(item)
if suffix:
target_items.append(suffix)
return target
join = staticmethod(join)
def _str(self):
try:
return self.query() % tuple([sqlify(x) for x in self.values()])
except (ValueError, TypeError):
return self.query()
def __str__(self):
return safestr(self._str())
def __unicode__(self):
return safeunicode(self._str())
def __repr__(self):
return '<sql: %s>' % repr(str(self))
def sqlquote(a):
"""
Ensures `a` is quoted properly for use in a SQL query.
:example:
>>> 'WHERE x = ' + sqlquote(True) + ' AND y = ' + sqlquote(3)
<sql: "WHERE x = 't' AND y = 3">
>>> 'WHERE x = ' + sqlquote(True) + ' AND y IN ' + sqlquote([2, 3])
<sql: "WHERE x = 't' AND y IN (2, 3)">
"""
if isinstance(a, list):
return _sqllist(a)
else:
return sqlparam(a).sqlquery()
class _Node(object):
def __init__(self, type, first, second=None):
self.type = type
self.first = first
self.second = second
def __eq__(self, other):
return (isinstance(other, _Node) and self.type == other.type
and self.first == other.first and self.second == other.second)
def __repr__(self):
return "Node(%r, %r, %r)" % (self.type, self.first, self.second)
class Parser:
"""Parser to parse string templates like "Hello $name".
Loosely based on <http://lfw.org/python/Itpl.py> (public domain, Ka-Ping Yee)
"""
namechars = "abcdefghijklmnopqrstuvwxyz" \
"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_"
def __init__(self):
self.reset()
def reset(self):
self.pos = 0
self.level = 0
self.text = ""
self.format = ""
def parse(self, text, _format="$"):
"""Parses the given text and returns a parse tree.
"""
self.reset()
self.text = text
self._format = _format
return self.parse_all()
def parse_all(self):
while True:
dollar = self.text.find(self._format, self.pos)
if dollar < 0:
break
nextchar = self.text[dollar + 1]
if nextchar in self.namechars:
yield _Node("text", self.text[self.pos:dollar])
self.pos = dollar + 1
yield self.parse_expr()
            # for supporting ${x.id}, for backward compatibility
elif nextchar == '{':
saved_pos = self.pos
self.pos = dollar + 2 # skip "${"
expr = self.parse_expr()
if self.text[self.pos] == '}':
self.pos += 1
yield _Node("text", self.text[self.pos:dollar])
yield expr
else:
self.pos = saved_pos
break
else:
yield _Node("text", self.text[self.pos:dollar + 1])
self.pos = dollar + 1
# $$ is used to escape $
if nextchar == self._format:
self.pos += 1
if self.pos < len(self.text):
yield _Node("text", self.text[self.pos:])
def match(self):
match = tokenprog.match(self.text, self.pos)
if match is None:
raise _ItplError(self.text, self.pos)
return match, match.end()
def is_literal(self, text):
return text and text[0] in "0123456789\"'"
def parse_expr(self):
match, pos = self.match()
if self.is_literal(match.group()):
expr = _Node("literal", match.group())
else:
expr = _Node("param", self.text[self.pos:pos])
self.pos = pos
while self.pos < len(self.text):
if self.text[self.pos] == "." and \
self.pos + 1 < len(self.text) and \
self.text[self.pos + 1] in self.namechars:
self.pos += 1
match, pos = self.match()
attr = match.group()
expr = _Node("getattr", expr, attr)
self.pos = pos
elif self.text[self.pos] == "[":
saved_pos = self.pos
self.pos += 1
key = self.parse_expr()
if self.text[self.pos] == ']':
self.pos += 1
expr = _Node("getitem", expr, key)
else:
self.pos = saved_pos
break
else:
break
return expr
class SafeEval(object):
"""Safe evaluator for binding params to db queries.
"""
def safeeval(self, text, mapping, _format="$"):
nodes = Parser().parse(text, _format)
return SQLQuery.join([self.eval_node(node, mapping) for node in nodes],
"")
def eval_node(self, node, mapping):
if node.type == "text":
return node.first
else:
return sqlquote(self.eval_expr(node, mapping))
def eval_expr(self, node, mapping):
if node.type == "literal":
return ast.literal_eval(node.first)
elif node.type == "getattr":
return getattr(self.eval_expr(node.first, mapping), node.second)
elif node.type == "getitem":
return self.eval_expr(node.first, mapping)[self.eval_expr(
node.second, mapping)]
elif node.type == "param":
return mapping[node.first]
class SQLLiteral:
"""
Protects a string from `sqlquote`.
:example:
>>> sqlquote('NOW()')
<sql: "'NOW()'">
>>> sqlquote(SQLLiteral('NOW()'))
<sql: 'NOW()'>
"""
def __init__(self, v):
self.v = v
def __repr__(self):
return "<literal: %r>" % self.v
sqlliteral = SQLLiteral
def _sqllist(values):
"""
Convert list object to `SQLQuery` object.
:example:
>>> _sqllist([1, 2, 3])
<sql: '(1, 2, 3)'>
"""
items = []
items.append('(')
for i, v in enumerate(values):
if i != 0:
items.append(', ')
items.append(sqlparam(v))
items.append(')')
return SQLQuery(items)
def reparam(string_, dictionary):
"""
Takes a string and a dictionary and interpolates the string
using values from the dictionary. Returns an `SQLQuery` for the result.
:example:
>>> reparam("s = $s", dict(s=True))
<sql: "s = 't'">
>>> reparam("s IN $s", dict(s=[1, 2]))
<sql: 's IN (1, 2)'>
"""
_format = ":" if ":" in string_ else "$"
return SafeEval().safeeval(string_, dictionary, _format)
class Transaction:
"""Database transaction."""
def __init__(self, ctx):
self.ctx = ctx
self.transaction_count = transaction_count = len(ctx.transactions)
class transaction_engine:
"""Transaction Engine used in top level transactions."""
def do_transact(self):
ctx.commit(unload=False)
def do_commit(self):
ctx.commit()
def do_rollback(self):
ctx.rollback()
class subtransaction_engine:
"""Transaction Engine used in sub transactions."""
def query(self, q):
db_cursor = ctx.db.cursor()
ctx.db_execute(db_cursor, SQLQuery(q % transaction_count))
def do_transact(self):
self.query('SAVEPOINT webpy_sp_%s')
def do_commit(self):
self.query('RELEASE SAVEPOINT webpy_sp_%s')
def do_rollback(self):
self.query('ROLLBACK TO SAVEPOINT webpy_sp_%s')
class dummy_engine:
"""Transaction Engine used instead of subtransaction_engine
when sub transactions are not supported."""
do_transact = do_commit = do_rollback = lambda self: None
if self.transaction_count:
# nested transactions are not supported in some databases
if self.ctx.get('ignore_nested_transactions'):
self.engine = dummy_engine()
else:
self.engine = subtransaction_engine()
else:
self.engine = transaction_engine()
self.engine.do_transact()
self.ctx.transactions.append(self)
def __enter__(self):
return self
def __exit__(self, exctype, excvalue, traceback):
if exctype is not None:
self.rollback()
else:
self.commit()
def commit(self):
if len(self.ctx.transactions) > self.transaction_count:
self.engine.do_commit()
self.ctx.transactions = self.ctx.transactions[:self.
transaction_count]
def rollback(self):
if len(self.ctx.transactions) > self.transaction_count:
self.engine.do_rollback()
self.ctx.transactions = self.ctx.transactions[:self.
transaction_count]
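# Illustrative transaction sketch (assumes `db` is a DB instance and a `user`
# table exists; the context manager rolls back automatically if the block raises):
#   with db.transaction():
#       db.operator("user").insert(name="joe", age=20)
#       db.operator("user").update(dict(name="joe"), age=21)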
class DB(object):
"""Database, which implement sql related operation method."""
def __init__(self, db_module, params, pool=False, **kwargs):
"""
Create a database.
:param db_module: mysql
:param params: The dictionary contains parameters such as username,
password, etc.
:param pool: Add connection pool
:param kwargs: DBUtils params:
mincached: the initial number of idle connections in the pool
(the default of 0 means no connections are made at startup)
maxcached: the maximum number of idle connections in the pool
(the default value of 0 or None means unlimited pool size)
maxshared: maximum number of shared connections allowed
(the default value of 0 or None means all connections are dedicated)
When this maximum number is reached, connections are
shared if they have been requested as shareable.
maxconnections: maximum number of connections generally allowed
(the default value of 0 or None means any number of connections)
blocking: determines behavior when exceeding the maximum
(if this is set to true, block and wait until the number of
connections decreases, but by default an error will be reported)
maxusage: maximum number of reuses of a single connection
(the default of 0 or None means unlimited reuse)
When this maximum usage number of the connection is reached,
the connection is automatically reset (closed and reopened).
setsession: an optional list of SQL commands that may serve to
prepare the session, e.g. ["set datestyle to german", ...]
reset: how connections should be reset when returned to the pool
            (False or None to rollback transactions started with begin(),
the default value True always issues a rollback for safety's sake)
failures: an optional exception class or a tuple of exception classes
for which the connection failover mechanism shall be applied,
if the default (OperationalError, InternalError) is not adequate
"""
if 'driver' in params:
params.pop('driver')
self.db_module = db_module
self.params = params
self.pool = pool
self.kwargs = kwargs
self.raw_sql_flag = False
self.autocommit = kwargs.get("autocommit", False)
self._ctx = threadeddict()
# flag to enable/disable printing queries
self.print_flag = False
if "debug" in params:
self.print_flag = params.get('debug')
del params["debug"]
else:
self.print_flag = os.environ.get('debug', False)
self.get_debug_queries = False
self.get_debug_queries_info = {}
if "get_debug_queries" in params:
self.get_debug_queries = params.get('get_debug_queries')
del params["get_debug_queries"]
else:
self.get_debug_queries = os.environ.get('get_debug_queries', False)
self.supports_multiple_insert = False
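    # Illustrative construction sketch (the driver module and parameters below
    # are assumptions, not requirements of this class):
    #   import pymysql
    #   db = DB(pymysql,
    #           dict(host="127.0.0.1", user="root", passwd="secret", db="test", debug=True),
    #           pool=True, mincached=1, maxcached=5)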
def _getctx(self):
if not self._ctx.get('db'):
self._load_context(self._ctx)
return self._ctx
ctx = property(_getctx)
def _load_context(self, ctx):
ctx.dbq_count = 0
ctx.transactions = [] # stack of transactions
ctx.db = self._connect(self.params)
ctx.db_execute = self._db_execute
if not hasattr(ctx.db, 'commit'):
ctx.db.commit = lambda: None
if not hasattr(ctx.db, 'rollback'):
ctx.db.rollback = lambda: None
def commit():
return ctx.db.commit()
def rollback():
return ctx.db.rollback()
ctx.commit = commit
ctx.rollback = rollback
def _unload_context(self, ctx):
del ctx.db
def _connect(self, params):
if self.pool:
maxcached = self.kwargs.get("maxcached", 0)
mincached = self.kwargs.get("mincached", 0)
maxshared = self.kwargs.get("maxshared", 0)
reset = self.kwargs.get("reset", False)
maxconnections = self.kwargs.get("maxconnections", 0)
maxusage = self.kwargs.get("maxusage", 0)
setsession = ['SET AUTOCOMMIT = 0']
if self.autocommit:
setsession = ['SET AUTOCOMMIT = 1']
return PooledDB(self.db_module,
mincached=mincached,
maxcached=maxcached,
maxshared=maxshared,
maxconnections=maxconnections,
maxusage=maxusage,
setsession=setsession,
reset=reset,
**params)
conn = self.db_module.connect(**params)
if self.autocommit:
conn.autocommit(True)
if self.print_flag:
print("AutoCommit:", conn.get_autocommit())
return conn
def _db_cursor(self):
return self.ctx.db.cursor(), None
def _db_pool_cursor(self):
conn = self.ctx.db.connection()
cursor = conn.cursor()
return cursor, conn
def _param_marker(self):
"""Returns parameter marker based on paramstyle attribute
        of this database."""
style = getattr(self, 'paramstyle', 'pyformat')
if style == 'qmark':
return '?'
elif style == 'numeric':
return ':1'
elif style in ['format', 'pyformat']:
return '%s'
raise UnknownParamstyle(style)
def _db_execute(self, cur, sql_query):
"""executes an sql query"""
self.ctx.dbq_count += 1
start_time = time.time() * 1000
run_time = lambda: "%.4f" % (time.time() * 1000 - start_time)
try_cnt = 2
while try_cnt > 0:
try:
query, params = self._process_query(sql_query)
out = cur.execute(query, params)
except Exception:
try_cnt -= 1
if self.print_flag:
print('ERR:', str(sql_query))
if self.ctx.transactions:
self.ctx.transactions[-1].rollback()
else:
self.ctx.rollback()
try:
if not self.pool:
self.ctx.db.ping()
except Exception:
self.ctx.db = self._connect(self.params)
cur, _ = self._db_cursor()
continue
raise
break
if self.print_flag:
print("{} ({}): {}".format(run_time(), self.ctx.dbq_count,
str(sql_query)))
if self.get_debug_queries:
self.get_debug_queries_info = dict(run_time=run_time(),
sql="{}".format(sql_query))
return out
def _process_query(self, sql_query):
"""Takes the SQLQuery object and returns query string and parameters.
"""
paramstyle = getattr(self, 'paramstyle', 'pyformat')
query = sql_query.query(paramstyle)
params = sql_query.values()
return query, params
def raw_sql(self, sql_query, values=None):
if not isinstance(sql_query, SQLQuery):
sql_query = reparam(sql_query, values)
query, params = self._process_query(sql_query)
return query, params
def query(self, sql_query, vars=None, processed=False, _test=False):
"""
Execute SQL query `sql_query` using dictionary `vars` to interpolate it.
If `processed=True`, `vars` is a `reparam`-style list to use
instead of interpolating.
        :param sql_query: the sql statement, a string or `SQLQuery` object.
        :return : for SELECT queries, a list of row storage objects; otherwise
            the affected row count.
"""
if vars is None:
vars = {}
if not processed and not isinstance(sql_query, SQLQuery):
sql_query = reparam(sql_query, vars)
if _test:
return sql_query
db_cursor, conn = self._db_pool_cursor(
) if self.pool else self._db_cursor()
self._db_execute(db_cursor, sql_query)
if db_cursor.description:
names = [x[0] for x in db_cursor.description]
#def iterwrapper():
# row = db_cursor.fetchone()
# while row:
# yield storage(dict(zip(names, row)))
# row = db_cursor.fetchone()
#out = iterbetter(iterwrapper())
#out.__len__ = lambda: int(db_cursor.rowcount)
#out.list = lambda: [
# storage(dict(zip(names, x))) for x in db_cursor.fetchall()
#]
out = [storage(dict(zip(names, x))) for x in db_cursor.fetchall()]
else:
out = db_cursor.rowcount
if not self.autocommit and not self.ctx.transactions:
if not self.pool:
self.ctx.commit()
else:
conn.commit()
db_cursor.close()
if self.pool:
conn.close()
return out
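    # Illustrative raw-query sketch (assumes `db` is a DB instance and a `user`
    # table exists):
    #   rows = db.query("SELECT * FROM user WHERE age > $age", vars={"age": 18})
    #   count = db.query("UPDATE user SET age = age + 1 WHERE name = $name",
    #                    vars={"name": "joe"})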
def select(self, tables, fields=None, distinct=False):
"""
Query method which return `Select` object.
:param tables : tables name.
:param fields : fields to be queried.
:return : `Select` objects which contain various query methods.
"""
return Select(self, tables, fields, self.raw_sql_flag, distinct)
    def operator(self, tablename, test=False, default=False):
        """The entry point for write operations, including the `insert`,
        `update` and `delete` methods.
:param tablename: table name
:param test: if true, return sql statement, otherwise return sql query
result.
:param default: designed for the insert method, execute the
default insert sql when `values` is None.
        :return : `Operator` object which contains the `insert`, `update` and
            `delete` methods.
:example:
tablename is `user`, which having age, name and birthday fields.
`db_handle.operator("user").insert(**values)`.
`db_handle.operator("user").update(where, age=19, name="xiao2")`.
`db_handle.operator("user").delete(dict(id=1))`
"""
return Operator(self, tablename, test, default, self.raw_sql_flag)
def insert(self,
tablename,
seqname=None,
ignore=None,
test=False,
default=False,
**values):
"""Insert method that execute through `Operate` object.
:param tablename: tablename which you wanna be write data.
        :param seqname: if true, return the last insert id, otherwise return
the row count.
:param ignore: if true, execute the `INSERT IGNORE INTO...` sql.
:param test: if true, return sql statement, otherwise return sql query
result.
:param default: execute the default insert sql when
`values` is None.
:param values: dictionary object that contains data which should save
to database.
"""
return Operator(self, tablename, test, default,
self.raw_sql_flag).insert(seqname, ignore, **values)
def insert_duplicate_update(self,
tablename,
seqname=None,
vars=None,
test=False,
default=False,
**values):
"""Update data if it exists in `tablename`, otherwise insert new data
into `tablename`.
"""
return Operator(self, tablename, test, default,
self.raw_sql_flag).insert_duplicate_update(
seqname, vars, **values)
def multiple_insert(self,
tablename,
values,
seqname=None,
test=False,
default=False):
"""Inserts multiple rows into `tablename`.
        :param values: The `values` must be a list of dictionaries
"""
return Operator(self, tablename, test, default,
self.raw_sql_flag).multiple_insert(values, seqname)
def update(self, tables, where, vars=None, test=False, **values):
"""Update `tables` with clause `where` (interpolated using `vars`)
and setting `values`."""
return Operator(self, tables, test,
self.raw_sql_flag).update(where, vars, **values)
def delete(self, tablename, where, using=None, vars=None, _test=False):
"""Deletes from `table` with clauses `where` and `using`."""
return Operator(self, tablename, _test,
self.raw_sql_flag).delete(where, using, vars)
def _get_insert_default_values_query(self, table):
"""Default insert sql"""
return "INSERT INTO %s DEFAULT VALUES" % table
def _process_insert_query(self, query, tablename, seqname):
return query
def transaction(self):
"""Start a transaction."""
return Transaction(self.ctx)
def close(self):
"""Close db connection"""
self.ctx.db.close()
self._unload_context(self.ctx)
def table(self, tables):
return Table(self, tables)
class Operator(object):
"""`Operator` object that integrates write operations,
including insert, update, delete method."""
def __init__(self,
database,
tablename,
_test=False,
_default=False,
_raw_sql_flag=False):
self.database = database
self.tablename = tablename
self._test = _test
self._default = _default
self._raw_sql_flag = _raw_sql_flag
def insert(self, seqname=None, ignore=None, **values):
return Insert(self.database, self.tablename, seqname, self._test,
self._default,
self._raw_sql_flag).insert(ignore, **values)
def insert_duplicate_update(self, where, seqname=None, vars=None,
**values):
return Insert(self.database, self.tablename, seqname, self._test,
self._default,
self._raw_sql_flag).insert_duplicate_update(
where, vars, **values)
def multiple_insert(self, values, seqname=None):
return Insert(self.database, self.tablename, seqname, self._test,
self._default,
self._raw_sql_flag).multiple_insert(values)
def update(self, where, vars=None, **values):
return Update(self.database, self.tablename, self._test,
self._raw_sql_flag).update(where, vars, **values)
def delete(self, where, using=None, vars=None):
return Delete(self.database, self.tablename, self._test,
self._raw_sql_flag).delete(where, using, vars)
class BaseQuery(object):
"""Base query object."""
def __init__(self):
self.cur_table = None
def _where_dict(self, where, opt=OP.EQ, join=" AND "):
"""Convert dictionary object into `SQLQuery` object.
:param where: dictionary object.
        :param opt: comparison operator, e.g. `=`, `>` or `<`
        :param join: joining string, `AND` or `,`
"""
where_clauses = []
for k, v in where.items():
k = "{}.{}".format(self.cur_table, k) if self.cur_table else k
where_clauses.append(k + ' {} '.format(opt) + sqlquote(v))
if where_clauses:
return SQLQuery.join(where_clauses, join)
else:
return None
def _where(self, where, vars=None, join=" AND "):
if vars is None:
vars = {}
if isinstance(where, numeric_types):
where = "id = " + sqlparam(where)
#@@@ for backward-compatibility
elif isinstance(where, (list, tuple)) and len(where) == 2:
where = SQLQuery(where[0], where[1])
elif isinstance(where, dict):
where = self._where_dict(where, join=join)
elif isinstance(where, SQLQuery):
pass
else:
where = reparam(where, vars)
return where
def _execute(self, sql):
"""Execute sql
:param sql: sql expression
:return : return row count."""
db_cursor, conn = self._db_cursor()
self.database._db_execute(db_cursor, sql)
if not self.database.autocommit and not self.database.ctx.transactions:
if self.database.pool:
conn.commit()
else:
self.database.ctx.commit()
out = db_cursor.rowcount
self._cursor_close(db_cursor, conn)
return out
def _db_cursor(self):
db_cursor, conn = self.database._db_pool_cursor(
) if self.database.pool else self.database._db_cursor()
return db_cursor, conn
def _cursor_close(self, db_cursor, conn):
db_cursor.close()
if self.database.pool:
conn.close()
return
class Insert(BaseQuery):
"""Insert operations"""
def __init__(self,
database,
tablename,
seqname=None,
_default=False,
_test=False,
_raw_sql_flag=False):
"""
        :param seqname: if true, return the last insert id, otherwise return
row count.
"""
self.database = database
self.tablename = tablename
self._test = _test
self._default = _default
self.seqname = seqname
self._raw_sql_flag = _raw_sql_flag
super(Insert, self).__init__()
def _execute(self, sql):
db_cursor, conn = self._db_cursor()
if self.seqname:
sql = self.database._process_insert_query(sql, self.tablename,
self.seqname)
if isinstance(sql, tuple):
# for some databases, a separate query has to be made to find
# the id of the inserted row.
q1, q2 = sql
result = self.database._db_execute(db_cursor, q1)
self.database._db_execute(db_cursor, q2)
else:
result = self.database._db_execute(db_cursor, sql)
try:
out = db_cursor.fetchone()[0]
except Exception:
out = result
if not self.database.ctx.transactions:
if self.database.pool:
conn.commit()
else:
self.database.ctx.commit()
return out
def insert(self, ignore=None, **values):
"""
Inserts `values` into `tablename`
"""
def q(x):
return "(" + x + ")"
if values:
#needed for Py3 compatibility with the above doctests
sorted_values = sorted(values.items(), key=lambda t: t[0])
_keys = SQLQuery.join(map(lambda t: t[0], sorted_values), ', ')
_values = SQLQuery.join(
[sqlparam(v) for v in map(lambda t: t[1], sorted_values)],
', ')
head_query = "INSERT IGNORE INTO" if ignore else "INSERT INTO"
sql_query = "{} {} ".format(
head_query,
self.tablename) + q(_keys) + ' VALUES ' + q(_values)
else:
if self._default:
sql_query = SQLQuery(
self._get_insert_default_values_query(self.tablename))
else:
raise ValueError("values is empty.")
if self._test or self._raw_sql_flag:
return sql_query
return self._execute(sql_query)
def insert_duplicate_update(self, where=None, vars=None, **values):
if not where:
return self.insert(**values)
if not values:
raise ValueError("insert operation need values.")
if vars is None:
vars = {}
where = self._where(where, vars, join=" , ")
sorted_values = sorted(values.items(), key=lambda t: t[0])
def q(x):
return "(" + x + ")"
_keys = SQLQuery.join(map(lambda t: t[0], sorted_values), ', ')
_values = SQLQuery.join(
[sqlparam(v) for v in map(lambda t: t[1], sorted_values)], ', ')
sql_query = "INSERT INTO {} ".format(
self.tablename) + q(_keys) + ' VALUES ' + q(
_values) + " ON DUPLICATE KEY UPDATE " + where
if self._test or self._raw_sql_flag:
return sql_query
return self._execute(sql_query)
def multiple_insert(self, values):
"""
Inserts multiple rows into `tablename`.
        :param values: The `values` must be a list of dictionaries,
one for each row to be inserted, each with the same set of keys.
:returns: the list of ids of the inserted rows.
"""
if not values:
return []
if not self.database.supports_multiple_insert:
out = [self.insert(**v) for v in values]
if self.seqname is False:
return None
else:
return out
keys = values[0].keys()
#@@ make sure all keys are valid
for v in values:
if v.keys() != keys:
raise ValueError('Not all rows have the same keys')
#enforce query order for the above doctest compatibility with Py3
keys = sorted(keys)
sql_query = SQLQuery('INSERT INTO {} ({}) VALUES '.format(
self.tablename, ', '.join(keys)))
for i, row in enumerate(values):
if i != 0:
sql_query.append(", ")
SQLQuery.join([SQLParam(row[k]) for k in keys],
sep=", ",
target=sql_query,
prefix="(",
suffix=")")
if self._test or self._raw_sql_flag:
return sql_query
out = self._execute(sql_query)
if self.seqname:
out = range(out - len(values) + 1, out + 1)
return out
class Update(BaseQuery):
def __init__(self, database, tables, _test=False, _raw_sql_flag=False):
self.database = database
self.tables = tables
self._test = _test
self._raw_sql_flag = _raw_sql_flag
super(Update, self).__init__()
def update(self, where, vars=None, **values):
"""
Update `tables` with clause `where` (interpolated using `vars`)
and setting `values`.
"""
if vars is None:
vars = {}
where = self._where(where, vars)
values = sorted(values.items(), key=lambda t: t[0])
query = ("UPDATE " + sqllist(self.tables) + " SET " +
sqlwhere(values, ', ') + " WHERE " + where)
if self._test or self._raw_sql_flag:
return query
return self._execute(query)
class Delete(BaseQuery):
def __init__(self, database, tablename, _test=False, _raw_sql_flag=False):
self.database = database
self.tablename = tablename
self._test = _test
self._raw_sql_flag = _raw_sql_flag
super(Delete, self).__init__()
def delete(self, where, using=None, vars=None):
"""
Deletes from `table` with clauses `where` and `using`.
"""
if vars is None:
vars = {}
where = self._where(where, vars)
sql_query = 'DELETE FROM ' + self.tablename
if using:
sql_query += ' USING ' + sqllist(using)
if where:
sql_query += ' WHERE ' + where
if self._test or self._raw_sql_flag:
return sql_query
return self._execute(sql_query)
class MetaData(BaseQuery):
"""Various ways to integrate query syntax"""
def __init__(self, database, tables, _test=False):
self.database = database
self._tables = tables
self._where = None
self._what = None
self._group = None
self._order = None
self._limit = None
self._offset = None
self._join_type = None
self._join_expression = None
self._test = _test
super(MetaData, self).__init__()
try:
self.cur_table = tables if isinstance(tables,
string_types) else tables[0]
except Exception:
self.cur_table = None
def _sql_clauses(self,
what,
tables,
where,
group,
order,
limit,
offset,
join=JOIN.INNER_JOIN,
join_expression=None):
return (
('SELECT', what),
('FROM', sqllist(tables)),
(join, join_expression),
('WHERE', where),
('GROUP BY', group),
('ORDER BY', order),
# The limit and offset could be the values provided by
# the end-user and are potentially unsafe.
# Using them as parameters to avoid any risk.
('LIMIT', limit and SQLParam(limit).sqlquery()),
('OFFSET', offset and SQLParam(offset).sqlquery()))
def _gen_clause(self, sql, val, vars=None):
if isinstance(val, numeric_types):
if sql == 'WHERE':
nout = 'id = ' + sqlquote(val)
else:
nout = SQLQuery(val)
#@@@
elif isinstance(val, (list, tuple)) and len(val) == 2:
nout = SQLQuery(val[0], val[1]) # backwards-compatibility
elif sql == 'WHERE' and isinstance(val, dict):
nout = self._where_dict(val)
elif isinstance(val, SQLQuery):
nout = val
else:
nout = reparam(val, vars)
def xjoin(a, b):
if a and b:
return a + ' ' + b
else:
return a or b
return xjoin(sql, nout)
def _query(self, vars=None, _raw_sql_flag=False):
sql_clauses = self._sql_clauses(self._what, self._tables, self._where,
self._group, self._order, self._limit,
self._offset, self._join_type,
self._join_expression)
clauses = [
self._gen_clause(sql, val, vars) for sql, val in sql_clauses
if val is not None
]
qout = SQLQuery.join(clauses)
if self._test or _raw_sql_flag:
return qout
return self.database.query(qout, processed=True)
def query(self, _raw_sql_flag=False):
return self._query(_raw_sql_flag=_raw_sql_flag)
def first(self):
query_result = self._query()
return query_result[0] if query_result else None
def all(self):
return self._query()
def order_by(self, order_vars, _reversed=False):
"""Order by syntax
        :param order_vars: a comma-separated string of field names or a list of field names
:param _reversed: ASC or DESC, default ASC"""
if isinstance(order_vars, string_types):
order_vars = ", ".join([
"{}.{}".format(self.cur_table, field)
for field in order_vars.split(",")
])
if _reversed:
self._order = order_vars + " DESC "
else:
self._order = order_vars
elif isinstance(order_vars, list):
order_vars = [
"{}.{}".format(self.cur_table, field) for field in order_vars
]
if _reversed:
self._order = " DESC , ".join(order_vars) + " DESC "
else:
self._order = ", ".join(order_vars)
else:
raise ValueError("Order by values is wrong.")
return self
def limit(self, num):
self._limit = num
return self._query()
def offset(self, num):
self._offset = num
return self
def having(self):
pass
def _join(self, table, using, what=None, _join_type=JOIN.INNER_JOIN):
self._join_type = _join_type
if what:
self._what += ", " + what
self._join_expression = add_space(" {0} ON {0}.{2} = {1}.{2}".format(
table, self.cur_table, using))
self.cur_table = table
return self
class Select(object):
def __init__(self,
database,
tables,
fields=None,
_raw_sql_flag=False,
distinct=False):
self.distinct = distinct
self._metadata = MetaData(database, tables)
self._metadata._what = self._what_fields(self._metadata.cur_table,
fields)
self._raw_sql_flag = _raw_sql_flag
def _opt_where(self, opt=OP.EQ, **kwargs):
opt_expression = self._metadata._where_dict(kwargs,
opt) if kwargs else ""
if opt_expression:
if self._metadata._where:
self._metadata._where += add_space(OP.AND) + opt_expression
else:
self._metadata._where = opt_expression
return self
def _what_fields(self, cur_table, fields=None):
if fields and not isinstance(fields, list):
raise ValueError("fields must be list object.")
if fields and self._metadata.cur_table:
fields = ["{}.{}".format(cur_table, field) for field in fields]
default_fields_string = "{}.*".format(cur_table) if cur_table else '*'
fields_string = ", ".join(fields) if fields else default_fields_string
return "{} {}".format(OP.DISTINCT, fields_string) if self.distinct \
else fields_string
def filter_by(self, **kwargs):
if not kwargs:
return self._metadata
if "where" in kwargs and kwargs.get("where"):
self._metadata._where = kwargs.get("where")
else:
self._metadata._where = kwargs
return self._metadata
def get(self, **kwargs):
if "where" in kwargs and kwargs.get("where"):
self._metadata._where = kwargs.get("where")
else:
self._metadata._where = kwargs
return self._metadata._query()
def all(self):
return self._metadata._query()
def count(self, distinct=None, **kwargs):
if "where" in kwargs and kwargs.get("where"):
self._metadata._where = kwargs.get("where")
else:
self._opt_where(OP.EQ, **kwargs)
count_str = "COUNT(DISTINCT {}.{})".format(
self._metadata.cur_table, distinct) if distinct else "COUNT(*)"
self._metadata._what = count_str + " AS COUNT"
query_result = self._metadata._query()
return query_result[0]["COUNT"]
def distinct(self):
self.distinct = True
return self
def like(self, **kwargs):
return self._opt_where(OP.LIKE, **kwargs)
def not_like(self, **kwargs):
return self._opt_where(OP.NOT_LIKE, **kwargs)
def filter(self, **kwargs):
return self._opt_where(OP.EQ, **kwargs)
def lt(self, **kwargs):
return self._opt_where(OP.LT, **kwargs)
def lte(self, **kwargs):
return self._opt_where(OP.LTE, **kwargs)
def gt(self, **kwargs):
return self._opt_where(OP.GT, **kwargs)
def gte(self, **kwargs):
return self._opt_where(OP.GTE, **kwargs)
def eq(self, **kwargs):
return self._opt_where(OP.EQ, **kwargs)
def ne(self, **kwargs):
return self._opt_where(OP.NE, **kwargs)
def between(self, **kwargs):
where_clauses = []
for k, v in kwargs.items():
if not isinstance(v, list) or len(v) != 2:
raise ValueError(
"between() values must be a list of length 2.")
where_clauses.append(
"{}.{}".format(self._metadata.cur_table, k) +
" BETWEEN {} AND {} ".format(sqlquote(v[0]), sqlquote(v[1])))
if not where_clauses:
return self
between_expression = SQLQuery.join(where_clauses, add_space(OP.AND))
if self._metadata._where:
self._metadata._where += add_space(OP.AND) + between_expression
else:
self._metadata._where = between_expression
return self
def in_(self, **kwargs):
where_clauses = []
for k, v in kwargs.items():
if not isinstance(v, list):
raise ValueError("param must be list object")
where_clauses.append("{}.{}".format(self._metadata.cur_table, k) +
" {} {} ".format(OP.IN, sqlquote(v)))
if not where_clauses:
return self
in_expression = SQLQuery.join(where_clauses, add_space(OP.AND))
if self._metadata._where:
self._metadata._where += add_space(OP.AND) + in_expression
else:
self._metadata._where = in_expression
return self
def not_in(self, **kwargs):
where_clauses = []
for k, v in kwargs.items():
if not isinstance(v, list):
raise ValueError("param must be list object")
where_clauses.append("{}.{}".format(self._metadata.cur_table, k) +
" {} {} ".format(OP.NOT_IN, sqlquote(v)))
if not where_clauses:
return self
in_expression = SQLQuery.join(where_clauses, add_space(OP.AND))
if self._metadata._where:
self._metadata._where += add_space(OP.AND) + in_expression
else:
self._metadata._where = in_expression
return self
def first(self):
return self._metadata.first()
def query(self):
return self._metadata.query(self._raw_sql_flag)
def order_by(self, order_vars, _reversed=False):
return self._metadata.order_by(order_vars, _reversed)
def limit(self, num):
return self._metadata.limit(num)
def offset(self, num):
return self._metadata.offset(num)
def inner_join(self, table, using, fields=None, **kwargs):
what = None
if fields:
what = self._what_fields(table, fields)
self._metadata._join(table, using, what, JOIN.INNER_JOIN)
return self._opt_where(OP.EQ, **kwargs)
def left_join(self, table, using, fields, **kwargs):
what = None
if fields:
what = self._what_fields(table, fields)
self._metadata._join(table, using, what, JOIN.LEFT_JOIN)
return self._opt_where(OP.EQ, **kwargs)
def right_join(self, table, using, fields, **kwargs):
what = None
if fields:
what = self._what_fields(table, fields)
self._metadata._join(table, using, what, JOIN.RIGHT_JOIN)
return self._opt_where(OP.EQ, **kwargs)
class Table(object):
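# Thin facade over a single table: each method delegates to the matching Insert/Update/Select/Delete builder.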
def __init__(self, database, tables):
self.database = database
self.tables = tables
def bind(self, database=None):
if database:
self.database = database
return self
def insert(self,
seqname=None,
test=False,
default=None,
ignore=False,
**values):
return Insert(self.database, self.tables, seqname, default,
test).insert(ignore, **values)
def insert_duplicate_update(self,
where,
vars=None,
seqname=None,
test=False,
**values):
return Insert(self.database, self.tables, seqname,
_test=test).insert_duplicate_update(
where, vars, **values)
def update(self, where, vars=None, test=None, **values):
return Update(self.database, self.tables,
test).update(where, vars, **values)
def select(self, fields=None):
return Select(self.database, self.tables, fields)
def delete(self, where, using=None, vars=None, test=False):
return Delete(self.database, self.tables,
test).delete(where, using, vars)
class MySQLDB(DB):
"""MySQLDB class, about importing mysqldb module and
and the required parameters."""
def __init__(self,
maxcached=0,
mincached=0,
maxshared=0,
maxconnections=0,
maxusage=0,
pool=False,
autocommit=False,
reset=False,
**params):
db = import_driver(["MySQLdb", "pymysql", "mysql.connector"],
preferred=params.pop('driver', None))
if db.__name__ == "pymysql" or db.__name__ == "mysql.connector":
if 'passwd' in params:
params['password'] = params['passwd']
del params['passwd']
if 'charset' not in params:
params['charset'] = 'utf8'
elif params['charset'] is None:
del params['charset']
self.paramstyle = db.paramstyle = 'pyformat' # it's both, like psycopg
self.dbname = "mysql"
DB.__init__(self,
db,
params,
pool,
maxcached=maxcached,
mincached=mincached,
maxshared=maxshared,
maxconnections=maxconnections,
maxusage=maxusage,
autocommit=autocommit,
reset=reset)
self.supports_multiple_insert = True
def _process_insert_query(self, query, tablename, seqname):
return query, SQLQuery('SELECT last_insert_id();')
def _get_insert_default_values_query(self, table):
return "INSERT INTO %s () VALUES()" % table
def import_driver(drivers, preferred=None):
"""Import the first available driver or preferred driver.
"""
if preferred:
drivers = [preferred]
for d in drivers:
try:
return __import__(d, None, None, ['x'])
except ImportError:
pass
raise ImportError("Unable to import " + " or ".join(drivers))
|
[] |
[] |
[
"debug",
"get_debug_queries"
] |
[]
|
["debug", "get_debug_queries"]
|
python
| 2 | 0 | |
bot.py
|
import logging
import random
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
import urllib.request
import json
import imdb
import os
BOT_TOKEN = os.environ.get("BOT_TOKEN")
OMDB_API_KEY = '558c75c8'
ia = imdb.IMDb()
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
logger = logging.getLogger(__name__)
def start(update, context):
update.message.reply_text('Hi! \nWelcome to the *IMDb Bot*. \nSend me the name of any movie or TV show to get its details. \nHappy viewing! \n \nCreated by [Karan Malik](https://karan-malik.github.io)',parse_mode='markdown')
def help(update, context):
update.message.reply_text('Send me the name of any movie to get its details. \nTry out "Avengers Endgame"')
def error(update, context):
logger.warning('Update "%s" caused error "%s"', update, context.error)
def reply(update, context):
movie_name=update.message.text
search = ia.search_movie(movie_name)
id='tt'+search[0].movieID
url= 'http://www.omdbapi.com/?i='+id+'&apikey='+OMDB_API_KEY
x=urllib.request.urlopen(url)
for line in x:
x=line.decode()
data=json.loads(x)
ans=''
ans+='*'+data['Title']+'* ('+data['Year']+')'+'\n\n'
ans+='*IMDb Rating*: '+data['imdbRating']+' \n'
ans+='*Cast*: '+data['Actors']+'\n'
ans+='*Genre*: '+data['Genre']+'\n\n'
ans+='*Plot*: '+data['Plot']+'\n'
ans+='[.]('+data['Poster']+')'
update.message.reply_text(ans,parse_mode='markdown')
def main():
updater = Updater(BOT_TOKEN, use_context=True)
dp = updater.dispatcher
dp.add_handler(CommandHandler("start", start))
dp.add_handler(CommandHandler("help", help))
dp.add_handler(MessageHandler(Filters.text, reply))
dp.add_error_handler(error)
updater.start_polling()
updater.idle()
if __name__ == '__main__':
main()
|
[] |
[] |
[
"BOT_TOKEN"
] |
[]
|
["BOT_TOKEN"]
|
python
| 1 | 0 | |
pkg/server/server.go
|
package server
import (
"fmt"
"os"
"strings"
hooks "github.com/appscode/kubernetes-webhook-util/admission/v1beta1"
admissionreview "github.com/appscode/kubernetes-webhook-util/registry/admissionreview/v1beta1"
reg_util "github.com/appscode/kutil/admissionregistration/v1beta1"
dynamic_util "github.com/appscode/kutil/dynamic"
"github.com/appscode/stash/apis/repositories"
"github.com/appscode/stash/apis/repositories/install"
"github.com/appscode/stash/apis/repositories/v1alpha1"
api "github.com/appscode/stash/apis/stash/v1alpha1"
"github.com/appscode/stash/pkg/controller"
"github.com/appscode/stash/pkg/eventer"
snapregistry "github.com/appscode/stash/pkg/registry/snapshot"
admission "k8s.io/api/admission/v1beta1"
core "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/apimachinery/pkg/version"
"k8s.io/apiserver/pkg/registry/rest"
genericapiserver "k8s.io/apiserver/pkg/server"
"k8s.io/client-go/kubernetes"
store "kmodules.xyz/objectstore-api/api/v1"
)
const (
apiserviceName = "v1alpha1.admission.stash.appscode.com"
)
var (
Scheme = runtime.NewScheme()
Codecs = serializer.NewCodecFactory(Scheme)
)
func init() {
install.Install(Scheme)
admission.AddToScheme(Scheme)
// we need to add the options to empty v1
// TODO fix the server code to avoid this
metav1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
// TODO: keep the generic API server from wanting this
unversioned := schema.GroupVersion{Group: "", Version: "v1"}
Scheme.AddUnversionedTypes(unversioned,
&metav1.Status{},
&metav1.APIVersions{},
&metav1.APIGroupList{},
&metav1.APIGroup{},
&metav1.APIResourceList{},
)
}
type StashConfig struct {
GenericConfig *genericapiserver.RecommendedConfig
ExtraConfig *controller.Config
}
// StashServer contains state for a Kubernetes cluster master/api server.
type StashServer struct {
GenericAPIServer *genericapiserver.GenericAPIServer
Controller *controller.StashController
}
func (op *StashServer) Run(stopCh <-chan struct{}) error {
// sync cache
go op.Controller.Run(stopCh)
return op.GenericAPIServer.PrepareRun().Run(stopCh)
}
type completedConfig struct {
GenericConfig genericapiserver.CompletedConfig
ExtraConfig *controller.Config
}
type CompletedConfig struct {
// Embed a private pointer that cannot be instantiated outside of this package.
*completedConfig
}
// Complete fills in any fields not set that are required to have valid data. It's mutating the receiver.
func (c *StashConfig) Complete() CompletedConfig {
completedCfg := completedConfig{
c.GenericConfig.Complete(),
c.ExtraConfig,
}
completedCfg.GenericConfig.Version = &version.Info{
Major: "1",
Minor: "1",
}
return CompletedConfig{&completedCfg}
}
// New returns a new instance of StashServer from the given config.
func (c completedConfig) New() (*StashServer, error) {
genericServer, err := c.GenericConfig.New("stash-apiserver", genericapiserver.NewEmptyDelegate()) // completion is done in Complete, no need for a second time
if err != nil {
return nil, err
}
ctrl, err := c.ExtraConfig.New()
if err != nil {
return nil, err
}
var admissionHooks []hooks.AdmissionHook
if c.ExtraConfig.EnableValidatingWebhook {
admissionHooks = append(admissionHooks,
ctrl.NewResticWebhook(),
ctrl.NewRecoveryWebhook(),
ctrl.NewRepositoryWebhook(),
)
}
if c.ExtraConfig.EnableMutatingWebhook {
admissionHooks = append(admissionHooks,
ctrl.NewDeploymentWebhook(),
ctrl.NewDaemonSetWebhook(),
ctrl.NewStatefulSetWebhook(),
ctrl.NewReplicationControllerWebhook(),
ctrl.NewReplicaSetWebhook(),
)
}
s := &StashServer{
GenericAPIServer: genericServer,
Controller: ctrl,
}
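// Register an AdmissionReview REST endpoint for every group/version served by the configured admission hooks.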
for _, versionMap := range admissionHooksByGroupThenVersion(admissionHooks...) {
// TODO we're going to need a later k8s.io/apiserver so that we can get discovery to list a different group version for
// our endpoint which we'll use to back some custom storage which will consume the AdmissionReview type and give back the correct response
apiGroupInfo := genericapiserver.APIGroupInfo{
VersionedResourcesStorageMap: map[string]map[string]rest.Storage{},
// TODO unhardcode this. It was hardcoded before, but we need to re-evaluate
OptionsExternalVersion: &schema.GroupVersion{Version: "v1"},
Scheme: Scheme,
ParameterCodec: metav1.ParameterCodec,
NegotiatedSerializer: Codecs,
}
for _, admissionHooks := range versionMap {
for i := range admissionHooks {
admissionHook := admissionHooks[i]
admissionResource, _ := admissionHook.Resource()
admissionVersion := admissionResource.GroupVersion()
// just overwrite the groupversion with a random one. We don't really care or know.
apiGroupInfo.PrioritizedVersions = appendUniqueGroupVersion(apiGroupInfo.PrioritizedVersions, admissionVersion)
admissionReview := admissionreview.NewREST(admissionHook.Admit)
v1alpha1storage, ok := apiGroupInfo.VersionedResourcesStorageMap[admissionVersion.Version]
if !ok {
v1alpha1storage = map[string]rest.Storage{}
}
v1alpha1storage[admissionResource.Resource] = admissionReview
apiGroupInfo.VersionedResourcesStorageMap[admissionVersion.Version] = v1alpha1storage
}
}
if err := s.GenericAPIServer.InstallAPIGroup(&apiGroupInfo); err != nil {
return nil, err
}
}
for i := range admissionHooks {
admissionHook := admissionHooks[i]
postStartName := postStartHookName(admissionHook)
if len(postStartName) == 0 {
continue
}
s.GenericAPIServer.AddPostStartHookOrDie(postStartName,
func(context genericapiserver.PostStartHookContext) error {
return admissionHook.Initialize(c.ExtraConfig.ClientConfig, context.StopCh)
},
)
}
if c.ExtraConfig.EnableValidatingWebhook {
s.GenericAPIServer.AddPostStartHookOrDie("validating-webhook-xray",
func(context genericapiserver.PostStartHookContext) error {
go func() {
xray := reg_util.NewCreateValidatingWebhookXray(c.ExtraConfig.ClientConfig, apiserviceName, &api.Repository{
TypeMeta: metav1.TypeMeta{
APIVersion: api.SchemeGroupVersion.String(),
Kind: api.ResourceKindRepository,
},
ObjectMeta: metav1.ObjectMeta{
Name: "test-repository-for-webhook-xray",
Namespace: "default",
},
Spec: api.RepositorySpec{
WipeOut: true,
Backend: store.Backend{
Local: &store.LocalSpec{
VolumeSource: core.VolumeSource{
HostPath: &core.HostPathVolumeSource{
Path: "/tmp/test",
},
},
MountPath: "/tmp/test",
},
},
},
}, context.StopCh)
if err := xray.IsActive(); err != nil {
w, _, e2 := dynamic_util.DetectWorkload(
c.ExtraConfig.ClientConfig,
core.SchemeGroupVersion.WithResource("pods"),
os.Getenv("MY_POD_NAMESPACE"),
os.Getenv("MY_POD_NAME"))
if e2 == nil {
eventer.CreateEventWithLog(
kubernetes.NewForConfigOrDie(c.ExtraConfig.ClientConfig),
"stash-operator",
w,
core.EventTypeWarning,
eventer.EventReasonAdmissionWebhookNotActivated,
err.Error())
}
panic(err)
}
}()
return nil
},
)
}
{
apiGroupInfo := genericapiserver.NewDefaultAPIGroupInfo(repositories.GroupName, Scheme, metav1.ParameterCodec, Codecs)
v1alpha1storage := map[string]rest.Storage{}
v1alpha1storage[v1alpha1.ResourcePluralSnapshot] = snapregistry.NewREST(c.ExtraConfig.ClientConfig)
apiGroupInfo.VersionedResourcesStorageMap["v1alpha1"] = v1alpha1storage
if err := s.GenericAPIServer.InstallAPIGroup(&apiGroupInfo); err != nil {
return nil, err
}
}
return s, nil
}
func appendUniqueGroupVersion(slice []schema.GroupVersion, elems ...schema.GroupVersion) []schema.GroupVersion {
m := map[schema.GroupVersion]bool{}
for _, gv := range slice {
m[gv] = true
}
for _, e := range elems {
m[e] = true
}
out := make([]schema.GroupVersion, 0, len(m))
for gv := range m {
out = append(out, gv)
}
return out
}
func postStartHookName(hook hooks.AdmissionHook) string {
var ns []string
gvr, _ := hook.Resource()
ns = append(ns, fmt.Sprintf("admit-%s.%s.%s", gvr.Resource, gvr.Version, gvr.Group))
if len(ns) == 0 {
return ""
}
return strings.Join(append(ns, "init"), "-")
}
func admissionHooksByGroupThenVersion(admissionHooks ...hooks.AdmissionHook) map[string]map[string][]hooks.AdmissionHook {
ret := map[string]map[string][]hooks.AdmissionHook{}
for i := range admissionHooks {
hook := admissionHooks[i]
gvr, _ := hook.Resource()
group, ok := ret[gvr.Group]
if !ok {
group = map[string][]hooks.AdmissionHook{}
ret[gvr.Group] = group
}
group[gvr.Version] = append(group[gvr.Version], hook)
}
return ret
}
|
[
"\"MY_POD_NAMESPACE\"",
"\"MY_POD_NAME\""
] |
[] |
[
"MY_POD_NAME",
"MY_POD_NAMESPACE"
] |
[]
|
["MY_POD_NAME", "MY_POD_NAMESPACE"]
|
go
| 2 | 0 | |
setup.py
|
#! /usr/bin/env python
from __future__ import print_function
import sys
import os
import glob
import platform
# distutils is deprecated and vendored into setuptools now.
from setuptools import setup
from setuptools import Extension
from setuptools import find_packages
# Extra compiler arguments passed to *all* extensions.
global_compile_args = []
# Extra compiler arguments passed to C++ extensions
cpp_compile_args = []
# Extra linker arguments passed to C++ extensions
cpp_link_args = []
# Extra compiler arguments passed to the main extension
main_compile_args = []
# workaround segfaults on openbsd and RHEL 3 / CentOS 3 . see
# https://bitbucket.org/ambroff/greenlet/issue/11/segfault-on-openbsd-i386
# https://github.com/python-greenlet/greenlet/issues/4
# https://github.com/python-greenlet/greenlet/issues/94
# pylint:disable=too-many-boolean-expressions
is_linux = sys.platform.startswith('linux') # could be linux or linux2
if ((sys.platform == "openbsd4" and os.uname()[-1] == "i386")
or ("-with-redhat-3." in platform.platform() and platform.machine() == 'i686')
or (sys.platform == "sunos5" and os.uname()[-1] == "sun4v")
or ("SunOS" in platform.platform() and platform.machine() == "sun4v")
or (is_linux and platform.machine() == "ppc")):
global_compile_args.append("-Os")
if sys.platform == 'darwin':
# The clang compiler doesn't use --std=c++11 by default
cpp_compile_args.append("--std=gnu++11")
elif sys.platform == 'win32' and "MSC" in platform.python_compiler():
# Older versions of MSVC (Python 2.7) don't handle C++ exceptions
# correctly by default. While newer versions do handle exceptions by default,
# they don't do it fully correctly. So we need an argument on all versions.
#"/EH" == exception handling.
# "s" == standard C++,
# "c" == extern C functions don't throw
# OR
# "a" == standard C++, and Windows SEH; anything may throw, compiler optimizations
# around try blocks are less aggressive.
# /EHsc is suggested, as /EHa isn't supposed to be linked to other things not built
# with it.
# See https://docs.microsoft.com/en-us/cpp/build/reference/eh-exception-handling-model?view=msvc-160
handler = "/EHsc"
cpp_compile_args.append(handler)
# To disable most optimizations:
#cpp_compile_args.append('/Od')
# To enable assertions:
#cpp_compile_args.append('/UNDEBUG')
# To enable more compile-time warnings (/Wall produces a mountain of output).
#cpp_compile_args.append('/W4')
# To link with the debug C runtime...except we can't because we need
# the Python debug lib too, and they're not around by default
# cpp_compile_args.append('/MDd')
# Support fiber-safe thread-local storage: "the compiler mustn't
# cache the address of the TLS array, or optimize it as a common
# subexpression across a function call." This would probably solve
# some of the issues we had with MSVC caching the thread local
# variables on the stack, leading to having to split some
# functions up. Revisit those.
cpp_compile_args.append("/GT")
def readfile(filename):
with open(filename, 'r') as f: # pylint:disable=unspecified-encoding
return f.read()
GREENLET_SRC_DIR = 'src/greenlet/'
GREENLET_HEADER_DIR = GREENLET_SRC_DIR
GREENLET_HEADER = GREENLET_HEADER_DIR + 'greenlet.h'
GREENLET_TEST_DIR = 'src/greenlet/tests/'
# The location of the platform specific assembly files
# for switching.
GREENLET_PLATFORM_DIR = GREENLET_SRC_DIR + 'platform/'
def _find_platform_headers():
return glob.glob(GREENLET_PLATFORM_DIR + "switch_*.h")
def _find_impl_headers():
return glob.glob(GREENLET_SRC_DIR + "*.hpp")
if hasattr(sys, "pypy_version_info"):
ext_modules = []
headers = []
else:
headers = [GREENLET_HEADER]
if sys.platform == 'win32' and '64 bit' in sys.version:
# this works when building with msvc, not with 64 bit gcc
# switch_<platform>_masm.obj can be created with setup_switch_<platform>_masm.cmd
obj_fn = 'switch_arm64_masm.obj' if platform.machine() == 'ARM64' else 'switch_x64_masm.obj'
extra_objects = [os.path.join(GREENLET_PLATFORM_DIR, obj_fn)]
else:
extra_objects = []
if sys.platform == 'win32' and os.environ.get('GREENLET_STATIC_RUNTIME') in ('1', 'yes'):
main_compile_args.append('/MT')
elif hasattr(os, 'uname') and os.uname()[4] in ['ppc64el', 'ppc64le']:
main_compile_args.append('-fno-tree-dominator-opts')
ext_modules = [
Extension(
name='greenlet._greenlet',
sources=[
GREENLET_SRC_DIR + 'greenlet.cpp',
],
language='c++',
extra_objects=extra_objects,
extra_compile_args=global_compile_args + main_compile_args + cpp_compile_args,
extra_link_args=cpp_link_args,
depends=[
GREENLET_HEADER,
GREENLET_SRC_DIR + 'slp_platformselect.h',
] + _find_platform_headers() + _find_impl_headers()
),
# Test extensions.
#
# We used to try hard to not include these in built
# distributions, because we only distributed ``greenlet.so``.
# That's really not important, now we have a clean layout with
# the test directory nested inside a greenlet directory. See
# https://github.com/python-greenlet/greenlet/issues/184 and
# 189
Extension(
name='greenlet.tests._test_extension',
sources=[GREENLET_TEST_DIR + '_test_extension.c'],
include_dirs=[GREENLET_HEADER_DIR],
extra_compile_args=global_compile_args,
),
Extension(
name='greenlet.tests._test_extension_cpp',
sources=[GREENLET_TEST_DIR + '_test_extension_cpp.cpp'],
language="c++",
include_dirs=[GREENLET_HEADER_DIR],
extra_compile_args=global_compile_args + cpp_compile_args,
extra_link_args=cpp_link_args,
),
]
def get_greenlet_version():
with open('src/greenlet/__init__.py') as f: # pylint:disable=unspecified-encoding
looking_for = '__version__ = \''
for line in f:
if line.startswith(looking_for):
version = line[len(looking_for):-2]
return version
raise ValueError("Unable to find version")
setup(
name="greenlet",
version=get_greenlet_version(),
description='Lightweight in-process concurrent programming',
long_description=readfile("README.rst"),
long_description_content_type="text/x-rst",
url="https://greenlet.readthedocs.io/",
keywords="greenlet coroutine concurrency threads cooperative",
author="Alexey Borzenkov",
author_email="[email protected]",
maintainer='Jason Madden',
maintainer_email='[email protected]',
project_urls={
'Bug Tracker': 'https://github.com/python-greenlet/greenlet/issues',
'Source Code': 'https://github.com/python-greenlet/greenlet/',
'Documentation': 'https://greenlet.readthedocs.io/',
},
license="MIT License",
platforms=['any'],
package_dir={'': 'src'},
packages=find_packages('src'),
include_package_data=True,
headers=headers,
ext_modules=ext_modules,
classifiers=[
"Development Status :: 5 - Production/Stable",
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: C',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules'
],
extras_require={
'docs': [
'Sphinx',
# 0.18b1 breaks sphinx 1.8.5 which is the latest version that runs
# on Python 2. The version pin sphinx itself contains isn't specific enough.
'docutils < 0.18; python_version < "3"',
],
'test': [
'objgraph',
'faulthandler; python_version == "2.7" and platform_python_implementation == "CPython"',
],
},
python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*",
zip_safe=False,
)
|
[] |
[] |
[
"GREENLET_STATIC_RUNTIME"
] |
[]
|
["GREENLET_STATIC_RUNTIME"]
|
python
| 1 | 0 | |
src/main/java/org/gosulang/gradle/tasks/gosudoc/CommandLineGosuDoc.java
|
package org.gosulang.gradle.tasks.gosudoc;
import org.apache.tools.ant.taskdefs.condition.Os;
import org.gosulang.gradle.tasks.Util;
import org.gradle.api.GradleException;
import org.gradle.api.Project;
import org.gradle.api.file.FileCollection;
import org.gradle.api.logging.Logger;
import org.gradle.api.logging.Logging;
import org.gradle.api.tasks.compile.ForkOptions;
import org.gradle.process.ExecResult;
import org.gradle.process.JavaExecSpec;
import org.gradle.tooling.BuildException;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.jar.Attributes;
import java.util.jar.JarOutputStream;
import java.util.jar.Manifest;
import java.util.regex.Pattern;
public class CommandLineGosuDoc {
private static final Logger LOGGER = Logging.getLogger(CommandLineGosuDoc.class);
private final FileCollection _source;
private final File _targetDir;
private final FileCollection _projectClasspath;
private final FileCollection _gosuClasspath;
private final GosuDocOptions _options;
private final Project _project;
public CommandLineGosuDoc(FileCollection source, File targetDir, FileCollection gosuClasspath, FileCollection projectClasspath, GosuDocOptions options, Project project) {
_source = source;
_targetDir = targetDir;
_gosuClasspath = gosuClasspath;
_projectClasspath = projectClasspath;
_options = options;
_project = project;
}
public void execute() {
String startupMsg = "Initializing gosudoc generator";
if(!_project.getName().isEmpty()) {
startupMsg += " for " + _project.getName();
}
LOGGER.info(startupMsg);
//'source' is a FileCollection with explicit paths.
// We don't want that, so instead we create a temp directory with the contents of 'source'
// Copying 'source' to the temp dir should honor its include/exclude patterns
// Finally, the tmpdir will be the sole inputdir passed to the gosudoc task
final File tmpDir = new File(_project.getBuildDir(), "tmp/gosudoc");
_project.delete(tmpDir);
_project.copy(copySpec -> copySpec.from(_source).into(tmpDir));
List<String> gosudocArgs = new ArrayList<>();
gosudocArgs.add("-inputDirs");
gosudocArgs.add(tmpDir.getAbsolutePath());
gosudocArgs.add("-output");
gosudocArgs.add(_targetDir.getAbsolutePath());
if(_options.isVerbose()) {
gosudocArgs.add("-verbose");
}
ByteArrayOutputStream stdout = new ByteArrayOutputStream();
ByteArrayOutputStream stderr = new ByteArrayOutputStream();
FileCollection jointClasspath = _project.files(Util.findToolsJar()).plus(_gosuClasspath).plus(_projectClasspath);
// make temporary classpath jar with Class-Path attribute because jointClasspath will be way too long in some cases
File classpathJar;
try {
classpathJar = createClasspathJarFromFileCollection(jointClasspath);
} catch (IOException e) {
throw new BuildException("Error creating classpath JAR for gosudoc generation", e);
}
LOGGER.info("Created classpathJar at " + classpathJar.getAbsolutePath());
ExecResult result = _project.javaexec(javaExecSpec -> {
javaExecSpec.setWorkingDir((Object) _project.getProjectDir()); // Gradle 4.0 overloads ProcessForkOptions#setWorkingDir; must upcast to Object for backwards compatibility
setJvmArgs(javaExecSpec, _options.getForkOptions());
javaExecSpec.setMain("gw.gosudoc.cli.Gosudoc")
.setClasspath(_project.files(classpathJar))
.setArgs((Iterable<?>) gosudocArgs); // Gradle 4.0 overloads JavaExecSpec#setArgs; must upcast to Iterable<?> for backwards compatibility
javaExecSpec.setStandardOutput(stdout);
javaExecSpec.setErrorOutput(stderr);
javaExecSpec.setIgnoreExitValue(true); //otherwise fails immediately before displaying output
});
LOGGER.info(stdout.toString());
String errorContent = stderr.toString();
if(errorContent != null && !errorContent.isEmpty()) {
String regex = "^ERROR:\\s";
Pattern p = Pattern.compile(regex, Pattern.MULTILINE);
if(p.matcher(errorContent).find()) {
throw new GradleException("gosudoc failed with errors: \n" + errorContent);
} else LOGGER.warn(errorContent);
}
result.assertNormalExitValue();
}
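// Writes an otherwise empty JAR whose manifest Class-Path attribute lists each classpath entry as a URI, keeping the java command line short.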
private File createClasspathJarFromFileCollection(FileCollection classpath) throws IOException {
File tempFile;
if (LOGGER.isDebugEnabled()) {
tempFile = File.createTempFile(CommandLineGosuDoc.class.getName(), "classpath.jar", new File(_targetDir.getAbsolutePath()));
} else {
tempFile = File.createTempFile(CommandLineGosuDoc.class.getName(), "classpath.jar");
tempFile.deleteOnExit();
}
LOGGER.info("Creating classpath JAR at " + tempFile.getAbsolutePath());
Manifest man = new Manifest();
man.getMainAttributes().putValue(Attributes.Name.MANIFEST_VERSION.toString(), "1.0");
man.getMainAttributes().putValue(Attributes.Name.CLASS_PATH.toString(), convertFileCollectionToURIs(classpath));
//noinspection EmptyTryBlock
try(FileOutputStream fos = new FileOutputStream(tempFile);
JarOutputStream jarOut = new JarOutputStream(fos, man)) {
//This is a bit silly.
//The try-with-resources construct with two autoclosable resources saves us
//from having to deal with a boilerplate finally block to close the streams.
//Further, the JarOutputStream constructor with Manifest attribute does all the work we need,
//which is why the try block is intentionally empty.
}
return tempFile;
}
private String convertFileCollectionToURIs(FileCollection files) {
List<String> entries = new ArrayList<>();
//noinspection Convert2streamapi
for(File entry : files) {
LOGGER.info("Encoding " + entry.getAbsolutePath());
entries.add(entry.toURI().toString());
}
return String.join(" ", entries);
}
private void setJvmArgs( JavaExecSpec spec, ForkOptions forkOptions) {
if(forkOptions.getMemoryInitialSize() != null && !forkOptions.getMemoryInitialSize().isEmpty()) {
spec.setMinHeapSize(forkOptions.getMemoryInitialSize());
}
if(forkOptions.getMemoryMaximumSize() != null && !forkOptions.getMemoryMaximumSize().isEmpty()) {
spec.setMaxHeapSize(forkOptions.getMemoryMaximumSize());
}
List<String> args = new ArrayList<>();
//respect JAVA_OPTS, if it exists
String JAVA_OPTS = System.getenv("JAVA_OPTS");
if(JAVA_OPTS != null) {
args.add(JAVA_OPTS);
}
args.addAll(forkOptions.getJvmArgs());
if(Os.isFamily(Os.FAMILY_MAC)) {
args.add("-Xdock:name=gosudoc");
}
spec.setJvmArgs((Iterable<?>) args); // Gradle 4.0 overloads JavaForkOptions#setJvmArgs; must upcast to Iterable<?> for backwards compatibility
}
}
|
[
"\"JAVA_OPTS\""
] |
[] |
[
"JAVA_OPTS"
] |
[]
|
["JAVA_OPTS"]
|
java
| 1 | 0 | |
src/testcases/CWE191_Integer_Underflow/s01/CWE191_Integer_Underflow__int_Environment_sub_04.java
|
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE191_Integer_Underflow__int_Environment_sub_04.java
Label Definition File: CWE191_Integer_Underflow__int.label.xml
Template File: sources-sinks-04.tmpl.java
*/
/*
* @description
* CWE: 191 Integer Underflow
* BadSource: Environment Read data from an environment variable
* GoodSource: A hardcoded non-zero, non-min, non-max, even number
* Sinks: sub
* GoodSink: Ensure there will not be an underflow before subtracting 1 from data
* BadSink : Subtract 1 from data, which can cause an Underflow
* Flow Variant: 04 Control flow: if(PRIVATE_STATIC_FINAL_TRUE) and if(PRIVATE_STATIC_FINAL_FALSE)
*
* */
package testcases.CWE191_Integer_Underflow.s01;
import testcasesupport.*;
import javax.servlet.http.*;
import java.util.logging.Level;
public class CWE191_Integer_Underflow__int_Environment_sub_04 extends AbstractTestCase
{
/* The two variables below are declared "final", so a tool should
* be able to identify that reads of these will always return their
* initialized values.
*/
private static final boolean PRIVATE_STATIC_FINAL_TRUE = true;
private static final boolean PRIVATE_STATIC_FINAL_FALSE = false;
public void bad() throws Throwable
{
int data;
if (PRIVATE_STATIC_FINAL_TRUE)
{
data = Integer.MIN_VALUE; /* Initialize data */
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
{
String stringNumber = System.getenv("ADD");
if (stringNumber != null) // avoid NPD incidental warnings
{
try
{
data = Integer.parseInt(stringNumber.trim());
}
catch(NumberFormatException exceptNumberFormat)
{
IO.logger.log(Level.WARNING, "Number format exception parsing data from string", exceptNumberFormat);
}
}
}
}
else
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run
* but ensure data is initialized before the Sink to avoid compiler errors */
data = 0;
}
if (PRIVATE_STATIC_FINAL_TRUE)
{
/* POTENTIAL FLAW: if data == Integer.MIN_VALUE, this will underflow */
int result = (int)(data - 1);
IO.writeLine("result: " + result);
}
}
/* goodG2B1() - use goodsource and badsink by changing first PRIVATE_STATIC_FINAL_TRUE to PRIVATE_STATIC_FINAL_FALSE */
private void goodG2B1() throws Throwable
{
int data;
if (PRIVATE_STATIC_FINAL_FALSE)
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run
* but ensure data is initialized before the Sink to avoid compiler errors */
data = 0;
}
else
{
/* FIX: Use a hardcoded number that won't cause underflow, overflow, divide by zero, or loss-of-precision issues */
data = 2;
}
if (PRIVATE_STATIC_FINAL_TRUE)
{
/* POTENTIAL FLAW: if data == Integer.MIN_VALUE, this will underflow */
int result = (int)(data - 1);
IO.writeLine("result: " + result);
}
}
/* goodG2B2() - use goodsource and badsink by reversing statements in first if */
private void goodG2B2() throws Throwable
{
int data;
if (PRIVATE_STATIC_FINAL_TRUE)
{
/* FIX: Use a hardcoded number that won't cause underflow, overflow, divide by zero, or loss-of-precision issues */
data = 2;
}
else
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run
* but ensure data is initialized before the Sink to avoid compiler errors */
data = 0;
}
if (PRIVATE_STATIC_FINAL_TRUE)
{
/* POTENTIAL FLAW: if data == Integer.MIN_VALUE, this will underflow */
int result = (int)(data - 1);
IO.writeLine("result: " + result);
}
}
/* goodB2G1() - use badsource and goodsink by changing second PRIVATE_STATIC_FINAL_TRUE to PRIVATE_STATIC_FINAL_FALSE */
private void goodB2G1() throws Throwable
{
int data;
if (PRIVATE_STATIC_FINAL_TRUE)
{
data = Integer.MIN_VALUE; /* Initialize data */
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
{
String stringNumber = System.getenv("ADD");
if (stringNumber != null) // avoid NPD incidental warnings
{
try
{
data = Integer.parseInt(stringNumber.trim());
}
catch(NumberFormatException exceptNumberFormat)
{
IO.logger.log(Level.WARNING, "Number format exception parsing data from string", exceptNumberFormat);
}
}
}
}
else
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run
* but ensure data is initialized before the Sink to avoid compiler errors */
data = 0;
}
if (PRIVATE_STATIC_FINAL_FALSE)
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run */
IO.writeLine("Benign, fixed string");
}
else
{
/* FIX: Add a check to prevent an underflow from occurring */
if (data > Integer.MIN_VALUE)
{
int result = (int)(data - 1);
IO.writeLine("result: " + result);
}
else
{
IO.writeLine("data value is too small to perform subtraction.");
}
}
}
/* goodB2G2() - use badsource and goodsink by reversing statements in second if */
private void goodB2G2() throws Throwable
{
int data;
if (PRIVATE_STATIC_FINAL_TRUE)
{
data = Integer.MIN_VALUE; /* Initialize data */
/* get environment variable ADD */
/* POTENTIAL FLAW: Read data from an environment variable */
{
String stringNumber = System.getenv("ADD");
if (stringNumber != null) // avoid NPD incidental warnings
{
try
{
data = Integer.parseInt(stringNumber.trim());
}
catch(NumberFormatException exceptNumberFormat)
{
IO.logger.log(Level.WARNING, "Number format exception parsing data from string", exceptNumberFormat);
}
}
}
}
else
{
/* INCIDENTAL: CWE 561 Dead Code, the code below will never run
* but ensure data is initialized before the Sink to avoid compiler errors */
data = 0;
}
if (PRIVATE_STATIC_FINAL_TRUE)
{
/* FIX: Add a check to prevent an underflow from occurring */
if (data > Integer.MIN_VALUE)
{
int result = (int)(data - 1);
IO.writeLine("result: " + result);
}
else
{
IO.writeLine("data value is too small to perform subtraction.");
}
}
}
public void good() throws Throwable
{
goodG2B1();
goodG2B2();
goodB2G1();
goodB2G2();
}
/* Below is the main(). It is only used when building this testcase on
* its own for testing or for building a binary to use in testing binary
* analysis tools. It is not used when compiling all the testcases as one
* application, which is how source code analysis tools are tested.
*/
public static void main(String[] args) throws ClassNotFoundException,
InstantiationException, IllegalAccessException
{
mainFromParent(args);
}
}
|
[
"\"ADD\"",
"\"ADD\"",
"\"ADD\""
] |
[] |
[
"ADD"
] |
[]
|
["ADD"]
|
java
| 1 | 0 | |
ch03/ChildrenForm2.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ChildrenForm2.ui'
#
# Created by: PyQt5 UI code generator 5.11.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(400, 300)
self.textEdit = QtWidgets.QTextEdit(Form)
self.textEdit.setGeometry(QtCore.QRect(10, 10, 381, 281))
self.textEdit.setObjectName("textEdit")
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
self.textEdit.setHtml(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'PMingLiU\'; font-size:9pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">我是子窗口</p></body></html>"))
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
soracom/generated/cmd/lora_network_sets_get.go
|
// Code generated by soracom-cli generate-cmd. DO NOT EDIT.
package cmd
import (
"net/url"
"os"
"github.com/spf13/cobra"
)
// LoraNetworkSetsGetCmdNsId holds value of 'ns_id' option
var LoraNetworkSetsGetCmdNsId string
func init() {
LoraNetworkSetsGetCmd.Flags().StringVar(&LoraNetworkSetsGetCmdNsId, "ns-id", "", TRAPI("ID of the target LoRa network set."))
LoraNetworkSetsCmd.AddCommand(LoraNetworkSetsGetCmd)
}
// LoraNetworkSetsGetCmd defines 'get' subcommand
var LoraNetworkSetsGetCmd = &cobra.Command{
Use: "get",
Short: TRAPI("/lora_network_sets/{ns_id}:get:summary"),
Long: TRAPI(`/lora_network_sets/{ns_id}:get:description`),
RunE: func(cmd *cobra.Command, args []string) error {
opt := &apiClientOptions{
BasePath: "/v1",
Language: getSelectedLanguage(),
}
ac := newAPIClient(opt)
if v := os.Getenv("SORACOM_VERBOSE"); v != "" {
ac.SetVerbose(true)
}
err := authHelper(ac, cmd, args)
if err != nil {
cmd.SilenceUsage = true
return err
}
param, err := collectLoraNetworkSetsGetCmdParams(ac)
if err != nil {
return err
}
body, err := ac.callAPI(param)
if err != nil {
cmd.SilenceUsage = true
return err
}
if body == "" {
return nil
}
if rawOutput {
_, err = os.Stdout.Write([]byte(body))
} else {
return prettyPrintStringAsJSON(body)
}
return err
},
}
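// collectLoraNetworkSetsGetCmdParams checks that the required --ns-id flag was supplied and assembles the API request parameters.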
func collectLoraNetworkSetsGetCmdParams(ac *apiClient) (*apiParams, error) {
var parsedBody interface{}
var err error
err = checkIfRequiredStringParameterIsSupplied("ns_id", "ns-id", "path", parsedBody, LoraNetworkSetsGetCmdNsId)
if err != nil {
return nil, err
}
return &apiParams{
method: "GET",
path: buildPathForLoraNetworkSetsGetCmd("/lora_network_sets/{ns_id}"),
query: buildQueryForLoraNetworkSetsGetCmd(),
noRetryOnError: noRetryOnError,
}, nil
}
func buildPathForLoraNetworkSetsGetCmd(path string) string {
escapedNsId := url.PathEscape(LoraNetworkSetsGetCmdNsId)
path = strReplace(path, "{"+"ns_id"+"}", escapedNsId, -1)
return path
}
func buildQueryForLoraNetworkSetsGetCmd() url.Values {
result := url.Values{}
return result
}
|
[
"\"SORACOM_VERBOSE\""
] |
[] |
[
"SORACOM_VERBOSE"
] |
[]
|
["SORACOM_VERBOSE"]
|
go
| 1 | 0 | |
build/tools/roomservice.py
|
#!/usr/bin/env python
# Copyright (C) 2012-2013, The CyanogenMod Project
# (C) 2017, The LineageOS Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import base64
import json
import netrc
import os
import re
import sys
try:
# For python3
import urllib.error
import urllib.parse
import urllib.request
except ImportError:
# For python2
import imp
import urllib2
import urlparse
urllib = imp.new_module('urllib')
urllib.error = urllib2
urllib.parse = urlparse
urllib.request = urllib2
from xml.etree import ElementTree
product = sys.argv[1]
if len(sys.argv) > 2:
depsonly = sys.argv[2]
else:
depsonly = None
try:
device = product[product.index("_") + 1:]
except:
device = product
if not depsonly:
print("Device %s not found. Attempting to retrieve device repository from LineageOS Github (http://github.com/LineageOS)." % device)
repositories = []
try:
authtuple = netrc.netrc().authenticators("api.github.com")
if authtuple:
auth_string = ('%s:%s' % (authtuple[0], authtuple[2])).encode()
githubauth = base64.b64encode(auth_string).decode()  # b64encode exists on Python 2 and 3; encodestring was removed in 3.9
else:
githubauth = None
except:
githubauth = None
def add_auth(githubreq):
if githubauth:
githubreq.add_header("Authorization","Basic %s" % githubauth)
if not depsonly:
githubreq = urllib.request.Request("https://api.github.com/search/repositories?q=%s+user:LineageOS+in:name+fork:true" % device)
add_auth(githubreq)
try:
result = json.loads(urllib.request.urlopen(githubreq).read().decode())
except urllib.error.URLError:
print("Failed to search GitHub")
sys.exit()
except ValueError:
print("Failed to parse return data from GitHub")
sys.exit()
for res in result.get('items', []):
repositories.append(res)
local_manifests = r'.repo/local_manifests'
if not os.path.exists(local_manifests): os.makedirs(local_manifests)
def exists_in_tree(lm, path):
for child in lm:  # Element.getchildren() was removed in Python 3.9; iterating the element directly is equivalent
if child.attrib['path'] == path:
return True
return False
# in-place prettyprint formatter
def indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def get_default_revision():
m = ElementTree.parse(".repo/manifest.xml")
d = m.findall('default')[0]
r = d.get('revision')
return r.replace('refs/heads/', '').replace('refs/tags/', '')
def get_from_manifest(devicename):
try:
lm = ElementTree.parse(".repo/local_manifests/roomservice.xml")
lm = lm.getroot()
except:
lm = ElementTree.Element("manifest")
for localpath in lm.findall("project"):
if re.search("android_device_.*_%s$" % device, localpath.get("name")):
return localpath.get("path")
return None
def is_in_manifest(projectpath):
try:
lm = ElementTree.parse(".repo/local_manifests/roomservice.xml")
lm = lm.getroot()
except:
lm = ElementTree.Element("manifest")
for localpath in lm.findall("project"):
if localpath.get("path") == projectpath:
return True
# Search in main manifest, too
try:
lm = ElementTree.parse(".repo/manifest.xml")
lm = lm.getroot()
except:
lm = ElementTree.Element("manifest")
for localpath in lm.findall("project"):
if localpath.get("path") == projectpath:
return True
# ... and don't forget the lineage snippet
try:
lm = ElementTree.parse(".repo/manifests/snippets/lineage.xml")
lm = lm.getroot()
except:
lm = ElementTree.Element("manifest")
for localpath in lm.findall("project"):
if localpath.get("path") == projectpath:
return True
return False
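# Adds <project> entries to .repo/local_manifests/roomservice.xml for any repositories not already tracked by a manifest.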
def add_to_manifest(repositories, fallback_branch = None):
try:
lm = ElementTree.parse(".repo/local_manifests/roomservice.xml")
lm = lm.getroot()
except:
lm = ElementTree.Element("manifest")
for repository in repositories:
repo_name = repository['repository']
repo_target = repository['target_path']
print('Checking if %s is fetched from %s' % (repo_target, repo_name))
if is_in_manifest(repo_target):
print('LineageOS/%s already fetched to %s' % (repo_name, repo_target))
continue
print('Adding dependency: LineageOS/%s -> %s' % (repo_name, repo_target))
project = ElementTree.Element("project", attrib = { "path": repo_target,
"remote": "github", "name": "LineageOS/%s" % repo_name })
if 'branch' in repository:
project.set('revision',repository['branch'])
elif fallback_branch:
print("Using fallback branch %s for %s" % (fallback_branch, repo_name))
project.set('revision', fallback_branch)
else:
print("Using default branch for %s" % repo_name)
lm.append(project)
indent(lm, 0)
raw_xml = ElementTree.tostring(lm).decode()
raw_xml = '<?xml version="1.0" encoding="UTF-8"?>\n' + raw_xml
f = open('.repo/local_manifests/roomservice.xml', 'w')
f.write(raw_xml)
f.close()
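# Reads <repo>/lineage.dependencies, adds missing repositories to the local manifest, syncs them, and recurses into each dependency.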
def fetch_dependencies(repo_path, fallback_branch = None):
print('Looking for dependencies in %s' % repo_path)
dependencies_path = repo_path + '/lineage.dependencies'
syncable_repos = []
verify_repos = []
if os.path.exists(dependencies_path):
dependencies_file = open(dependencies_path, 'r')
dependencies = json.loads(dependencies_file.read())
fetch_list = []
for dependency in dependencies:
if not is_in_manifest(dependency['target_path']):
fetch_list.append(dependency)
syncable_repos.append(dependency['target_path'])
verify_repos.append(dependency['target_path'])
elif re.search("android_device_.*_.*$", dependency['repository']):
verify_repos.append(dependency['target_path'])
dependencies_file.close()
if len(fetch_list) > 0:
print('Adding dependencies to manifest')
add_to_manifest(fetch_list, fallback_branch)
else:
print('%s has no additional dependencies.' % repo_path)
if len(syncable_repos) > 0:
print('Syncing dependencies')
os.system('repo sync --force-sync %s' % ' '.join(syncable_repos))
for deprepo in verify_repos:
fetch_dependencies(deprepo)
def has_branch(branches, revision):
return revision in [branch['name'] for branch in branches]
if depsonly:
repo_path = get_from_manifest(device)
if repo_path:
fetch_dependencies(repo_path)
else:
print("Trying dependencies-only mode on a non-existing device tree?")
sys.exit()
else:
for repository in repositories:
repo_name = repository['name']
if re.match(r"^android_device_[^_]*_" + device + "$", repo_name):
print("Found repository: %s" % repository['name'])
manufacturer = repo_name.replace("android_device_", "").replace("_" + device, "")
default_revision = get_default_revision()
print("Default revision: %s" % default_revision)
print("Checking branch info")
githubreq = urllib.request.Request(repository['branches_url'].replace('{/branch}', ''))
add_auth(githubreq)
result = json.loads(urllib.request.urlopen(githubreq).read().decode())
## Try tags, too, since that's what releases use
if not has_branch(result, default_revision):
githubreq = urllib.request.Request(repository['tags_url'].replace('{/tag}', ''))
add_auth(githubreq)
result.extend (json.loads(urllib.request.urlopen(githubreq).read().decode()))
repo_path = "device/%s/%s" % (manufacturer, device)
adding = {'repository':repo_name,'target_path':repo_path}
fallback_branch = None
if not has_branch(result, default_revision):
if os.getenv('ROOMSERVICE_BRANCHES'):
fallbacks = list(filter(bool, os.getenv('ROOMSERVICE_BRANCHES').split(' ')))
for fallback in fallbacks:
if has_branch(result, fallback):
print("Using fallback branch: %s" % fallback)
fallback_branch = fallback
break
if not fallback_branch:
print("Default revision %s not found in %s. Bailing." % (default_revision, repo_name))
print("Branches found:")
for branch in [branch['name'] for branch in result]:
print(branch)
print("Use the ROOMSERVICE_BRANCHES environment variable to specify a list of fallback branches.")
sys.exit()
add_to_manifest([adding], fallback_branch)
print("Syncing repository to retrieve project.")
os.system('repo sync --force-sync %s' % repo_path)
print("Repository synced!")
fetch_dependencies(repo_path, fallback_branch)
print("Done")
sys.exit()
print("Repository for %s not found in the LineageOS Github repository list. If this is in error, you may need to manually add it to your local_manifests/roomservice.xml." % device)
|
[] |
[] |
[
"ROOMSERVICE_BRANCHES"
] |
[]
|
["ROOMSERVICE_BRANCHES"]
|
python
| 1 | 0 | |
certbot-dns-luadns/setup.py
|
from distutils.version import LooseVersion
import os
import sys
from setuptools import __version__ as setuptools_version
from setuptools import find_packages
from setuptools import setup
version = '1.11.0.dev0'
# Remember to update local-oldest-requirements.txt when changing the minimum
# acme/certbot version.
install_requires = [
'dns-lexicon>=2.2.1', # Support for >1 TXT record per name
'setuptools',
'zope.interface',
]
if not os.environ.get('SNAP_BUILD'):
install_requires.extend([
'acme>=0.31.0',
'certbot>=1.1.0',
])
elif 'bdist_wheel' in sys.argv[1:]:
raise RuntimeError('Unset SNAP_BUILD when building wheels '
'to include certbot dependencies.')
if os.environ.get('SNAP_BUILD'):
install_requires.append('packaging')
setuptools_known_environment_markers = (LooseVersion(setuptools_version) >= LooseVersion('36.2'))
if setuptools_known_environment_markers:
install_requires.append('mock ; python_version < "3.3"')
elif 'bdist_wheel' in sys.argv[1:]:
raise RuntimeError('Error, you are trying to build certbot wheels using an old version '
'of setuptools. Version 36.2+ of setuptools is required.')
elif sys.version_info < (3,3):
install_requires.append('mock')
docs_extras = [
'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags
'sphinx_rtd_theme',
]
setup(
name='certbot-dns-luadns',
version=version,
description="LuaDNS Authenticator plugin for Certbot",
url='https://github.com/certbot/certbot',
author="Certbot Project",
author_email='[email protected]',
license='Apache License 2.0',
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Plugins',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Security',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Networking',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
],
packages=find_packages(),
include_package_data=True,
install_requires=install_requires,
extras_require={
'docs': docs_extras,
},
entry_points={
'certbot.plugins': [
'dns-luadns = certbot_dns_luadns._internal.dns_luadns:Authenticator',
],
},
)
|
[] |
[] |
[
"SNAP_BUILD"
] |
[]
|
["SNAP_BUILD"]
|
python
| 1 | 0 | |
command_test.go
|
package webdriver
import (
"fmt"
"os"
"os/exec"
"strings"
"testing"
"time"
)
// fakeExecCommand is a replacement for `exec.Command` that we can control
// using the TestHelperProcess function.
//
// For more information, see:
// * https://npf.io/2015/06/testing-exec-command/
// * https://golang.org/src/os/exec/exec_test.go
func fakeExecCommand(command string, args ...string) *exec.Cmd {
// Use `go test` to run the `TestHelperProcess` test with our arguments.
cs := []string{"-test.run=TestHelperProcess", "--", command}
cs = append(cs, args...)
cmd := exec.Command(os.Args[0], cs...)
cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
return cmd
}
func TestHelperProcess(t *testing.T) {
// If this function (which masquerades as a test) is run on its own, then
// just return quietly.
if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
return
}
args := os.Args
for len(args) > 0 {
if args[0] == "--" {
args = args[1:]
break
}
args = args[1:]
}
if len(args) == 0 {
fmt.Fprintf(os.Stderr, "No command\n")
os.Exit(2)
}
cmd, args := args[0], args[1:]
switch cmd {
case "echo":
fmt.Printf("%s\n", strings.Join(args, " "))
os.Exit(0)
case "Xvfb":
// Print out the X11 screen of "1".
screenNumber := "1"
file := os.NewFile(uintptr(3), "pipe")
_, err := file.Write([]byte(screenNumber + "\n"))
if err != nil {
panic(err)
}
time.Sleep(time.Second * 3)
file.Close()
os.Exit(0)
case "xauth":
os.Exit(0)
}
fmt.Fprintf(os.Stderr, "%s: command not found\n", cmd)
os.Exit(127)
}
func TestFakeExecCommand(t *testing.T) {
cmd := fakeExecCommand("echo", "hello", "world")
outputBytes, err := cmd.Output()
if err != nil {
t.Fatalf("Could not get output: %s", err.Error())
}
outputString := string(outputBytes)
if outputString != "hello world\n" {
t.Fatalf("outputString = %s, want = %s", outputString, "hello world\n")
}
}
|
[
"\"GO_WANT_HELPER_PROCESS\""
] |
[] |
[
"GO_WANT_HELPER_PROCESS"
] |
[]
|
["GO_WANT_HELPER_PROCESS"]
|
go
| 1 | 0 | |
vendor/github.com/openshift/installer/pkg/asset/installconfig/ibmcloud/client.go
|
package ibmcloud
import (
"context"
"fmt"
"net/http"
"os"
"time"
"github.com/IBM/go-sdk-core/v5/core"
"github.com/IBM/networking-go-sdk/dnsrecordsv1"
"github.com/IBM/networking-go-sdk/zonesv1"
"github.com/IBM/platform-services-go-sdk/iamidentityv1"
"github.com/IBM/platform-services-go-sdk/resourcecontrollerv2"
"github.com/IBM/platform-services-go-sdk/resourcemanagerv2"
"github.com/IBM/vpc-go-sdk/vpcv1"
"github.com/pkg/errors"
)
//go:generate mockgen -source=./client.go -destination=./mock/ibmcloudclient_generated.go -package=mock
// API represents the calls made to the API.
type API interface {
GetAuthenticatorAPIKeyDetails(ctx context.Context) (*iamidentityv1.APIKey, error)
GetCISInstance(ctx context.Context, crnstr string) (*resourcecontrollerv2.ResourceInstance, error)
GetDNSRecordsByName(ctx context.Context, crnstr string, zoneID string, recordName string) ([]dnsrecordsv1.DnsrecordDetails, error)
GetDNSZoneIDByName(ctx context.Context, name string) (string, error)
GetDNSZones(ctx context.Context) ([]DNSZoneResponse, error)
GetEncryptionKey(ctx context.Context, keyCRN string) (*EncryptionKeyResponse, error)
GetResourceGroups(ctx context.Context) ([]resourcemanagerv2.ResourceGroup, error)
GetResourceGroup(ctx context.Context, nameOrID string) (*resourcemanagerv2.ResourceGroup, error)
GetSubnet(ctx context.Context, subnetID string) (*vpcv1.Subnet, error)
GetVSIProfiles(ctx context.Context) ([]vpcv1.InstanceProfile, error)
GetVPC(ctx context.Context, vpcID string) (*vpcv1.VPC, error)
GetVPCZonesForRegion(ctx context.Context, region string) ([]string, error)
}
// Client makes calls to the IBM Cloud API.
type Client struct {
managementAPI *resourcemanagerv2.ResourceManagerV2
controllerAPI *resourcecontrollerv2.ResourceControllerV2
vpcAPI *vpcv1.VpcV1
Authenticator *core.IamAuthenticator
}
// cisServiceID is the Cloud Internet Services' catalog service ID.
const cisServiceID = "75874a60-cb12-11e7-948e-37ac098eb1b9"
// VPCResourceNotFoundError represents an error for a VPC resource that is not found.
type VPCResourceNotFoundError struct{}
// Error returns the error message for the VPCResourceNotFoundError error type.
func (e *VPCResourceNotFoundError) Error() string {
return "Not Found"
}
// DNSZoneResponse represents a DNS zone response.
type DNSZoneResponse struct {
// Name is the domain name of the zone.
Name string
// ID is the zone's ID.
ID string
// CISInstanceCRN is the IBM Cloud Resource Name for the CIS instance where
// the DNS zone is managed.
CISInstanceCRN string
// CISInstanceName is the display name of the CIS instance where the DNS zone
// is managed.
CISInstanceName string
// ResourceGroupID is the resource group ID of the CIS instance.
ResourceGroupID string
}
// EncryptionKeyResponse represents an encryption key response.
type EncryptionKeyResponse struct{}
// NewClient initializes a client with a session.
func NewClient() (*Client, error) {
apiKey := os.Getenv("IC_API_KEY")
authenticator := &core.IamAuthenticator{
ApiKey: apiKey,
}
client := &Client{
Authenticator: authenticator,
}
if err := client.loadSDKServices(); err != nil {
return nil, errors.Wrap(err, "failed to load IBM SDK services")
}
return client, nil
}
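// loadSDKServices initializes the Resource Manager, Resource Controller, and VPC service clients used by this Client.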
func (c *Client) loadSDKServices() error {
servicesToLoad := []func() error{
c.loadResourceManagementAPI,
c.loadResourceControllerAPI,
c.loadVPCV1API,
}
// Call all the load functions.
for _, fn := range servicesToLoad {
if err := fn(); err != nil {
return err
}
}
return nil
}
// GetAuthenticatorAPIKeyDetails gets detailed information on the API key used
// for authentication to the IBM Cloud APIs
func (c *Client) GetAuthenticatorAPIKeyDetails(ctx context.Context) (*iamidentityv1.APIKey, error) {
iamIdentityService, err := iamidentityv1.NewIamIdentityV1(&iamidentityv1.IamIdentityV1Options{
Authenticator: c.Authenticator,
})
if err != nil {
return nil, err
}
options := iamIdentityService.NewGetAPIKeysDetailsOptions()
options.SetIamAPIKey(c.Authenticator.ApiKey)
details, _, err := iamIdentityService.GetAPIKeysDetailsWithContext(ctx, options)
if err != nil {
return nil, err
}
return details, nil
}
// GetCISInstance gets a specific Cloud Internet Services instance by its CRN.
func (c *Client) GetCISInstance(ctx context.Context, crnstr string) (*resourcecontrollerv2.ResourceInstance, error) {
_, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
options := c.controllerAPI.NewGetResourceInstanceOptions(crnstr)
resourceInstance, _, err := c.controllerAPI.GetResourceInstance(options)
if err != nil {
return nil, errors.Wrap(err, "failed to get cis instances")
}
return resourceInstance, nil
}
// GetDNSRecordsByName gets DNS records in specific Cloud Internet Services instance
// by its CRN, zone ID, and DNS record name.
func (c *Client) GetDNSRecordsByName(ctx context.Context, crnstr string, zoneID string, recordName string) ([]dnsrecordsv1.DnsrecordDetails, error) {
// Set CIS DNS record service
dnsService, err := dnsrecordsv1.NewDnsRecordsV1(&dnsrecordsv1.DnsRecordsV1Options{
Authenticator: c.Authenticator,
Crn: core.StringPtr(crnstr),
ZoneIdentifier: core.StringPtr(zoneID),
})
if err != nil {
return nil, err
}
// Get CIS DNS records by name
records, _, err := dnsService.ListAllDnsRecordsWithContext(ctx, &dnsrecordsv1.ListAllDnsRecordsOptions{
Name: core.StringPtr(recordName),
})
if err != nil {
return nil, errors.Wrap(err, "could not retrieve DNS records")
}
return records.Result, nil
}
// GetDNSZoneIDByName gets the CIS zone ID from its domain name.
func (c *Client) GetDNSZoneIDByName(ctx context.Context, name string) (string, error) {
zones, err := c.GetDNSZones(ctx)
if err != nil {
return "", err
}
for _, z := range zones {
if z.Name == name {
return z.ID, nil
}
}
return "", fmt.Errorf("DNS zone %q not found", name)
}
// GetDNSZones returns all of the active DNS zones managed by CIS.
func (c *Client) GetDNSZones(ctx context.Context) ([]DNSZoneResponse, error) {
_, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
options := c.controllerAPI.NewListResourceInstancesOptions()
options.SetResourceID(cisServiceID)
listResourceInstancesResponse, _, err := c.controllerAPI.ListResourceInstances(options)
if err != nil {
return nil, errors.Wrap(err, "failed to get cis instance")
}
var allZones []DNSZoneResponse
for _, instance := range listResourceInstancesResponse.Resources {
crnstr := instance.CRN
zonesService, err := zonesv1.NewZonesV1(&zonesv1.ZonesV1Options{
Authenticator: c.Authenticator,
Crn: crnstr,
})
if err != nil {
return nil, errors.Wrap(err, "failed to list DNS zones")
}
options := zonesService.NewListZonesOptions()
listZonesResponse, _, err := zonesService.ListZones(options)
if err != nil || listZonesResponse == nil {
return nil, err
}
for _, zone := range listZonesResponse.Result {
if *zone.Status == "active" {
zoneStruct := DNSZoneResponse{
Name: *zone.Name,
ID: *zone.ID,
CISInstanceCRN: *instance.CRN,
CISInstanceName: *instance.Name,
ResourceGroupID: *instance.ResourceGroupID,
}
allZones = append(allZones, zoneStruct)
}
}
}
return allZones, nil
}
// GetEncryptionKey gets data for an encryption key
func (c *Client) GetEncryptionKey(ctx context.Context, keyCRN string) (*EncryptionKeyResponse, error) {
// TODO: IBM: Call KMS / Hyper Protect Crypto APIs.
return &EncryptionKeyResponse{}, nil
}
// GetResourceGroup gets a resource group by its name or ID.
func (c *Client) GetResourceGroup(ctx context.Context, nameOrID string) (*resourcemanagerv2.ResourceGroup, error) {
_, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
groups, err := c.GetResourceGroups(ctx)
if err != nil {
return nil, err
}
for idx, rg := range groups {
if *rg.ID == nameOrID || *rg.Name == nameOrID {
return &groups[idx], nil
}
}
return nil, fmt.Errorf("resource group %q not found", nameOrID)
}
// GetResourceGroups gets the list of resource groups.
func (c *Client) GetResourceGroups(ctx context.Context) ([]resourcemanagerv2.ResourceGroup, error) {
_, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
apikey, err := c.GetAuthenticatorAPIKeyDetails(ctx)
if err != nil {
return nil, err
}
options := c.managementAPI.NewListResourceGroupsOptions()
options.SetAccountID(*apikey.AccountID)
listResourceGroupsResponse, _, err := c.managementAPI.ListResourceGroupsWithContext(ctx, options)
if err != nil {
return nil, err
}
return listResourceGroupsResponse.Resources, nil
}
// GetSubnet gets a subnet by its ID.
func (c *Client) GetSubnet(ctx context.Context, subnetID string) (*vpcv1.Subnet, error) {
_, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
subnet, detailedResponse, err := c.vpcAPI.GetSubnet(&vpcv1.GetSubnetOptions{ID: &subnetID})
if detailedResponse != nil && detailedResponse.GetStatusCode() == http.StatusNotFound {
return nil, &VPCResourceNotFoundError{}
}
return subnet, err
}
// GetVSIProfiles gets a list of all VSI profiles.
func (c *Client) GetVSIProfiles(ctx context.Context) ([]vpcv1.InstanceProfile, error) {
listInstanceProfilesOptions := c.vpcAPI.NewListInstanceProfilesOptions()
profiles, _, err := c.vpcAPI.ListInstanceProfilesWithContext(ctx, listInstanceProfilesOptions)
if err != nil {
return nil, errors.Wrap(err, "failed to list vpc vsi profiles")
}
return profiles.Profiles, nil
}
// GetVPC gets a VPC by its ID.
func (c *Client) GetVPC(ctx context.Context, vpcID string) (*vpcv1.VPC, error) {
_, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
regions, err := c.getVPCRegions(ctx)
if err != nil {
return nil, err
}
for _, region := range regions {
err := c.vpcAPI.SetServiceURL(fmt.Sprintf("%s/v1", *region.Endpoint))
if err != nil {
return nil, errors.Wrap(err, "failed to set vpc api service url")
}
if vpc, detailedResponse, err := c.vpcAPI.GetVPC(c.vpcAPI.NewGetVPCOptions(vpcID)); err != nil {
if detailedResponse == nil || detailedResponse.GetStatusCode() != http.StatusNotFound {
return nil, err
}
} else if vpc != nil {
return vpc, nil
}
}
return nil, &VPCResourceNotFoundError{}
}
// GetVPCZonesForRegion gets the supported zones for a VPC region.
func (c *Client) GetVPCZonesForRegion(ctx context.Context, region string) ([]string, error) {
_, cancel := context.WithTimeout(ctx, 1*time.Minute)
defer cancel()
regionZonesOptions := c.vpcAPI.NewListRegionZonesOptions(region)
zones, _, err := c.vpcAPI.ListRegionZonesWithContext(ctx, regionZonesOptions)
if err != nil {
return nil, err
}
response := make([]string, len(zones.Zones))
for idx, zone := range zones.Zones {
response[idx] = *zone.Name
}
return response, err
}
func (c *Client) getVPCRegions(ctx context.Context) ([]vpcv1.Region, error) {
listRegionsOptions := c.vpcAPI.NewListRegionsOptions()
listRegionsResponse, _, err := c.vpcAPI.ListRegionsWithContext(ctx, listRegionsOptions)
if err != nil {
return nil, errors.Wrap(err, "failed to list vpc regions")
}
return listRegionsResponse.Regions, nil
}
func (c *Client) loadResourceManagementAPI() error {
options := &resourcemanagerv2.ResourceManagerV2Options{
Authenticator: c.Authenticator,
}
resourceManagerV2Service, err := resourcemanagerv2.NewResourceManagerV2(options)
if err != nil {
return err
}
c.managementAPI = resourceManagerV2Service
return nil
}
func (c *Client) loadResourceControllerAPI() error {
options := &resourcecontrollerv2.ResourceControllerV2Options{
Authenticator: c.Authenticator,
}
resourceControllerV2Service, err := resourcecontrollerv2.NewResourceControllerV2(options)
if err != nil {
return err
}
c.controllerAPI = resourceControllerV2Service
return nil
}
func (c *Client) loadVPCV1API() error {
vpcService, err := vpcv1.NewVpcV1(&vpcv1.VpcV1Options{
Authenticator: c.Authenticator,
})
if err != nil {
return err
}
c.vpcAPI = vpcService
return nil
}
|
[
"\"IC_API_KEY\""
] |
[] |
[
"IC_API_KEY"
] |
[]
|
["IC_API_KEY"]
|
go
| 1 | 0 | |
cmd/serve/main.go
|
package main
import (
"flag"
"os"
"github.com/daison12006013/lucid/internal/kernel"
"github.com/daison12006013/lucid/pkg/env"
)
func init() {
env.LoadEnv()
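// Flag defaults come from the HOST and PORT environment variables; explicit flag values are written back via os.Setenv below.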
var host string
var port string
flag.StringVar(&host, "host", os.Getenv("HOST"), "Host to use")
flag.StringVar(&port, "port", os.Getenv("PORT"), "Port to use")
flag.Parse()
if len(host) > 0 {
os.Setenv("HOST", host)
}
if len(port) > 0 {
os.Setenv("PORT", port)
}
}
func main() {
kernel.
Init().
Run().
WithGracefulShutdown()
}
|
[
"\"HOST\"",
"\"PORT\""
] |
[] |
[
"PORT",
"HOST"
] |
[]
|
["PORT", "HOST"]
|
go
| 2 | 0 | |
setup.py
|
# Copyright 2022 MosaicML Composer authors
# SPDX-License-Identifier: Apache-2.0
"""Composer package setup."""
import os
import site
import sys
import textwrap
import setuptools
from setuptools import setup
from setuptools.command.develop import develop as develop_orig
_IS_ROOT = os.getuid() == 0
_IS_USER = '--user' in sys.argv[1:]
_IS_VIRTUALENV = 'VIRTUAL_ENV' in os.environ
# From https://stackoverflow.com/questions/51292333/how-to-tell-from-setup-py-if-the-module-is-being-installed-in-editable-mode
class develop(develop_orig):
"""Override the ``develop`` class to error if attempting an editable install as root."""
def run(self):
if _IS_ROOT and (not _IS_VIRTUALENV) and (not _IS_USER):
raise RuntimeError(
textwrap.dedent("""\
When installing in editable mode as root outside of a virtual environment,
please specify `--user`. Editable installs as the root user outside of a virtual environment
do not work without the `--user` flag. Please instead run something like: `pip install --user -e .`"""
))
super().run()
# From https://github.com/pypa/pip/issues/7953#issuecomment-645133255
site.ENABLE_USER_SITE = _IS_USER
def package_files(prefix: str, directory: str, extension: str):
"""Get all the files to package."""
# from https://stackoverflow.com/a/36693250
paths = []
for (path, _, filenames) in os.walk(os.path.join(prefix, directory)):
for filename in filenames:
if filename.endswith(extension):
paths.append(os.path.relpath(os.path.join(path, filename), prefix))
return paths
with open('README.md', 'r', encoding='utf-8') as fh:
long_description = fh.read()
# Hide the content between <!-- SETUPTOOLS_LONG_DESCRIPTION_HIDE_BEGIN --> and
# <!-- SETUPTOOLS_LONG_DESCRIPTION_HIDE_END --> tags in the README
while True:
start_tag = '<!-- SETUPTOOLS_LONG_DESCRIPTION_HIDE_BEGIN -->'
end_tag = '<!-- SETUPTOOLS_LONG_DESCRIPTION_HIDE_END -->'
start = long_description.find(start_tag)
end = long_description.find(end_tag)
if start == -1:
assert end == -1, 'there should be a balanced number of start and ends'
break
else:
assert end != -1, 'there should be a balanced number of start and ends'
long_description = long_description[:start] + long_description[end + len(end_tag):]
install_requires = [
'pyyaml>=6.0,<7',
'tqdm>=4.62.3,<5',
'torchmetrics>=0.7.0,<0.8',
'torch_optimizer>=0.1.0,<0.2',
'torchvision>=0.10.0', # torchvision has strict pytorch requirements
'torch>=1.9,<2',
'yahp>=0.1.1,<0.2',
'requests>=2.26.0,<3',
'numpy>=1.21.5,<2',
'psutil>=5.8.0,<6',
'coolname>=1.1.0,<2',
'py-cpuinfo>=8.0.0',
]
extra_deps = {}
extra_deps['base'] = []
extra_deps['dev'] = [
# Imports for docs builds and running tests
# Pinning versions strictly to avoid random test failures.
# Should manually update dependency versions occasionally.
'custom_inherit==2.3.2',
'junitparser==2.4.3',
'coverage[toml]==6.3.2',
'fasteners==0.17.3', # object store tests require fasteners
'pytest==7.1.0',
'toml==0.10.2',
'ipython==7.32.0',
'ipykernel==6.9.2',
'jupyter==1.0.0',
'yamllint==1.26.3',
'pytest-timeout==2.1.0',
'recommonmark==0.7.1',
'sphinx==4.4.0',
'pre-commit>=2.18.1,<3',
# embedding md in rst requires docutils>=0.17. See
# https://myst-parser.readthedocs.io/en/latest/sphinx/use.html?highlight=parser#include-markdown-files-into-an-rst-file
'docutils==0.17.1',
'sphinx_markdown_tables==0.0.15',
'sphinx-argparse==0.3.1',
'sphinxcontrib.katex==0.8.6',
'sphinxext.opengraph==0.6.1',
'sphinxemoji==0.2.0',
'furo==2022.3.4',
'sphinx-copybutton==0.5.0',
'tabulate==0.8.9', # for auto-generating tables
'testbook==0.4.2',
'myst-parser==0.16.1',
'sphinx_panels==0.6.0',
'sphinxcontrib-images==0.9.4',
'pytest_codeblocks==0.15.0',
'traitlets==5.1.1', # required by testbook. Version 5.2.2 has an import bug, so pinning to 5.1.1, which worked previously.
'nbsphinx==0.8.8',
'pandoc==2.2',
'pypandoc==1.8.1',
'GitPython==3.1.27',
'moto[s3]>=3.1.12,<3.2',
]
extra_deps['deepspeed'] = [
'deepspeed==0.5.10', # TODO should this be >=0.5.10,<0.6
]
extra_deps['wandb'] = [
'wandb>=0.12.17,<0.13',
]
extra_deps['unet'] = [
'monai>=0.8.0,<0.9',
'scikit-learn>=1.0.1,<2',
]
extra_deps['vit'] = [
'vit_pytorch==0.27',
]
extra_deps['timm'] = [
'timm>=0.5.4,<0.6',
]
extra_deps['coco'] = [
'pycocotools>=2.0.4,<3',
]
extra_deps['nlp'] = [
'transformers>=4.11,<5',
'datasets>=1.14,<2',
]
extra_deps['mlperf'] = [
# TODO: use pip when available: https://github.com/mlcommons/logging/issues/218
# "mlperf_logging @ git+https://github.com/mlperf/logging.git",
'py-cpuinfo>=8.0.0,<9',
]
extra_deps['streaming'] = [
'boto3>=1.21.45,<2',
'paramiko>=2.11.0,<3',
]
extra_deps['libcloud'] = [
'apache-libcloud>=3.3.1,<4',
]
extra_deps['onnx'] = [
'onnx>=1.11.0,<2',
'onnxruntime>=1.11.0,<2',
]
extra_deps['all'] = set(dep for deps in extra_deps.values() for dep in deps)
composer_data_files = ['py.typed']
composer_data_files += package_files('composer', 'yamls', '.yaml')
composer_data_files += package_files('composer', 'algorithms', '.json')
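# The published package name can be overridden via the COMPOSER_PACKAGE_NAME environment variable (defaults to 'mosaicml').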
package_name = os.environ.get('COMPOSER_PACKAGE_NAME', 'mosaicml')
if package_name != 'mosaicml':
print(f'Building composer as `{package_name}`', file=sys.stderr)
setup(name=package_name,
version='0.7.0',
author='MosaicML',
author_email='[email protected]',
description='Composer provides well-engineered implementations of efficient training methods to give '
'the tools that help you train a better model for cheaper.',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/mosaicml/composer',
include_package_data=True,
package_data={
'composer': composer_data_files,
},
packages=setuptools.find_packages(exclude=['docker*', 'examples*', 'scripts*', 'tests*']),
classifiers=[
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
install_requires=install_requires,
entry_points={
'console_scripts':
['composer = composer.cli.launcher:main', 'composer_collect_env = composer.utils.collect_env:main'],
},
extras_require=extra_deps,
dependency_links=['https://developer.download.nvidia.com/compute/redist'],
python_requires='>=3.7',
ext_package='composer',
cmdclass={'develop': develop})
# only visible if user installs with verbose -v flag
# Printing to stderr so as not to interfere with setup.py CLI output (e.g. --version)
print('*' * 20, file=sys.stderr)
print(textwrap.dedent("""\
NOTE: For best performance, we recommend installing Pillow-SIMD
for accelerated image processing operations. To install:
\t pip uninstall pillow && pip install pillow-simd"""),
file=sys.stderr)
print('*' * 20, file=sys.stderr)
|
[] |
[] |
[
"COMPOSER_PACKAGE_NAME"
] |
[]
|
["COMPOSER_PACKAGE_NAME"]
|
python
| 1 | 0 | |
cinder/openstack/common/config/generator.py
|
# Copyright 2012 SINA Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Extracts OpenStack config option info from module(s)."""
from __future__ import print_function
import imp
import os
import re
import socket
import sys
import textwrap
from oslo.config import cfg
import six
from cinder.openstack.common import gettextutils
from cinder.openstack.common import importutils
gettextutils.install('cinder')
STROPT = "StrOpt"
BOOLOPT = "BoolOpt"
INTOPT = "IntOpt"
FLOATOPT = "FloatOpt"
LISTOPT = "ListOpt"
MULTISTROPT = "MultiStrOpt"
OPT_TYPES = {
STROPT: 'string value',
BOOLOPT: 'boolean value',
INTOPT: 'integer value',
FLOATOPT: 'floating point value',
LISTOPT: 'list value',
MULTISTROPT: 'multi valued',
}
OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
FLOATOPT, LISTOPT,
MULTISTROPT]))
PY_EXT = ".py"
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
"../../../../"))
WORDWRAP_WIDTH = 60
def generate(srcfiles):
mods_by_pkg = dict()
for filepath in srcfiles:
pkg_name = filepath.split(os.sep)[1]
mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]),
os.path.basename(filepath).split('.')[0]])
mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
# NOTE(lzyeval): place top level modules before packages
pkg_names = filter(lambda x: x.endswith(PY_EXT), mods_by_pkg.keys())
pkg_names.sort()
ext_names = filter(lambda x: x not in pkg_names, mods_by_pkg.keys())
ext_names.sort()
pkg_names.extend(ext_names)
# opts_by_group is a mapping of group name to an options list
# The options list is a list of (module, options) tuples
opts_by_group = {'DEFAULT': []}
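# Additional modules to scan may be supplied via the OSLO_CONFIG_GENERATOR_EXTRA_MODULES environment variable (comma-separated).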
extra_modules = os.getenv("OSLO_CONFIG_GENERATOR_EXTRA_MODULES", "")
if extra_modules:
for module_name in extra_modules.split(','):
module_name = module_name.strip()
module = _import_module(module_name)
if module:
for group, opts in _list_opts(module):
opts_by_group.setdefault(group, []).append((module_name,
opts))
for pkg_name in pkg_names:
mods = mods_by_pkg.get(pkg_name)
mods.sort()
for mod_str in mods:
if mod_str.endswith('.__init__'):
mod_str = mod_str[:mod_str.rfind(".")]
mod_obj = _import_module(mod_str)
if not mod_obj:
raise RuntimeError("Unable to import module %s" % mod_str)
for group, opts in _list_opts(mod_obj):
opts_by_group.setdefault(group, []).append((mod_str, opts))
print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
for group, opts in opts_by_group.items():
print_group_opts(group, opts)
def _import_module(mod_str):
try:
if mod_str.startswith('bin.'):
imp.load_source(mod_str[4:], os.path.join('bin', mod_str[4:]))
return sys.modules[mod_str[4:]]
else:
return importutils.import_module(mod_str)
except Exception as e:
sys.stderr.write("Error importing module %s: %s\n" % (mod_str, str(e)))
return None
def _is_in_group(opt, group):
"Check if opt is in group."
for key, value in group._opts.items():
if value['opt'] == opt:
return True
return False
def _guess_groups(opt, mod_obj):
# is it in the DEFAULT group?
if _is_in_group(opt, cfg.CONF):
return 'DEFAULT'
# what other groups is it in?
for key, value in cfg.CONF.items():
if isinstance(value, cfg.CONF.GroupAttr):
if _is_in_group(opt, value._group):
return value._group.name
raise RuntimeError(
"Unable to find group for option %s, "
"maybe it's defined twice in the same group?"
% opt.name
)
def _list_opts(obj):
def is_opt(o):
return (isinstance(o, cfg.Opt) and
not isinstance(o, cfg.SubCommandOpt))
opts = list()
for attr_str in dir(obj):
attr_obj = getattr(obj, attr_str)
if is_opt(attr_obj):
opts.append(attr_obj)
elif (isinstance(attr_obj, list) and
all(map(lambda x: is_opt(x), attr_obj))):
opts.extend(attr_obj)
ret = {}
for opt in opts:
ret.setdefault(_guess_groups(opt, obj), []).append(opt)
return ret.items()
def print_group_opts(group, opts_by_module):
print("[%s]" % group)
print('')
for mod, opts in opts_by_module:
print('#')
print('# Options defined in %s' % mod)
print('#')
print('')
for opt in opts:
_print_opt(opt)
print('')
def _get_my_ip():
try:
csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
csock.connect(('8.8.8.8', 80))
(addr, port) = csock.getsockname()
csock.close()
return addr
except socket.error:
return None
def _sanitize_default(name, value):
"""Set up a reasonably sensible default for pybasedir, my_ip and host."""
if value.startswith(sys.prefix):
# NOTE(jd) Don't use os.path.join, because it is likely to think the
# second part is an absolute pathname and therefore drop the first
# part.
value = os.path.normpath("/usr/" + value[len(sys.prefix):])
elif value.startswith(BASEDIR):
return value.replace(BASEDIR, '/usr/lib/python/site-packages')
elif BASEDIR in value:
return value.replace(BASEDIR, '')
elif value == _get_my_ip():
return '10.0.0.1'
elif value == socket.gethostname() and 'host' in name:
return 'cinder'
elif value.strip() != value:
return '"%s"' % value
return value
def _print_opt(opt):
opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
if not opt_help:
sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
opt_help = ""
opt_type = None
try:
opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
except (ValueError, AttributeError) as err:
sys.stderr.write("%s\n" % str(err))
sys.exit(1)
opt_help += ' (' + OPT_TYPES[opt_type] + ')'
print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH)))
if opt.deprecated_opts:
for deprecated_opt in opt.deprecated_opts:
if deprecated_opt.name:
deprecated_group = (deprecated_opt.group if
deprecated_opt.group else "DEFAULT")
print('# Deprecated group/name - [%s]/%s' %
(deprecated_group,
deprecated_opt.name))
try:
if opt_default is None:
print('#%s=<None>' % opt_name)
elif opt_type == STROPT:
assert(isinstance(opt_default, six.string_types))
print('#%s=%s' % (opt_name, _sanitize_default(opt_name,
opt_default)))
elif opt_type == BOOLOPT:
assert(isinstance(opt_default, bool))
print('#%s=%s' % (opt_name, str(opt_default).lower()))
elif opt_type == INTOPT:
assert(isinstance(opt_default, int) and
not isinstance(opt_default, bool))
print('#%s=%s' % (opt_name, opt_default))
elif opt_type == FLOATOPT:
assert(isinstance(opt_default, float))
print('#%s=%s' % (opt_name, opt_default))
elif opt_type == LISTOPT:
assert(isinstance(opt_default, list))
print('#%s=%s' % (opt_name, ','.join(opt_default)))
elif opt_type == MULTISTROPT:
assert(isinstance(opt_default, list))
if not opt_default:
opt_default = ['']
for default in opt_default:
print('#%s=%s' % (opt_name, default))
print('')
except Exception:
sys.stderr.write('Error in option "%s"\n' % opt_name)
sys.exit(1)
def main():
generate(sys.argv[1:])
if __name__ == '__main__':
main()
|
[] |
[] |
[
"OSLO_CONFIG_GENERATOR_EXTRA_MODULES"
] |
[]
|
["OSLO_CONFIG_GENERATOR_EXTRA_MODULES"]
|
python
| 1 | 0 | |
conans/client/conan_api.py
|
import os
import sys
import requests
from collections import OrderedDict
import conans
from conans import __version__ as client_version
from conans.client.cmd.create import create
from conans.client.hook_manager import HookManager
from conans.client.recorder.action_recorder import ActionRecorder
from conans.client.client_cache import ClientCache
from conans.client.conf import MIN_SERVER_COMPATIBLE_VERSION, ConanClientConfigParser
from conans.client.manager import ConanManager
from conans.client.migrations import ClientMigrator
from conans.client.output import ConanOutput, ScopedOutput
from conans.client.profile_loader import read_profile, profile_from_args, \
read_conaninfo_profile
from conans.client.recorder.search_recorder import SearchRecorder
from conans.client.recorder.upload_recoder import UploadRecorder
from conans.client.remote_manager import RemoteManager
from conans.client.remote_registry import RemoteRegistry
from conans.client.rest.auth_manager import ConanApiAuthManager
from conans.client.rest.rest_client import RestApiClient
from conans.client.rest.conan_requester import ConanRequester
from conans.client.rest.version_checker import VersionCheckerRequester
from conans.client.runner import ConanRunner
from conans.client.store.localdb import LocalDB
from conans.client.cmd.test import PackageTester
from conans.client.userio import UserIO
from conans.errors import ConanException
from conans.model.ref import ConanFileReference, PackageReference, check_valid_ref
from conans.model.version import Version
from conans.paths import get_conan_user_home, CONANINFO, BUILD_INFO
from conans.util.env_reader import get_env
from conans.util.files import save_files, exception_message_safe, mkdir
from conans.util.log import configure_logger
from conans.util.tracer import log_command, log_exception
from conans.tools import set_global_instances
from conans.client.cmd.uploader import CmdUpload
from conans.client.cmd.profile import cmd_profile_update, cmd_profile_get,\
cmd_profile_delete_key, cmd_profile_create, cmd_profile_list
from conans.client.cmd.search import Search
from conans.client.cmd.user import users_clean, users_list, user_set
from conans.client.importer import undo_imports, run_imports
from conans.client.cmd.export import cmd_export, export_alias, export_source, export_recipe
from conans.unicode import get_cwd
from conans.client.remover import ConanRemover
from conans.client.cmd.download import download
from conans.model.workspace import Workspace
from conans.client.graph.graph_manager import GraphManager
from conans.client.loader import ConanFileLoader
from conans.client.graph.proxy import ConanProxy
from conans.client.graph.python_requires import ConanPythonRequire
from conans.client.graph.range_resolver import RangeResolver
from conans.client import packager
from conans.client.source import config_source_local
from conans.client.cmd.build import build
from conans.client.cmd.export_pkg import export_pkg
from conans.client import tools
default_manifest_folder = '.conan_manifests'
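# The HTTP request timeout is taken from the CONAN_REQUEST_TIMEOUT environment variable; a non-numeric value raises a ConanException.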
def get_request_timeout():
timeout = os.getenv("CONAN_REQUEST_TIMEOUT")
try:
return float(timeout) if timeout is not None else None
except ValueError:
raise ConanException("Specify a numeric parameter for 'request_timeout'")
def get_basic_requester(client_cache):
requester = requests.Session()
# Manage the verify and the client certificates and setup proxies
return ConanRequester(requester, client_cache, get_request_timeout())
def api_method(f):
def wrapper(*args, **kwargs):
the_self = args[0]
try:
curdir = get_cwd()
log_command(f.__name__, kwargs)
with tools.environment_append(the_self._client_cache.conan_config.env_vars):
# Patch the globals in tools
return f(*args, **kwargs)
except Exception as exc:
msg = exception_message_safe(exc)
try:
log_exception(exc, msg)
except BaseException:
pass
raise
finally:
os.chdir(curdir)
return wrapper
def _make_abs_path(path, cwd=None, default=None):
"""convert 'path' to absolute if necessary (could be already absolute)
if not defined (empty, or None), will return 'default' one or 'cwd'
"""
cwd = cwd or get_cwd()
if not path:
abs_path = default or cwd
elif os.path.isabs(path):
abs_path = path
else:
abs_path = os.path.normpath(os.path.join(cwd, path))
return abs_path
def _get_conanfile_path(path, cwd, py):
"""
param py= True: Must be .py, False: Must be .txt, None: Try .py, then .txt
"""
candidate_paths = list()
path = _make_abs_path(path, cwd)
if os.path.isdir(path): # Can be a folder
if py:
path = os.path.join(path, "conanfile.py")
candidate_paths.append(path)
elif py is False:
path = os.path.join(path, "conanfile.txt")
candidate_paths.append(path)
else:
path_py = os.path.join(path, "conanfile.py")
candidate_paths.append(path_py)
if os.path.exists(path_py):
path = path_py
else:
path = os.path.join(path, "conanfile.txt")
candidate_paths.append(path)
else:
candidate_paths.append(path)
if not os.path.isfile(path): # Must exist
raise ConanException("Conanfile not found at %s" % " or ".join(candidate_paths))
if py and not path.endswith(".py"):
raise ConanException("A conanfile.py is needed, " + path + " is not acceptable")
return path
class ConanAPIV1(object):
@staticmethod
def instance_remote_manager(requester, client_cache, user_io, _client_version,
min_server_compatible_version, hook_manager):
# Verify client version against remotes
version_checker_req = VersionCheckerRequester(requester, _client_version,
min_server_compatible_version,
user_io.out)
# To handle remote connections
put_headers = client_cache.read_put_headers()
rest_api_client = RestApiClient(user_io.out, requester=version_checker_req,
put_headers=put_headers)
# To store user and token
localdb = LocalDB(client_cache.localdb)
# Wraps RestApiClient to add authentication support (same interface)
auth_manager = ConanApiAuthManager(rest_api_client, user_io, localdb)
# Handle remote connections
remote_manager = RemoteManager(client_cache, auth_manager, user_io.out, hook_manager)
return localdb, rest_api_client, remote_manager
@staticmethod
def factory(interactive=None):
"""Factory"""
# Respect color env setting or check tty if unset
color_set = "CONAN_COLOR_DISPLAY" in os.environ
if ((color_set and get_env("CONAN_COLOR_DISPLAY", 1))
or (not color_set
and hasattr(sys.stdout, "isatty")
and sys.stdout.isatty())):
import colorama
if get_env("PYCHARM_HOSTED"): # in PyCharm disable convert/strip
colorama.init(convert=False, strip=False)
else:
colorama.init()
color = True
else:
color = False
out = ConanOutput(sys.stdout, color)
user_io = UserIO(out=out)
try:
user_home = get_conan_user_home()
client_cache = migrate_and_get_client_cache(user_home, out)
sys.path.append(os.path.join(user_home, "python"))
except Exception as e:
out.error(str(e))
raise
with tools.environment_append(client_cache.conan_config.env_vars):
# Adjust CONAN_LOGGING_LEVEL with the environment variables just read
conans.util.log.logger = configure_logger()
# Create Hook Manager
hook_manager = HookManager(client_cache.hooks_path, get_env("CONAN_HOOKS", list()),
user_io.out)
# Get the new command instance after migrations have been done
requester = get_basic_requester(client_cache)
_, _, remote_manager = ConanAPIV1.instance_remote_manager(
requester,
client_cache, user_io,
Version(client_version),
Version(MIN_SERVER_COMPATIBLE_VERSION),
hook_manager)
# Adjust global tool variables
set_global_instances(out, requester)
# Settings preprocessor
if interactive is None:
interactive = not get_env("CONAN_NON_INTERACTIVE", False)
conan = ConanAPIV1(client_cache, user_io, get_conan_runner(), remote_manager,
hook_manager, interactive=interactive)
return conan, client_cache, user_io
def __init__(self, client_cache, user_io, runner, remote_manager, hook_manager,
interactive=True):
assert isinstance(user_io, UserIO)
assert isinstance(client_cache, ClientCache)
self._client_cache = client_cache
self._user_io = user_io
self._runner = runner
self._remote_manager = remote_manager
self._registry = RemoteRegistry(self._client_cache.registry, self._user_io.out)
if not interactive:
self._user_io.disable_input()
self._proxy = ConanProxy(client_cache, self._user_io.out, remote_manager,
registry=self._registry)
resolver = RangeResolver(self._user_io.out, client_cache, self._proxy)
python_requires = ConanPythonRequire(self._proxy, resolver)
self._loader = ConanFileLoader(self._runner, self._user_io.out, python_requires)
self._graph_manager = GraphManager(self._user_io.out, self._client_cache, self._registry,
self._remote_manager, self._loader, self._proxy,
resolver)
self._hook_manager = hook_manager
def _init_manager(self, action_recorder):
"""Every api call gets a new recorder and new manager"""
return ConanManager(self._client_cache, self._user_io,
self._remote_manager, action_recorder, self._registry,
self._graph_manager, self._hook_manager)
@api_method
def new(self, name, header=False, pure_c=False, test=False, exports_sources=False, bare=False,
cwd=None, visual_versions=None, linux_gcc_versions=None, linux_clang_versions=None,
osx_clang_versions=None, shared=None, upload_url=None, gitignore=None,
gitlab_gcc_versions=None, gitlab_clang_versions=None,
circleci_gcc_versions=None, circleci_clang_versions=None, circleci_osx_versions=None):
from conans.client.cmd.new import cmd_new
cwd = os.path.abspath(cwd or get_cwd())
files = cmd_new(name, header=header, pure_c=pure_c, test=test,
exports_sources=exports_sources, bare=bare,
visual_versions=visual_versions,
linux_gcc_versions=linux_gcc_versions,
linux_clang_versions=linux_clang_versions,
osx_clang_versions=osx_clang_versions, shared=shared,
upload_url=upload_url, gitignore=gitignore,
gitlab_gcc_versions=gitlab_gcc_versions,
gitlab_clang_versions=gitlab_clang_versions,
circleci_gcc_versions=circleci_gcc_versions,
circleci_clang_versions=circleci_clang_versions,
circleci_osx_versions=circleci_osx_versions)
save_files(cwd, files)
for f in sorted(files):
self._user_io.out.success("File saved: %s" % f)
@api_method
def inspect(self, path, attributes, remote_name=None):
try:
reference = ConanFileReference.loads(path)
except ConanException:
reference = None
cwd = get_cwd()
conanfile_path = _get_conanfile_path(path, cwd, py=True)
else:
update = True if remote_name else False
result = self._proxy.get_recipe(reference, update, update, remote_name,
ActionRecorder())
conanfile_path, _, _, reference = result
conanfile = self._loader.load_basic(conanfile_path, self._user_io.out)
result = OrderedDict()
if not attributes:
attributes = ['name', 'version', 'url', 'homepage', 'license', 'author',
'description', 'topics', 'generators', 'exports', 'exports_sources',
'short_paths', 'apply_env', 'build_policy', 'settings', 'options',
'default_options']
for attribute in attributes:
try:
attr = getattr(conanfile, attribute)
result[attribute] = attr
except AttributeError as e:
raise ConanException(str(e))
return result
@api_method
def test(self, path, reference, profile_name=None, settings=None, options=None, env=None,
remote_name=None, update=False, build_modes=None, cwd=None, test_build_folder=None):
settings = settings or []
options = options or []
env = env or []
conanfile_path = _get_conanfile_path(path, cwd, py=True)
cwd = cwd or get_cwd()
profile = profile_from_args(profile_name, settings, options, env, cwd,
self._client_cache)
reference = ConanFileReference.loads(reference)
recorder = ActionRecorder()
manager = self._init_manager(recorder)
pt = PackageTester(manager, self._user_io)
pt.install_build_and_test(conanfile_path, reference, profile, remote_name,
update, build_modes=build_modes,
test_build_folder=test_build_folder)
@api_method
def create(self, conanfile_path, name=None, version=None, user=None, channel=None,
profile_name=None, settings=None,
options=None, env=None, test_folder=None, not_export=False,
build_modes=None,
keep_source=False, keep_build=False, verify=None,
manifests=None, manifests_interactive=None,
remote_name=None, update=False, cwd=None, test_build_folder=None):
"""
API method to create a conan package
:param test_folder: default None - looks for a default 'test' or 'test_package' folder,
string - test_folder path
False - disabling tests
"""
settings = settings or []
options = options or []
env = env or []
try:
cwd = cwd or os.getcwd()
recorder = ActionRecorder()
conanfile_path = _get_conanfile_path(conanfile_path, cwd, py=True)
reference, conanfile = self._loader.load_export(conanfile_path, name, version, user,
channel)
# Make sure keep_source is set for keep_build
keep_source = keep_source or keep_build
# Forcing an export!
if not not_export:
cmd_export(conanfile_path, conanfile, reference, keep_source, self._user_io.out,
self._client_cache, self._hook_manager)
recorder.recipe_exported(reference)
if build_modes is None: # Not specified, force build the tested library
build_modes = [conanfile.name]
manifests = _parse_manifests_arguments(verify, manifests, manifests_interactive, cwd)
manifest_folder, manifest_interactive, manifest_verify = manifests
profile = profile_from_args(profile_name, settings, options, env,
cwd, self._client_cache)
manager = self._init_manager(recorder)
recorder.add_recipe_being_developed(reference)
create(reference, manager, self._user_io, profile, remote_name, update, build_modes,
manifest_folder, manifest_verify, manifest_interactive, keep_build,
test_build_folder, test_folder, conanfile_path)
return recorder.get_info()
except ConanException as exc:
recorder.error = True
exc.info = recorder.get_info()
raise
@api_method
def export_pkg(self, conanfile_path, name, channel, source_folder=None, build_folder=None,
package_folder=None, install_folder=None, profile_name=None, settings=None,
options=None, env=None, force=False, user=None, version=None, cwd=None):
settings = settings or []
options = options or []
env = env or []
cwd = cwd or get_cwd()
try:
recorder = ActionRecorder()
# Checks that info files exists if the install folder is specified
if install_folder and not existing_info_files(_make_abs_path(install_folder, cwd)):
raise ConanException("The specified install folder doesn't contain '%s' and '%s' "
"files" % (CONANINFO, BUILD_INFO))
conanfile_path = _get_conanfile_path(conanfile_path, cwd, py=True)
if package_folder:
if build_folder or source_folder:
raise ConanException("package folder definition incompatible with build "
"and source folders")
package_folder = _make_abs_path(package_folder, cwd)
build_folder = _make_abs_path(build_folder, cwd)
install_folder = _make_abs_path(install_folder, cwd, default=build_folder)
source_folder = _make_abs_path(source_folder, cwd,
default=os.path.dirname(conanfile_path))
# Checks that no both settings and info files are specified
infos_present = existing_info_files(install_folder)
if profile_name or settings or options or env or not infos_present:
profile = profile_from_args(profile_name, settings, options, env=env,
cwd=cwd, client_cache=self._client_cache)
else:
profile = read_conaninfo_profile(install_folder)
reference, conanfile = self._loader.load_export(conanfile_path, name, version, user,
channel)
recorder.recipe_exported(reference)
recorder.add_recipe_being_developed(reference)
cmd_export(conanfile_path, conanfile, reference, False, self._user_io.out,
self._client_cache, self._hook_manager)
export_pkg(self._client_cache, self._graph_manager, self._hook_manager, recorder,
self._user_io.out,
reference, source_folder=source_folder, build_folder=build_folder,
package_folder=package_folder, install_folder=install_folder,
profile=profile, force=force)
return recorder.get_info()
except ConanException as exc:
recorder.error = True
exc.info = recorder.get_info()
raise
@api_method
def download(self, reference, remote_name=None, package=None, recipe=False):
if package and recipe:
raise ConanException("recipe parameter cannot be used together with package")
# Install packages without settings (fixed ids or all)
conan_ref = ConanFileReference.loads(reference)
if check_valid_ref(conan_ref, allow_pattern=False):
recorder = ActionRecorder()
download(conan_ref, package, remote_name, recipe, self._registry, self._remote_manager,
self._client_cache, self._user_io.out, recorder, self._loader,
self._hook_manager)
else:
raise ConanException("Provide a valid full reference without wildcards.")
@api_method
def install_reference(self, reference, settings=None, options=None, env=None,
remote_name=None, verify=None, manifests=None,
manifests_interactive=None, build=None, profile_name=None,
update=False, generators=None, install_folder=None, cwd=None):
try:
recorder = ActionRecorder()
cwd = cwd or os.getcwd()
install_folder = _make_abs_path(install_folder, cwd)
manifests = _parse_manifests_arguments(verify, manifests, manifests_interactive, cwd)
manifest_folder, manifest_interactive, manifest_verify = manifests
profile = profile_from_args(profile_name, settings, options, env, cwd,
self._client_cache)
if not generators: # We don't want the default txt
generators = False
mkdir(install_folder)
manager = self._init_manager(recorder)
manager.install(reference=reference, install_folder=install_folder,
remote_name=remote_name, profile=profile, build_modes=build,
update=update, manifest_folder=manifest_folder,
manifest_verify=manifest_verify,
manifest_interactive=manifest_interactive,
generators=generators)
return recorder.get_info()
except ConanException as exc:
recorder.error = True
exc.info = recorder.get_info()
raise
@api_method
def install(self, path="", settings=None, options=None, env=None,
remote_name=None, verify=None, manifests=None,
manifests_interactive=None, build=None, profile_name=None,
update=False, generators=None, no_imports=False, install_folder=None, cwd=None):
try:
recorder = ActionRecorder()
cwd = cwd or os.getcwd()
manifests = _parse_manifests_arguments(verify, manifests, manifests_interactive, cwd)
manifest_folder, manifest_interactive, manifest_verify = manifests
profile = profile_from_args(profile_name, settings, options, env, cwd,
self._client_cache)
wspath = _make_abs_path(path, cwd)
if install_folder:
if os.path.isabs(install_folder):
wsinstall_folder = install_folder
else:
wsinstall_folder = os.path.join(cwd, install_folder)
else:
wsinstall_folder = None
workspace = Workspace.get_workspace(wspath, wsinstall_folder)
if workspace:
self._user_io.out.success("Using conanws.yml file from %s" % workspace._base_folder)
manager = self._init_manager(recorder)
manager.install_workspace(profile, workspace, remote_name, build, update)
return
install_folder = _make_abs_path(install_folder, cwd)
conanfile_path = _get_conanfile_path(path, cwd, py=None)
manager = self._init_manager(recorder)
manager.install(reference=conanfile_path,
install_folder=install_folder,
remote_name=remote_name,
profile=profile,
build_modes=build,
update=update,
manifest_folder=manifest_folder,
manifest_verify=manifest_verify,
manifest_interactive=manifest_interactive,
generators=generators,
no_imports=no_imports)
return recorder.get_info()
except ConanException as exc:
recorder.error = True
exc.info = recorder.get_info()
raise
@api_method
def config_get(self, item):
config_parser = ConanClientConfigParser(self._client_cache.conan_conf_path)
self._user_io.out.info(config_parser.get_item(item))
return config_parser.get_item(item)
@api_method
def config_set(self, item, value):
config_parser = ConanClientConfigParser(self._client_cache.conan_conf_path)
config_parser.set_item(item, value)
self._client_cache.invalidate()
@api_method
def config_rm(self, item):
config_parser = ConanClientConfigParser(self._client_cache.conan_conf_path)
config_parser.rm_item(item)
self._client_cache.invalidate()
@api_method
def config_install(self, item, verify_ssl, config_type=None, args=None):
# _make_abs_path, but could be not a path at all
if item is not None and os.path.exists(item) and not os.path.isabs(item):
item = os.path.abspath(item)
from conans.client.conf.config_installer import configuration_install
return configuration_install(item, self._client_cache, self._user_io.out, verify_ssl,
requester=self._remote_manager._auth_manager._rest_client.requester, # FIXME: Look out!
config_type=config_type, args=args)
def _info_get_profile(self, reference, install_folder, profile_name, settings, options, env):
cwd = get_cwd()
try:
reference = ConanFileReference.loads(reference)
except ConanException:
reference = _get_conanfile_path(reference, cwd=None, py=None)
if install_folder or not (profile_name or settings or options or env):
# When no install folder is specified and no profile/settings/options/env are given,
# try to read the info files from cwd
install_folder = _make_abs_path(install_folder, cwd)
if existing_info_files(install_folder):
return reference, read_conaninfo_profile(install_folder)
return reference, profile_from_args(profile_name, settings, options, env=env,
cwd=cwd, client_cache=self._client_cache)
@api_method
def info_build_order(self, reference, settings=None, options=None, env=None,
profile_name=None, remote_name=None, build_order=None, check_updates=None,
install_folder=None):
reference, profile = self._info_get_profile(reference, install_folder, profile_name,
settings, options, env)
recorder = ActionRecorder()
deps_graph, _, _ = self._graph_manager.load_graph(reference, None, profile, ["missing"],
check_updates, False, remote_name,
recorder, workspace=None)
return deps_graph.build_order(build_order)
@api_method
def info_nodes_to_build(self, reference, build_modes, settings=None, options=None, env=None,
profile_name=None, remote_name=None, check_updates=None,
install_folder=None):
reference, profile = self._info_get_profile(reference, install_folder, profile_name,
settings, options, env)
recorder = ActionRecorder()
deps_graph, conanfile, _ = self._graph_manager.load_graph(reference, None, profile,
build_modes, check_updates,
False, remote_name, recorder,
workspace=None)
nodes_to_build = deps_graph.nodes_to_build()
return nodes_to_build, conanfile
@api_method
def info(self, reference, remote_name=None, settings=None, options=None, env=None,
profile_name=None, update=False, install_folder=None, build=None):
reference, profile = self._info_get_profile(reference, install_folder, profile_name,
settings, options, env)
recorder = ActionRecorder()
deps_graph, conanfile, _ = self._graph_manager.load_graph(reference, None, profile, build,
update, False, remote_name,
recorder, workspace=None)
return deps_graph, conanfile
@api_method
def build(self, conanfile_path, source_folder=None, package_folder=None, build_folder=None,
install_folder=None, should_configure=True, should_build=True, should_install=True,
should_test=True, cwd=None):
cwd = cwd or get_cwd()
conanfile_path = _get_conanfile_path(conanfile_path, cwd, py=True)
build_folder = _make_abs_path(build_folder, cwd)
install_folder = _make_abs_path(install_folder, cwd, default=build_folder)
source_folder = _make_abs_path(source_folder, cwd, default=os.path.dirname(conanfile_path))
default_pkg_folder = os.path.join(build_folder, "package")
package_folder = _make_abs_path(package_folder, cwd, default=default_pkg_folder)
build(self._graph_manager, self._hook_manager, conanfile_path, self._user_io.out,
source_folder, build_folder, package_folder, install_folder,
should_configure=should_configure, should_build=should_build,
should_install=should_install, should_test=should_test)
@api_method
def package(self, path, build_folder, package_folder, source_folder=None, install_folder=None,
cwd=None):
cwd = cwd or get_cwd()
conanfile_path = _get_conanfile_path(path, cwd, py=True)
build_folder = _make_abs_path(build_folder, cwd)
install_folder = _make_abs_path(install_folder, cwd, default=build_folder)
source_folder = _make_abs_path(source_folder, cwd, default=os.path.dirname(conanfile_path))
default_pkg_folder = os.path.join(build_folder, "package")
package_folder = _make_abs_path(package_folder, cwd, default=default_pkg_folder)
if package_folder == build_folder:
raise ConanException("Cannot 'conan package' to the build folder. "
"--build-folder and package folder can't be the same")
output = ScopedOutput("PROJECT", self._user_io.out)
conanfile = self._graph_manager.load_consumer_conanfile(conanfile_path, install_folder,
output, deps_info_required=True)
packager.create_package(conanfile, None, source_folder, build_folder, package_folder,
install_folder, output, self._hook_manager, conanfile_path, None,
local=True, copy_info=True)
@api_method
def source(self, path, source_folder=None, info_folder=None, cwd=None):
cwd = cwd or get_cwd()
conanfile_path = _get_conanfile_path(path, cwd, py=True)
source_folder = _make_abs_path(source_folder, cwd)
info_folder = _make_abs_path(info_folder, cwd)
mkdir(source_folder)
if not os.path.exists(info_folder):
raise ConanException("Specified info-folder doesn't exist")
output = ScopedOutput("PROJECT", self._user_io.out)
# load the info files only if they exist
conanfile = self._graph_manager.load_consumer_conanfile(conanfile_path, info_folder, output)
conanfile_folder = os.path.dirname(conanfile_path)
if conanfile_folder != source_folder:
output.info("Executing exports to: %s" % source_folder)
export_recipe(conanfile, conanfile_folder, source_folder, output)
export_source(conanfile, conanfile_folder, source_folder, output)
config_source_local(source_folder, conanfile, output, conanfile_path,
self._hook_manager)
@api_method
def imports(self, path, dest=None, info_folder=None, cwd=None):
"""
:param path: Path to the conanfile
:param dest: Dir to put the imported files. (Abs path or relative to cwd)
:param info_folder: Dir where the conaninfo.txt and conanbuildinfo.txt files are
:param cwd: Current working directory
:return: None
"""
cwd = cwd or get_cwd()
info_folder = _make_abs_path(info_folder, cwd)
dest = _make_abs_path(dest, cwd)
mkdir(dest)
conanfile_abs_path = _get_conanfile_path(path, cwd, py=None)
output = ScopedOutput("PROJECT", self._user_io.out)
conanfile = self._graph_manager.load_consumer_conanfile(conanfile_abs_path, info_folder,
output, deps_info_required=True)
run_imports(conanfile, dest, output)
@api_method
def imports_undo(self, manifest_path):
cwd = get_cwd()
manifest_path = _make_abs_path(manifest_path, cwd)
undo_imports(manifest_path, self._user_io.out)
@api_method
def export(self, path, name, version, user, channel, keep_source=False, cwd=None):
conanfile_path = _get_conanfile_path(path, cwd, py=True)
reference, conanfile = self._loader.load_export(conanfile_path, name, version, user,
channel)
cmd_export(conanfile_path, conanfile, reference, keep_source, self._user_io.out,
self._client_cache, self._hook_manager)
@api_method
def remove(self, pattern, query=None, packages=None, builds=None, src=False, force=False,
remote_name=None, outdated=False):
remover = ConanRemover(self._client_cache, self._remote_manager, self._user_io,
self._registry)
remover.remove(pattern, remote_name, src, builds, packages, force=force,
packages_query=query, outdated=outdated)
@api_method
def copy(self, reference, user_channel, force=False, packages=None):
"""
param packages: None=No binaries, True=All binaries, else list of IDs
"""
from conans.client.cmd.copy import cmd_copy
# FIXME: conan copy does not support short-paths in Windows
reference = ConanFileReference.loads(str(reference))
cmd_copy(reference, user_channel, packages, self._client_cache,
self._user_io, self._remote_manager, self._registry, self._loader, force=force)
@api_method
def authenticate(self, name, password, remote_name):
remote = self.get_remote_by_name(remote_name)
_, remote_name, prev_user, user = self._remote_manager.authenticate(remote, name, password)
return remote_name, prev_user, user
@api_method
def user_set(self, user, remote_name=None):
remote = (self.get_default_remote() if not remote_name
else self.get_remote_by_name(remote_name))
return user_set(self._client_cache.localdb, user, remote)
@api_method
def users_clean(self):
users_clean(self._client_cache.localdb)
@api_method
def users_list(self, remote_name=None):
info = {"error": False, "remotes": []}
remotes = [self.get_remote_by_name(remote_name)] if remote_name else self.remote_list()
try:
info["remotes"] = users_list(self._client_cache.localdb, remotes)
return info
except ConanException as exc:
info["error"] = True
exc.info = info
raise
@api_method
def search_recipes(self, pattern, remote_name=None, case_sensitive=False):
recorder = SearchRecorder()
search = Search(self._client_cache, self._remote_manager, self._registry)
try:
references = search.search_recipes(pattern, remote_name, case_sensitive)
except ConanException as exc:
recorder.error = True
exc.info = recorder.get_info()
raise
for remote_name, refs in references.items():
for ref in refs:
recorder.add_recipe(remote_name, ref, with_packages=False)
return recorder.get_info()
@api_method
def search_packages(self, reference, query=None, remote_name=None, outdated=False):
recorder = SearchRecorder()
search = Search(self._client_cache, self._remote_manager, self._registry)
try:
reference = ConanFileReference.loads(str(reference))
references = search.search_packages(reference, remote_name, query=query,
outdated=outdated)
except ConanException as exc:
recorder.error = True
exc.info = recorder.get_info()
raise
for remote_name, remote_ref in references.items():
recorder.add_recipe(remote_name, reference)
if remote_ref.ordered_packages:
for package_id, properties in remote_ref.ordered_packages.items():
package_recipe_hash = properties.get("recipe_hash", None)
recorder.add_package(remote_name, reference,
package_id, properties.get("options", []),
properties.get("settings", []),
properties.get("full_requires", []),
remote_ref.recipe_hash != package_recipe_hash)
return recorder.get_info()
@api_method
def upload(self, pattern, package=None, remote_name=None, all_packages=False, confirm=False,
retry=2, retry_wait=5, integrity_check=False, policy=None, query=None):
""" Uploads a package recipe and the generated binary packages to a specified remote
"""
recorder = UploadRecorder()
uploader = CmdUpload(self._client_cache, self._user_io, self._remote_manager,
self._registry, self._loader, self._hook_manager)
try:
uploader.upload(recorder, pattern, package, all_packages, confirm, retry,
retry_wait, integrity_check, policy, remote_name, query=query)
return recorder.get_info()
except ConanException as exc:
recorder.error = True
exc.info = recorder.get_info()
raise
@api_method
def remote_list(self):
return self._registry.remotes.list
@api_method
def remote_add(self, remote_name, url, verify_ssl=True, insert=None, force=None):
return self._registry.remotes.add(remote_name, url, verify_ssl, insert, force)
@api_method
def remote_remove(self, remote_name):
return self._registry.remotes.remove(remote_name)
@api_method
def remote_update(self, remote_name, url, verify_ssl=True, insert=None):
return self._registry.remotes.update(remote_name, url, verify_ssl, insert)
@api_method
def remote_rename(self, remote_name, new_new_remote):
return self._registry.remotes.rename(remote_name, new_new_remote)
@api_method
def remote_list_ref(self):
return {r: remote_name for r, remote_name in self._registry.refs.list.items()}
@api_method
def remote_add_ref(self, reference, remote_name):
reference = ConanFileReference.loads(str(reference), validate=True)
return self._registry.refs.set(reference, remote_name, check_exists=True)
@api_method
def remote_remove_ref(self, reference):
reference = ConanFileReference.loads(str(reference), validate=True)
return self._registry.refs.remove(reference)
@api_method
def remote_update_ref(self, reference, remote_name):
reference = ConanFileReference.loads(str(reference), validate=True)
return self._registry.refs.update(reference, remote_name)
@api_method
def remote_list_pref(self, reference):
reference = ConanFileReference.loads(str(reference), validate=True)
ret = {}
tmp = self._registry.prefs.list
for r, remote in tmp.items():
pref = PackageReference.loads(r)
if pref.conan == reference:
ret[pref.full_repr()] = remote
return ret
@api_method
def remote_add_pref(self, package_reference, remote_name):
p_reference = PackageReference.loads(str(package_reference), validate=True)
return self._registry.prefs.set(p_reference, remote_name, check_exists=True)
@api_method
def remote_remove_pref(self, package_reference):
p_reference = PackageReference.loads(str(package_reference), validate=True)
return self._registry.prefs.remove(p_reference)
@api_method
def remote_update_pref(self, package_reference, remote_name):
p_reference = PackageReference.loads(str(package_reference), validate=True)
return self._registry.prefs.update(p_reference, remote_name)
def remote_clean(self):
return self._registry.remotes.clean()
@api_method
def profile_list(self):
return cmd_profile_list(self._client_cache.profiles_path, self._user_io.out)
@api_method
def create_profile(self, profile_name, detect=False):
return cmd_profile_create(profile_name, self._client_cache.profiles_path,
self._user_io.out, detect)
@api_method
def update_profile(self, profile_name, key, value):
return cmd_profile_update(profile_name, key, value, self._client_cache.profiles_path)
@api_method
def get_profile_key(self, profile_name, key):
return cmd_profile_get(profile_name, key, self._client_cache.profiles_path)
@api_method
def delete_profile_key(self, profile_name, key):
return cmd_profile_delete_key(profile_name, key, self._client_cache.profiles_path)
@api_method
def read_profile(self, profile=None):
p, _ = read_profile(profile, get_cwd(), self._client_cache.profiles_path)
return p
@api_method
def get_path(self, reference, package_id=None, path=None, remote_name=None):
from conans.client.local_file_getter import get_path
reference = ConanFileReference.loads(reference)
if not path:
path = "conanfile.py" if not package_id else "conaninfo.txt"
if not remote_name:
return get_path(self._client_cache, reference, package_id, path), path
else:
remote = self.get_remote_by_name(remote_name)
return self._remote_manager.get_path(reference, package_id, path, remote), path
@api_method
def export_alias(self, reference, target_reference):
reference = ConanFileReference.loads(reference)
target_reference = ConanFileReference.loads(target_reference)
return export_alias(reference, target_reference, self._client_cache)
@api_method
def get_default_remote(self):
return self._registry.remotes.default
@api_method
def get_remote_by_name(self, remote_name):
return self._registry.remotes.get(remote_name)
Conan = ConanAPIV1
def _parse_manifests_arguments(verify, manifests, manifests_interactive, cwd):
if manifests and manifests_interactive:
raise ConanException("Do not specify both manifests and "
"manifests-interactive arguments")
if verify and (manifests or manifests_interactive):
raise ConanException("Do not specify both 'verify' and "
"'manifests' or 'manifests-interactive' arguments")
manifest_folder = verify or manifests or manifests_interactive
if manifest_folder:
if not os.path.isabs(manifest_folder):
if not cwd:
raise ConanException("'cwd' should be defined if the manifest folder is relative.")
manifest_folder = os.path.join(cwd, manifest_folder)
manifest_verify = verify is not None
manifest_interactive = manifests_interactive is not None
else:
manifest_verify = manifest_interactive = False
return manifest_folder, manifest_interactive, manifest_verify
def existing_info_files(folder):
return os.path.exists(os.path.join(folder, CONANINFO)) and \
os.path.exists(os.path.join(folder, BUILD_INFO))
def get_conan_runner():
print_commands_to_output = get_env("CONAN_PRINT_RUN_COMMANDS", False)
generate_run_log_file = get_env("CONAN_LOG_RUN_TO_FILE", False)
log_run_to_output = get_env("CONAN_LOG_RUN_TO_OUTPUT", True)
runner = ConanRunner(print_commands_to_output, generate_run_log_file, log_run_to_output)
return runner
def migrate_and_get_client_cache(base_folder, out, storage_folder=None):
# Init paths
client_cache = ClientCache(base_folder, storage_folder, out)
# Migration system
migrator = ClientMigrator(client_cache, Version(client_version), out)
migrator.migrate()
return client_cache
| [] | [] | ["CONAN_REQUEST_TIMEOUT"] | [] | ["CONAN_REQUEST_TIMEOUT"] | python | 1 | 0 | |
store/memory/memory_test.go
|
package memory
import (
"fmt"
"os"
"testing"
"time"
"github.com/kr/pretty"
"github.com/micro/go-micro/v2/store"
)
func TestMemoryReInit(t *testing.T) {
s := NewStore(store.Table("aaa"))
s.Init(store.Table(""))
if len(s.Options().Table) > 0 {
t.Error("Init didn't reinitialise the store")
}
}
func TestMemoryBasic(t *testing.T) {
s := NewStore()
s.Init()
basictest(s, t)
}
func TestMemoryPrefix(t *testing.T) {
s := NewStore()
s.Init(store.Table("some-prefix"))
basictest(s, t)
}
func TestMemoryNamespace(t *testing.T) {
s := NewStore()
s.Init(store.Database("some-namespace"))
basictest(s, t)
}
func TestMemoryNamespacePrefix(t *testing.T) {
s := NewStore()
s.Init(store.Table("some-prefix"), store.Database("some-namespace"))
basictest(s, t)
}
func basictest(s store.Store, t *testing.T) {
if len(os.Getenv("IN_TRAVIS_CI")) == 0 {
t.Logf("Testing store %s, with options %# v\n", s.String(), pretty.Formatter(s.Options()))
}
// Read and Write an expiring Record
if err := s.Write(&store.Record{
Key: "Hello",
Value: []byte("World"),
Expiry: time.Millisecond * 100,
}); err != nil {
t.Error(err)
}
if r, err := s.Read("Hello"); err != nil {
t.Error(err)
} else {
if len(r) != 1 {
t.Error("Read returned multiple records")
}
if r[0].Key != "Hello" {
t.Errorf("Expected %s, got %s", "Hello", r[0].Key)
}
if string(r[0].Value) != "World" {
t.Errorf("Expected %s, got %s", "World", r[0].Value)
}
}
time.Sleep(time.Millisecond * 200)
if _, err := s.Read("Hello"); err != store.ErrNotFound {
t.Errorf("Expected %# v, got %# v", store.ErrNotFound, err)
}
// Write 3 records with various expiry and get with prefix
records := []*store.Record{
&store.Record{
Key: "foo",
Value: []byte("foofoo"),
},
&store.Record{
Key: "foobar",
Value: []byte("foobarfoobar"),
Expiry: time.Millisecond * 100,
},
&store.Record{
Key: "foobarbaz",
Value: []byte("foobarbazfoobarbaz"),
Expiry: 2 * time.Millisecond * 100,
},
}
for _, r := range records {
if err := s.Write(r); err != nil {
t.Errorf("Couldn't write k: %s, v: %# v (%s)", r.Key, pretty.Formatter(r.Value), err)
}
}
if results, err := s.Read("foo", store.ReadPrefix()); err != nil {
t.Errorf("Couldn't read all \"foo\" keys, got %# v (%s)", pretty.Formatter(results), err)
} else {
if len(results) != 3 {
t.Errorf("Expected 3 items, got %d", len(results))
}
if len(os.Getenv("IN_TRAVIS_CI")) == 0 {
t.Logf("Prefix test: %v\n", pretty.Formatter(results))
}
}
time.Sleep(time.Millisecond * 100)
if results, err := s.Read("foo", store.ReadPrefix()); err != nil {
t.Errorf("Couldn't read all \"foo\" keys, got %# v (%s)", pretty.Formatter(results), err)
} else {
if len(results) != 2 {
t.Errorf("Expected 2 items, got %d", len(results))
}
if len(os.Getenv("IN_TRAVIS_CI")) == 0 {
t.Logf("Prefix test: %v\n", pretty.Formatter(results))
}
}
time.Sleep(time.Millisecond * 100)
if results, err := s.Read("foo", store.ReadPrefix()); err != nil {
t.Errorf("Couldn't read all \"foo\" keys, got %# v (%s)", pretty.Formatter(results), err)
} else {
if len(results) != 1 {
t.Errorf("Expected 1 item, got %d", len(results))
}
if len(os.Getenv("IN_TRAVIS_CI")) == 0 {
t.Logf("Prefix test: %# v\n", pretty.Formatter(results))
}
}
if err := s.Delete("foo", func(d *store.DeleteOptions) {}); err != nil {
t.Errorf("Delete failed (%v)", err)
}
if results, err := s.Read("foo", store.ReadPrefix()); err != nil {
t.Errorf("Couldn't read all \"foo\" keys, got %# v (%s)", pretty.Formatter(results), err)
} else {
if len(results) != 0 {
t.Errorf("Expected 0 items, got %d (%# v)", len(results), pretty.Formatter(results))
}
}
// Write 3 records with various expiry and get with Suffix
records = []*store.Record{
&store.Record{
Key: "foo",
Value: []byte("foofoo"),
},
&store.Record{
Key: "barfoo",
Value: []byte("barfoobarfoo"),
Expiry: time.Millisecond * 100,
},
&store.Record{
Key: "bazbarfoo",
Value: []byte("bazbarfoobazbarfoo"),
Expiry: 2 * time.Millisecond * 100,
},
}
for _, r := range records {
if err := s.Write(r); err != nil {
t.Errorf("Couldn't write k: %s, v: %# v (%s)", r.Key, pretty.Formatter(r.Value), err)
}
}
if results, err := s.Read("foo", store.ReadSuffix()); err != nil {
t.Errorf("Couldn't read all \"foo\" keys, got %# v (%s)", pretty.Formatter(results), err)
} else {
if len(results) != 3 {
t.Errorf("Expected 3 items, got %d", len(results))
}
if len(os.Getenv("IN_TRAVIS_CI")) == 0 {
t.Logf("Prefix test: %v\n", pretty.Formatter(results))
}
}
time.Sleep(time.Millisecond * 100)
if results, err := s.Read("foo", store.ReadSuffix()); err != nil {
t.Errorf("Couldn't read all \"foo\" keys, got %# v (%s)", pretty.Formatter(results), err)
} else {
if len(results) != 2 {
t.Errorf("Expected 2 items, got %d", len(results))
}
if len(os.Getenv("IN_TRAVIS_CI")) == 0 {
t.Logf("Prefix test: %v\n", pretty.Formatter(results))
}
}
time.Sleep(time.Millisecond * 100)
if results, err := s.Read("foo", store.ReadSuffix()); err != nil {
t.Errorf("Couldn't read all \"foo\" keys, got %# v (%s)", pretty.Formatter(results), err)
} else {
if len(results) != 1 {
t.Errorf("Expected 1 item, got %d", len(results))
}
if len(os.Getenv("IN_TRAVIS_CI")) == 0 {
t.Logf("Prefix test: %# v\n", pretty.Formatter(results))
}
}
if err := s.Delete("foo"); err != nil {
t.Errorf("Delete failed (%v)", err)
}
if results, err := s.Read("foo", store.ReadSuffix()); err != nil {
t.Errorf("Couldn't read all \"foo\" keys, got %# v (%s)", pretty.Formatter(results), err)
} else {
if len(results) != 0 {
t.Errorf("Expected 0 items, got %d (%# v)", len(results), pretty.Formatter(results))
}
}
// Test Prefix, Suffix and WriteOptions
if err := s.Write(&store.Record{
Key: "foofoobarbar",
Value: []byte("something"),
}, store.WriteTTL(time.Millisecond*100)); err != nil {
t.Error(err)
}
if err := s.Write(&store.Record{
Key: "foofoo",
Value: []byte("something"),
}, store.WriteExpiry(time.Now().Add(time.Millisecond*100))); err != nil {
t.Error(err)
}
if err := s.Write(&store.Record{
Key: "barbar",
Value: []byte("something"),
// TTL has higher precedence than expiry
}, store.WriteExpiry(time.Now().Add(time.Hour)), store.WriteTTL(time.Millisecond*100)); err != nil {
t.Error(err)
}
if results, err := s.Read("foo", store.ReadPrefix(), store.ReadSuffix()); err != nil {
t.Error(err)
} else {
if len(results) != 1 {
t.Errorf("Expected 1 results, got %d: %# v", len(results), pretty.Formatter(results))
}
}
time.Sleep(time.Millisecond * 100)
if results, err := s.List(); err != nil {
t.Errorf("List failed: %s", err)
} else {
if len(results) != 0 {
t.Error("Expiry options were not effective")
}
}
s.Write(&store.Record{Key: "a", Value: []byte("a")})
s.Write(&store.Record{Key: "aa", Value: []byte("aa")})
s.Write(&store.Record{Key: "aaa", Value: []byte("aaa")})
if results, err := s.Read("b", store.ReadPrefix()); err != nil {
t.Error(err)
} else {
if len(results) != 0 {
t.Errorf("Expected 0 results, got %d", len(results))
}
}
s.Close() // reset the store
for i := 0; i < 10; i++ {
s.Write(&store.Record{
Key: fmt.Sprintf("a%d", i),
Value: []byte{},
})
}
if results, err := s.Read("a", store.ReadLimit(5), store.ReadPrefix()); err != nil {
t.Error(err)
} else {
if len(results) != 5 {
t.Fatal("Expected 5 results, got ", len(results))
}
if results[0].Key != "a0" {
t.Fatalf("Expected a0, got %s", results[0].Key)
}
if results[4].Key != "a4" {
t.Fatalf("Expected a4, got %s", results[4].Key)
}
}
if results, err := s.Read("a", store.ReadLimit(30), store.ReadOffset(5), store.ReadPrefix()); err != nil {
t.Error(err)
} else {
if len(results) != 5 {
t.Error("Expected 5 results, got ", len(results))
}
}
}
| ["\"IN_TRAVIS_CI\"", "\"IN_TRAVIS_CI\"", "\"IN_TRAVIS_CI\"", "\"IN_TRAVIS_CI\"", "\"IN_TRAVIS_CI\"", "\"IN_TRAVIS_CI\"", "\"IN_TRAVIS_CI\""] | [] | ["IN_TRAVIS_CI"] | [] | ["IN_TRAVIS_CI"] | go | 1 | 0 | |
masappcli/__main__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import getpass
import json
import sys
import os
from masappcli.masappcli import mASAPP_CI
ASCII_ART_DESCRIPTION = U'''
_____ _____ _____ _____ __ _____
/\ / ____| /\ | __ \ | __ \ / ____|| | |_ _|
_ __ ___ / \ | (___ / \ | |__) || |__) | | | | | | |
| '_ ` _ \ / /\ \ \___ \ / /\ \ | ___/ | ___/ | | | | | |
| | | | | | / ____ \ ____) |/ ____ \ | | | | | |____ | |___ _| |_
|_| |_| |_|/_/ \_\|_____//_/ \_\|_| |_| \_____||______||_____|
'''
def cli(parser):
if parser is None:
raise TypeError("ERROR, parser is None")
parser = argparse.ArgumentParser(prog='masappcli', description=ASCII_ART_DESCRIPTION,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-a', '--app', help='path to the .apk or .ipa file', metavar=".ipa/.apk")
parser.add_argument('-key', type=str, metavar="mASAPP_key")
parser.add_argument('-secret', type=str, metavar="mASAPP_secret")
parser.add_argument('-p', '--packageNameOrigin', help='package name origin of the app', metavar="packageNameOrigin")
parser.add_argument('-r', '--riskscore', help='riskscoring execution', type=float, metavar="N")
parser.add_argument('-d', '--detailed', help='add details to the execution', action='store_true')
parser.add_argument('-s', '--standard', help='standard execution', metavar=".json")
parser.add_argument('-c', '--configure', help='add your mASAPP key and mASAPP secret as environment vars',
action='store_true')
parser.add_argument('-w', '--workgroup', type=str, metavar="workgroup name",
help="the name of the workgroup you want to analyse in")
parser.add_argument('--export_summary', help='export the scan summary from mASAPP in a json file', metavar="*.json")
parser.add_argument('--export_result', help='export the scan result from mASAPP in a json file', metavar="*.json")
args = parser.parse_args()
print(ASCII_ART_DESCRIPTION)
### Password setting ###
masapp_key = None
masapp_secret = None
if args.app is None and args.configure == False and args.detailed == False and args.key is None and args.packageNameOrigin is None and args.riskscore is None and args.secret is None and args.standard is None:
raise ValueError("[X] No args added")
if args.configure:
print("[?] Insert your MASSAP Access Key: ")
masapp_key = str(sys.stdin.readline())
os.environ["MASAPP_KEY"] = masapp_key
masapp_secret = str(getpass.getpass(prompt='[?] Insert your mASAPP Secret Key: '))
os.environ["MASAPP_SECRET"] = masapp_secret
print("[!] Credentials loaded")
# TODO maybe make it persistent
elif (args.key and not args.secret) or (args.secret and not args.key):
raise ValueError("[X] -key and -secret can only be used simultaneously")
elif args.key and args.secret:
masapp_key = args.key
masapp_secret = args.secret
else:
if os.getenv("MASAPP_KEY"):
masapp_key = os.getenv("MASAPP_KEY")
else:
raise ValueError(
"[X] MASAPP_KEY is not stored in environment. Please, use the option --configure or add directly it with -key option")
if os.getenv("MASAPP_SECRET"):
masapp_secret = os.getenv("MASAPP_SECRET")
else:
raise ValueError(
"[X] MASAPP_SECRET is not stored in environment. Please, use the option --configure or add directly it with -secret option")
if masapp_key is not None and masapp_secret is not None:
workgroup = getattr(args, 'workgroup', None)
if args.export_summary and args.export_result:
if args.export_summary == args.export_result:
raise ValueError("[X] Export files can not be named with the same name")
if args.export_summary:
if args.export_summary in os.listdir('.'):
raise ValueError("[X] Export summary file already exists")
if args.export_result:
if args.export_result in os.listdir('.'):
raise ValueError("[X] Export result file already exists")
if args.riskscore and args.standard:
raise ValueError("[X] Riskscore and standard execution can not being thrown simultaneously")
elif args.riskscore:
user = mASAPP_CI(key=masapp_key, secret=masapp_secret)
if args.app:
if args.packageNameOrigin:
user.riskscoring_execution(maximum_riskscoring=args.riskscore, app_path=args.app,
package_name_origin=args.packageNameOrigin,
workgroup=workgroup,
detail=args.detailed,
export_summary=args.export_summary,
export_result=args.export_result
)
else:
user.riskscoring_execution(maximum_riskscoring=args.riskscore, app_path=args.app,
workgroup=workgroup,
detail=args.detailed,
export_summary=args.export_summary,
export_result=args.export_result
)
else:
raise ValueError("[X] No path to the app added")
else:
if args.standard:
if args.app:
checked_json = check_json(args.standard)
if checked_json:
user = mASAPP_CI(key=masapp_key, secret=masapp_secret)
if type(checked_json) != bool:
if args.packageNameOrigin:
user.standard_execution(scan_maximum_values=checked_json, app_path=args.app,
package_name_origin=args.packageNameOrigin,
workgroup=workgroup,
detail=args.detailed,
export_summary=args.export_summary,
export_result=args.export_result
)
else:
user.standard_execution(scan_maximum_values=checked_json, app_path=args.app,
workgroup=workgroup,
detail=args.detailed,
export_summary=args.export_summary,
export_result=args.export_result
)
else:
print(
u"""
-s --standard json structure:
{
"vulnerabilities": {
"critical": maximum of critical vulnerabilities,
"high": maximum of high vulnerabilities,
"medium": maximum of medium vulnerabilities,
"low": maximum of low vulnerabilities
},
"behaviorals": {
"critical": maximum of critical behaviorals,
"high": "maximum of high behaviorals,
"medium": maximum of medium behavioral,
"low": maximum of low behaviorals
}
}
"""
)
raise ValueError("[X] Wrong json added for standard execution")
else:
raise ValueError("[X] No path to the app added")
else:
raise ValueError("[X] No execution mode added")
else:
raise ValueError("[X] mASAPP credentials not successfully set")
def main():
try:
cli(sys.argv[1:])
except Exception as e:
print(e)
sys.exit(-1)
def check_json(input_json):
if input_json is not None:
if ".json" in input_json:
try:
json_file = open(input_json)
input_json = json.load(json_file)
json_file.close()
except:
return False
else:
try:
input_json = json.loads(input_json)
except:
return False
keys = input_json.keys()
correct_json = True
for key in keys:
if not key == "vulnerabilities" and not key == "behaviorals":
correct_json = False
if not correct_json:
return False
else:
return input_json
return False
if __name__ == "__main__":
try:
main()
except Exception as e:
print(e)
sys.exit(-1)
| [] | [] | ["MASAPP_SECRET", "MASAPP_KEY"] | [] | ["MASAPP_SECRET", "MASAPP_KEY"] | python | 2 | 0 | |
examples/dso/sample.py
|
from dso.client import Client
from jpype import *
from jpype import java
import lithops
import os
dso=os.environ.get('DSO')
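# my_function runs inside a lithops worker: it opens its own DSO client against
# the server named by the DSO environment variable and increments the shared
# "cnt" atomic counter by x.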
def my_function(x):
client = Client(dso)
d = client.getAtomicCounter("cnt")
return d.increment(x)
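# Driver: dispatch my_function(3) asynchronously on the DSO-enabled runtime,
# then read the shared counter locally before and after collecting the result.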
if __name__ == '__main__':
fexec = lithops.FunctionExecutor(runtime='0track/lithops-dso:1.1')
fexec.call_async(my_function, 3)
client = Client(dso)
c = client.getAtomicCounter("cnt")
print("counter: "+str(c.tally()))
print(fexec.get_result())
print("counter: "+str(c.tally()))
| [] | [] | ["DSO"] | [] | ["DSO"] | python | 1 | 0 | |
tools/system_libs.py
|
# Copyright 2014 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
from __future__ import print_function
import glob
import hashlib
import itertools
import json
import logging
import os
import re
import shutil
import sys
import tarfile
import zipfile
from glob import iglob
from . import ports
from . import shared
from tools.shared import mangle_c_symbol_name, demangle_c_symbol_name
stdout = None
stderr = None
logger = logging.getLogger('system_libs')
LIBC_SOCKETS = ['socket.c', 'socketpair.c', 'shutdown.c', 'bind.c', 'connect.c',
'listen.c', 'accept.c', 'getsockname.c', 'getpeername.c', 'send.c',
'recv.c', 'sendto.c', 'recvfrom.c', 'sendmsg.c', 'recvmsg.c',
'getsockopt.c', 'setsockopt.c', 'freeaddrinfo.c']
def files_in_path(path_components, filenames):
srcdir = shared.path_from_root(*path_components)
return [os.path.join(srcdir, f) for f in filenames]
def glob_in_path(path_components, glob_pattern, excludes=()):
srcdir = shared.path_from_root(*path_components)
return [f for f in iglob(os.path.join(srcdir, glob_pattern)) if os.path.basename(f) not in excludes]
def get_all_files_under(dirname):
for path, subdirs, files in os.walk(dirname):
for name in files:
yield os.path.join(path, name)
def dir_is_newer(dir_a, dir_b):
assert os.path.exists(dir_a)
assert os.path.exists(dir_b)
newest_a = max([os.path.getmtime(x) for x in get_all_files_under(dir_a)])
newest_b = max([os.path.getmtime(x) for x in get_all_files_under(dir_b)])
return newest_a < newest_b
def get_cflags(force_object_files=False):
flags = []
if shared.Settings.WASM_BACKEND:
if force_object_files:
flags += ['-s', 'WASM_OBJECT_FILES=1']
elif not shared.Settings.WASM_OBJECT_FILES:
flags += ['-s', 'WASM_OBJECT_FILES=0']
if shared.Settings.RELOCATABLE:
flags += ['-s', 'RELOCATABLE']
return flags
def run_build_command(cmd):
# this must only be called on a standard build command
assert cmd[0] == shared.PYTHON and cmd[1] in (shared.EMCC, shared.EMXX)
# add standard cflags, but also allow the cmd to override them
cmd = cmd[:2] + get_cflags() + cmd[2:]
shared.run_process(cmd, stdout=stdout, stderr=stderr)
def run_commands(commands):
cores = min(len(commands), shared.Building.get_num_cores())
if cores <= 1:
for command in commands:
run_build_command(command)
else:
pool = shared.Building.get_multiprocessing_pool()
# https://stackoverflow.com/questions/1408356/keyboard-interrupts-with-pythons-multiprocessing-pool
# https://bugs.python.org/issue8296
# 999999 seconds (about 11 days) is reasonably huge to not trigger actual timeout
# and is smaller than the maximum timeout value 4294967.0 for Python 3 on Windows (threading.TIMEOUT_MAX)
pool.map_async(run_build_command, commands, chunksize=1).get(999999)
def static_library_ext():
return '.a' if shared.Settings.WASM_BACKEND else '.bc'
def create_lib(libname, inputs):
"""Create a library from a set of input objects."""
suffix = os.path.splitext(libname)[1]
if suffix in ('.bc', '.o'):
if len(inputs) == 1:
shutil.copyfile(inputs[0], libname)
else:
shared.Building.link_to_object(inputs, libname)
elif suffix == '.a':
shared.Building.emar('cr', libname, inputs)
else:
raise Exception('unknown suffix ' + libname)
def read_symbols(path):
with open(path) as f:
content = f.read()
# Require that Windows newlines should not be present in a symbols file, if running on Linux or macOS
# This kind of mismatch can occur if one copies a zip file of Emscripten cloned on Windows over to
# a Linux or macOS system. It will result in Emscripten linker getting confused on stray \r characters,
# and be unable to link any library symbols properly. We could harden against this by .strip()ping the
# opened files, but it is possible that the mismatching line endings can cause random problems elsewhere
# in the toolchain, hence abort execution if so.
if os.name != 'nt' and '\r\n' in content:
raise Exception('Windows newlines \\r\\n detected in symbols file "' + path + '"! This could happen for example when copying Emscripten checkout from Windows to Linux or macOS. Please use Unix line endings on checkouts of Emscripten on Linux and macOS!')
return shared.Building.parse_symbols(content).defs
def get_wasm_libc_rt_files():
# Static linking is tricky with LLVM, since e.g. memset might not be used
# from libc, but be used as an intrinsic, and codegen will generate a libc
# call from that intrinsic *after* static linking would have thought it is
# all in there. In asm.js this is not an issue as we do JS linking anyhow,
# and have asm.js-optimized versions of all the LLVM intrinsics. But for
# wasm, we need a better solution. For now, make another archive that gets
# included at the same time as compiler-rt.
# Note that this also includes things that may be depended on by those
# functions - fmin uses signbit, for example, so signbit must be here (so if
# fmin is added by codegen, it will have all it needs).
math_files = files_in_path(
path_components=['system', 'lib', 'libc', 'musl', 'src', 'math'],
filenames=[
'fmin.c', 'fminf.c', 'fminl.c',
'fmax.c', 'fmaxf.c', 'fmaxl.c',
'fmod.c', 'fmodf.c', 'fmodl.c',
'log2.c', 'log2f.c', 'log10.c', 'log10f.c',
'exp2.c', 'exp2f.c', 'exp10.c', 'exp10f.c',
'scalbn.c', '__fpclassifyl.c',
'__signbitl.c', '__signbitf.c', '__signbit.c'
])
other_files = files_in_path(
path_components=['system', 'lib', 'libc'],
filenames=['emscripten_memcpy.c', 'emscripten_memset.c',
'emscripten_memmove.c'])
return math_files + other_files
class Library(object):
"""
`Library` is the base class of all system libraries.
There are two types of libraries: abstract and concrete.
* An abstract library, e.g. MTLibrary, is a subclass of `Library` that
implements certain behaviour common to multiple libraries. The features
of multiple abstract libraries can be used through multiple inheritance.
* A concrete library, e.g. libc, is a subclass of `Library` that describes
how to build a particular library, and its properties, such as name and
dependencies.
This library system is meant to handle having many versions of the same library,
which we call *variations*. For example, some libraries (those that inherit
from MTLibrary), have both single-threaded and multi-threaded versions.
An instance of a `Library` subclass represents a specific variation of the
library. Instance methods perform operations relating to this variation.
For example, `get_cflags()` would return the emcc flags needed to build this
variation, and `build()` would generate the library file for this variation.
The constructor takes keyword arguments that define the variation.
Class methods perform tasks relating to all variations. For example,
`variations()` returns a list of all variations that exists for this library,
and `get_default_variation()` returns the variation suitable for the current
environment.
Other class methods act upon a group of libraries. For example,
`Library.get_all_variations()` returns a mapping of all variations of
existing libraries.
To add a new type of variation, you must add a parameter to `__init__` that
selects the variant. Then, override one of `vary_on` or `variations`, as well
as `get_default_variation`.
If the parameter is boolean, overriding `vary_on` to add the parameter name
to the returned list is sufficient:
@classmethod
def vary_on(cls):
return super().vary_on() + ['my_parameter']
Otherwise, you must override `variations`:
@classmethod
def variations(cls):
return [{'my_parameter': value, **other} for value, other in
itertools.product([1, 2, 3], super().variations())]
Overriding either `vary_on` or `variations` allows `embuilder.py` to know all
possible variations so it can build all of them.
You then need to modify `get_default_variation` to detect the correct value
for your new parameter based on the settings:
@classmethod
def get_default_variation(cls, **kwargs):
return super().get_default_variation(my_parameter=shared.Settings.MY_PARAMETER, **kwargs)
This allows the correct variation of the library to be selected when building
code with Emscripten.
"""
# The simple name of the library. When linking, this is the name to use to
# automatically get the correct version of the library.
# This should only be overridden in a concrete library class, e.g. libc,
# and left as None in an abstract library class, e.g. MTLibrary.
name = None
# A list of simple names of other libraries that this one depends on.
# For dynamic values, override `get_depends()` instead.
depends = []
# A set of symbols that this library exports. This will be set with a set
# returned by `read_symbols`.
symbols = set()
# A list of symbols that must be exported to keep the JavaScript
# dependencies of this library working.
js_depends = []
# Set to true to prevent EMCC_FORCE_STDLIBS from linking this library.
never_force = False
# The C compile executable to use. You can override this to shared.EMXX for C++.
emcc = shared.EMCC
# A list of flags to pass to emcc.
# The flags for the parent class are automatically inherited.
cflags = ['-Werror']
# A list of directories to put in the include path when building.
# This is a list of tuples of path components.
# For example, to put system/lib/a and system/lib/b under the emscripten
# directory into the include path, you would write:
# includes = [('system', 'lib', 'a'), ('system', 'lib', 'b')]
# The include path of the parent class is automatically inherited.
includes = []
# By default, `get_files` looks for source files for this library under `src_dir`.
# It will either use the files listed in `src_files`, or use the glob pattern in
# `src_glob`. You may not specify both `src_files` and `src_glob`.
# When using `src_glob`, you can specify a list of files in `src_glob_exclude`
# to be excluded from the library.
# Alternatively, you can override `get_files` to use your own logic.
src_dir = None
src_files = None
src_glob = None
src_glob_exclude = None
# Whether to always generate WASM object files, even though WASM_OBJECT_FILES=0.
force_object_files = False
def __init__(self):
"""
Creates a variation of this library.
A variation is a specific combination of settings a library can have.
For example, libc++-mt-noexcept is a variation of libc++.
There might be only one variation of a library.
The constructor keyword arguments will define what variation to use.
Use the `variations` classmethod to get the list of all possible constructor
arguments for this library.
Use the `get_default_variation` classmethod to construct the variation
suitable for the current invocation of emscripten.
"""
if not self.name:
raise NotImplementedError('Cannot instantiate an abstract library')
# Read .symbols file if it exists. This first tries to read a symbols file
# with the same basename as the library file name (e.g.
# libc++-mt.symbols), and if there isn't one, it tries to read the 'default'
# symbols file, which does not have any optional suffixes (e.g.
# libc++.symbols).
basename = os.path.splitext(self.get_filename())[0]
if shared.Settings.WASM_BACKEND:
symbols_dir = shared.path_from_root('system', 'lib', 'symbols', 'wasm')
else:
symbols_dir = shared.path_from_root('system', 'lib', 'symbols', 'asmjs')
symbols_file = os.path.join(symbols_dir, basename + '.symbols')
default_symbols_file = os.path.join(symbols_dir, self.name + '.symbols')
if os.path.isfile(symbols_file):
self.symbols = read_symbols(symbols_file)
elif os.path.isfile(default_symbols_file):
self.symbols = read_symbols(default_symbols_file)
def in_temp(cls, *args):
"""Gets the path of a file in our temporary directory."""
return os.path.join(shared.get_emscripten_temp_dir(), *args)
def can_use(self):
"""
Whether this library can be used in the current environment.
For example, libmalloc would override this and return False
if the user requested no malloc.
"""
return True
def can_build(self):
"""
Whether this library can be built in the current environment.
Override this if, for example, the library can only be built on WASM backend.
"""
return True
def erase(self):
shared.Cache.erase_file(self.get_filename())
def get_path(self):
"""
Gets the cached path of this library.
This will trigger a build if this library is not in the cache.
"""
return shared.Cache.get(self.get_filename(), self.build)
def get_files(self):
"""
Gets a list of source files for this library.
Typically, you will use `src_dir`, `src_files`, `src_glob` and `src_glob_exclude`.
If those are insufficient to describe the files needed, you can override this method.
"""
if self.src_dir:
if self.src_files and self.src_glob:
raise Exception('Cannot use src_files and src_glob together')
if self.src_files:
return files_in_path(self.src_dir, self.src_files)
elif self.src_glob:
return glob_in_path(self.src_dir, self.src_glob, self.src_glob_exclude or ())
raise NotImplementedError()
def build_objects(self):
"""
Returns a list of compiled object files for this library.
By default, this builds all the source files returned by `self.get_files()`,
with the `cflags` returned by `self.get_cflags()`.
"""
commands = []
objects = []
cflags = self.get_cflags()
for src in self.get_files():
o = self.in_temp(os.path.basename(src) + '.o')
commands.append([shared.PYTHON, self.emcc, '-c', src, '-o', o] + cflags)
objects.append(o)
run_commands(commands)
return objects
def build(self):
"""Builds the library and returns the path to the file."""
out_filename = self.in_temp(self.get_filename())
create_lib(out_filename, self.build_objects())
return out_filename
@classmethod
def _inherit_list(cls, attr):
# Some properties, like cflags and includes, make more sense to inherit
# via concatenation than replacement.
result = []
for item in cls.__mro__[::-1]:
# Using __dict__ to avoid inheritance
result += item.__dict__.get(attr, [])
return result
def get_cflags(self):
"""
Returns the list of flags to pass to emcc when building this variation
of the library.
Override and add any flags as needed to handle new variations.
"""
cflags = self._inherit_list('cflags')
cflags += get_cflags(force_object_files=self.force_object_files)
if self.includes:
cflags += ['-I' + shared.path_from_root(*path) for path in self._inherit_list('includes')]
return cflags
def get_base_name_prefix(self):
"""
Returns the base name of the library without any suffixes.
"""
return self.name
def get_base_name(self):
"""
Returns the base name of the library file.
This will include suffixes such as -mt, but will not include a file extension.
"""
return self.get_base_name_prefix()
def get_ext(self):
"""
Return the appropriate file extension for this library.
"""
return static_library_ext()
def get_filename(self):
"""
Return the full name of the library file, including the file extension.
"""
return self.get_base_name() + self.get_ext()
def get_depends(self):
"""
Return a list of simple names of libraries that this library depends on.
This is the dynamic version of `depends`.
"""
return self.depends
@classmethod
def vary_on(cls):
"""
Returns a list of strings that are the names of boolean constructor
arguments that define the variations of this library.
This is used by the default implementation of `cls.variations()` to generate
every possible combination of boolean values to pass to these arguments.
"""
return []
@classmethod
def variations(cls):
"""
Returns a list of keyword arguments to pass to the constructor to create
every possible variation of this library.
By default, this is every possible combination of boolean values to pass
to the list of arguments returned by `vary_on`, but you can override
the behaviour.
"""
vary_on = cls.vary_on()
return [dict(zip(vary_on, toggles)) for toggles in
itertools.product([False, True], repeat=len(vary_on))]
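# For example, a library whose vary_on() returns ['is_mt'] yields
#   [{'is_mt': False}, {'is_mt': True}]
# and one returning ['is_mt', 'is_noexcept'] yields the four boolean combinations.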
@classmethod
def get_default_variation(cls, **kwargs):
"""
Construct the variation suitable for the current invocation of emscripten.
Subclasses should pass the keyword arguments they introduce to the
superclass version, and propagate **kwargs. The base class collects
all the keyword arguments and creates the instance.
"""
return cls(**kwargs)
@classmethod
def get_inheritance_tree(cls):
"""Returns all the classes in the inheritance tree of the current class."""
yield cls
for subclass in cls.__subclasses__():
for subclass in subclass.get_inheritance_tree():
yield subclass
@classmethod
def get_all_variations(cls):
"""
Gets all the variations of libraries in the inheritance tree of the current
library.
Calling Library.get_all_variations() returns the variations of ALL libraries
that can be built as a dictionary of variation names to Library objects.
"""
result = {}
for library in cls.get_inheritance_tree():
if library.name:
for flags in library.variations():
variation = library(**flags)
if variation.can_build():
result[variation.get_base_name()] = variation
return result
@classmethod
def get_usable_variations(cls):
"""
Gets all libraries suitable for the current invocation of emscripten.
This returns a dictionary of simple names to Library objects.
"""
result = {}
for subclass in cls.get_inheritance_tree():
if subclass.name:
library = subclass.get_default_variation()
if library.can_build() and library.can_use():
result[subclass.name] = library
return result
class MTLibrary(Library):
def __init__(self, **kwargs):
self.is_mt = kwargs.pop('is_mt')
super(MTLibrary, self).__init__(**kwargs)
def get_cflags(self):
cflags = super(MTLibrary, self).get_cflags()
if self.is_mt:
cflags += ['-s', 'USE_PTHREADS=1', '-DUSE_THREADS']
return cflags
def get_base_name(self):
name = super(MTLibrary, self).get_base_name()
if self.is_mt:
name += '-mt'
return name
@classmethod
def vary_on(cls):
return super(MTLibrary, cls).vary_on() + ['is_mt']
@classmethod
def get_default_variation(cls, **kwargs):
return super(MTLibrary, cls).get_default_variation(is_mt=shared.Settings.USE_PTHREADS, **kwargs)
class NoExceptLibrary(Library):
def __init__(self, **kwargs):
self.is_noexcept = kwargs.pop('is_noexcept')
super(NoExceptLibrary, self).__init__(**kwargs)
def get_cflags(self):
cflags = super(NoExceptLibrary, self).get_cflags()
if self.is_noexcept:
cflags += ['-fno-exceptions']
else:
cflags += ['-s', 'DISABLE_EXCEPTION_CATCHING=0']
return cflags
def get_base_name(self):
name = super(NoExceptLibrary, self).get_base_name()
if self.is_noexcept:
name += '-noexcept'
return name
@classmethod
def vary_on(cls):
return super(NoExceptLibrary, cls).vary_on() + ['is_noexcept']
@classmethod
def get_default_variation(cls, **kwargs):
return super(NoExceptLibrary, cls).get_default_variation(is_noexcept=shared.Settings.DISABLE_EXCEPTION_CATCHING, **kwargs)
class MuslInternalLibrary(Library):
includes = [
['system', 'lib', 'libc', 'musl', 'src', 'internal'],
['system', 'lib', 'libc', 'musl', 'arch', 'js'],
]
cflags = [
'-D_XOPEN_SOURCE=700',
'-Wno-unused-result', # system call results are often ignored in musl, and in wasi that warns
]
class AsanInstrumentedLibrary(Library):
def __init__(self, **kwargs):
self.is_asan = kwargs.pop('is_asan', False)
super(AsanInstrumentedLibrary, self).__init__(**kwargs)
def get_cflags(self):
cflags = super(AsanInstrumentedLibrary, self).get_cflags()
if self.is_asan:
cflags += ['-fsanitize=address']
return cflags
def get_base_name(self):
name = super(AsanInstrumentedLibrary, self).get_base_name()
if self.is_asan:
name += '-asan'
return name
@classmethod
def vary_on(cls):
vary_on = super(AsanInstrumentedLibrary, cls).vary_on()
if shared.Settings.WASM_BACKEND:
vary_on += ['is_asan']
return vary_on
@classmethod
def get_default_variation(cls, **kwargs):
return super(AsanInstrumentedLibrary, cls).get_default_variation(is_asan=shared.Settings.USE_ASAN, **kwargs)
class CXXLibrary(Library):
emcc = shared.EMXX
class NoBCLibrary(Library):
# Some libraries cannot be compiled as .bc files. This is because .bc files will link in every
# object in the library. While the optimizer will readily optimize out most of the unused
# functions, things like global constructors that are linked in cannot be optimized out, even
# though they are not actually needed. If we use .a files for such libraries, only the object
# files, and by extension, their contained global constructors, that are actually needed will be
# linked in.
def get_ext(self):
return '.a'
class libcompiler_rt(Library):
name = 'libcompiler_rt'
# compiler_rt files can't currently be part of LTO although we are hoping to remove this
# restriction soon: https://reviews.llvm.org/D71738
force_object_files = True
cflags = ['-O2', '-fno-builtin']
src_dir = ['system', 'lib', 'compiler-rt', 'lib', 'builtins']
if shared.Settings.WASM_BACKEND:
filelist = shared.path_from_root('system', 'lib', 'compiler-rt', 'filelist.txt')
src_files = open(filelist).read().splitlines()
src_files.append(shared.path_from_root('system', 'lib', 'compiler-rt', 'extras.c'))
else:
src_files = ['divdc3.c', 'divsc3.c', 'muldc3.c', 'mulsc3.c']
class libc(AsanInstrumentedLibrary, MuslInternalLibrary, MTLibrary):
name = 'libc'
depends = ['libcompiler_rt']
# Without -fno-builtin, LLVM can optimize away or convert calls to library
# functions to something else based on assumptions that they behave exactly
# like the standard library. This can cause unexpected bugs when we use our
# custom standard library. The same for other libc/libm builds.
cflags = ['-Os', '-fno-builtin']
# Hide several musl warnings that produce a lot of spam to unit test build
# server logs. TODO: When updating musl the next time, feel free to recheck
# which of their warnings might have been fixed, and which ones of these could
# be cleaned up.
cflags += ['-Wno-return-type', '-Wno-parentheses', '-Wno-ignored-attributes',
'-Wno-shift-count-overflow', '-Wno-shift-negative-value',
'-Wno-dangling-else', '-Wno-unknown-pragmas',
'-Wno-shift-op-parentheses', '-Wno-string-plus-int',
'-Wno-logical-op-parentheses', '-Wno-bitwise-op-parentheses',
'-Wno-visibility', '-Wno-pointer-sign', '-Wno-absolute-value',
'-Wno-empty-body']
def get_files(self):
libc_files = []
musl_srcdir = shared.path_from_root('system', 'lib', 'libc', 'musl', 'src')
# musl modules
blacklist = [
'ipc', 'passwd', 'thread', 'signal', 'sched', 'ipc', 'time', 'linux',
'aio', 'exit', 'legacy', 'mq', 'process', 'search', 'setjmp', 'env',
'ldso', 'conf'
]
# individual files
blacklist += [
'memcpy.c', 'memset.c', 'memmove.c', 'getaddrinfo.c', 'getnameinfo.c',
'inet_addr.c', 'res_query.c', 'res_querydomain.c', 'gai_strerror.c',
'proto.c', 'gethostbyaddr.c', 'gethostbyaddr_r.c', 'gethostbyname.c',
'gethostbyname2_r.c', 'gethostbyname_r.c', 'gethostbyname2.c',
'usleep.c', 'alarm.c', 'syscall.c', '_exit.c', 'popen.c',
'getgrouplist.c', 'initgroups.c', 'timer_create.c',
'faccessat.c',
]
blacklist += LIBC_SOCKETS
# individual math files
blacklist += [
'abs.c', 'cos.c', 'cosf.c', 'cosl.c', 'sin.c', 'sinf.c', 'sinl.c',
'tan.c', 'tanf.c', 'tanl.c', 'acos.c', 'acosf.c', 'acosl.c', 'asin.c',
'asinf.c', 'asinl.c', 'atan.c', 'atanf.c', 'atanl.c', 'atan2.c',
'atan2f.c', 'atan2l.c', 'exp.c', 'expf.c', 'expl.c', 'log.c', 'logf.c',
'logl.c', 'sqrtl.c', 'round.c', 'roundf.c',
'fabsl.c', 'ceill.c', 'floorl.c', 'pow.c', 'powf.c', 'powl.c',
]
if self.is_asan:
# With ASan, we need to use specialized implementations of certain libc
# functions that do not rely on undefined behavior, for example, reading
# multiple bytes at once as an int and overflowing a buffer.
# Otherwise, ASan will catch these errors and terminate the program.
blacklist += ['strcpy.c', 'memchr.c', 'strchrnul.c', 'strlen.c',
'aligned_alloc.c', 'fcntl.c']
libc_files += [
shared.path_from_root('system', 'lib', 'libc', 'emscripten_asan_strcpy.c'),
shared.path_from_root('system', 'lib', 'libc', 'emscripten_asan_memchr.c'),
shared.path_from_root('system', 'lib', 'libc', 'emscripten_asan_strchrnul.c'),
shared.path_from_root('system', 'lib', 'libc', 'emscripten_asan_strlen.c'),
shared.path_from_root('system', 'lib', 'libc', 'emscripten_asan_fcntl.c'),
]
if shared.Settings.WASM_BACKEND:
# With the wasm backend these are included in wasm_libc_rt instead
blacklist += [os.path.basename(f) for f in get_wasm_libc_rt_files()]
else:
blacklist += ['rintf.c', 'ceil.c', 'ceilf.c', 'floor.c', 'floorf.c',
'fabs.c', 'fabsf.c', 'sqrt.c', 'sqrtf.c']
blacklist = set(blacklist)
# TODO: consider using more math code from musl, doing so makes box2d faster
for dirpath, dirnames, filenames in os.walk(musl_srcdir):
for f in filenames:
if f.endswith('.c'):
if f in blacklist:
continue
dir_parts = os.path.split(dirpath)
cancel = False
for part in dir_parts:
if part in blacklist:
cancel = True
break
if not cancel:
libc_files.append(os.path.join(musl_srcdir, dirpath, f))
if shared.Settings.WASM_BACKEND:
# See libc_extras below
libc_files.append(shared.path_from_root('system', 'lib', 'libc', 'extras.c'))
# Include all the getenv stuff with the wasm backend. With fastcomp we
# still use JS because libc is a .bc file and we don't want to have a
# global constructor there for __environ, which would mean it is always
# included.
libc_files += files_in_path(
path_components=['system', 'lib', 'libc', 'musl', 'src', 'env'],
filenames=['__environ.c', 'getenv.c', 'putenv.c', 'setenv.c', 'unsetenv.c'])
libc_files.append(shared.path_from_root('system', 'lib', 'libc', 'wasi-helpers.c'))
return libc_files
def get_depends(self):
depends = super(libc, self).get_depends()
if shared.Settings.WASM:
return depends + ['libc-wasm']
return depends
class libsockets(MuslInternalLibrary, MTLibrary):
name = 'libsockets'
symbols = set()
cflags = ['-Os', '-fno-builtin']
def get_files(self):
network_dir = shared.path_from_root('system', 'lib', 'libc', 'musl', 'src', 'network')
return [os.path.join(network_dir, x) for x in LIBC_SOCKETS]
class libsockets_proxy(MuslInternalLibrary, MTLibrary):
name = 'libsockets_proxy'
symbols = set()
cflags = ['-Os']
def get_files(self):
return [shared.path_from_root('system', 'lib', 'websocket', 'websocket_to_posix_socket.cpp'),
shared.path_from_root('system', 'lib', 'libc', 'musl', 'src', 'network', 'inet_addr.c')]
class libc_wasm(MuslInternalLibrary):
name = 'libc-wasm'
cflags = ['-O2', '-fno-builtin']
src_dir = ['system', 'lib', 'libc', 'musl', 'src', 'math']
src_files = ['cos.c', 'cosf.c', 'cosl.c', 'sin.c', 'sinf.c', 'sinl.c',
'tan.c', 'tanf.c', 'tanl.c', 'acos.c', 'acosf.c', 'acosl.c',
'asin.c', 'asinf.c', 'asinl.c', 'atan.c', 'atanf.c', 'atanl.c',
'atan2.c', 'atan2f.c', 'atan2l.c', 'exp.c', 'expf.c', 'expl.c',
'log.c', 'logf.c', 'logl.c', 'pow.c', 'powf.c', 'powl.c']
def can_use(self):
# if building to wasm, we need more math code, since we have fewer builtins
return shared.Settings.WASM
class crt1(MuslInternalLibrary):
name = 'crt1'
cflags = ['-O2']
src_dir = ['system', 'lib', 'libc']
src_files = ['crt1.c']
force_object_files = True
def get_ext(self):
return '.o'
def can_use(self):
return shared.Settings.STANDALONE_WASM
def can_build(self):
return shared.Settings.WASM_BACKEND
class libc_extras(MuslInternalLibrary):
"""This library is separate from libc itself for fastcomp only so that the
constructor it contains can be DCE'd. With the wasm backend, libc is a .a
file, so object file granularity applies.
"""
name = 'libc-extras'
src_dir = ['system', 'lib', 'libc']
src_files = ['extras.c']
def can_build(self):
return not shared.Settings.WASM_BACKEND
class libcxxabi(CXXLibrary, NoExceptLibrary, MTLibrary):
name = 'libc++abi'
depends = ['libc']
cflags = ['-std=c++11', '-Oz', '-D_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS']
def get_cflags(self):
cflags = super(libcxxabi, self).get_cflags()
cflags.append('-DNDEBUG')
if not self.is_mt:
cflags.append('-D_LIBCXXABI_HAS_NO_THREADS')
if self.is_noexcept:
cflags.append('-D_LIBCXXABI_NO_EXCEPTIONS')
return cflags
def get_files(self):
filenames = [
'abort_message.cpp',
'cxa_aux_runtime.cpp',
'cxa_default_handlers.cpp',
'cxa_demangle.cpp',
'cxa_exception_storage.cpp',
'cxa_guard.cpp',
'cxa_handlers.cpp',
'cxa_virtual.cpp',
'fallback_malloc.cpp',
'stdlib_new_delete.cpp',
'stdlib_exception.cpp',
'stdlib_stdexcept.cpp',
'stdlib_typeinfo.cpp',
'private_typeinfo.cpp'
]
if self.is_noexcept:
filenames += ['cxa_noexception.cpp']
return files_in_path(
path_components=['system', 'lib', 'libcxxabi', 'src'],
filenames=filenames)
class libcxx(NoBCLibrary, CXXLibrary, NoExceptLibrary, MTLibrary):
name = 'libc++'
depends = ['libc++abi']
cflags = ['-std=c++11', '-DLIBCXX_BUILDING_LIBCXXABI=1', '-D_LIBCPP_BUILDING_LIBRARY', '-Oz',
'-D_LIBCPP_DISABLE_VISIBILITY_ANNOTATIONS']
src_dir = ['system', 'lib', 'libcxx']
src_files = [
'algorithm.cpp',
'any.cpp',
'bind.cpp',
'chrono.cpp',
'condition_variable.cpp',
'debug.cpp',
'exception.cpp',
'future.cpp',
'functional.cpp',
'hash.cpp',
'ios.cpp',
'iostream.cpp',
'locale.cpp',
'memory.cpp',
'mutex.cpp',
'new.cpp',
'optional.cpp',
'random.cpp',
'regex.cpp',
'shared_mutex.cpp',
'stdexcept.cpp',
'string.cpp',
'strstream.cpp',
'system_error.cpp',
'thread.cpp',
'typeinfo.cpp',
'utility.cpp',
'valarray.cpp',
'variant.cpp',
'vector.cpp',
os.path.join('experimental', 'memory_resource.cpp'),
os.path.join('filesystem', 'directory_iterator.cpp'),
os.path.join('filesystem', 'int128_builtins.cpp'),
os.path.join('filesystem', 'operations.cpp')
]
class libmalloc(MTLibrary, NoBCLibrary):
name = 'libmalloc'
cflags = ['-O2', '-fno-builtin']
def __init__(self, **kwargs):
self.malloc = kwargs.pop('malloc')
if self.malloc not in ('dlmalloc', 'emmalloc', 'none'):
raise Exception('malloc must be one of "emmalloc", "dlmalloc" or "none", see settings.js')
self.is_debug = kwargs.pop('is_debug')
self.use_errno = kwargs.pop('use_errno')
self.is_tracing = kwargs.pop('is_tracing')
self.use_64bit_ops = kwargs.pop('use_64bit_ops')
super(libmalloc, self).__init__(**kwargs)
def get_files(self):
malloc = shared.path_from_root('system', 'lib', {
'dlmalloc': 'dlmalloc.c', 'emmalloc': 'emmalloc.cpp'
}[self.malloc])
sbrk = shared.path_from_root('system', 'lib', 'sbrk.c')
return [malloc, sbrk]
def get_cflags(self):
cflags = super(libmalloc, self).get_cflags()
if self.is_debug:
cflags += ['-UNDEBUG', '-DDLMALLOC_DEBUG']
# TODO: consider adding -DEMMALLOC_DEBUG, but that is quite slow
else:
cflags += ['-DNDEBUG']
if not self.use_errno:
cflags += ['-DMALLOC_FAILURE_ACTION=', '-DEMSCRIPTEN_NO_ERRNO']
if self.is_tracing:
cflags += ['--tracing']
if self.use_64bit_ops:
cflags += ['-DEMMALLOC_USE_64BIT_OPS=1']
return cflags
def get_base_name_prefix(self):
return 'lib%s' % self.malloc
def get_base_name(self):
name = super(libmalloc, self).get_base_name()
if self.is_debug:
name += '-debug'
if not self.use_errno:
# emmalloc doesn't actually use errno, but it's easier to build it again
name += '-noerrno'
if self.is_tracing:
name += '-tracing'
if self.use_64bit_ops:
name += '-64bit'
return name
def can_use(self):
return shared.Settings.MALLOC != 'none'
@classmethod
def vary_on(cls):
return super(libmalloc, cls).vary_on() + ['is_debug', 'use_errno', 'is_tracing', 'use_64bit_ops']
@classmethod
def get_default_variation(cls, **kwargs):
return super(libmalloc, cls).get_default_variation(
malloc=shared.Settings.MALLOC,
is_debug=shared.Settings.DEBUG_LEVEL >= 3,
use_errno=shared.Settings.SUPPORT_ERRNO,
is_tracing=shared.Settings.EMSCRIPTEN_TRACING,
use_64bit_ops=shared.Settings.MALLOC == 'emmalloc' and (shared.Settings.WASM == 1 or (shared.Settings.WASM_BACKEND and shared.Settings.WASM2JS == 0)),
**kwargs
)
@classmethod
def variations(cls):
combos = super(libmalloc, cls).variations()
return ([dict(malloc='dlmalloc', **combo) for combo in combos if not combo['use_64bit_ops']] +
[dict(malloc='emmalloc', **combo) for combo in combos])
class libal(Library):
name = 'libal'
depends = ['libc']
cflags = ['-Os']
src_dir = ['system', 'lib']
src_files = ['al.c']
class libgl(MTLibrary):
name = 'libgl'
depends = ['libc']
src_dir = ['system', 'lib', 'gl']
src_glob = '*.c'
cflags = ['-Oz']
def __init__(self, **kwargs):
self.is_legacy = kwargs.pop('is_legacy')
self.is_webgl2 = kwargs.pop('is_webgl2')
self.is_ofb = kwargs.pop('is_ofb')
self.is_full_es3 = kwargs.pop('is_full_es3')
super(libgl, self).__init__(**kwargs)
def get_base_name(self):
name = super(libgl, self).get_base_name()
if self.is_legacy:
name += '-emu'
if self.is_webgl2:
name += '-webgl2'
if self.is_ofb:
name += '-ofb'
if self.is_full_es3:
name += '-full_es3'
return name
def get_cflags(self):
cflags = super(libgl, self).get_cflags()
if self.is_legacy:
cflags += ['-DLEGACY_GL_EMULATION=1']
if self.is_webgl2:
cflags += ['-DMAX_WEBGL_VERSION=2', '-s', 'MAX_WEBGL_VERSION=2']
if self.is_ofb:
cflags += ['-D__EMSCRIPTEN_OFFSCREEN_FRAMEBUFFER__']
if self.is_full_es3:
cflags += ['-D__EMSCRIPTEN_FULL_ES3__']
return cflags
@classmethod
def vary_on(cls):
return super(libgl, cls).vary_on() + ['is_legacy', 'is_webgl2', 'is_ofb', 'is_full_es3']
@classmethod
def get_default_variation(cls, **kwargs):
return super(libgl, cls).get_default_variation(
is_legacy=shared.Settings.LEGACY_GL_EMULATION,
is_webgl2=shared.Settings.MAX_WEBGL_VERSION >= 2,
is_ofb=shared.Settings.OFFSCREEN_FRAMEBUFFER,
is_full_es3=shared.Settings.FULL_ES3,
**kwargs
)
class libembind(CXXLibrary):
name = 'libembind'
cflags = ['-std=c++11']
depends = ['libc++abi']
never_force = True
def __init__(self, **kwargs):
self.with_rtti = kwargs.pop('with_rtti', False)
super(libembind, self).__init__(**kwargs)
def get_cflags(self):
cflags = super(libembind, self).get_cflags()
if not self.with_rtti:
cflags += ['-fno-rtti', '-DEMSCRIPTEN_HAS_UNBOUND_TYPE_NAMES=0']
return cflags
@classmethod
def vary_on(cls):
return super(libembind, cls).vary_on() + ['with_rtti']
def get_base_name(self):
name = super(libembind, self).get_base_name()
if self.with_rtti:
name += '-rtti'
return name
def get_files(self):
return [shared.path_from_root('system', 'lib', 'embind', 'bind.cpp')]
@classmethod
def get_default_variation(cls, **kwargs):
return super(libembind, cls).get_default_variation(with_rtti=shared.Settings.USE_RTTI, **kwargs)
class libfetch(CXXLibrary, MTLibrary):
name = 'libfetch'
depends = ['libc++abi']
never_force = True
def get_files(self):
return [shared.path_from_root('system', 'lib', 'fetch', 'emscripten_fetch.cpp')]
class libasmfs(CXXLibrary, MTLibrary):
name = 'libasmfs'
depends = ['libc++abi']
never_force = True
def get_files(self):
return [shared.path_from_root('system', 'lib', 'fetch', 'asmfs.cpp')]
def can_build(self):
# ASMFS is looking for a maintainer
# https://github.com/emscripten-core/emscripten/issues/9534
return False
class libhtml5(Library):
name = 'libhtml5'
cflags = ['-Oz']
src_dir = ['system', 'lib', 'html5']
src_glob = '*.c'
class libpthread(AsanInstrumentedLibrary, MuslInternalLibrary, MTLibrary):
name = 'libpthread'
depends = ['libc']
cflags = ['-O2']
def get_files(self):
if self.is_mt:
files = files_in_path(
path_components=['system', 'lib', 'libc', 'musl', 'src', 'thread'],
filenames=[
'pthread_attr_destroy.c', 'pthread_condattr_setpshared.c',
'pthread_mutex_lock.c', 'pthread_spin_destroy.c', 'pthread_attr_get.c',
'pthread_cond_broadcast.c', 'pthread_mutex_setprioceiling.c',
'pthread_spin_init.c', 'pthread_attr_init.c', 'pthread_cond_destroy.c',
'pthread_mutex_timedlock.c', 'pthread_spin_lock.c',
'pthread_attr_setdetachstate.c', 'pthread_cond_init.c',
'pthread_mutex_trylock.c', 'pthread_spin_trylock.c',
'pthread_attr_setguardsize.c', 'pthread_cond_signal.c',
'pthread_mutex_unlock.c', 'pthread_spin_unlock.c',
'pthread_attr_setinheritsched.c', 'pthread_cond_timedwait.c',
'pthread_once.c', 'sem_destroy.c', 'pthread_attr_setschedparam.c',
'pthread_cond_wait.c', 'pthread_rwlockattr_destroy.c', 'sem_getvalue.c',
'pthread_attr_setschedpolicy.c', 'pthread_equal.c', 'pthread_rwlockattr_init.c',
'sem_init.c', 'pthread_attr_setscope.c', 'pthread_getspecific.c',
'pthread_rwlockattr_setpshared.c', 'sem_open.c', 'pthread_attr_setstack.c',
'pthread_key_create.c', 'pthread_rwlock_destroy.c', 'sem_post.c',
'pthread_attr_setstacksize.c', 'pthread_mutexattr_destroy.c',
'pthread_rwlock_init.c', 'sem_timedwait.c', 'pthread_barrierattr_destroy.c',
'pthread_mutexattr_init.c', 'pthread_rwlock_rdlock.c', 'sem_trywait.c',
'pthread_barrierattr_init.c', 'pthread_mutexattr_setprotocol.c',
'pthread_rwlock_timedrdlock.c', 'sem_unlink.c',
'pthread_barrierattr_setpshared.c', 'pthread_mutexattr_setpshared.c',
'pthread_rwlock_timedwrlock.c', 'sem_wait.c', 'pthread_barrier_destroy.c',
'pthread_mutexattr_setrobust.c', 'pthread_rwlock_tryrdlock.c',
'__timedwait.c', 'pthread_barrier_init.c', 'pthread_mutexattr_settype.c',
'pthread_rwlock_trywrlock.c', 'vmlock.c', 'pthread_barrier_wait.c',
'pthread_mutex_consistent.c', 'pthread_rwlock_unlock.c', '__wait.c',
'pthread_condattr_destroy.c', 'pthread_mutex_destroy.c',
'pthread_rwlock_wrlock.c', 'pthread_condattr_init.c',
'pthread_mutex_getprioceiling.c', 'pthread_setcanceltype.c',
'pthread_condattr_setclock.c', 'pthread_mutex_init.c',
'pthread_setspecific.c', 'pthread_setcancelstate.c'
])
files += [shared.path_from_root('system', 'lib', 'pthread', 'library_pthread.c')]
if shared.Settings.WASM_BACKEND:
files += [shared.path_from_root('system', 'lib', 'pthread', 'library_pthread_wasm.c')]
else:
files += [shared.path_from_root('system', 'lib', 'pthread', 'library_pthread_asmjs.c')]
return files
else:
return [shared.path_from_root('system', 'lib', 'pthread', 'library_pthread_stub.c')]
def get_base_name_prefix(self):
return 'libpthread' if self.is_mt else 'libpthread_stub'
class CompilerRTWasmLibrary(Library):
cflags = ['-O2', '-fno-builtin']
# compiler_rt files can't currently be part of LTO although we are hoping to remove this
# restriction soon: https://reviews.llvm.org/D71738
force_object_files = True
def can_build(self):
return shared.Settings.WASM_BACKEND
class libc_rt_wasm(AsanInstrumentedLibrary, CompilerRTWasmLibrary, MuslInternalLibrary):
name = 'libc_rt_wasm'
def get_files(self):
return get_wasm_libc_rt_files()
class libubsan_minimal_rt_wasm(CompilerRTWasmLibrary, MTLibrary):
name = 'libubsan_minimal_rt_wasm'
never_force = True
includes = [['system', 'lib', 'compiler-rt', 'lib']]
src_dir = ['system', 'lib', 'compiler-rt', 'lib', 'ubsan_minimal']
src_files = ['ubsan_minimal_handlers.cpp']
class libsanitizer_common_rt_wasm(CompilerRTWasmLibrary, MTLibrary):
name = 'libsanitizer_common_rt_wasm'
depends = ['libc++abi']
includes = [['system', 'lib', 'libc', 'musl', 'src', 'internal']]
js_depends = ['memalign', 'emscripten_builtin_memalign', '__data_end', '__heap_base']
never_force = True
cflags = ['-std=c++11']
src_dir = ['system', 'lib', 'compiler-rt', 'lib', 'sanitizer_common']
src_glob = '*.cc'
src_glob_exclude = ['sanitizer_common_nolibc.cc']
class SanitizerLibrary(CompilerRTWasmLibrary, MTLibrary):
depends = ['libsanitizer_common_rt_wasm']
never_force = True
includes = [['system', 'lib', 'compiler-rt', 'lib']]
cflags = ['-std=c++11']
src_glob = '*.cc'
class libubsan_rt_wasm(SanitizerLibrary):
name = 'libubsan_rt_wasm'
js_depends = ['emscripten_builtin_malloc', 'emscripten_builtin_free']
cflags = ['-DUBSAN_CAN_USE_CXXABI']
src_dir = ['system', 'lib', 'compiler-rt', 'lib', 'ubsan']
class liblsan_common_rt_wasm(SanitizerLibrary):
name = 'liblsan_common_rt_wasm'
js_depends = ['__global_base']
src_dir = ['system', 'lib', 'compiler-rt', 'lib', 'lsan']
src_glob = 'lsan_common*.cc'
class liblsan_rt_wasm(SanitizerLibrary):
name = 'liblsan_rt_wasm'
depends = ['liblsan_common_rt_wasm']
js_depends = ['emscripten_builtin_malloc', 'emscripten_builtin_free']
src_dir = ['system', 'lib', 'compiler-rt', 'lib', 'lsan']
src_glob_exclude = ['lsan_common.cc', 'lsan_common_mac.cc', 'lsan_common_linux.cc',
'lsan_common_emscripten.cc']
class libasan_rt_wasm(SanitizerLibrary):
name = 'libasan_rt_wasm'
depends = ['liblsan_common_rt_wasm', 'libubsan_rt_wasm']
src_dir = ['system', 'lib', 'compiler-rt', 'lib', 'asan']
# This library is used when STANDALONE_WASM is set. In that mode, we don't
# want to depend on JS, and so this library contains implementations of
# things that we'd normally do in JS. That includes some general things
# as well as some additional musl components (that normally we reimplement
# in JS as it's more efficient that way).
class libstandalonewasm(MuslInternalLibrary):
name = 'libstandalonewasm'
cflags = ['-Os']
src_dir = ['system', 'lib']
def __init__(self, **kwargs):
self.is_mem_grow = kwargs.pop('is_mem_grow')
super(libstandalonewasm, self).__init__(**kwargs)
def get_base_name(self):
name = super(libstandalonewasm, self).get_base_name()
if self.is_mem_grow:
name += '-memgrow'
return name
def get_cflags(self):
cflags = super(libstandalonewasm, self).get_cflags()
cflags += ['-DNDEBUG']
if self.is_mem_grow:
cflags += ['-D__EMSCRIPTEN_MEMORY_GROWTH__=1']
return cflags
@classmethod
def vary_on(cls):
return super(libstandalonewasm, cls).vary_on() + ['is_mem_grow']
@classmethod
def get_default_variation(cls, **kwargs):
return super(libstandalonewasm, cls).get_default_variation(
is_mem_grow=shared.Settings.ALLOW_MEMORY_GROWTH,
**kwargs
)
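# Illustrative note (added for clarity, not from the original source): with
# ALLOW_MEMORY_GROWTH enabled, the default variation of this library is built
# with -D__EMSCRIPTEN_MEMORY_GROWTH__=1 and carries a '-memgrow' suffix in its
# base name (e.g. 'libstandalonewasm-memgrow'), so both variants can coexist in
# the cache.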
def get_files(self):
base_files = files_in_path(
path_components=['system', 'lib'],
filenames=['standalone_wasm.c'])
# It is more efficient to use JS methods for time, normally.
time_files = files_in_path(
path_components=['system', 'lib', 'libc', 'musl', 'src', 'time'],
filenames=['strftime.c',
'__month_to_secs.c',
'__tm_to_secs.c',
'__tz.c',
'__year_to_secs.c'])
# It is more efficient to use JS for __assert_fail, as it avoids always
# including fprintf etc.
exit_files = files_in_path(
path_components=['system', 'lib', 'libc', 'musl', 'src', 'exit'],
filenames=['assert.c'])
return base_files + time_files + exit_files
def can_build(self):
return shared.Settings.WASM_BACKEND
# If main() is not in EXPORTED_FUNCTIONS, it may be dce'd out. This can be
# confusing, so issue a warning.
def warn_on_unexported_main(symbolses):
if '_main' not in shared.Settings.EXPORTED_FUNCTIONS:
for symbols in symbolses:
if 'main' in symbols.defs:
logger.warning('main() is in the input files, but "_main" is not in EXPORTED_FUNCTIONS, which means it may be eliminated as dead code. Export it if you want main() to run.')
return
def calculate(temp_files, in_temp, stdout_, stderr_, forced=[]):
global stdout, stderr
stdout = stdout_
stderr = stderr_
# Set of libraries to include on the link line, as opposed to `force` which
# is the set of libraries to force include (with --whole-archive).
always_include = set()
# Setting this will only use the forced libs in EMCC_FORCE_STDLIBS. This avoids spending time checking
# for unresolved symbols in your project files, which can speed up linking, but if you do not have
# the proper list of actually needed libraries, errors can occur. See below for how we must
# export all the symbols in deps_info when using this option.
only_forced = os.environ.get('EMCC_ONLY_FORCED_STDLIBS')
if only_forced:
temp_files = []
# Add in some hacks for js libraries. If a js lib depends on a symbol provided by a C library, it must be
# added here, because our deps go only one way (each library here is checked, then we check the next
# in order - libc++, libcxextra, etc. - and then we run the JS compiler and provide extra symbols from
# library*.js files). But we cannot then go back to the C libraries if a new dep was added!
# TODO: Move all __deps from src/library*.js to deps_info.json, and use that single source of info
# both here and in the JS compiler.
deps_info = json.loads(open(shared.path_from_root('src', 'deps_info.json')).read())
added = set()
def add_back_deps(need):
more = False
for ident, deps in deps_info.items():
if ident in need.undefs and ident not in added:
added.add(ident)
more = True
for dep in deps:
need.undefs.add(dep)
if shared.Settings.VERBOSE:
logger.debug('adding dependency on %s due to deps-info on %s' % (dep, ident))
shared.Settings.EXPORTED_FUNCTIONS.append(mangle_c_symbol_name(dep))
if more:
add_back_deps(need) # recurse to get deps of deps
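# Illustrative sketch (added for clarity; the entry below is hypothetical, not
# copied from the real deps_info.json): each entry maps a symbol whose JS
# implementation pulls in further C symbols to the list of those symbols, e.g.
#   {"some_js_func": ["malloc", "free"]}
# would mean that whenever some_js_func is undefined in the inputs, malloc and
# free are also added to the undefined set (and exported) before the system
# libraries are scanned.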
# Scan symbols
symbolses = shared.Building.parallel_llvm_nm([os.path.abspath(t) for t in temp_files])
warn_on_unexported_main(symbolses)
if len(symbolses) == 0:
class Dummy(object):
defs = set()
undefs = set()
symbolses.append(Dummy())
# depend on exported functions
for export in shared.Settings.EXPORTED_FUNCTIONS:
if shared.Settings.VERBOSE:
logger.debug('adding dependency on export %s' % export)
symbolses[0].undefs.add(demangle_c_symbol_name(export))
for symbols in symbolses:
add_back_deps(symbols)
# If we are only doing forced stdlibs, then we don't know the actual symbols we need,
# and must assume all of deps_info must be exported. Note that this might cause
# warnings on exports that do not exist.
if only_forced:
for key, value in deps_info.items():
for dep in value:
shared.Settings.EXPORTED_FUNCTIONS.append(mangle_c_symbol_name(dep))
always_include.add('libpthread')
if shared.Settings.MALLOC != 'none':
always_include.add('libmalloc')
if shared.Settings.WASM_BACKEND:
always_include.add('libcompiler_rt')
libs_to_link = []
already_included = set()
system_libs_map = Library.get_usable_variations()
system_libs = sorted(system_libs_map.values(), key=lambda lib: lib.name)
# Setting this in the environment will avoid checking dependencies and make
# building big projects a little faster. Provide 1 to include everything, or a
# comma-separated list of the libraries you want (libc++, etc.).
force = os.environ.get('EMCC_FORCE_STDLIBS')
if force == '1':
force = ','.join(name for name, lib in system_libs_map.items() if not lib.never_force)
force_include = set((force.split(',') if force else []) + forced)
if force_include:
logger.debug('forcing stdlibs: ' + str(force_include))
for lib in always_include:
assert lib in system_libs_map
for lib in force_include:
if lib not in system_libs_map:
shared.exit_with_error('invalid forced library: %s', lib)
def add_library(lib):
if lib.name in already_included:
return
already_included.add(lib.name)
logger.debug('including %s (%s)' % (lib.name, lib.get_filename()))
need_whole_archive = lib.name in force_include and lib.get_ext() == '.a'
libs_to_link.append((lib.get_path(), need_whole_archive))
# Recursively add dependencies
for d in lib.get_depends():
add_library(system_libs_map[d])
for d in lib.js_depends:
d = '_' + d
if d not in shared.Settings.EXPORTED_FUNCTIONS:
shared.Settings.EXPORTED_FUNCTIONS.append(d)
if shared.Settings.STANDALONE_WASM:
add_library(system_libs_map['crt1'])
# Go over libraries to figure out which we must include
for lib in system_libs:
if lib.name in already_included:
continue
force_this = lib.name in force_include
if not force_this and only_forced:
continue
include_this = force_this or lib.name in always_include
if not include_this:
need_syms = set()
has_syms = set()
for symbols in symbolses:
if shared.Settings.VERBOSE:
logger.debug('undefs: ' + str(symbols.undefs))
for library_symbol in lib.symbols:
if library_symbol in symbols.undefs:
need_syms.add(library_symbol)
if library_symbol in symbols.defs:
has_syms.add(library_symbol)
for haz in has_syms:
if haz in need_syms:
# remove symbols that are supplied by another of the inputs
need_syms.remove(haz)
if shared.Settings.VERBOSE:
logger.debug('considering %s: we need %s and have %s' % (lib.name, str(need_syms), str(has_syms)))
if not len(need_syms):
continue
# We need to build and link the library in
add_library(lib)
if shared.Settings.WASM_BACKEND:
add_library(system_libs_map['libc_rt_wasm'])
if shared.Settings.UBSAN_RUNTIME == 1:
add_library(system_libs_map['libubsan_minimal_rt_wasm'])
elif shared.Settings.UBSAN_RUNTIME == 2:
add_library(system_libs_map['libubsan_rt_wasm'])
if shared.Settings.USE_LSAN:
force_include.add('liblsan_rt_wasm')
add_library(system_libs_map['liblsan_rt_wasm'])
if shared.Settings.USE_ASAN:
force_include.add('libasan_rt_wasm')
add_library(system_libs_map['libasan_rt_wasm'])
if shared.Settings.STANDALONE_WASM:
add_library(system_libs_map['libstandalonewasm'])
if shared.Settings.PROXY_POSIX_SOCKETS:
add_library(system_libs_map['libsockets_proxy'])
else:
add_library(system_libs_map['libsockets'])
libs_to_link.sort(key=lambda x: x[0].endswith('.a')) # make sure to put .a files at the end.
# libc++abi and libc++ *static* linking is tricky. e.g. cxa_demangle.cpp disables c++
# exceptions, but since the string methods in the headers are *weakly* linked, then
# we might have exception-supporting versions of them from elsewhere, and if libc++abi
# is first then it would "win", breaking exception throwing from those string
# header methods. To avoid that, we link libc++abi last.
libs_to_link.sort(key=lambda x: x[0].endswith('libc++abi.bc'))
# Wrap libraries in --whole-archive, as needed. We need to do this last
# since otherwise the above sorting won't make sense.
ret = []
in_group = False
for name, need_whole_archive in libs_to_link:
if need_whole_archive and not in_group:
ret.append('--whole-archive')
in_group = True
if in_group and not need_whole_archive:
ret.append('--no-whole-archive')
in_group = False
ret.append(name)
if in_group:
ret.append('--no-whole-archive')
return ret
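# Illustrative note (added for clarity, not from the original source): the list
# returned above is a flat sequence of linker inputs in which forced archives are
# bracketed by the whole-archive flags, e.g. something like
#   ['--whole-archive', '<cache>/libembind.a', '--no-whole-archive', '<cache>/libc.a', ...]
# where the exact names and extensions depend on the backend and cache layout.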
class Ports(object):
"""emscripten-ports library management (https://github.com/emscripten-ports).
"""
@staticmethod
def get_lib_name(name):
return name + static_library_ext()
@staticmethod
def get_include_dir():
dirname = shared.Cache.get_path('include')
shared.safe_ensure_dirs(dirname)
return dirname
@staticmethod
def install_header_dir(src_dir, target=None):
if not target:
target = os.path.basename(src_dir)
dest = os.path.join(Ports.get_include_dir(), target)
shared.try_delete(dest)
logger.debug('installing headers: ' + dest)
shutil.copytree(src_dir, dest)
@staticmethod
def install_headers(src_dir, pattern="*.h", target=None):
logger.debug("install_headers")
dest = Ports.get_include_dir()
if target:
dest = os.path.join(dest, target)
shared.safe_ensure_dirs(dest)
matches = glob.glob(os.path.join(src_dir, pattern))
assert matches, "no headers found to install in %s" % src_dir
for f in matches:
logger.debug('installing: ' + os.path.join(dest, os.path.basename(f)))
shutil.copyfile(f, os.path.join(dest, os.path.basename(f)))
@staticmethod
def build_port(src_path, output_path, includes=[], flags=[], exclude_files=[], exclude_dirs=[]):
srcs = []
for root, dirs, files in os.walk(src_path, topdown=False):
if any((excluded in root) for excluded in exclude_dirs):
continue
for f in files:
ext = os.path.splitext(f)[1]
if ext in ('.c', '.cpp') and not any((excluded in f) for excluded in exclude_files):
srcs.append(os.path.join(root, f))
include_commands = ['-I' + src_path]
for include in includes:
include_commands.append('-I' + include)
commands = []
objects = []
for src in srcs:
obj = src + '.o'
commands.append([shared.PYTHON, shared.EMCC, '-c', src, '-O2', '-o', obj, '-w'] + include_commands + flags)
objects.append(obj)
run_commands(commands)
create_lib(output_path, objects)
return output_path
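# Illustrative usage sketch (hypothetical port code, not part of this module): a
# port's get() implementation would typically call something like
#   Ports.build_port(src_path=os.path.join(Ports.get_dir(), 'myport', 'src'),
#                    output_path=shared.Cache.get_path('libmyport.a'),
#                    flags=['-DMYPORT_STATIC'])
# and return the resulting archive path so that it gets linked in.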
@staticmethod
def run_commands(commands): # make easily available for port objects
run_commands(commands)
@staticmethod
def create_lib(libname, inputs): # make easily available for port objects
create_lib(libname, inputs)
@staticmethod
def get_dir():
dirname = os.environ.get('EM_PORTS') or os.path.expanduser(os.path.join('~', '.emscripten_ports'))
shared.safe_ensure_dirs(dirname)
return dirname
@staticmethod
def erase():
dirname = Ports.get_dir()
shared.try_delete(dirname)
if os.path.exists(dirname):
logger.warning('could not delete ports dir %s - try to delete it manually' % dirname)
@staticmethod
def get_build_dir():
return shared.Cache.get_path('ports-builds')
name_cache = set()
@staticmethod
def fetch_project(name, url, subdir, is_tarbz2=False, sha512hash=None):
# To compute the sha512 hash, run `curl URL | sha512sum`.
fullname = os.path.join(Ports.get_dir(), name)
# EMCC_LOCAL_PORTS: A hacky way to use a local directory for a port. This
# is not tested but can be useful for debugging
# changes to a port.
#
# if EMCC_LOCAL_PORTS is set, we use a local directory as our ports. This is useful
# for testing. This env var should be in format
# name=dir,name=dir
# e.g.
# sdl2=/home/username/dev/ports/SDL2
# so you could run
# EMCC_LOCAL_PORTS="sdl2=/home/alon/Dev/ports/SDL2" ./tests/runner.py browser.test_sdl2_mouse
# this will simply copy that directory into the ports directory for sdl2, and use that. It also
# clears the build, so that it is rebuilt from that source.
local_ports = os.environ.get('EMCC_LOCAL_PORTS')
if local_ports:
shared.Cache.acquire_cache_lock()
logger.warning('using local ports: %s' % local_ports)
local_ports = [pair.split('=', 1) for pair in local_ports.split(',')]
try:
for local in local_ports:
if name == local[0]:
path = local[1]
if name not in ports.ports_by_name:
shared.exit_with_error('%s is not a known port' % name)
port = ports.ports_by_name[name]
if not hasattr(port, 'SUBDIR'):
logger.error('port %s lacks .SUBDIR attribute, which we need in order to override it locally, please update it' % name)
sys.exit(1)
subdir = port.SUBDIR
target = os.path.join(fullname, subdir)
if os.path.exists(target) and not dir_is_newer(path, target):
logger.warning('not grabbing local port: ' + name + ' from ' + path + ' to ' + fullname + ' (subdir: ' + subdir + ') as the destination ' + target + ' is newer (run emcc --clear-ports if that is incorrect)')
else:
logger.warning('grabbing local port: ' + name + ' from ' + path + ' to ' + fullname + ' (subdir: ' + subdir + ')')
shared.try_delete(fullname)
shutil.copytree(path, target)
Ports.clear_project_build(name)
return
finally:
shared.Cache.release_cache_lock()
if is_tarbz2:
fullpath = fullname + '.tar.bz2'
elif url.endswith('.tar.gz'):
fullpath = fullname + '.tar.gz'
else:
fullpath = fullname + '.zip'
if name not in Ports.name_cache: # only mention each port once in log
logger.debug('including port: ' + name)
logger.debug(' (at ' + fullname + ')')
Ports.name_cache.add(name)
class State(object):
retrieved = False
unpacked = False
def retrieve():
# retrieve from remote server
logger.info('retrieving port: ' + name + ' from ' + url)
try:
import requests
response = requests.get(url)
data = response.content
except ImportError:
try:
from urllib.request import urlopen
f = urlopen(url)
data = f.read()
except ImportError:
# Python 2 compatibility
from urllib2 import urlopen
f = urlopen(url)
data = f.read()
if sha512hash:
actual_hash = hashlib.sha512(data).hexdigest()
if actual_hash != sha512hash:
raise RuntimeError('Unexpected hash: ' + actual_hash + '\n'
'If you are updating the port, please update the hash in the port module.')
open(fullpath, 'wb').write(data)
State.retrieved = True
def check_tag():
if is_tarbz2:
names = tarfile.open(fullpath, 'r:bz2').getnames()
elif url.endswith('.tar.gz'):
names = tarfile.open(fullpath, 'r:gz').getnames()
else:
names = zipfile.ZipFile(fullpath, 'r').namelist()
# check if the first entry of the archive is prefixed with the same
# tag that we need, so that we don't download and recompile again if so
return bool(re.match(subdir + r'(\\|/|$)', names[0]))
def unpack():
logger.info('unpacking port: ' + name)
shared.safe_ensure_dirs(fullname)
# TODO: Someday when we are using Python 3, we might want to change the
# code below to use shutil.unpack_archive
# e.g.: shutil.unpack_archive(filename=fullpath, extract_dir=fullname)
# (https://docs.python.org/3/library/shutil.html#shutil.unpack_archive)
if is_tarbz2:
z = tarfile.open(fullpath, 'r:bz2')
elif url.endswith('.tar.gz'):
z = tarfile.open(fullpath, 'r:gz')
else:
z = zipfile.ZipFile(fullpath, 'r')
try:
cwd = os.getcwd()
os.chdir(fullname)
z.extractall()
finally:
os.chdir(cwd)
State.unpacked = True
# main logic. do this under a cache lock, since we don't want multiple jobs to
# retrieve the same port at once
shared.Cache.acquire_cache_lock()
try:
if not os.path.exists(fullpath):
retrieve()
if not os.path.exists(fullname):
unpack()
if not check_tag():
logger.warning('local copy of port is not correct, retrieving from remote server')
shared.try_delete(fullname)
shared.try_delete(fullpath)
retrieve()
unpack()
if State.unpacked:
# we unpacked a new version, clear the build in the cache
Ports.clear_project_build(name)
finally:
shared.Cache.release_cache_lock()
@staticmethod
def clear_project_build(name):
port = ports.ports_by_name[name]
port.clear(Ports, shared)
shared.try_delete(os.path.join(Ports.get_build_dir(), name))
# get all ports
def get_ports(settings):
ret = []
try:
process_dependencies(settings)
for port in ports.ports:
# ports return their output files, which will be linked, or a txt file
ret += [f for f in port.get(Ports, settings, shared) if not f.endswith('.txt')]
except Exception:
logger.error('a problem occurred when using an emscripten-ports library. try to run `emcc --clear-ports` and then run this command again')
raise
ret.reverse()
return ret
def process_dependencies(settings):
for port in reversed(ports.ports):
if hasattr(port, "process_dependencies"):
port.process_dependencies(settings)
def process_args(args, settings):
process_dependencies(settings)
for port in ports.ports:
args = port.process_args(Ports, args, settings, shared)
return args
# get a single port
def get_port(name, settings):
port = ports.ports_by_name[name]
if hasattr(port, "process_dependencies"):
port.process_dependencies(settings)
# ports return their output files, which will be linked, or a txt file
return [f for f in port.get(Ports, settings, shared) if not f.endswith('.txt')]
def show_ports():
print('Available ports:')
for port in ports.ports:
print(' ', port.show())
|
[] |
[] |
[
"EMCC_ONLY_FORCED_STDLIBS",
"EMCC_LOCAL_PORTS",
"EMCC_FORCE_STDLIBS",
"EM_PORTS"
] |
[]
|
["EMCC_ONLY_FORCED_STDLIBS", "EMCC_LOCAL_PORTS", "EMCC_FORCE_STDLIBS", "EM_PORTS"]
|
python
| 4 | 0 | |
model.py
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
from generate_data import vocab, ids_from_chars
# Length of the vocabulary in chars
vocab_size = len(vocab)
# The embedding dimension
embedding_dim = 256
# Number of RNN units
rnn_units = 1024
# Define the model
class MyModel(tf.keras.Model):
def __init__(self, vocab_size, embedding_dim, rnn_units):
super().__init__(self)
# The input layer
self.embedding = tf.keras.layers.Embedding(vocab_size, embedding_dim)
# The RNN layer
self.gru = tf.keras.layers.GRU(rnn_units,
return_sequences=True,
return_state=True)
# The output layer
self.dense = tf.keras.layers.Dense(vocab_size)
def call(self, inputs, states=None, return_state=False, training=False):
x = inputs
x = self.embedding(x, training=training)
if states is None:
states = self.gru.get_initial_state(x)
x, states = self.gru(x, initial_state=states, training=training)
x = self.dense(x, training=training)
if return_state:
return x, states
else:
return x
# Model to make a single step prediction
class OneStep(tf.keras.Model):
def __init__(self, model, chars_from_ids, ids_from_chars, temperature=1.0):
super().__init__()
self.temperature = temperature
self.model = model
self.chars_from_ids = chars_from_ids
self.ids_from_chars = ids_from_chars
# Create a mask to prevent "[UNK]" from being generated.
skip_ids = self.ids_from_chars(['[UNK]'])[:, None]
sparse_mask = tf.SparseTensor(
# Put a -inf at each bad index.
values=[-float('inf')]*len(skip_ids),
indices=skip_ids,
# Match the shape to the vocabulary
dense_shape=[len(ids_from_chars.get_vocabulary())])
self.prediction_mask = tf.sparse.to_dense(sparse_mask)
@tf.function
def generate_one_step(self, inputs, states=None):
# Convert strings to token IDs.
input_chars = tf.strings.unicode_split(inputs, 'UTF-8')
input_ids = self.ids_from_chars(input_chars).to_tensor()
# Run the model.
# predicted_logits.shape is [batch, char, next_char_logits]
predicted_logits, states = self.model(inputs=input_ids, states=states,
return_state=True)
# Only use the last prediction.
predicted_logits = predicted_logits[:, -1, :]
predicted_logits = predicted_logits/self.temperature
# Apply the prediction mask: prevent "[UNK]" from being generated.
predicted_logits = predicted_logits + self.prediction_mask
# Sample the output logits to generate token IDs.
predicted_ids = tf.random.categorical(predicted_logits, num_samples=1)
predicted_ids = tf.squeeze(predicted_ids, axis=-1)
# Convert from token ids to characters
predicted_chars = self.chars_from_ids(predicted_ids)
# Return the characters and model state.
return predicted_chars, states
# Instantiate an RNN model object
model = MyModel(
# vocabulary size must match the `StringLookup` layers
vocab_size=len(ids_from_chars.get_vocabulary()),
embedding_dim=embedding_dim,
rnn_units=rnn_units)
print("Loaded model")
|
[] |
[] |
[
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["TF_CPP_MIN_LOG_LEVEL"]
|
python
| 1 | 0 | |
yt_dlp/utils.py
|
#!/usr/bin/env python3
# coding: utf-8
from __future__ import unicode_literals
import base64
import binascii
import calendar
import codecs
import collections
import contextlib
import ctypes
import datetime
import email.utils
import email.header
import errno
import functools
import gzip
import hashlib
import hmac
import importlib.util
import io
import itertools
import json
import locale
import math
import operator
import os
import platform
import random
import re
import socket
import ssl
import subprocess
import sys
import tempfile
import time
import traceback
import xml.etree.ElementTree
import zlib
import mimetypes
from .compat import (
compat_HTMLParseError,
compat_HTMLParser,
compat_HTTPError,
compat_basestring,
compat_chr,
compat_cookiejar,
compat_ctypes_WINFUNCTYPE,
compat_etree_fromstring,
compat_expanduser,
compat_html_entities,
compat_html_entities_html5,
compat_http_client,
compat_integer_types,
compat_numeric_types,
compat_kwargs,
compat_os_name,
compat_parse_qs,
compat_shlex_split,
compat_shlex_quote,
compat_str,
compat_struct_pack,
compat_struct_unpack,
compat_urllib_error,
compat_urllib_parse,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urllib_parse_urlunparse,
compat_urllib_parse_quote,
compat_urllib_parse_quote_plus,
compat_urllib_parse_unquote_plus,
compat_urllib_request,
compat_urlparse,
compat_xpath,
)
from .socks import (
ProxyType,
sockssocket,
)
def register_socks_protocols():
# "Register" SOCKS protocols
# In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904
# URLs with protocols not in urlparse.uses_netloc are not handled correctly
for scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
if scheme not in compat_urlparse.uses_netloc:
compat_urlparse.uses_netloc.append(scheme)
# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))
def random_user_agent():
_USER_AGENT_TPL = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/%s Safari/537.36'
_CHROME_VERSIONS = (
'74.0.3729.129',
'76.0.3780.3',
'76.0.3780.2',
'74.0.3729.128',
'76.0.3780.1',
'76.0.3780.0',
'75.0.3770.15',
'74.0.3729.127',
'74.0.3729.126',
'76.0.3779.1',
'76.0.3779.0',
'75.0.3770.14',
'74.0.3729.125',
'76.0.3778.1',
'76.0.3778.0',
'75.0.3770.13',
'74.0.3729.124',
'74.0.3729.123',
'73.0.3683.121',
'76.0.3777.1',
'76.0.3777.0',
'75.0.3770.12',
'74.0.3729.122',
'76.0.3776.4',
'75.0.3770.11',
'74.0.3729.121',
'76.0.3776.3',
'76.0.3776.2',
'73.0.3683.120',
'74.0.3729.120',
'74.0.3729.119',
'74.0.3729.118',
'76.0.3776.1',
'76.0.3776.0',
'76.0.3775.5',
'75.0.3770.10',
'74.0.3729.117',
'76.0.3775.4',
'76.0.3775.3',
'74.0.3729.116',
'75.0.3770.9',
'76.0.3775.2',
'76.0.3775.1',
'76.0.3775.0',
'75.0.3770.8',
'74.0.3729.115',
'74.0.3729.114',
'76.0.3774.1',
'76.0.3774.0',
'75.0.3770.7',
'74.0.3729.113',
'74.0.3729.112',
'74.0.3729.111',
'76.0.3773.1',
'76.0.3773.0',
'75.0.3770.6',
'74.0.3729.110',
'74.0.3729.109',
'76.0.3772.1',
'76.0.3772.0',
'75.0.3770.5',
'74.0.3729.108',
'74.0.3729.107',
'76.0.3771.1',
'76.0.3771.0',
'75.0.3770.4',
'74.0.3729.106',
'74.0.3729.105',
'75.0.3770.3',
'74.0.3729.104',
'74.0.3729.103',
'74.0.3729.102',
'75.0.3770.2',
'74.0.3729.101',
'75.0.3770.1',
'75.0.3770.0',
'74.0.3729.100',
'75.0.3769.5',
'75.0.3769.4',
'74.0.3729.99',
'75.0.3769.3',
'75.0.3769.2',
'75.0.3768.6',
'74.0.3729.98',
'75.0.3769.1',
'75.0.3769.0',
'74.0.3729.97',
'73.0.3683.119',
'73.0.3683.118',
'74.0.3729.96',
'75.0.3768.5',
'75.0.3768.4',
'75.0.3768.3',
'75.0.3768.2',
'74.0.3729.95',
'74.0.3729.94',
'75.0.3768.1',
'75.0.3768.0',
'74.0.3729.93',
'74.0.3729.92',
'73.0.3683.117',
'74.0.3729.91',
'75.0.3766.3',
'74.0.3729.90',
'75.0.3767.2',
'75.0.3767.1',
'75.0.3767.0',
'74.0.3729.89',
'73.0.3683.116',
'75.0.3766.2',
'74.0.3729.88',
'75.0.3766.1',
'75.0.3766.0',
'74.0.3729.87',
'73.0.3683.115',
'74.0.3729.86',
'75.0.3765.1',
'75.0.3765.0',
'74.0.3729.85',
'73.0.3683.114',
'74.0.3729.84',
'75.0.3764.1',
'75.0.3764.0',
'74.0.3729.83',
'73.0.3683.113',
'75.0.3763.2',
'75.0.3761.4',
'74.0.3729.82',
'75.0.3763.1',
'75.0.3763.0',
'74.0.3729.81',
'73.0.3683.112',
'75.0.3762.1',
'75.0.3762.0',
'74.0.3729.80',
'75.0.3761.3',
'74.0.3729.79',
'73.0.3683.111',
'75.0.3761.2',
'74.0.3729.78',
'74.0.3729.77',
'75.0.3761.1',
'75.0.3761.0',
'73.0.3683.110',
'74.0.3729.76',
'74.0.3729.75',
'75.0.3760.0',
'74.0.3729.74',
'75.0.3759.8',
'75.0.3759.7',
'75.0.3759.6',
'74.0.3729.73',
'75.0.3759.5',
'74.0.3729.72',
'73.0.3683.109',
'75.0.3759.4',
'75.0.3759.3',
'74.0.3729.71',
'75.0.3759.2',
'74.0.3729.70',
'73.0.3683.108',
'74.0.3729.69',
'75.0.3759.1',
'75.0.3759.0',
'74.0.3729.68',
'73.0.3683.107',
'74.0.3729.67',
'75.0.3758.1',
'75.0.3758.0',
'74.0.3729.66',
'73.0.3683.106',
'74.0.3729.65',
'75.0.3757.1',
'75.0.3757.0',
'74.0.3729.64',
'73.0.3683.105',
'74.0.3729.63',
'75.0.3756.1',
'75.0.3756.0',
'74.0.3729.62',
'73.0.3683.104',
'75.0.3755.3',
'75.0.3755.2',
'73.0.3683.103',
'75.0.3755.1',
'75.0.3755.0',
'74.0.3729.61',
'73.0.3683.102',
'74.0.3729.60',
'75.0.3754.2',
'74.0.3729.59',
'75.0.3753.4',
'74.0.3729.58',
'75.0.3754.1',
'75.0.3754.0',
'74.0.3729.57',
'73.0.3683.101',
'75.0.3753.3',
'75.0.3752.2',
'75.0.3753.2',
'74.0.3729.56',
'75.0.3753.1',
'75.0.3753.0',
'74.0.3729.55',
'73.0.3683.100',
'74.0.3729.54',
'75.0.3752.1',
'75.0.3752.0',
'74.0.3729.53',
'73.0.3683.99',
'74.0.3729.52',
'75.0.3751.1',
'75.0.3751.0',
'74.0.3729.51',
'73.0.3683.98',
'74.0.3729.50',
'75.0.3750.0',
'74.0.3729.49',
'74.0.3729.48',
'74.0.3729.47',
'75.0.3749.3',
'74.0.3729.46',
'73.0.3683.97',
'75.0.3749.2',
'74.0.3729.45',
'75.0.3749.1',
'75.0.3749.0',
'74.0.3729.44',
'73.0.3683.96',
'74.0.3729.43',
'74.0.3729.42',
'75.0.3748.1',
'75.0.3748.0',
'74.0.3729.41',
'75.0.3747.1',
'73.0.3683.95',
'75.0.3746.4',
'74.0.3729.40',
'74.0.3729.39',
'75.0.3747.0',
'75.0.3746.3',
'75.0.3746.2',
'74.0.3729.38',
'75.0.3746.1',
'75.0.3746.0',
'74.0.3729.37',
'73.0.3683.94',
'75.0.3745.5',
'75.0.3745.4',
'75.0.3745.3',
'75.0.3745.2',
'74.0.3729.36',
'75.0.3745.1',
'75.0.3745.0',
'75.0.3744.2',
'74.0.3729.35',
'73.0.3683.93',
'74.0.3729.34',
'75.0.3744.1',
'75.0.3744.0',
'74.0.3729.33',
'73.0.3683.92',
'74.0.3729.32',
'74.0.3729.31',
'73.0.3683.91',
'75.0.3741.2',
'75.0.3740.5',
'74.0.3729.30',
'75.0.3741.1',
'75.0.3741.0',
'74.0.3729.29',
'75.0.3740.4',
'73.0.3683.90',
'74.0.3729.28',
'75.0.3740.3',
'73.0.3683.89',
'75.0.3740.2',
'74.0.3729.27',
'75.0.3740.1',
'75.0.3740.0',
'74.0.3729.26',
'73.0.3683.88',
'73.0.3683.87',
'74.0.3729.25',
'75.0.3739.1',
'75.0.3739.0',
'73.0.3683.86',
'74.0.3729.24',
'73.0.3683.85',
'75.0.3738.4',
'75.0.3738.3',
'75.0.3738.2',
'75.0.3738.1',
'75.0.3738.0',
'74.0.3729.23',
'73.0.3683.84',
'74.0.3729.22',
'74.0.3729.21',
'75.0.3737.1',
'75.0.3737.0',
'74.0.3729.20',
'73.0.3683.83',
'74.0.3729.19',
'75.0.3736.1',
'75.0.3736.0',
'74.0.3729.18',
'73.0.3683.82',
'74.0.3729.17',
'75.0.3735.1',
'75.0.3735.0',
'74.0.3729.16',
'73.0.3683.81',
'75.0.3734.1',
'75.0.3734.0',
'74.0.3729.15',
'73.0.3683.80',
'74.0.3729.14',
'75.0.3733.1',
'75.0.3733.0',
'75.0.3732.1',
'74.0.3729.13',
'74.0.3729.12',
'73.0.3683.79',
'74.0.3729.11',
'75.0.3732.0',
'74.0.3729.10',
'73.0.3683.78',
'74.0.3729.9',
'74.0.3729.8',
'74.0.3729.7',
'75.0.3731.3',
'75.0.3731.2',
'75.0.3731.0',
'74.0.3729.6',
'73.0.3683.77',
'73.0.3683.76',
'75.0.3730.5',
'75.0.3730.4',
'73.0.3683.75',
'74.0.3729.5',
'73.0.3683.74',
'75.0.3730.3',
'75.0.3730.2',
'74.0.3729.4',
'73.0.3683.73',
'73.0.3683.72',
'75.0.3730.1',
'75.0.3730.0',
'74.0.3729.3',
'73.0.3683.71',
'74.0.3729.2',
'73.0.3683.70',
'74.0.3729.1',
'74.0.3729.0',
'74.0.3726.4',
'73.0.3683.69',
'74.0.3726.3',
'74.0.3728.0',
'74.0.3726.2',
'73.0.3683.68',
'74.0.3726.1',
'74.0.3726.0',
'74.0.3725.4',
'73.0.3683.67',
'73.0.3683.66',
'74.0.3725.3',
'74.0.3725.2',
'74.0.3725.1',
'74.0.3724.8',
'74.0.3725.0',
'73.0.3683.65',
'74.0.3724.7',
'74.0.3724.6',
'74.0.3724.5',
'74.0.3724.4',
'74.0.3724.3',
'74.0.3724.2',
'74.0.3724.1',
'74.0.3724.0',
'73.0.3683.64',
'74.0.3723.1',
'74.0.3723.0',
'73.0.3683.63',
'74.0.3722.1',
'74.0.3722.0',
'73.0.3683.62',
'74.0.3718.9',
'74.0.3702.3',
'74.0.3721.3',
'74.0.3721.2',
'74.0.3721.1',
'74.0.3721.0',
'74.0.3720.6',
'73.0.3683.61',
'72.0.3626.122',
'73.0.3683.60',
'74.0.3720.5',
'72.0.3626.121',
'74.0.3718.8',
'74.0.3720.4',
'74.0.3720.3',
'74.0.3718.7',
'74.0.3720.2',
'74.0.3720.1',
'74.0.3720.0',
'74.0.3718.6',
'74.0.3719.5',
'73.0.3683.59',
'74.0.3718.5',
'74.0.3718.4',
'74.0.3719.4',
'74.0.3719.3',
'74.0.3719.2',
'74.0.3719.1',
'73.0.3683.58',
'74.0.3719.0',
'73.0.3683.57',
'73.0.3683.56',
'74.0.3718.3',
'73.0.3683.55',
'74.0.3718.2',
'74.0.3718.1',
'74.0.3718.0',
'73.0.3683.54',
'74.0.3717.2',
'73.0.3683.53',
'74.0.3717.1',
'74.0.3717.0',
'73.0.3683.52',
'74.0.3716.1',
'74.0.3716.0',
'73.0.3683.51',
'74.0.3715.1',
'74.0.3715.0',
'73.0.3683.50',
'74.0.3711.2',
'74.0.3714.2',
'74.0.3713.3',
'74.0.3714.1',
'74.0.3714.0',
'73.0.3683.49',
'74.0.3713.1',
'74.0.3713.0',
'72.0.3626.120',
'73.0.3683.48',
'74.0.3712.2',
'74.0.3712.1',
'74.0.3712.0',
'73.0.3683.47',
'72.0.3626.119',
'73.0.3683.46',
'74.0.3710.2',
'72.0.3626.118',
'74.0.3711.1',
'74.0.3711.0',
'73.0.3683.45',
'72.0.3626.117',
'74.0.3710.1',
'74.0.3710.0',
'73.0.3683.44',
'72.0.3626.116',
'74.0.3709.1',
'74.0.3709.0',
'74.0.3704.9',
'73.0.3683.43',
'72.0.3626.115',
'74.0.3704.8',
'74.0.3704.7',
'74.0.3708.0',
'74.0.3706.7',
'74.0.3704.6',
'73.0.3683.42',
'72.0.3626.114',
'74.0.3706.6',
'72.0.3626.113',
'74.0.3704.5',
'74.0.3706.5',
'74.0.3706.4',
'74.0.3706.3',
'74.0.3706.2',
'74.0.3706.1',
'74.0.3706.0',
'73.0.3683.41',
'72.0.3626.112',
'74.0.3705.1',
'74.0.3705.0',
'73.0.3683.40',
'72.0.3626.111',
'73.0.3683.39',
'74.0.3704.4',
'73.0.3683.38',
'74.0.3704.3',
'74.0.3704.2',
'74.0.3704.1',
'74.0.3704.0',
'73.0.3683.37',
'72.0.3626.110',
'72.0.3626.109',
'74.0.3703.3',
'74.0.3703.2',
'73.0.3683.36',
'74.0.3703.1',
'74.0.3703.0',
'73.0.3683.35',
'72.0.3626.108',
'74.0.3702.2',
'74.0.3699.3',
'74.0.3702.1',
'74.0.3702.0',
'73.0.3683.34',
'72.0.3626.107',
'73.0.3683.33',
'74.0.3701.1',
'74.0.3701.0',
'73.0.3683.32',
'73.0.3683.31',
'72.0.3626.105',
'74.0.3700.1',
'74.0.3700.0',
'73.0.3683.29',
'72.0.3626.103',
'74.0.3699.2',
'74.0.3699.1',
'74.0.3699.0',
'73.0.3683.28',
'72.0.3626.102',
'73.0.3683.27',
'73.0.3683.26',
'74.0.3698.0',
'74.0.3696.2',
'72.0.3626.101',
'73.0.3683.25',
'74.0.3696.1',
'74.0.3696.0',
'74.0.3694.8',
'72.0.3626.100',
'74.0.3694.7',
'74.0.3694.6',
'74.0.3694.5',
'74.0.3694.4',
'72.0.3626.99',
'72.0.3626.98',
'74.0.3694.3',
'73.0.3683.24',
'72.0.3626.97',
'72.0.3626.96',
'72.0.3626.95',
'73.0.3683.23',
'72.0.3626.94',
'73.0.3683.22',
'73.0.3683.21',
'72.0.3626.93',
'74.0.3694.2',
'72.0.3626.92',
'74.0.3694.1',
'74.0.3694.0',
'74.0.3693.6',
'73.0.3683.20',
'72.0.3626.91',
'74.0.3693.5',
'74.0.3693.4',
'74.0.3693.3',
'74.0.3693.2',
'73.0.3683.19',
'74.0.3693.1',
'74.0.3693.0',
'73.0.3683.18',
'72.0.3626.90',
'74.0.3692.1',
'74.0.3692.0',
'73.0.3683.17',
'72.0.3626.89',
'74.0.3687.3',
'74.0.3691.1',
'74.0.3691.0',
'73.0.3683.16',
'72.0.3626.88',
'72.0.3626.87',
'73.0.3683.15',
'74.0.3690.1',
'74.0.3690.0',
'73.0.3683.14',
'72.0.3626.86',
'73.0.3683.13',
'73.0.3683.12',
'74.0.3689.1',
'74.0.3689.0',
'73.0.3683.11',
'72.0.3626.85',
'73.0.3683.10',
'72.0.3626.84',
'73.0.3683.9',
'74.0.3688.1',
'74.0.3688.0',
'73.0.3683.8',
'72.0.3626.83',
'74.0.3687.2',
'74.0.3687.1',
'74.0.3687.0',
'73.0.3683.7',
'72.0.3626.82',
'74.0.3686.4',
'72.0.3626.81',
'74.0.3686.3',
'74.0.3686.2',
'74.0.3686.1',
'74.0.3686.0',
'73.0.3683.6',
'72.0.3626.80',
'74.0.3685.1',
'74.0.3685.0',
'73.0.3683.5',
'72.0.3626.79',
'74.0.3684.1',
'74.0.3684.0',
'73.0.3683.4',
'72.0.3626.78',
'72.0.3626.77',
'73.0.3683.3',
'73.0.3683.2',
'72.0.3626.76',
'73.0.3683.1',
'73.0.3683.0',
'72.0.3626.75',
'71.0.3578.141',
'73.0.3682.1',
'73.0.3682.0',
'72.0.3626.74',
'71.0.3578.140',
'73.0.3681.4',
'73.0.3681.3',
'73.0.3681.2',
'73.0.3681.1',
'73.0.3681.0',
'72.0.3626.73',
'71.0.3578.139',
'72.0.3626.72',
'72.0.3626.71',
'73.0.3680.1',
'73.0.3680.0',
'72.0.3626.70',
'71.0.3578.138',
'73.0.3678.2',
'73.0.3679.1',
'73.0.3679.0',
'72.0.3626.69',
'71.0.3578.137',
'73.0.3678.1',
'73.0.3678.0',
'71.0.3578.136',
'73.0.3677.1',
'73.0.3677.0',
'72.0.3626.68',
'72.0.3626.67',
'71.0.3578.135',
'73.0.3676.1',
'73.0.3676.0',
'73.0.3674.2',
'72.0.3626.66',
'71.0.3578.134',
'73.0.3674.1',
'73.0.3674.0',
'72.0.3626.65',
'71.0.3578.133',
'73.0.3673.2',
'73.0.3673.1',
'73.0.3673.0',
'72.0.3626.64',
'71.0.3578.132',
'72.0.3626.63',
'72.0.3626.62',
'72.0.3626.61',
'72.0.3626.60',
'73.0.3672.1',
'73.0.3672.0',
'72.0.3626.59',
'71.0.3578.131',
'73.0.3671.3',
'73.0.3671.2',
'73.0.3671.1',
'73.0.3671.0',
'72.0.3626.58',
'71.0.3578.130',
'73.0.3670.1',
'73.0.3670.0',
'72.0.3626.57',
'71.0.3578.129',
'73.0.3669.1',
'73.0.3669.0',
'72.0.3626.56',
'71.0.3578.128',
'73.0.3668.2',
'73.0.3668.1',
'73.0.3668.0',
'72.0.3626.55',
'71.0.3578.127',
'73.0.3667.2',
'73.0.3667.1',
'73.0.3667.0',
'72.0.3626.54',
'71.0.3578.126',
'73.0.3666.1',
'73.0.3666.0',
'72.0.3626.53',
'71.0.3578.125',
'73.0.3665.4',
'73.0.3665.3',
'72.0.3626.52',
'73.0.3665.2',
'73.0.3664.4',
'73.0.3665.1',
'73.0.3665.0',
'72.0.3626.51',
'71.0.3578.124',
'72.0.3626.50',
'73.0.3664.3',
'73.0.3664.2',
'73.0.3664.1',
'73.0.3664.0',
'73.0.3663.2',
'72.0.3626.49',
'71.0.3578.123',
'73.0.3663.1',
'73.0.3663.0',
'72.0.3626.48',
'71.0.3578.122',
'73.0.3662.1',
'73.0.3662.0',
'72.0.3626.47',
'71.0.3578.121',
'73.0.3661.1',
'72.0.3626.46',
'73.0.3661.0',
'72.0.3626.45',
'71.0.3578.120',
'73.0.3660.2',
'73.0.3660.1',
'73.0.3660.0',
'72.0.3626.44',
'71.0.3578.119',
'73.0.3659.1',
'73.0.3659.0',
'72.0.3626.43',
'71.0.3578.118',
'73.0.3658.1',
'73.0.3658.0',
'72.0.3626.42',
'71.0.3578.117',
'73.0.3657.1',
'73.0.3657.0',
'72.0.3626.41',
'71.0.3578.116',
'73.0.3656.1',
'73.0.3656.0',
'72.0.3626.40',
'71.0.3578.115',
'73.0.3655.1',
'73.0.3655.0',
'72.0.3626.39',
'71.0.3578.114',
'73.0.3654.1',
'73.0.3654.0',
'72.0.3626.38',
'71.0.3578.113',
'73.0.3653.1',
'73.0.3653.0',
'72.0.3626.37',
'71.0.3578.112',
'73.0.3652.1',
'73.0.3652.0',
'72.0.3626.36',
'71.0.3578.111',
'73.0.3651.1',
'73.0.3651.0',
'72.0.3626.35',
'71.0.3578.110',
'73.0.3650.1',
'73.0.3650.0',
'72.0.3626.34',
'71.0.3578.109',
'73.0.3649.1',
'73.0.3649.0',
'72.0.3626.33',
'71.0.3578.108',
'73.0.3648.2',
'73.0.3648.1',
'73.0.3648.0',
'72.0.3626.32',
'71.0.3578.107',
'73.0.3647.2',
'73.0.3647.1',
'73.0.3647.0',
'72.0.3626.31',
'71.0.3578.106',
'73.0.3635.3',
'73.0.3646.2',
'73.0.3646.1',
'73.0.3646.0',
'72.0.3626.30',
'71.0.3578.105',
'72.0.3626.29',
'73.0.3645.2',
'73.0.3645.1',
'73.0.3645.0',
'72.0.3626.28',
'71.0.3578.104',
'72.0.3626.27',
'72.0.3626.26',
'72.0.3626.25',
'72.0.3626.24',
'73.0.3644.0',
'73.0.3643.2',
'72.0.3626.23',
'71.0.3578.103',
'73.0.3643.1',
'73.0.3643.0',
'72.0.3626.22',
'71.0.3578.102',
'73.0.3642.1',
'73.0.3642.0',
'72.0.3626.21',
'71.0.3578.101',
'73.0.3641.1',
'73.0.3641.0',
'72.0.3626.20',
'71.0.3578.100',
'72.0.3626.19',
'73.0.3640.1',
'73.0.3640.0',
'72.0.3626.18',
'73.0.3639.1',
'71.0.3578.99',
'73.0.3639.0',
'72.0.3626.17',
'73.0.3638.2',
'72.0.3626.16',
'73.0.3638.1',
'73.0.3638.0',
'72.0.3626.15',
'71.0.3578.98',
'73.0.3635.2',
'71.0.3578.97',
'73.0.3637.1',
'73.0.3637.0',
'72.0.3626.14',
'71.0.3578.96',
'71.0.3578.95',
'72.0.3626.13',
'71.0.3578.94',
'73.0.3636.2',
'71.0.3578.93',
'73.0.3636.1',
'73.0.3636.0',
'72.0.3626.12',
'71.0.3578.92',
'73.0.3635.1',
'73.0.3635.0',
'72.0.3626.11',
'71.0.3578.91',
'73.0.3634.2',
'73.0.3634.1',
'73.0.3634.0',
'72.0.3626.10',
'71.0.3578.90',
'71.0.3578.89',
'73.0.3633.2',
'73.0.3633.1',
'73.0.3633.0',
'72.0.3610.4',
'72.0.3626.9',
'71.0.3578.88',
'73.0.3632.5',
'73.0.3632.4',
'73.0.3632.3',
'73.0.3632.2',
'73.0.3632.1',
'73.0.3632.0',
'72.0.3626.8',
'71.0.3578.87',
'73.0.3631.2',
'73.0.3631.1',
'73.0.3631.0',
'72.0.3626.7',
'71.0.3578.86',
'72.0.3626.6',
'73.0.3630.1',
'73.0.3630.0',
'72.0.3626.5',
'71.0.3578.85',
'72.0.3626.4',
'73.0.3628.3',
'73.0.3628.2',
'73.0.3629.1',
'73.0.3629.0',
'72.0.3626.3',
'71.0.3578.84',
'73.0.3628.1',
'73.0.3628.0',
'71.0.3578.83',
'73.0.3627.1',
'73.0.3627.0',
'72.0.3626.2',
'71.0.3578.82',
'71.0.3578.81',
'71.0.3578.80',
'72.0.3626.1',
'72.0.3626.0',
'71.0.3578.79',
'70.0.3538.124',
'71.0.3578.78',
'72.0.3623.4',
'72.0.3625.2',
'72.0.3625.1',
'72.0.3625.0',
'71.0.3578.77',
'70.0.3538.123',
'72.0.3624.4',
'72.0.3624.3',
'72.0.3624.2',
'71.0.3578.76',
'72.0.3624.1',
'72.0.3624.0',
'72.0.3623.3',
'71.0.3578.75',
'70.0.3538.122',
'71.0.3578.74',
'72.0.3623.2',
'72.0.3610.3',
'72.0.3623.1',
'72.0.3623.0',
'72.0.3622.3',
'72.0.3622.2',
'71.0.3578.73',
'70.0.3538.121',
'72.0.3622.1',
'72.0.3622.0',
'71.0.3578.72',
'70.0.3538.120',
'72.0.3621.1',
'72.0.3621.0',
'71.0.3578.71',
'70.0.3538.119',
'72.0.3620.1',
'72.0.3620.0',
'71.0.3578.70',
'70.0.3538.118',
'71.0.3578.69',
'72.0.3619.1',
'72.0.3619.0',
'71.0.3578.68',
'70.0.3538.117',
'71.0.3578.67',
'72.0.3618.1',
'72.0.3618.0',
'71.0.3578.66',
'70.0.3538.116',
'72.0.3617.1',
'72.0.3617.0',
'71.0.3578.65',
'70.0.3538.115',
'72.0.3602.3',
'71.0.3578.64',
'72.0.3616.1',
'72.0.3616.0',
'71.0.3578.63',
'70.0.3538.114',
'71.0.3578.62',
'72.0.3615.1',
'72.0.3615.0',
'71.0.3578.61',
'70.0.3538.113',
'72.0.3614.1',
'72.0.3614.0',
'71.0.3578.60',
'70.0.3538.112',
'72.0.3613.1',
'72.0.3613.0',
'71.0.3578.59',
'70.0.3538.111',
'72.0.3612.2',
'72.0.3612.1',
'72.0.3612.0',
'70.0.3538.110',
'71.0.3578.58',
'70.0.3538.109',
'72.0.3611.2',
'72.0.3611.1',
'72.0.3611.0',
'71.0.3578.57',
'70.0.3538.108',
'72.0.3610.2',
'71.0.3578.56',
'71.0.3578.55',
'72.0.3610.1',
'72.0.3610.0',
'71.0.3578.54',
'70.0.3538.107',
'71.0.3578.53',
'72.0.3609.3',
'71.0.3578.52',
'72.0.3609.2',
'71.0.3578.51',
'72.0.3608.5',
'72.0.3609.1',
'72.0.3609.0',
'71.0.3578.50',
'70.0.3538.106',
'72.0.3608.4',
'72.0.3608.3',
'72.0.3608.2',
'71.0.3578.49',
'72.0.3608.1',
'72.0.3608.0',
'70.0.3538.105',
'71.0.3578.48',
'72.0.3607.1',
'72.0.3607.0',
'71.0.3578.47',
'70.0.3538.104',
'72.0.3606.2',
'72.0.3606.1',
'72.0.3606.0',
'71.0.3578.46',
'70.0.3538.103',
'70.0.3538.102',
'72.0.3605.3',
'72.0.3605.2',
'72.0.3605.1',
'72.0.3605.0',
'71.0.3578.45',
'70.0.3538.101',
'71.0.3578.44',
'71.0.3578.43',
'70.0.3538.100',
'70.0.3538.99',
'71.0.3578.42',
'72.0.3604.1',
'72.0.3604.0',
'71.0.3578.41',
'70.0.3538.98',
'71.0.3578.40',
'72.0.3603.2',
'72.0.3603.1',
'72.0.3603.0',
'71.0.3578.39',
'70.0.3538.97',
'72.0.3602.2',
'71.0.3578.38',
'71.0.3578.37',
'72.0.3602.1',
'72.0.3602.0',
'71.0.3578.36',
'70.0.3538.96',
'72.0.3601.1',
'72.0.3601.0',
'71.0.3578.35',
'70.0.3538.95',
'72.0.3600.1',
'72.0.3600.0',
'71.0.3578.34',
'70.0.3538.94',
'72.0.3599.3',
'72.0.3599.2',
'72.0.3599.1',
'72.0.3599.0',
'71.0.3578.33',
'70.0.3538.93',
'72.0.3598.1',
'72.0.3598.0',
'71.0.3578.32',
'70.0.3538.87',
'72.0.3597.1',
'72.0.3597.0',
'72.0.3596.2',
'71.0.3578.31',
'70.0.3538.86',
'71.0.3578.30',
'71.0.3578.29',
'72.0.3596.1',
'72.0.3596.0',
'71.0.3578.28',
'70.0.3538.85',
'72.0.3595.2',
'72.0.3591.3',
'72.0.3595.1',
'72.0.3595.0',
'71.0.3578.27',
'70.0.3538.84',
'72.0.3594.1',
'72.0.3594.0',
'71.0.3578.26',
'70.0.3538.83',
'72.0.3593.2',
'72.0.3593.1',
'72.0.3593.0',
'71.0.3578.25',
'70.0.3538.82',
'72.0.3589.3',
'72.0.3592.2',
'72.0.3592.1',
'72.0.3592.0',
'71.0.3578.24',
'72.0.3589.2',
'70.0.3538.81',
'70.0.3538.80',
'72.0.3591.2',
'72.0.3591.1',
'72.0.3591.0',
'71.0.3578.23',
'70.0.3538.79',
'71.0.3578.22',
'72.0.3590.1',
'72.0.3590.0',
'71.0.3578.21',
'70.0.3538.78',
'70.0.3538.77',
'72.0.3589.1',
'72.0.3589.0',
'71.0.3578.20',
'70.0.3538.76',
'71.0.3578.19',
'70.0.3538.75',
'72.0.3588.1',
'72.0.3588.0',
'71.0.3578.18',
'70.0.3538.74',
'72.0.3586.2',
'72.0.3587.0',
'71.0.3578.17',
'70.0.3538.73',
'72.0.3586.1',
'72.0.3586.0',
'71.0.3578.16',
'70.0.3538.72',
'72.0.3585.1',
'72.0.3585.0',
'71.0.3578.15',
'70.0.3538.71',
'71.0.3578.14',
'72.0.3584.1',
'72.0.3584.0',
'71.0.3578.13',
'70.0.3538.70',
'72.0.3583.2',
'71.0.3578.12',
'72.0.3583.1',
'72.0.3583.0',
'71.0.3578.11',
'70.0.3538.69',
'71.0.3578.10',
'72.0.3582.0',
'72.0.3581.4',
'71.0.3578.9',
'70.0.3538.67',
'72.0.3581.3',
'72.0.3581.2',
'72.0.3581.1',
'72.0.3581.0',
'71.0.3578.8',
'70.0.3538.66',
'72.0.3580.1',
'72.0.3580.0',
'71.0.3578.7',
'70.0.3538.65',
'71.0.3578.6',
'72.0.3579.1',
'72.0.3579.0',
'71.0.3578.5',
'70.0.3538.64',
'71.0.3578.4',
'71.0.3578.3',
'71.0.3578.2',
'71.0.3578.1',
'71.0.3578.0',
'70.0.3538.63',
'69.0.3497.128',
'70.0.3538.62',
'70.0.3538.61',
'70.0.3538.60',
'70.0.3538.59',
'71.0.3577.1',
'71.0.3577.0',
'70.0.3538.58',
'69.0.3497.127',
'71.0.3576.2',
'71.0.3576.1',
'71.0.3576.0',
'70.0.3538.57',
'70.0.3538.56',
'71.0.3575.2',
'70.0.3538.55',
'69.0.3497.126',
'70.0.3538.54',
'71.0.3575.1',
'71.0.3575.0',
'71.0.3574.1',
'71.0.3574.0',
'70.0.3538.53',
'69.0.3497.125',
'70.0.3538.52',
'71.0.3573.1',
'71.0.3573.0',
'70.0.3538.51',
'69.0.3497.124',
'71.0.3572.1',
'71.0.3572.0',
'70.0.3538.50',
'69.0.3497.123',
'71.0.3571.2',
'70.0.3538.49',
'69.0.3497.122',
'71.0.3571.1',
'71.0.3571.0',
'70.0.3538.48',
'69.0.3497.121',
'71.0.3570.1',
'71.0.3570.0',
'70.0.3538.47',
'69.0.3497.120',
'71.0.3568.2',
'71.0.3569.1',
'71.0.3569.0',
'70.0.3538.46',
'69.0.3497.119',
'70.0.3538.45',
'71.0.3568.1',
'71.0.3568.0',
'70.0.3538.44',
'69.0.3497.118',
'70.0.3538.43',
'70.0.3538.42',
'71.0.3567.1',
'71.0.3567.0',
'70.0.3538.41',
'69.0.3497.117',
'71.0.3566.1',
'71.0.3566.0',
'70.0.3538.40',
'69.0.3497.116',
'71.0.3565.1',
'71.0.3565.0',
'70.0.3538.39',
'69.0.3497.115',
'71.0.3564.1',
'71.0.3564.0',
'70.0.3538.38',
'69.0.3497.114',
'71.0.3563.0',
'71.0.3562.2',
'70.0.3538.37',
'69.0.3497.113',
'70.0.3538.36',
'70.0.3538.35',
'71.0.3562.1',
'71.0.3562.0',
'70.0.3538.34',
'69.0.3497.112',
'70.0.3538.33',
'71.0.3561.1',
'71.0.3561.0',
'70.0.3538.32',
'69.0.3497.111',
'71.0.3559.6',
'71.0.3560.1',
'71.0.3560.0',
'71.0.3559.5',
'71.0.3559.4',
'70.0.3538.31',
'69.0.3497.110',
'71.0.3559.3',
'70.0.3538.30',
'69.0.3497.109',
'71.0.3559.2',
'71.0.3559.1',
'71.0.3559.0',
'70.0.3538.29',
'69.0.3497.108',
'71.0.3558.2',
'71.0.3558.1',
'71.0.3558.0',
'70.0.3538.28',
'69.0.3497.107',
'71.0.3557.2',
'71.0.3557.1',
'71.0.3557.0',
'70.0.3538.27',
'69.0.3497.106',
'71.0.3554.4',
'70.0.3538.26',
'71.0.3556.1',
'71.0.3556.0',
'70.0.3538.25',
'71.0.3554.3',
'69.0.3497.105',
'71.0.3554.2',
'70.0.3538.24',
'69.0.3497.104',
'71.0.3555.2',
'70.0.3538.23',
'71.0.3555.1',
'71.0.3555.0',
'70.0.3538.22',
'69.0.3497.103',
'71.0.3554.1',
'71.0.3554.0',
'70.0.3538.21',
'69.0.3497.102',
'71.0.3553.3',
'70.0.3538.20',
'69.0.3497.101',
'71.0.3553.2',
'69.0.3497.100',
'71.0.3553.1',
'71.0.3553.0',
'70.0.3538.19',
'69.0.3497.99',
'69.0.3497.98',
'69.0.3497.97',
'71.0.3552.6',
'71.0.3552.5',
'71.0.3552.4',
'71.0.3552.3',
'71.0.3552.2',
'71.0.3552.1',
'71.0.3552.0',
'70.0.3538.18',
'69.0.3497.96',
'71.0.3551.3',
'71.0.3551.2',
'71.0.3551.1',
'71.0.3551.0',
'70.0.3538.17',
'69.0.3497.95',
'71.0.3550.3',
'71.0.3550.2',
'71.0.3550.1',
'71.0.3550.0',
'70.0.3538.16',
'69.0.3497.94',
'71.0.3549.1',
'71.0.3549.0',
'70.0.3538.15',
'69.0.3497.93',
'69.0.3497.92',
'71.0.3548.1',
'71.0.3548.0',
'70.0.3538.14',
'69.0.3497.91',
'71.0.3547.1',
'71.0.3547.0',
'70.0.3538.13',
'69.0.3497.90',
'71.0.3546.2',
'69.0.3497.89',
'71.0.3546.1',
'71.0.3546.0',
'70.0.3538.12',
'69.0.3497.88',
'71.0.3545.4',
'71.0.3545.3',
'71.0.3545.2',
'71.0.3545.1',
'71.0.3545.0',
'70.0.3538.11',
'69.0.3497.87',
'71.0.3544.5',
'71.0.3544.4',
'71.0.3544.3',
'71.0.3544.2',
'71.0.3544.1',
'71.0.3544.0',
'69.0.3497.86',
'70.0.3538.10',
'69.0.3497.85',
'70.0.3538.9',
'69.0.3497.84',
'71.0.3543.4',
'70.0.3538.8',
'71.0.3543.3',
'71.0.3543.2',
'71.0.3543.1',
'71.0.3543.0',
'70.0.3538.7',
'69.0.3497.83',
'71.0.3542.2',
'71.0.3542.1',
'71.0.3542.0',
'70.0.3538.6',
'69.0.3497.82',
'69.0.3497.81',
'71.0.3541.1',
'71.0.3541.0',
'70.0.3538.5',
'69.0.3497.80',
'71.0.3540.1',
'71.0.3540.0',
'70.0.3538.4',
'69.0.3497.79',
'70.0.3538.3',
'71.0.3539.1',
'71.0.3539.0',
'69.0.3497.78',
'68.0.3440.134',
'69.0.3497.77',
'70.0.3538.2',
'70.0.3538.1',
'70.0.3538.0',
'69.0.3497.76',
'68.0.3440.133',
'69.0.3497.75',
'70.0.3537.2',
'70.0.3537.1',
'70.0.3537.0',
'69.0.3497.74',
'68.0.3440.132',
'70.0.3536.0',
'70.0.3535.5',
'70.0.3535.4',
'70.0.3535.3',
'69.0.3497.73',
'68.0.3440.131',
'70.0.3532.8',
'70.0.3532.7',
'69.0.3497.72',
'69.0.3497.71',
'70.0.3535.2',
'70.0.3535.1',
'70.0.3535.0',
'69.0.3497.70',
'68.0.3440.130',
'69.0.3497.69',
'68.0.3440.129',
'70.0.3534.4',
'70.0.3534.3',
'70.0.3534.2',
'70.0.3534.1',
'70.0.3534.0',
'69.0.3497.68',
'68.0.3440.128',
'70.0.3533.2',
'70.0.3533.1',
'70.0.3533.0',
'69.0.3497.67',
'68.0.3440.127',
'70.0.3532.6',
'70.0.3532.5',
'70.0.3532.4',
'69.0.3497.66',
'68.0.3440.126',
'70.0.3532.3',
'70.0.3532.2',
'70.0.3532.1',
'69.0.3497.60',
'69.0.3497.65',
'69.0.3497.64',
'70.0.3532.0',
'70.0.3531.0',
'70.0.3530.4',
'70.0.3530.3',
'70.0.3530.2',
'69.0.3497.58',
'68.0.3440.125',
'69.0.3497.57',
'69.0.3497.56',
'69.0.3497.55',
'69.0.3497.54',
'70.0.3530.1',
'70.0.3530.0',
'69.0.3497.53',
'68.0.3440.124',
'69.0.3497.52',
'70.0.3529.3',
'70.0.3529.2',
'70.0.3529.1',
'70.0.3529.0',
'69.0.3497.51',
'70.0.3528.4',
'68.0.3440.123',
'70.0.3528.3',
'70.0.3528.2',
'70.0.3528.1',
'70.0.3528.0',
'69.0.3497.50',
'68.0.3440.122',
'70.0.3527.1',
'70.0.3527.0',
'69.0.3497.49',
'68.0.3440.121',
'70.0.3526.1',
'70.0.3526.0',
'68.0.3440.120',
'69.0.3497.48',
'69.0.3497.47',
'68.0.3440.119',
'68.0.3440.118',
'70.0.3525.5',
'70.0.3525.4',
'70.0.3525.3',
'68.0.3440.117',
'69.0.3497.46',
'70.0.3525.2',
'70.0.3525.1',
'70.0.3525.0',
'69.0.3497.45',
'68.0.3440.116',
'70.0.3524.4',
'70.0.3524.3',
'69.0.3497.44',
'70.0.3524.2',
'70.0.3524.1',
'70.0.3524.0',
'70.0.3523.2',
'69.0.3497.43',
'68.0.3440.115',
'70.0.3505.9',
'69.0.3497.42',
'70.0.3505.8',
'70.0.3523.1',
'70.0.3523.0',
'69.0.3497.41',
'68.0.3440.114',
'70.0.3505.7',
'69.0.3497.40',
'70.0.3522.1',
'70.0.3522.0',
'70.0.3521.2',
'69.0.3497.39',
'68.0.3440.113',
'70.0.3505.6',
'70.0.3521.1',
'70.0.3521.0',
'69.0.3497.38',
'68.0.3440.112',
'70.0.3520.1',
'70.0.3520.0',
'69.0.3497.37',
'68.0.3440.111',
'70.0.3519.3',
'70.0.3519.2',
'70.0.3519.1',
'70.0.3519.0',
'69.0.3497.36',
'68.0.3440.110',
'70.0.3518.1',
'70.0.3518.0',
'69.0.3497.35',
'69.0.3497.34',
'68.0.3440.109',
'70.0.3517.1',
'70.0.3517.0',
'69.0.3497.33',
'68.0.3440.108',
'69.0.3497.32',
'70.0.3516.3',
'70.0.3516.2',
'70.0.3516.1',
'70.0.3516.0',
'69.0.3497.31',
'68.0.3440.107',
'70.0.3515.4',
'68.0.3440.106',
'70.0.3515.3',
'70.0.3515.2',
'70.0.3515.1',
'70.0.3515.0',
'69.0.3497.30',
'68.0.3440.105',
'68.0.3440.104',
'70.0.3514.2',
'70.0.3514.1',
'70.0.3514.0',
'69.0.3497.29',
'68.0.3440.103',
'70.0.3513.1',
'70.0.3513.0',
'69.0.3497.28',
)
return _USER_AGENT_TPL % random.choice(_CHROME_VERSIONS)
std_headers = {
'User-Agent': random_user_agent(),
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-us,en;q=0.5',
}
USER_AGENTS = {
'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
}
NO_DEFAULT = object()
ENGLISH_MONTH_NAMES = [
'January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November', 'December']
MONTH_NAMES = {
'en': ENGLISH_MONTH_NAMES,
'fr': [
'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
}
KNOWN_EXTENSIONS = (
'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
'flv', 'f4v', 'f4a', 'f4b',
'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
'mkv', 'mka', 'mk3d',
'avi', 'divx',
'mov',
'asf', 'wmv', 'wma',
'3gp', '3g2',
'mp3',
'flac',
'ape',
'wav',
'f4f', 'f4m', 'm3u8', 'smil')
# needed for sanitizing filenames in restricted mode
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUY', ['TH', 'ss'],
'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuy', ['th'], 'y')))
DATE_FORMATS = (
'%d %B %Y',
'%d %b %Y',
'%B %d %Y',
'%B %dst %Y',
'%B %dnd %Y',
'%B %drd %Y',
'%B %dth %Y',
'%b %d %Y',
'%b %dst %Y',
'%b %dnd %Y',
'%b %drd %Y',
'%b %dth %Y',
'%b %dst %Y %I:%M',
'%b %dnd %Y %I:%M',
'%b %drd %Y %I:%M',
'%b %dth %Y %I:%M',
'%Y %m %d',
'%Y-%m-%d',
'%Y.%m.%d.',
'%Y/%m/%d',
'%Y/%m/%d %H:%M',
'%Y/%m/%d %H:%M:%S',
'%Y%m%d%H%M',
'%Y%m%d%H%M%S',
'%Y%m%d',
'%Y-%m-%d %H:%M',
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S.%f',
'%Y-%m-%d %H:%M:%S:%f',
'%d.%m.%Y %H:%M',
'%d.%m.%Y %H.%M',
'%Y-%m-%dT%H:%M:%SZ',
'%Y-%m-%dT%H:%M:%S.%fZ',
'%Y-%m-%dT%H:%M:%S.%f0Z',
'%Y-%m-%dT%H:%M:%S',
'%Y-%m-%dT%H:%M:%S.%f',
'%Y-%m-%dT%H:%M',
'%b %d %Y at %H:%M',
'%b %d %Y at %H:%M:%S',
'%B %d %Y at %H:%M',
'%B %d %Y at %H:%M:%S',
'%H:%M %d-%b-%Y',
)
DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
DATE_FORMATS_DAY_FIRST.extend([
'%d-%m-%Y',
'%d.%m.%Y',
'%d.%m.%y',
'%d/%m/%Y',
'%d/%m/%y',
'%d/%m/%Y %H:%M:%S',
])
DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
DATE_FORMATS_MONTH_FIRST.extend([
'%m-%d-%Y',
'%m.%d.%Y',
'%m/%d/%Y',
'%m/%d/%y',
'%m/%d/%Y %H:%M:%S',
])
PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
JSON_LD_RE = r'(?is)<script[^>]+type=(["\']?)application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>'
def preferredencoding():
"""Get preferred encoding.
Returns the best encoding scheme for the system, based on
locale.getpreferredencoding() and some further tweaks.
"""
try:
pref = locale.getpreferredencoding()
'TEST'.encode(pref)
except Exception:
pref = 'UTF-8'
return pref
def write_json_file(obj, fn):
""" Encode obj as JSON and write it to fn, atomically if possible """
fn = encodeFilename(fn)
if sys.version_info < (3, 0) and sys.platform != 'win32':
encoding = get_filesystem_encoding()
# os.path.basename returns a bytes object, but NamedTemporaryFile
# will fail if the filename contains non ascii characters unless we
# use a unicode object
path_basename = lambda f: os.path.basename(fn).decode(encoding)
# the same for os.path.dirname
path_dirname = lambda f: os.path.dirname(fn).decode(encoding)
else:
path_basename = os.path.basename
path_dirname = os.path.dirname
args = {
'suffix': '.tmp',
'prefix': path_basename(fn) + '.',
'dir': path_dirname(fn),
'delete': False,
}
# In Python 2.x, json.dump expects a bytestream.
# In Python 3.x, it writes to a character stream
if sys.version_info < (3, 0):
args['mode'] = 'wb'
else:
args.update({
'mode': 'w',
'encoding': 'utf-8',
})
tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))
try:
with tf:
json.dump(obj, tf, ensure_ascii=False)
if sys.platform == 'win32':
# Need to remove existing file on Windows, else os.rename raises
# WindowsError or FileExistsError.
try:
os.unlink(fn)
except OSError:
pass
try:
mask = os.umask(0)
os.umask(mask)
os.chmod(tf.name, 0o666 & ~mask)
except OSError:
pass
os.rename(tf.name, fn)
except Exception:
try:
os.remove(tf.name)
except OSError:
pass
raise
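# Illustrative usage (added for clarity, not from the original module):
# write_json_file({'id': 'abc', 'title': 'Example'}, 'info.json') serializes the
# dict to a temporary file in the same directory and then renames it over
# 'info.json', so readers never see a half-written file.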
if sys.version_info >= (2, 7):
def find_xpath_attr(node, xpath, key, val=None):
""" Find the xpath xpath[@key=val] """
assert re.match(r'^[a-zA-Z_-]+$', key)
expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val))
return node.find(expr)
else:
def find_xpath_attr(node, xpath, key, val=None):
for f in node.findall(compat_xpath(xpath)):
if key not in f.attrib:
continue
if val is None or f.attrib.get(key) == val:
return f
return None
# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter
def xpath_with_ns(path, ns_map):
components = [c.split(':') for c in path.split('/')]
replaced = []
for c in components:
if len(c) == 1:
replaced.append(c[0])
else:
ns, tag = c
replaced.append('{%s}%s' % (ns_map[ns], tag))
return '/'.join(replaced)
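# Illustrative example (added for clarity, not from the original module): with
# ns_map = {'media': 'http://search.yahoo.com/mrss/'},
# xpath_with_ns('.//media:thumbnail', ns_map) returns
# './/{http://search.yahoo.com/mrss/}thumbnail', the fully-qualified form that
# ElementTree expects.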
def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
def _find_xpath(xpath):
return node.find(compat_xpath(xpath))
if isinstance(xpath, (str, compat_str)):
n = _find_xpath(xpath)
else:
for xp in xpath:
n = _find_xpath(xp)
if n is not None:
break
if n is None:
if default is not NO_DEFAULT:
return default
elif fatal:
name = xpath if name is None else name
raise ExtractorError('Could not find XML element %s' % name)
else:
return None
return n
def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
n = xpath_element(node, xpath, name, fatal=fatal, default=default)
if n is None or n == default:
return n
if n.text is None:
if default is not NO_DEFAULT:
return default
elif fatal:
name = xpath if name is None else name
raise ExtractorError('Could not find XML element\'s text %s' % name)
else:
return None
return n.text
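# Illustrative example (added for clarity, not from the original module): given
# doc = compat_etree_fromstring('<a><b>x</b></a>'), xpath_text(doc, './b')
# returns 'x'; a missing path returns None (or `default`) unless fatal=True, in
# which case ExtractorError is raised.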
def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
n = find_xpath_attr(node, xpath, key)
if n is None:
if default is not NO_DEFAULT:
return default
elif fatal:
name = '%s[@%s]' % (xpath, key) if name is None else name
raise ExtractorError('Could not find XML attribute %s' % name)
else:
return None
return n.attrib[key]
def get_element_by_id(id, html):
"""Return the content of the tag with the specified ID in the passed HTML document"""
return get_element_by_attribute('id', id, html)
def get_element_html_by_id(id, html):
"""Return the html of the tag with the specified ID in the passed HTML document"""
return get_element_html_by_attribute('id', id, html)
def get_element_by_class(class_name, html):
"""Return the content of the first tag with the specified class in the passed HTML document"""
retval = get_elements_by_class(class_name, html)
return retval[0] if retval else None
def get_element_html_by_class(class_name, html):
"""Return the html of the first tag with the specified class in the passed HTML document"""
retval = get_elements_html_by_class(class_name, html)
return retval[0] if retval else None
def get_element_by_attribute(attribute, value, html, escape_value=True):
retval = get_elements_by_attribute(attribute, value, html, escape_value)
return retval[0] if retval else None
def get_element_html_by_attribute(attribute, value, html, escape_value=True):
retval = get_elements_html_by_attribute(attribute, value, html, escape_value)
return retval[0] if retval else None
def get_elements_by_class(class_name, html):
"""Return the content of all tags with the specified class in the passed HTML document as a list"""
return get_elements_by_attribute(
'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
html, escape_value=False)
def get_elements_html_by_class(class_name, html):
"""Return the html of all tags with the specified class in the passed HTML document as a list"""
return get_elements_html_by_attribute(
'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
html, escape_value=False)
def get_elements_by_attribute(*args, **kwargs):
"""Return the content of the tag with the specified attribute in the passed HTML document"""
return [content for content, _ in get_elements_text_and_html_by_attribute(*args, **kwargs)]
def get_elements_html_by_attribute(*args, **kwargs):
"""Return the html of the tag with the specified attribute in the passed HTML document"""
return [whole for _, whole in get_elements_text_and_html_by_attribute(*args, **kwargs)]
def get_elements_text_and_html_by_attribute(attribute, value, html, escape_value=True):
"""
Return the text (content) and the html (whole) of the tag with the specified
attribute in the passed HTML document
"""
value_quote_optional = '' if re.match(r'''[\s"'`=<>]''', value) else '?'
value = re.escape(value) if escape_value else value
partial_element_re = r'''(?x)
<(?P<tag>[a-zA-Z0-9:._-]+)
(?:\s(?:[^>"']|"[^"]*"|'[^']*')*)?
\s%(attribute)s\s*=\s*(?P<_q>['"]%(vqo)s)(?-x:%(value)s)(?P=_q)
''' % {'attribute': re.escape(attribute), 'value': value, 'vqo': value_quote_optional}
for m in re.finditer(partial_element_re, html):
content, whole = get_element_text_and_html_by_tag(m.group('tag'), html[m.start():])
yield (
unescapeHTML(re.sub(r'^(?P<q>["\'])(?P<content>.*)(?P=q)$', r'\g<content>', content, flags=re.DOTALL)),
whole
)
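# Illustrative example (added for clarity, not from the original module):
# list(get_elements_text_and_html_by_attribute('data-id', '42', '<div data-id="42">hi</div>'))
# yields [('hi', '<div data-id="42">hi</div>')], i.e. (content, whole element) pairs.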
class HTMLBreakOnClosingTagParser(compat_HTMLParser):
"""
HTML parser which raises HTMLBreakOnClosingTagException upon reaching the
closing tag for the first opening tag it has encountered, and can be used
as a context manager
"""
class HTMLBreakOnClosingTagException(Exception):
pass
def __init__(self):
self.tagstack = collections.deque()
compat_HTMLParser.__init__(self)
def __enter__(self):
return self
def __exit__(self, *_):
self.close()
def close(self):
# handle_endtag does not return upon raising HTMLBreakOnClosingTagException,
# so data remains buffered; we no longer have any interest in it, thus
# override this method to discard it
pass
def handle_starttag(self, tag, _):
self.tagstack.append(tag)
def handle_endtag(self, tag):
if not self.tagstack:
raise compat_HTMLParseError('no tags in the stack')
while self.tagstack:
inner_tag = self.tagstack.pop()
if inner_tag == tag:
break
else:
raise compat_HTMLParseError(f'matching opening tag for closing {tag} tag not found')
if not self.tagstack:
raise self.HTMLBreakOnClosingTagException()
def get_element_text_and_html_by_tag(tag, html):
"""
For the first element with the specified tag in the passed HTML document
    return its content (text) and the whole element (html)
"""
def find_or_raise(haystack, needle, exc):
try:
return haystack.index(needle)
except ValueError:
raise exc
closing_tag = f'</{tag}>'
whole_start = find_or_raise(
html, f'<{tag}', compat_HTMLParseError(f'opening {tag} tag not found'))
content_start = find_or_raise(
html[whole_start:], '>', compat_HTMLParseError(f'malformed opening {tag} tag'))
content_start += whole_start + 1
with HTMLBreakOnClosingTagParser() as parser:
parser.feed(html[whole_start:content_start])
if not parser.tagstack or parser.tagstack[0] != tag:
raise compat_HTMLParseError(f'parser did not match opening {tag} tag')
offset = content_start
while offset < len(html):
next_closing_tag_start = find_or_raise(
html[offset:], closing_tag,
compat_HTMLParseError(f'closing {tag} tag not found'))
next_closing_tag_end = next_closing_tag_start + len(closing_tag)
try:
parser.feed(html[offset:offset + next_closing_tag_end])
offset += next_closing_tag_end
except HTMLBreakOnClosingTagParser.HTMLBreakOnClosingTagException:
return html[content_start:offset + next_closing_tag_start], \
html[whole_start:offset + next_closing_tag_end]
raise compat_HTMLParseError('unexpected end of html')
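# Usage sketch (illustrative): nested tags with the same name are balanced by the
# parser above, so
#   get_element_text_and_html_by_tag('div', '<div>a<div>b</div>c</div>')
#   would be expected to return ('a<div>b</div>c', '<div>a<div>b</div>c</div>').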
class HTMLAttributeParser(compat_HTMLParser):
"""Trivial HTML parser to gather the attributes for a single element"""
def __init__(self):
self.attrs = {}
compat_HTMLParser.__init__(self)
def handle_starttag(self, tag, attrs):
self.attrs = dict(attrs)
class HTMLListAttrsParser(compat_HTMLParser):
"""HTML parser to gather the attributes for the elements of a list"""
def __init__(self):
compat_HTMLParser.__init__(self)
self.items = []
self._level = 0
def handle_starttag(self, tag, attrs):
if tag == 'li' and self._level == 0:
self.items.append(dict(attrs))
self._level += 1
def handle_endtag(self, tag):
self._level -= 1
def extract_attributes(html_element):
"""Given a string for an HTML element such as
<el
a="foo" B="bar" c="&98;az" d=boz
         empty= noval entity="&amp;"
sq='"' dq="'"
>
Decode and return a dictionary of attributes.
{
        'a': 'foo', 'b': 'bar', 'c': 'baz', 'd': 'boz',
'empty': '', 'noval': None, 'entity': '&',
'sq': '"', 'dq': '\''
}.
NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions,
but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5.
"""
parser = HTMLAttributeParser()
try:
parser.feed(html_element)
parser.close()
# Older Python may throw HTMLParseError in case of malformed HTML
except compat_HTMLParseError:
pass
return parser.attrs
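# Usage sketch (illustrative):
#   extract_attributes('<a href="/watch?v=abc" class="link">')
#   would be expected to return {'href': '/watch?v=abc', 'class': 'link'}.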
def parse_list(webpage):
"""Given a string for an series of HTML <li> elements,
return a dictionary of their attributes"""
parser = HTMLListAttrsParser()
parser.feed(webpage)
parser.close()
return parser.items
def clean_html(html):
"""Clean an HTML snippet into a readable string"""
if html is None: # Convenience for sanitizing descriptions etc.
return html
# Newline vs <br />
html = html.replace('\n', ' ')
html = re.sub(r'(?u)\s*<\s*br\s*/?\s*>\s*', '\n', html)
html = re.sub(r'(?u)<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
# Strip html tags
html = re.sub('<.*?>', '', html)
# Replace html entities
html = unescapeHTML(html)
return html.strip()
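# Usage sketch (illustrative):
#   clean_html('line one<br/>line two')
#   would be expected to return 'line one\nline two' (tags stripped, <br/> kept as a newline).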
def sanitize_open(filename, open_mode):
"""Try to open the given filename, and slightly tweak it if this fails.
Attempts to open the given filename. If this fails, it tries to change
the filename slightly, step by step, until it's either able to open it
or it fails and raises a final exception, like the standard open()
function.
It returns the tuple (stream, definitive_file_name).
"""
try:
if filename == '-':
if sys.platform == 'win32':
import msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
stream = open(encodeFilename(filename), open_mode)
return (stream, filename)
except (IOError, OSError) as err:
if err.errno in (errno.EACCES,):
raise
# In case of error, try to remove win32 forbidden chars
alt_filename = sanitize_path(filename)
if alt_filename == filename:
raise
else:
# An exception here should be caught in the caller
stream = open(encodeFilename(alt_filename), open_mode)
return (stream, alt_filename)
def timeconvert(timestr):
"""Convert RFC 2822 defined time string into system timestamp"""
timestamp = None
timetuple = email.utils.parsedate_tz(timestr)
if timetuple is not None:
timestamp = email.utils.mktime_tz(timetuple)
return timestamp
def sanitize_filename(s, restricted=False, is_id=False):
"""Sanitizes a string so it could be used as part of a filename.
If restricted is set, use a stricter subset of allowed characters.
Set is_id if this is not an arbitrary string, but an ID that should be kept
if possible.
"""
def replace_insane(char):
if restricted and char in ACCENT_CHARS:
return ACCENT_CHARS[char]
elif not restricted and char == '\n':
return ' '
elif char == '?' or ord(char) < 32 or ord(char) == 127:
return ''
elif char == '"':
return '' if restricted else '\''
elif char == ':':
return '_-' if restricted else ' -'
elif char in '\\/|*<>':
return '_'
if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
return '_'
if restricted and ord(char) > 127:
return '_'
return char
if s == '':
return ''
# Handle timestamps
s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)
result = ''.join(map(replace_insane, s))
if not is_id:
while '__' in result:
result = result.replace('__', '_')
result = result.strip('_')
# Common case of "Foreign band name - English song title"
if restricted and result.startswith('-_'):
result = result[2:]
if result.startswith('-'):
result = '_' + result[len('-'):]
result = result.lstrip('.')
if not result:
result = '_'
return result
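# Usage sketch (illustrative):
#   sanitize_filename('foo: bar?')  -> 'foo - bar'
#   with restricted=True, spaces and most punctuation would instead collapse to '_'.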
def sanitize_path(s, force=False):
"""Sanitizes and normalizes path on Windows"""
if sys.platform == 'win32':
force = False
drive_or_unc, _ = os.path.splitdrive(s)
if sys.version_info < (2, 7) and not drive_or_unc:
drive_or_unc, _ = os.path.splitunc(s)
elif force:
drive_or_unc = ''
else:
return s
norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
if drive_or_unc:
norm_path.pop(0)
sanitized_path = [
path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
for path_part in norm_path]
if drive_or_unc:
sanitized_path.insert(0, drive_or_unc + os.path.sep)
elif force and s[0] == os.path.sep:
sanitized_path.insert(0, os.path.sep)
return os.path.join(*sanitized_path)
def sanitize_url(url):
# Prepend protocol-less URLs with `http:` scheme in order to mitigate
# the number of unwanted failures due to missing protocol
if url.startswith('//'):
return 'http:%s' % url
# Fix some common typos seen so far
COMMON_TYPOS = (
# https://github.com/ytdl-org/youtube-dl/issues/15649
(r'^httpss://', r'https://'),
# https://bx1.be/lives/direct-tv/
(r'^rmtp([es]?)://', r'rtmp\1://'),
)
for mistake, fixup in COMMON_TYPOS:
if re.match(mistake, url):
return re.sub(mistake, fixup, url)
return url
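# Usage sketch (illustrative):
#   sanitize_url('//example.com/x')         -> 'http://example.com/x'
#   sanitize_url('httpss://example.com/x')  -> 'https://example.com/x'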
def extract_basic_auth(url):
parts = compat_urlparse.urlsplit(url)
if parts.username is None:
return url, None
url = compat_urlparse.urlunsplit(parts._replace(netloc=(
parts.hostname if parts.port is None
else '%s:%d' % (parts.hostname, parts.port))))
auth_payload = base64.b64encode(
('%s:%s' % (parts.username, parts.password or '')).encode('utf-8'))
return url, 'Basic ' + auth_payload.decode('utf-8')
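# Usage sketch (illustrative):
#   extract_basic_auth('http://user:pass@example.com/x')
#   would be expected to return ('http://example.com/x', 'Basic dXNlcjpwYXNz').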
def sanitized_Request(url, *args, **kwargs):
url, auth_header = extract_basic_auth(escape_url(sanitize_url(url)))
if auth_header is not None:
headers = args[1] if len(args) >= 2 else kwargs.setdefault('headers', {})
headers['Authorization'] = auth_header
return compat_urllib_request.Request(url, *args, **kwargs)
def expand_path(s):
"""Expand shell variables and ~"""
return os.path.expandvars(compat_expanduser(s))
def orderedSet(iterable):
""" Remove all duplicates from the input iterable """
res = []
for el in iterable:
if el not in res:
res.append(el)
return res
def _htmlentity_transform(entity_with_semicolon):
"""Transforms an HTML entity to a character."""
entity = entity_with_semicolon[:-1]
# Known non-numeric HTML entity
if entity in compat_html_entities.name2codepoint:
return compat_chr(compat_html_entities.name2codepoint[entity])
# TODO: HTML5 allows entities without a semicolon. For example,
# 'Éric' should be decoded as 'Éric'.
if entity_with_semicolon in compat_html_entities_html5:
return compat_html_entities_html5[entity_with_semicolon]
mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
if mobj is not None:
numstr = mobj.group(1)
if numstr.startswith('x'):
base = 16
numstr = '0%s' % numstr
else:
base = 10
# See https://github.com/ytdl-org/youtube-dl/issues/7518
try:
return compat_chr(int(numstr, base))
except ValueError:
pass
# Unknown entity in name, return its literal representation
return '&%s;' % entity
def unescapeHTML(s):
if s is None:
return None
assert type(s) == compat_str
return re.sub(
r'&([^&;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)
def escapeHTML(text):
return (
text
        .replace('&', '&amp;')
        .replace('<', '&lt;')
        .replace('>', '&gt;')
        .replace('"', '&quot;')
        .replace("'", '&#39;')
)
def process_communicate_or_kill(p, *args, **kwargs):
try:
return p.communicate(*args, **kwargs)
except BaseException: # Including KeyboardInterrupt
p.kill()
p.wait()
raise
class Popen(subprocess.Popen):
if sys.platform == 'win32':
_startupinfo = subprocess.STARTUPINFO()
_startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
else:
_startupinfo = None
def __init__(self, *args, **kwargs):
super(Popen, self).__init__(*args, **kwargs, startupinfo=self._startupinfo)
def communicate_or_kill(self, *args, **kwargs):
return process_communicate_or_kill(self, *args, **kwargs)
def get_subprocess_encoding():
if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
# For subprocess calls, encode with locale encoding
# Refer to http://stackoverflow.com/a/9951851/35070
encoding = preferredencoding()
else:
encoding = sys.getfilesystemencoding()
if encoding is None:
encoding = 'utf-8'
return encoding
def encodeFilename(s, for_subprocess=False):
"""
@param s The name of the file
"""
assert type(s) == compat_str
# Python 3 has a Unicode API
if sys.version_info >= (3, 0):
return s
# Pass '' directly to use Unicode APIs on Windows 2000 and up
# (Detecting Windows NT 4 is tricky because 'major >= 4' would
# match Windows 9x series as well. Besides, NT 4 is obsolete.)
if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
return s
# Jython assumes filenames are Unicode strings though reported as Python 2.x compatible
if sys.platform.startswith('java'):
return s
return s.encode(get_subprocess_encoding(), 'ignore')
def decodeFilename(b, for_subprocess=False):
if sys.version_info >= (3, 0):
return b
if not isinstance(b, bytes):
return b
return b.decode(get_subprocess_encoding(), 'ignore')
def encodeArgument(s):
if not isinstance(s, compat_str):
# Legacy code that uses byte strings
# Uncomment the following line after fixing all post processors
# assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
s = s.decode('ascii')
return encodeFilename(s, True)
def decodeArgument(b):
return decodeFilename(b, True)
def decodeOption(optval):
if optval is None:
return optval
if isinstance(optval, bytes):
optval = optval.decode(preferredencoding())
assert isinstance(optval, compat_str)
return optval
_timetuple = collections.namedtuple('Time', ('hours', 'minutes', 'seconds', 'milliseconds'))
def timetuple_from_msec(msec):
secs, msec = divmod(msec, 1000)
mins, secs = divmod(secs, 60)
hrs, mins = divmod(mins, 60)
return _timetuple(hrs, mins, secs, msec)
def formatSeconds(secs, delim=':', msec=False):
time = timetuple_from_msec(secs * 1000)
if time.hours:
ret = '%d%s%02d%s%02d' % (time.hours, delim, time.minutes, delim, time.seconds)
elif time.minutes:
ret = '%d%s%02d' % (time.minutes, delim, time.seconds)
else:
ret = '%d' % time.seconds
return '%s.%03d' % (ret, time.milliseconds) if msec else ret
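# Usage sketch (illustrative):
#   formatSeconds(3661)             -> '1:01:01'
#   formatSeconds(61.5, msec=True)  -> '1:01.500'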
def _ssl_load_windows_store_certs(ssl_context, storename):
# Code adapted from _load_windows_store_certs in https://github.com/python/cpython/blob/main/Lib/ssl.py
try:
certs = [cert for cert, encoding, trust in ssl.enum_certificates(storename)
if encoding == 'x509_asn' and (
trust is True or ssl.Purpose.SERVER_AUTH.oid in trust)]
except PermissionError:
return
for cert in certs:
try:
ssl_context.load_verify_locations(cadata=cert)
except ssl.SSLError:
pass
def make_HTTPS_handler(params, **kwargs):
opts_check_certificate = not params.get('nocheckcertificate')
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = opts_check_certificate
context.verify_mode = ssl.CERT_REQUIRED if opts_check_certificate else ssl.CERT_NONE
if opts_check_certificate:
try:
context.load_default_certs()
# Work around the issue in load_default_certs when there are bad certificates. See:
# https://github.com/yt-dlp/yt-dlp/issues/1060,
# https://bugs.python.org/issue35665, https://bugs.python.org/issue45312
except ssl.SSLError:
# enum_certificates is not present in mingw python. See https://github.com/yt-dlp/yt-dlp/issues/1151
if sys.platform == 'win32' and hasattr(ssl, 'enum_certificates'):
# Create a new context to discard any certificates that were already loaded
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname, context.verify_mode = True, ssl.CERT_REQUIRED
for storename in ('CA', 'ROOT'):
_ssl_load_windows_store_certs(context, storename)
context.set_default_verify_paths()
return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
def bug_reports_message(before=';'):
if ytdl_is_updateable():
update_cmd = 'type yt-dlp -U to update'
else:
update_cmd = 'see https://github.com/yt-dlp/yt-dlp on how to update'
msg = 'please report this issue on https://github.com/yt-dlp/yt-dlp .'
msg += ' Make sure you are using the latest version; %s.' % update_cmd
msg += ' Be sure to call yt-dlp with the --verbose flag and include its complete output.'
before = before.rstrip()
if not before or before.endswith(('.', '!', '?')):
msg = msg[0].title() + msg[1:]
return (before + ' ' if before else '') + msg
class YoutubeDLError(Exception):
"""Base exception for YoutubeDL errors."""
msg = None
def __init__(self, msg=None):
if msg is not None:
self.msg = msg
elif self.msg is None:
self.msg = type(self).__name__
super().__init__(self.msg)
network_exceptions = [compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error]
if hasattr(ssl, 'CertificateError'):
network_exceptions.append(ssl.CertificateError)
network_exceptions = tuple(network_exceptions)
class ExtractorError(YoutubeDLError):
"""Error during info extraction."""
def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None, ie=None):
""" tb, if given, is the original traceback (so that it can be printed out).
If expected is set, this is a normal error message and most likely not a bug in yt-dlp.
"""
if sys.exc_info()[0] in network_exceptions:
expected = True
self.msg = str(msg)
self.traceback = tb
self.expected = expected
self.cause = cause
self.video_id = video_id
self.ie = ie
self.exc_info = sys.exc_info() # preserve original exception
super(ExtractorError, self).__init__(''.join((
format_field(ie, template='[%s] '),
format_field(video_id, template='%s: '),
self.msg,
format_field(cause, template=' (caused by %r)'),
'' if expected else bug_reports_message())))
def format_traceback(self):
if self.traceback is None:
return None
return ''.join(traceback.format_tb(self.traceback))
class UnsupportedError(ExtractorError):
def __init__(self, url):
super(UnsupportedError, self).__init__(
'Unsupported URL: %s' % url, expected=True)
self.url = url
class RegexNotFoundError(ExtractorError):
"""Error when a regex didn't match"""
pass
class GeoRestrictedError(ExtractorError):
"""Geographic restriction Error exception.
This exception may be thrown when a video is not available from your
geographic location due to geographic restrictions imposed by a website.
"""
def __init__(self, msg, countries=None, **kwargs):
kwargs['expected'] = True
super(GeoRestrictedError, self).__init__(msg, **kwargs)
self.countries = countries
class DownloadError(YoutubeDLError):
"""Download Error exception.
This exception may be thrown by FileDownloader objects if they are not
configured to continue on errors. They will contain the appropriate
error message.
"""
def __init__(self, msg, exc_info=None):
""" exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
super(DownloadError, self).__init__(msg)
self.exc_info = exc_info
class EntryNotInPlaylist(YoutubeDLError):
"""Entry not in playlist exception.
This exception will be thrown by YoutubeDL when a requested entry
is not found in the playlist info_dict
"""
msg = 'Entry not found in info'
class SameFileError(YoutubeDLError):
"""Same File exception.
This exception will be thrown by FileDownloader objects if they detect
multiple files would have to be downloaded to the same file on disk.
"""
msg = 'Fixed output name but more than one file to download'
def __init__(self, filename=None):
if filename is not None:
self.msg += f': {filename}'
super().__init__(self.msg)
class PostProcessingError(YoutubeDLError):
"""Post Processing exception.
This exception may be raised by PostProcessor's .run() method to
indicate an error in the postprocessing task.
"""
class DownloadCancelled(YoutubeDLError):
""" Exception raised when the download queue should be interrupted """
msg = 'The download was cancelled'
class ExistingVideoReached(DownloadCancelled):
""" --break-on-existing triggered """
msg = 'Encountered a video that is already in the archive, stopping due to --break-on-existing'
class RejectedVideoReached(DownloadCancelled):
""" --break-on-reject triggered """
msg = 'Encountered a video that did not match filter, stopping due to --break-on-reject'
class MaxDownloadsReached(DownloadCancelled):
""" --max-downloads limit has been reached. """
msg = 'Maximum number of downloads reached, stopping due to --max-downloads'
class ReExtractInfo(YoutubeDLError):
""" Video info needs to be re-extracted. """
def __init__(self, msg, expected=False):
super().__init__(msg)
self.expected = expected
class ThrottledDownload(ReExtractInfo):
""" Download speed below --throttled-rate. """
msg = 'The download speed is below throttle limit'
def __init__(self):
super().__init__(self.msg, expected=False)
class UnavailableVideoError(YoutubeDLError):
"""Unavailable Format exception.
This exception will be thrown when a video is requested
in a format that is not available for that video.
"""
msg = 'Unable to download video'
def __init__(self, err=None):
if err is not None:
self.msg += f': {err}'
super().__init__(self.msg)
class ContentTooShortError(YoutubeDLError):
"""Content Too Short exception.
This exception may be raised by FileDownloader objects when a file they
download is too small for what the server announced first, indicating
the connection was probably interrupted.
"""
def __init__(self, downloaded, expected):
super(ContentTooShortError, self).__init__(
'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected)
)
# Both in bytes
self.downloaded = downloaded
self.expected = expected
class XAttrMetadataError(YoutubeDLError):
def __init__(self, code=None, msg='Unknown error'):
super(XAttrMetadataError, self).__init__(msg)
self.code = code
self.msg = msg
# Parsing code and msg
if (self.code in (errno.ENOSPC, errno.EDQUOT)
or 'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
self.reason = 'NO_SPACE'
elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
self.reason = 'VALUE_TOO_LONG'
else:
self.reason = 'NOT_SUPPORTED'
class XAttrUnavailableError(YoutubeDLError):
pass
def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
# Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting
# expected HTTP responses to meet HTTP/1.0 or later (see also
# https://github.com/ytdl-org/youtube-dl/issues/6727)
if sys.version_info < (3, 0):
kwargs['strict'] = True
hc = http_class(*args, **compat_kwargs(kwargs))
source_address = ydl_handler._params.get('source_address')
if source_address is not None:
# This is to workaround _create_connection() from socket where it will try all
# address data from getaddrinfo() including IPv6. This filters the result from
# getaddrinfo() based on the source_address value.
# This is based on the cpython socket.create_connection() function.
# https://github.com/python/cpython/blob/master/Lib/socket.py#L691
def _create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
host, port = address
err = None
addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
af = socket.AF_INET if '.' in source_address[0] else socket.AF_INET6
ip_addrs = [addr for addr in addrs if addr[0] == af]
if addrs and not ip_addrs:
ip_version = 'v4' if af == socket.AF_INET else 'v6'
raise socket.error(
"No remote IP%s addresses available for connect, can't use '%s' as source address"
% (ip_version, source_address[0]))
for res in ip_addrs:
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
sock.bind(source_address)
sock.connect(sa)
err = None # Explicitly break reference cycle
return sock
except socket.error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise socket.error('getaddrinfo returns an empty list')
if hasattr(hc, '_create_connection'):
hc._create_connection = _create_connection
sa = (source_address, 0)
if hasattr(hc, 'source_address'): # Python 2.7+
hc.source_address = sa
else: # Python 2.6
def _hc_connect(self, *args, **kwargs):
sock = _create_connection(
(self.host, self.port), self.timeout, sa)
if is_https:
self.sock = ssl.wrap_socket(
sock, self.key_file, self.cert_file,
ssl_version=ssl.PROTOCOL_TLSv1)
else:
self.sock = sock
hc.connect = functools.partial(_hc_connect, hc)
return hc
def handle_youtubedl_headers(headers):
filtered_headers = headers
if 'Youtubedl-no-compression' in filtered_headers:
filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding')
del filtered_headers['Youtubedl-no-compression']
return filtered_headers
class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
"""Handler for HTTP requests and responses.
This class, when installed with an OpenerDirector, automatically adds
the standard headers to every HTTP request and handles gzipped and
deflated responses from web servers. If compression is to be avoided in
a particular request, the original request in the program code only has
to include the HTTP header "Youtubedl-no-compression", which will be
removed before making the real request.
Part of this code was copied from:
http://techknack.net/python-urllib2-handlers/
Andrew Rowls, the author of that code, agreed to release it to the
public domain.
"""
def __init__(self, params, *args, **kwargs):
compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
self._params = params
def http_open(self, req):
conn_class = compat_http_client.HTTPConnection
socks_proxy = req.headers.get('Ytdl-socks-proxy')
if socks_proxy:
conn_class = make_socks_conn_class(conn_class, socks_proxy)
del req.headers['Ytdl-socks-proxy']
return self.do_open(functools.partial(
_create_http_connection, self, conn_class, False),
req)
@staticmethod
def deflate(data):
if not data:
return data
try:
return zlib.decompress(data, -zlib.MAX_WBITS)
except zlib.error:
return zlib.decompress(data)
def http_request(self, req):
# According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
# always respected by websites, some tend to give out URLs with non percent-encoded
# non-ASCII characters (see telemb.py, ard.py [#3412])
# urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
# To work around aforementioned issue we will replace request's original URL with
# percent-encoded one
# Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
# the code of this workaround has been moved here from YoutubeDL.urlopen()
url = req.get_full_url()
url_escaped = escape_url(url)
# Substitute URL if any change after escaping
if url != url_escaped:
req = update_Request(req, url=url_escaped)
for h, v in std_headers.items():
# Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
# The dict keys are capitalized because of this bug by urllib
if h.capitalize() not in req.headers:
req.add_header(h, v)
req.headers = handle_youtubedl_headers(req.headers)
if sys.version_info < (2, 7) and '#' in req.get_full_url():
# Python 2.6 is brain-dead when it comes to fragments
req._Request__original = req._Request__original.partition('#')[0]
req._Request__r_type = req._Request__r_type.partition('#')[0]
return req
def http_response(self, req, resp):
old_resp = resp
# gzip
if resp.headers.get('Content-encoding', '') == 'gzip':
content = resp.read()
gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
try:
uncompressed = io.BytesIO(gz.read())
except IOError as original_ioerror:
                # There may be junk at the end of the file
# See http://stackoverflow.com/q/4928560/35070 for details
for i in range(1, 1024):
try:
gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
uncompressed = io.BytesIO(gz.read())
except IOError:
continue
break
else:
raise original_ioerror
resp = compat_urllib_request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
resp.msg = old_resp.msg
del resp.headers['Content-encoding']
# deflate
if resp.headers.get('Content-encoding', '') == 'deflate':
gz = io.BytesIO(self.deflate(resp.read()))
resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
resp.msg = old_resp.msg
del resp.headers['Content-encoding']
# Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
# https://github.com/ytdl-org/youtube-dl/issues/6457).
if 300 <= resp.code < 400:
location = resp.headers.get('Location')
if location:
# As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
if sys.version_info >= (3, 0):
location = location.encode('iso-8859-1').decode('utf-8')
else:
location = location.decode('utf-8')
location_escaped = escape_url(location)
if location != location_escaped:
del resp.headers['Location']
if sys.version_info < (3, 0):
location_escaped = location_escaped.encode('utf-8')
resp.headers['Location'] = location_escaped
return resp
https_request = http_request
https_response = http_response
def make_socks_conn_class(base_class, socks_proxy):
assert issubclass(base_class, (
compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection))
url_components = compat_urlparse.urlparse(socks_proxy)
if url_components.scheme.lower() == 'socks5':
socks_type = ProxyType.SOCKS5
elif url_components.scheme.lower() in ('socks', 'socks4'):
socks_type = ProxyType.SOCKS4
elif url_components.scheme.lower() == 'socks4a':
socks_type = ProxyType.SOCKS4A
def unquote_if_non_empty(s):
if not s:
return s
return compat_urllib_parse_unquote_plus(s)
proxy_args = (
socks_type,
url_components.hostname, url_components.port or 1080,
True, # Remote DNS
unquote_if_non_empty(url_components.username),
unquote_if_non_empty(url_components.password),
)
class SocksConnection(base_class):
def connect(self):
self.sock = sockssocket()
self.sock.setproxy(*proxy_args)
if type(self.timeout) in (int, float):
self.sock.settimeout(self.timeout)
self.sock.connect((self.host, self.port))
if isinstance(self, compat_http_client.HTTPSConnection):
if hasattr(self, '_context'): # Python > 2.6
self.sock = self._context.wrap_socket(
self.sock, server_hostname=self.host)
else:
self.sock = ssl.wrap_socket(self.sock)
return SocksConnection
class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
def __init__(self, params, https_conn_class=None, *args, **kwargs):
compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
self._params = params
def https_open(self, req):
kwargs = {}
conn_class = self._https_conn_class
if hasattr(self, '_context'): # python > 2.6
kwargs['context'] = self._context
if hasattr(self, '_check_hostname'): # python 3.x
kwargs['check_hostname'] = self._check_hostname
socks_proxy = req.headers.get('Ytdl-socks-proxy')
if socks_proxy:
conn_class = make_socks_conn_class(conn_class, socks_proxy)
del req.headers['Ytdl-socks-proxy']
return self.do_open(functools.partial(
_create_http_connection, self, conn_class, True),
req, **kwargs)
class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar):
"""
See [1] for cookie file format.
1. https://curl.haxx.se/docs/http-cookies.html
"""
_HTTPONLY_PREFIX = '#HttpOnly_'
_ENTRY_LEN = 7
_HEADER = '''# Netscape HTTP Cookie File
# This file is generated by yt-dlp. Do not edit.
'''
_CookieFileEntry = collections.namedtuple(
'CookieFileEntry',
('domain_name', 'include_subdomains', 'path', 'https_only', 'expires_at', 'name', 'value'))
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
"""
Save cookies to a file.
Most of the code is taken from CPython 3.8 and slightly adapted
to support cookie files with UTF-8 in both python 2 and 3.
"""
if filename is None:
if self.filename is not None:
filename = self.filename
else:
raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)
# Store session cookies with `expires` set to 0 instead of an empty
# string
for cookie in self:
if cookie.expires is None:
cookie.expires = 0
with io.open(filename, 'w', encoding='utf-8') as f:
f.write(self._HEADER)
now = time.time()
for cookie in self:
if not ignore_discard and cookie.discard:
continue
if not ignore_expires and cookie.is_expired(now):
continue
if cookie.secure:
secure = 'TRUE'
else:
secure = 'FALSE'
if cookie.domain.startswith('.'):
initial_dot = 'TRUE'
else:
initial_dot = 'FALSE'
if cookie.expires is not None:
expires = compat_str(cookie.expires)
else:
expires = ''
if cookie.value is None:
# cookies.txt regards 'Set-Cookie: foo' as a cookie
# with no name, whereas http.cookiejar regards it as a
# cookie with no value.
name = ''
value = cookie.name
else:
name = cookie.name
value = cookie.value
f.write(
'\t'.join([cookie.domain, initial_dot, cookie.path,
secure, expires, name, value]) + '\n')
def load(self, filename=None, ignore_discard=False, ignore_expires=False):
"""Load cookies from a file."""
if filename is None:
if self.filename is not None:
filename = self.filename
else:
raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)
def prepare_line(line):
if line.startswith(self._HTTPONLY_PREFIX):
line = line[len(self._HTTPONLY_PREFIX):]
# comments and empty lines are fine
if line.startswith('#') or not line.strip():
return line
cookie_list = line.split('\t')
if len(cookie_list) != self._ENTRY_LEN:
raise compat_cookiejar.LoadError('invalid length %d' % len(cookie_list))
cookie = self._CookieFileEntry(*cookie_list)
if cookie.expires_at and not cookie.expires_at.isdigit():
raise compat_cookiejar.LoadError('invalid expires at %s' % cookie.expires_at)
return line
cf = io.StringIO()
with io.open(filename, encoding='utf-8') as f:
for line in f:
try:
cf.write(prepare_line(line))
except compat_cookiejar.LoadError as e:
write_string(
'WARNING: skipping cookie file entry due to %s: %r\n'
% (e, line), sys.stderr)
continue
cf.seek(0)
self._really_load(cf, filename, ignore_discard, ignore_expires)
# Session cookies are denoted by either `expires` field set to
# an empty string or 0. MozillaCookieJar only recognizes the former
        # (see [1]). So we need to force the latter to be recognized as session
# cookies on our own.
# Session cookies may be important for cookies-based authentication,
# e.g. usually, when user does not check 'Remember me' check box while
# logging in on a site, some important cookies are stored as session
# cookies so that not recognizing them will result in failed login.
# 1. https://bugs.python.org/issue17164
for cookie in self:
# Treat `expires=0` cookies as session cookies
if cookie.expires == 0:
cookie.expires = None
cookie.discard = True
class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
def __init__(self, cookiejar=None):
compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)
def http_response(self, request, response):
# Python 2 will choke on next HTTP request in row if there are non-ASCII
# characters in Set-Cookie HTTP header of last response (see
# https://github.com/ytdl-org/youtube-dl/issues/6769).
# In order to at least prevent crashing we will percent encode Set-Cookie
# header before HTTPCookieProcessor starts processing it.
# if sys.version_info < (3, 0) and response.headers:
# for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
# set_cookie = response.headers.get(set_cookie_header)
# if set_cookie:
# set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
# if set_cookie != set_cookie_escaped:
# del response.headers[set_cookie_header]
# response.headers[set_cookie_header] = set_cookie_escaped
return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)
https_request = compat_urllib_request.HTTPCookieProcessor.http_request
https_response = http_response
class YoutubeDLRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
"""YoutubeDL redirect handler
The code is based on HTTPRedirectHandler implementation from CPython [1].
This redirect handler solves two issues:
- ensures redirect URL is always unicode under python 2
- introduces support for experimental HTTP response status code
308 Permanent Redirect [2] used by some sites [3]
1. https://github.com/python/cpython/blob/master/Lib/urllib/request.py
2. https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/308
3. https://github.com/ytdl-org/youtube-dl/issues/28768
"""
http_error_301 = http_error_303 = http_error_307 = http_error_308 = compat_urllib_request.HTTPRedirectHandler.http_error_302
def redirect_request(self, req, fp, code, msg, headers, newurl):
"""Return a Request or None in response to a redirect.
This is called by the http_error_30x methods when a
redirection response is received. If a redirection should
take place, return a new Request to allow http_error_30x to
perform the redirect. Otherwise, raise HTTPError if no-one
else should try to handle this url. Return None if you can't
but another Handler might.
"""
m = req.get_method()
if (not (code in (301, 302, 303, 307, 308) and m in ("GET", "HEAD")
or code in (301, 302, 303) and m == "POST")):
raise compat_HTTPError(req.full_url, code, msg, headers, fp)
# Strictly (according to RFC 2616), 301 or 302 in response to
# a POST MUST NOT cause a redirection without confirmation
# from the user (of urllib.request, in this case). In practice,
# essentially all clients do redirect in this case, so we do
# the same.
# On python 2 urlh.geturl() may sometimes return redirect URL
# as byte string instead of unicode. This workaround allows
# to force it always return unicode.
if sys.version_info[0] < 3:
newurl = compat_str(newurl)
# Be conciliant with URIs containing a space. This is mainly
# redundant with the more complete encoding done in http_error_302(),
# but it is kept for compatibility with other callers.
newurl = newurl.replace(' ', '%20')
CONTENT_HEADERS = ("content-length", "content-type")
# NB: don't use dict comprehension for python 2.6 compatibility
newheaders = dict((k, v) for k, v in req.headers.items()
if k.lower() not in CONTENT_HEADERS)
return compat_urllib_request.Request(
newurl, headers=newheaders, origin_req_host=req.origin_req_host,
unverifiable=True)
def extract_timezone(date_str):
m = re.search(
r'''(?x)
^.{8,}? # >=8 char non-TZ prefix, if present
(?P<tz>Z| # just the UTC Z, or
(?:(?<=.\b\d{4}|\b\d{2}:\d\d)| # preceded by 4 digits or hh:mm or
(?<!.\b[a-zA-Z]{3}|[a-zA-Z]{4}|..\b\d\d)) # not preceded by 3 alpha word or >= 4 alpha or 2 digits
[ ]? # optional space
(?P<sign>\+|-) # +/-
(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2}) # hh[:]mm
$)
''', date_str)
if not m:
timezone = datetime.timedelta()
else:
date_str = date_str[:-len(m.group('tz'))]
if not m.group('sign'):
timezone = datetime.timedelta()
else:
sign = 1 if m.group('sign') == '+' else -1
timezone = datetime.timedelta(
hours=sign * int(m.group('hours')),
minutes=sign * int(m.group('minutes')))
return timezone, date_str
def parse_iso8601(date_str, delimiter='T', timezone=None):
""" Return a UNIX timestamp from the given date """
if date_str is None:
return None
date_str = re.sub(r'\.[0-9]+', '', date_str)
if timezone is None:
timezone, date_str = extract_timezone(date_str)
try:
date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
dt = datetime.datetime.strptime(date_str, date_format) - timezone
return calendar.timegm(dt.timetuple())
except ValueError:
pass
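# Usage sketch (illustrative): the extracted offset is applied before converting to a UNIX timestamp,
#   parse_iso8601('2014-03-24T10:00:00+02:00')
#   would be expected to return 1395648000 (i.e. 2014-03-24 08:00:00 UTC).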
def date_formats(day_first=True):
return DATE_FORMATS_DAY_FIRST if day_first else DATE_FORMATS_MONTH_FIRST
def unified_strdate(date_str, day_first=True):
"""Return a string with the date in the format YYYYMMDD"""
if date_str is None:
return None
upload_date = None
# Replace commas
date_str = date_str.replace(',', ' ')
# Remove AM/PM + timezone
date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
_, date_str = extract_timezone(date_str)
for expression in date_formats(day_first):
try:
upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
except ValueError:
pass
if upload_date is None:
timetuple = email.utils.parsedate_tz(date_str)
if timetuple:
try:
upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
except ValueError:
pass
if upload_date is not None:
return compat_str(upload_date)
def unified_timestamp(date_str, day_first=True):
if date_str is None:
return None
date_str = re.sub(r'[,|]', '', date_str)
pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0
timezone, date_str = extract_timezone(date_str)
# Remove AM/PM + timezone
date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
# Remove unrecognized timezones from ISO 8601 alike timestamps
m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
if m:
date_str = date_str[:-len(m.group('tz'))]
# Python only supports microseconds, so remove nanoseconds
m = re.search(r'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$', date_str)
if m:
date_str = m.group(1)
for expression in date_formats(day_first):
try:
dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
return calendar.timegm(dt.timetuple())
except ValueError:
pass
timetuple = email.utils.parsedate_tz(date_str)
if timetuple:
return calendar.timegm(timetuple) + pm_delta * 3600
def determine_ext(url, default_ext='unknown_video'):
if url is None or '.' not in url:
return default_ext
guess = url.partition('?')[0].rpartition('.')[2]
if re.match(r'^[A-Za-z0-9]+$', guess):
return guess
# Try extract ext from URLs like http://example.com/foo/bar.mp4/?download
elif guess.rstrip('/') in KNOWN_EXTENSIONS:
return guess.rstrip('/')
else:
return default_ext
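# Usage sketch (illustrative):
#   determine_ext('http://example.com/video.mp4?dl=1')  -> 'mp4'
#   determine_ext('http://example.com/page')            -> 'unknown_video'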
def subtitles_filename(filename, sub_lang, sub_format, expected_real_ext=None):
return replace_extension(filename, sub_lang + '.' + sub_format, expected_real_ext)
def datetime_from_str(date_str, precision='auto', format='%Y%m%d'):
"""
Return a datetime object from a string in the format YYYYMMDD or
(now|today|date)[+-][0-9](microsecond|second|minute|hour|day|week|month|year)(s)?
format: string date format used to return datetime object from
precision: round the time portion of a datetime object.
auto|microsecond|second|minute|hour|day.
auto: round to the unit provided in date_str (if applicable).
"""
auto_precision = False
if precision == 'auto':
auto_precision = True
precision = 'microsecond'
today = datetime_round(datetime.datetime.now(), precision)
if date_str in ('now', 'today'):
return today
if date_str == 'yesterday':
return today - datetime.timedelta(days=1)
match = re.match(
r'(?P<start>.+)(?P<sign>[+-])(?P<time>\d+)(?P<unit>microsecond|second|minute|hour|day|week|month|year)(s)?',
date_str)
if match is not None:
start_time = datetime_from_str(match.group('start'), precision, format)
time = int(match.group('time')) * (-1 if match.group('sign') == '-' else 1)
unit = match.group('unit')
if unit == 'month' or unit == 'year':
new_date = datetime_add_months(start_time, time * 12 if unit == 'year' else time)
unit = 'day'
else:
if unit == 'week':
unit = 'day'
time *= 7
delta = datetime.timedelta(**{unit + 's': time})
new_date = start_time + delta
if auto_precision:
return datetime_round(new_date, unit)
return new_date
return datetime_round(datetime.datetime.strptime(date_str, format), precision)
def date_from_str(date_str, format='%Y%m%d'):
"""
Return a datetime object from a string in the format YYYYMMDD or
(now|today|date)[+-][0-9](microsecond|second|minute|hour|day|week|month|year)(s)?
format: string date format used to return datetime object from
"""
return datetime_from_str(date_str, precision='microsecond', format=format).date()
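# Usage sketch (illustrative, relative dates assumed):
#   date_from_str('20140101')   -> datetime.date(2014, 1, 1)
#   date_from_str('now-1week')  -> the date seven days before today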
def datetime_add_months(dt, months):
"""Increment/Decrement a datetime object by months."""
month = dt.month + months - 1
year = dt.year + month // 12
month = month % 12 + 1
day = min(dt.day, calendar.monthrange(year, month)[1])
return dt.replace(year, month, day)
def datetime_round(dt, precision='day'):
"""
Round a datetime object's time to a specific precision
"""
if precision == 'microsecond':
return dt
unit_seconds = {
'day': 86400,
'hour': 3600,
'minute': 60,
'second': 1,
}
roundto = lambda x, n: ((x + n / 2) // n) * n
timestamp = calendar.timegm(dt.timetuple())
return datetime.datetime.utcfromtimestamp(roundto(timestamp, unit_seconds[precision]))
def hyphenate_date(date_str):
"""
Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
if match is not None:
return '-'.join(match.groups())
else:
return date_str
class DateRange(object):
"""Represents a time interval between two dates"""
def __init__(self, start=None, end=None):
"""start and end must be strings in the format accepted by date"""
if start is not None:
self.start = date_from_str(start)
else:
self.start = datetime.datetime.min.date()
if end is not None:
self.end = date_from_str(end)
else:
self.end = datetime.datetime.max.date()
if self.start > self.end:
raise ValueError('Date range: "%s" , the start date must be before the end date' % self)
@classmethod
def day(cls, day):
"""Returns a range that only contains the given day"""
return cls(day, day)
def __contains__(self, date):
"""Check if the date is in the range"""
if not isinstance(date, datetime.date):
date = date_from_str(date)
return self.start <= date <= self.end
def __str__(self):
return '%s - %s' % (self.start.isoformat(), self.end.isoformat())
def platform_name():
""" Returns the platform name as a compat_str """
res = platform.platform()
if isinstance(res, bytes):
res = res.decode(preferredencoding())
assert isinstance(res, compat_str)
return res
def get_windows_version():
''' Get Windows version. None if it's not running on Windows '''
if compat_os_name == 'nt':
return version_tuple(platform.win32_ver()[1])
else:
return None
def _windows_write_string(s, out):
""" Returns True if the string was written using special methods,
False if it has yet to be written out."""
# Adapted from http://stackoverflow.com/a/3259271/35070
import ctypes.wintypes
WIN_OUTPUT_IDS = {
1: -11,
2: -12,
}
try:
fileno = out.fileno()
except AttributeError:
# If the output stream doesn't have a fileno, it's virtual
return False
except io.UnsupportedOperation:
# Some strange Windows pseudo files?
return False
if fileno not in WIN_OUTPUT_IDS:
return False
GetStdHandle = compat_ctypes_WINFUNCTYPE(
ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
('GetStdHandle', ctypes.windll.kernel32))
h = GetStdHandle(WIN_OUTPUT_IDS[fileno])
WriteConsoleW = compat_ctypes_WINFUNCTYPE(
ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
ctypes.wintypes.LPVOID)(('WriteConsoleW', ctypes.windll.kernel32))
written = ctypes.wintypes.DWORD(0)
GetFileType = compat_ctypes_WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)(('GetFileType', ctypes.windll.kernel32))
FILE_TYPE_CHAR = 0x0002
FILE_TYPE_REMOTE = 0x8000
GetConsoleMode = compat_ctypes_WINFUNCTYPE(
ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
ctypes.POINTER(ctypes.wintypes.DWORD))(
('GetConsoleMode', ctypes.windll.kernel32))
INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value
def not_a_console(handle):
if handle == INVALID_HANDLE_VALUE or handle is None:
return True
return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR
or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)
if not_a_console(h):
return False
def next_nonbmp_pos(s):
try:
return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
except StopIteration:
return len(s)
while s:
count = min(next_nonbmp_pos(s), 1024)
ret = WriteConsoleW(
h, s, count if count else 2, ctypes.byref(written), None)
if ret == 0:
raise OSError('Failed to write string')
if not count: # We just wrote a non-BMP character
assert written.value == 2
s = s[1:]
else:
assert written.value > 0
s = s[written.value:]
return True
def write_string(s, out=None, encoding=None):
if out is None:
out = sys.stderr
assert type(s) == compat_str
if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
if _windows_write_string(s, out):
return
if ('b' in getattr(out, 'mode', '')
or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr
byt = s.encode(encoding or preferredencoding(), 'ignore')
out.write(byt)
elif hasattr(out, 'buffer'):
enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
byt = s.encode(enc, 'ignore')
out.buffer.write(byt)
else:
out.write(s)
out.flush()
def bytes_to_intlist(bs):
if not bs:
return []
if isinstance(bs[0], int): # Python 3
return list(bs)
else:
return [ord(c) for c in bs]
def intlist_to_bytes(xs):
if not xs:
return b''
return compat_struct_pack('%dB' % len(xs), *xs)
# Cross-platform file locking
if sys.platform == 'win32':
import ctypes.wintypes
import msvcrt
class OVERLAPPED(ctypes.Structure):
_fields_ = [
('Internal', ctypes.wintypes.LPVOID),
('InternalHigh', ctypes.wintypes.LPVOID),
('Offset', ctypes.wintypes.DWORD),
('OffsetHigh', ctypes.wintypes.DWORD),
('hEvent', ctypes.wintypes.HANDLE),
]
kernel32 = ctypes.windll.kernel32
LockFileEx = kernel32.LockFileEx
LockFileEx.argtypes = [
ctypes.wintypes.HANDLE, # hFile
ctypes.wintypes.DWORD, # dwFlags
ctypes.wintypes.DWORD, # dwReserved
ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
ctypes.POINTER(OVERLAPPED) # Overlapped
]
LockFileEx.restype = ctypes.wintypes.BOOL
UnlockFileEx = kernel32.UnlockFileEx
UnlockFileEx.argtypes = [
ctypes.wintypes.HANDLE, # hFile
ctypes.wintypes.DWORD, # dwReserved
ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
ctypes.POINTER(OVERLAPPED) # Overlapped
]
UnlockFileEx.restype = ctypes.wintypes.BOOL
whole_low = 0xffffffff
whole_high = 0x7fffffff
def _lock_file(f, exclusive):
overlapped = OVERLAPPED()
overlapped.Offset = 0
overlapped.OffsetHigh = 0
overlapped.hEvent = 0
f._lock_file_overlapped_p = ctypes.pointer(overlapped)
handle = msvcrt.get_osfhandle(f.fileno())
if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
whole_low, whole_high, f._lock_file_overlapped_p):
raise OSError('Locking file failed: %r' % ctypes.FormatError())
def _unlock_file(f):
assert f._lock_file_overlapped_p
handle = msvcrt.get_osfhandle(f.fileno())
if not UnlockFileEx(handle, 0,
whole_low, whole_high, f._lock_file_overlapped_p):
raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
else:
    # Some platforms, such as Jython, are missing fcntl
try:
import fcntl
def _lock_file(f, exclusive):
fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
def _unlock_file(f):
fcntl.flock(f, fcntl.LOCK_UN)
except ImportError:
UNSUPPORTED_MSG = 'file locking is not supported on this platform'
def _lock_file(f, exclusive):
raise IOError(UNSUPPORTED_MSG)
def _unlock_file(f):
raise IOError(UNSUPPORTED_MSG)
class locked_file(object):
def __init__(self, filename, mode, encoding=None):
assert mode in ['r', 'a', 'w']
self.f = io.open(filename, mode, encoding=encoding)
self.mode = mode
def __enter__(self):
exclusive = self.mode != 'r'
try:
_lock_file(self.f, exclusive)
except IOError:
self.f.close()
raise
return self
def __exit__(self, etype, value, traceback):
try:
_unlock_file(self.f)
finally:
self.f.close()
def __iter__(self):
return iter(self.f)
def write(self, *args):
return self.f.write(*args)
def read(self, *args):
return self.f.read(*args)
def get_filesystem_encoding():
encoding = sys.getfilesystemencoding()
return encoding if encoding is not None else 'utf-8'
def shell_quote(args):
quoted_args = []
encoding = get_filesystem_encoding()
for a in args:
if isinstance(a, bytes):
# We may get a filename encoded with 'encodeFilename'
a = a.decode(encoding)
quoted_args.append(compat_shlex_quote(a))
return ' '.join(quoted_args)
def smuggle_url(url, data):
""" Pass additional data in a URL for internal use. """
url, idata = unsmuggle_url(url, {})
data.update(idata)
sdata = compat_urllib_parse_urlencode(
{'__youtubedl_smuggle': json.dumps(data)})
return url + '#' + sdata
def unsmuggle_url(smug_url, default=None):
if '#__youtubedl_smuggle' not in smug_url:
return smug_url, default
url, _, sdata = smug_url.rpartition('#')
jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
data = json.loads(jsond)
return url, data
def format_decimal_suffix(num, fmt='%d%s', *, factor=1000):
""" Formats numbers with decimal sufixes like K, M, etc """
num, factor = float_or_none(num), float(factor)
if num is None:
return None
exponent = 0 if num == 0 else int(math.log(num, factor))
suffix = ['', *'kMGTPEZY'][exponent]
if factor == 1024:
suffix = {'k': 'Ki', '': ''}.get(suffix, f'{suffix}i')
converted = num / (factor ** exponent)
return fmt % (converted, suffix)
def format_bytes(bytes):
return format_decimal_suffix(bytes, '%.2f%sB', factor=1024) or 'N/A'
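# Usage sketch (illustrative):
#   format_bytes(1536)  -> '1.50KiB'
#   format_bytes(None)  -> 'N/A'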
def lookup_unit_table(unit_table, s):
units_re = '|'.join(re.escape(u) for u in unit_table)
m = re.match(
r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s)
if not m:
return None
num_str = m.group('num').replace(',', '.')
mult = unit_table[m.group('unit')]
return int(float(num_str) * mult)
def parse_filesize(s):
if s is None:
return None
# The lower-case forms are of course incorrect and unofficial,
# but we support those too
_UNIT_TABLE = {
'B': 1,
'b': 1,
'bytes': 1,
'KiB': 1024,
'KB': 1000,
'kB': 1024,
'Kb': 1000,
'kb': 1000,
'kilobytes': 1000,
'kibibytes': 1024,
'MiB': 1024 ** 2,
'MB': 1000 ** 2,
'mB': 1024 ** 2,
'Mb': 1000 ** 2,
'mb': 1000 ** 2,
'megabytes': 1000 ** 2,
'mebibytes': 1024 ** 2,
'GiB': 1024 ** 3,
'GB': 1000 ** 3,
'gB': 1024 ** 3,
'Gb': 1000 ** 3,
'gb': 1000 ** 3,
'gigabytes': 1000 ** 3,
'gibibytes': 1024 ** 3,
'TiB': 1024 ** 4,
'TB': 1000 ** 4,
'tB': 1024 ** 4,
'Tb': 1000 ** 4,
'tb': 1000 ** 4,
'terabytes': 1000 ** 4,
'tebibytes': 1024 ** 4,
'PiB': 1024 ** 5,
'PB': 1000 ** 5,
'pB': 1024 ** 5,
'Pb': 1000 ** 5,
'pb': 1000 ** 5,
'petabytes': 1000 ** 5,
'pebibytes': 1024 ** 5,
'EiB': 1024 ** 6,
'EB': 1000 ** 6,
'eB': 1024 ** 6,
'Eb': 1000 ** 6,
'eb': 1000 ** 6,
'exabytes': 1000 ** 6,
'exbibytes': 1024 ** 6,
'ZiB': 1024 ** 7,
'ZB': 1000 ** 7,
'zB': 1024 ** 7,
'Zb': 1000 ** 7,
'zb': 1000 ** 7,
'zettabytes': 1000 ** 7,
'zebibytes': 1024 ** 7,
'YiB': 1024 ** 8,
'YB': 1000 ** 8,
'yB': 1024 ** 8,
'Yb': 1000 ** 8,
'yb': 1000 ** 8,
'yottabytes': 1000 ** 8,
'yobibytes': 1024 ** 8,
}
return lookup_unit_table(_UNIT_TABLE, s)
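# Usage sketch (illustrative): decimal vs binary prefixes follow the table above,
#   parse_filesize('1.5 MiB')  -> 1572864
#   parse_filesize('500 KB')   -> 500000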
def parse_count(s):
if s is None:
return None
s = re.sub(r'^[^\d]+\s', '', s).strip()
if re.match(r'^[\d,.]+$', s):
return str_to_int(s)
_UNIT_TABLE = {
'k': 1000,
'K': 1000,
'm': 1000 ** 2,
'M': 1000 ** 2,
'kk': 1000 ** 2,
'KK': 1000 ** 2,
'b': 1000 ** 3,
'B': 1000 ** 3,
}
ret = lookup_unit_table(_UNIT_TABLE, s)
if ret is not None:
return ret
mobj = re.match(r'([\d,.]+)(?:$|\s)', s)
if mobj:
return str_to_int(mobj.group(1))
def parse_resolution(s):
if s is None:
return {}
mobj = re.search(r'(?<![a-zA-Z0-9])(?P<w>\d+)\s*[xX×,]\s*(?P<h>\d+)(?![a-zA-Z0-9])', s)
if mobj:
return {
'width': int(mobj.group('w')),
'height': int(mobj.group('h')),
}
mobj = re.search(r'(?<![a-zA-Z0-9])(\d+)[pPiI](?![a-zA-Z0-9])', s)
if mobj:
return {'height': int(mobj.group(1))}
mobj = re.search(r'\b([48])[kK]\b', s)
if mobj:
return {'height': int(mobj.group(1)) * 540}
return {}
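# Usage sketch (illustrative):
#   parse_resolution('1920x1080')  -> {'width': 1920, 'height': 1080}
#   parse_resolution('720p')       -> {'height': 720}
#   parse_resolution('4k')         -> {'height': 2160}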
def parse_bitrate(s):
if not isinstance(s, compat_str):
return
mobj = re.search(r'\b(\d+)\s*kbps', s)
if mobj:
return int(mobj.group(1))
def month_by_name(name, lang='en'):
""" Return the number of a month by (locale-independently) English name """
month_names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])
try:
return month_names.index(name) + 1
except ValueError:
return None
def month_by_abbreviation(abbrev):
""" Return the number of a month by (locale-independently) English
abbreviations """
try:
return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
except ValueError:
return None
def fix_xml_ampersands(xml_str):
"""Replace all the '&' by '&' in XML"""
return re.sub(
r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
        '&amp;',
xml_str)
def setproctitle(title):
assert isinstance(title, compat_str)
# ctypes in Jython is not complete
# http://bugs.jython.org/issue2148
if sys.platform.startswith('java'):
return
try:
libc = ctypes.cdll.LoadLibrary('libc.so.6')
except OSError:
return
except TypeError:
# LoadLibrary in Windows Python 2.7.13 only expects
# a bytestring, but since unicode_literals turns
# every string into a unicode string, it fails.
return
title_bytes = title.encode('utf-8')
buf = ctypes.create_string_buffer(len(title_bytes))
buf.value = title_bytes
try:
libc.prctl(15, buf, 0, 0, 0)
except AttributeError:
return # Strange libc, just skip this
def remove_start(s, start):
return s[len(start):] if s is not None and s.startswith(start) else s
def remove_end(s, end):
return s[:-len(end)] if s is not None and s.endswith(end) else s
def remove_quotes(s):
if s is None or len(s) < 2:
return s
for quote in ('"', "'", ):
if s[0] == quote and s[-1] == quote:
return s[1:-1]
return s
def get_domain(url):
domain = re.match(r'(?:https?:\/\/)?(?:www\.)?(?P<domain>[^\n\/]+\.[^\n\/]+)(?:\/(.*))?', url)
return domain.group('domain') if domain else None
def url_basename(url):
path = compat_urlparse.urlparse(url).path
return path.strip('/').split('/')[-1]
def base_url(url):
return re.match(r'https?://[^?#&]+/', url).group()
def urljoin(base, path):
if isinstance(path, bytes):
path = path.decode('utf-8')
if not isinstance(path, compat_str) or not path:
return None
if re.match(r'^(?:[a-zA-Z][a-zA-Z0-9+-.]*:)?//', path):
return path
if isinstance(base, bytes):
base = base.decode('utf-8')
if not isinstance(base, compat_str) or not re.match(
r'^(?:https?:)?//', base):
return None
return compat_urlparse.urljoin(base, path)
class HEADRequest(compat_urllib_request.Request):
def get_method(self):
return 'HEAD'
class PUTRequest(compat_urllib_request.Request):
def get_method(self):
return 'PUT'
def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
if get_attr and v is not None:
v = getattr(v, get_attr, None)
try:
return int(v) * invscale // scale
except (ValueError, TypeError, OverflowError):
return default
def str_or_none(v, default=None):
return default if v is None else compat_str(v)
def str_to_int(int_str):
""" A more relaxed version of int_or_none """
if isinstance(int_str, compat_integer_types):
return int_str
elif isinstance(int_str, compat_str):
int_str = re.sub(r'[,\.\+]', '', int_str)
return int_or_none(int_str)
def float_or_none(v, scale=1, invscale=1, default=None):
if v is None:
return default
try:
return float(v) * invscale / scale
except (ValueError, TypeError):
return default
def bool_or_none(v, default=None):
return v if isinstance(v, bool) else default
def strip_or_none(v, default=None):
return v.strip() if isinstance(v, compat_str) else default
def url_or_none(url):
if not url or not isinstance(url, compat_str):
return None
url = url.strip()
return url if re.match(r'^(?:(?:https?|rt(?:m(?:pt?[es]?|fp)|sp[su]?)|mms|ftps?):)?//', url) else None
def strftime_or_none(timestamp, date_format, default=None):
datetime_object = None
try:
if isinstance(timestamp, compat_numeric_types): # unix timestamp
datetime_object = datetime.datetime.utcfromtimestamp(timestamp)
elif isinstance(timestamp, compat_str): # assume YYYYMMDD
datetime_object = datetime.datetime.strptime(timestamp, '%Y%m%d')
return datetime_object.strftime(date_format)
except (ValueError, TypeError, AttributeError):
return default
def parse_duration(s):
if not isinstance(s, compat_basestring):
return None
s = s.strip()
if not s:
return None
days, hours, mins, secs, ms = [None] * 5
m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?Z?$', s)
if m:
days, hours, mins, secs, ms = m.groups()
else:
m = re.match(
r'''(?ix)(?:P?
(?:
[0-9]+\s*y(?:ears?)?\s*
)?
(?:
[0-9]+\s*m(?:onths?)?\s*
)?
(?:
[0-9]+\s*w(?:eeks?)?\s*
)?
(?:
(?P<days>[0-9]+)\s*d(?:ays?)?\s*
)?
T)?
(?:
(?P<hours>[0-9]+)\s*h(?:ours?)?\s*
)?
(?:
(?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s*
)?
(?:
(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
)?Z?$''', s)
if m:
days, hours, mins, secs, ms = m.groups()
else:
m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
if m:
hours, mins = m.groups()
else:
return None
duration = 0
if secs:
duration += float(secs)
if mins:
duration += float(mins) * 60
if hours:
duration += float(hours) * 60 * 60
if days:
duration += float(days) * 24 * 60 * 60
if ms:
duration += float(ms)
return duration
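# Illustrative inputs accepted by parse_duration (a sketch; values assumed):
#   parse_duration('1:23:45')     -> 5025.0    (H:MM:SS)
#   parse_duration('PT1H30M')     -> 5400.0    (ISO 8601 style)
#   parse_duration('3 min 20 s')  -> 200.0     (free-form units)
#   parse_duration('2.5 hours')   -> 9000.0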
def prepend_extension(filename, ext, expected_real_ext=None):
name, real_ext = os.path.splitext(filename)
return (
'{0}.{1}{2}'.format(name, ext, real_ext)
if not expected_real_ext or real_ext[1:] == expected_real_ext
else '{0}.{1}'.format(filename, ext))
def replace_extension(filename, ext, expected_real_ext=None):
name, real_ext = os.path.splitext(filename)
return '{0}.{1}'.format(
name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
ext)
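# Illustrative usage (a sketch; filenames assumed):
#   prepend_extension('video.mp4', 'temp')          -> 'video.temp.mp4'
#   prepend_extension('video.mkv', 'temp', 'mp4')   -> 'video.mkv.temp'  (real ext differs from expected)
#   replace_extension('video.mp4', 'webm')          -> 'video.webm'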
def check_executable(exe, args=[]):
""" Checks if the given binary is installed somewhere in PATH, and returns its name.
args can be a list of arguments for a short output (like -version) """
try:
Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate_or_kill()
except OSError:
return False
return exe
def _get_exe_version_output(exe, args):
try:
# STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
# SIGTTOU if yt-dlp is run in the background.
# See https://github.com/ytdl-org/youtube-dl/issues/955#issuecomment-209789656
out, _ = Popen(
[encodeArgument(exe)] + args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate_or_kill()
except OSError:
return False
if isinstance(out, bytes): # Python 2.x
out = out.decode('ascii', 'ignore')
return out
def detect_exe_version(output, version_re=None, unrecognized='present'):
assert isinstance(output, compat_str)
if version_re is None:
version_re = r'version\s+([-0-9._a-zA-Z]+)'
m = re.search(version_re, output)
if m:
return m.group(1)
else:
return unrecognized
def get_exe_version(exe, args=['--version'],
version_re=None, unrecognized='present'):
""" Returns the version of the specified executable,
or False if the executable is not present """
out = _get_exe_version_output(exe, args)
return detect_exe_version(out, version_re, unrecognized) if out else False
class LazyList(collections.abc.Sequence):
''' Lazy immutable list from an iterable
Note that slices of a LazyList are lists and not LazyList'''
class IndexError(IndexError):
pass
def __init__(self, iterable, *, reverse=False, _cache=None):
self.__iterable = iter(iterable)
self.__cache = [] if _cache is None else _cache
self.__reversed = reverse
def __iter__(self):
if self.__reversed:
# We need to consume the entire iterable to iterate in reverse
yield from self.exhaust()
return
yield from self.__cache
for item in self.__iterable:
self.__cache.append(item)
yield item
def __exhaust(self):
self.__cache.extend(self.__iterable)
# Discard the emptied iterable to make it pickle-able
self.__iterable = []
return self.__cache
def exhaust(self):
''' Evaluate the entire iterable '''
return self.__exhaust()[::-1 if self.__reversed else 1]
@staticmethod
def __reverse_index(x):
return None if x is None else -(x + 1)
def __getitem__(self, idx):
if isinstance(idx, slice):
if self.__reversed:
idx = slice(self.__reverse_index(idx.start), self.__reverse_index(idx.stop), -(idx.step or 1))
start, stop, step = idx.start, idx.stop, idx.step or 1
elif isinstance(idx, int):
if self.__reversed:
idx = self.__reverse_index(idx)
start, stop, step = idx, idx, 0
else:
raise TypeError('indices must be integers or slices')
if ((start or 0) < 0 or (stop or 0) < 0
or (start is None and step < 0)
or (stop is None and step > 0)):
# We need to consume the entire iterable to be able to slice from the end
# Obviously, never use this with infinite iterables
self.__exhaust()
try:
return self.__cache[idx]
except IndexError as e:
raise self.IndexError(e) from e
n = max(start or 0, stop or 0) - len(self.__cache) + 1
if n > 0:
self.__cache.extend(itertools.islice(self.__iterable, n))
try:
return self.__cache[idx]
except IndexError as e:
raise self.IndexError(e) from e
def __bool__(self):
try:
self[-1] if self.__reversed else self[0]
except self.IndexError:
return False
return True
def __len__(self):
self.__exhaust()
return len(self.__cache)
def __reversed__(self):
return type(self)(self.__iterable, reverse=not self.__reversed, _cache=self.__cache)
def __copy__(self):
return type(self)(self.__iterable, reverse=self.__reversed, _cache=self.__cache)
def __repr__(self):
# repr and str should mimic a list. So we exhaust the iterable
return repr(self.exhaust())
def __str__(self):
return repr(self.exhaust())
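# Illustrative usage of LazyList (a sketch; assumes itertools, which is already
# imported for this module):
#   lazy = LazyList(itertools.count())   # an infinite iterator is fine
#   lazy[3]    -> 3                      # consumes only items 0..3 into the cache
#   lazy[:5]   -> [0, 1, 2, 3, 4]        # slices are plain lists, per the docstring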
class PagedList:
class IndexError(IndexError):
pass
def __len__(self):
# This is only useful for tests
return len(self.getslice())
def __init__(self, pagefunc, pagesize, use_cache=True):
self._pagefunc = pagefunc
self._pagesize = pagesize
self._use_cache = use_cache
self._cache = {}
def getpage(self, pagenum):
page_results = self._cache.get(pagenum)
if page_results is None:
page_results = list(self._pagefunc(pagenum))
if self._use_cache:
self._cache[pagenum] = page_results
return page_results
def getslice(self, start=0, end=None):
return list(self._getslice(start, end))
def _getslice(self, start, end):
raise NotImplementedError('This method must be implemented by subclasses')
def __getitem__(self, idx):
# NOTE: cache must be enabled if this is used
if not isinstance(idx, int) or idx < 0:
raise TypeError('indices must be non-negative integers')
entries = self.getslice(idx, idx + 1)
if not entries:
raise self.IndexError()
return entries[0]
class OnDemandPagedList(PagedList):
def _getslice(self, start, end):
for pagenum in itertools.count(start // self._pagesize):
firstid = pagenum * self._pagesize
nextfirstid = pagenum * self._pagesize + self._pagesize
if start >= nextfirstid:
continue
startv = (
start % self._pagesize
if firstid <= start < nextfirstid
else 0)
endv = (
((end - 1) % self._pagesize) + 1
if (end is not None and firstid <= end <= nextfirstid)
else None)
page_results = self.getpage(pagenum)
if startv != 0 or endv is not None:
page_results = page_results[startv:endv]
yield from page_results
            # A little optimization - if the current page is not "full", i.e. does
            # not contain page_size videos, then we can assume that this page
            # is the last one - there are no more ids on further pages -
            # i.e. no need to query again.
if len(page_results) + startv < self._pagesize:
break
# If we got the whole page, but the next page is not interesting,
# break out early as well
if end == nextfirstid:
break
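# Illustrative usage of OnDemandPagedList (a sketch; `fetch_page` is a
# hypothetical page function, not part of this module):
#   def fetch_page(n):
#       return range(n * 10, (n + 1) * 10)
#   pl = OnDemandPagedList(fetch_page, 10)
#   pl.getslice(25, 28)  -> [25, 26, 27]   # only page 2 is actually fetched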
class InAdvancePagedList(PagedList):
def __init__(self, pagefunc, pagecount, pagesize):
self._pagecount = pagecount
PagedList.__init__(self, pagefunc, pagesize, True)
def _getslice(self, start, end):
start_page = start // self._pagesize
end_page = (
self._pagecount if end is None else (end // self._pagesize + 1))
skip_elems = start - start_page * self._pagesize
only_more = None if end is None else end - start
for pagenum in range(start_page, end_page):
page_results = self.getpage(pagenum)
if skip_elems:
page_results = page_results[skip_elems:]
skip_elems = None
if only_more is not None:
if len(page_results) < only_more:
only_more -= len(page_results)
else:
yield from page_results[:only_more]
break
yield from page_results
def uppercase_escape(s):
unicode_escape = codecs.getdecoder('unicode_escape')
return re.sub(
r'\\U[0-9a-fA-F]{8}',
lambda m: unicode_escape(m.group(0))[0],
s)
def lowercase_escape(s):
unicode_escape = codecs.getdecoder('unicode_escape')
return re.sub(
r'\\u[0-9a-fA-F]{4}',
lambda m: unicode_escape(m.group(0))[0],
s)
def escape_rfc3986(s):
"""Escape non-ASCII characters as suggested by RFC 3986"""
if sys.version_info < (3, 0) and isinstance(s, compat_str):
s = s.encode('utf-8')
return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
def escape_url(url):
"""Escape URL as suggested by RFC 3986"""
url_parsed = compat_urllib_parse_urlparse(url)
return url_parsed._replace(
netloc=url_parsed.netloc.encode('idna').decode('ascii'),
path=escape_rfc3986(url_parsed.path),
params=escape_rfc3986(url_parsed.params),
query=escape_rfc3986(url_parsed.query),
fragment=escape_rfc3986(url_parsed.fragment)
).geturl()
def parse_qs(url):
return compat_parse_qs(compat_urllib_parse_urlparse(url).query)
def read_batch_urls(batch_fd):
def fixup(url):
if not isinstance(url, compat_str):
url = url.decode('utf-8', 'replace')
BOM_UTF8 = ('\xef\xbb\xbf', '\ufeff')
for bom in BOM_UTF8:
if url.startswith(bom):
url = url[len(bom):]
url = url.lstrip()
if not url or url.startswith(('#', ';', ']')):
return False
# "#" cannot be stripped out since it is part of the URI
        # However, it can be safely stripped out if following a whitespace
return re.split(r'\s#', url, 1)[0].rstrip()
with contextlib.closing(batch_fd) as fd:
return [url for url in map(fixup, fd) if url]
def urlencode_postdata(*args, **kargs):
return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii')
def update_url_query(url, query):
if not query:
return url
parsed_url = compat_urlparse.urlparse(url)
qs = compat_parse_qs(parsed_url.query)
qs.update(query)
return compat_urlparse.urlunparse(parsed_url._replace(
query=compat_urllib_parse_urlencode(qs, True)))
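# Illustrative usage (a sketch; the URL is assumed):
#   update_url_query('https://example.com/path?a=1', {'b': '2'})
#     -> 'https://example.com/path?a=1&b=2'   (existing parameters are preserved)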
def update_Request(req, url=None, data=None, headers={}, query={}):
req_headers = req.headers.copy()
req_headers.update(headers)
req_data = data or req.data
req_url = update_url_query(url or req.get_full_url(), query)
req_get_method = req.get_method()
if req_get_method == 'HEAD':
req_type = HEADRequest
elif req_get_method == 'PUT':
req_type = PUTRequest
else:
req_type = compat_urllib_request.Request
new_req = req_type(
req_url, data=req_data, headers=req_headers,
origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
if hasattr(req, 'timeout'):
new_req.timeout = req.timeout
return new_req
def _multipart_encode_impl(data, boundary):
content_type = 'multipart/form-data; boundary=%s' % boundary
out = b''
for k, v in data.items():
out += b'--' + boundary.encode('ascii') + b'\r\n'
if isinstance(k, compat_str):
k = k.encode('utf-8')
if isinstance(v, compat_str):
v = v.encode('utf-8')
# RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
# suggests sending UTF-8 directly. Firefox sends UTF-8, too
content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n'
if boundary.encode('ascii') in content:
raise ValueError('Boundary overlaps with data')
out += content
out += b'--' + boundary.encode('ascii') + b'--\r\n'
return out, content_type
def multipart_encode(data, boundary=None):
'''
Encode a dict to RFC 7578-compliant form-data
data:
A dict where keys and values can be either Unicode or bytes-like
objects.
boundary:
        If specified, a Unicode object to use as the boundary. Otherwise
        a random boundary is generated.
Reference: https://tools.ietf.org/html/rfc7578
'''
has_specified_boundary = boundary is not None
while True:
if boundary is None:
boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff))
try:
out, content_type = _multipart_encode_impl(data, boundary)
break
except ValueError:
if has_specified_boundary:
raise
boundary = None
return out, content_type
def dict_get(d, key_or_keys, default=None, skip_false_values=True):
if isinstance(key_or_keys, (list, tuple)):
for key in key_or_keys:
if key not in d or d[key] is None or skip_false_values and not d[key]:
continue
return d[key]
return default
return d.get(key_or_keys, default)
def try_get(src, getter, expected_type=None):
for get in variadic(getter):
try:
v = get(src)
except (AttributeError, KeyError, TypeError, IndexError):
pass
else:
if expected_type is None or isinstance(v, expected_type):
return v
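# Illustrative usage (a sketch; the dictionaries are assumed):
#   dict_get({'a': None, 'b': ''}, ('a', 'b', 'c'), default='x')  -> 'x'
#       (None and falsy values are skipped unless skip_false_values=False)
#   try_get({'a': [1, 2]}, lambda x: x['a'][1], int)  -> 2
#   try_get({'a': [1, 2]}, lambda x: x['a'][5], int)  -> None   (the IndexError is swallowed)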
def merge_dicts(*dicts):
merged = {}
for a_dict in dicts:
for k, v in a_dict.items():
if v is None:
continue
if (k not in merged
or (isinstance(v, compat_str) and v
and isinstance(merged[k], compat_str)
and not merged[k])):
merged[k] = v
return merged
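# Illustrative behaviour (a sketch): earlier dicts win, except that a later
# non-empty string may replace an earlier empty string:
#   merge_dicts({'title': ''}, {'title': 'Example', 'id': '1'})
#     -> {'title': 'Example', 'id': '1'}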
def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
return string if isinstance(string, compat_str) else compat_str(string, encoding, errors)
US_RATINGS = {
'G': 0,
'PG': 10,
'PG-13': 13,
'R': 16,
'NC': 18,
}
TV_PARENTAL_GUIDELINES = {
'TV-Y': 0,
'TV-Y7': 7,
'TV-G': 0,
'TV-PG': 0,
'TV-14': 14,
'TV-MA': 17,
}
def parse_age_limit(s):
if type(s) == int:
return s if 0 <= s <= 21 else None
if not isinstance(s, compat_basestring):
return None
m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
if m:
return int(m.group('age'))
s = s.upper()
if s in US_RATINGS:
return US_RATINGS[s]
m = re.match(r'^TV[_-]?(%s)$' % '|'.join(k[3:] for k in TV_PARENTAL_GUIDELINES), s)
if m:
return TV_PARENTAL_GUIDELINES['TV-' + m.group(1)]
return None
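# Illustrative usage (a sketch; values assumed):
#   parse_age_limit('18+')    -> 18
#   parse_age_limit('PG-13')  -> 13
#   parse_age_limit('TV-MA')  -> 17
#   parse_age_limit(99)       -> None   (integers outside 0..21 are rejected)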
def strip_jsonp(code):
return re.sub(
r'''(?sx)^
(?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]*)
(?:\s*&&\s*(?P=func_name))?
\s*\(\s*(?P<callback_data>.*)\);?
\s*?(?://[^\n]*)*$''',
r'\g<callback_data>', code)
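# Illustrative usage (a sketch; the payload is assumed):
#   strip_jsonp('callback({"id": 42});')  -> '{"id": 42}'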
def js_to_json(code, vars={}):
# vars is a dict of var, val pairs to substitute
COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*\n'
SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE)
INTEGER_TABLE = (
(r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16),
(r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8),
)
def fix_kv(m):
v = m.group(0)
if v in ('true', 'false', 'null'):
return v
elif v in ('undefined', 'void 0'):
return 'null'
elif v.startswith('/*') or v.startswith('//') or v.startswith('!') or v == ',':
return ""
if v[0] in ("'", '"'):
v = re.sub(r'(?s)\\.|"', lambda m: {
'"': '\\"',
"\\'": "'",
'\\\n': '',
'\\x': '\\u00',
}.get(m.group(0), m.group(0)), v[1:-1])
else:
for regex, base in INTEGER_TABLE:
im = re.match(regex, v)
if im:
i = int(im.group(1), base)
return '"%d":' % i if v.endswith(':') else '%d' % i
if v in vars:
return vars[v]
return '"%s"' % v
return re.sub(r'''(?sx)
"(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
'(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
{comment}|,(?={skip}[\]}}])|
void\s0|(?:(?<![0-9])[eE]|[a-df-zA-DF-Z_$])[.a-zA-Z_$0-9]*|
\b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?|
[0-9]+(?={skip}:)|
!+
'''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code)
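# Illustrative usage (a sketch; the object literal is assumed):
#   json.loads(js_to_json("{abc: 'def', ghi: 0x1F, }"))
#     -> {'abc': 'def', 'ghi': 31}
#   (unquoted keys are quoted, hex/octal literals are converted and the
#    trailing comma is dropped)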
def qualities(quality_ids):
""" Get a numeric quality value out of a list of possible values """
def q(qid):
try:
return quality_ids.index(qid)
except ValueError:
return -1
return q
POSTPROCESS_WHEN = {'pre_process', 'before_dl', 'after_move', 'post_process', 'after_video', 'playlist'}
DEFAULT_OUTTMPL = {
'default': '%(title)s [%(id)s].%(ext)s',
'chapter': '%(title)s - %(section_number)03d %(section_title)s [%(id)s].%(ext)s',
}
OUTTMPL_TYPES = {
'chapter': None,
'subtitle': None,
'thumbnail': None,
'description': 'description',
'annotation': 'annotations.xml',
'infojson': 'info.json',
'link': None,
'pl_thumbnail': None,
'pl_description': 'description',
'pl_infojson': 'info.json',
}
# As of [1] format syntax is:
# %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
# 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
STR_FORMAT_RE_TMPL = r'''(?x)
(?<!%)(?P<prefix>(?:%%)*)
%
(?P<has_key>\((?P<key>{0})\))?
(?P<format>
(?P<conversion>[#0\-+ ]+)?
(?P<min_width>\d+)?
(?P<precision>\.\d+)?
(?P<len_mod>[hlL])? # unused in python
{1} # conversion type
)
'''
STR_FORMAT_TYPES = 'diouxXeEfFgGcrs'
def limit_length(s, length):
""" Add ellipses to overly long strings """
if s is None:
return None
ELLIPSES = '...'
if len(s) > length:
return s[:length - len(ELLIPSES)] + ELLIPSES
return s
def version_tuple(v):
return tuple(int(e) for e in re.split(r'[-.]', v))
def is_outdated_version(version, limit, assume_new=True):
if not version:
return not assume_new
try:
return version_tuple(version) < version_tuple(limit)
except ValueError:
return not assume_new
def ytdl_is_updateable():
""" Returns if yt-dlp can be updated with -U """
from .update import is_non_updateable
return not is_non_updateable()
def args_to_str(args):
# Get a short string representation for a subprocess command
return ' '.join(compat_shlex_quote(a) for a in args)
def error_to_compat_str(err):
err_str = str(err)
    # On Python 2, the error byte string must be decoded with the proper
    # encoding rather than ASCII
if sys.version_info[0] < 3:
err_str = err_str.decode(preferredencoding())
return err_str
def mimetype2ext(mt):
if mt is None:
return None
mt, _, params = mt.partition(';')
mt = mt.strip()
FULL_MAP = {
'audio/mp4': 'm4a',
# Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. Here use .mp3 as
# it's the most popular one
'audio/mpeg': 'mp3',
'audio/x-wav': 'wav',
'audio/wav': 'wav',
'audio/wave': 'wav',
}
ext = FULL_MAP.get(mt)
if ext is not None:
return ext
SUBTYPE_MAP = {
'3gpp': '3gp',
'smptett+xml': 'tt',
'ttaf+xml': 'dfxp',
'ttml+xml': 'ttml',
'x-flv': 'flv',
'x-mp4-fragmented': 'mp4',
'x-ms-sami': 'sami',
'x-ms-wmv': 'wmv',
'mpegurl': 'm3u8',
'x-mpegurl': 'm3u8',
'vnd.apple.mpegurl': 'm3u8',
'dash+xml': 'mpd',
'f4m+xml': 'f4m',
'hds+xml': 'f4m',
'vnd.ms-sstr+xml': 'ism',
'quicktime': 'mov',
'mp2t': 'ts',
'x-wav': 'wav',
'filmstrip+json': 'fs',
'svg+xml': 'svg',
}
_, _, subtype = mt.rpartition('/')
ext = SUBTYPE_MAP.get(subtype.lower())
if ext is not None:
return ext
SUFFIX_MAP = {
'json': 'json',
'xml': 'xml',
'zip': 'zip',
'gzip': 'gz',
}
_, _, suffix = subtype.partition('+')
ext = SUFFIX_MAP.get(suffix)
if ext is not None:
return ext
return subtype.replace('+', '.')
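# Illustrative mappings (a sketch; the MIME types are assumed):
#   mimetype2ext('audio/mp4')                        -> 'm4a'
#   mimetype2ext('application/x-mpegurl')            -> 'm3u8'
#   mimetype2ext('application/dash+xml')             -> 'mpd'
#   mimetype2ext('video/mp4; codecs="avc1.42E01E"')  -> 'mp4'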
def ext2mimetype(ext_or_url):
if not ext_or_url:
return None
if '.' not in ext_or_url:
ext_or_url = f'file.{ext_or_url}'
return mimetypes.guess_type(ext_or_url)[0]
def parse_codecs(codecs_str):
# http://tools.ietf.org/html/rfc6381
if not codecs_str:
return {}
split_codecs = list(filter(None, map(
str.strip, codecs_str.strip().strip(',').split(','))))
vcodec, acodec, tcodec, hdr = None, None, None, None
for full_codec in split_codecs:
parts = full_codec.split('.')
codec = parts[0].replace('0', '')
if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2',
'h263', 'h264', 'mp4v', 'hvc1', 'av1', 'theora', 'dvh1', 'dvhe'):
if not vcodec:
vcodec = '.'.join(parts[:4]) if codec in ('vp9', 'av1', 'hvc1') else full_codec
if codec in ('dvh1', 'dvhe'):
hdr = 'DV'
elif codec == 'av1' and len(parts) > 3 and parts[3] == '10':
hdr = 'HDR10'
elif full_codec.replace('0', '').startswith('vp9.2'):
hdr = 'HDR10'
elif codec in ('flac', 'mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
if not acodec:
acodec = full_codec
elif codec in ('stpp', 'wvtt',):
if not tcodec:
tcodec = full_codec
else:
write_string('WARNING: Unknown codec %s\n' % full_codec, sys.stderr)
if vcodec or acodec or tcodec:
return {
'vcodec': vcodec or 'none',
'acodec': acodec or 'none',
'dynamic_range': hdr,
**({'tcodec': tcodec} if tcodec is not None else {}),
}
elif len(split_codecs) == 2:
return {
'vcodec': split_codecs[0],
'acodec': split_codecs[1],
}
return {}
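# Illustrative usage (a sketch; the codecs strings are assumed):
#   parse_codecs('avc1.640028,mp4a.40.2')
#     -> {'vcodec': 'avc1.640028', 'acodec': 'mp4a.40.2', 'dynamic_range': None}
#   parse_codecs('dvh1.05.01')
#     -> {'vcodec': 'dvh1.05.01', 'acodec': 'none', 'dynamic_range': 'DV'}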
def urlhandle_detect_ext(url_handle):
getheader = url_handle.headers.get
cd = getheader('Content-Disposition')
if cd:
m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
if m:
e = determine_ext(m.group('filename'), default_ext=None)
if e:
return e
return mimetype2ext(getheader('Content-Type'))
def encode_data_uri(data, mime_type):
return 'data:%s;base64,%s' % (mime_type, base64.b64encode(data).decode('ascii'))
def age_restricted(content_limit, age_limit):
""" Returns True iff the content should be blocked """
if age_limit is None: # No limit set
return False
if content_limit is None:
return False # Content available for everyone
return age_limit < content_limit
def is_html(first_bytes):
""" Detect whether a file contains HTML by examining its first bytes. """
BOMS = [
(b'\xef\xbb\xbf', 'utf-8'),
(b'\x00\x00\xfe\xff', 'utf-32-be'),
(b'\xff\xfe\x00\x00', 'utf-32-le'),
(b'\xff\xfe', 'utf-16-le'),
(b'\xfe\xff', 'utf-16-be'),
]
for bom, enc in BOMS:
if first_bytes.startswith(bom):
s = first_bytes[len(bom):].decode(enc, 'replace')
break
else:
s = first_bytes.decode('utf-8', 'replace')
return re.match(r'^\s*<', s)
def determine_protocol(info_dict):
protocol = info_dict.get('protocol')
if protocol is not None:
return protocol
url = sanitize_url(info_dict['url'])
if url.startswith('rtmp'):
return 'rtmp'
elif url.startswith('mms'):
return 'mms'
elif url.startswith('rtsp'):
return 'rtsp'
ext = determine_ext(url)
if ext == 'm3u8':
return 'm3u8'
elif ext == 'f4m':
return 'f4m'
return compat_urllib_parse_urlparse(url).scheme
def render_table(header_row, data, delim=False, extra_gap=0, hide_empty=False):
""" Render a list of rows, each as a list of values.
Text after a \t will be right aligned """
def width(string):
return len(remove_terminal_sequences(string).replace('\t', ''))
def get_max_lens(table):
return [max(width(str(v)) for v in col) for col in zip(*table)]
def filter_using_list(row, filterArray):
return [col for (take, col) in zip(filterArray, row) if take]
if hide_empty:
max_lens = get_max_lens(data)
header_row = filter_using_list(header_row, max_lens)
data = [filter_using_list(row, max_lens) for row in data]
table = [header_row] + data
max_lens = get_max_lens(table)
extra_gap += 1
if delim:
table = [header_row, [delim * (ml + extra_gap) for ml in max_lens]] + data
table[1][-1] = table[1][-1][:-extra_gap] # Remove extra_gap from end of delimiter
for row in table:
for pos, text in enumerate(map(str, row)):
if '\t' in text:
row[pos] = text.replace('\t', ' ' * (max_lens[pos] - width(text))) + ' ' * extra_gap
else:
row[pos] = text + ' ' * (max_lens[pos] - width(text) + extra_gap)
ret = '\n'.join(''.join(row).rstrip() for row in table)
return ret
def _match_one(filter_part, dct, incomplete):
# TODO: Generalize code with YoutubeDL._build_format_filter
STRING_OPERATORS = {
'*=': operator.contains,
'^=': lambda attr, value: attr.startswith(value),
'$=': lambda attr, value: attr.endswith(value),
'~=': lambda attr, value: re.search(value, attr),
}
COMPARISON_OPERATORS = {
**STRING_OPERATORS,
'<=': operator.le, # "<=" must be defined above "<"
'<': operator.lt,
'>=': operator.ge,
'>': operator.gt,
'=': operator.eq,
}
operator_rex = re.compile(r'''(?x)\s*
(?P<key>[a-z_]+)
\s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
(?:
(?P<quote>["\'])(?P<quotedstrval>.+?)(?P=quote)|
(?P<strval>.+?)
)
\s*$
''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
m = operator_rex.search(filter_part)
if m:
m = m.groupdict()
unnegated_op = COMPARISON_OPERATORS[m['op']]
if m['negation']:
op = lambda attr, value: not unnegated_op(attr, value)
else:
op = unnegated_op
        comparison_value = m['quotedstrval'] or m['strval']
if m['quote']:
comparison_value = comparison_value.replace(r'\%s' % m['quote'], m['quote'])
actual_value = dct.get(m['key'])
numeric_comparison = None
if isinstance(actual_value, compat_numeric_types):
            # If the original field is a string and the matching comparison value is
            # a number, we should respect the origin of the original field
            # and process the comparison value as a string (see
            # https://github.com/ytdl-org/youtube-dl/issues/11082)
try:
numeric_comparison = int(comparison_value)
except ValueError:
numeric_comparison = parse_filesize(comparison_value)
if numeric_comparison is None:
numeric_comparison = parse_filesize(f'{comparison_value}B')
if numeric_comparison is None:
numeric_comparison = parse_duration(comparison_value)
if numeric_comparison is not None and m['op'] in STRING_OPERATORS:
raise ValueError('Operator %s only supports string values!' % m['op'])
if actual_value is None:
return incomplete or m['none_inclusive']
return op(actual_value, comparison_value if numeric_comparison is None else numeric_comparison)
UNARY_OPERATORS = {
'': lambda v: (v is True) if isinstance(v, bool) else (v is not None),
'!': lambda v: (v is False) if isinstance(v, bool) else (v is None),
}
operator_rex = re.compile(r'''(?x)\s*
(?P<op>%s)\s*(?P<key>[a-z_]+)
\s*$
''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
m = operator_rex.search(filter_part)
if m:
op = UNARY_OPERATORS[m.group('op')]
actual_value = dct.get(m.group('key'))
if incomplete and actual_value is None:
return True
return op(actual_value)
raise ValueError('Invalid filter part %r' % filter_part)
def match_str(filter_str, dct, incomplete=False):
""" Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false
When incomplete, all conditions passes on missing fields
"""
return all(
_match_one(filter_part.replace(r'\&', '&'), dct, incomplete)
for filter_part in re.split(r'(?<!\\)&', filter_str))
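# Illustrative filters (a sketch; the info dicts are assumed):
#   match_str('duration > 60 & like_count >? 100', {'duration': 90})  -> True
#       ('>?' lets the condition pass when the field is missing)
#   match_str('!is_live', {'is_live': False})                         -> True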
def match_filter_func(filter_str):
def _match_func(info_dict, *args, **kwargs):
if match_str(filter_str, info_dict, *args, **kwargs):
return None
else:
video_title = info_dict.get('title', info_dict.get('id', 'video'))
return '%s does not pass filter %s, skipping ..' % (video_title, filter_str)
return _match_func
def parse_dfxp_time_expr(time_expr):
if not time_expr:
return
mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
if mobj:
return float(mobj.group('time_offset'))
mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
if mobj:
return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3).replace(':', '.'))
def srt_subtitles_timecode(seconds):
return '%02d:%02d:%02d,%03d' % timetuple_from_msec(seconds * 1000)
def ass_subtitles_timecode(seconds):
time = timetuple_from_msec(seconds * 1000)
return '%01d:%02d:%02d.%02d' % (*time[:-1], time.milliseconds / 10)
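# Illustrative usage (a sketch; assumes the usual timetuple_from_msec helper
# defined earlier in this module):
#   parse_dfxp_time_expr('00:01:02.500')  -> 62.5
#   parse_dfxp_time_expr('5.2s')          -> 5.2
#   srt_subtitles_timecode(3.5)           -> '00:00:03,500'
#   ass_subtitles_timecode(3.5)           -> '0:00:03.50'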
def dfxp2srt(dfxp_data):
'''
@param dfxp_data A bytes-like object containing DFXP data
@returns A unicode object containing converted SRT data
'''
LEGACY_NAMESPACES = (
(b'http://www.w3.org/ns/ttml', [
b'http://www.w3.org/2004/11/ttaf1',
b'http://www.w3.org/2006/04/ttaf1',
b'http://www.w3.org/2006/10/ttaf1',
]),
(b'http://www.w3.org/ns/ttml#styling', [
b'http://www.w3.org/ns/ttml#style',
]),
)
SUPPORTED_STYLING = [
'color',
'fontFamily',
'fontSize',
'fontStyle',
'fontWeight',
'textDecoration'
]
_x = functools.partial(xpath_with_ns, ns_map={
'xml': 'http://www.w3.org/XML/1998/namespace',
'ttml': 'http://www.w3.org/ns/ttml',
'tts': 'http://www.w3.org/ns/ttml#styling',
})
styles = {}
default_style = {}
class TTMLPElementParser(object):
_out = ''
_unclosed_elements = []
_applied_styles = []
def start(self, tag, attrib):
if tag in (_x('ttml:br'), 'br'):
self._out += '\n'
else:
unclosed_elements = []
style = {}
element_style_id = attrib.get('style')
if default_style:
style.update(default_style)
if element_style_id:
style.update(styles.get(element_style_id, {}))
for prop in SUPPORTED_STYLING:
prop_val = attrib.get(_x('tts:' + prop))
if prop_val:
style[prop] = prop_val
if style:
font = ''
for k, v in sorted(style.items()):
if self._applied_styles and self._applied_styles[-1].get(k) == v:
continue
if k == 'color':
font += ' color="%s"' % v
elif k == 'fontSize':
font += ' size="%s"' % v
elif k == 'fontFamily':
font += ' face="%s"' % v
elif k == 'fontWeight' and v == 'bold':
self._out += '<b>'
unclosed_elements.append('b')
elif k == 'fontStyle' and v == 'italic':
self._out += '<i>'
unclosed_elements.append('i')
elif k == 'textDecoration' and v == 'underline':
self._out += '<u>'
unclosed_elements.append('u')
if font:
self._out += '<font' + font + '>'
unclosed_elements.append('font')
applied_style = {}
if self._applied_styles:
applied_style.update(self._applied_styles[-1])
applied_style.update(style)
self._applied_styles.append(applied_style)
self._unclosed_elements.append(unclosed_elements)
def end(self, tag):
if tag not in (_x('ttml:br'), 'br'):
unclosed_elements = self._unclosed_elements.pop()
for element in reversed(unclosed_elements):
self._out += '</%s>' % element
if unclosed_elements and self._applied_styles:
self._applied_styles.pop()
def data(self, data):
self._out += data
def close(self):
return self._out.strip()
def parse_node(node):
target = TTMLPElementParser()
parser = xml.etree.ElementTree.XMLParser(target=target)
parser.feed(xml.etree.ElementTree.tostring(node))
return parser.close()
for k, v in LEGACY_NAMESPACES:
for ns in v:
dfxp_data = dfxp_data.replace(ns, k)
dfxp = compat_etree_fromstring(dfxp_data)
out = []
paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p')
if not paras:
raise ValueError('Invalid dfxp/TTML subtitle')
repeat = False
while True:
for style in dfxp.findall(_x('.//ttml:style')):
style_id = style.get('id') or style.get(_x('xml:id'))
if not style_id:
continue
parent_style_id = style.get('style')
if parent_style_id:
if parent_style_id not in styles:
repeat = True
continue
styles[style_id] = styles[parent_style_id].copy()
for prop in SUPPORTED_STYLING:
prop_val = style.get(_x('tts:' + prop))
if prop_val:
styles.setdefault(style_id, {})[prop] = prop_val
if repeat:
repeat = False
else:
break
for p in ('body', 'div'):
ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p])
if ele is None:
continue
style = styles.get(ele.get('style'))
if not style:
continue
default_style.update(style)
for para, index in zip(paras, itertools.count(1)):
begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
end_time = parse_dfxp_time_expr(para.attrib.get('end'))
dur = parse_dfxp_time_expr(para.attrib.get('dur'))
if begin_time is None:
continue
if not end_time:
if not dur:
continue
end_time = begin_time + dur
out.append('%d\n%s --> %s\n%s\n\n' % (
index,
srt_subtitles_timecode(begin_time),
srt_subtitles_timecode(end_time),
parse_node(para)))
return ''.join(out)
def cli_option(params, command_option, param):
param = params.get(param)
if param:
param = compat_str(param)
return [command_option, param] if param is not None else []
def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
param = params.get(param)
if param is None:
return []
assert isinstance(param, bool)
if separator:
return [command_option + separator + (true_value if param else false_value)]
return [command_option, true_value if param else false_value]
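# Illustrative usage (a sketch; the option names and params are assumed):
#   cli_bool_option({'nocheckcertificate': True}, '--check-certificate',
#                   'nocheckcertificate', 'false', 'true')
#     -> ['--check-certificate', 'false']
#   cli_bool_option({'nocheckcertificate': True}, '--check-certificate',
#                   'nocheckcertificate', 'false', 'true', '=')
#     -> ['--check-certificate=false']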
def cli_valueless_option(params, command_option, param, expected_value=True):
param = params.get(param)
return [command_option] if param == expected_value else []
def cli_configuration_args(argdict, keys, default=[], use_compat=True):
if isinstance(argdict, (list, tuple)): # for backward compatibility
if use_compat:
return argdict
else:
argdict = None
if argdict is None:
return default
assert isinstance(argdict, dict)
assert isinstance(keys, (list, tuple))
for key_list in keys:
arg_list = list(filter(
lambda x: x is not None,
[argdict.get(key.lower()) for key in variadic(key_list)]))
if arg_list:
return [arg for args in arg_list for arg in args]
return default
def _configuration_args(main_key, argdict, exe, keys=None, default=[], use_compat=True):
main_key, exe = main_key.lower(), exe.lower()
root_key = exe if main_key == exe else f'{main_key}+{exe}'
keys = [f'{root_key}{k}' for k in (keys or [''])]
if root_key in keys:
if main_key != exe:
keys.append((main_key, exe))
keys.append('default')
else:
use_compat = False
return cli_configuration_args(argdict, keys, default, use_compat)
class ISO639Utils(object):
# See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
_lang_map = {
'aa': 'aar',
'ab': 'abk',
'ae': 'ave',
'af': 'afr',
'ak': 'aka',
'am': 'amh',
'an': 'arg',
'ar': 'ara',
'as': 'asm',
'av': 'ava',
'ay': 'aym',
'az': 'aze',
'ba': 'bak',
'be': 'bel',
'bg': 'bul',
'bh': 'bih',
'bi': 'bis',
'bm': 'bam',
'bn': 'ben',
'bo': 'bod',
'br': 'bre',
'bs': 'bos',
'ca': 'cat',
'ce': 'che',
'ch': 'cha',
'co': 'cos',
'cr': 'cre',
'cs': 'ces',
'cu': 'chu',
'cv': 'chv',
'cy': 'cym',
'da': 'dan',
'de': 'deu',
'dv': 'div',
'dz': 'dzo',
'ee': 'ewe',
'el': 'ell',
'en': 'eng',
'eo': 'epo',
'es': 'spa',
'et': 'est',
'eu': 'eus',
'fa': 'fas',
'ff': 'ful',
'fi': 'fin',
'fj': 'fij',
'fo': 'fao',
'fr': 'fra',
'fy': 'fry',
'ga': 'gle',
'gd': 'gla',
'gl': 'glg',
'gn': 'grn',
'gu': 'guj',
'gv': 'glv',
'ha': 'hau',
'he': 'heb',
'iw': 'heb', # Replaced by he in 1989 revision
'hi': 'hin',
'ho': 'hmo',
'hr': 'hrv',
'ht': 'hat',
'hu': 'hun',
'hy': 'hye',
'hz': 'her',
'ia': 'ina',
'id': 'ind',
'in': 'ind', # Replaced by id in 1989 revision
'ie': 'ile',
'ig': 'ibo',
'ii': 'iii',
'ik': 'ipk',
'io': 'ido',
'is': 'isl',
'it': 'ita',
'iu': 'iku',
'ja': 'jpn',
'jv': 'jav',
'ka': 'kat',
'kg': 'kon',
'ki': 'kik',
'kj': 'kua',
'kk': 'kaz',
'kl': 'kal',
'km': 'khm',
'kn': 'kan',
'ko': 'kor',
'kr': 'kau',
'ks': 'kas',
'ku': 'kur',
'kv': 'kom',
'kw': 'cor',
'ky': 'kir',
'la': 'lat',
'lb': 'ltz',
'lg': 'lug',
'li': 'lim',
'ln': 'lin',
'lo': 'lao',
'lt': 'lit',
'lu': 'lub',
'lv': 'lav',
'mg': 'mlg',
'mh': 'mah',
'mi': 'mri',
'mk': 'mkd',
'ml': 'mal',
'mn': 'mon',
'mr': 'mar',
'ms': 'msa',
'mt': 'mlt',
'my': 'mya',
'na': 'nau',
'nb': 'nob',
'nd': 'nde',
'ne': 'nep',
'ng': 'ndo',
'nl': 'nld',
'nn': 'nno',
'no': 'nor',
'nr': 'nbl',
'nv': 'nav',
'ny': 'nya',
'oc': 'oci',
'oj': 'oji',
'om': 'orm',
'or': 'ori',
'os': 'oss',
'pa': 'pan',
'pi': 'pli',
'pl': 'pol',
'ps': 'pus',
'pt': 'por',
'qu': 'que',
'rm': 'roh',
'rn': 'run',
'ro': 'ron',
'ru': 'rus',
'rw': 'kin',
'sa': 'san',
'sc': 'srd',
'sd': 'snd',
'se': 'sme',
'sg': 'sag',
'si': 'sin',
'sk': 'slk',
'sl': 'slv',
'sm': 'smo',
'sn': 'sna',
'so': 'som',
'sq': 'sqi',
'sr': 'srp',
'ss': 'ssw',
'st': 'sot',
'su': 'sun',
'sv': 'swe',
'sw': 'swa',
'ta': 'tam',
'te': 'tel',
'tg': 'tgk',
'th': 'tha',
'ti': 'tir',
'tk': 'tuk',
'tl': 'tgl',
'tn': 'tsn',
'to': 'ton',
'tr': 'tur',
'ts': 'tso',
'tt': 'tat',
'tw': 'twi',
'ty': 'tah',
'ug': 'uig',
'uk': 'ukr',
'ur': 'urd',
'uz': 'uzb',
've': 'ven',
'vi': 'vie',
'vo': 'vol',
'wa': 'wln',
'wo': 'wol',
'xh': 'xho',
'yi': 'yid',
'ji': 'yid', # Replaced by yi in 1989 revision
'yo': 'yor',
'za': 'zha',
'zh': 'zho',
'zu': 'zul',
}
@classmethod
def short2long(cls, code):
"""Convert language code from ISO 639-1 to ISO 639-2/T"""
return cls._lang_map.get(code[:2])
@classmethod
def long2short(cls, code):
"""Convert language code from ISO 639-2/T to ISO 639-1"""
for short_name, long_name in cls._lang_map.items():
if long_name == code:
return short_name
class ISO3166Utils(object):
# From http://data.okfn.org/data/core/country-list
_country_map = {
'AF': 'Afghanistan',
'AX': 'Åland Islands',
'AL': 'Albania',
'DZ': 'Algeria',
'AS': 'American Samoa',
'AD': 'Andorra',
'AO': 'Angola',
'AI': 'Anguilla',
'AQ': 'Antarctica',
'AG': 'Antigua and Barbuda',
'AR': 'Argentina',
'AM': 'Armenia',
'AW': 'Aruba',
'AU': 'Australia',
'AT': 'Austria',
'AZ': 'Azerbaijan',
'BS': 'Bahamas',
'BH': 'Bahrain',
'BD': 'Bangladesh',
'BB': 'Barbados',
'BY': 'Belarus',
'BE': 'Belgium',
'BZ': 'Belize',
'BJ': 'Benin',
'BM': 'Bermuda',
'BT': 'Bhutan',
'BO': 'Bolivia, Plurinational State of',
'BQ': 'Bonaire, Sint Eustatius and Saba',
'BA': 'Bosnia and Herzegovina',
'BW': 'Botswana',
'BV': 'Bouvet Island',
'BR': 'Brazil',
'IO': 'British Indian Ocean Territory',
'BN': 'Brunei Darussalam',
'BG': 'Bulgaria',
'BF': 'Burkina Faso',
'BI': 'Burundi',
'KH': 'Cambodia',
'CM': 'Cameroon',
'CA': 'Canada',
'CV': 'Cape Verde',
'KY': 'Cayman Islands',
'CF': 'Central African Republic',
'TD': 'Chad',
'CL': 'Chile',
'CN': 'China',
'CX': 'Christmas Island',
'CC': 'Cocos (Keeling) Islands',
'CO': 'Colombia',
'KM': 'Comoros',
'CG': 'Congo',
'CD': 'Congo, the Democratic Republic of the',
'CK': 'Cook Islands',
'CR': 'Costa Rica',
'CI': 'Côte d\'Ivoire',
'HR': 'Croatia',
'CU': 'Cuba',
'CW': 'Curaçao',
'CY': 'Cyprus',
'CZ': 'Czech Republic',
'DK': 'Denmark',
'DJ': 'Djibouti',
'DM': 'Dominica',
'DO': 'Dominican Republic',
'EC': 'Ecuador',
'EG': 'Egypt',
'SV': 'El Salvador',
'GQ': 'Equatorial Guinea',
'ER': 'Eritrea',
'EE': 'Estonia',
'ET': 'Ethiopia',
'FK': 'Falkland Islands (Malvinas)',
'FO': 'Faroe Islands',
'FJ': 'Fiji',
'FI': 'Finland',
'FR': 'France',
'GF': 'French Guiana',
'PF': 'French Polynesia',
'TF': 'French Southern Territories',
'GA': 'Gabon',
'GM': 'Gambia',
'GE': 'Georgia',
'DE': 'Germany',
'GH': 'Ghana',
'GI': 'Gibraltar',
'GR': 'Greece',
'GL': 'Greenland',
'GD': 'Grenada',
'GP': 'Guadeloupe',
'GU': 'Guam',
'GT': 'Guatemala',
'GG': 'Guernsey',
'GN': 'Guinea',
'GW': 'Guinea-Bissau',
'GY': 'Guyana',
'HT': 'Haiti',
'HM': 'Heard Island and McDonald Islands',
'VA': 'Holy See (Vatican City State)',
'HN': 'Honduras',
'HK': 'Hong Kong',
'HU': 'Hungary',
'IS': 'Iceland',
'IN': 'India',
'ID': 'Indonesia',
'IR': 'Iran, Islamic Republic of',
'IQ': 'Iraq',
'IE': 'Ireland',
'IM': 'Isle of Man',
'IL': 'Israel',
'IT': 'Italy',
'JM': 'Jamaica',
'JP': 'Japan',
'JE': 'Jersey',
'JO': 'Jordan',
'KZ': 'Kazakhstan',
'KE': 'Kenya',
'KI': 'Kiribati',
'KP': 'Korea, Democratic People\'s Republic of',
'KR': 'Korea, Republic of',
'KW': 'Kuwait',
'KG': 'Kyrgyzstan',
'LA': 'Lao People\'s Democratic Republic',
'LV': 'Latvia',
'LB': 'Lebanon',
'LS': 'Lesotho',
'LR': 'Liberia',
'LY': 'Libya',
'LI': 'Liechtenstein',
'LT': 'Lithuania',
'LU': 'Luxembourg',
'MO': 'Macao',
'MK': 'Macedonia, the Former Yugoslav Republic of',
'MG': 'Madagascar',
'MW': 'Malawi',
'MY': 'Malaysia',
'MV': 'Maldives',
'ML': 'Mali',
'MT': 'Malta',
'MH': 'Marshall Islands',
'MQ': 'Martinique',
'MR': 'Mauritania',
'MU': 'Mauritius',
'YT': 'Mayotte',
'MX': 'Mexico',
'FM': 'Micronesia, Federated States of',
'MD': 'Moldova, Republic of',
'MC': 'Monaco',
'MN': 'Mongolia',
'ME': 'Montenegro',
'MS': 'Montserrat',
'MA': 'Morocco',
'MZ': 'Mozambique',
'MM': 'Myanmar',
'NA': 'Namibia',
'NR': 'Nauru',
'NP': 'Nepal',
'NL': 'Netherlands',
'NC': 'New Caledonia',
'NZ': 'New Zealand',
'NI': 'Nicaragua',
'NE': 'Niger',
'NG': 'Nigeria',
'NU': 'Niue',
'NF': 'Norfolk Island',
'MP': 'Northern Mariana Islands',
'NO': 'Norway',
'OM': 'Oman',
'PK': 'Pakistan',
'PW': 'Palau',
'PS': 'Palestine, State of',
'PA': 'Panama',
'PG': 'Papua New Guinea',
'PY': 'Paraguay',
'PE': 'Peru',
'PH': 'Philippines',
'PN': 'Pitcairn',
'PL': 'Poland',
'PT': 'Portugal',
'PR': 'Puerto Rico',
'QA': 'Qatar',
'RE': 'Réunion',
'RO': 'Romania',
'RU': 'Russian Federation',
'RW': 'Rwanda',
'BL': 'Saint Barthélemy',
'SH': 'Saint Helena, Ascension and Tristan da Cunha',
'KN': 'Saint Kitts and Nevis',
'LC': 'Saint Lucia',
'MF': 'Saint Martin (French part)',
'PM': 'Saint Pierre and Miquelon',
'VC': 'Saint Vincent and the Grenadines',
'WS': 'Samoa',
'SM': 'San Marino',
'ST': 'Sao Tome and Principe',
'SA': 'Saudi Arabia',
'SN': 'Senegal',
'RS': 'Serbia',
'SC': 'Seychelles',
'SL': 'Sierra Leone',
'SG': 'Singapore',
'SX': 'Sint Maarten (Dutch part)',
'SK': 'Slovakia',
'SI': 'Slovenia',
'SB': 'Solomon Islands',
'SO': 'Somalia',
'ZA': 'South Africa',
'GS': 'South Georgia and the South Sandwich Islands',
'SS': 'South Sudan',
'ES': 'Spain',
'LK': 'Sri Lanka',
'SD': 'Sudan',
'SR': 'Suriname',
'SJ': 'Svalbard and Jan Mayen',
'SZ': 'Swaziland',
'SE': 'Sweden',
'CH': 'Switzerland',
'SY': 'Syrian Arab Republic',
'TW': 'Taiwan, Province of China',
'TJ': 'Tajikistan',
'TZ': 'Tanzania, United Republic of',
'TH': 'Thailand',
'TL': 'Timor-Leste',
'TG': 'Togo',
'TK': 'Tokelau',
'TO': 'Tonga',
'TT': 'Trinidad and Tobago',
'TN': 'Tunisia',
'TR': 'Turkey',
'TM': 'Turkmenistan',
'TC': 'Turks and Caicos Islands',
'TV': 'Tuvalu',
'UG': 'Uganda',
'UA': 'Ukraine',
'AE': 'United Arab Emirates',
'GB': 'United Kingdom',
'US': 'United States',
'UM': 'United States Minor Outlying Islands',
'UY': 'Uruguay',
'UZ': 'Uzbekistan',
'VU': 'Vanuatu',
'VE': 'Venezuela, Bolivarian Republic of',
'VN': 'Viet Nam',
'VG': 'Virgin Islands, British',
'VI': 'Virgin Islands, U.S.',
'WF': 'Wallis and Futuna',
'EH': 'Western Sahara',
'YE': 'Yemen',
'ZM': 'Zambia',
'ZW': 'Zimbabwe',
}
@classmethod
def short2full(cls, code):
"""Convert an ISO 3166-2 country code to the corresponding full name"""
return cls._country_map.get(code.upper())
class GeoUtils(object):
# Major IPv4 address blocks per country
_country_ip_map = {
'AD': '46.172.224.0/19',
'AE': '94.200.0.0/13',
'AF': '149.54.0.0/17',
'AG': '209.59.64.0/18',
'AI': '204.14.248.0/21',
'AL': '46.99.0.0/16',
'AM': '46.70.0.0/15',
'AO': '105.168.0.0/13',
'AP': '182.50.184.0/21',
'AQ': '23.154.160.0/24',
'AR': '181.0.0.0/12',
'AS': '202.70.112.0/20',
'AT': '77.116.0.0/14',
'AU': '1.128.0.0/11',
'AW': '181.41.0.0/18',
'AX': '185.217.4.0/22',
'AZ': '5.197.0.0/16',
'BA': '31.176.128.0/17',
'BB': '65.48.128.0/17',
'BD': '114.130.0.0/16',
'BE': '57.0.0.0/8',
'BF': '102.178.0.0/15',
'BG': '95.42.0.0/15',
'BH': '37.131.0.0/17',
'BI': '154.117.192.0/18',
'BJ': '137.255.0.0/16',
'BL': '185.212.72.0/23',
'BM': '196.12.64.0/18',
'BN': '156.31.0.0/16',
'BO': '161.56.0.0/16',
'BQ': '161.0.80.0/20',
'BR': '191.128.0.0/12',
'BS': '24.51.64.0/18',
'BT': '119.2.96.0/19',
'BW': '168.167.0.0/16',
'BY': '178.120.0.0/13',
'BZ': '179.42.192.0/18',
'CA': '99.224.0.0/11',
'CD': '41.243.0.0/16',
'CF': '197.242.176.0/21',
'CG': '160.113.0.0/16',
'CH': '85.0.0.0/13',
'CI': '102.136.0.0/14',
'CK': '202.65.32.0/19',
'CL': '152.172.0.0/14',
'CM': '102.244.0.0/14',
'CN': '36.128.0.0/10',
'CO': '181.240.0.0/12',
'CR': '201.192.0.0/12',
'CU': '152.206.0.0/15',
'CV': '165.90.96.0/19',
'CW': '190.88.128.0/17',
'CY': '31.153.0.0/16',
'CZ': '88.100.0.0/14',
'DE': '53.0.0.0/8',
'DJ': '197.241.0.0/17',
'DK': '87.48.0.0/12',
'DM': '192.243.48.0/20',
'DO': '152.166.0.0/15',
'DZ': '41.96.0.0/12',
'EC': '186.68.0.0/15',
'EE': '90.190.0.0/15',
'EG': '156.160.0.0/11',
'ER': '196.200.96.0/20',
'ES': '88.0.0.0/11',
'ET': '196.188.0.0/14',
'EU': '2.16.0.0/13',
'FI': '91.152.0.0/13',
'FJ': '144.120.0.0/16',
'FK': '80.73.208.0/21',
'FM': '119.252.112.0/20',
'FO': '88.85.32.0/19',
'FR': '90.0.0.0/9',
'GA': '41.158.0.0/15',
'GB': '25.0.0.0/8',
'GD': '74.122.88.0/21',
'GE': '31.146.0.0/16',
'GF': '161.22.64.0/18',
'GG': '62.68.160.0/19',
'GH': '154.160.0.0/12',
'GI': '95.164.0.0/16',
'GL': '88.83.0.0/19',
'GM': '160.182.0.0/15',
'GN': '197.149.192.0/18',
'GP': '104.250.0.0/19',
'GQ': '105.235.224.0/20',
'GR': '94.64.0.0/13',
'GT': '168.234.0.0/16',
'GU': '168.123.0.0/16',
'GW': '197.214.80.0/20',
'GY': '181.41.64.0/18',
'HK': '113.252.0.0/14',
'HN': '181.210.0.0/16',
'HR': '93.136.0.0/13',
'HT': '148.102.128.0/17',
'HU': '84.0.0.0/14',
'ID': '39.192.0.0/10',
'IE': '87.32.0.0/12',
'IL': '79.176.0.0/13',
'IM': '5.62.80.0/20',
'IN': '117.192.0.0/10',
'IO': '203.83.48.0/21',
'IQ': '37.236.0.0/14',
'IR': '2.176.0.0/12',
'IS': '82.221.0.0/16',
'IT': '79.0.0.0/10',
'JE': '87.244.64.0/18',
'JM': '72.27.0.0/17',
'JO': '176.29.0.0/16',
'JP': '133.0.0.0/8',
'KE': '105.48.0.0/12',
'KG': '158.181.128.0/17',
'KH': '36.37.128.0/17',
'KI': '103.25.140.0/22',
'KM': '197.255.224.0/20',
'KN': '198.167.192.0/19',
'KP': '175.45.176.0/22',
'KR': '175.192.0.0/10',
'KW': '37.36.0.0/14',
'KY': '64.96.0.0/15',
'KZ': '2.72.0.0/13',
'LA': '115.84.64.0/18',
'LB': '178.135.0.0/16',
'LC': '24.92.144.0/20',
'LI': '82.117.0.0/19',
'LK': '112.134.0.0/15',
'LR': '102.183.0.0/16',
'LS': '129.232.0.0/17',
'LT': '78.56.0.0/13',
'LU': '188.42.0.0/16',
'LV': '46.109.0.0/16',
'LY': '41.252.0.0/14',
'MA': '105.128.0.0/11',
'MC': '88.209.64.0/18',
'MD': '37.246.0.0/16',
'ME': '178.175.0.0/17',
'MF': '74.112.232.0/21',
'MG': '154.126.0.0/17',
'MH': '117.103.88.0/21',
'MK': '77.28.0.0/15',
'ML': '154.118.128.0/18',
'MM': '37.111.0.0/17',
'MN': '49.0.128.0/17',
'MO': '60.246.0.0/16',
'MP': '202.88.64.0/20',
'MQ': '109.203.224.0/19',
'MR': '41.188.64.0/18',
'MS': '208.90.112.0/22',
'MT': '46.11.0.0/16',
'MU': '105.16.0.0/12',
'MV': '27.114.128.0/18',
'MW': '102.70.0.0/15',
'MX': '187.192.0.0/11',
'MY': '175.136.0.0/13',
'MZ': '197.218.0.0/15',
'NA': '41.182.0.0/16',
'NC': '101.101.0.0/18',
'NE': '197.214.0.0/18',
'NF': '203.17.240.0/22',
'NG': '105.112.0.0/12',
'NI': '186.76.0.0/15',
'NL': '145.96.0.0/11',
'NO': '84.208.0.0/13',
'NP': '36.252.0.0/15',
'NR': '203.98.224.0/19',
'NU': '49.156.48.0/22',
'NZ': '49.224.0.0/14',
'OM': '5.36.0.0/15',
'PA': '186.72.0.0/15',
'PE': '186.160.0.0/14',
'PF': '123.50.64.0/18',
'PG': '124.240.192.0/19',
'PH': '49.144.0.0/13',
'PK': '39.32.0.0/11',
'PL': '83.0.0.0/11',
'PM': '70.36.0.0/20',
'PR': '66.50.0.0/16',
'PS': '188.161.0.0/16',
'PT': '85.240.0.0/13',
'PW': '202.124.224.0/20',
'PY': '181.120.0.0/14',
'QA': '37.210.0.0/15',
'RE': '102.35.0.0/16',
'RO': '79.112.0.0/13',
'RS': '93.86.0.0/15',
'RU': '5.136.0.0/13',
'RW': '41.186.0.0/16',
'SA': '188.48.0.0/13',
'SB': '202.1.160.0/19',
'SC': '154.192.0.0/11',
'SD': '102.120.0.0/13',
'SE': '78.64.0.0/12',
'SG': '8.128.0.0/10',
'SI': '188.196.0.0/14',
'SK': '78.98.0.0/15',
'SL': '102.143.0.0/17',
'SM': '89.186.32.0/19',
'SN': '41.82.0.0/15',
'SO': '154.115.192.0/18',
'SR': '186.179.128.0/17',
'SS': '105.235.208.0/21',
'ST': '197.159.160.0/19',
'SV': '168.243.0.0/16',
'SX': '190.102.0.0/20',
'SY': '5.0.0.0/16',
'SZ': '41.84.224.0/19',
'TC': '65.255.48.0/20',
'TD': '154.68.128.0/19',
'TG': '196.168.0.0/14',
'TH': '171.96.0.0/13',
'TJ': '85.9.128.0/18',
'TK': '27.96.24.0/21',
'TL': '180.189.160.0/20',
'TM': '95.85.96.0/19',
'TN': '197.0.0.0/11',
'TO': '175.176.144.0/21',
'TR': '78.160.0.0/11',
'TT': '186.44.0.0/15',
'TV': '202.2.96.0/19',
'TW': '120.96.0.0/11',
'TZ': '156.156.0.0/14',
'UA': '37.52.0.0/14',
'UG': '102.80.0.0/13',
'US': '6.0.0.0/8',
'UY': '167.56.0.0/13',
'UZ': '84.54.64.0/18',
'VA': '212.77.0.0/19',
'VC': '207.191.240.0/21',
'VE': '186.88.0.0/13',
'VG': '66.81.192.0/20',
'VI': '146.226.0.0/16',
'VN': '14.160.0.0/11',
'VU': '202.80.32.0/20',
'WF': '117.20.32.0/21',
'WS': '202.4.32.0/19',
'YE': '134.35.0.0/16',
'YT': '41.242.116.0/22',
'ZA': '41.0.0.0/11',
'ZM': '102.144.0.0/13',
'ZW': '102.177.192.0/18',
}
@classmethod
def random_ipv4(cls, code_or_block):
if len(code_or_block) == 2:
block = cls._country_ip_map.get(code_or_block.upper())
if not block:
return None
else:
block = code_or_block
addr, preflen = block.split('/')
addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0]
addr_max = addr_min | (0xffffffff >> int(preflen))
return compat_str(socket.inet_ntoa(
compat_struct_pack('!L', random.randint(addr_min, addr_max))))
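# Illustrative usage (a sketch; the country code is assumed):
#   GeoUtils.random_ipv4('DE')           -> a random address inside 53.0.0.0/8
#   GeoUtils.random_ipv4('10.0.0.0/24')  -> a random address inside that block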
class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
def __init__(self, proxies=None):
# Set default handlers
for type in ('http', 'https'):
setattr(self, '%s_open' % type,
lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
meth(r, proxy, type))
compat_urllib_request.ProxyHandler.__init__(self, proxies)
def proxy_open(self, req, proxy, type):
req_proxy = req.headers.get('Ytdl-request-proxy')
if req_proxy is not None:
proxy = req_proxy
del req.headers['Ytdl-request-proxy']
if proxy == '__noproxy__':
return None # No Proxy
if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
req.add_header('Ytdl-socks-proxy', proxy)
            # wrapping the socket with SOCKS is handled by yt-dlp's http/https handlers
return None
return compat_urllib_request.ProxyHandler.proxy_open(
self, req, proxy, type)
# Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
# released into Public Domain
# https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387
def long_to_bytes(n, blocksize=0):
"""long_to_bytes(n:long, blocksize:int) : string
Convert a long integer to a byte string.
If optional blocksize is given and greater than zero, pad the front of the
byte string with binary zeros so that the length is a multiple of
blocksize.
"""
# after much testing, this algorithm was deemed to be the fastest
s = b''
n = int(n)
while n > 0:
s = compat_struct_pack('>I', n & 0xffffffff) + s
n = n >> 32
# strip off leading zeros
for i in range(len(s)):
if s[i] != b'\000'[0]:
break
else:
# only happens when n == 0
s = b'\000'
i = 0
s = s[i:]
# add back some pad bytes. this could be done more efficiently w.r.t. the
# de-padding being done above, but sigh...
if blocksize > 0 and len(s) % blocksize:
s = (blocksize - len(s) % blocksize) * b'\000' + s
return s
def bytes_to_long(s):
"""bytes_to_long(string) : long
Convert a byte string to a long integer.
This is (essentially) the inverse of long_to_bytes().
"""
acc = 0
length = len(s)
if length % 4:
extra = (4 - length % 4)
s = b'\000' * extra + s
length = length + extra
for i in range(0, length, 4):
acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0]
return acc
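# Illustrative round-trip (a sketch):
#   bytes_to_long(b'\x01\x00')        -> 256
#   long_to_bytes(256)                -> b'\x01\x00'
#   long_to_bytes(256, blocksize=4)   -> b'\x00\x00\x01\x00'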
def ohdave_rsa_encrypt(data, exponent, modulus):
'''
Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/
Input:
data: data to encrypt, bytes-like object
exponent, modulus: parameter e and N of RSA algorithm, both integer
Output: hex string of encrypted data
Limitation: supports one block encryption only
'''
payload = int(binascii.hexlify(data[::-1]), 16)
encrypted = pow(payload, exponent, modulus)
return '%x' % encrypted
def pkcs1pad(data, length):
"""
Padding input data with PKCS#1 scheme
@param {int[]} data input data
@param {int} length target length
@returns {int[]} padded data
"""
if len(data) > length - 11:
raise ValueError('Input data too long for PKCS#1 padding')
pseudo_random = [random.randint(0, 254) for _ in range(length - len(data) - 3)]
return [0, 2] + pseudo_random + [0] + data
def encode_base_n(num, n, table=None):
FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
if not table:
table = FULL_TABLE[:n]
if n > len(table):
raise ValueError('base %d exceeds table length %d' % (n, len(table)))
if num == 0:
return table[0]
ret = ''
while num:
ret = table[num % n] + ret
num = num // n
return ret
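# Illustrative usage (a sketch; values assumed):
#   encode_base_n(255, 16)  -> 'ff'
#   encode_base_n(0, 36)    -> '0'
#   encode_base_n(35, 36)   -> 'z'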
def decode_packed_codes(code):
mobj = re.search(PACKED_CODES_RE, code)
obfuscated_code, base, count, symbols = mobj.groups()
base = int(base)
count = int(count)
symbols = symbols.split('|')
symbol_table = {}
while count:
count -= 1
base_n_count = encode_base_n(count, base)
symbol_table[base_n_count] = symbols[count] or base_n_count
return re.sub(
r'\b(\w+)\b', lambda mobj: symbol_table[mobj.group(0)],
obfuscated_code)
def caesar(s, alphabet, shift):
if shift == 0:
return s
l = len(alphabet)
return ''.join(
alphabet[(alphabet.index(c) + shift) % l] if c in alphabet else c
for c in s)
def rot47(s):
return caesar(s, r'''!"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~''', 47)
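# Illustrative usage (a sketch): rot47 maps the printable ASCII range onto
# itself and is its own inverse (47 + 47 = 94, the alphabet length):
#   rot47('pq')          -> 'AB'
#   rot47(rot47('abc'))  -> 'abc'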
def parse_m3u8_attributes(attrib):
info = {}
for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]+"|[^",]+)(?:,|$)', attrib):
if val.startswith('"'):
val = val[1:-1]
info[key] = val
return info
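# Illustrative usage (a sketch; the attribute line is assumed):
#   parse_m3u8_attributes('BANDWIDTH=800000,CODECS="avc1.4d401e,mp4a.40.2"')
#     -> {'BANDWIDTH': '800000', 'CODECS': 'avc1.4d401e,mp4a.40.2'}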
def urshift(val, n):
return val >> n if val >= 0 else (val + 0x100000000) >> n
# Based on png2str() written by @gdkchan and improved by @yokrysty
# Originally posted at https://github.com/ytdl-org/youtube-dl/issues/9706
def decode_png(png_data):
# Reference: https://www.w3.org/TR/PNG/
header = png_data[8:]
if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
raise IOError('Not a valid PNG file.')
int_map = {1: '>B', 2: '>H', 4: '>I'}
unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]
chunks = []
while header:
length = unpack_integer(header[:4])
header = header[4:]
chunk_type = header[:4]
header = header[4:]
chunk_data = header[:length]
header = header[length:]
header = header[4:] # Skip CRC
chunks.append({
'type': chunk_type,
'length': length,
'data': chunk_data
})
ihdr = chunks[0]['data']
width = unpack_integer(ihdr[:4])
height = unpack_integer(ihdr[4:8])
idat = b''
for chunk in chunks:
if chunk['type'] == b'IDAT':
idat += chunk['data']
if not idat:
raise IOError('Unable to read PNG data.')
decompressed_data = bytearray(zlib.decompress(idat))
stride = width * 3
pixels = []
def _get_pixel(idx):
x = idx % stride
y = idx // stride
return pixels[y][x]
for y in range(height):
basePos = y * (1 + stride)
filter_type = decompressed_data[basePos]
current_row = []
pixels.append(current_row)
for x in range(stride):
color = decompressed_data[1 + basePos + x]
basex = y * stride + x
left = 0
up = 0
if x > 2:
left = _get_pixel(basex - 3)
if y > 0:
up = _get_pixel(basex - stride)
if filter_type == 1: # Sub
color = (color + left) & 0xff
elif filter_type == 2: # Up
color = (color + up) & 0xff
elif filter_type == 3: # Average
color = (color + ((left + up) >> 1)) & 0xff
elif filter_type == 4: # Paeth
a = left
b = up
c = 0
if x > 2 and y > 0:
c = _get_pixel(basex - stride - 3)
p = a + b - c
pa = abs(p - a)
pb = abs(p - b)
pc = abs(p - c)
if pa <= pb and pa <= pc:
color = (color + a) & 0xff
elif pb <= pc:
color = (color + b) & 0xff
else:
color = (color + c) & 0xff
current_row.append(color)
return width, height, pixels
def write_xattr(path, key, value):
# This mess below finds the best xattr tool for the job
try:
# try the pyxattr module...
import xattr
if hasattr(xattr, 'set'): # pyxattr
# Unicode arguments are not supported in python-pyxattr until
# version 0.5.0
# See https://github.com/ytdl-org/youtube-dl/issues/5498
pyxattr_required_version = '0.5.0'
if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version):
# TODO: fallback to CLI tools
raise XAttrUnavailableError(
'python-pyxattr is detected but is too old. '
'yt-dlp requires %s or above while your version is %s. '
'Falling back to other xattr implementations' % (
pyxattr_required_version, xattr.__version__))
setxattr = xattr.set
else: # xattr
setxattr = xattr.setxattr
try:
setxattr(path, key, value)
except EnvironmentError as e:
raise XAttrMetadataError(e.errno, e.strerror)
except ImportError:
if compat_os_name == 'nt':
# Write xattrs to NTFS Alternate Data Streams:
# http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
assert ':' not in key
assert os.path.exists(path)
ads_fn = path + ':' + key
try:
with open(ads_fn, 'wb') as f:
f.write(value)
except EnvironmentError as e:
raise XAttrMetadataError(e.errno, e.strerror)
else:
user_has_setfattr = check_executable('setfattr', ['--version'])
user_has_xattr = check_executable('xattr', ['-h'])
if user_has_setfattr or user_has_xattr:
value = value.decode('utf-8')
if user_has_setfattr:
executable = 'setfattr'
opts = ['-n', key, '-v', value]
elif user_has_xattr:
executable = 'xattr'
opts = ['-w', key, value]
cmd = ([encodeFilename(executable, True)]
+ [encodeArgument(o) for o in opts]
+ [encodeFilename(path, True)])
try:
p = Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
except EnvironmentError as e:
raise XAttrMetadataError(e.errno, e.strerror)
stdout, stderr = p.communicate_or_kill()
stderr = stderr.decode('utf-8', 'replace')
if p.returncode != 0:
raise XAttrMetadataError(p.returncode, stderr)
else:
                # On Unix, but we cannot find pyxattr, setfattr, or xattr.
if sys.platform.startswith('linux'):
raise XAttrUnavailableError(
"Couldn't find a tool to set the xattrs. "
"Install either the python 'pyxattr' or 'xattr' "
"modules, or the GNU 'attr' package "
"(which contains the 'setfattr' tool).")
else:
raise XAttrUnavailableError(
"Couldn't find a tool to set the xattrs. "
"Install either the python 'xattr' module, "
"or the 'xattr' binary.")
def random_birthday(year_field, month_field, day_field):
start_date = datetime.date(1950, 1, 1)
end_date = datetime.date(1995, 12, 31)
offset = random.randint(0, (end_date - start_date).days)
random_date = start_date + datetime.timedelta(offset)
return {
year_field: str(random_date.year),
month_field: str(random_date.month),
day_field: str(random_date.day),
}
# Templates for internet shortcut files, which are plain text files.
DOT_URL_LINK_TEMPLATE = '''
[InternetShortcut]
URL=%(url)s
'''.lstrip()
DOT_WEBLOC_LINK_TEMPLATE = '''
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
\t<key>URL</key>
\t<string>%(url)s</string>
</dict>
</plist>
'''.lstrip()
DOT_DESKTOP_LINK_TEMPLATE = '''
[Desktop Entry]
Encoding=UTF-8
Name=%(filename)s
Type=Link
URL=%(url)s
Icon=text-html
'''.lstrip()
LINK_TEMPLATES = {
'url': DOT_URL_LINK_TEMPLATE,
'desktop': DOT_DESKTOP_LINK_TEMPLATE,
'webloc': DOT_WEBLOC_LINK_TEMPLATE,
}
def iri_to_uri(iri):
"""
Converts an IRI (Internationalized Resource Identifier, allowing Unicode characters) to a URI (Uniform Resource Identifier, ASCII-only).
The function doesn't add an additional layer of escaping; e.g., it doesn't escape `%3C` as `%253C`. Instead, it percent-escapes characters with an underlying UTF-8 encoding *besides* those already escaped, leaving the URI intact.
"""
iri_parts = compat_urllib_parse_urlparse(iri)
if '[' in iri_parts.netloc:
        raise ValueError('IPv6 URIs are not yet supported.')
# Querying `.netloc`, when there's only one bracket, also raises a ValueError.
# The `safe` argument values, that the following code uses, contain the characters that should not be percent-encoded. Everything else but letters, digits and '_.-' will be percent-encoded with an underlying UTF-8 encoding. Everything already percent-encoded will be left as is.
net_location = ''
if iri_parts.username:
net_location += compat_urllib_parse_quote(iri_parts.username, safe=r"!$%&'()*+,~")
if iri_parts.password is not None:
net_location += ':' + compat_urllib_parse_quote(iri_parts.password, safe=r"!$%&'()*+,~")
net_location += '@'
net_location += iri_parts.hostname.encode('idna').decode('utf-8') # Punycode for Unicode hostnames.
# The 'idna' encoding produces ASCII text.
if iri_parts.port is not None and iri_parts.port != 80:
net_location += ':' + str(iri_parts.port)
return compat_urllib_parse_urlunparse(
(iri_parts.scheme,
net_location,
compat_urllib_parse_quote_plus(iri_parts.path, safe=r"!$%&'()*+,/:;=@|~"),
# Unsure about the `safe` argument, since this is a legacy way of handling parameters.
compat_urllib_parse_quote_plus(iri_parts.params, safe=r"!$%&'()*+,/:;=@|~"),
# Not totally sure about the `safe` argument, since the source does not explicitly mention the query URI component.
compat_urllib_parse_quote_plus(iri_parts.query, safe=r"!$%&'()*+,/:;=?@{|}~"),
compat_urllib_parse_quote_plus(iri_parts.fragment, safe=r"!#$%&'()*+,/:;=?@{|}~")))
# Source for `safe` arguments: https://url.spec.whatwg.org/#percent-encoded-bytes.
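# Usage sketch (hypothetical IRI): non-ASCII path/query characters are percent-encoded as UTF-8, e.g.
#   iri_to_uri('http://example.com/föö?q=bär') -> 'http://example.com/f%C3%B6%C3%B6?q=b%C3%A4r'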
def to_high_limit_path(path):
if sys.platform in ['win32', 'cygwin']:
# Work around MAX_PATH limitation on Windows. The maximum allowed length for the individual path segments may still be quite limited.
return r'\\?\ '.rstrip() + os.path.abspath(path)
return path
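# Usage sketch: on Windows, to_high_limit_path(r'C:\videos\clip.mp4') -> r'\\?\C:\videos\clip.mp4';
# on other platforms the path is returned unchanged.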
def format_field(obj, field=None, template='%s', ignore=(None, ''), default='', func=None):
if field is None:
val = obj if obj is not None else default
else:
val = obj.get(field, default)
if func and val not in ignore:
val = func(val)
return template % val if val not in ignore else default
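# Usage sketch (hypothetical dict):
#   format_field({'height': 1080}, 'height', '%sp')  -> '1080p'
#   format_field({'height': None}, 'height', '%sp')  -> ''  (values in `ignore` fall back to `default`)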
def clean_podcast_url(url):
return re.sub(r'''(?x)
(?:
(?:
chtbl\.com/track|
media\.blubrry\.com| # https://create.blubrry.com/resources/podcast-media-download-statistics/getting-started/
play\.podtrac\.com
)/[^/]+|
(?:dts|www)\.podtrac\.com/(?:pts/)?redirect\.[0-9a-z]{3,4}| # http://analytics.podtrac.com/how-to-measure
flex\.acast\.com|
pd(?:
cn\.co| # https://podcorn.com/analytics-prefix/
st\.fm # https://podsights.com/docs/
)/e
)/''', '', url)
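# Usage sketch (hypothetical tracking-prefixed URL):
#   clean_podcast_url('https://chtbl.com/track/ABC123/example.com/episode.mp3')
#   -> 'https://example.com/episode.mp3'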
_HEX_TABLE = '0123456789abcdef'
def random_uuidv4():
return re.sub(r'[xy]', lambda x: _HEX_TABLE[random.randint(0, 15)], 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx')
def make_dir(path, to_screen=None):
try:
dn = os.path.dirname(path)
if dn and not os.path.exists(dn):
os.makedirs(dn)
return True
except (OSError, IOError) as err:
        if callable(to_screen):
to_screen('unable to create directory ' + error_to_compat_str(err))
return False
def get_executable_path():
from zipimport import zipimporter
if hasattr(sys, 'frozen'): # Running from PyInstaller
path = os.path.dirname(sys.executable)
elif isinstance(globals().get('__loader__'), zipimporter): # Running from ZIP
path = os.path.join(os.path.dirname(__file__), '../..')
else:
path = os.path.join(os.path.dirname(__file__), '..')
return os.path.abspath(path)
def load_plugins(name, suffix, namespace):
classes = {}
try:
plugins_spec = importlib.util.spec_from_file_location(
name, os.path.join(get_executable_path(), 'ytdlp_plugins', name, '__init__.py'))
plugins = importlib.util.module_from_spec(plugins_spec)
sys.modules[plugins_spec.name] = plugins
plugins_spec.loader.exec_module(plugins)
for name in dir(plugins):
if name in namespace:
continue
if not name.endswith(suffix):
continue
klass = getattr(plugins, name)
classes[name] = namespace[name] = klass
except FileNotFoundError:
pass
return classes
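# Usage sketch (assumes a ytdlp_plugins/<name>/__init__.py next to the executable;
# if no such plugin package exists, an empty dict is returned):
#   plugin_ies = load_plugins('extractor', 'IE', globals())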
def traverse_obj(
obj, *path_list, default=None, expected_type=None, get_all=True,
casesense=True, is_user_input=False, traverse_string=False):
''' Traverse nested list/dict/tuple
@param path_list A list of paths which are checked one by one.
Each path is a list of keys where each key is a string,
a function, a tuple of strings/None or "...".
                            When a function is given, it takes the key as argument and
returns whether the key matches or not. When a tuple is given,
all the keys given in the tuple are traversed, and
"..." traverses all the keys in the object
"None" returns the object without traversal
@param default Default value to return
@param expected_type Only accept final value of this type (Can also be any callable)
@param get_all Return all the values obtained from a path or only the first one
@param casesense Whether to consider dictionary keys as case sensitive
@param is_user_input Whether the keys are generated from user input. If True,
strings are converted to int/slice if necessary
@param traverse_string Whether to traverse inside strings. If True, any
non-compatible object will also be converted into a string
# TODO: Write tests
'''
if not casesense:
_lower = lambda k: (k.lower() if isinstance(k, str) else k)
path_list = (map(_lower, variadic(path)) for path in path_list)
def _traverse_obj(obj, path, _current_depth=0):
nonlocal depth
path = tuple(variadic(path))
for i, key in enumerate(path):
if None in (key, obj):
return obj
if isinstance(key, (list, tuple)):
obj = [_traverse_obj(obj, sub_key, _current_depth) for sub_key in key]
key = ...
if key is ...:
obj = (obj.values() if isinstance(obj, dict)
else obj if isinstance(obj, (list, tuple, LazyList))
else str(obj) if traverse_string else [])
_current_depth += 1
depth = max(depth, _current_depth)
return [_traverse_obj(inner_obj, path[i + 1:], _current_depth) for inner_obj in obj]
elif callable(key):
if isinstance(obj, (list, tuple, LazyList)):
obj = enumerate(obj)
elif isinstance(obj, dict):
obj = obj.items()
else:
if not traverse_string:
return None
obj = str(obj)
_current_depth += 1
depth = max(depth, _current_depth)
return [_traverse_obj(v, path[i + 1:], _current_depth) for k, v in obj if key(k)]
elif isinstance(obj, dict) and not (is_user_input and key == ':'):
obj = (obj.get(key) if casesense or (key in obj)
else next((v for k, v in obj.items() if _lower(k) == key), None))
else:
if is_user_input:
key = (int_or_none(key) if ':' not in key
else slice(*map(int_or_none, key.split(':'))))
if key == slice(None):
return _traverse_obj(obj, (..., *path[i + 1:]), _current_depth)
if not isinstance(key, (int, slice)):
return None
if not isinstance(obj, (list, tuple, LazyList)):
if not traverse_string:
return None
obj = str(obj)
try:
obj = obj[key]
except IndexError:
return None
return obj
if isinstance(expected_type, type):
type_test = lambda val: val if isinstance(val, expected_type) else None
elif expected_type is not None:
type_test = expected_type
else:
type_test = lambda val: val
for path in path_list:
depth = 0
val = _traverse_obj(obj, path)
if val is not None:
if depth:
for _ in range(depth - 1):
val = itertools.chain.from_iterable(v for v in val if v is not None)
val = [v for v in map(type_test, val) if v is not None]
if val:
return val if get_all else val[0]
else:
val = type_test(val)
if val is not None:
return val
return default
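# Usage sketches (hypothetical data):
#   traverse_obj({'a': [{'b': 1}, {'b': 2}]}, ('a', ..., 'b'))     -> [1, 2]
#   traverse_obj({'a': {'b': 3}}, ('a', 'c'), ('a', 'b'))          -> 3   (first path yielding a value wins)
#   traverse_obj({'a': {'b': 3}}, ('a', 'c'), default='missing')   -> 'missing'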
def traverse_dict(dictn, keys, casesense=True):
write_string('DeprecationWarning: yt_dlp.utils.traverse_dict is deprecated '
'and may be removed in a future version. Use yt_dlp.utils.traverse_obj instead')
return traverse_obj(dictn, keys, casesense=casesense, is_user_input=True, traverse_string=True)
def variadic(x, allowed_types=(str, bytes, dict)):
return x if isinstance(x, collections.abc.Iterable) and not isinstance(x, allowed_types) else (x,)
# create a JSON Web Signature (jws) with HS256 algorithm
# the resulting format is in JWS Compact Serialization
# implemented following JWT https://www.rfc-editor.org/rfc/rfc7519.html
# implemented following JWS https://www.rfc-editor.org/rfc/rfc7515.html
def jwt_encode_hs256(payload_data, key, headers={}):
header_data = {
'alg': 'HS256',
'typ': 'JWT',
}
if headers:
header_data.update(headers)
header_b64 = base64.b64encode(json.dumps(header_data).encode('utf-8'))
payload_b64 = base64.b64encode(json.dumps(payload_data).encode('utf-8'))
h = hmac.new(key.encode('utf-8'), header_b64 + b'.' + payload_b64, hashlib.sha256)
signature_b64 = base64.b64encode(h.digest())
token = header_b64 + b'.' + payload_b64 + b'.' + signature_b64
return token
# can be extended in future to verify the signature and parse header and return the algorithm used if it's not HS256
def jwt_decode_hs256(jwt):
header_b64, payload_b64, signature_b64 = jwt.split('.')
payload_data = json.loads(base64.urlsafe_b64decode(payload_b64))
return payload_data
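# Usage sketch (hypothetical payload and key):
#   token = jwt_encode_hs256({'sub': 'user'}, 'secret-key')   # bytes of the form b'<header>.<payload>.<signature>'
#   jwt_decode_hs256(token.decode('utf-8'))                   # -> {'sub': 'user'}  (the signature is not verified)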
def supports_terminal_sequences(stream):
if compat_os_name == 'nt':
from .compat import WINDOWS_VT_MODE # Must be imported locally
if not WINDOWS_VT_MODE or get_windows_version() < (10, 0, 10586):
return False
elif not os.getenv('TERM'):
return False
try:
return stream.isatty()
except BaseException:
return False
_terminal_sequences_re = re.compile('\033\\[[^m]+m')
def remove_terminal_sequences(string):
return _terminal_sequences_re.sub('', string)
def number_of_digits(number):
return len('%d' % number)
def join_nonempty(*values, delim='-', from_dict=None):
if from_dict is not None:
values = map(from_dict.get, values)
return delim.join(map(str, filter(None, values)))
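# Usage sketch:
#   join_nonempty('1080p', None, 'en', delim='.')                 -> '1080p.en'
#   join_nonempty('width', 'height', from_dict={'width': 1920})   -> '1920'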
class Config:
own_args = None
filename = None
__initialized = False
def __init__(self, parser, label=None):
self._parser, self.label = parser, label
self._loaded_paths, self.configs = set(), []
def init(self, args=None, filename=None):
assert not self.__initialized
if filename:
location = os.path.realpath(filename)
if location in self._loaded_paths:
return False
self._loaded_paths.add(location)
self.__initialized = True
self.own_args, self.filename = args, filename
for location in self._parser.parse_args(args)[0].config_locations or []:
location = compat_expanduser(location)
if os.path.isdir(location):
location = os.path.join(location, 'yt-dlp.conf')
if not os.path.exists(location):
self._parser.error(f'config location {location} does not exist')
self.append_config(self.read_file(location), location)
return True
def __str__(self):
label = join_nonempty(
self.label, 'config', f'"{self.filename}"' if self.filename else '',
delim=' ')
return join_nonempty(
self.own_args is not None and f'{label[0].upper()}{label[1:]}: {self.hide_login_info(self.own_args)}',
*(f'\n{c}'.replace('\n', '\n| ')[1:] for c in self.configs),
delim='\n')
@staticmethod
def read_file(filename, default=[]):
try:
optionf = open(filename)
except IOError:
return default # silently skip if file is not present
try:
# FIXME: https://github.com/ytdl-org/youtube-dl/commit/dfe5fa49aed02cf36ba9f743b11b0903554b5e56
contents = optionf.read()
if sys.version_info < (3,):
contents = contents.decode(preferredencoding())
res = compat_shlex_split(contents, comments=True)
finally:
optionf.close()
return res
@staticmethod
def hide_login_info(opts):
PRIVATE_OPTS = set(['-p', '--password', '-u', '--username', '--video-password', '--ap-password', '--ap-username'])
eqre = re.compile('^(?P<key>' + ('|'.join(re.escape(po) for po in PRIVATE_OPTS)) + ')=.+$')
def _scrub_eq(o):
m = eqre.match(o)
if m:
return m.group('key') + '=PRIVATE'
else:
return o
opts = list(map(_scrub_eq, opts))
for idx, opt in enumerate(opts):
if opt in PRIVATE_OPTS and idx + 1 < len(opts):
opts[idx + 1] = 'PRIVATE'
return opts
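    # Usage sketch (hypothetical argument list):
    #   Config.hide_login_info(['-u', 'alice', '--password=hunter2'])
    #   -> ['-u', 'PRIVATE', '--password=PRIVATE']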
def append_config(self, *args, label=None):
config = type(self)(self._parser, label)
config._loaded_paths = self._loaded_paths
if config.init(*args):
self.configs.append(config)
@property
def all_args(self):
for config in reversed(self.configs):
yield from config.all_args
yield from self.own_args or []
def parse_args(self):
return self._parser.parse_args(list(self.all_args))
# Decorator that swallows any exception raised by the wrapped function (returning None instead).
def block_exceptions(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            pass
    return wrapper
|
[] |
[] |
[
"TERM"
] |
[]
|
["TERM"]
|
python
| 1 | 0 | |
cli/hzn.go
|
// Command line interface to the horizon agent. Provides sub-commands to register an edge node, display info about the node, etc.
package main
import (
"flag"
"github.com/open-horizon/anax/cli/agreement"
"github.com/open-horizon/anax/cli/agreementbot"
"github.com/open-horizon/anax/cli/attribute"
"github.com/open-horizon/anax/cli/cliconfig"
"github.com/open-horizon/anax/cli/cliutils"
"github.com/open-horizon/anax/cli/deploycheck"
"github.com/open-horizon/anax/cli/dev"
"github.com/open-horizon/anax/cli/eventlog"
"github.com/open-horizon/anax/cli/exchange"
_ "github.com/open-horizon/anax/cli/i18n_messages"
"github.com/open-horizon/anax/cli/key"
"github.com/open-horizon/anax/cli/kube_deployment"
"github.com/open-horizon/anax/cli/metering"
_ "github.com/open-horizon/anax/cli/native_deployment"
"github.com/open-horizon/anax/cli/node"
"github.com/open-horizon/anax/cli/policy"
"github.com/open-horizon/anax/cli/register"
"github.com/open-horizon/anax/cli/sdo"
secret_manager "github.com/open-horizon/anax/cli/secrets_manager"
"github.com/open-horizon/anax/cli/service"
"github.com/open-horizon/anax/cli/status"
"github.com/open-horizon/anax/cli/sync_service"
"github.com/open-horizon/anax/cli/unregister"
"github.com/open-horizon/anax/cli/userinput"
"github.com/open-horizon/anax/cli/utilcmds"
"github.com/open-horizon/anax/cutil"
"github.com/open-horizon/anax/i18n"
"github.com/open-horizon/anax/version"
"gopkg.in/alecthomas/kingpin.v2"
"os"
"runtime"
"strings"
)
func main() {
// Shut off the Anax runtime logging, so functions reused from anax don't fight with the kingpin parsing of args/flags.
	// Also, in the reused code we need to change any calls like glog.Infof("some string") to glog.V(3).Infof("some string").
flag.Set("v", "0")
// initialize the message printer for globalization for the cliconfig.SetEnvVarsFromConfigFiles("") call
if err := i18n.InitMessagePrinter(false); err != nil {
cliutils.Verbose("%v. The messages will be displayed in English.", err)
i18n.InitMessagePrinter(true)
}
// set up environment variables from the cli package configuration file and user configuration file.
cliconfig.SetEnvVarsFromConfigFiles("")
// initialize the message printer for globalization again because HZN_LANG could have changed from the above call.
if err := i18n.InitMessagePrinter(false); err != nil {
cliutils.Verbose("%v. The messages will be displayed in English.", err)
i18n.InitMessagePrinter(true)
}
// get message printer
msgPrinter := i18n.GetMessagePrinter()
	// The sample file directory is different between Linux and Mac.
sample_dir := "/usr/horizon/samples"
if runtime.GOOS == "darwin" {
sample_dir = "/Users/Shared/horizon-cli/samples"
}
// Command flags and args - see https://github.com/alecthomas/kingpin
	app := kingpin.New("hzn", msgPrinter.Sprintf(`Command line interface for Horizon agent. Most of the sub-commands use the Horizon Agent API at the default location http://localhost (see the Environment Variables section to override this).
Subcommands Description:
agbot: List and manage Horizon agreement bot resources.
agreement: List or manage the active or archived agreements this edge node has made with a Horizon agreement bot.
architecture: Show the architecture of this machine (as defined by Horizon and golang).
attribute: List or manage the global attributes that are currently registered on this Horizon edge node.
deploycheck: Check deployment compatibility.
dev: Development tools for creation of services.
env: Show the Horizon Environment Variables.
eventlog: List the event logs for the current or all registrations.
exchange: List and manage Horizon Exchange resources.
key: List and manage keys for signing and verifying services.
metering: List or manage the metering (payment) information for the active or archived agreements.
mms: List and manage Horizon Model Management Service resources.
node: List and manage general information about this Horizon edge node.
policy: List and manage policy for this Horizon edge node.
reginput: Create an input file template for this pattern that can be used for the 'hzn register' command (once filled in).
This examines the services that the specified pattern uses, and determines the node owner input that is required for them.
register: Register this edge node with the management hub.
service: List or manage the services that are currently registered on this Horizon edge node.
status: Display the current horizon internal status for the node.
unregister: Unregister and reset this Horizon edge node so that it is ready to be registered again.
  userinput: List or manage the service user inputs that are currently registered on this Horizon edge node.
util: Utility commands.
version: Show the Horizon version.
sdo: List and manage Horizon SDO ownership vouchers and keys.
Environment Variables:
HORIZON_URL: Override the URL at which hzn contacts the Horizon Agent API.
This can facilitate using a remote Horizon Agent via an ssh tunnel.
HZN_EXCHANGE_URL: Override the URL that the 'hzn exchange' sub-commands use
to communicate with the Horizon Exchange, for example
https://exchange.bluehorizon.network/api/v1. (By default hzn will ask the
Horizon Agent for the URL.)
HZN_ORG_ID: Default value for the 'hzn exchange -o' flag,
               to specify the organization ID.
HZN_EXCHANGE_USER_AUTH: Default value for the 'hzn exchange -u' or 'hzn
register -u' flag, in the form '[org/]user:pw'. Notice that HZN_ORG_ID can be set
if org is omitted when HZN_EXCHANGE_USER_AUTH is set.
HZN_FSS_CSSURL: Override the URL that the 'hzn mms' sub-commands use
to communicate with the Horizon Model Management Service, for example
https://exchange.bluehorizon.network/css/. (By default hzn will ask the
Horizon Agent for the URL.)
HZN_SDO_SVC_URL: Override the URL that the 'hzn sdo' sub-commands use
to communicate with SDO owner services. (By default hzn will ask the
Horizon Agent for the URL.)
All these environment variables, and the ones mentioned in the command help, can be
specified in the user's configuration file ~/.hzn/hzn.json in JSON format.
For example:
%s
`, `{
"HZN_ORG_ID": "[email protected]"
}
`))
app.HelpFlag.Short('h')
app.UsageTemplate(kingpin.CompactUsageTemplate)
cliutils.Opts.Verbose = app.Flag("verbose", msgPrinter.Sprintf("Verbose output.")).Short('v').Bool()
cliutils.Opts.IsDryRun = app.Flag("dry-run", msgPrinter.Sprintf("When calling the Horizon or Exchange API, do GETs, but don't do PUTs, POSTs, or DELETEs.")).Bool()
agbotCmd := app.Command("agbot", msgPrinter.Sprintf("List and manage Horizon agreement bot resources."))
agbotAgreementCmd := agbotCmd.Command("agreement | ag", msgPrinter.Sprintf("List or manage the active or archived agreements this Horizon agreement bot has with edge nodes.")).Alias("ag").Alias("agreement")
	agbotAgreementCancelCmd := agbotAgreementCmd.Command("cancel | can", msgPrinter.Sprintf("Cancel 1 or all of the active agreements this Horizon agreement bot has with edge nodes. Usually an agbot will immediately negotiate a new agreement.")).Alias("can").Alias("cancel")
agbotCancelAllAgreements := agbotAgreementCancelCmd.Flag("all", msgPrinter.Sprintf("Cancel all of the current agreements.")).Short('a').Bool()
agbotCancelAgreementId := agbotAgreementCancelCmd.Arg("agreement", msgPrinter.Sprintf("The active agreement to cancel.")).String()
agbotAgreementListCmd := agbotAgreementCmd.Command("list | ls", msgPrinter.Sprintf("List the active or archived agreements this Horizon agreement bot has with edge nodes.")).Alias("ls").Alias("list")
agbotlistArchivedAgreements := agbotAgreementListCmd.Flag("archived", msgPrinter.Sprintf("List archived agreements instead of the active agreements.")).Short('r').Bool()
agbotAgreement := agbotAgreementListCmd.Arg("agreement-id", msgPrinter.Sprintf("Show the details of this active or archived agreement.")).String()
agbotCacheCmd := agbotCmd.Command("cache", msgPrinter.Sprintf("Manage cached agbot-serving organizations, patterns, and deployment policies."))
agbotCacheDeployPol := agbotCacheCmd.Command("deploymentpol | dep", msgPrinter.Sprintf("List served deployment policies cached in the agbot.")).Alias("dep").Alias("deploymentpol")
agbotCacheDeployPolList := agbotCacheDeployPol.Command("list | ls", msgPrinter.Sprintf("Display served deployment policies cached in the agbot.")).Alias("ls").Alias("list")
agbotCacheDeployPolListOrg := agbotCacheDeployPolList.Flag("org", msgPrinter.Sprintf("Display policies under this org.")).Short('o').String()
agbotCacheDeployPolListName := agbotCacheDeployPolList.Arg("name", msgPrinter.Sprintf("Display this policy.")).String()
agbotCacheDeployPolListLong := agbotCacheDeployPolList.Flag("long", msgPrinter.Sprintf("Display detailed info.")).Short('l').Bool()
agbotCachePattern := agbotCacheCmd.Command("pattern | pat", msgPrinter.Sprintf("List patterns cached in the agbot.")).Alias("pat").Alias("pattern")
agbotCachePatternList := agbotCachePattern.Command("list | ls", msgPrinter.Sprintf("Display served patterns cached in the agbot.")).Alias("ls").Alias("list")
agbotCachePatternListOrg := agbotCachePatternList.Flag("org", msgPrinter.Sprintf("Display patterns under this org.")).Short('o').String()
agbotCachePatternListName := agbotCachePatternList.Arg("name", msgPrinter.Sprintf("Display this pattern.")).String()
agbotCachePatternListLong := agbotCachePatternList.Flag("long", msgPrinter.Sprintf("Display detailed info.")).Short('l').Bool()
agbotCacheServedOrg := agbotCacheCmd.Command("servedorg | sorg", msgPrinter.Sprintf("List served pattern orgs and deployment policy orgs.")).Alias("sorg").Alias("servedorg")
agbotCacheServedOrgList := agbotCacheServedOrg.Command("list | ls", msgPrinter.Sprintf("Display served pattern orgs and deployment policy orgs.")).Alias("ls").Alias("list")
agbotListCmd := agbotCmd.Command("list | ls", msgPrinter.Sprintf("Display general information about this Horizon agbot node.")).Alias("ls").Alias("list")
agbotPolicyCmd := agbotCmd.Command("policy | pol", msgPrinter.Sprintf("List the policies this Horizon agreement bot hosts.")).Alias("pol").Alias("policy")
agbotPolicyListCmd := agbotPolicyCmd.Command("list | ls", msgPrinter.Sprintf("List policies this Horizon agreement bot hosts.")).Alias("ls").Alias("list")
agbotPolicyOrg := agbotPolicyListCmd.Arg("org", msgPrinter.Sprintf("The organization the policy belongs to.")).String()
agbotPolicyName := agbotPolicyListCmd.Arg("name", msgPrinter.Sprintf("The policy name.")).String()
agbotStatusCmd := agbotCmd.Command("status", msgPrinter.Sprintf("Display the current horizon internal status for the Horizon agreement bot."))
agbotStatusLong := agbotStatusCmd.Flag("long", msgPrinter.Sprintf("Show detailed status")).Short('l').Bool()
agreementCmd := app.Command("agreement | ag", msgPrinter.Sprintf("List or manage the active or archived agreements this edge node has made with a Horizon agreement bot.")).Alias("ag").Alias("agreement")
agreementListCmd := agreementCmd.Command("list | ls", msgPrinter.Sprintf("List the active or archived agreements this edge node has made with a Horizon agreement bot.")).Alias("ls").Alias("list")
listAgreementId := agreementListCmd.Arg("agreement-id", msgPrinter.Sprintf("Show the details of this active or archived agreement.")).String()
listArchivedAgreements := agreementListCmd.Flag("archived", msgPrinter.Sprintf("List archived agreements instead of the active agreements.")).Short('r').Bool()
	agreementCancelCmd := agreementCmd.Command("cancel | can", msgPrinter.Sprintf("Cancel 1 or all of the active agreements this edge node has made with a Horizon agreement bot. Usually an agbot will immediately negotiate a new agreement. If you want to cancel all agreements and not have this edge accept new agreements, run 'hzn unregister'.")).Alias("can").Alias("cancel")
cancelAllAgreements := agreementCancelCmd.Flag("all", msgPrinter.Sprintf("Cancel all of the current agreements.")).Short('a').Bool()
cancelAgreementId := agreementCancelCmd.Arg("agreement-id", msgPrinter.Sprintf("The active agreement to cancel.")).String()
archCmd := app.Command("architecture", msgPrinter.Sprintf("Show the architecture of this machine (as defined by Horizon and golang)."))
attributeCmd := app.Command("attribute | attr", msgPrinter.Sprintf("List or manage the global attributes that are currently registered on this Horizon edge node.")).Alias("attr").Alias("attribute")
attributeListCmd := attributeCmd.Command("list | ls", msgPrinter.Sprintf("List the global attributes that are currently registered on this Horizon edge node.")).Alias("ls").Alias("list")
deploycheckCmd := app.Command("deploycheck | dc", msgPrinter.Sprintf("Check deployment compatibility.")).Alias("dc").Alias("deploycheck")
deploycheckOrg := deploycheckCmd.Flag("org", msgPrinter.Sprintf("The Horizon exchange organization ID. If not specified, HZN_ORG_ID will be used as a default.")).Short('o').String()
deploycheckUserPw := deploycheckCmd.Flag("user-pw", msgPrinter.Sprintf("Horizon exchange user credential to query exchange resources. If not specified, HZN_EXCHANGE_USER_AUTH or HZN_EXCHANGE_NODE_AUTH will be used as a default. If you don't prepend it with the organization id, it will automatically be prepended with the -o value.")).Short('u').PlaceHolder("USER:PW").String()
deploycheckCheckAll := deploycheckCmd.Flag("check-all", msgPrinter.Sprintf("Show the compatibility status of all the service versions referenced in the deployment policy.")).Short('c').Bool()
deploycheckLong := deploycheckCmd.Flag("long", msgPrinter.Sprintf("Show policies and userinput used for the compatibility checking.")).Short('l').Bool()
allCompCmd := deploycheckCmd.Command("all", msgPrinter.Sprintf("Check all compatibilities for a deployment."))
allCompNodeArch := allCompCmd.Flag("arch", msgPrinter.Sprintf("The architecture of the node. It is required when -n is not specified. If omitted, the service of all the architectures referenced in the deployment policy or pattern will be checked for compatibility.")).Short('a').String()
allCompNodeType := allCompCmd.Flag("node-type", msgPrinter.Sprintf("The node type. The valid values are 'device' and 'cluster'. The default value is the type of the node provided by -n or current registered device, if omitted.")).Short('t').String()
allCompNodeOrg := allCompCmd.Flag("node-org", msgPrinter.Sprintf("The organization of the node. The default value is the organization of the node provided by -n or current registered device, if omitted.")).Short('O').String()
allCompNodeId := allCompCmd.Flag("node-id", msgPrinter.Sprintf("The Horizon exchange node ID. Mutually exclusive with --node-pol and --node-ui. If omitted, the node ID that the current device is registered with will be used. If you don't prepend it with the organization id, it will automatically be prepended with the -o value.")).Short('n').String()
allCompNodePolFile := allCompCmd.Flag("node-pol", msgPrinter.Sprintf("The JSON input file name containing the node policy. Mutually exclusive with -n, -p and -P.")).String()
allCompNodeUIFile := allCompCmd.Flag("node-ui", msgPrinter.Sprintf("The JSON input file name containing the node user input. Mutually exclusive with -n.")).String()
allCompBPolId := allCompCmd.Flag("business-pol-id", "").Hidden().String()
allCompDepPolId := allCompCmd.Flag("deployment-pol-id", msgPrinter.Sprintf("The Horizon exchange deployment policy ID. Mutually exclusive with -B, -p and -P. If you don't prepend it with the organization id, it will automatically be prepended with the node's organization id.")).Short('b').String()
allCompBPolFile := allCompCmd.Flag("business-pol", "").Hidden().String()
allCompDepPolFile := allCompCmd.Flag("deployment-pol", msgPrinter.Sprintf("The JSON input file name containing the deployment policy. Mutually exclusive with -b, -p and -P.")).Short('B').String()
allCompSPolFile := allCompCmd.Flag("service-pol", msgPrinter.Sprintf("(optional) The JSON input file name containing the service policy. Mutually exclusive with -p and -P. If omitted, the service policy will be retrieved from the Exchange for the service defined in the deployment policy.")).String()
allCompSvcFile := allCompCmd.Flag("service", msgPrinter.Sprintf("(optional) The JSON input file name containing the service definition. If omitted, the service defined in the deployment policy or pattern will be retrieved from the Exchange. This flag can be repeated to specify different versions of the service.")).Strings()
allCompPatternId := allCompCmd.Flag("pattern-id", msgPrinter.Sprintf("The Horizon exchange pattern ID. Mutually exclusive with -P, -b, -B --node-pol and --service-pol. If you don't prepend it with the organization id, it will automatically be prepended with the node's organization id.")).Short('p').String()
allCompPatternFile := allCompCmd.Flag("pattern", msgPrinter.Sprintf("The JSON input file name containing the pattern. Mutually exclusive with -p, -b and -B, --node-pol and --service-pol.")).Short('P').String()
policyCompCmd := deploycheckCmd.Command("policy | pol", msgPrinter.Sprintf("Check policy compatibility.")).Alias("pol").Alias("policy")
policyCompNodeArch := policyCompCmd.Flag("arch", msgPrinter.Sprintf("The architecture of the node. It is required when -n is not specified. If omitted, the service of all the architectures referenced in the deployment policy will be checked for compatibility.")).Short('a').String()
policyCompNodeType := policyCompCmd.Flag("node-type", msgPrinter.Sprintf("The node type. The valid values are 'device' and 'cluster'. The default value is the type of the node provided by -n or current registered device, if omitted.")).Short('t').String()
policyCompNodeId := policyCompCmd.Flag("node-id", msgPrinter.Sprintf("The Horizon exchange node ID. Mutually exclusive with --node-pol. If omitted, the node ID that the current device is registered with will be used. If you don't prepend it with the organization id, it will automatically be prepended with the -o value.")).Short('n').String()
policyCompNodePolFile := policyCompCmd.Flag("node-pol", msgPrinter.Sprintf("The JSON input file name containing the node policy. Mutually exclusive with -n.")).String()
policyCompBPolId := policyCompCmd.Flag("business-pol-id", "").Hidden().String()
policyCompDepPolId := policyCompCmd.Flag("deployment-pol-id", msgPrinter.Sprintf("The Horizon exchange deployment policy ID. Mutually exclusive with -B. If you don't prepend it with the organization id, it will automatically be prepended with the node's organization id.")).Short('b').String()
policyCompBPolFile := policyCompCmd.Flag("business-pol", "").Hidden().String()
policyCompDepPolFile := policyCompCmd.Flag("deployment-pol", msgPrinter.Sprintf("The JSON input file name containing the Deployment policy. Mutually exclusive with -b.")).Short('B').String()
policyCompSPolFile := policyCompCmd.Flag("service-pol", msgPrinter.Sprintf("(optional) The JSON input file name containing the service policy. If omitted, the service policy will be retrieved from the Exchange for the service defined in the deployment policy.")).String()
policyCompSvcFile := policyCompCmd.Flag("service", msgPrinter.Sprintf("(optional) The JSON input file name containing the service definition. Mutually exclusive with -b. If omitted, the service referenced in the deployment policy is retrieved from the Exchange. This flag can be repeated to specify different versions of the service.")).Strings()
secretCompCmd := deploycheckCmd.Command("secretbinding | sb", msgPrinter.Sprintf("Check secret bindings.")).Alias("sb").Alias("secretbinding")
secretCompNodeArch := secretCompCmd.Flag("arch", msgPrinter.Sprintf("The architecture of the node. It is required when -n is not specified. If omitted, the service of all the architectures referenced in the deployment policy or pattern will be checked for compatibility.")).Short('a').String()
secretCompNodeOrg := secretCompCmd.Flag("node-org", msgPrinter.Sprintf("The organization of the node. The default value is the organization of the node provided by -n or current registered device, if omitted.")).Short('O').String()
secretCompNodeType := secretCompCmd.Flag("node-type", msgPrinter.Sprintf("The node type. The valid values are 'device' and 'cluster'. The default value is the type of the node provided by -n or current registered device, if omitted.")).Short('t').String()
secretCompNodeId := secretCompCmd.Flag("node-id", msgPrinter.Sprintf("The Horizon exchange node ID. If omitted, the node ID that the current device is registered with will be used. If you don't prepend it with the organization id, it will automatically be prepended with the -o value.")).Short('n').String()
secretCompDepPolId := secretCompCmd.Flag("deployment-pol-id", msgPrinter.Sprintf("The Horizon exchange deployment policy ID. Mutually exclusive with -B, -p and -P. If you don't prepend it with the organization id, it will automatically be prepended with the node's organization id.")).Short('b').String()
secretCompDepPolFile := secretCompCmd.Flag("deployment-pol", msgPrinter.Sprintf("The JSON input file name containing the deployment policy. Mutually exclusive with -b, -p and -P.")).Short('B').String()
secretCompSvcFile := secretCompCmd.Flag("service", msgPrinter.Sprintf("(optional) The JSON input file name containing the service definition. If omitted, the service defined in the deployment policy or pattern will be retrieved from the Exchange. This flag can be repeated to specify different versions of the service.")).Strings()
secretCompPatternId := secretCompCmd.Flag("pattern-id", msgPrinter.Sprintf("The Horizon exchange pattern ID. Mutually exclusive with -P, -b and -B. If you don't prepend it with the organization id, it will automatically be prepended with the node's organization id.")).Short('p').String()
secretCompPatternFile := secretCompCmd.Flag("pattern", msgPrinter.Sprintf("The JSON input file name containing the pattern. Mutually exclusive with -p, -b and -B.")).Short('P').String()
userinputCompCmd := deploycheckCmd.Command("userinput | u", msgPrinter.Sprintf("Check user input compatibility.")).Alias("u").Alias("userinput")
userinputCompNodeArch := userinputCompCmd.Flag("arch", msgPrinter.Sprintf("The architecture of the node. It is required when -n is not specified. If omitted, the service of all the architectures referenced in the deployment policy or pattern will be checked for compatibility.")).Short('a').String()
userinputCompNodeType := userinputCompCmd.Flag("node-type", msgPrinter.Sprintf("The node type. The valid values are 'device' and 'cluster'. The default value is the type of the node provided by -n or current registered device, if omitted.")).Short('t').String()
userinputCompNodeId := userinputCompCmd.Flag("node-id", msgPrinter.Sprintf("The Horizon exchange node ID. Mutually exclusive with --node-ui. If omitted, the node ID that the current device is registered with will be used. If you don't prepend it with the organization id, it will automatically be prepended with the -o value.")).Short('n').String()
userinputCompNodeUIFile := userinputCompCmd.Flag("node-ui", msgPrinter.Sprintf("The JSON input file name containing the node user input. Mutually exclusive with -n.")).String()
userinputCompBPolId := userinputCompCmd.Flag("business-pol-id", "").Hidden().String()
userinputCompDepPolId := userinputCompCmd.Flag("deployment-pol-id", msgPrinter.Sprintf("The Horizon exchange deployment policy ID. Mutually exclusive with -B, -p and -P. If you don't prepend it with the organization id, it will automatically be prepended with the node's organization id.")).Short('b').String()
userinputCompBPolFile := userinputCompCmd.Flag("business-pol", "").Hidden().String()
userinputCompDepPolFile := userinputCompCmd.Flag("deployment-pol", msgPrinter.Sprintf("The JSON input file name containing the deployment policy. Mutually exclusive with -b, -p and -P.")).Short('B').String()
userinputCompSvcFile := userinputCompCmd.Flag("service", msgPrinter.Sprintf("(optional) The JSON input file name containing the service definition. If omitted, the service defined in the deployment policy or pattern will be retrieved from the Exchange. This flag can be repeated to specify different versions of the service.")).Strings()
userinputCompPatternId := userinputCompCmd.Flag("pattern-id", msgPrinter.Sprintf("The Horizon exchange pattern ID. Mutually exclusive with -P, -b and -B. If you don't prepend it with the organization id, it will automatically be prepended with the node's organization id.")).Short('p').String()
userinputCompPatternFile := userinputCompCmd.Flag("pattern", msgPrinter.Sprintf("The JSON input file name containing the pattern. Mutually exclusive with -p, -b and -B.")).Short('P').String()
devCmd := app.Command("dev", msgPrinter.Sprintf("Development tools for creation of services."))
devHomeDirectory := devCmd.Flag("directory", msgPrinter.Sprintf("Directory containing Horizon project metadata. If omitted, a subdirectory called 'horizon' under current directory will be used.")).Short('d').String()
devDependencyCmd := devCmd.Command("dependency | dep", msgPrinter.Sprintf("For working with project dependencies.")).Alias("dep").Alias("dependency")
devDependencyCmdSpecRef := devDependencyCmd.Flag("specRef", msgPrinter.Sprintf("The URL of the service dependency in the Exchange. Mutually exclusive with -p and --url.")).Short('s').String()
devDependencyCmdURL := devDependencyCmd.Flag("url", msgPrinter.Sprintf("The URL of the service dependency in the Exchange. Mutually exclusive with -p and --specRef.")).String()
devDependencyCmdOrg := devDependencyCmd.Flag("org", msgPrinter.Sprintf("The Org of the service dependency in the Exchange. Mutually exclusive with -p.")).Short('o').String()
devDependencyCmdVersion := devDependencyCmd.Flag("ver", msgPrinter.Sprintf("(optional) The Version of the service dependency in the Exchange. Mutually exclusive with -p.")).String()
devDependencyCmdArch := devDependencyCmd.Flag("arch", msgPrinter.Sprintf("(optional) The hardware Architecture of the service dependency in the Exchange. Mutually exclusive with -p.")).Short('a').String()
	devDependencyFetchCmd := devDependencyCmd.Command("fetch | f", msgPrinter.Sprintf("Retrieve Horizon metadata for a new dependency.")).Alias("f").Alias("fetch")
devDependencyFetchCmdProject := devDependencyFetchCmd.Flag("project", msgPrinter.Sprintf("Horizon project containing the definition of a dependency. Mutually exclusive with -s -o --ver -a and --url.")).Short('p').ExistingDir()
devDependencyFetchCmdUserPw := devDependencyFetchCmd.Flag("user-pw", msgPrinter.Sprintf("Horizon Exchange user credentials to query exchange resources. The default is HZN_EXCHANGE_USER_AUTH environment variable. If you don't prepend it with the user's org, it will automatically be prepended with the value of the HZN_ORG_ID environment variable.")).Short('u').PlaceHolder("USER:PW").String()
devDependencyFetchCmdUserInputFile := devDependencyFetchCmd.Flag("userInputFile", msgPrinter.Sprintf("File containing user input values for configuring the new dependency. If omitted, the userinput file in the dependency project will be used.")).Short('f').ExistingFile()
devDependencyListCmd := devDependencyCmd.Command("list | ls", msgPrinter.Sprintf("List all dependencies.")).Alias("ls").Alias("list")
devDependencyRemoveCmd := devDependencyCmd.Command("remove | rm", msgPrinter.Sprintf("Remove a project dependency.")).Alias("rm").Alias("remove")
devServiceCmd := devCmd.Command("service | serv", msgPrinter.Sprintf("For working with a service project.")).Alias("serv").Alias("service")
devServiceLogCmd := devServiceCmd.Command("log", msgPrinter.Sprintf("Show the container/system logs for a service."))
devServiceLogCmdServiceName := devServiceLogCmd.Arg("service", msgPrinter.Sprintf("The name of the service whose log records should be displayed. The service name is the same as the url field of a service definition.")).String()
devServiceLogCmd.Flag("service", msgPrinter.Sprintf("(DEPRECATED) This flag is deprecated and is replaced by -c.")).Short('s').String()
devServiceLogCmdContainerName := devServiceLogCmd.Flag("container", msgPrinter.Sprintf("The name of the service container whose log records should be displayed. Can be omitted if the service definition has only one container in its deployment config.")).Default(*devServiceLogCmdServiceName).Short('c').String()
devServiceLogCmdTail := devServiceLogCmd.Flag("tail", msgPrinter.Sprintf("Continuously polls the service's logs to display the most recent records, similar to tail -F behavior.")).Short('f').Bool()
devServiceNewCmd := devServiceCmd.Command("new", msgPrinter.Sprintf("Create a new service project."))
devServiceNewCmdOrg := devServiceNewCmd.Flag("org", msgPrinter.Sprintf("The Org id that the service is defined within. If this flag is omitted, the HZN_ORG_ID environment variable is used.")).Short('o').String()
devServiceNewCmdName := devServiceNewCmd.Flag("specRef", msgPrinter.Sprintf("The name of the service. If this flag and the -i flag are omitted, only the skeletal horizon metadata files will be generated.")).Short('s').String()
devServiceNewCmdVer := devServiceNewCmd.Flag("ver", msgPrinter.Sprintf("The version of the service. If this flag is omitted, '0.0.1' is used.")).Short('V').String()
devServiceNewCmdImage := devServiceNewCmd.Flag("image", msgPrinter.Sprintf("The docker container image base name without the version tag for the service. This command will add arch and version to the base name to form the final image name. The format is 'basename_arch:serviceversion'. This flag can be repeated to specify multiple images when '--noImageGen' flag is specified. This flag is ignored for the '--dconfig %v' deployment configuration.", kube_deployment.KUBE_DEPLOYMENT_CONFIG_TYPE)).Short('i').Strings()
devServiceNewCmdNoImageGen := devServiceNewCmd.Flag("noImageGen", msgPrinter.Sprintf("Indicates that the image is built somewhere else. No image sample code will be created by this command. If this flag is not specified, files for generating a simple service image will be created under current directory.")).Bool()
devServiceNewCmdNoPattern := devServiceNewCmd.Flag("noPattern", msgPrinter.Sprintf("Indicates no pattern definition file will be created.")).Bool()
	devServiceNewCmdNoPolicy := devServiceNewCmd.Flag("noPolicy", msgPrinter.Sprintf("Indicates no policy file will be created.")).Bool()
devServiceNewCmdCfg := devServiceNewCmd.Flag("dconfig", msgPrinter.Sprintf("Indicates the type of deployment configuration that will be used, native (the default), or %v. This flag can be specified more than once to create a service with more than 1 kind of deployment configuration.", kube_deployment.KUBE_DEPLOYMENT_CONFIG_TYPE)).Short('c').Default("native").Strings()
devServiceStartTestCmd := devServiceCmd.Command("start", msgPrinter.Sprintf("Run a service in a mocked Horizon Agent environment. This command is not supported for services using the %v deployment configuration.", kube_deployment.KUBE_DEPLOYMENT_CONFIG_TYPE))
devServiceUserInputFile := devServiceStartTestCmd.Flag("userInputFile", msgPrinter.Sprintf("File containing user input values for running a test. If omitted, the userinput file for the project will be used.")).Short('f').String()
devServiceConfigFile := devServiceStartTestCmd.Flag("configFile", msgPrinter.Sprintf("File to be made available through the sync service APIs. This flag can be repeated to populate multiple files.")).Short('m').Strings()
devServiceConfigType := devServiceStartTestCmd.Flag("type", msgPrinter.Sprintf("The type of file to be made available through the sync service APIs. All config files are presumed to be of the same type. This flag is required if any configFiles are specified.")).Short('t').String()
devServiceNoFSS := devServiceStartTestCmd.Flag("noFSS", msgPrinter.Sprintf("Do not bring up file sync service (FSS) containers. They are brought up by default.")).Short('S').Bool()
devServiceStartCmdUserPw := devServiceStartTestCmd.Flag("user-pw", msgPrinter.Sprintf("Horizon Exchange user credentials to query exchange resources. Specify it when you want to automatically fetch the missing dependent services from the Exchange. The default is HZN_EXCHANGE_USER_AUTH environment variable. If you don't prepend it with the user's org, it will automatically be prepended with the value of the HZN_ORG_ID environment variable.")).Short('u').PlaceHolder("USER:PW").String()
devServiceStartSecretsFiles := devServiceStartTestCmd.Flag("secret", msgPrinter.Sprintf("Filepath of a file containing a secret that is required by the service or one of its dependent services. The filename must match a secret name in the service definition. The file is encoded in JSON as an object containing two keys both typed as a string; \"key\" is used to indicate the kind of secret, and \"value\" is the string form of the secret. This flag can be repeated.")).Strings()
devServiceStopTestCmd := devServiceCmd.Command("stop", msgPrinter.Sprintf("Stop a service that is running in a mocked Horizon Agent environment. This command is not supported for services using the %v deployment configuration.", kube_deployment.KUBE_DEPLOYMENT_CONFIG_TYPE))
devServiceValidateCmd := devServiceCmd.Command("verify | vf", msgPrinter.Sprintf("Validate the project for completeness and schema compliance.")).Alias("vf").Alias("verify")
devServiceVerifyUserInputFile := devServiceValidateCmd.Flag("userInputFile", msgPrinter.Sprintf("File containing user input values for verification of a project. If omitted, the userinput file for the project will be used.")).Short('f').String()
devServiceValidateCmdUserPw := devServiceValidateCmd.Flag("user-pw", msgPrinter.Sprintf("Horizon Exchange user credentials to query exchange resources. Specify it when you want to automatically fetch the missing dependent services from the Exchange. The default is HZN_EXCHANGE_USER_AUTH environment variable. If you don't prepend it with the user's org, it will automatically be prepended with the value of the HZN_ORG_ID environment variable.")).Short('u').PlaceHolder("USER:PW").String()
envCmd := app.Command("env", msgPrinter.Sprintf("Show the Horizon Environment Variables."))
eventlogCmd := app.Command("eventlog | ev", msgPrinter.Sprintf("List the event logs for the current or all registrations.")).Alias("ev").Alias("eventlog")
eventlogListCmd := eventlogCmd.Command("list | ls", msgPrinter.Sprintf("List the event logs for the current or all registrations.")).Alias("ls").Alias("list")
listTail := eventlogListCmd.Flag("tail", msgPrinter.Sprintf("Continuously polls the event log to display the most recent records, similar to tail -F behavior.")).Short('f').Bool()
listAllEventlogs := eventlogListCmd.Flag("all", msgPrinter.Sprintf("List all the event logs including the previous registrations.")).Short('a').Bool()
listDetailedEventlogs := eventlogListCmd.Flag("long", msgPrinter.Sprintf("List event logs with details.")).Short('l').Bool()
listSelectedEventlogs := eventlogListCmd.Flag("select", msgPrinter.Sprintf("Selection string. This flag can be repeated which means 'AND'. Each flag should be in the format of attribute=value, attribute~value, \"attribute>value\" or \"attribute<value\", where '~' means contains. The common attribute names are timestamp, severity, message, event_code, source_type, agreement_id, service_url etc. Use the '-l' flag to see all the attribute names.")).Short('s').Strings()
surfaceErrorsEventlogs := eventlogCmd.Command("surface | sf", msgPrinter.Sprintf("List all the active errors that will be shared with the Exchange if the node is online.")).Alias("sf").Alias("surface")
surfaceErrorsEventlogsLong := surfaceErrorsEventlogs.Flag("long", msgPrinter.Sprintf("List the full event logs of the surface errors.")).Short('l').Bool()
exchangeCmd := app.Command("exchange | ex", msgPrinter.Sprintf("List and manage Horizon Exchange resources.")).Alias("ex").Alias("exchange")
exOrg := exchangeCmd.Flag("org", msgPrinter.Sprintf("The Horizon exchange organization ID. If not specified, HZN_ORG_ID will be used as a default.")).Short('o').String()
exUserPw := exchangeCmd.Flag("user-pw", msgPrinter.Sprintf("Horizon Exchange user credentials to query and create exchange resources. If not specified, HZN_EXCHANGE_USER_AUTH will be used as a default. If you don't prepend it with the user's org, it will automatically be prepended with the -o value. As an alternative to using -o, you can set HZN_ORG_ID with the Horizon exchange organization ID")).Short('u').PlaceHolder("USER:PW").String()
exAgbotCmd := exchangeCmd.Command("agbot", msgPrinter.Sprintf("List and manage agbots in the Horizon Exchange"))
exAgbotAddPolCmd := exAgbotCmd.Command("adddeploymentpol | addpo", msgPrinter.Sprintf("Add this deployment policy to the list of policies this agbot is serving. Currently only support adding all the deployment policies from an organization.")).Alias("addbusinesspol").Alias("addpo").Alias("adddeploymentpol")
exAgbotAPolAg := exAgbotAddPolCmd.Arg("agbot", msgPrinter.Sprintf("The agbot to add the deployment policy to.")).Required().String()
exAgbotAPPolOrg := exAgbotAddPolCmd.Arg("policyorg", msgPrinter.Sprintf("The organization of the deployment policy to add.")).Required().String()
exAgbotAddPatCmd := exAgbotCmd.Command("addpattern | addpa", msgPrinter.Sprintf("Add this pattern to the list of patterns this agbot is serving.")).Alias("addpa").Alias("addpattern")
exAgbotAP := exAgbotAddPatCmd.Arg("agbot", msgPrinter.Sprintf("The agbot to add the pattern to.")).Required().String()
exAgbotAPPatOrg := exAgbotAddPatCmd.Arg("patternorg", msgPrinter.Sprintf("The organization of the pattern to add.")).Required().String()
exAgbotAPPat := exAgbotAddPatCmd.Arg("pattern", msgPrinter.Sprintf("The name of the pattern to add.")).Required().String()
exAgbotAPNodeOrg := exAgbotAddPatCmd.Arg("nodeorg", msgPrinter.Sprintf("The organization of the nodes that should be searched. Defaults to patternorg.")).String()
exAgbotListCmd := exAgbotCmd.Command("list | ls", msgPrinter.Sprintf("Display the agbot resources from the Horizon Exchange.")).Alias("ls").Alias("list")
exAgbot := exAgbotListCmd.Arg("agbot", msgPrinter.Sprintf("List just this one agbot.")).String()
exAgbotLong := exAgbotListCmd.Flag("long", msgPrinter.Sprintf("When listing all of the agbots, show the entire resource of each agbots, instead of just the name.")).Short('l').Bool()
exAgbotListPolicyCmd := exAgbotCmd.Command("listdeploymentpol | lspo", msgPrinter.Sprintf("Display the deployment policies that this agbot is serving.")).Alias("listbusinesspol").Alias("lspo").Alias("listdeploymentpol")
exAgbotPol := exAgbotListPolicyCmd.Arg("agbot", msgPrinter.Sprintf("The agbot to list serving deployment policies for.")).Required().String()
exAgbotListPatsCmd := exAgbotCmd.Command("listpattern | lspa", msgPrinter.Sprintf("Display the patterns that this agbot is serving.")).Alias("lspa").Alias("listpattern")
exAgbotLP := exAgbotListPatsCmd.Arg("agbot", msgPrinter.Sprintf("The agbot to list the patterns for.")).Required().String()
exAgbotLPPatOrg := exAgbotListPatsCmd.Arg("patternorg", msgPrinter.Sprintf("The organization of the 1 pattern to list.")).String()
exAgbotLPPat := exAgbotListPatsCmd.Arg("pattern", msgPrinter.Sprintf("The name of the 1 pattern to list.")).String()
exAgbotLPNodeOrg := exAgbotListPatsCmd.Arg("nodeorg", msgPrinter.Sprintf("The organization of the nodes that should be searched. Defaults to patternorg.")).String()
exAgbotDelPolCmd := exAgbotCmd.Command("removedeploymentpol | rmpo", msgPrinter.Sprintf("Remove this deployment policy from the list of policies this agbot is serving. Currently only support removing all the deployment policies from an organization.")).Alias("removebusinesspol").Alias("rmpo").Alias("removedeploymentpol")
exAgbotDPolAg := exAgbotDelPolCmd.Arg("agbot", msgPrinter.Sprintf("The agbot to remove the deployment policy from.")).Required().String()
exAgbotDPPolOrg := exAgbotDelPolCmd.Arg("policyorg", msgPrinter.Sprintf("The organization of the deployment policy to remove.")).Required().String()
exAgbotDelPatCmd := exAgbotCmd.Command("removepattern | rmpa", msgPrinter.Sprintf("Remove this pattern from the list of patterns this agbot is serving.")).Alias("rmpa").Alias("removepattern")
exAgbotDP := exAgbotDelPatCmd.Arg("agbot", msgPrinter.Sprintf("The agbot to remove the pattern from.")).Required().String()
exAgbotDPPatOrg := exAgbotDelPatCmd.Arg("patternorg", msgPrinter.Sprintf("The organization of the pattern to remove.")).Required().String()
exAgbotDPPat := exAgbotDelPatCmd.Arg("pattern", msgPrinter.Sprintf("The name of the pattern to remove.")).Required().String()
exAgbotDPNodeOrg := exAgbotDelPatCmd.Arg("nodeorg", msgPrinter.Sprintf("The organization of the nodes that should be searched. Defaults to patternorg.")).String()
exCatalogCmd := exchangeCmd.Command("catalog | cat", msgPrinter.Sprintf("List all public services/patterns in all orgs that have orgType: IBM.")).Alias("cat").Alias("catalog")
exCatalogPatternListCmd := exCatalogCmd.Command("patternlist | pat", msgPrinter.Sprintf("Display all public patterns in all orgs that have orgType: IBM. ")).Alias("pat").Alias("patternlist")
exCatalogPatternListShort := exCatalogPatternListCmd.Flag("short", msgPrinter.Sprintf("Only display org (IBM) and pattern names.")).Short('s').Bool()
exCatalogPatternListLong := exCatalogPatternListCmd.Flag("long", msgPrinter.Sprintf("Display detailed output about public patterns in all orgs that have orgType: IBM.")).Short('l').Bool()
exCatalogServiceListCmd := exCatalogCmd.Command("servicelist | serv", msgPrinter.Sprintf("Display all public services in all orgs that have orgType: IBM.")).Alias("serv").Alias("servicelist")
exCatalogServiceListShort := exCatalogServiceListCmd.Flag("short", msgPrinter.Sprintf("Only display org (IBM) and service names.")).Short('s').Bool()
exCatalogServiceListLong := exCatalogServiceListCmd.Flag("long", msgPrinter.Sprintf("Display detailed output about public services in all orgs that have orgType: IBM.")).Short('l').Bool()
exBusinessCmd := exchangeCmd.Command("deployment | dep", msgPrinter.Sprintf("List and manage deployment policies in the Horizon Exchange.")).Alias("business").Alias("dep").Alias("deployment")
exBusinessAddPolicyCmd := exBusinessCmd.Command("addpolicy | addp", msgPrinter.Sprintf("Add or replace a deployment policy in the Horizon Exchange. Use 'hzn exchange deployment new' for an empty deployment policy template.")).Alias("addp").Alias("addpolicy")
exBusinessAddPolicyIdTok := exBusinessAddPolicyCmd.Flag("id-token", msgPrinter.Sprintf("The Horizon ID and password of the user.")).Short('n').PlaceHolder("ID:TOK").String()
exBusinessAddPolicyPolicy := exBusinessAddPolicyCmd.Arg("policy", msgPrinter.Sprintf("The name of the deployment policy to add or overwrite.")).Required().String()
exBusinessAddPolicyJsonFile := exBusinessAddPolicyCmd.Flag("json-file", msgPrinter.Sprintf("The path of a JSON file containing the metadata necessary to create/update the service policy in the Horizon Exchange. Specify -f- to read from stdin.")).Short('f').Required().String()
exBusinessAddPolNoConstraint := exBusinessAddPolicyCmd.Flag("no-constraints", msgPrinter.Sprintf("Allow this deployment policy to be published even though it does not have any constraints.")).Bool()
exBusinessListPolicyCmd := exBusinessCmd.Command("listpolicy | ls", msgPrinter.Sprintf("Display the deployment policies from the Horizon Exchange.")).Alias("ls").Alias("listpolicy")
exBusinessListPolicyIdTok := exBusinessListPolicyCmd.Flag("id-token", msgPrinter.Sprintf("The Horizon ID and password of the user.")).Short('n').PlaceHolder("ID:TOK").String()
exBusinessListPolicyLong := exBusinessListPolicyCmd.Flag("long", msgPrinter.Sprintf("Display detailed output about the deployment policies.")).Short('l').Bool()
exBusinessListPolicyPolicy := exBusinessListPolicyCmd.Arg("policy", msgPrinter.Sprintf("List just this one deployment policy. Use <org>/<policy> to specify a public policy in another org, or <org>/ to list all of the public policies in another org.")).String()
exBusinessNewPolicyCmd := exBusinessCmd.Command("new", msgPrinter.Sprintf("Display an empty deployment policy template that can be filled in."))
exBusinessRemovePolicyCmd := exBusinessCmd.Command("removepolicy | rmp", msgPrinter.Sprintf("Remove the deployment policy in the Horizon Exchange.")).Alias("rmp").Alias("removepolicy")
exBusinessRemovePolicyIdTok := exBusinessRemovePolicyCmd.Flag("id-token", msgPrinter.Sprintf("The Horizon ID and password of the user.")).Short('n').PlaceHolder("ID:TOK").String()
exBusinessRemovePolicyForce := exBusinessRemovePolicyCmd.Flag("force", msgPrinter.Sprintf("Skip the 'are you sure?' prompt.")).Short('f').Bool()
exBusinessRemovePolicyPolicy := exBusinessRemovePolicyCmd.Arg("policy", msgPrinter.Sprintf("The name of the deployment policy to be removed.")).Required().String()
exBusinessUpdatePolicyCmd := exBusinessCmd.Command("updatepolicy | upp", msgPrinter.Sprintf("Update one attribute of an existing deployment policy in the Horizon Exchange. The supported attributes are the top level attributes in the policy definition as shown by the command 'hzn exchange deployment new'.")).Alias("upp").Alias("updatepolicy")
exBusinessUpdatePolicyIdTok := exBusinessUpdatePolicyCmd.Flag("id-token", msgPrinter.Sprintf("The Horizon ID and password of the user.")).Short('n').PlaceHolder("ID:TOK").String()
exBusinessUpdatePolicyPolicy := exBusinessUpdatePolicyCmd.Arg("policy", msgPrinter.Sprintf("The name of the policy to be updated in the Horizon Exchange.")).Required().String()
exBusinessUpdatePolicyJsonFile := exBusinessUpdatePolicyCmd.Flag("json-file", msgPrinter.Sprintf("The path to the json file containing the updated deployment policy attribute to be changed in the Horizon Exchange. Specify -f- to read from stdin.")).Short('f').Required().String()
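// Node management policy (NMP) commands. Example usage (illustrative; the NMP
// name and JSON file path below are placeholders):
//   hzn exchange nmp add -f nmp.json my-nmp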
exNMPCmd := exchangeCmd.Command("nmp", msgPrinter.Sprintf("List and manage node management policies in the Horizon Exchange."))
exNMPListCmd := exNMPCmd.Command("list | ls", msgPrinter.Sprintf("Display the node management policies from the Horizon Exchange.")).Alias("ls").Alias("list")
exNMPListName := exNMPListCmd.Arg("nmp-name", msgPrinter.Sprintf("List just this one node management policy.")).String()
exNMPListIdTok := exNMPListCmd.Flag("id-token", msgPrinter.Sprintf("The Horizon ID and password of the user.")).Short('n').PlaceHolder("ID:TOK").String()
exNMPListLong := exNMPListCmd.Flag("long", msgPrinter.Sprintf("Display detailed output about the node management policies.")).Short('l').Bool()
exNMPListNodes := exNMPListCmd.Flag("nodes", msgPrinter.Sprintf("List all the nodes that apply for the given node management policy.")).Bool()
exNMPAddCmd := exNMPCmd.Command("add", msgPrinter.Sprintf("Add or replace a node management policy in the Horizon Exchange. Use 'hzn exchange nmp new' for an empty node management policy template."))
exNMPAddIdTok := exNMPAddCmd.Flag("id-token", msgPrinter.Sprintf("The Horizon ID and password of the user.")).Short('n').PlaceHolder("ID:TOK").String()
exNMPAddAppliesTo := exNMPAddCmd.Flag("appliesTo", msgPrinter.Sprintf("List all the nodes that will be compatible with this node management policy. Use this flag with --dry-run to list nodes without publishing the policy to the Exchange.")).Bool()
exNMPAddName := exNMPAddCmd.Arg("nmp-name", msgPrinter.Sprintf("The name of the node management policy to add or overwrite.")).Required().String()
exNMPAddJsonFile := exNMPAddCmd.Flag("json-file", msgPrinter.Sprintf("The path of a JSON file containing the metadata necessary to create/update the node management policy in the Horizon Exchange. Specify -f- to read from stdin.")).Short('f').Required().String()
exNMPAddNoConstraint := exNMPAddCmd.Flag("no-constraints", msgPrinter.Sprintf("Allow this node management policy to be published even though it does not have any constraints.")).Bool()
exNMPNewCmd := exNMPCmd.Command("new", msgPrinter.Sprintf("Display an empty node management policy template that can be filled in."))
exNMPRemoveCmd := exNMPCmd.Command("remove | rm", msgPrinter.Sprintf("Remove the node management policy in the Horizon Exchange.")).Alias("rm").Alias("remove")
exNMPRemoveName := exNMPRemoveCmd.Arg("nmp-name", msgPrinter.Sprintf("The name of the node management policy to be removed.")).Required().String()
exNMPRemoveIdTok := exNMPRemoveCmd.Flag("id-token", msgPrinter.Sprintf("The Horizon ID and password of the user.")).Short('n').PlaceHolder("ID:TOK").String()
exNMPRemoveForce := exNMPRemoveCmd.Flag("force", msgPrinter.Sprintf("Skip the 'are you sure?' prompt.")).Short('f').Bool()
// exNMPStatusCmd := exNMPCmd.Command("status", msgPrinter.Sprintf("List the status of a given node management policy."))
// exNMPStatusListName := exNMPStatusCmd.Arg("nmp-name", msgPrinter.Sprintf("The name of the node management policy to check.")).Required().String()
// exNMPStatusListIdTok := exNMPStatusCmd.Flag("id-token", msgPrinter.Sprintf("The Horizon ID and password of the user.")).Short('n').PlaceHolder("ID:TOK").String()
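// Exchange node commands. Example usage (illustrative; the node id, token,
// architecture and name below are placeholders):
//   hzn exchange node create -n mynode:mytoken -a amd64 -m "my edge node"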
exNodeCmd := exchangeCmd.Command("node", msgPrinter.Sprintf("List and manage nodes in the Horizon Exchange"))
exNodeAddPolicyCmd := exNodeCmd.Command("addpolicy | addp", msgPrinter.Sprintf("Add or replace the node policy in the Horizon Exchange.")).Alias("addp").Alias("addpolicy")
exNodeAddPolicyIdTok := exNodeAddPolicyCmd.Flag("node-id-tok", msgPrinter.Sprintf("The Horizon Exchange node ID and token to be used as credentials to query and modify the node resources if -u flag is not specified. HZN_EXCHANGE_NODE_AUTH will be used as a default for -n. If you don't prepend it with the node's org, it will automatically be prepended with the -o value.")).Short('n').PlaceHolder("ID:TOK").String()
exNodeAddPolicyNode := exNodeAddPolicyCmd.Arg("node", msgPrinter.Sprintf("Add or replace policy for this node.")).Required().String()
exNodeAddPolicyJsonFile := exNodeAddPolicyCmd.Flag("json-file", msgPrinter.Sprintf("The path of a JSON file containing the metadata necessary to create/update the node policy in the Horizon exchange. Specify -f- to read from stdin. A node policy contains the 'deployment' and 'management' attributes. Please use 'hzn policy new' to see the node policy format.")).Short('f').Required().String()
exNodeCreateCmd := exNodeCmd.Command("create | cr", msgPrinter.Sprintf("Create the node resource in the Horizon Exchange.")).Alias("cr").Alias("create")
exNodeCreateNodeIdTok := exNodeCreateCmd.Flag("node-id-tok", msgPrinter.Sprintf("The Horizon Exchange node ID and token to be created. The node ID must be unique within the organization.")).Short('n').PlaceHolder("ID:TOK").String()
exNodeCreateNodeArch := exNodeCreateCmd.Flag("arch", msgPrinter.Sprintf("Your node architecture. If not specified, architecture will be left blank.")).Short('a').String()
exNodeCreateNodeName := exNodeCreateCmd.Flag("name", msgPrinter.Sprintf("The name of your node")).Short('m').String()
exNodeCreateNodeType := exNodeCreateCmd.Flag("node-type", msgPrinter.Sprintf("The type of your node. The valid values are: device, cluster. If omitted, the default is device. However, if the node already exists, the node type stays unchanged and only the node token will be updated.")).Short('T').Default("device").String()
exNodeCreateNode := exNodeCreateCmd.Arg("node", msgPrinter.Sprintf("The node to be created.")).String()
exNodeCreateToken := exNodeCreateCmd.Arg("token", msgPrinter.Sprintf("The token the new node should have.")).String()
exNodeConfirmCmd := exNodeCmd.Command("confirm | con", msgPrinter.Sprintf("Check to see if the specified node and token are valid in the Horizon Exchange.")).Alias("con").Alias("confirm")
exNodeConfirmNodeIdTok := exNodeConfirmCmd.Flag("node-id-tok", msgPrinter.Sprintf("The Horizon exchange node ID and token to be checked. If not specified, HZN_EXCHANGE_NODE_AUTH will be used as a default. Mutually exclusive with <node> and <token> arguments.")).Short('n').PlaceHolder("ID:TOK").String()
exNodeConfirmNode := exNodeConfirmCmd.Arg("node", msgPrinter.Sprintf("The node id to be checked. Mutually exclusive with -n flag.")).String()
exNodeConfirmToken := exNodeConfirmCmd.Arg("token", msgPrinter.Sprintf("The token for the node. Mutually exclusive with -n flag.")).String()
exNodeListCmd := exNodeCmd.Command("list | ls", msgPrinter.Sprintf("Display the node resources from the Horizon Exchange.")).Alias("ls").Alias("list")
exNode := exNodeListCmd.Arg("node", msgPrinter.Sprintf("List just this one node.")).String()
exNodeListNodeIdTok := exNodeListCmd.Flag("node-id-tok", msgPrinter.Sprintf("The Horizon Exchange node ID and token to be used as credentials to query and modify the node resources if -u flag is not specified. HZN_EXCHANGE_NODE_AUTH will be used as a default for -n. If you don't prepend it with the node's org, it will automatically be prepended with the -o value.")).Short('n').PlaceHolder("ID:TOK").String()
exNodeLong := exNodeListCmd.Flag("long", msgPrinter.Sprintf("When listing all of the nodes, show the entire resource of each node, instead of just the name.")).Short('l').Bool()
exNodeErrorsList := exNodeCmd.Command("listerrors | lse", msgPrinter.Sprintf("List the node errors currently surfaced to the Exchange.")).Alias("lse").Alias("listerrors")
exNodeErrorsListIdTok := exNodeErrorsList.Flag("node-id-tok", msgPrinter.Sprintf("The Horizon Exchange node ID and token to be used as credentials to query and modify the node resources if -u flag is not specified. HZN_EXCHANGE_NODE_AUTH will be used as a default for -n. If you don't prepend it with the node's org, it will automatically be prepended with the -o value.")).Short('n').PlaceHolder("ID:TOK").String()
exNodeErrorsListNode := exNodeErrorsList.Arg("node", msgPrinter.Sprintf("List surfaced errors for this node.")).Required().String()
exNodeErrorsListLong := exNodeErrorsList.Flag("long", msgPrinter.Sprintf("Show the full eventlog object of the errors currently surfaced to the Exchange.")).Short('l').Bool()
exNodeListPolicyCmd := exNodeCmd.Command("listpolicy | lsp", msgPrinter.Sprintf("Display the node policy from the Horizon Exchange.")).Alias("lsp").Alias("listpolicy")
exNodeListPolicyIdTok := exNodeListPolicyCmd.Flag("node-id-tok", msgPrinter.Sprintf("The Horizon Exchange node ID and token to be used as credentials to query and modify the node resources if -u flag is not specified. HZN_EXCHANGE_NODE_AUTH will be used as a default for -n. If you don't prepend it with the node's org, it will automatically be prepended with the -o value.")).Short('n').PlaceHolder("ID:TOK").String()
exNodeListPolicyNode := exNodeListPolicyCmd.Arg("node", msgPrinter.Sprintf("List policy for this node.")).Required().String()
exNodeManagementCmd := exNodeCmd.Command("management | mgmt", msgPrinter.Sprintf("List the node management policies that are compatible with the node.")).Alias("mgmt").Alias("management")
exNodeManagementListCmd := exNodeManagementCmd.Command("list | ls", msgPrinter.Sprintf("List the compatible node management policies for the node. Only policies that are enabled will be displayed unless the -a flag is specified.")).Alias("ls").Alias("list")
exNodeManagementListName := exNodeManagementListCmd.Arg("node", msgPrinter.Sprintf("List node management policies for this node")).Required().String()
exNodeManagementListNodeIdTok := exNodeManagementListCmd.Flag("node-id-tok", msgPrinter.Sprintf("The Horizon Exchange node ID and token to be used as credentials to query and modify the node resources if -u flag is not specified. HZN_EXCHANGE_NODE_AUTH will be used as a default for -n. If you don't prepend it with the node's org, it will automatically be prepended with the -o value.")).Short('n').PlaceHolder("ID:TOK").String()
exNodManagementListNMPsAll := exNodeManagementListCmd.Flag("all", msgPrinter.Sprintf("Include disabled NMPs.")).Short('a').Bool()
exNodeStatusList := exNodeCmd.Command("liststatus | lst", msgPrinter.Sprintf("List the run-time status of the node.")).Alias("lst").Alias("liststatus")
exNodeStatusIdTok := exNodeStatusList.Flag("node-id-tok", msgPrinter.Sprintf("The Horizon Exchange node ID and token to be used as credentials to query and modify the node resources if -u flag is not specified. HZN_EXCHANGE_NODE_AUTH will be used as a default for -n. If you don't prepend it with the node's org, it will automatically be prepended with the -o value.")).Short('n').PlaceHolder("ID:TOK").String()
exNodeStatusListNode := exNodeStatusList.Arg("node", msgPrinter.Sprintf("List status for this node")).Required().String()
exNodeDelCmd := exNodeCmd.Command("remove | rm", msgPrinter.Sprintf("Remove a node resource from the Horizon Exchange. Do NOT do this when an edge node is registered with this node id.")).Alias("rm").Alias("remove")
exNodeRemoveNodeIdTok := exNodeDelCmd.Flag("node-id-tok", msgPrinter.Sprintf("The Horizon Exchange node ID and token to be used as credentials to query and modify the node resources if -u flag is not specified. HZN_EXCHANGE_NODE_AUTH will be used as a default for -n. If you don't prepend it with the node's org, it will automatically be prepended with the -o value.")).Short('n').PlaceHolder("ID:TOK").String()
exDelNode := exNodeDelCmd.Arg("node", msgPrinter.Sprintf("The node to remove.")).Required().String()
exNodeDelForce := exNodeDelCmd.Flag("force", msgPrinter.Sprintf("Skip the 'are you sure?' prompt.")).Short('f').Bool()
exNodeRemovePolicyCmd := exNodeCmd.Command("removepolicy | rmp", msgPrinter.Sprintf("Remove the node policy in the Horizon Exchange.")).Alias("rmp").Alias("removepolicy")
exNodeRemovePolicyIdTok := exNodeRemovePolicyCmd.Flag("node-id-tok", msgPrinter.Sprintf("The Horizon Exchange node ID and token to be used as credentials to query and modify the node resources if -u flag is not specified. HZN_EXCHANGE_NODE_AUTH will be used as a default for -n. If you don't prepend it with the node's org, it will automatically be prepended with the -o value.")).Short('n').PlaceHolder("ID:TOK").String()
exNodeRemovePolicyNode := exNodeRemovePolicyCmd.Arg("node", msgPrinter.Sprintf("Remove policy for this node.")).Required().String()
exNodeRemovePolicyForce := exNodeRemovePolicyCmd.Flag("force", msgPrinter.Sprintf("Skip the 'are you sure?' prompt.")).Short('f').Bool()
exNodeSetTokCmd := exNodeCmd.Command("settoken", msgPrinter.Sprintf("Change the token of a node resource in the Horizon Exchange."))
exNodeSetTokNode := exNodeSetTokCmd.Arg("node", msgPrinter.Sprintf("The node to be changed.")).Required().String()
exNodeSetTokToken := exNodeSetTokCmd.Arg("token", msgPrinter.Sprintf("The new token for the node.")).Required().String()
exNodeSetTokNodeIdTok := exNodeSetTokCmd.Flag("node-id-tok", msgPrinter.Sprintf("The Horizon Exchange node ID and token to be used as credentials to query and modify the node resources if -u flag is not specified. HZN_EXCHANGE_NODE_AUTH will be used as a default for -n. If you don't prepend it with the node's org, it will automatically be prepended with the -o value.")).Short('n').PlaceHolder("ID:TOK").String()
exNodeUpdateCmd := exNodeCmd.Command("update | up", msgPrinter.Sprintf("Update an attribute of the node in the Horizon Exchange.")).Alias("up").Alias("update")
exNodeUpdateNode := exNodeUpdateCmd.Arg("node", msgPrinter.Sprintf("The node to be updated.")).Required().String()
exNodeUpdateIdTok := exNodeUpdateCmd.Flag("node-id-tok", msgPrinter.Sprintf("The Horizon Exchange node ID and token to be used as credentials to query and modify the node resources if -u flag is not specified. HZN_EXCHANGE_NODE_AUTH will be used as a default for -n. If you don't prepend it with the node's org, it will automatically be prepended with the -o value.")).Short('n').PlaceHolder("ID:TOK").String()
exNodeUpdateJsonFile := exNodeUpdateCmd.Flag("json-file", msgPrinter.Sprintf("The path to a json file containing the changed attribute to be updated in the Horizon Exchange. Specify -f- to read from stdin.")).Short('f').Required().String()
exNodeUpdatePolicyCmd := exNodeCmd.Command("updatepolicy | upp", msgPrinter.Sprintf("(DEPRECATED) This command is deprecated. Please use 'hzn exchange node addpolicy' to update the node policy. This command is used to update either the node policy properties or the constraints, but not both.")).Alias("upp").Alias("updatepolicy")
exNodeUpdatePolicyNode := exNodeUpdatePolicyCmd.Arg("node", msgPrinter.Sprintf("Update the policy for this node.")).Required().String()
exNodeUpdatePolicyIdTok := exNodeUpdatePolicyCmd.Flag("node-id-tok", msgPrinter.Sprintf("The Horizon Exchange node ID and token to be used as credentials to query and modify the node resources if -u flag is not specified. HZN_EXCHANGE_NODE_AUTH will be used as a default for -n. If you don't prepend it with the node's org, it will automatically be prepended with the -o value.")).Short('n').PlaceHolder("ID:TOK").String()
exNodeUpdatePolicyJsonFile := exNodeUpdatePolicyCmd.Flag("json-file", msgPrinter.Sprintf("The path of a JSON file containing the new constraints or properties (not both) for the node policy in the Horizon Exchange. Specify -f- to read from stdin.")).Short('f').Required().String()
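// Organization commands. Example usage (illustrative; the org name, description
// and node limit below are placeholders):
//   hzn exchange org create -d "Example organization" --max-nodes 100 myorg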
exOrgCmd := exchangeCmd.Command("org", msgPrinter.Sprintf("List and manage organizations in the Horizon Exchange."))
exOrgCreateCmd := exOrgCmd.Command("create | cr", msgPrinter.Sprintf("Create the organization resource in the Horizon Exchange.")).Alias("cr").Alias("create")
exOrgCreateOrg := exOrgCreateCmd.Arg("org", msgPrinter.Sprintf("Create this organization and assign it to an agbot.")).Required().String()
exOrgCreateLabel := exOrgCreateCmd.Flag("label", msgPrinter.Sprintf("Label for new organization.")).Short('l').String()
exOrgCreateDesc := exOrgCreateCmd.Flag("description", msgPrinter.Sprintf("Description for new organization.")).Short('d').Required().String()
exOrgCreateTags := exOrgCreateCmd.Flag("tag", msgPrinter.Sprintf("Tag for new organization. The format is mytag1=myvalue1. This flag can be repeated to specify multiple tags.")).Short('t').Strings()
exOrgCreateHBMin := exOrgCreateCmd.Flag("heartbeatmin", msgPrinter.Sprintf("The minimum number of seconds between agent heartbeats to the Exchange.")).Int()
exOrgCreateHBMax := exOrgCreateCmd.Flag("heartbeatmax", msgPrinter.Sprintf("The maximum number of seconds between agent heartbeats to the Exchange. During periods of inactivity, the agent will increase the interval between heartbeats by increments of --heartbeatadjust.")).Int()
exOrgCreateHBAdjust := exOrgCreateCmd.Flag("heartbeatadjust", msgPrinter.Sprintf("The number of seconds to increment the agent's heartbeat interval.")).Int()
exOrgCreateMaxNodes := exOrgCreateCmd.Flag("max-nodes", msgPrinter.Sprintf("The maximum number of nodes this organization is allowed to have. The value cannot exceed the Exchange global limit. The default is 0 which means no organization limit.")).Int()
exOrgCreateAddToAgbot := exOrgCreateCmd.Flag("agbot", msgPrinter.Sprintf("Add the organization to this agbot so that it will be responsible for deploying services in this org. The agbot will deploy services to nodes in this org, using the patterns and deployment policies in this org. If omitted, the first agbot found in the exchange will become responsible for this org. The format is 'agbot_org/agbot_id'.")).Short('a').String()
exOrgListCmd := exOrgCmd.Command("list | ls", msgPrinter.Sprintf("Display the organization resource from the Horizon Exchange. (Normally you can only display your own organization. If the org does not exist, you will get an invalid credentials error.)")).Alias("ls").Alias("list")
exOrgListOrg := exOrgListCmd.Arg("org", msgPrinter.Sprintf("List this one organization.")).String()
exOrgListLong := exOrgListCmd.Flag("long", msgPrinter.Sprintf("Display detailed information about the organizations.")).Short('l').Bool()
exOrgDelCmd := exOrgCmd.Command("remove | rm", msgPrinter.Sprintf("Remove an organization resource from the Horizon Exchange.")).Alias("rm").Alias("remove")
exOrgDelOrg := exOrgDelCmd.Arg("org", msgPrinter.Sprintf("Remove this organization.")).Required().String()
exOrgDelFromAgbot := exOrgDelCmd.Flag("agbot", msgPrinter.Sprintf("The agbot to remove the deployment policy from. If omitted, the first agbot found in the exchange will be used. The format is 'agbot_org/agbot_id'.")).Short('a').String()
exOrgDelForce := exOrgDelCmd.Flag("force", msgPrinter.Sprintf("Skip the 'are you sure?' prompt.")).Short('f').Bool()
exOrgUpdateCmd := exOrgCmd.Command("update | up", msgPrinter.Sprintf("Update the organization resource in the Horizon Exchange.")).Alias("up").Alias("update")
exOrgUpdateOrg := exOrgUpdateCmd.Arg("org", msgPrinter.Sprintf("Update this organization.")).Required().String()
exOrgUpdateLabel := exOrgUpdateCmd.Flag("label", msgPrinter.Sprintf("New label for organization.")).Short('l').String()
exOrgUpdateDesc := exOrgUpdateCmd.Flag("description", msgPrinter.Sprintf("New description for organization.")).Short('d').String()
exOrgUpdateTags := exOrgUpdateCmd.Flag("tag", msgPrinter.Sprintf("New tag for organization. The format is mytag1=myvalue1. This flag can be repeated to specify multiple tags. Use '-t \"\"' once to remove all the tags.")).Short('t').Strings()
exOrgUpdateHBMin := exOrgUpdateCmd.Flag("heartbeatmin", msgPrinter.Sprintf("New minimum number of seconds between agent heartbeats to the Exchange. The default negative integer -1 means no change to this attribute.")).Default("-1").Int()
exOrgUpdateHBMax := exOrgUpdateCmd.Flag("heartbeatmax", msgPrinter.Sprintf("New maximum number of seconds between agent heartbeats to the Exchange. The default negative integer -1 means no change to this attribute.")).Default("-1").Int()
exOrgUpdateHBAdjust := exOrgUpdateCmd.Flag("heartbeatadjust", msgPrinter.Sprintf("New value for the number of seconds to increment the agent's heartbeat interval. The default negative integer -1 means no change to this attribute.")).Default("-1").Int()
exOrgUpdateMaxNodes := exOrgUpdateCmd.Flag("max-nodes", msgPrinter.Sprintf("The new maximum number of nodes this organization is allowed to have. The value cannot exceed the Exchange global limit. The default negative integer -1 means no change.")).Default("-1").Int()
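// Pattern commands. Example usage (illustrative; the file path and pattern name
// below are placeholders):
//   hzn exchange pattern publish -f pattern.json -p mypattern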
exPatternCmd := exchangeCmd.Command("pattern | pat", msgPrinter.Sprintf("List and manage patterns in the Horizon Exchange")).Alias("pat").Alias("pattern")
exPatternListCmd := exPatternCmd.Command("list | ls", msgPrinter.Sprintf("Display the pattern resources from the Horizon Exchange.")).Alias("ls").Alias("list")
exPatternListNodeIdTok := exPatternListCmd.Flag("node-id-tok", msgPrinter.Sprintf("The Horizon Exchange node ID and token to be used as credentials to query and modify the node resources if -u flag is not specified. HZN_EXCHANGE_NODE_AUTH will be used as a default for -n. If you don't prepend it with the node's org, it will automatically be prepended with the -o value.")).Short('n').PlaceHolder("ID:TOK").String()
exPattern := exPatternListCmd.Arg("pattern", msgPrinter.Sprintf("List just this one pattern. Use <org>/<pat> to specify a public pattern in another org, or <org>/ to list all of the public patterns in another org.")).String()
exPatternLong := exPatternListCmd.Flag("long", msgPrinter.Sprintf("When listing all of the patterns, show the entire resource of each pattern, instead of just the name.")).Short('l').Bool()
exPatternListKeyCmd := exPatternCmd.Command("listkey | lsk", msgPrinter.Sprintf("List the signing public keys/certs for this pattern resource in the Horizon Exchange.")).Alias("lsk").Alias("listkey")
exPatternListKeyNodeIdTok := exPatternListKeyCmd.Flag("node-id-tok", msgPrinter.Sprintf("The Horizon Exchange node ID and token to be used as credentials to query and modify the node resources if -u flag is not specified. HZN_EXCHANGE_NODE_AUTH will be used as a default for -n. If you don't prepend it with the node's org, it will automatically be prepended with the -o value.")).Short('n').PlaceHolder("ID:TOK").String()
exPatListKeyPat := exPatternListKeyCmd.Arg("pattern", msgPrinter.Sprintf("The existing pattern to list the keys for.")).Required().String()
exPatListKeyKey := exPatternListKeyCmd.Arg("key-name", msgPrinter.Sprintf("The existing key name to see the contents of.")).String()
exPatternPublishCmd := exPatternCmd.Command("publish | pub", msgPrinter.Sprintf("Sign and create/update the pattern resource in the Horizon Exchange.")).Alias("pub").Alias("publish")
exPatJsonFile := exPatternPublishCmd.Flag("json-file", msgPrinter.Sprintf("The path of a JSON file containing the metadata necessary to create/update the pattern in the Horizon exchange. See %v/pattern.json. Specify -f- to read from stdin.", sample_dir)).Short('f').Required().String()
exPatKeyFile := exPatternPublishCmd.Flag("private-key-file", msgPrinter.Sprintf("The path of a private key file to be used to sign the pattern. If not specified, the environment variable HZN_PRIVATE_KEY_FILE will be used. If HZN_PRIVATE_KEY_FILE not specified, ~/.hzn/keys/service.private.key will be used. If none are specified, a random key pair will be generated and the public key will be stored with the pattern.")).Short('k').ExistingFile()
exPatPubPubKeyFile := exPatternPublishCmd.Flag("public-key-file", msgPrinter.Sprintf("(DEPRECATED) The path of public key file (that corresponds to the private key) that should be stored with the pattern, to be used by the Horizon Agent to verify the signature. If this flag is not specified, the public key will be calculated from the private key.")).Short('K').ExistingFile()
exPatName := exPatternPublishCmd.Flag("pattern-name", msgPrinter.Sprintf("The name to use for this pattern in the Horizon exchange. If not specified, will default to the base name of the file path specified in -f.")).Short('p').String()
exPatDelCmd := exPatternCmd.Command("remove | rm", msgPrinter.Sprintf("Remove a pattern resource from the Horizon Exchange.")).Alias("rm").Alias("remove")
exDelPat := exPatDelCmd.Arg("pattern", msgPrinter.Sprintf("The pattern to remove.")).Required().String()
exPatDelForce := exPatDelCmd.Flag("force", msgPrinter.Sprintf("Skip the 'are you sure?' prompt.")).Short('f').Bool()
exPatternRemKeyCmd := exPatternCmd.Command("removekey | rmk", msgPrinter.Sprintf("Remove a signing public key/cert for this pattern resource in the Horizon Exchange.")).Alias("rmk").Alias("removekey")
exPatRemKeyPat := exPatternRemKeyCmd.Arg("pattern", msgPrinter.Sprintf("The existing pattern to remove the key from.")).Required().String()
exPatRemKeyKey := exPatternRemKeyCmd.Arg("key-name", msgPrinter.Sprintf("The existing key name to remove.")).Required().String()
exPatUpdateCmd := exPatternCmd.Command("update | up", msgPrinter.Sprintf("Update an attribute of the pattern in the Horizon Exchange.")).Alias("up").Alias("update")
exPatUpdateNodeIdTok := exPatUpdateCmd.Flag("node-id-tok", msgPrinter.Sprintf("The Horizon Exchange node ID and token to be used as credentials to query and modify the node resources if -u flag is not specified. HZN_EXCHANGE_NODE_AUTH will be used as a default for -n. If you don't prepend it with the node's org, it will automatically be prepended with the -o value.")).Short('n').PlaceHolder("ID:TOK").String()
exPatUpdatePattern := exPatUpdateCmd.Arg("pattern", msgPrinter.Sprintf("The name of the pattern to be updated in the Horizon Exchange.")).Required().String()
exPatUpdateJsonFile := exPatUpdateCmd.Flag("json-file", msgPrinter.Sprintf("The path to a json file containing the updated attribute of the pattern to be put in the Horizon Exchange. Specify -f- to read from stdin.")).Short('f').Required().String()
exPatternVerifyCmd := exPatternCmd.Command("verify | vf", msgPrinter.Sprintf("Verify the signatures of a pattern resource in the Horizon Exchange.")).Alias("vf").Alias("verify")
exVerPattern := exPatternVerifyCmd.Arg("pattern", msgPrinter.Sprintf("The pattern to verify.")).Required().String()
exPatternVerifyNodeIdTok := exPatternVerifyCmd.Flag("node-id-tok", msgPrinter.Sprintf("The Horizon Exchange node ID and token to be used as credentials to query and modify the node resources if -u flag is not specified. HZN_EXCHANGE_NODE_AUTH will be used as a default for -n. If you don't prepend it with the node's org, it will automatically be prepended with the -o value.")).Short('n').PlaceHolder("ID:TOK").String()
exPatPubKeyFile := exPatternVerifyCmd.Flag("public-key-file", msgPrinter.Sprintf("The path of a pem public key file to be used to verify the pattern. If not specified, the environment variable HZN_PUBLIC_KEY_FILE will be used. If none of them are set, ~/.hzn/keys/service.public.pem is the default.")).Short('k').String()
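// Service commands. Example usage (illustrative; the file path and registry
// credentials below are placeholders):
//   hzn exchange service publish -f service.json -r "myregistry.example.com:myuser:mytoken"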
exServiceCmd := exchangeCmd.Command("service | serv", msgPrinter.Sprintf("List and manage services in the Horizon Exchange")).Alias("serv").Alias("service")
exServiceAddPolicyCmd := exServiceCmd.Command("addpolicy | addp", msgPrinter.Sprintf("Add or replace the service policy in the Horizon Exchange.")).Alias("addp").Alias("addpolicy")
exServiceAddPolicyIdTok := exServiceAddPolicyCmd.Flag("service-id-tok", msgPrinter.Sprintf("The Horizon Exchange ID and password of the user.")).Short('n').PlaceHolder("ID:TOK").String()
exServiceAddPolicyService := exServiceAddPolicyCmd.Arg("service", msgPrinter.Sprintf("Add or replace policy for this service.")).Required().String()
exServiceAddPolicyJsonFile := exServiceAddPolicyCmd.Flag("json-file", msgPrinter.Sprintf("The path of a JSON file containing the metadata necessary to create/update the service policy in the Horizon Exchange. Specify -f- to read from stdin.")).Short('f').Required().String()
exServiceListCmd := exServiceCmd.Command("list | ls", msgPrinter.Sprintf("Display the service resources from the Horizon Exchange.")).Alias("ls").Alias("list")
exService := exServiceListCmd.Arg("service", msgPrinter.Sprintf("List just this one service. Use <org>/<svc> to specify a public service in another org, or <org>/ to list all of the public services in another org.")).String()
exServiceListNodeIdTok := exServiceListCmd.Flag("node-id-tok", msgPrinter.Sprintf("The Horizon Exchange node ID and token to be used as credentials to query and modify the node resources if -u flag is not specified. HZN_EXCHANGE_NODE_AUTH will be used as a default for -n. If you don't prepend it with the node's org, it will automatically be prepended with the -o value.")).Short('n').PlaceHolder("ID:TOK").String()
exServiceLong := exServiceListCmd.Flag("long", msgPrinter.Sprintf("When listing all of the services, show the entire service definition, instead of just the name. When listing a specific service, show more details.")).Short('l').Bool()
exSvcOpYamlFilePath := exServiceListCmd.Flag("op-yaml-file", msgPrinter.Sprintf("The name of the file where the cluster deployment operator yaml archive will be saved. This flag is only used when listing a specific service. This flag is ignored when the service does not have a clusterDeployment attribute.")).Short('f').String()
exSvcOpYamlForce := exServiceListCmd.Flag("force", msgPrinter.Sprintf("Skip the 'do you want to overwrite?' prompt when -f is specified and the file exists.")).Short('F').Bool()
exServiceListAuthCmd := exServiceCmd.Command("listauth | lsau", msgPrinter.Sprintf("List the docker auth tokens for this service resource in the Horizon Exchange.")).Alias("lsau").Alias("listauth")
exSvcListAuthSvc := exServiceListAuthCmd.Arg("service", msgPrinter.Sprintf("The existing service to list the docker auths for.")).Required().String()
exSvcListAuthId := exServiceListAuthCmd.Arg("auth-name", msgPrinter.Sprintf("The existing docker auth id to see the contents of.")).Uint()
exServiceListAuthNodeIdTok := exServiceListAuthCmd.Flag("node-id-tok", msgPrinter.Sprintf("The Horizon Exchange node ID and token to be used as credentials to query and modify the node resources if -u flag is not specified. HZN_EXCHANGE_NODE_AUTH will be used as a default for -n. If you don't prepend it with the node's org, it will automatically be prepended with the -o value.")).Short('n').PlaceHolder("ID:TOK").String()
exServiceListKeyCmd := exServiceCmd.Command("listkey | lsk", msgPrinter.Sprintf("List the signing public keys/certs for this service resource in the Horizon Exchange.")).Alias("lsk").Alias("listkey")
exSvcListKeySvc := exServiceListKeyCmd.Arg("service", msgPrinter.Sprintf("The existing service to list the keys for.")).Required().String()
exSvcListKeyKey := exServiceListKeyCmd.Arg("key-name", msgPrinter.Sprintf("The existing key name to see the contents of.")).String()
exServiceListKeyNodeIdTok := exServiceListKeyCmd.Flag("node-id-tok", msgPrinter.Sprintf("The Horizon Exchange node ID and token to be used as credentials to query and modify the node resources if -u flag is not specified. HZN_EXCHANGE_NODE_AUTH will be used as a default for -n. If you don't prepend it with the node's org, it will automatically be prepended with the -o value.")).Short('n').PlaceHolder("ID:TOK").String()
exServiceListnode := exServiceCmd.Command("listnode | lsn", msgPrinter.Sprintf("Display the nodes that the service is running on.")).Alias("lsn").Alias("listnode")
exServiceListnodeService := exServiceListnode.Arg("service", msgPrinter.Sprintf("The service id. Use <org>/<svc> to specify a service from a different org.")).Required().String()
exServiceListnodeNodeOrg := exServiceListnode.Flag("node-org", msgPrinter.Sprintf("The node's organization. If omitted, it will be same as the org specified by -o or HZN_ORG_ID.")).Short('O').String()
exServiceListPolicyCmd := exServiceCmd.Command("listpolicy | lsp", msgPrinter.Sprintf("Display the service policy from the Horizon Exchange.")).Alias("lsp").Alias("listpolicy")
exServiceListPolicyIdTok := exServiceListPolicyCmd.Flag("service-id-tok", msgPrinter.Sprintf("The Horizon Exchange ID and password of the user.")).Short('n').PlaceHolder("ID:TOK").String()
exServiceListPolicyService := exServiceListPolicyCmd.Arg("service", msgPrinter.Sprintf("List policy for this service.")).Required().String()
exServiceNewPolicyCmd := exServiceCmd.Command("newpolicy | newp", msgPrinter.Sprintf("Display an empty service policy template that can be filled in.")).Alias("newp").Alias("newpolicy")
exServicePublishCmd := exServiceCmd.Command("publish | pub", msgPrinter.Sprintf("Sign and create/update the service resource in the Horizon Exchange.")).Alias("pub").Alias("publish")
exSvcJsonFile := exServicePublishCmd.Flag("json-file", msgPrinter.Sprintf("The path of a JSON file containing the metadata necessary to create/update the service in the Horizon exchange. See %v/service.json and %v/service_cluster.json. Specify -f- to read from stdin.", sample_dir, sample_dir)).Short('f').Required().String()
exSvcPrivKeyFile := exServicePublishCmd.Flag("private-key-file", msgPrinter.Sprintf("The path of a private key file to be used to sign the service. If not specified, the environment variable HZN_PRIVATE_KEY_FILE will be used. If HZN_PRIVATE_KEY_FILE not specified, ~/.hzn/keys/service.private.key will be used. If none are specified, a random key pair will be generated and the public key will be stored with the service.")).Short('k').ExistingFile()
exSvcPubPubKeyFile := exServicePublishCmd.Flag("public-key-file", msgPrinter.Sprintf("(DEPRECATED) The path of public key file (that corresponds to the private key) that should be stored with the service, to be used by the Horizon Agent to verify the signature. If this flag is not specified, the public key will be calculated from the private key.")).Short('K').ExistingFile()
exSvcPubDontTouchImage := exServicePublishCmd.Flag("dont-change-image-tag", msgPrinter.Sprintf("The image paths in the deployment field have regular tags and should not be changed to sha256 digest values. The image will not get automatically uploaded to the repository. This should only be used during development when testing new versions often.")).Short('I').Bool()
exSvcPubPullImage := exServicePublishCmd.Flag("pull-image", msgPrinter.Sprintf("Use the image from the image repository. It will pull the image from the image repository and overwrite the local image if exists. This flag is mutually exclusive with -I.")).Short('P').Bool()
exSvcRegistryTokens := exServicePublishCmd.Flag("registry-token", msgPrinter.Sprintf("Docker registry domain and auth that should be stored with the service, to enable the Horizon edge node to access the service's docker images. This flag can be repeated, and each flag should be in the format: registry:user:token")).Short('r').Strings()
exSvcOverwrite := exServicePublishCmd.Flag("overwrite", msgPrinter.Sprintf("Overwrite the existing version if the service exists in the Exchange. It will skip the 'do you want to overwrite' prompt.")).Short('O').Bool()
exSvcPolicyFile := exServicePublishCmd.Flag("service-policy-file", msgPrinter.Sprintf("The path of the service policy JSON file to be used for the service to be published. This flag is optional")).Short('p').String()
exSvcPublic := exServicePublishCmd.Flag("public", msgPrinter.Sprintf("Whether the service is visible to users outside of the organization. This flag is optional. If left unset, the service will default to whatever the metadata has set. If the service definition has also not set the public field, then the service will by default not be public.")).String()
exSvcDelCmd := exServiceCmd.Command("remove | rm", msgPrinter.Sprintf("Remove a service resource from the Horizon Exchange.")).Alias("rm").Alias("remove")
exDelSvc := exSvcDelCmd.Arg("service", msgPrinter.Sprintf("The service to remove.")).Required().String()
exSvcDelForce := exSvcDelCmd.Flag("force", msgPrinter.Sprintf("Skip the 'are you sure?' prompt.")).Short('f').Bool()
exServiceRemAuthCmd := exServiceCmd.Command("removeauth | rmau", msgPrinter.Sprintf("Remove a docker auth token for this service resource in the Horizon Exchange.")).Alias("rmau").Alias("removeauth")
exSvcRemAuthSvc := exServiceRemAuthCmd.Arg("service", msgPrinter.Sprintf("The existing service to remove the docker auth from.")).Required().String()
exSvcRemAuthId := exServiceRemAuthCmd.Arg("auth-name", msgPrinter.Sprintf("The existing docker auth id to remove.")).Required().Uint()
exServiceRemKeyCmd := exServiceCmd.Command("removekey | rmk", msgPrinter.Sprintf("Remove a signing public key/cert for this service resource in the Horizon Exchange.")).Alias("rmk").Alias("removekey")
exSvcRemKeySvc := exServiceRemKeyCmd.Arg("service", msgPrinter.Sprintf("The existing service to remove the key from.")).Required().String()
exSvcRemKeyKey := exServiceRemKeyCmd.Arg("key-name", msgPrinter.Sprintf("The existing key name to remove.")).Required().String()
exServiceRemovePolicyCmd := exServiceCmd.Command("removepolicy | rmp", msgPrinter.Sprintf("Remove the service policy in the Horizon Exchange.")).Alias("rmp").Alias("removepolicy")
exServiceRemovePolicyIdTok := exServiceRemovePolicyCmd.Flag("service-id-tok", msgPrinter.Sprintf("The Horizon Exchange ID and password of the user.")).Short('n').PlaceHolder("ID:TOK").String()
exServiceRemovePolicyService := exServiceRemovePolicyCmd.Arg("service", msgPrinter.Sprintf("Remove policy for this service.")).Required().String()
exServiceRemovePolicyForce := exServiceRemovePolicyCmd.Flag("force", msgPrinter.Sprintf("Skip the 'are you sure?' prompt.")).Short('f').Bool()
exServiceVerifyCmd := exServiceCmd.Command("verify | vf", msgPrinter.Sprintf("Verify the signatures of a service resource in the Horizon Exchange.")).Alias("vf").Alias("verify")
exVerService := exServiceVerifyCmd.Arg("service", msgPrinter.Sprintf("The service to verify.")).Required().String()
exServiceVerifyNodeIdTok := exServiceVerifyCmd.Flag("node-id-tok", msgPrinter.Sprintf("The Horizon Exchange node ID and token to be used as credentials to query and modify the node resources if -u flag is not specified. HZN_EXCHANGE_NODE_AUTH will be used as a default for -n. If you don't prepend it with the node's org, it will automatically be prepended with the -o value.")).Short('n').PlaceHolder("ID:TOK").String()
exSvcPubKeyFile := exServiceVerifyCmd.Flag("public-key-file", msgPrinter.Sprintf("The path of a pem public key file to be used to verify the service. If not specified, the environment variable HZN_PUBLIC_KEY_FILE will be used. If none of them are set, ~/.hzn/keys/service.public.pem is the default.")).Short('k').String()
exStatusCmd := exchangeCmd.Command("status", msgPrinter.Sprintf("Display the status of the Horizon Exchange."))
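// Exchange user commands. Example usage (illustrative; the user name, password
// and email below are placeholders):
//   hzn exchange user create -A newuser 'newpassword' [email protected]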
exUserCmd := exchangeCmd.Command("user", msgPrinter.Sprintf("List and manage users in the Horizon Exchange."))
exUserCreateCmd := exUserCmd.Command("create | cr", msgPrinter.Sprintf("Create the user resource in the Horizon Exchange.")).Alias("cr").Alias("create")
exUserCreateUser := exUserCreateCmd.Arg("user", msgPrinter.Sprintf("Your username for this user account when creating it in the Horizon exchange.")).Required().String()
exUserCreatePw := exUserCreateCmd.Arg("pw", msgPrinter.Sprintf("Your password for this user account when creating it in the Horizon exchange.")).Required().String()
exUserCreateEmail := exUserCreateCmd.Arg("email", msgPrinter.Sprintf("Your email address that should be associated with this user account when creating it in the Horizon exchange. This argument is optional")).String()
exUserCreateIsAdmin := exUserCreateCmd.Flag("admin", msgPrinter.Sprintf("This user should be an administrator, capable of managing all resources in this org of the Exchange.")).Short('A').Bool()
exUserCreateIsHubAdmin := exUserCreateCmd.Flag("hubadmin", msgPrinter.Sprintf("This user should be a hub administrator, capable of managing orgs in this administration hub.")).Short('H').Bool()
exUserListCmd := exUserCmd.Command("list | ls", msgPrinter.Sprintf("Display the user resource from the Horizon Exchange. (Normally you can only display your own user. If the user does not exist, you will get an invalid credentials error.)")).Alias("ls").Alias("list")
exUserListUser := exUserListCmd.Arg("user", msgPrinter.Sprintf("List this one user. Default is your own user. Only admin users can list other users.")).String()
exUserListAll := exUserListCmd.Flag("all", msgPrinter.Sprintf("List all users in the org. Will only do this if you are a user with admin privilege.")).Short('a').Bool()
exUserListNamesOnly := exUserListCmd.Flag("names", msgPrinter.Sprintf("When listing all of the users, show only the usernames, instead of each entire resource.")).Short('N').Bool()
exUserDelCmd := exUserCmd.Command("remove | rm", msgPrinter.Sprintf("Remove a user resource from the Horizon Exchange. Warning: this will cause all exchange resources owned by this user to also be deleted (nodes, services, patterns, etc).")).Alias("rm").Alias("remove")
exDelUser := exUserDelCmd.Arg("user", msgPrinter.Sprintf("The user to remove.")).Required().String()
exUserDelForce := exUserDelCmd.Flag("force", msgPrinter.Sprintf("Skip the 'are you sure?' prompt.")).Short('f').Bool()
exUserSetAdminCmd := exUserCmd.Command("setadmin | sa", msgPrinter.Sprintf("Change the existing user to be an admin user (like root in his/her org) or to no longer be an admin user. Can only be run by exchange root or another admin user.")).Alias("sa").Alias("setadmin")
exUserSetAdminUser := exUserSetAdminCmd.Arg("user", msgPrinter.Sprintf("The user to be modified.")).Required().String()
exUserSetAdminBool := exUserSetAdminCmd.Arg("isadmin", msgPrinter.Sprintf("True if they should be an admin user, otherwise false.")).Required().Bool()
exVersionCmd := exchangeCmd.Command("version", msgPrinter.Sprintf("Display the version of the Horizon Exchange."))
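// Signing key commands. Example usage (illustrative; the organization and email
// values below are placeholders):
//   hzn key create -l 4096 "ExampleCo" "[email protected]"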
keyCmd := app.Command("key", msgPrinter.Sprintf("List and manage keys for signing and verifying services."))
keyCreateCmd := keyCmd.Command("create | cr", msgPrinter.Sprintf("Generate a signing key pair.")).Alias("cr").Alias("create")
keyX509Org := keyCreateCmd.Arg("x509-org", msgPrinter.Sprintf("x509 certificate Organization (O) field (preferably a company name or other organization's name).")).Required().String()
keyX509CN := keyCreateCmd.Arg("x509-cn", msgPrinter.Sprintf("x509 certificate Common Name (CN) field (preferably an email address issued by x509org).")).Required().String()
keyOutputDir := keyCreateCmd.Flag("output-dir", msgPrinter.Sprintf("The directory to put the key pair files in. Mutually exclusive with -k and -K. The file names will be randomly generated.")).Short('d').ExistingDir()
keyCreatePrivKey := keyCreateCmd.Flag("private-key-file", msgPrinter.Sprintf("The full path of the private key file. Mutually exclusive with -d. If not specified, the environment variable HZN_PRIVATE_KEY_FILE will be used. If none of them are set, ~/.hzn/keys/service.private.key is the default.")).Short('k').String()
keyCreatePubKey := keyCreateCmd.Flag("public-key-file", msgPrinter.Sprintf("The full path of the public key file. Mutually exclusive with -d. If not specified, the environment variable HZN_PUBLIC_KEY_FILE will be used. If none of them are set, ~/.hzn/keys/service.public.pem is the default.")).Short('K').String()
keyCreateOverwrite := keyCreateCmd.Flag("overwrite", msgPrinter.Sprintf("Overwrite the existing files. It will skip the 'do you want to overwrite' prompt.")).Short('f').Bool()
keyLength := keyCreateCmd.Flag("length", msgPrinter.Sprintf("The length of the key to create.")).Short('l').Default("4096").Int()
keyDaysValid := keyCreateCmd.Flag("days-valid", msgPrinter.Sprintf("x509 certificate validity (Validity > Not After) expressed in days from the day of generation.")).Default("1461").Int()
keyImportFlag := keyCreateCmd.Flag("import", msgPrinter.Sprintf("Automatically import the created public key into the local Horizon agent.")).Short('i').Bool()
keyImportCmd := keyCmd.Command("import | imp", msgPrinter.Sprintf("Imports a signing public key into the Horizon agent.")).Alias("imp").Alias("import")
keyImportPubKeyFile := keyImportCmd.Flag("public-key-file", msgPrinter.Sprintf("The path of a pem public key file to be imported. The base name in the path is also used as the key name in the Horizon agent. If not specified, the environment variable HZN_PUBLIC_KEY_FILE will be used. If none of them are set, ~/.hzn/keys/service.public.pem is the default.")).Short('k').String()
keyListCmd := keyCmd.Command("list | ls", msgPrinter.Sprintf("List the signing keys that have been imported into this Horizon agent.")).Alias("list").Alias("ls")
keyName := keyListCmd.Arg("key-name", msgPrinter.Sprintf("The name of a specific key to show.")).String()
keyListAll := keyListCmd.Flag("all", msgPrinter.Sprintf("List the names of all signing keys, even the older public keys not wrapped in a certificate.")).Short('a').Bool()
keyDelCmd := keyCmd.Command("remove | rm", msgPrinter.Sprintf("Remove the specified signing key from this Horizon agent.")).Alias("remove").Alias("rm")
keyDelName := keyDelCmd.Arg("key-name", msgPrinter.Sprintf("The name of a specific key to remove.")).Required().String()
meteringCmd := app.Command("metering | mt", msgPrinter.Sprintf("List or manage the metering (payment) information for the active or archived agreements.")).Alias("mt").Alias("metering")
meteringListCmd := meteringCmd.Command("list | ls", msgPrinter.Sprintf("List the metering (payment) information for the active or archived agreements.")).Alias("ls").Alias("list")
listArchivedMetering := meteringListCmd.Flag("archived", msgPrinter.Sprintf("List archived agreement metering information instead of metering for the active agreements.")).Short('r').Bool()
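// Model Management Service (MMS) commands. Example usage (illustrative; the
// object type, id and file below are placeholders):
//   hzn mms object publish -t model -i mymodel -f mymodel.bin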
mmsCmd := app.Command("mms", msgPrinter.Sprintf("List and manage Horizon Model Management Service resources."))
mmsOrg := mmsCmd.Flag("org", msgPrinter.Sprintf("The Horizon organization ID. If not specified, HZN_ORG_ID will be used as a default.")).Short('o').String()
mmsUserPw := mmsCmd.Flag("user-pw", msgPrinter.Sprintf("Horizon user credentials to query and create Model Management Service resources. If not specified, HZN_EXCHANGE_USER_AUTH will be used as a default. If you don't prepend it with the user's org, it will automatically be prepended with the -o value.")).Short('u').PlaceHolder("USER:PW").String()
mmsObjectCmd := mmsCmd.Command("object | obj", msgPrinter.Sprintf("List and manage objects in the Horizon Model Management Service.")).Alias("obj").Alias("object")
mmsObjectDeleteCmd := mmsObjectCmd.Command("delete | del", msgPrinter.Sprintf("Delete an object in the Horizon Model Management Service, making it unavailable for services deployed on nodes.")).Alias("delete").Alias("del")
mmsObjectDeleteType := mmsObjectDeleteCmd.Flag("type", msgPrinter.Sprintf("The type of the object to delete.")).Short('t').Required().String()
mmsObjectDeleteId := mmsObjectDeleteCmd.Flag("id", msgPrinter.Sprintf("The id of the object to delete.")).Short('i').Required().String()
mmsObjectDownloadCmd := mmsObjectCmd.Command("download | dl", msgPrinter.Sprintf("Download data of the given object in the Horizon Model Management Service.")).Alias("dl").Alias("download")
mmsObjectDownloadType := mmsObjectDownloadCmd.Flag("type", msgPrinter.Sprintf("The type of the object to download data. This flag must be used with -i.")).Short('t').Required().String()
mmsObjectDownloadId := mmsObjectDownloadCmd.Flag("id", msgPrinter.Sprintf("The id of the object to download data. This flag must be used with -t.")).Short('i').Required().String()
mmsObjectDownloadFile := mmsObjectDownloadCmd.Flag("file", msgPrinter.Sprintf("The file that the downloaded object data is written to. If omitted, a default file name in the format objectType_objectID will be used and saved in the current directory.")).Short('f').String()
mmsObjectDownloadOverwrite := mmsObjectDownloadCmd.Flag("overwrite", msgPrinter.Sprintf("Overwrite the existing file if it exists in the file system.")).Short('O').Bool()
mmsObjectDownloadSkipIntegrityCheck := mmsObjectDownloadCmd.Flag("noIntegrity", msgPrinter.Sprintf("The download command will not perform a data integrity check on the downloaded object data")).Bool()
mmsObjectListCmd := mmsObjectCmd.Command("list | ls", msgPrinter.Sprintf("List objects in the Horizon Model Management Service.")).Alias("ls").Alias("list")
mmsObjectListType := mmsObjectListCmd.Flag("type", msgPrinter.Sprintf("The type of the object to list.")).Short('t').String()
mmsObjectListObjType := mmsObjectListCmd.Flag("objectType", "").Hidden().String()
mmsObjectListId := mmsObjectListCmd.Flag("id", msgPrinter.Sprintf("The id of the object to list. This flag is optional. Omit this flag to list all objects of a given object type.")).Short('i').String()
mmsObjectListObjId := mmsObjectListCmd.Flag("objectId", "").Hidden().String()
mmsObjectListDestinationPolicy := mmsObjectListCmd.Flag("policy", msgPrinter.Sprintf("Specify true to show only objects using policy. Specify false to show only objects not using policy. If this flag is omitted, both kinds of objects are shown.")).Short('p').String()
mmsObjectListDPService := mmsObjectListCmd.Flag("service", msgPrinter.Sprintf("List mms objects using policy that are targeted for the given service. The service is specified in the format service-org/service-name.")).Short('s').String()
mmsObjectListDPProperty := mmsObjectListCmd.Flag("property", msgPrinter.Sprintf("List mms objects using policy that reference the given property name.")).String()
mmsObjectListDPUpdateTime := mmsObjectListCmd.Flag("updateTime", msgPrinter.Sprintf("List mms objects using policy that have been updated since the given time. The time value is specified in RFC3339 format: yyyy-MM-ddTHH:mm:ssZ. The time of day may be omitted.")).String()
mmsObjectListDestinationType := mmsObjectListCmd.Flag("destinationType", msgPrinter.Sprintf("List mms objects with the given destination type.")).String()
mmsObjectListDestinationId := mmsObjectListCmd.Flag("destinationId", msgPrinter.Sprintf("List mms objects with the given destination id. Must specify --destinationType to use this flag.")).String()
mmsObjectListWithData := mmsObjectListCmd.Flag("data", msgPrinter.Sprintf("Specify true to show objects that have data. Specify false to show objects that have no data. If this flag is omitted, both kinds of objects are shown.")).String()
mmsObjectListExpirationTime := mmsObjectListCmd.Flag("expirationTime", msgPrinter.Sprintf("List mms objects that expired before the given time. The time value is specified in RFC3339 format: yyyy-MM-ddTHH:mm:ssZ. Specify 'now' to show objects that are currently expired.")).Short('e').String()
mmsObjectListDeleted := mmsObjectListCmd.Flag("deleted", msgPrinter.Sprintf("Specify true to show objects that are marked deleted. Specify false to show objects that are not marked deleted. If this flag is omitted, both kinds of objects are shown. An object is marked deleted when it has been deleted in CSS but notifications have not yet been received from all of its destinations.")).String()
mmsObjectListLong := mmsObjectListCmd.Flag("long", msgPrinter.Sprintf("Show detailed object metadata information")).Short('l').Bool()
mmsObjectListDetail := mmsObjectListCmd.Flag("detail", msgPrinter.Sprintf("Provides additional detail about the deployment of the object on edge nodes.")).Short('d').Bool()
mmsObjectNewCmd := mmsObjectCmd.Command("new", msgPrinter.Sprintf("Display an empty object metadata template that can be filled in and passed as the -m option on the 'hzn mms object publish' command."))
mmsObjectPublishCmd := mmsObjectCmd.Command("publish | pub", msgPrinter.Sprintf("Publish an object in the Horizon Model Management Service, making it available for services deployed on nodes.")).Alias("pub").Alias("publish")
mmsObjectPublishType := mmsObjectPublishCmd.Flag("type", msgPrinter.Sprintf("The type of the object to publish. This flag must be used with -i. It is mutually exclusive with -m")).Short('t').String()
mmsObjectPublishId := mmsObjectPublishCmd.Flag("id", msgPrinter.Sprintf("The id of the object to publish. This flag must be used with -t. It is mutually exclusive with -m")).Short('i').String()
mmsObjectPublishPat := mmsObjectPublishCmd.Flag("pattern", msgPrinter.Sprintf("If you want the object to be deployed on nodes using a given pattern, specify it using this flag. This flag is optional and can only be used with --type and --id. It is mutually exclusive with -m")).Short('p').String()
mmsObjectPublishDef := mmsObjectPublishCmd.Flag("def", msgPrinter.Sprintf("The definition of the object to publish. A blank template can be obtained from the 'hzn mms object new' command. Specify -m- to read from stdin.")).Short('m').String()
mmsObjectPublishObj := mmsObjectPublishCmd.Flag("object", msgPrinter.Sprintf("The object (in the form of a file) to publish. This flag is optional so that you can update only the object's definition.")).Short('f').String()
mmsObjectPublishNoChunkUpload := mmsObjectPublishCmd.Flag("disableChunkUpload", msgPrinter.Sprintf("The publish command will disable chunk upload. Data will stream to CSS.")).Bool()
mmsObjectPublishChunkUploadDataSize := mmsObjectPublishCmd.Flag("chunkSize", msgPrinter.Sprintf("The size of the data chunks used when publishing the object data. The default is 104857600 (100MB). Ignored if --disableChunkUpload is specified.")).Default("104857600").Int()
mmsObjectPublishSkipIntegrityCheck := mmsObjectPublishCmd.Flag("noIntegrity", msgPrinter.Sprintf("The publish command will not perform a data integrity check on the uploaded object data. It is mutually exclusive with --hashAlgo and --hash")).Bool()
mmsObjectPublishDSHashAlgo := mmsObjectPublishCmd.Flag("hashAlgo", msgPrinter.Sprintf("The hash algorithm used to hash the object data before signing it, ensuring data integrity during upload and download. Supported hash algorithms are SHA1 or SHA256, the default is SHA1. It is mutually exclusive with the --noIntegrity flag")).Short('a').String()
mmsObjectPublishDSHash := mmsObjectPublishCmd.Flag("hash", msgPrinter.Sprintf("The hash of the object data being uploaded or downloaded. Use this flag if you want to provide the hash instead of allowing the command to automatically calculate the hash. The hash must be generated using either the SHA1 or SHA256 algorithm. The -a flag must be specified if the hash was generated using SHA256. This flag is mutually exclusive with --noIntegrity.")).String()
mmsObjectPublishPrivKeyFile := mmsObjectPublishCmd.Flag("private-key-file", msgPrinter.Sprintf("The path of a private key file to be used to sign the object. The corresponding public key will be stored in the MMS to ensure integrity of the object. If not specified, the environment variable HZN_PRIVATE_KEY_FILE will be used to find a private key. If not set, ~/.hzn/keys/service.private.key will be used. If it does not exist, an RSA key pair is generated only for this publish operation and then the private key is discarded.")).Short('k').ExistingFile()
mmsStatusCmd := mmsCmd.Command("status", msgPrinter.Sprintf("Display the status of the Horizon Model Management Service."))
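// Local agent node commands. Example usage:
//   hzn node list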
nodeCmd := app.Command("node", msgPrinter.Sprintf("List and manage general information about this Horizon edge node."))
nodeListCmd := nodeCmd.Command("list | ls", msgPrinter.Sprintf("Display general information about this Horizon edge node.")).Alias("list").Alias("ls")
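// Local node policy commands. Example usage (illustrative; the file path below
// is a placeholder):
//   hzn policy update -f node_policy.json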
policyCmd := app.Command("policy | pol", msgPrinter.Sprintf("List and manage policy for this Horizon edge node.")).Alias("pol").Alias("policy")
policyListCmd := policyCmd.Command("list | ls", msgPrinter.Sprintf("Display this edge node's policy.")).Alias("ls").Alias("list")
policyNewCmd := policyCmd.Command("new", msgPrinter.Sprintf("Display an empty policy template that can be filled in."))
policyPatchCmd := policyCmd.Command("patch", msgPrinter.Sprintf("(DEPRECATED) This command is deprecated. Please use 'hzn policy update' to update the node policy. This command is used to update either the node policy properties or the constraints, but not both."))
policyPatchInput := policyPatchCmd.Arg("patch", msgPrinter.Sprintf("The new constraints or properties in the format '%s' or '%s'.", "{\"constraints\":[<constraint list>]}", "{\"properties\":[<property list>]}")).Required().String()
policyRemoveCmd := policyCmd.Command("remove | rm", msgPrinter.Sprintf("Remove the node's policy.")).Alias("rm").Alias("remove")
policyRemoveForce := policyRemoveCmd.Flag("force", msgPrinter.Sprintf("Skip the 'are you sure?' prompt.")).Short('f').Bool()
policyUpdateCmd := policyCmd.Command("update | up", msgPrinter.Sprintf("Create or replace the node's policy. The node's built-in properties cannot be modified or deleted by this command, with the exception of openhorizon.allowPrivileged.")).Alias("up").Alias("update")
policyUpdateInputFile := policyUpdateCmd.Flag("input-file", msgPrinter.Sprintf("The JSON input file name containing the node policy. Specify -f- to read from stdin. A node policy contains the 'deployment' and 'management' attributes. Please use 'hzn policy new' to see the node policy format.")).Short('f').Required().String()
regInputCmd := app.Command("reginput", msgPrinter.Sprintf("Create an input file template for this pattern that can be used for the 'hzn register' command (once filled in). This examines the services that the specified pattern uses, and determines the node owner input that is required for them."))
regInputNodeIdTok := regInputCmd.Flag("node-id-tok", msgPrinter.Sprintf("The Horizon exchange node ID and token (it must already exist).")).Short('n').PlaceHolder("ID:TOK").Required().String()
regInputInputFile := regInputCmd.Flag("input-file", msgPrinter.Sprintf("The JSON input template file name that should be created. This file will contain placeholders for you to fill in user input values.")).Short('f').Required().String()
regInputOrg := regInputCmd.Arg("nodeorg", msgPrinter.Sprintf("The Horizon exchange organization ID that the node will be registered in.")).Required().String()
regInputPattern := regInputCmd.Arg("pattern", msgPrinter.Sprintf("The Horizon exchange pattern that describes what workloads should be deployed to this node. If the pattern is from a different organization than the node, use the 'other_org/pattern' format.")).Required().String()
regInputArch := regInputCmd.Arg("arch", msgPrinter.Sprintf("The architecture to write the template file for. (Horizon ignores services in patterns whose architecture is different from the target system.) The architecture must be what is returned by 'hzn node list' on the target system.")).Default(cutil.ArchString()).String()
registerCmd := app.Command("register | reg", msgPrinter.Sprintf("Register this edge node with Horizon.")).Alias("reg").Alias("register")
nodeIdTok := registerCmd.Flag("node-id-tok", msgPrinter.Sprintf("The Horizon exchange node ID and token. The node ID must be unique within the organization. If not specified, HZN_EXCHANGE_NODE_AUTH will be used as a default. If both -n and HZN_EXCHANGE_NODE_AUTH are not specified, the node ID will be created by Horizon from the machine serial number or fully qualified hostname. If the token is not specified, Horizon will create a random token. If node resource in the Exchange identified by the ID and token does not yet exist, you must also specify the -u flag so it can be created.")).Short('n').PlaceHolder("ID:TOK").String()
nodeName := registerCmd.Flag("name", msgPrinter.Sprintf("The name of the node. If not specified, it will be the same as the node id.")).Short('m').String()
userPw := registerCmd.Flag("user-pw", msgPrinter.Sprintf("User credentials to create the node resource in the Horizon exchange if it does not already exist. If not specified, HZN_EXCHANGE_USER_AUTH will be used as a default.")).Short('u').PlaceHolder("USER:PW").String()
inputFile := registerCmd.Flag("input-file", msgPrinter.Sprintf("A JSON file that sets or overrides user input variables needed by the services that will be deployed to this node. See %v/user_input.json. Specify -f- to read from stdin.", sample_dir)).Short('f').String() // not using ExistingFile() because it can be - for stdin
nodeOrgFlag := registerCmd.Flag("nodeorg", msgPrinter.Sprintf("The Horizon exchange organization ID that the node should be registered in. The default is the HZN_ORG_ID environment variable. Mutually exclusive with <nodeorg> and <pattern> arguments.")).Short('o').String()
patternFlag := registerCmd.Flag("pattern", msgPrinter.Sprintf("The Horizon exchange pattern that describes what workloads should be deployed to this node. If the pattern is from a different organization than the node, use the 'other_org/pattern' format. Mutually exclusive with <nodeorg> and <pattern> arguments.")).Short('p').String()
nodepolicyFlag := registerCmd.Flag("policy", msgPrinter.Sprintf("A JSON file that sets or overrides the node policy for this node. A node policy contains the 'deployment' and 'management' attributes. Please use 'hzn policy new' to see the node policy format.")).String()
org := registerCmd.Arg("nodeorg", msgPrinter.Sprintf("The Horizon exchange organization ID that the node should be registered in. Mutually exclusive with -o and -p.")).String()
pattern := registerCmd.Arg("pattern", msgPrinter.Sprintf("The Horizon exchange pattern that describes what workloads should be deployed to this node. If the pattern is from a different organization than the node, use the 'other_org/pattern' format. Mutually exclusive with -o and -p.")).String()
waitServiceFlag := registerCmd.Flag("service", msgPrinter.Sprintf("Wait for the named service to start executing on this node. When registering with a pattern, use '*' to watch all the services in the pattern. When registering with a policy, '*' is not a valid value for -s. This flag is not supported for edge cluster nodes.")).Short('s').String()
waitServiceOrgFlag := registerCmd.Flag("serviceorg", msgPrinter.Sprintf("The org of the service to wait for on this node. If '-s *' is specified, then --serviceorg must be omitted.")).String()
waitTimeoutFlag := registerCmd.Flag("timeout", msgPrinter.Sprintf("The number of seconds for the --service to start. The default is 60 seconds, beginning when registration is successful. Ignored if --service is not specified.")).Short('t').Default("60").Int()
serviceCmd := app.Command("service | serv", msgPrinter.Sprintf("List or manage the services that are currently registered on this Horizon edge node.")).Alias("serv").Alias("service")
serviceConfigStateCmd := serviceCmd.Command("configstate | cfg", msgPrinter.Sprintf("List or manage the configuration state for the services that are currently registered on this Horizon edge node.")).Alias("cfg").Alias("configstate")
serviceConfigStateListCmd := serviceConfigStateCmd.Command("list | ls", msgPrinter.Sprintf("List the configuration state for the services that are currently registered on this Horizon edge node.")).Alias("ls").Alias("list")
serviceConfigStateActiveCmd := serviceConfigStateCmd.Command("resume | r", msgPrinter.Sprintf("Change the configuration state to 'active' for a service.")).Alias("r").Alias("resume")
resumeAllServices := serviceConfigStateActiveCmd.Flag("all", msgPrinter.Sprintf("Resume all registered services.")).Short('a').Bool()
resumeServiceOrg := serviceConfigStateActiveCmd.Arg("serviceorg", msgPrinter.Sprintf("The organization of the service that should be resumed.")).String()
resumeServiceName := serviceConfigStateActiveCmd.Arg("service", msgPrinter.Sprintf("The name of the service that should be resumed. If omitted, all the services for the organization will be resumed.")).String()
resumeServiceVersion := serviceConfigStateActiveCmd.Arg("version", msgPrinter.Sprintf("The version of the service that should be resumed. If omitted, all the versions for this service will be resumed.")).String()
serviceConfigStateSuspendCmd := serviceConfigStateCmd.Command("suspend | s", msgPrinter.Sprintf("Change the configuration state to 'suspend' for a service. Parent and child dependencies of the suspended service will be stopped until the service is resumed.")).Alias("s").Alias("suspend")
suspendAllServices := serviceConfigStateSuspendCmd.Flag("all", msgPrinter.Sprintf("Suspend all registered services.")).Short('a').Bool()
suspendServiceOrg := serviceConfigStateSuspendCmd.Arg("serviceorg", msgPrinter.Sprintf("The organization of the service that should be suspended.")).String()
suspendServiceName := serviceConfigStateSuspendCmd.Arg("service", msgPrinter.Sprintf("The name of the service that should be suspended. If omitted, all the services for the organization will be suspended.")).String()
suspendServiceVersion := serviceConfigStateSuspendCmd.Arg("version", msgPrinter.Sprintf("The version of the service that should be suspended. If omitted, all the versions for this service will be suspended.")).String()
forceSuspendService := serviceConfigStateSuspendCmd.Flag("force", msgPrinter.Sprintf("Skip the 'are you sure?' prompt.")).Short('f').Bool()
serviceLogCmd := serviceCmd.Command("log", msgPrinter.Sprintf("Show the container logs for a service."))
logServiceName := serviceLogCmd.Arg("service", msgPrinter.Sprintf("The name of the service whose log records should be displayed. The service name is the same as the url field of a service definition. Log records are displayed similar to 'tail' behavior.")).Required().String()
logServiceVersion := serviceLogCmd.Flag("version", msgPrinter.Sprintf("The version of the service.")).Short('V').String()
logServiceContainerName := serviceLogCmd.Flag("container", msgPrinter.Sprintf("The name of the container within the service whose log records should be displayed.")).Short('c').String()
logTail := serviceLogCmd.Flag("tail", msgPrinter.Sprintf("Continuously polls the service's logs to display the most recent records, similar to tail -F behavior.")).Short('f').Bool()
serviceListCmd := serviceCmd.Command("list | ls", msgPrinter.Sprintf("List the services variable configuration that has been done on this Horizon edge node.")).Alias("ls").Alias("list")
serviceRegisteredCmd := serviceCmd.Command("registered | reg", msgPrinter.Sprintf("List the services that are currently registered on this Horizon edge node.")).Alias("reg").Alias("registered")
statusCmd := app.Command("status", msgPrinter.Sprintf("Display the current horizon internal status for the node."))
statusLong := statusCmd.Flag("long", msgPrinter.Sprintf("Show detailed status")).Short('l').Bool()
unregisterCmd := app.Command("unregister | unreg", msgPrinter.Sprintf("Unregister and reset this Horizon edge node so that it is ready to be registered again. Warning: this will stop all the Horizon services running on this edge node, and restart the Horizon agent.")).Alias("unreg").Alias("unregister")
forceUnregister := unregisterCmd.Flag("force", msgPrinter.Sprintf("Skip the 'are you sure?' prompt.")).Short('f').Bool()
removeNodeUnregister := unregisterCmd.Flag("remove", msgPrinter.Sprintf("Also remove this node resource from the Horizon exchange (because you no longer want to use this node with Horizon).")).Short('r').Bool()
deepCleanUnregister := unregisterCmd.Flag("deep-clean", msgPrinter.Sprintf("Also remove all the previous registration information. Use it only after the 'hzn unregister' command failed. Please capture the logs by running 'hzn eventlog list -a -l' command before using this flag.")).Short('D').Bool()
timeoutUnregister := unregisterCmd.Flag("timeout", msgPrinter.Sprintf("The number of minutes to wait for unregistration to complete. The default is zero which will wait forever.")).Short('t').Default("0").Int()
containerUnregister := unregisterCmd.Flag("container", msgPrinter.Sprintf("Perform a deep clean on a node running in a container. This flag must be used with -D and only if the agent was installed as anax-in-container.")).Short('C').Bool()
userinputCmd := app.Command("userinput | u", msgPrinter.Sprintf("List or manage the service user inputs that are currently registered on this Horizon edge node.")).Alias("u").Alias("userinput")
userinputAddCmd := userinputCmd.Command("add", msgPrinter.Sprintf("Add a new user input object or overwrite the current user input object for this Horizon edge node."))
userinputAddFilePath := userinputAddCmd.Flag("file-path", msgPrinter.Sprintf("The file path to the json file with the user input object. Specify -f- to read from stdin.")).Short('f').Required().String()
userinputListCmd := userinputCmd.Command("list | ls", msgPrinter.Sprintf("List the service user inputs currently registered on this Horizon edge node.")).Alias("ls").Alias("list")
userinputNewCmd := userinputCmd.Command("new", msgPrinter.Sprintf("Display an empty userinput template."))
userinputRemoveCmd := userinputCmd.Command("remove | rm", msgPrinter.Sprintf("Remove the user inputs that are currently registered on this Horizon edge node.")).Alias("rm").Alias("remove")
userinputRemoveForce := userinputRemoveCmd.Flag("force", msgPrinter.Sprintf("Skip the 'Are you sure?' prompt.")).Short('f').Bool()
userinputUpdateCmd := userinputCmd.Command("update | up", msgPrinter.Sprintf("Update an existing user input object for this Horizon edge node.")).Alias("up").Alias("update")
userinputUpdateFilePath := userinputUpdateCmd.Flag("file-path", msgPrinter.Sprintf("The file path to the json file with the updated user input object. Specify -f- to read from stdin.")).Short('f').Required().String()
utilCmd := app.Command("util", msgPrinter.Sprintf("Utility commands."))
utilConfigConvCmd := utilCmd.Command("configconv | cfg", msgPrinter.Sprintf("Convert the configuration file from JSON format to a shell script.")).Alias("cfg").Alias("configconv")
utilConfigConvFile := utilConfigConvCmd.Flag("config-file", msgPrinter.Sprintf("The path of a configuration file to be converted. ")).Short('f').Required().ExistingFile()
utilSignCmd := utilCmd.Command("sign", msgPrinter.Sprintf("Sign the text in stdin. The signature is sent to stdout."))
utilSignPrivKeyFile := utilSignCmd.Flag("private-key-file", msgPrinter.Sprintf("The path of a private key file to be used to sign the stdin. ")).Short('k').Required().ExistingFile()
utilVerifyCmd := utilCmd.Command("verify | vf", msgPrinter.Sprintf("Verify that the signature specified via -s is a valid signature for the text in stdin.")).Alias("vf").Alias("verify")
utilVerifyPubKeyFile := utilVerifyCmd.Flag("public-key-file", msgPrinter.Sprintf("The path of public key file (that corresponds to the private key that was used to sign) to verify the signature of stdin.")).Short('K').Required().ExistingFile()
utilVerifySig := utilVerifyCmd.Flag("signature", msgPrinter.Sprintf("The supposed signature of stdin.")).Short('s').Required().String()
smCmd := app.Command("secretsmanager | sm", msgPrinter.Sprintf("List and manage secrets in the secrets manager. NOTE: You must authenticate as an administrator to list secrets available to the entire organization.")).Alias("sm").Alias("secretsmanager")
smOrg := smCmd.Flag("org", msgPrinter.Sprintf("The Horizon organization ID. If not specified, HZN_ORG_ID will be used as a default.")).Short('o').String()
smUserPw := smCmd.Flag("user-pw", msgPrinter.Sprintf("Horizon Exchange credentials to query secrets manager resources. The default is HZN_EXCHANGE_USER_AUTH environment variable. If you don't prepend it with the user's org, it will automatically be prepended with the value of the HZN_ORG_ID environment variable.")).Short('u').PlaceHolder("USER:PW").String()
smSecretCmd := smCmd.Command("secret", msgPrinter.Sprintf("List and manage secrets in the secrets manager."))
smSecretListCmd := smSecretCmd.Command("list | ls", msgPrinter.Sprintf("Display the names of the secrets in the secrets manager.")).Alias("ls").Alias("list")
smSecretListName := smSecretListCmd.Arg("secretName", msgPrinter.Sprintf("List just this one secret. Returns a boolean indicating the existence of the secret. This is the name of the secret used in the secrets manager. If the secret does not exist, returns with exit code 1.")).String()
smSecretAddCmd := smSecretCmd.Command("add", msgPrinter.Sprintf("Add a secret to the secrets manager."))
smSecretAddName := smSecretAddCmd.Arg("secretName", msgPrinter.Sprintf("The name of the secret. It must be unique within your organization. This name is used in deployment policies and patterns to bind this secret to a secret name in a service definition.")).Required().String()
smSecretAddFile := smSecretAddCmd.Flag("secretFile", msgPrinter.Sprintf("Filepath to a file containing the secret details. Mutually exclusive with --secretDetail. Specify -f- to read from stdin.")).Short('f').String()
smSecretAddKey := smSecretAddCmd.Flag("secretKey", msgPrinter.Sprintf("A key for the secret.")).Required().String()
smSecretAddDetail := smSecretAddCmd.Flag("secretDetail", msgPrinter.Sprintf("The secret details as a string. Secret details are the actual secret itself, not the name of the secret. For example, a password, a private key, etc. are examples of secret details. Mutually exclusive with --secretFile.")).Short('d').String()
smSecretAddOverwrite := smSecretAddCmd.Flag("overwrite", msgPrinter.Sprintf("Overwrite the existing secret if it exists in the secrets manager. It will skip the 'do you want to overwrite' prompt.")).Short('O').Bool()
smSecretRemoveCmd := smSecretCmd.Command("remove | rm", msgPrinter.Sprintf("Remove a secret in the secrets manager.")).Alias("rm").Alias("remove")
smSecretRemoveForce := smSecretRemoveCmd.Flag("force", msgPrinter.Sprintf("Skip the 'are you sure?' prompt.")).Short('f').Bool()
smSecretRemoveName := smSecretRemoveCmd.Arg("secretName", msgPrinter.Sprintf("The name of the secret to be removed from the secrets manager.")).Required().String()
smSecretReadCmd := smSecretCmd.Command("read", msgPrinter.Sprintf("Read the details of a secret stored in the secrets manager. This consists of the key and value pair provided on secret creation."))
smSecretReadName := smSecretReadCmd.Arg("secretName", msgPrinter.Sprintf("The name of the secret to read in the secrets manager.")).Required().String()
versionCmd := app.Command("version", msgPrinter.Sprintf("Show the Horizon version.")) // using a cmd for this instead of --version flag, because kingpin takes over the latter and can't get version only when it is needed
sdoCmd := app.Command("sdo", msgPrinter.Sprintf("List and manage resources in SDO owner services"))
sdoOrg := sdoCmd.Flag("org", msgPrinter.Sprintf("The Horizon organization ID. If not specified, HZN_ORG_ID will be used as a default.")).Short('o').String()
sdoUserPw := sdoCmd.Flag("user-pw", msgPrinter.Sprintf("Horizon Exchange credentials to query SDO owner services resources. The default is HZN_EXCHANGE_USER_AUTH environment variable. If you don't prepend it with the user's org, it will automatically be prepended with the value of the HZN_ORG_ID environment variable.")).Short('u').PlaceHolder("USER:PW").String()
sdoKeyCmd := sdoCmd.Command("key", msgPrinter.Sprintf("List and manage Horizon SDO ownership keys."))
sdoKeyListCmd := sdoKeyCmd.Command("list | ls", msgPrinter.Sprintf("List the SDO ownership keys stored in SDO owner services.")).Alias("ls").Alias("list")
sdoKeyToList := sdoKeyListCmd.Arg("keyName", msgPrinter.Sprintf("List the full details of this SDO ownership key.")).String()
sdoKeyCreateCmd := sdoKeyCmd.Command("create | cr", msgPrinter.Sprintf("Create a new key in SDO owner services.")).Alias("cr").Alias("create")
sdoKeyCreateInputFile := sdoKeyCreateCmd.Arg("key-meta-file", msgPrinter.Sprintf("The file containing metadata for the key to be created in SDO owner services. The file must have a .json extension.")).Required().File()
sdoKeyCreateFile := sdoKeyCreateCmd.Flag("file-path", msgPrinter.Sprintf("The file that the returned public key is written to. If omitted, the key will be printed to the console.")).Short('f').String()
sdoKeyCreateOverwrite := sdoKeyCreateCmd.Flag("overwrite", msgPrinter.Sprintf("Overwrite the existing output public key file if it exists.")).Short('O').Bool()
sdoKeyDownloadCmd := sdoKeyCmd.Command("download | dl", msgPrinter.Sprintf("Download the specified key from SDO owner services.")).Alias("dl").Alias("download")
sdoKeyToDownload := sdoKeyDownloadCmd.Arg("keyName", msgPrinter.Sprintf("The name of the key to be downloaded from SDO owner services.")).Required().String()
sdoKeyDownloadFile := sdoKeyDownloadCmd.Flag("file-path", msgPrinter.Sprintf("The file that the downloaded key data is written to. If omitted, the key will be printed to the console.")).Short('f').String()
sdoKeyDownloadOverwrite := sdoKeyDownloadCmd.Flag("overwrite", msgPrinter.Sprintf("Overwrite the existing file if it exists.")).Short('O').Bool()
sdoKeyRemoveCmd := sdoKeyCmd.Command("remove | rm", msgPrinter.Sprintf("Remove a key from SDO owner services.")).Alias("rm").Alias("remove")
sdoKeyToRemove := sdoKeyRemoveCmd.Arg("keyName", msgPrinter.Sprintf("The name of the key to be removed from SDO owner services.")).Required().String()
sdoKeyNewCmd := sdoKeyCmd.Command("new", msgPrinter.Sprintf("Create a new SDO key metadata template file. All fields must be filled before adding to SDO owner services."))
sdoKeyNewFile := sdoKeyNewCmd.Flag("file-path", msgPrinter.Sprintf("The file that the SDO key template will be written to in JSON format. If omitted, the key metadata will be printed to the console.")).Short('f').String()
sdoKeyNewOverwrite := sdoKeyNewCmd.Flag("overwrite", msgPrinter.Sprintf("Overwrite the existing file if it exists.")).Short('O').Bool()
sdoVoucherCmd := sdoCmd.Command("voucher", msgPrinter.Sprintf("List and manage Horizon SDO ownership vouchers."))
sdoVoucherListCmd := sdoVoucherCmd.Command("list | ls", msgPrinter.Sprintf("List the imported SDO ownership vouchers.")).Alias("ls").Alias("list")
sdoVoucherToList := sdoVoucherListCmd.Arg("voucher", msgPrinter.Sprintf("List the full details of this SDO ownership voucher.")).String()
sdoVoucherListLong := sdoVoucherListCmd.Flag("long", msgPrinter.Sprintf("When a voucher uuid is specified, the full contents of that voucher will be listed; otherwise the full contents of all the imported vouchers will be listed.")).Short('l').Bool()
sdoVoucherInspectCmd := sdoVoucherCmd.Command("inspect | ins", msgPrinter.Sprintf("Display properties of the SDO ownership voucher.")).Alias("ins").Alias("inspect")
sdoVoucherInspectFile := sdoVoucherInspectCmd.Arg("voucher-file", msgPrinter.Sprintf("The SDO ownership voucher file.")).Required().File() // returns the file descriptor
sdoVoucherImportCmd := sdoVoucherCmd.Command("import | imp", msgPrinter.Sprintf("Imports the SDO ownership voucher so that the corresponding device can be booted, configured, and registered.")).Alias("import").Alias("imp")
sdoVoucherImportFile := sdoVoucherImportCmd.Arg("voucher-file", msgPrinter.Sprintf("The SDO ownership voucher file. The file extension must be one of: json, tar, tar.gz, tgz, or zip. If it is any of the tar/zip formats, all json files within it will be imported (other files/dirs will be silently ignored).")).Required().File() // returns the file descriptor
sdoVoucherImportExample := sdoVoucherImportCmd.Flag("example", msgPrinter.Sprintf("Automatically create a node policy that will result in the specified example edge service (for example 'helloworld') being deployed to the edge device associated with this voucher. It is mutually exclusive with --policy and -p.")).Short('e').String()
sdoVoucherImportPolicy := sdoVoucherImportCmd.Flag("policy", msgPrinter.Sprintf("The node policy file to use for the edge device associated with this voucher. It is mutually exclusive with -e and -p. A node policy contains the 'deployment' and 'management' attributes. Please use 'hzn policy new' to see the node policy format.")).String()
sdoVoucherImportPattern := sdoVoucherImportCmd.Flag("pattern", msgPrinter.Sprintf("The deployment pattern name to use for the edge device associated with this voucher. If the pattern is from a different organization than the node, use the 'other_org/pattern' format. It is mutually exclusive with -e and --policy.")).Short('p').String()
sdoVoucherDownloadCmd := sdoVoucherCmd.Command("download | dl", msgPrinter.Sprintf("Download the specified SDO ownership voucher from SDO owner services.")).Alias("dl").Alias("download")
sdoVoucherDownloadDevice := sdoVoucherDownloadCmd.Arg("device-id", msgPrinter.Sprintf("The SDO ownership voucher to download.")).Required().String()
sdoVoucherDownloadFile := sdoVoucherDownloadCmd.Flag("file-path", msgPrinter.Sprintf("The file that the downloaded voucher data is written to in JSON format. If omitted, a default file name in the format <deviceID>.json will be used and the file saved in the current directory.")).Short('f').String()
sdoVoucherDownloadOverwrite := sdoVoucherDownloadCmd.Flag("overwrite", msgPrinter.Sprintf("Overwrite the existing file if it exists.")).Short('O').Bool()
voucherCmd := app.Command("voucher", msgPrinter.Sprintf("(DEPRECATED) This command is deprecated. Please use 'hzn sdo voucher' to list and manage Horizon SDO ownership vouchers."))
voucherListCmd := voucherCmd.Command("list | ls", msgPrinter.Sprintf("(DEPRECATED) This command is deprecated. Please use 'hzn sdo voucher list' to list the imported SDO ownership vouchers.")).Alias("ls").Alias("list")
voucherToList := voucherListCmd.Arg("voucher", msgPrinter.Sprintf("List the full details of this SDO ownership voucher.")).String()
voucherListLong := voucherListCmd.Flag("long", msgPrinter.Sprintf("When a voucher uuid is specified, the full contents of that voucher will be listed; otherwise the full contents of all the imported vouchers will be listed.")).Short('l').Bool()
voucherInspectCmd := voucherCmd.Command("inspect | ins", msgPrinter.Sprintf("(DEPRECATED) This command is deprecated. Please use 'hzn sdo voucher inspect' to display properties of the SDO ownership voucher.")).Alias("ins").Alias("inspect")
voucherInspectFile := voucherInspectCmd.Arg("voucher-file", msgPrinter.Sprintf("The SDO ownership voucher file.")).Required().File() // returns the file descriptor
voucherImportCmd := voucherCmd.Command("import | imp", msgPrinter.Sprintf("(DEPRECATED) This command is deprecated. Please use 'hzn sdo voucher import' to import the SDO ownership voucher.")).Alias("import").Alias("imp")
voucherImportFile := voucherImportCmd.Arg("voucher-file", msgPrinter.Sprintf("The SDO ownership voucher file. The file extension must be one of: json, tar, tar.gz, tgz, or zip. If it is any of the tar/zip formats, all json files within it will be imported (other files/dirs will be silently ignored).")).Required().File() // returns the file descriptor
voucherOrg := voucherImportCmd.Flag("org", msgPrinter.Sprintf("The Horizon organization ID. If not specified, HZN_ORG_ID will be used as a default.")).Short('o').String()
voucherUserPw := voucherImportCmd.Flag("user-pw", msgPrinter.Sprintf("Horizon user credentials to import a voucher. If not specified, HZN_EXCHANGE_USER_AUTH will be used as a default. If you don't prepend it with the user's org, it will automatically be prepended with the -o value.")).Short('u').PlaceHolder("USER:PW").String()
voucherImportExample := voucherImportCmd.Flag("example", msgPrinter.Sprintf("Automatically create a node policy that will result in the specified example edge service (for example 'helloworld') being deployed to the edge device associated with this voucher. It is mutually exclusive with --policy and -p.")).Short('e').String()
voucherImportPolicy := voucherImportCmd.Flag("policy", msgPrinter.Sprintf("The node policy file to use for the edge device associated with this voucher. It is mutually exclusive with -e and -p.")).String()
voucherImportPattern := voucherImportCmd.Flag("pattern", msgPrinter.Sprintf("The deployment pattern name to use for the edge device associated with this voucher. If the pattern is from a different organization than the node, use the 'other_org/pattern' format. It is mutually exclusive with -e and --policy.")).Short('p').String()
app.VersionFlag = nil
/* trying to override the base --version behavior does not work....
fmt.Printf("version: %v\n", *version)
if *version {
node.Version()
os.Exit(0)
}
*/
// Parse cmd and apply env var defaults
fullCmd := kingpin.MustParse(app.Parse(os.Args[1:]))
//cliutils.Verbose("Full command: %s", fullCmd)
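// NOTE: commands above are declared with their aliases embedded in the name (for example "exchange | ex"),
// so the fullCmd string returned by kingpin contains those alias forms. That is why the prefix checks
// and switch cases below match strings such as "exchange | ex nmp add".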
// setup the environment variables from the project config file
project_dir := ""
if strings.HasPrefix(fullCmd, "dev ") {
project_dir = *devHomeDirectory
}
cliconfig.SetEnvVarsFromProjectConfigFile(project_dir)
credToUse := ""
if strings.HasPrefix(fullCmd, "exchange ") {
exOrg = cliutils.WithDefaultEnvVar(exOrg, "HZN_ORG_ID")
// Allow undefined org for 'exchange org' commands and 'new' commands
if *exOrg == "" && !(strings.HasPrefix(fullCmd, "exchange | ex org") ||
strings.HasPrefix(fullCmd, "exchange | ex deployment | dep new") ||
strings.HasPrefix(fullCmd, "exchange | ex service | serv newpolicy | newp") ||
strings.HasPrefix(fullCmd, "exchange | ex nmp new")) {
cliutils.Fatal(cliutils.CLI_INPUT_ERROR, msgPrinter.Sprintf("organization ID must be specified with either the -o flag or HZN_ORG_ID"))
}
// some hzn exchange commands can take either -u user:pw or -n nodeid:token as credentials.
switch subCmd := strings.TrimPrefix(fullCmd, "exchange | ex "); subCmd {
case "nmp add":
credToUse = cliutils.GetExchangeAuth(*exUserPw, *exNMPAddIdTok)
case "nmp list | ls":
credToUse = cliutils.GetExchangeAuth(*exUserPw, *exNMPListIdTok)
case "nmp new":
// does not require exchange credentials
case "nmp remove | rm":
credToUse = cliutils.GetExchangeAuth(*exUserPw, *exNMPRemoveIdTok)
// case "nmp status":
// credToUse = cliutils.GetExchangeAuth(*exUserPw, *exNMPStatusListIdTok)
case "node list | ls":
credToUse = cliutils.GetExchangeAuth(*exUserPw, *exNodeListNodeIdTok)
case "node update | up":
credToUse = cliutils.GetExchangeAuth(*exUserPw, *exNodeUpdateIdTok)
case "node settoken":
credToUse = cliutils.GetExchangeAuth(*exUserPw, *exNodeSetTokNodeIdTok)
case "node remove | rm":
credToUse = cliutils.GetExchangeAuth(*exUserPw, *exNodeRemoveNodeIdTok)
case "node confirm | con":
//do nothing because it uses the node id and token given in the argument as the credential
case "node listpolicy | lsp":
credToUse = cliutils.GetExchangeAuth(*exUserPw, *exNodeListPolicyIdTok)
case "node addpolicy | addp":
credToUse = cliutils.GetExchangeAuth(*exUserPw, *exNodeAddPolicyIdTok)
case "node updatepolicy | upp":
credToUse = cliutils.GetExchangeAuth(*exUserPw, *exNodeUpdatePolicyIdTok)
case "node removepolicy | rmp":
credToUse = cliutils.GetExchangeAuth(*exUserPw, *exNodeRemovePolicyIdTok)
case "node listerrors | lse":
credToUse = cliutils.GetExchangeAuth(*exUserPw, *exNodeErrorsListIdTok)
case "node liststatus | lst":
credToUse = cliutils.GetExchangeAuth(*exUserPw, *exNodeStatusIdTok)
case "node management | mgmt list | ls":
credToUse = cliutils.GetExchangeAuth(*exUserPw, *exNodeManagementListNodeIdTok)
case "service | serv list | ls":
credToUse = cliutils.GetExchangeAuth(*exUserPw, *exServiceListNodeIdTok)
case "service | serv verify | vf":
credToUse = cliutils.GetExchangeAuth(*exUserPw, *exServiceVerifyNodeIdTok)
case "service | serv listkey | lsk":
credToUse = cliutils.GetExchangeAuth(*exUserPw, *exServiceListKeyNodeIdTok)
case "service | serv listauth | lsau":
credToUse = cliutils.GetExchangeAuth(*exUserPw, *exServiceListAuthNodeIdTok)
case "pattern | pat list | ls":
credToUse = cliutils.GetExchangeAuth(*exUserPw, *exPatternListNodeIdTok)
case "pattern | pat update | up":
credToUse = cliutils.GetExchangeAuth(*exUserPw, *exPatUpdateNodeIdTok)
case "pattern | pat verify | vf":
credToUse = cliutils.GetExchangeAuth(*exUserPw, *exPatternVerifyNodeIdTok)
case "pattern | pat listkey | lsk":
credToUse = cliutils.GetExchangeAuth(*exUserPw, *exPatternListKeyNodeIdTok)
case "service | serv listpolicy | lsp":
credToUse = cliutils.GetExchangeAuth(*exUserPw, *exServiceListPolicyIdTok)
case "service | serv addpolicy | addp":
credToUse = cliutils.GetExchangeAuth(*exUserPw, *exServiceAddPolicyIdTok)
case "service | serv removepolicy | rmp":
credToUse = cliutils.GetExchangeAuth(*exUserPw, *exServiceRemovePolicyIdTok)
case "service | serv newpolicy | newp":
// does not require exchange credentials
case "deployment | dep listpolicy | ls":
credToUse = cliutils.GetExchangeAuth(*exUserPw, *exBusinessListPolicyIdTok)
case "deployment | dep updatepolicy | upp":
credToUse = cliutils.GetExchangeAuth(*exUserPw, *exBusinessUpdatePolicyIdTok)
case "deployment | dep addpolicy | addp":
credToUse = cliutils.GetExchangeAuth(*exUserPw, *exBusinessAddPolicyIdTok)
case "deployment | dep removepolicy | rmp":
credToUse = cliutils.GetExchangeAuth(*exUserPw, *exBusinessRemovePolicyIdTok)
case "deployment | dep new":
// does not require exchange credentials
case "version":
credToUse = cliutils.GetExchangeAuthVersion(*exUserPw)
default:
// get HZN_EXCHANGE_USER_AUTH as default if exUserPw is empty
exUserPw = cliutils.RequiredWithDefaultEnvVar(exUserPw, "HZN_EXCHANGE_USER_AUTH", msgPrinter.Sprintf("exchange user authentication must be specified with either the -u flag or HZN_EXCHANGE_USER_AUTH"))
}
if exVersion := exchange.LoadExchangeVersion(false, *exOrg, credToUse, *exUserPw); exVersion != "" {
if err := version.VerifyExchangeVersion1(exVersion, false); err != nil {
cliutils.Fatal(cliutils.CLI_GENERAL_ERROR, err.Error())
}
}
}
if strings.HasPrefix(fullCmd, "register") {
// use HZN_EXCHANGE_USER_AUTH for -u
userPw = cliutils.WithDefaultEnvVar(userPw, "HZN_EXCHANGE_USER_AUTH")
// use HZN_EXCHANGE_NODE_AUTH for -n and trim the org
nodeIdTok = cliutils.WithDefaultEnvVar(nodeIdTok, "HZN_EXCHANGE_NODE_AUTH")
// use HZN_ORG_ID or org provided by -o for version check
verCheckOrg := cliutils.WithDefaultEnvVar(org, "HZN_ORG_ID")
if exVersion := exchange.LoadExchangeVersion(false, *verCheckOrg, *userPw, *nodeIdTok); exVersion != "" {
if err := version.VerifyExchangeVersion1(exVersion, false); err != nil {
cliutils.Fatal(cliutils.CLI_GENERAL_ERROR, err.Error())
}
}
}
if strings.HasPrefix(fullCmd, "deploycheck") {
deploycheckOrg = cliutils.WithDefaultEnvVar(deploycheckOrg, "HZN_ORG_ID")
deploycheckUserPw = cliutils.WithDefaultEnvVar(deploycheckUserPw, "HZN_EXCHANGE_USER_AUTH")
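// The business-policy (*BPol*) and deployment-policy (*DepPol*) flag variables describe the same input;
// when the *BPol* variant was not supplied, fall back to the *DepPol* variant so the compatibility
// checks below only need to read a single set of values.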
if *policyCompBPolId == "" {
policyCompBPolId = policyCompDepPolId
}
if *policyCompBPolFile == "" {
policyCompBPolFile = policyCompDepPolFile
}
if *userinputCompBPolId == "" {
userinputCompBPolId = userinputCompDepPolId
}
if *userinputCompBPolFile == "" {
userinputCompBPolFile = userinputCompDepPolFile
}
if *allCompBPolId == "" {
allCompBPolId = allCompDepPolId
}
if *allCompBPolFile == "" {
allCompBPolFile = allCompDepPolFile
}
if exVersion := exchange.LoadExchangeVersion(false, *deploycheckOrg, *deploycheckUserPw); exVersion != "" {
if err := version.VerifyExchangeVersion1(exVersion, false); err != nil {
cliutils.Fatal(cliutils.CLI_GENERAL_ERROR, err.Error())
}
}
}
// For the mms command family, make sure that org and exchange credentials are specified in some way.
if strings.HasPrefix(fullCmd, "mms") {
if !(strings.HasPrefix(fullCmd, "mms object | obj new")) {
mmsOrg = cliutils.RequiredWithDefaultEnvVar(mmsOrg, "HZN_ORG_ID", msgPrinter.Sprintf("organization ID must be specified with either the -o flag or HZN_ORG_ID"))
mmsUserPw = cliutils.RequiredWithDefaultEnvVar(mmsUserPw, "HZN_EXCHANGE_USER_AUTH", msgPrinter.Sprintf("exchange user authentication must be specified with either the -u flag or HZN_EXCHANGE_USER_AUTH"))
}
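// The object type and id for 'mms object list' can be supplied through either of two flag/argument
// variables; prefer the first and fall back to the alternate when it is empty.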
if *mmsObjectListId == "" {
mmsObjectListId = mmsObjectListObjId
}
if *mmsObjectListType == "" {
mmsObjectListType = mmsObjectListObjType
}
}
// For the sdo command family, make sure that org and exchange credentials are specified in some way.
if strings.HasPrefix(fullCmd, "sdo voucher") && !strings.HasPrefix(fullCmd, "sdo voucher inspect") {
sdoOrg = cliutils.RequiredWithDefaultEnvVar(sdoOrg, "HZN_ORG_ID", msgPrinter.Sprintf("organization ID must be specified with either the -o flag or HZN_ORG_ID"))
sdoUserPw = cliutils.RequiredWithDefaultEnvVar(sdoUserPw, "HZN_EXCHANGE_USER_AUTH", msgPrinter.Sprintf("exchange user authentication must be specified with either the -u flag or HZN_EXCHANGE_USER_AUTH"))
}
if strings.HasPrefix(fullCmd, "sdo key") && !strings.HasPrefix(fullCmd, "sdo key new") {
sdoOrg = cliutils.RequiredWithDefaultEnvVar(sdoOrg, "HZN_ORG_ID", msgPrinter.Sprintf("organization ID must be specified with either the -o flag or HZN_ORG_ID"))
sdoUserPw = cliutils.RequiredWithDefaultEnvVar(sdoUserPw, "HZN_EXCHANGE_USER_AUTH", msgPrinter.Sprintf("exchange user authentication must be specified with either the -u flag or HZN_EXCHANGE_USER_AUTH"))
}
// DEPRECATED
// For the voucher import command family, make sure that org and exchange credentials are specified in some way.
if strings.HasPrefix(fullCmd, "voucher import") {
voucherOrg = cliutils.RequiredWithDefaultEnvVar(voucherOrg, "HZN_ORG_ID", msgPrinter.Sprintf("organization ID must be specified with either the -o flag or HZN_ORG_ID"))
voucherUserPw = cliutils.RequiredWithDefaultEnvVar(voucherUserPw, "HZN_EXCHANGE_USER_AUTH", msgPrinter.Sprintf("exchange user authentication must be specified with either the -u flag or HZN_EXCHANGE_USER_AUTH"))
}
if strings.HasPrefix(fullCmd, "voucher list") {
voucherOrg = cliutils.RequiredWithDefaultEnvVar(voucherOrg, "HZN_ORG_ID", msgPrinter.Sprintf("organization ID must be specified with either the -o flag or HZN_ORG_ID"))
voucherUserPw = cliutils.RequiredWithDefaultEnvVar(voucherUserPw, "HZN_EXCHANGE_USER_AUTH", msgPrinter.Sprintf("exchange user authentication must be specified with either the -u flag or HZN_EXCHANGE_USER_AUTH"))
}
// For the secret manager command family, make sure that org is specified in some way.
if strings.HasPrefix(fullCmd, "secretsmanager") {
smOrg = cliutils.RequiredWithDefaultEnvVar(smOrg, "HZN_ORG_ID", msgPrinter.Sprintf("organization ID must be specified with either the -o flag or HZN_ORG_ID"))
smUserPw = cliutils.RequiredWithDefaultEnvVar(smUserPw, "HZN_EXCHANGE_USER_AUTH", msgPrinter.Sprintf("exchange user authentication must be specified with either the -u flag or HZN_EXCHANGE_USER_AUTH"))
}
// key file defaults
switch fullCmd {
case "key create":
if *keyOutputDir == "" {
keyCreatePrivKey = cliutils.WithDefaultEnvVar(keyCreatePrivKey, "HZN_PRIVATE_KEY_FILE")
keyCreatePubKey = cliutils.WithDefaultEnvVar(keyCreatePubKey, "HZN_PUBLIC_KEY_FILE")
}
case "exchange | ex pattern | pat verify":
exPatPubKeyFile = cliutils.WithDefaultEnvVar(exPatPubKeyFile, "HZN_PUBLIC_KEY_FILE")
case "exchange | ex service | serv verify":
exSvcPubKeyFile = cliutils.WithDefaultEnvVar(exSvcPubKeyFile, "HZN_PUBLIC_KEY_FILE")
case "key import":
keyImportPubKeyFile = cliutils.WithDefaultEnvVar(keyImportPubKeyFile, "HZN_PUBLIC_KEY_FILE")
}
// set env variable ARCH if it is not set
cliutils.SetDefaultArch()
// Decide which command to run
switch fullCmd {
case envCmd.FullCommand():
envOrg := os.Getenv("HZN_ORG_ID")
envUserPw := os.Getenv("HZN_EXCHANGE_USER_AUTH")
envExchUrl := cliutils.GetExchangeUrl()
envCcsUrl := cliutils.GetMMSUrl()
envAgbotUrl := cliutils.GetAgbotSecureAPIUrlBase()
node.Env(envOrg, envUserPw, envExchUrl, envCcsUrl, envAgbotUrl)
case versionCmd.FullCommand():
node.Version()
case archCmd.FullCommand():
node.Architecture()
case exVersionCmd.FullCommand():
exchange.Version(*exOrg, credToUse)
case exStatusCmd.FullCommand():
exchange.Status(*exOrg, *exUserPw)
case exOrgListCmd.FullCommand():
exchange.OrgList(*exOrg, *exUserPw, *exOrgListOrg, *exOrgListLong)
case exOrgCreateCmd.FullCommand():
exchange.OrgCreate(*exOrg, *exUserPw, *exOrgCreateOrg, *exOrgCreateLabel, *exOrgCreateDesc, *exOrgCreateTags, *exOrgCreateHBMin, *exOrgCreateHBMax, *exOrgCreateHBAdjust, *exOrgCreateMaxNodes, *exOrgCreateAddToAgbot)
case exOrgUpdateCmd.FullCommand():
exchange.OrgUpdate(*exOrg, *exUserPw, *exOrgUpdateOrg, *exOrgUpdateLabel, *exOrgUpdateDesc, *exOrgUpdateTags, *exOrgUpdateHBMin, *exOrgUpdateHBMax, *exOrgUpdateHBAdjust, *exOrgUpdateMaxNodes)
case exOrgDelCmd.FullCommand():
exchange.OrgDel(*exOrg, *exUserPw, *exOrgDelOrg, *exOrgDelFromAgbot, *exOrgDelForce)
case exUserListCmd.FullCommand():
exchange.UserList(*exOrg, *exUserPw, *exUserListUser, *exUserListAll, *exUserListNamesOnly)
case exUserCreateCmd.FullCommand():
exchange.UserCreate(*exOrg, *exUserPw, *exUserCreateUser, *exUserCreatePw, *exUserCreateEmail, *exUserCreateIsAdmin, *exUserCreateIsHubAdmin)
case exUserSetAdminCmd.FullCommand():
exchange.UserSetAdmin(*exOrg, *exUserPw, *exUserSetAdminUser, *exUserSetAdminBool)
case exUserDelCmd.FullCommand():
exchange.UserRemove(*exOrg, *exUserPw, *exDelUser, *exUserDelForce)
case exNMPListCmd.FullCommand():
exchange.NMPList(*exOrg, credToUse, *exNMPListName, !*exNMPListLong, *exNMPListNodes)
case exNMPAddCmd.FullCommand():
exchange.NMPAdd(*exOrg, credToUse, *exNMPAddName, *exNMPAddJsonFile, *exNMPAddAppliesTo, *exNMPAddNoConstraint)
case exNMPNewCmd.FullCommand():
exchange.NMPNew()
case exNMPRemoveCmd.FullCommand():
exchange.NMPRemove(*exOrg, credToUse, *exNMPRemoveName, *exNMPRemoveForce)
// case exNMPStatusCmd.FullCommand():
// exchange.NMPStatus(*exOrg, credToUse, *exNMPStatusListName)
case exNodeListCmd.FullCommand():
exchange.NodeList(*exOrg, credToUse, *exNode, !*exNodeLong)
case exNodeUpdateCmd.FullCommand():
exchange.NodeUpdate(*exOrg, credToUse, *exNodeUpdateNode, *exNodeUpdateJsonFile)
case exNodeCreateCmd.FullCommand():
exchange.NodeCreate(*exOrg, *exNodeCreateNodeIdTok, *exNodeCreateNode, *exNodeCreateToken, *exUserPw, *exNodeCreateNodeArch, *exNodeCreateNodeName, *exNodeCreateNodeType, true)
case exNodeSetTokCmd.FullCommand():
exchange.NodeSetToken(*exOrg, credToUse, *exNodeSetTokNode, *exNodeSetTokToken)
case exNodeConfirmCmd.FullCommand():
exchange.NodeConfirm(*exOrg, *exNodeConfirmNode, *exNodeConfirmToken, *exNodeConfirmNodeIdTok)
case exNodeDelCmd.FullCommand():
exchange.NodeRemove(*exOrg, credToUse, *exDelNode, *exNodeDelForce)
case exNodeListPolicyCmd.FullCommand():
exchange.NodeListPolicy(*exOrg, credToUse, *exNodeListPolicyNode)
case exNodeAddPolicyCmd.FullCommand():
exchange.NodeAddPolicy(*exOrg, credToUse, *exNodeAddPolicyNode, *exNodeAddPolicyJsonFile)
case exNodeUpdatePolicyCmd.FullCommand():
exchange.NodeUpdatePolicy(*exOrg, credToUse, *exNodeUpdatePolicyNode, *exNodeUpdatePolicyJsonFile)
case exNodeRemovePolicyCmd.FullCommand():
exchange.NodeRemovePolicy(*exOrg, credToUse, *exNodeRemovePolicyNode, *exNodeRemovePolicyForce)
case exNodeErrorsList.FullCommand():
exchange.NodeListErrors(*exOrg, credToUse, *exNodeErrorsListNode, *exNodeErrorsListLong)
case exNodeStatusList.FullCommand():
exchange.NodeListStatus(*exOrg, credToUse, *exNodeStatusListNode)
case exNodeManagementListCmd.FullCommand():
exchange.NodeManagementList(*exOrg, credToUse, *exNodeManagementListName, *exNodManagementListNMPsAll)
case agbotCacheServedOrgList.FullCommand():
agreementbot.GetServedOrgs()
case agbotCachePatternList.FullCommand():
agreementbot.GetPatterns(*agbotCachePatternListOrg, *agbotCachePatternListName, *agbotCachePatternListLong)
case agbotCacheDeployPolList.FullCommand():
agreementbot.GetPolicies(*agbotCacheDeployPolListOrg, *agbotCacheDeployPolListName, *agbotCacheDeployPolListLong)
case exAgbotListCmd.FullCommand():
exchange.AgbotList(*exOrg, *exUserPw, *exAgbot, !*exAgbotLong)
case exAgbotListPatsCmd.FullCommand():
exchange.AgbotListPatterns(*exOrg, *exUserPw, *exAgbotLP, *exAgbotLPPatOrg, *exAgbotLPPat, *exAgbotLPNodeOrg)
case exAgbotAddPatCmd.FullCommand():
exchange.AgbotAddPattern(*exOrg, *exUserPw, *exAgbotAP, *exAgbotAPPatOrg, *exAgbotAPPat, *exAgbotAPNodeOrg)
case exAgbotDelPatCmd.FullCommand():
exchange.AgbotRemovePattern(*exOrg, *exUserPw, *exAgbotDP, *exAgbotDPPatOrg, *exAgbotDPPat, *exAgbotDPNodeOrg)
case exAgbotListPolicyCmd.FullCommand():
exchange.AgbotListBusinessPolicy(*exOrg, *exUserPw, *exAgbotPol)
case exAgbotAddPolCmd.FullCommand():
exchange.AgbotAddBusinessPolicy(*exOrg, *exUserPw, *exAgbotAPolAg, *exAgbotAPPolOrg)
case exAgbotDelPolCmd.FullCommand():
exchange.AgbotRemoveBusinessPolicy(*exOrg, *exUserPw, *exAgbotDPolAg, *exAgbotDPPolOrg)
case exPatternListCmd.FullCommand():
exchange.PatternList(*exOrg, credToUse, *exPattern, !*exPatternLong)
case exPatternPublishCmd.FullCommand():
exchange.PatternPublish(*exOrg, *exUserPw, *exPatJsonFile, *exPatKeyFile, *exPatPubPubKeyFile, *exPatName)
case exPatternVerifyCmd.FullCommand():
exchange.PatternVerify(*exOrg, credToUse, *exVerPattern, *exPatPubKeyFile)
case exPatDelCmd.FullCommand():
exchange.PatternRemove(*exOrg, *exUserPw, *exDelPat, *exPatDelForce)
case exPatternListKeyCmd.FullCommand():
exchange.PatternListKey(*exOrg, credToUse, *exPatListKeyPat, *exPatListKeyKey)
case exPatUpdateCmd.FullCommand():
exchange.PatternUpdate(*exOrg, credToUse, *exPatUpdatePattern, *exPatUpdateJsonFile)
case exPatternRemKeyCmd.FullCommand():
exchange.PatternRemoveKey(*exOrg, *exUserPw, *exPatRemKeyPat, *exPatRemKeyKey)
case exServiceListCmd.FullCommand():
exchange.ServiceList(*exOrg, credToUse, *exService, !*exServiceLong, *exSvcOpYamlFilePath, *exSvcOpYamlForce)
case exServicePublishCmd.FullCommand():
exchange.ServicePublish(*exOrg, *exUserPw, *exSvcJsonFile, *exSvcPrivKeyFile, *exSvcPubPubKeyFile, *exSvcPubDontTouchImage, *exSvcPubPullImage, *exSvcRegistryTokens, *exSvcOverwrite, *exSvcPolicyFile, *exSvcPublic)
case exServiceVerifyCmd.FullCommand():
exchange.ServiceVerify(*exOrg, credToUse, *exVerService, *exSvcPubKeyFile)
case exSvcDelCmd.FullCommand():
exchange.ServiceRemove(*exOrg, *exUserPw, *exDelSvc, *exSvcDelForce)
case exServiceListKeyCmd.FullCommand():
exchange.ServiceListKey(*exOrg, credToUse, *exSvcListKeySvc, *exSvcListKeyKey)
case exServiceRemKeyCmd.FullCommand():
exchange.ServiceRemoveKey(*exOrg, *exUserPw, *exSvcRemKeySvc, *exSvcRemKeyKey)
case exServiceListAuthCmd.FullCommand():
exchange.ServiceListAuth(*exOrg, credToUse, *exSvcListAuthSvc, *exSvcListAuthId)
case exServiceRemAuthCmd.FullCommand():
exchange.ServiceRemoveAuth(*exOrg, *exUserPw, *exSvcRemAuthSvc, *exSvcRemAuthId)
case exServiceListPolicyCmd.FullCommand():
exchange.ServiceListPolicy(*exOrg, credToUse, *exServiceListPolicyService)
case exServiceNewPolicyCmd.FullCommand():
exchange.ServiceNewPolicy()
case exServiceAddPolicyCmd.FullCommand():
exchange.ServiceAddPolicy(*exOrg, credToUse, *exServiceAddPolicyService, *exServiceAddPolicyJsonFile)
case exServiceRemovePolicyCmd.FullCommand():
exchange.ServiceRemovePolicy(*exOrg, credToUse, *exServiceRemovePolicyService, *exServiceRemovePolicyForce)
case exServiceListnode.FullCommand():
exchange.ListServiceNodes(*exOrg, *exUserPw, *exServiceListnodeService, *exServiceListnodeNodeOrg)
case exBusinessListPolicyCmd.FullCommand():
exchange.BusinessListPolicy(*exOrg, credToUse, *exBusinessListPolicyPolicy, !*exBusinessListPolicyLong)
case exBusinessNewPolicyCmd.FullCommand():
exchange.BusinessNewPolicy()
case exBusinessAddPolicyCmd.FullCommand():
exchange.BusinessAddPolicy(*exOrg, credToUse, *exBusinessAddPolicyPolicy, *exBusinessAddPolicyJsonFile, *exBusinessAddPolNoConstraint)
case exBusinessUpdatePolicyCmd.FullCommand():
exchange.BusinessUpdatePolicy(*exOrg, credToUse, *exBusinessUpdatePolicyPolicy, *exBusinessUpdatePolicyJsonFile)
case exBusinessRemovePolicyCmd.FullCommand():
exchange.BusinessRemovePolicy(*exOrg, credToUse, *exBusinessRemovePolicyPolicy, *exBusinessRemovePolicyForce)
case exCatalogServiceListCmd.FullCommand():
exchange.CatalogServiceList(*exOrg, *exUserPw, *exCatalogServiceListShort, *exCatalogServiceListLong)
case exCatalogPatternListCmd.FullCommand():
exchange.CatalogPatternList(*exOrg, *exUserPw, *exCatalogPatternListShort, *exCatalogPatternListLong)
case regInputCmd.FullCommand():
register.CreateInputFile(*regInputOrg, *regInputPattern, *regInputArch, *regInputNodeIdTok, *regInputInputFile)
case registerCmd.FullCommand():
register.DoIt(*org, *pattern, *nodeIdTok, *userPw, *inputFile, *nodeOrgFlag, *patternFlag, *nodeName, *nodepolicyFlag, *waitServiceFlag, *waitServiceOrgFlag, *waitTimeoutFlag)
case keyListCmd.FullCommand():
key.List(*keyName, *keyListAll)
case keyCreateCmd.FullCommand():
key.Create(*keyX509Org, *keyX509CN, *keyOutputDir, *keyLength, *keyDaysValid, *keyImportFlag, *keyCreatePrivKey, *keyCreatePubKey, *keyCreateOverwrite)
case keyImportCmd.FullCommand():
key.Import(*keyImportPubKeyFile)
case keyDelCmd.FullCommand():
key.Remove(*keyDelName)
case nodeListCmd.FullCommand():
node.List()
case policyListCmd.FullCommand():
policy.List()
case policyNewCmd.FullCommand():
policy.New()
case policyUpdateCmd.FullCommand():
policy.Update(*policyUpdateInputFile)
case policyPatchCmd.FullCommand():
policy.Patch(*policyPatchInput)
case policyRemoveCmd.FullCommand():
policy.Remove(*policyRemoveForce)
case policyCompCmd.FullCommand():
deploycheck.PolicyCompatible(*deploycheckOrg, *deploycheckUserPw, *policyCompNodeId, *policyCompNodeArch, *policyCompNodeType, *policyCompNodePolFile, *policyCompBPolId, *policyCompBPolFile, *policyCompSPolFile, *policyCompSvcFile, *deploycheckCheckAll, *deploycheckLong)
case userinputCompCmd.FullCommand():
deploycheck.UserInputCompatible(*deploycheckOrg, *deploycheckUserPw, *userinputCompNodeId, *userinputCompNodeArch, *userinputCompNodeType, *userinputCompNodeUIFile, *userinputCompBPolId, *userinputCompBPolFile, *userinputCompPatternId, *userinputCompPatternFile, *userinputCompSvcFile, *deploycheckCheckAll, *deploycheckLong)
case secretCompCmd.FullCommand():
deploycheck.SecretBindingCompatible(*deploycheckOrg, *deploycheckUserPw, *secretCompNodeId, *secretCompNodeArch, *secretCompNodeType, *secretCompNodeOrg, *secretCompDepPolId, *secretCompDepPolFile, *secretCompPatternId, *secretCompPatternFile, *secretCompSvcFile, *deploycheckCheckAll, *deploycheckLong)
case allCompCmd.FullCommand():
deploycheck.AllCompatible(*deploycheckOrg, *deploycheckUserPw, *allCompNodeId, *allCompNodeArch, *allCompNodeType, *allCompNodeOrg, *allCompNodePolFile, *allCompNodeUIFile, *allCompBPolId, *allCompBPolFile, *allCompPatternId, *allCompPatternFile, *allCompSPolFile, *allCompSvcFile, *deploycheckCheckAll, *deploycheckLong)
case agreementListCmd.FullCommand():
agreement.List(*listArchivedAgreements, *listAgreementId)
case agreementCancelCmd.FullCommand():
agreement.Cancel(*cancelAgreementId, *cancelAllAgreements)
case meteringListCmd.FullCommand():
metering.List(*listArchivedMetering)
case attributeListCmd.FullCommand():
attribute.List()
case userinputListCmd.FullCommand():
userinput.List()
case userinputNewCmd.FullCommand():
userinput.New()
case userinputAddCmd.FullCommand():
userinput.Add(*userinputAddFilePath)
case userinputUpdateCmd.FullCommand():
userinput.Update(*userinputUpdateFilePath)
case userinputRemoveCmd.FullCommand():
userinput.Remove(*userinputRemoveForce)
case serviceListCmd.FullCommand():
service.List()
case serviceLogCmd.FullCommand():
service.Log(*logServiceName, *logServiceVersion, *logServiceContainerName, *logTail)
case serviceRegisteredCmd.FullCommand():
service.Registered()
case serviceConfigStateListCmd.FullCommand():
service.ListConfigState()
case serviceConfigStateSuspendCmd.FullCommand():
service.Suspend(*forceSuspendService, *suspendAllServices, *suspendServiceOrg, *suspendServiceName, *suspendServiceVersion)
case serviceConfigStateActiveCmd.FullCommand():
service.Resume(*resumeAllServices, *resumeServiceOrg, *resumeServiceName, *resumeServiceVersion)
case unregisterCmd.FullCommand():
unregister.DoIt(*forceUnregister, *removeNodeUnregister, *deepCleanUnregister, *timeoutUnregister, *containerUnregister)
case statusCmd.FullCommand():
status.DisplayStatus(*statusLong, false)
case eventlogListCmd.FullCommand():
eventlog.List(*listAllEventlogs, *listDetailedEventlogs, *listSelectedEventlogs, *listTail)
case surfaceErrorsEventlogs.FullCommand():
eventlog.ListSurfaced(*surfaceErrorsEventlogsLong)
case devServiceNewCmd.FullCommand():
dev.ServiceNew(*devHomeDirectory, *devServiceNewCmdOrg, *devServiceNewCmdName, *devServiceNewCmdVer, *devServiceNewCmdImage, *devServiceNewCmdNoImageGen, *devServiceNewCmdCfg, *devServiceNewCmdNoPattern, *devServiceNewCmdNoPolicy)
case devServiceStartTestCmd.FullCommand():
dev.ServiceStartTest(*devHomeDirectory, *devServiceUserInputFile, *devServiceConfigFile, *devServiceConfigType, *devServiceNoFSS, *devServiceStartCmdUserPw, *devServiceStartSecretsFiles)
case devServiceStopTestCmd.FullCommand():
dev.ServiceStopTest(*devHomeDirectory)
case devServiceValidateCmd.FullCommand():
dev.ServiceValidate(*devHomeDirectory, *devServiceVerifyUserInputFile, []string{}, "", *devServiceValidateCmdUserPw)
case devServiceLogCmd.FullCommand():
dev.ServiceLog(*devHomeDirectory, *devServiceLogCmdServiceName, *devServiceLogCmdContainerName, *devServiceLogCmdTail)
case devDependencyFetchCmd.FullCommand():
dev.DependencyFetch(*devHomeDirectory, *devDependencyFetchCmdProject, *devDependencyCmdSpecRef, *devDependencyCmdURL, *devDependencyCmdOrg, *devDependencyCmdVersion, *devDependencyCmdArch, *devDependencyFetchCmdUserPw, *devDependencyFetchCmdUserInputFile)
case devDependencyListCmd.FullCommand():
dev.DependencyList(*devHomeDirectory)
case devDependencyRemoveCmd.FullCommand():
dev.DependencyRemove(*devHomeDirectory, *devDependencyCmdSpecRef, *devDependencyCmdURL, *devDependencyCmdVersion, *devDependencyCmdArch, *devDependencyCmdOrg)
case agbotAgreementListCmd.FullCommand():
agreementbot.AgreementList(*agbotlistArchivedAgreements, *agbotAgreement)
case agbotAgreementCancelCmd.FullCommand():
agreementbot.AgreementCancel(*agbotCancelAgreementId, *agbotCancelAllAgreements)
case agbotListCmd.FullCommand():
agreementbot.List()
case agbotPolicyListCmd.FullCommand():
agreementbot.PolicyList(*agbotPolicyOrg, *agbotPolicyName)
case utilSignCmd.FullCommand():
utilcmds.Sign(*utilSignPrivKeyFile)
case utilVerifyCmd.FullCommand():
utilcmds.Verify(*utilVerifyPubKeyFile, *utilVerifySig)
case agbotStatusCmd.FullCommand():
status.DisplayStatus(*agbotStatusLong, true)
case utilConfigConvCmd.FullCommand():
utilcmds.ConvertConfig(*utilConfigConvFile)
case mmsStatusCmd.FullCommand():
sync_service.Status(*mmsOrg, *mmsUserPw)
case mmsObjectListCmd.FullCommand():
sync_service.ObjectList(*mmsOrg, *mmsUserPw, *mmsObjectListType, *mmsObjectListId, *mmsObjectListDestinationPolicy, *mmsObjectListDPService, *mmsObjectListDPProperty, *mmsObjectListDPUpdateTime, *mmsObjectListDestinationType, *mmsObjectListDestinationId, *mmsObjectListWithData, *mmsObjectListExpirationTime, *mmsObjectListDeleted, *mmsObjectListLong, *mmsObjectListDetail)
case mmsObjectNewCmd.FullCommand():
sync_service.ObjectNew(*mmsOrg)
case mmsObjectPublishCmd.FullCommand():
sync_service.ObjectPublish(*mmsOrg, *mmsUserPw, *mmsObjectPublishType, *mmsObjectPublishId, *mmsObjectPublishPat, *mmsObjectPublishDef, *mmsObjectPublishObj, *mmsObjectPublishNoChunkUpload, *mmsObjectPublishChunkUploadDataSize, *mmsObjectPublishSkipIntegrityCheck, *mmsObjectPublishDSHashAlgo, *mmsObjectPublishDSHash, *mmsObjectPublishPrivKeyFile)
case mmsObjectDeleteCmd.FullCommand():
sync_service.ObjectDelete(*mmsOrg, *mmsUserPw, *mmsObjectDeleteType, *mmsObjectDeleteId)
case mmsObjectDownloadCmd.FullCommand():
sync_service.ObjectDownLoad(*mmsOrg, *mmsUserPw, *mmsObjectDownloadType, *mmsObjectDownloadId, *mmsObjectDownloadFile, *mmsObjectDownloadOverwrite, *mmsObjectDownloadSkipIntegrityCheck)
// DEPRECATED (voucherInspectCmd, voucherImportCmd, voucherListCmd are deprecated commands)
case voucherInspectCmd.FullCommand():
sdo.DeprecatedVoucherInspect(*voucherInspectFile)
case voucherImportCmd.FullCommand():
sdo.DeprecatedVoucherImport(*voucherOrg, *voucherUserPw, *voucherImportFile, *voucherImportExample, *voucherImportPolicy, *voucherImportPattern)
case voucherListCmd.FullCommand():
sdo.DeprecatedVoucherList(*voucherOrg, *voucherUserPw, *voucherToList, !*voucherListLong)
case sdoKeyCreateCmd.FullCommand():
sdo.KeyCreate(*sdoOrg, *sdoUserPw, *sdoKeyCreateInputFile, *sdoKeyCreateFile, *sdoKeyCreateOverwrite)
case sdoKeyListCmd.FullCommand():
sdo.KeyList(*sdoOrg, *sdoUserPw, *sdoKeyToList)
case sdoKeyDownloadCmd.FullCommand():
sdo.KeyDownload(*sdoOrg, *sdoUserPw, *sdoKeyToDownload, *sdoKeyDownloadFile, *sdoKeyDownloadOverwrite)
case sdoKeyRemoveCmd.FullCommand():
sdo.KeyRemove(*sdoOrg, *sdoUserPw, *sdoKeyToRemove)
case sdoKeyNewCmd.FullCommand():
sdo.KeyNew(*sdoKeyNewFile, *sdoKeyNewOverwrite)
case sdoVoucherInspectCmd.FullCommand():
sdo.VoucherInspect(*sdoVoucherInspectFile)
case sdoVoucherImportCmd.FullCommand():
sdo.VoucherImport(*sdoOrg, *sdoUserPw, *sdoVoucherImportFile, *sdoVoucherImportExample, *sdoVoucherImportPolicy, *sdoVoucherImportPattern)
case sdoVoucherListCmd.FullCommand():
sdo.VoucherList(*sdoOrg, *sdoUserPw, *sdoVoucherToList, !*sdoVoucherListLong)
case sdoVoucherDownloadCmd.FullCommand():
sdo.VoucherDownload(*sdoOrg, *sdoUserPw, *sdoVoucherDownloadDevice, *sdoVoucherDownloadFile, *sdoVoucherDownloadOverwrite)
case smSecretListCmd.FullCommand():
secret_manager.SecretList(*smOrg, *smUserPw, *smSecretListName)
case smSecretAddCmd.FullCommand():
secret_manager.SecretAdd(*smOrg, *smUserPw, *smSecretAddName, *smSecretAddFile, *smSecretAddKey, *smSecretAddDetail, *smSecretAddOverwrite)
case smSecretRemoveCmd.FullCommand():
secret_manager.SecretRemove(*smOrg, *smUserPw, *smSecretRemoveName, *smSecretRemoveForce)
case smSecretReadCmd.FullCommand():
secret_manager.SecretRead(*smOrg, *smUserPw, *smSecretReadName)
}
}
|
[
"\"HZN_ORG_ID\"",
"\"HZN_EXCHANGE_USER_AUTH\""
] |
[] |
[
"HZN_ORG_ID",
"HZN_EXCHANGE_USER_AUTH"
] |
[]
|
["HZN_ORG_ID", "HZN_EXCHANGE_USER_AUTH"]
|
go
| 2 | 0 | |
controllers/servicebinding/suite_test.go
|
/*
Copyright 2020 The KubePreset Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package servicebinding_test
import (
"os"
"path/filepath"
"strings"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
uzap "go.uber.org/zap"
"go.uber.org/zap/zapcore"
apixv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/client-go/rest"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/controller-runtime/pkg/manager"
sbv1alpha2 "github.com/kubepreset/kubepreset/apis/servicebinding/v1alpha2"
"github.com/kubepreset/kubepreset/controllers/servicebinding"
// +kubebuilder:scaffold:imports
)
// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
var cfg *rest.Config
var k8sClient client.Client
var testEnv *envtest.Environment
var k8sManager manager.Manager
// logLevel holds the current log level
var logLevel zapcore.Level
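// initializeLogLevel maps the LOG_LEVEL environment variable (TRACE, DEBUG, or anything else) to the matching zapcore level (-2, -1, or 0 respectively).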
func initializeLogLevel() {
logLvl := os.Getenv("LOG_LEVEL")
logLvl = strings.ToUpper(logLvl)
switch {
case logLvl == "TRACE":
logLevel = -2
case logLvl == "DEBUG":
logLevel = -1
default:
logLevel = 0
}
}
func TestAPIs(t *testing.T) {
RegisterFailHandler(Fail)
initializeLogLevel()
RunSpecsWithDefaultAndCustomReporters(t,
"Controller Suite",
[]Reporter{printer.NewlineReporter{}})
}
var _ = BeforeSuite(func(done Done) {
level := uzap.NewAtomicLevelAt(logLevel)
logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true), zap.Level(&level)))
By("bootstrapping test environment")
testEnv = &envtest.Environment{
CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")},
}
var err error
cfg, err = testEnv.Start()
Expect(err).ToNot(HaveOccurred())
Expect(cfg).ToNot(BeNil())
err = apixv1.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
err = sbv1alpha2.AddToScheme(scheme.Scheme)
Expect(err).NotTo(HaveOccurred())
// +kubebuilder:scaffold:scheme
k8sManager, err = ctrl.NewManager(cfg, ctrl.Options{
Scheme: scheme.Scheme,
})
Expect(err).ToNot(HaveOccurred())
err = (&servicebinding.Reconciler{
Client: k8sManager.GetClient(),
Log: ctrl.Log.WithName("controllers.servicebinding").WithName("ServiceBinding"),
}).SetupWithManager(k8sManager)
Expect(err).ToNot(HaveOccurred())
go func() {
defer GinkgoRecover()
err := k8sManager.Start(ctrl.SetupSignalHandler())
Expect(err).ToNot(HaveOccurred())
}()
k8sClient = k8sManager.GetClient()
Expect(k8sClient).ToNot(BeNil())
close(done)
}, 60)
var _ = AfterSuite(func() {
By("tearing down the test environment")
err := testEnv.Stop()
Expect(err).ToNot(HaveOccurred())
})
|
[
"\"LOG_LEVEL\""
] |
[] |
[
"LOG_LEVEL"
] |
[]
|
["LOG_LEVEL"]
|
go
| 1 | 0 | |
01_Introduction/C0106_operations.py
|
# -*- encoding: utf-8 -*-
"""
@Author : zYx.Tom
@Contact : [email protected]
@site : https://github.com/zhuyuanxiang/tensorflow_cookbook
---------------------------
@Software : PyCharm
@Project : TensorFlow_Machine_Learning_Cookbook
@File : C0106_operations.py
@Version : v0.1
@Time : 2019-10-29 14:11
@License : (C)Copyright 2018-2019, zYx.Tom
@Reference : "TensorFlow Machine Learning Cookbook", Nick McClure, Sec0106, P110
@Desc : TensorFlow basics, declaring operations
"""
# common imports
import os
import sys
import matplotlib.pyplot as plt
import numpy as np  # pip install numpy<1.17; versions below 1.17 do not raise the error
import sklearn
import tensorflow as tf
import winsound
from tensorflow.python.framework import ops
from tools import show_values
# Set numpy print options: precision of 8 decimal places, suppress scientific notation, no truncation
np.set_printoptions(precision = 8, suppress = True, threshold = np.inf, linewidth = 200)
# Fix the random seed so the random data is stable and every test run gives the same results
np.random.seed(42)
# Reset the default computation graph
ops.reset_default_graph()
# Python ≥3.5 is required
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
assert sklearn.__version__ >= "0.20"
# Suppress the warning: Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Open graph session
sess = tf.Session()
show_values(tf.div(3, 4), "tf.div(3,4) = integer division")
show_values(tf.truediv(3, 4), "tf.truediv(3,4) = true (float) division")
show_values(tf.floordiv(3.0, 4.0), "tf.floordiv(3.0,4.0) = floor division")
show_values(tf.mod(22.0, 5.0), "tf.mod(22.0,5.0) = modulo")
# Cross product -- compute the pairwise cross product
# Cross product: the cross product of two vectors, also called the vector product or outer product; its result is a vector, not a scalar.
# The cross product of two vectors is perpendicular to the plane spanned by those two vectors.
show_values(tf.cross([1., 0., 0.], [0., 1., 0.]),
            "tf.cross([1., 0., 0.], [0., 1., 0.]) = cross product")
# tf.cross requires 3-dimensional vectors
# show_values(tf.cross([1., 0., 0., 0.], [0., 1., 0., 0.]),
#             "tf.cross([1., 0., 0.,0.], [0., 1., 0.,0.]) = cross product")
# ToSee: P11, list of math functions
show_values(tf.div(tf.sin(3.1416 / 4.), tf.cos(3.1416 / 4.)),
"tan(pi/4) = 1 = tf.div(tf.sin(3.1416/4.),tf.cos(3.1416/4.))")
test_nums = range(15)
# What should we get with list comprehension
expected_output = [3 * x * x - x + 10 for x in test_nums]
print('-' * 50)
print("[3 * x ^ 2 - x + 10 for x in test_nums] = ")
print(expected_output)
# Custom function
# 3x^2 - x + 10, for x = 11 =>
def custom_polynomial(value):
# return tf.subtract(3 * tf.square(value), value) + 10
return 3 * tf.square(value) - value + 10
show_values(custom_polynomial(11), "custom_polynomial(11) = 3x^2-x+10,x=11=>")
for num in test_nums:
show_values(custom_polynomial(num), "custom_polynomial({})".format(num))
# -----------------------------------------------------------------
# Beep to signal that the run has finished
winsound.Beep(600, 500)
if len(plt.get_fignums()) != 0:
plt.show()
pass
|
[] |
[] |
[
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["TF_CPP_MIN_LOG_LEVEL"]
|
python
| 1 | 0 | |
psp/main_gcp.py
|
################################################################################
##### Entry script for psp_gcp dir for training on Google Cloud Platform #####
################################################################################
# import required modules and dependencies
import tensorflow as tf
import argparse
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Bidirectional, LSTM, Input, Conv1D, \
Embedding, Dense, Dropout, Activation, Concatenate, Reshape,MaxPooling1D, Convolution1D,BatchNormalization
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2
from tensorflow.keras.callbacks import EarlyStopping ,ModelCheckpoint, TensorBoard, \
ReduceLROnPlateau, LearningRateScheduler, CSVLogger
from tensorflow.keras.metrics import AUC, MeanSquaredError, FalseNegatives, FalsePositives, \
MeanAbsoluteError, TruePositives, TrueNegatives, Precision, Recall
from tensorflow.keras import activations
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.compat.v1.keras.backend import set_session
import os
from os.path import isfile, join
from os import listdir
import sys
import time
import importlib
import pkgutil
import json
from google.cloud import storage
from json.decoder import JSONDecodeError
from psp.load_dataset import *
from psp.plot_model import *
from psp.gcp_utils import *
from psp._globals import *
from psp.evaluate import *
from psp.models import *
from psp.models.auxiliary_models import *
import warnings
warnings.filterwarnings("ignore", message=r"Passing", category=FutureWarning)
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' #reduce TF log output to only include Errors
### Tensorboard parameters and configuration ###
tf.compat.v1.reset_default_graph()
tf.keras.backend.clear_session() # For easy reset of notebook state.
# gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
config_proto = tf.compat.v1.ConfigProto()
tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.333)
config_proto.allow_soft_placement = True
off = rewriter_config_pb2.RewriterConfig.OFF
config_proto.gpu_options.allow_growth = True
config_proto.graph_options.rewrite_options.arithmetic_optimization = off
#set tensorflow GPUOptions so TF doesn't overload GPU if present
# config_proto.gpu_options(per_process_gpu_memory_fraction=0.333)
session = tf.compat.v1.Session(config=config_proto)
# tf.Session(config=tf.compat.v1.ConfigProto(log_device_placement=True))
set_session(session)
#get model filenames from models and auxiliary models directory
all_models = [name for _, name, _ in pkgutil.iter_modules([os.path.join('psp','models')])]
all_models = all_models + [name for _, name, _ in pkgutil.iter_modules([os.path.join('psp','models','auxiliary_models')])]
#main function to train and evaluate CNN + RNN + DNN model
def main(args):
"""
Description:
Main function for training, evaluating and plotting PSP models via GCP.
Args:
:args (dict): parsed input arguments.
Returns:
None
"""
#load json from config input parameters
params = json.loads(args.params)
gcp_params = json.loads(args.gcp_params)
model_params = json.loads(args.model_params)
#get input arguments
config = args.config
local = args.local
job_dir = args.job_dir
package_path = gcp_params["package_path"]
bucket = gcp_params["bucket"]
training_data = params["training_data"]
filtered = params["filtered"]
batch_size = int(params["batch_size"])
epochs = int(params["epochs"])
logs_path = str(params["logs_path"])
cuda = params["cuda"]
tpu = gcp_params["tpu"]
test_dataset = str(params["test_dataset"])
model_ = str(params["model"])
tf_version = tf.__version__
lr_scheduler = str(model_params["lr_scheduler"])
callbacks = (model_params["callbacks"])
#if using TPU, initialise TensorFlow TPU Strategy
if (tpu):
tpu_strategy = setup_tpu()
#initialise global GCP bucket variable
initialise_bucket(bucket)
#create data dir to store all training and test datasets
if not os.path.exists(DATA_DIR):
os.makedirs(DATA_DIR)
#create output dir to store model training output
if not os.path.exists(OUTPUT_DIR):
os.makedirs(OUTPUT_DIR)
#create folder where all model assets and artifacts will be stored after training
model_output_folder = os.path.join(os.path.join(OUTPUT_DIR, model_ + '_'+ current_datetime))
os.makedirs(model_output_folder)
#create logs path directory where TensorBoard logs will be stored
if not os.path.exists(os.path.join(model_output_folder, logs_path)):
os.makedirs(os.path.join(model_output_folder, logs_path))
#create checkpoints dir where model checkpoints will be saved
if not os.path.exists(os.path.join(model_output_folder, 'checkpoints')):
os.makedirs(os.path.join(model_output_folder, 'checkpoints'))
#append parameters to model output results file
model_output["Output Folder"] = model_output_folder
model_output["Config"] = os.path.basename(config)
model_output["Model"] = model_
model_output["Bucket"] = bucket
model_output["Training Dataset Type"] = training_data
model_output["Filtered?"] = filtered
model_output["Test Dataset"] = test_dataset
model_output["Number of epochs"] = epochs
model_output["Batch size"] = batch_size
model_output["Tensorflow Version"] = tf_version
model_output["TensorBoard logs dir"] = os.path.join(model_output_folder, logs_path)
model_output["Cuda"] = cuda
model_output["TPU"] = tpu
model_output["LR Scheduler"] = lr_scheduler
#load training dataset
cullpdb = CullPDB(type=training_data, filtered=filtered)
all_models.append(model_)
#verify model specified in config parameter is an available trainable model
if model_ not in all_models:
raise ValueError('Model must be in available models.')
#import model module from models or auxiliary models folder
if (model_!="psp_dcblstm_model" and model_!="psp_dculstm_model" and model_!="dummy_model"):
mod = importlib.import_module(package_path + ".models.auxiliary_models."+model_)
else:
mod = importlib.import_module(package_path + ".models."+model_)
#build imported model with parameters from config
model = mod.build_model(model_params)
all_callbacks = []
#initialise Tensorflow callbacks, append each callback if used
if (callbacks["tensorboard"]):
tensorboard = tf.keras.callbacks.TensorBoard(log_dir=(os.path.join(model_output_folder,
logs_path)), histogram_freq=0, write_graph=True, write_images=True)
all_callbacks.append(tensorboard)
if (callbacks["earlyStopping"]):
earlyStopping = EarlyStopping(monitor='loss', patience=5, verbose=1, mode='min')
all_callbacks.append(earlyStopping)
if (callbacks["modelCheckpoint"]):
checkpoint = ModelCheckpoint(filepath=os.path.join(model_output_folder, 'checkpoints','model_' + current_datetime + '.h5'), \
verbose=1,save_best_only=True, monitor='loss', mode='min')
all_callbacks.append(checkpoint)
if (callbacks["csv_logger"]):
csv_logger = CSVLogger(os.path.join(model_output_folder, 'training.log'))
all_callbacks.append(csv_logger)
if (callbacks["reduceLROnPlateau"]):
reduceLROnPlateau = ReduceLROnPlateau(monitor="loss", factor=0.1, patience=10, verbose=1, mode="min")
all_callbacks.append(reduceLROnPlateau)
#get LR Scheduler callback to use from parameter in config file
#remove any whitespace or '-' from lr_schedule name
lr_scheduler = lr_scheduler.lower().strip().replace(" ", "").replace("-","")
if (lr_scheduler == "exceptionaldecay" or lr_scheduler == "exponential"):
exponentialDecay = ExponentialDecay()
lr_schedule = LearningRateScheduler(exponentialDecay)
all_callbacks.append(lr_schedule)
elif (lr_scheduler == "timebaseddecay" or lr_scheduler == "timebased"):
timeBasedDecay = TimedBased()
lr_schedule = LearningRateScheduler(timeBasedDecay)
all_callbacks.append(lr_schedule)
elif (lr_scheduler == "stepdecay" or lr_scheduler == "exponential"):
stepDecay = StepDecay()
lr_schedule = LearningRateScheduler(stepDecay)
all_callbacks.append(lr_schedule)
#start counter
start = time.time()
#fit model
if cuda:
with tf.device('/gpu:0'): #if training on GPU
print('Fitting model...')
history = model.fit({'main_input': cullpdb.train_hot, 'aux_input': cullpdb.trainpssm},
{'main_output': cullpdb.trainlabel},validation_data=({'main_input': cullpdb.val_hot, 'aux_input': cullpdb.valpssm},
{'main_output': cullpdb.vallabel}), epochs=epochs, batch_size=batch_size, verbose=2,
callbacks=all_callbacks,shuffle=True)
else: #training on CPU (default) or TPU
print('Fitting model...')
history = model.fit({'main_input': cullpdb.train_hot, 'aux_input': cullpdb.trainpssm},
{'main_output': cullpdb.trainlabel},validation_data=({'main_input': cullpdb.val_hot, 'aux_input': cullpdb.valpssm},
{'main_output': cullpdb.vallabel}), epochs=epochs, batch_size=batch_size, verbose=2,
callbacks=all_callbacks,shuffle=True)
#stop counter, calculate elapsed time
elapsed = (time.time() - start)
print('Elapsed Training Time: {}'.format(elapsed))
model_output["Training Time"] = elapsed
#save model locally in saved models dir - create dir in this dir to store all model related objects
print('Model saved in {} folder as {} '.format(
os.path.dirname(model_output_folder), os.path.basename(os.path.join(model_output_folder, 'model.h5'))))
model.save(os.path.join(model_output_folder, 'model.h5'))
#save model history pickle
history_filepath = os.path.join(model_output_folder, 'history.pckl')
save_history(history, history_filepath)
#plot model history and all metric plots
plot_history(history.history, model_output_folder, show_histograms = False,
show_boxplots = True, show_kde = True, filter_outliers = True)
#evaluating model on test datasets
evaluate_cullpdb(model,cullpdb)
evaluate_model(model, test_dataset=test_dataset)
#visualise Keras model and all its layers, store in png
#Need to manually install graphviz (https://graphviz.gitlab.io/download/) etc...
if (local=="1"):
visualise_model(model, model_output_folder)
#save model architecture
with open(os.path.join(model_output_folder, "model_architecture.json"), "w") as model_arch:
model_arch.write(model.to_json(indent=3))
#getting output results from model into csv
model_output_df = get_model_output(model_output_folder)
#upload configuration json to storage bucket
#the local flag is used because config file upload only seems to work when training locally, not on GCP
if (local=="1"):
upload_file(os.path.join(model_output_folder,os.path.basename(config)),config)
# upload model output folder and all training results and assets
upload_directory(model_output_folder, model_output_folder)
print('Model training files exported to bucket path: {}/{} '.format(bucket, model_output_folder))
#append training results of current job to all results file
append_all_output(model_output_df)
#close tensorflow session
session.close()
if __name__ == "__main__":
#############################################################
### PSP Input Arguments ###
#############################################################
parser = argparse.ArgumentParser(description='Protein Secondary Structure Prediction')
parser.add_argument('-local', '--local', required=True,
help='Flag to determine if job being run locally or on GCP.')
parser.add_argument('-job-dir', '--job-dir', type=str, required=True,
help='Directory where logs from training job are stored.')
parser.add_argument('-config', '--config', type=str, required=True,
help='File path to config json file.')
parser.add_argument('-params', '--params', type=str, required=True,
help='General training parameters')
parser.add_argument('-gcp_params', '--gcp_params', type=str, required=True,
help='GCP job parameters')
parser.add_argument('-model_params', '--model_params', type=str, required=True,
help='ML model parameters')
#parse input arguments
args = parser.parse_args()
main(args)
|
[] |
[] |
[
"TF_CPP_MIN_LOG_LEVEL"
] |
[]
|
["TF_CPP_MIN_LOG_LEVEL"]
|
python
| 1 | 0 | |
config.go
|
package main
import (
"errors"
"io/ioutil"
"os"
"path/filepath"
"github.com/kurrik/oauth1a"
"github.com/kurrik/twittergo"
"gopkg.in/v1/yaml"
)
type Config struct {
OAuth struct {
Key string
Secret string
}
}
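// ReadConfig parses the YAML configuration file at path and validates that the OAuth key and secret are set.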
func ReadConfig(path string) (*Config, error) {
buf, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
var config Config
err = yaml.Unmarshal(buf, &config)
if err != nil {
return nil, err
}
// validate that required fields were set
if config.OAuth.Key == "" {
return nil, errors.New("missing field: oauth key")
}
if config.OAuth.Secret == "" {
return nil, errors.New("missing field: oauth secret")
}
return &config, nil
}
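// LoadConfig loads the configuration from $HOME/.config/twackup/oauth.yaml.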
func LoadConfig() (*Config, error) {
home := os.Getenv("HOME")
if home == "" {
return nil, errors.New("HOME is not set in environment")
}
path := filepath.Join(home, ".config", "twackup", "oauth.yaml")
config, err := ReadConfig(path)
return config, err
}
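// GetCredentials builds a twittergo client from the configured OAuth consumer key and secret.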
func GetCredentials(config *Config) (client *twittergo.Client) {
oc := &oauth1a.ClientConfig{
ConsumerKey: config.OAuth.Key,
ConsumerSecret: config.OAuth.Secret,
}
return twittergo.NewClient(oc, nil)
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
go
| 1 | 0 | |
tests/test_haystack_watch_poll.py
|
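"""Unit tests for the Haystack watch_poll operation using the ping provider."""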
from unittest.mock import patch
import haystackapi
from haystackapi import Grid, MARKER
from haystackapi.ops import HaystackHttpRequest
from haystackapi.providers import ping
@patch.dict('os.environ', {'HAYSTACK_PROVIDER': 'haystackapi.providers.ping'})
@patch.object(ping.Provider, 'watch_poll')
def test_watch_poll_with_zinc(mock) -> None:
# GIVEN
mock.return_value = ping.PingGrid
mime_type = haystackapi.MODE_ZINC
request = HaystackHttpRequest()
grid: Grid = haystackapi.Grid(
metadata={'watchId': "0123456789ABCDEF",
'refresh': MARKER
},
columns=["empty"])
grid.append({})
request.headers["Content-Type"] = mime_type
request.headers["Accept"] = mime_type
request.body = haystackapi.dump(grid, mode=haystackapi.MODE_ZINC)
# WHEN
response = haystackapi.watch_poll(request, "dev")
# THEN
mock.assert_called_once_with("0123456789ABCDEF", True)
assert response.status_code == 200
assert response.headers["Content-Type"].startswith(mime_type)
assert haystackapi.parse(response.body, haystackapi.MODE_ZINC) is not None
@patch.dict('os.environ', {'HAYSTACK_PROVIDER': 'haystackapi.providers.ping'})
@patch.object(ping.Provider, 'watch_poll')
def test_watch_poll_with_args(mock) -> None:
# GIVEN
mock.return_value = ping.PingGrid
mime_type = haystackapi.MODE_ZINC
request = HaystackHttpRequest()
request.headers["Accept"] = mime_type
request.args["watchId"] = "0123456789ABCDEF"
request.args["refresh"] = True
# WHEN
response = haystackapi.watch_poll(request, "dev")
# THEN
mock.assert_called_once_with("0123456789ABCDEF", True)
assert response.status_code == 200
assert response.headers["Content-Type"].startswith(mime_type)
assert haystackapi.parse(response.body, haystackapi.MODE_ZINC) is not None
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
server/routes.go
|
package server
import (
"net/http"
"path"
"strings"
"github.com/go-chi/render"
"github.com/go-chi/chi"
"github.com/rsoury/buyte/conf"
"github.com/rsoury/buyte/pkg/util"
)
// SetupRoutes configures all the routes for this service
func (s *Server) SetupRoutes() {
// Version: 1.2.3 -- Major.Minor.Maintenance
// Get version, get major version number.
versionSplit := strings.Split(conf.Version, ".")
var major string
if len(versionSplit) > 0 {
major = versionSplit[0]
} else {
major = "0"
}
s.logger.Debugw("Instantiate routes", "version", "/v"+major)
// Health checks
s.router.Get("/", func(w http.ResponseWriter, r *http.Request) {
render.NoContent(w, r)
})
s.router.Route("/v"+major, func(r chi.Router) {
r.Post("/charges", s.CreateCharge())
r.Get("/charges/{id}", s.GetCharge())
// r.Post("/charges/{id}")
// r.Post("/charges/{id}/capture")
r.Get("/token/{id}", s.GetPaymentToken())
// Wrap all routes accessable using the Public Key with a /public route.
r.Route("/public", func(r chi.Router) {
// Once it passes the authorizer, which basically asks whether it is a public key and, if so, whether a public endpoint is being hit, we need to obtain the public key and the checkout_id and then try to get the checkout details for the given user's checkout.
r.Route("/checkout", func(r chi.Router) {
r.Get("/{id}", s.GetFullCheckout())
})
r.Route("/applepay", func(r chi.Router) {
r.Post("/session", s.GetApplePaySession())
r.Post("/process", s.ProcessApplePayResponse())
})
r.Route("/googlepay", func(r chi.Router) {
r.Post("/process", s.ProcessGooglePayResponse())
})
})
})
}
// Routes specifically for development
func (s *Server) SetupDevRoutes() {
// Setup Apple Pay Paths
root := http.Dir(path.Join(util.DirName(), "../examples/applepay"))
prefix := "/dev/applepay"
fs := http.FileServer(root)
sFs := http.StripPrefix(prefix, fs)
s.router.Route("/.well-known", func(r chi.Router) {
r.Get("/*", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
fs.ServeHTTP(w, r)
}))
})
s.router.Route(prefix, func(r chi.Router) {
r.Get("/*", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
sFs.ServeHTTP(w, r)
}))
})
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
src/aks-preview/azext_aks_preview/custom.py
|
# pylint: disable=too-many-lines
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import binascii
import datetime
import errno
import io
import json
import os
import os.path
import platform
import re
import ssl
import stat
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import base64
import webbrowser
import zipfile
from distutils.version import StrictVersion
from math import isnan
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
import requests
from knack.log import get_logger
from knack.util import CLIError
from knack.prompting import prompt_pass, NoTTYException
import yaml # pylint: disable=import-error
from dateutil.relativedelta import relativedelta # pylint: disable=import-error
from dateutil.parser import parse # pylint: disable=import-error
from msrestazure.azure_exceptions import CloudError
import colorama # pylint: disable=import-error
from tabulate import tabulate # pylint: disable=import-error
from azure.cli.core.api import get_config_dir
from azure.cli.core.azclierror import ManualInterrupt, InvalidArgumentValueError, UnclassifiedUserFault, CLIInternalError, FileOperationError, ClientRequestError, DeploymentError, ValidationError, ArgumentUsageError, MutuallyExclusiveArgumentError, RequiredArgumentMissingError, ResourceNotFoundError
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import get_file_json, in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core._profile import Profile
from azure.graphrbac.models import (ApplicationCreateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters)
from .vendored_sdks.azure_mgmt_preview_aks.v2021_05_01.models import (ContainerServiceLinuxProfile,
ManagedClusterWindowsProfile,
ContainerServiceNetworkProfile,
ManagedClusterServicePrincipalProfile,
ContainerServiceSshConfiguration,
MaintenanceConfiguration,
TimeInWeek,
TimeSpan,
ContainerServiceSshPublicKey,
ManagedCluster,
ManagedClusterAADProfile,
ManagedClusterAddonProfile,
ManagedClusterAgentPoolProfile,
AgentPool,
AgentPoolUpgradeSettings,
ContainerServiceStorageProfileTypes,
ManagedClusterIdentity,
ManagedClusterAPIServerAccessProfile,
ManagedClusterSKU,
Components1Umhcm8SchemasManagedclusteridentityPropertiesUserassignedidentitiesAdditionalproperties,
ManagedClusterAutoUpgradeProfile,
KubeletConfig,
LinuxOSConfig,
ManagedClusterHTTPProxyConfig,
SysctlConfig,
ManagedClusterPodIdentityProfile,
ManagedClusterPodIdentity,
ManagedClusterPodIdentityException,
UserAssignedIdentity,
RunCommandRequest,
ComponentsQit0EtSchemasManagedclusterpropertiesPropertiesIdentityprofileAdditionalproperties)
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import get_msi_client
from ._client_factory import cf_resources
from ._client_factory import get_resource_by_name
from ._client_factory import cf_container_registry_service
from ._client_factory import cf_storage
from ._client_factory import cf_agent_pools
from ._helpers import (_populate_api_server_access_profile, _set_vm_set_type,
_set_outbound_type, _parse_comma_separated_list,
_trim_fqdn_name_containing_hcp)
from ._loadbalancer import (set_load_balancer_sku, is_load_balancer_profile_provided,
update_load_balancer_profile, create_load_balancer_profile)
from ._consts import CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME
from ._consts import CONST_MONITORING_ADDON_NAME
from ._consts import CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID
from ._consts import CONST_MONITORING_USING_AAD_MSI_AUTH
from ._consts import CONST_VIRTUAL_NODE_ADDON_NAME
from ._consts import CONST_VIRTUAL_NODE_SUBNET_NAME
from ._consts import CONST_AZURE_POLICY_ADDON_NAME
from ._consts import CONST_KUBE_DASHBOARD_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID, CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME
from ._consts import CONST_INGRESS_APPGW_SUBNET_CIDR, CONST_INGRESS_APPGW_SUBNET_ID
from ._consts import CONST_INGRESS_APPGW_WATCH_NAMESPACE
from ._consts import CONST_SCALE_SET_PRIORITY_REGULAR, CONST_SCALE_SET_PRIORITY_SPOT, CONST_SPOT_EVICTION_POLICY_DELETE
from ._consts import CONST_CONFCOM_ADDON_NAME, CONST_ACC_SGX_QUOTE_HELPER_ENABLED
from ._consts import CONST_OPEN_SERVICE_MESH_ADDON_NAME
from ._consts import CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME, CONST_SECRET_ROTATION_ENABLED
from ._consts import CONST_MANAGED_IDENTITY_OPERATOR_ROLE, CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID
from ._consts import ADDONS
from .maintenanceconfiguration import aks_maintenanceconfiguration_update_internal
from ._consts import CONST_PRIVATE_DNS_ZONE_SYSTEM, CONST_PRIVATE_DNS_ZONE_NONE
logger = get_logger(__name__)
def which(binary):
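"""Return the full path to the named binary on PATH, or None if it cannot be found."""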
path_var = os.getenv('PATH')
if platform.system() == 'Windows':
binary = binary + '.exe'
parts = path_var.split(';')
else:
parts = path_var.split(':')
for part in parts:
bin_path = os.path.join(part, binary)
if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
return bin_path
return None
def wait_then_open(url):
"""
Waits for a bit then opens a URL. Useful for waiting for a proxy to come up and then opening the URL.
"""
for _ in range(1, 10):
try:
urlopen(url, context=_ssl_context())
except URLError:
time.sleep(1)
break
webbrowser.open_new_tab(url)
def wait_then_open_async(url):
"""
Spawns a thread that waits for a bit then opens a URL.
"""
t = threading.Thread(target=wait_then_open, args=(url,))
t.daemon = True
t.start()
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
# added in python 2.7.13 and 3.6
return ssl.SSLContext(ssl.PROTOCOL_TLS)
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
# use get_progress_controller
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Creating service principal', value=0, total_val=1.0)
logger.info('Creating service principal')
# always create application with 5 years expiration
start_date = datetime.datetime.utcnow()
end_date = start_date + relativedelta(years=5)
result = create_application(rbac_client.applications, name, url, [url], password=client_secret,
start_date=start_date, end_date=end_date)
service_principal = result.app_id # pylint: disable=no-member
for x in range(0, 10):
hook.add(message='Creating service principal',
value=0.1 * x, total_val=1.0)
try:
create_service_principal(
cli_ctx, service_principal, rbac_client=rbac_client)
break
# TODO figure out what exception AAD throws here sometimes.
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
time.sleep(2 + 2 * x)
else:
return False
hook.add(message='Finished service principal creation',
value=1.0, total_val=1.0)
logger.info('Finished service principal creation')
return service_principal
def _add_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal=True, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to propagate',
value=0, total_val=1.0)
logger.info('Waiting for AAD role to propagate')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to propagate',
value=0.1 * x, total_val=1.0)
try:
# TODO: break this out into a shared utility library
create_role_assignment(
cli_ctx, role, service_principal_msi_id, is_service_principal, scope=scope)
break
except CloudError as ex:
if ex.message == 'The role assignment already exists.':
break
logger.info(ex.message)
except: # pylint: disable=bare-except
pass
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
logger.info('AAD role propagation done')
return True
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
logger.info('Waiting for AAD role to delete')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to delete',
value=0.1 * x, total_val=1.0)
try:
delete_role_assignments(cli_ctx,
role=role,
assignee=service_principal,
scope=scope)
break
except CLIError as ex:
raise ex
except CloudError as ex:
logger.info(ex)
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
logger.info('AAD role deletion done')
return True
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub(
'[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
# pylint: disable=too-many-locals
def store_acs_service_principal(subscription_id, client_secret, service_principal,
file_name='acsServicePrincipal.json'):
obj = {}
if client_secret:
obj['client_secret'] = client_secret
if service_principal:
obj['service_principal'] = service_principal
config_path = os.path.join(get_config_dir(), file_name)
full_config = load_service_principals(config_path=config_path)
if not full_config:
full_config = {}
full_config[subscription_id] = obj
with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
'w+') as spFile:
json.dump(full_config, spFile)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
config_path = os.path.join(get_config_dir(), file_name)
config = load_service_principals(config_path)
if not config:
return None
return config.get(subscription_id)
def load_service_principals(config_path):
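"""Load the stored service principal configuration from config_path, returning None if the file is missing or unparsable."""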
if not os.path.exists(config_path):
return None
fd = os.open(config_path, os.O_RDONLY)
try:
with os.fdopen(fd) as f:
return shell_safe_json_parse(f.read())
except: # pylint: disable=bare-except
return None
def _invoke_deployment(cmd, resource_group_name, deployment_name, template, parameters, validate, no_wait,
subscription_id=None):
from azure.cli.core.profiles import ResourceType
DeploymentProperties = cmd.get_models(
'DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
properties = DeploymentProperties(
template=template, parameters=parameters, mode='incremental')
smc = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
subscription_id=subscription_id).deployments
if validate:
logger.info('==== BEGIN TEMPLATE ====')
logger.info(json.dumps(template, indent=2))
logger.info('==== END TEMPLATE ====')
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
validation_poller = smc.begin_validate(resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
else:
return smc.validate(resource_group_name, deployment_name, deployment)
return sdk_no_wait(no_wait, smc.begin_create_or_update, resource_group_name, deployment_name, deployment)
def create_application(client, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password=password, key_value=key_value, key_type=key_type,
key_usage=key_usage, start_date=start_date, end_date=end_date)
app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
display_name=display_name,
identifier_uris=identifier_uris,
homepage=homepage,
reply_urls=reply_urls,
key_credentials=key_creds,
password_credentials=password_creds)
try:
return client.create(app_create_param)
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError(
'specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
if rbac_client is None:
rbac_client = get_graph_rbac_management_client(cli_ctx)
if resolve_app:
try:
uuid.UUID(identifier)
result = list(rbac_client.applications.list(
filter="appId eq '{}'".format(identifier)))
except ValueError:
result = list(rbac_client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(identifier)))
if not result: # assume we get an object id
result = [rbac_client.applications.get(identifier)]
app_id = result[0].app_id
else:
app_id = identifier
return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, is_service_principal, resource_group_name=None, scope=None):
return _create_role_assignment(cli_ctx,
role, assignee, resource_group_name,
scope, resolve_assignee=is_service_principal)
def _create_role_assignment(cli_ctx, role, assignee,
resource_group_name=None, scope=None, resolve_assignee=True):
from azure.cli.core.profiles import ResourceType, get_sdk
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
scope = _build_role_scope(
resource_group_name, scope, assignments_client.config.subscription_id)
# XXX: if role is uuid, this function's output cannot be used as role assignment definition id
# ref: https://github.com/Azure/azure-cli/issues/2458
role_id = _resolve_role_id(role, scope, definitions_client)
# If the cluster has service principal resolve the service principal client id to get the object id,
# if not use MSI object id.
object_id = _resolve_object_id(
cli_ctx, assignee) if resolve_assignee else assignee
RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
'RoleAssignmentCreateParameters', mod='models',
operation_group='role_assignments')
parameters = RoleAssignmentCreateParameters(
role_definition_id=role_id, principal_id=object_id)
assignment_name = uuid.uuid4()
custom_headers = None
return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
scope=None, include_inherited=False, yes=None):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
ids = ids or []
if ids:
if assignee or role or resource_group_name or scope or include_inherited:
raise CLIError(
'When assignment ids are used, other parameter values are not required')
for i in ids:
assignments_client.delete_by_id(i)
return
if not any([ids, assignee, role, resource_group_name, scope, assignee, yes]):
from knack.prompting import prompt_y_n
msg = 'This will delete all role assignments under the subscription. Are you sure?'
if not prompt_y_n(msg, default="n"):
return
scope = _build_role_scope(resource_group_name, scope,
assignments_client.config.subscription_id)
assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited,
include_groups=False)
if assignments:
for a in assignments:
assignments_client.delete_by_id(a.id)
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
logger.info('Waiting for AAD role to delete')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to delete',
value=0.1 * x, total_val=1.0)
try:
delete_role_assignments(cli_ctx,
role=role,
assignee=service_principal,
scope=scope)
break
except CLIError as ex:
raise ex
except CloudError as ex:
logger.info(ex)
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
logger.info('AAD role deletion done')
return True
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited, include_groups):
assignee_object_id = None
if assignee:
assignee_object_id = _resolve_object_id(cli_ctx, assignee)
# always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
if scope:
assignments = list(assignments_client.list_for_scope(
scope=scope, filter='atScope()'))
elif assignee_object_id:
if include_groups:
f = "assignedTo('{}')".format(assignee_object_id)
else:
f = "principalId eq '{}'".format(assignee_object_id)
assignments = list(assignments_client.list(filter=f))
else:
assignments = list(assignments_client.list())
if assignments:
assignments = [a for a in assignments if (
not scope or
include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
_get_role_property(a, 'scope').lower() == scope.lower()
)]
if role:
role_id = _resolve_role_id(role, scope, definitions_client)
assignments = [i for i in assignments if _get_role_property(
i, 'role_definition_id') == role_id]
if assignee_object_id:
assignments = [i for i in assignments if _get_role_property(
i, 'principal_id') == assignee_object_id]
return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(
scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
if len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
client = get_graph_rbac_management_client(cli_ctx)
result = None
if assignee.find('@') >= 0: # looks like a user principal name
result = list(client.users.list(
filter="userPrincipalName eq '{}'".format(assignee)))
if not result:
result = list(client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
if not result: # assume an object id, let us verify it
result = _get_object_stubs(client, [assignee])
# 2+ matches should never happen, so we only check 'no match' here
if not result:
raise CLIError(
"No matches in graph database for '{}'".format(assignee))
return result[0].object_id
def _get_object_stubs(graph_client, assignees):
params = GetObjectsParameters(include_directory_object_references=True,
object_ids=assignees)
return list(graph_client.objects.get_objects_by_object_ids(params))
def subnet_role_assignment_exists(cli_ctx, scope):
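"""Return True if a Network Contributor role assignment already exists at the given scope."""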
network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id):
return True
return False
_re_user_assigned_identity_resource_id = re.compile(
r'/subscriptions/(.*?)/resourcegroups/(.*?)/providers/microsoft.managedidentity/userassignedidentities/(.*)',
flags=re.IGNORECASE)
def _get_user_assigned_identity(cli_ctx, resource_id):
resource_id = resource_id.lower()
match = _re_user_assigned_identity_resource_id.search(resource_id)
if match:
subscription_id = match.group(1)
resource_group_name = match.group(2)
identity_name = match.group(3)
msi_client = get_msi_client(cli_ctx, subscription_id)
try:
identity = msi_client.user_assigned_identities.get(resource_group_name=resource_group_name,
resource_name=identity_name)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError("Identity {} not found.".format(resource_id))
raise CLIError(ex.message)
return identity
raise CLIError(
"Cannot parse identity name from provided resource id {}.".format(resource_id))
def _get_user_assigned_identity_client_id(cli_ctx, resource_id):
return _get_user_assigned_identity(cli_ctx, resource_id).client_id
def _get_user_assigned_identity_object_id(cli_ctx, resource_id):
return _get_user_assigned_identity(cli_ctx, resource_id).principal_id
def _update_dict(dict1, dict2):
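"""Return a copy of dict1 updated with the entries from dict2."""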
cp = dict1.copy()
cp.update(dict2)
return cp
def aks_browse(cmd, # pylint: disable=too-many-statements,too-many-branches
client,
resource_group_name,
name,
disable_browser=False,
listen_address='127.0.0.1',
listen_port='8001'):
# verify the kube-dashboard addon was not disabled
instance = client.get(resource_group_name, name)
addon_profiles = instance.addon_profiles or {}
# addon name is case insensitive
addon_profile = next((addon_profiles[k] for k in addon_profiles
if k.lower() == CONST_KUBE_DASHBOARD_ADDON_NAME.lower()),
ManagedClusterAddonProfile(enabled=False))
# open portal view if addon is not enabled or k8s version >= 1.19.0
if StrictVersion(instance.kubernetes_version) >= StrictVersion('1.19.0') or (not addon_profile.enabled):
subscription_id = get_subscription_id(cmd.cli_ctx)
dashboardURL = (
# Azure Portal URL (https://portal.azure.com for public cloud)
cmd.cli_ctx.cloud.endpoints.portal +
('/#resource/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService'
'/managedClusters/{2}/workloads').format(subscription_id, resource_group_name, name)
)
if in_cloud_console():
logger.warning(
'To view the Kubernetes resources view, please open %s in a new tab', dashboardURL)
else:
logger.warning('Kubernetes resources view on %s', dashboardURL)
if not disable_browser:
webbrowser.open_new_tab(dashboardURL)
return
# otherwise open the kube-dashboard addon
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name,
name, admin=False, path=browse_path)
# find the dashboard pod's name
try:
dashboard_pod = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--output", "name", "--selector", "k8s-app=kubernetes-dashboard"],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard pod: {}'.format(err))
if dashboard_pod:
# remove any "pods/" or "pod/" prefix from the name
dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
else:
raise CLIError("Couldn't find the Kubernetes dashboard pod.")
# find the port
try:
dashboard_port = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--selector", "k8s-app=kubernetes-dashboard",
"--output", "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'"]
)
# output format: b"'{port}'"
dashboard_port = int((dashboard_port.decode('utf-8').replace("'", "")))
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard port: {}'.format(err))
# use https if dashboard container is using https
if dashboard_port == 8443:
protocol = 'https'
else:
protocol = 'http'
proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port)
dashboardURL = '{0}/api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(proxy_url,
protocol)
# launch kubectl port-forward locally to access the remote dashboard
if in_cloud_console():
# TODO: better error handling here.
response = requests.post(
'http://localhost:8888/openport/{0}'.format(listen_port))
result = json.loads(response.text)
dashboardURL = '{0}api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(
result['url'], protocol)
term_id = os.environ.get('ACC_TERM_ID')
if term_id:
response = requests.post('http://localhost:8888/openLink/{0}'.format(term_id),
json={"url": dashboardURL})
logger.warning(
'To view the console, please open %s in a new tab', dashboardURL)
else:
logger.warning('Proxy running on %s', proxy_url)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async(dashboardURL)
try:
try:
subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "proxy", "--address",
listen_address, "--port", listen_port], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
if err.output.find(b'unknown flag: --address'):
if listen_address != '127.0.0.1':
logger.warning(
'"--address" is only supported in kubectl v1.13 and later.')
logger.warning(
'The "--listen-address" argument will be ignored.')
subprocess.call(["kubectl", "--kubeconfig",
browse_path, "proxy", "--port", listen_port])
except KeyboardInterrupt:
# Let command processing finish gracefully after the user presses [Ctrl+C]
pass
finally:
if in_cloud_console():
requests.post('http://localhost:8888/closeport/8001')
def _trim_nodepoolname(nodepool_name):
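"""Return the nodepool name truncated to 12 characters, defaulting to 'nodepool1' when empty."""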
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
def _add_monitoring_role_assignment(result, cluster_resource_id, cmd):
service_principal_msi_id = None
# Check if service principal exists, if it does, assign permissions to service principal
# Else, provide permissions to MSI
if (
hasattr(result, 'service_principal_profile') and
hasattr(result.service_principal_profile, 'client_id') and
result.service_principal_profile.client_id != 'msi'
):
logger.info('valid service principal exists, using it')
service_principal_msi_id = result.service_principal_profile.client_id
is_service_principal = True
elif (
(hasattr(result, 'addon_profiles')) and
(CONST_MONITORING_ADDON_NAME in result.addon_profiles) and
(hasattr(result.addon_profiles[CONST_MONITORING_ADDON_NAME], 'identity')) and
(hasattr(
result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity, 'object_id'))
):
logger.info('omsagent MSI exists, using it')
service_principal_msi_id = result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity.object_id
is_service_principal = False
if service_principal_msi_id is not None:
if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
service_principal_msi_id, is_service_principal, scope=cluster_resource_id):
logger.warning('Could not create a role assignment for Monitoring addon. '
'Are you an Owner on this subscription?')
else:
logger.warning('Could not find service principal or user assigned MSI for role '
'assignment')
def _add_ingress_appgw_addon_role_assignment(result, cmd):
service_principal_msi_id = None
# Check if service principal exists, if it does, assign permissions to service principal
# Else, provide permissions to MSI
if (
hasattr(result, 'service_principal_profile') and
hasattr(result.service_principal_profile, 'client_id') and
result.service_principal_profile.client_id != 'msi'
):
service_principal_msi_id = result.service_principal_profile.client_id
is_service_principal = True
elif (
(hasattr(result, 'addon_profiles')) and
(CONST_INGRESS_APPGW_ADDON_NAME in result.addon_profiles) and
(hasattr(result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME], 'identity')) and
(hasattr(
result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].identity, 'object_id'))
):
service_principal_msi_id = result.addon_profiles[
CONST_INGRESS_APPGW_ADDON_NAME].identity.object_id
is_service_principal = False
if service_principal_msi_id is not None:
config = result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config
from msrestazure.tools import parse_resource_id, resource_id
if CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID in config:
appgw_id = config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID]
parsed_appgw_id = parse_resource_id(appgw_id)
appgw_group_id = resource_id(subscription=parsed_appgw_id["subscription"],
resource_group=parsed_appgw_id["resource_group"])
if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
service_principal_msi_id, is_service_principal, scope=appgw_group_id):
logger.warning('Could not create a role assignment for application gateway: %s '
'specified in %s addon. '
'Are you an Owner on this subscription?', appgw_id, CONST_INGRESS_APPGW_ADDON_NAME)
if CONST_INGRESS_APPGW_SUBNET_ID in config:
subnet_id = config[CONST_INGRESS_APPGW_SUBNET_ID]
if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
service_principal_msi_id, is_service_principal, scope=subnet_id):
logger.warning('Could not create a role assignment for subnet: %s '
'specified in %s addon. '
'Are you an Owner on this subscription?', subnet_id, CONST_INGRESS_APPGW_ADDON_NAME)
if CONST_INGRESS_APPGW_SUBNET_CIDR in config:
if result.agent_pool_profiles[0].vnet_subnet_id is not None:
parsed_subnet_vnet_id = parse_resource_id(
result.agent_pool_profiles[0].vnet_subnet_id)
vnet_id = resource_id(subscription=parsed_subnet_vnet_id["subscription"],
resource_group=parsed_subnet_vnet_id["resource_group"],
namespace="Microsoft.Network",
type="virtualNetworks",
name=parsed_subnet_vnet_id["name"])
if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
service_principal_msi_id, is_service_principal, scope=vnet_id):
logger.warning('Could not create a role assignment for virtual network: %s '
'specified in %s addon. '
'Are you an Owner on this subscription?', vnet_id, CONST_INGRESS_APPGW_ADDON_NAME)
def aks_maintenanceconfiguration_list(
cmd,
client,
resource_group_name,
cluster_name
):
return client.list_by_managed_cluster(resource_group_name, cluster_name)
def aks_maintenanceconfiguration_show(
cmd,
client,
resource_group_name,
cluster_name,
config_name
):
logger.warning('resource_group_name: %s, cluster_name: %s, config_name: %s ',
resource_group_name, cluster_name, config_name)
return client.get(resource_group_name, cluster_name, config_name)
def aks_maintenanceconfiguration_delete(
cmd,
client,
resource_group_name,
cluster_name,
config_name
):
logger.warning('resource_group_name: %s, cluster_name: %s, config_name: %s ',
resource_group_name, cluster_name, config_name)
return client.delete(resource_group_name, cluster_name, config_name)
def aks_maintenanceconfiguration_add(
cmd,
client,
resource_group_name,
cluster_name,
config_name,
config_file,
weekday,
start_hour
):
configs = client.list_by_managed_cluster(resource_group_name, cluster_name)
for config in configs:
if config.name == config_name:
raise CLIError("Maintenance configuration '{}' already exists, please try a different name, "
"use 'aks maintenanceconfiguration list' to get current list of maitenance configurations".format(config_name))
return aks_maintenanceconfiguration_update_internal(cmd, client, resource_group_name, cluster_name, config_name, config_file, weekday, start_hour)
def aks_maintenanceconfiguration_update(
cmd,
client,
resource_group_name,
cluster_name,
config_name,
config_file,
weekday,
start_hour
):
configs = client.list_by_managed_cluster(resource_group_name, cluster_name)
found = False
for config in configs:
if config.name == config_name:
found = True
break
if not found:
raise CLIError("Maintenance configuration '{}' doesn't exist."
"use 'aks maintenanceconfiguration list' to get current list of maitenance configurations".format(config_name))
return aks_maintenanceconfiguration_update_internal(cmd, client, resource_group_name, cluster_name, config_name, config_file, weekday, start_hour)
def aks_create(cmd, # pylint: disable=too-many-locals,too-many-statements,too-many-branches
client,
resource_group_name,
name,
ssh_key_value,
dns_name_prefix=None,
location=None,
admin_username="azureuser",
windows_admin_username=None,
windows_admin_password=None,
enable_ahub=False,
kubernetes_version='',
node_vm_size="Standard_DS2_v2",
node_osdisk_type=None,
node_osdisk_size=0,
node_osdisk_diskencryptionset_id=None,
node_count=3,
nodepool_name="nodepool1",
nodepool_tags=None,
nodepool_labels=None,
service_principal=None, client_secret=None,
no_ssh_key=False,
disable_rbac=None,
enable_rbac=None,
enable_vmss=None,
vm_set_type=None,
skip_subnet_role_assignment=False,
os_sku=None,
enable_fips_image=False,
enable_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
network_plugin=None,
network_policy=None,
pod_cidr=None,
service_cidr=None,
dns_service_ip=None,
docker_bridge_address=None,
load_balancer_sku=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
outbound_type=None,
enable_addons=None,
workspace_resource_id=None,
enable_msi_auth_for_monitoring=False,
min_count=None,
max_count=None,
vnet_subnet_id=None,
pod_subnet_id=None,
ppg=None,
max_pods=0,
aad_client_app_id=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_tenant_id=None,
tags=None,
node_zones=None,
enable_node_public_ip=False,
node_public_ip_prefix_id=None,
generate_ssh_keys=False, # pylint: disable=unused-argument
enable_pod_security_policy=False,
node_resource_group=None,
uptime_sla=False,
attach_acr=None,
enable_private_cluster=False,
private_dns_zone=None,
enable_managed_identity=True,
fqdn_subdomain=None,
enable_public_fqdn=False,
api_server_authorized_ip_ranges=None,
aks_custom_headers=None,
appgw_name=None,
appgw_subnet_prefix=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_aad=False,
enable_azure_rbac=False,
aad_admin_group_object_ids=None,
aci_subnet_name=None,
enable_sgxquotehelper=False,
kubelet_config=None,
linux_os_config=None,
http_proxy_config=None,
assign_identity=None,
auto_upgrade_channel=None,
enable_pod_identity=False,
enable_pod_identity_with_kubenet=False,
enable_encryption_at_host=False,
enable_ultra_ssd=False,
enable_secret_rotation=False,
disable_local_accounts=False,
no_wait=False,
assign_kubelet_identity=None,
yes=False):
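    """Create a managed Kubernetes cluster.

    Builds the ManagedCluster model from the supplied flags (agent pool, network,
    addon, identity and API server access profiles) and submits it, retrying on
    transient AAD propagation errors for newly created service principals.
    """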
if not no_ssh_key:
try:
if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
raise ValueError()
except (TypeError, ValueError):
shortened_key = truncate_text(ssh_key_value)
raise CLIError(
'Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
subscription_id = get_subscription_id(cmd.cli_ctx)
if dns_name_prefix and fqdn_subdomain:
raise CLIError(
'--dns-name-prefix and --fqdn-subdomain cannot be used at same time')
if not dns_name_prefix and not fqdn_subdomain:
dns_name_prefix = _get_default_dns_prefix(
name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
# Flag to be removed, kept for back-compatibility only. Remove the below section
# when we deprecate the enable-vmss flag
if enable_vmss:
if vm_set_type and vm_set_type.lower() != "VirtualMachineScaleSets".lower():
raise CLIError('enable-vmss and provided vm_set_type ({}) are conflicting with each other'.
format(vm_set_type))
vm_set_type = "VirtualMachineScaleSets"
vm_set_type = _set_vm_set_type(vm_set_type, kubernetes_version)
load_balancer_sku = set_load_balancer_sku(
load_balancer_sku, kubernetes_version)
if api_server_authorized_ip_ranges and load_balancer_sku == "basic":
raise CLIError(
'--api-server-authorized-ip-ranges can only be used with standard load balancer')
agent_pool_profile = ManagedClusterAgentPoolProfile(
# Must be 12 chars or less before ACS RP adds to it
name=_trim_nodepoolname(nodepool_name),
tags=nodepool_tags,
node_labels=nodepool_labels,
count=int(node_count),
vm_size=node_vm_size,
os_type="Linux",
os_sku=os_sku,
mode="System",
vnet_subnet_id=vnet_subnet_id,
pod_subnet_id=pod_subnet_id,
proximity_placement_group_id=ppg,
availability_zones=node_zones,
enable_node_public_ip=enable_node_public_ip,
enable_fips=enable_fips_image,
node_public_ip_prefix_id=node_public_ip_prefix_id,
enable_encryption_at_host=enable_encryption_at_host,
enable_ultra_ssd=enable_ultra_ssd,
max_pods=int(max_pods) if max_pods else None,
type=vm_set_type
)
if node_osdisk_size:
agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
if node_osdisk_type:
agent_pool_profile.os_disk_type = node_osdisk_type
_check_cluster_autoscaler_flag(
enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile)
if kubelet_config:
agent_pool_profile.kubelet_config = _get_kubelet_config(kubelet_config)
if linux_os_config:
agent_pool_profile.linux_os_config = _get_linux_os_config(
linux_os_config)
linux_profile = None
# LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
if not no_ssh_key:
ssh_config = ContainerServiceSshConfiguration(
public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
linux_profile = ContainerServiceLinuxProfile(
admin_username=admin_username, ssh=ssh_config)
windows_profile = None
if windows_admin_username:
if windows_admin_password is None:
try:
windows_admin_password = prompt_pass(
msg='windows-admin-password: ', confirm=True)
except NoTTYException:
raise CLIError(
'Please specify both username and password in non-interactive mode.')
windows_license_type = None
if enable_ahub:
windows_license_type = 'Windows_Server'
windows_profile = ManagedClusterWindowsProfile(
admin_username=windows_admin_username,
admin_password=windows_admin_password,
license_type=windows_license_type)
service_principal_profile = None
principal_obj = None
# If customer explicitly provides a service principal, disable managed identity.
if service_principal and client_secret:
enable_managed_identity = False
if not enable_managed_identity:
principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
service_principal=service_principal, client_secret=client_secret,
subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
fqdn_subdomain=fqdn_subdomain, location=location, name=name)
service_principal_profile = ManagedClusterServicePrincipalProfile(
client_id=principal_obj.get("service_principal"),
secret=principal_obj.get("client_secret"))
if attach_acr:
if enable_managed_identity:
if no_wait:
raise CLIError('When --attach-acr and --enable-managed-identity are both specified, '
'--no-wait is not allowed, please wait until the whole operation succeeds.')
else:
_ensure_aks_acr(cmd.cli_ctx,
client_id=service_principal_profile.client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
need_post_creation_vnet_permission_granting = False
if (vnet_subnet_id and not skip_subnet_role_assignment and
not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
# if service_principal_profile is None, then this cluster is an MSI cluster,
# and the service principal does not exist. Two cases:
# 1. For system assigned identity, we just tell user to grant the
# permission after the cluster is created to keep consistent with portal experience.
# 2. For user assigned identity, we can grant needed permission to
# user provided user assigned identity before creating managed cluster.
if service_principal_profile is None and not assign_identity:
msg = ('It is highly recommended to use USER assigned identity '
                   '(option --assign-identity) when you want to bring your own '
'subnet, which will have no latency for the role assignment to '
'take effect. When using SYSTEM assigned identity, '
'azure-cli will grant Network Contributor role to the '
'system assigned identity after the cluster is created, and '
'the role assignment will take some time to take effect, see '
'https://docs.microsoft.com/en-us/azure/aks/use-managed-identity, '
'proceed to create cluster with system assigned identity?')
from knack.prompting import prompt_y_n
if not yes and not prompt_y_n(msg, default="n"):
return None
need_post_creation_vnet_permission_granting = True
else:
scope = vnet_subnet_id
identity_client_id = ""
if assign_identity:
identity_client_id = _get_user_assigned_identity_client_id(
cmd.cli_ctx, assign_identity)
else:
identity_client_id = service_principal_profile.client_id
if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
identity_client_id, scope=scope):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
load_balancer_profile = create_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
outbound_type = _set_outbound_type(
outbound_type, network_plugin, load_balancer_sku, load_balancer_profile)
network_profile = None
if any([network_plugin,
pod_cidr,
service_cidr,
dns_service_ip,
docker_bridge_address,
network_policy]):
if not network_plugin:
raise CLIError('Please explicitly specify the network plugin type')
if pod_cidr and network_plugin == "azure":
raise CLIError(
'Please use kubenet as the network plugin type when pod_cidr is specified')
network_profile = ContainerServiceNetworkProfile(
network_plugin=network_plugin,
pod_cidr=pod_cidr,
service_cidr=service_cidr,
dns_service_ip=dns_service_ip,
docker_bridge_cidr=docker_bridge_address,
network_policy=network_policy,
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type
)
else:
if load_balancer_sku.lower() == "standard" or load_balancer_profile:
network_profile = ContainerServiceNetworkProfile(
network_plugin="kubenet",
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type,
)
if load_balancer_sku.lower() == "basic":
network_profile = ContainerServiceNetworkProfile(
load_balancer_sku=load_balancer_sku.lower(),
)
addon_profiles = _handle_addons_args(
cmd=cmd,
addons_str=enable_addons,
subscription_id=subscription_id,
resource_group_name=resource_group_name,
addon_profiles={},
workspace_resource_id=workspace_resource_id,
enable_msi_auth_for_monitoring=enable_msi_auth_for_monitoring,
appgw_name=appgw_name,
appgw_subnet_prefix=appgw_subnet_prefix,
appgw_subnet_cidr=appgw_subnet_cidr,
appgw_id=appgw_id,
appgw_subnet_id=appgw_subnet_id,
appgw_watch_namespace=appgw_watch_namespace,
enable_sgxquotehelper=enable_sgxquotehelper,
aci_subnet_name=aci_subnet_name,
vnet_subnet_id=vnet_subnet_id,
enable_secret_rotation=enable_secret_rotation,
)
monitoring = False
if CONST_MONITORING_ADDON_NAME in addon_profiles:
monitoring = True
if enable_msi_auth_for_monitoring and not enable_managed_identity:
raise ArgumentUsageError("--enable-msi-auth-for-monitoring can not be used on clusters with service principal auth.")
_ensure_container_insights_for_monitoring(cmd,
addon_profiles[CONST_MONITORING_ADDON_NAME], subscription_id,
resource_group_name, name, location,
aad_route=enable_msi_auth_for_monitoring, create_dcr=True,
create_dcra=False)
# addon is in the list and is enabled
ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in addon_profiles and \
addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
os_type = 'Linux'
enable_virtual_node = False
if CONST_VIRTUAL_NODE_ADDON_NAME + os_type in addon_profiles:
enable_virtual_node = True
aad_profile = None
if enable_aad:
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
raise CLIError('"--enable-aad" cannot be used together with '
'"--aad-client-app-id/--aad-server-app-id/--aad-server-app-secret"')
if disable_rbac and enable_azure_rbac:
raise CLIError(
'"--enable-azure-rbac" can not be used together with "--disable-rbac"')
aad_profile = ManagedClusterAADProfile(
managed=True,
enable_azure_rbac=enable_azure_rbac,
# ids -> i_ds due to track 2 naming issue
admin_group_object_i_ds=_parse_comma_separated_list(
aad_admin_group_object_ids),
tenant_id=aad_tenant_id
)
else:
if aad_admin_group_object_ids is not None:
raise CLIError(
'"--admin-aad-object-id" can only be used together with "--enable-aad"')
if enable_azure_rbac is True:
raise CLIError(
'"--enable-azure-rbac" can only be used together with "--enable-aad"')
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
aad_profile = ManagedClusterAADProfile(
client_app_id=aad_client_app_id,
server_app_id=aad_server_app_id,
server_app_secret=aad_server_app_secret,
tenant_id=aad_tenant_id
)
# Check that both --disable-rbac and --enable-rbac weren't provided
if all([disable_rbac, enable_rbac]):
raise CLIError(
'specify either "--disable-rbac" or "--enable-rbac", not both.')
api_server_access_profile = None
if api_server_authorized_ip_ranges:
api_server_access_profile = _populate_api_server_access_profile(
api_server_authorized_ip_ranges)
identity = None
if not enable_managed_identity and assign_identity:
raise CLIError(
'--assign-identity can only be specified when --enable-managed-identity is specified')
if enable_managed_identity and not assign_identity:
identity = ManagedClusterIdentity(
type="SystemAssigned"
)
elif enable_managed_identity and assign_identity:
user_assigned_identity = {
assign_identity: Components1Umhcm8SchemasManagedclusteridentityPropertiesUserassignedidentitiesAdditionalproperties()
}
identity = ManagedClusterIdentity(
type="UserAssigned",
user_assigned_identities=user_assigned_identity
)
identity_profile = None
if assign_kubelet_identity:
if not assign_identity:
raise CLIError('--assign-kubelet-identity can only be specified when --assign-identity is specified')
kubelet_identity = _get_user_assigned_identity(cmd.cli_ctx, assign_kubelet_identity)
identity_profile = {
'kubeletidentity': ComponentsQit0EtSchemasManagedclusterpropertiesPropertiesIdentityprofileAdditionalproperties(
resource_id=assign_kubelet_identity,
client_id=kubelet_identity.client_id,
object_id=kubelet_identity.principal_id
)
}
cluster_identity_object_id = _get_user_assigned_identity_object_id(cmd.cli_ctx, assign_identity)
# ensure the cluster identity has "Managed Identity Operator" role at the scope of kubelet identity
_ensure_cluster_identity_permission_on_kubelet_identity(cmd.cli_ctx, cluster_identity_object_id, assign_kubelet_identity)
pod_identity_profile = None
if enable_pod_identity:
if not enable_managed_identity:
raise CLIError(
'--enable-pod-identity can only be specified when --enable-managed-identity is specified')
pod_identity_profile = ManagedClusterPodIdentityProfile(enabled=True)
_ensure_pod_identity_kubenet_consent(
network_profile, pod_identity_profile, enable_pod_identity_with_kubenet)
enable_rbac = True
if disable_rbac:
enable_rbac = False
auto_upgrade_profile = None
if auto_upgrade_channel is not None:
auto_upgrade_profile = ManagedClusterAutoUpgradeProfile(
upgrade_channel=auto_upgrade_channel)
mc = ManagedCluster(
location=location, tags=tags,
dns_prefix=dns_name_prefix,
kubernetes_version=kubernetes_version,
enable_rbac=enable_rbac,
agent_pool_profiles=[agent_pool_profile],
linux_profile=linux_profile,
windows_profile=windows_profile,
service_principal_profile=service_principal_profile,
network_profile=network_profile,
addon_profiles=addon_profiles,
aad_profile=aad_profile,
auto_scaler_profile=cluster_autoscaler_profile,
enable_pod_security_policy=bool(enable_pod_security_policy),
identity=identity,
disk_encryption_set_id=node_osdisk_diskencryptionset_id,
api_server_access_profile=api_server_access_profile,
auto_upgrade_profile=auto_upgrade_profile,
pod_identity_profile=pod_identity_profile,
identity_profile=identity_profile,
disable_local_accounts=bool(disable_local_accounts))
if node_resource_group:
mc.node_resource_group = node_resource_group
use_custom_private_dns_zone = False
if not enable_private_cluster and enable_public_fqdn:
raise ArgumentUsageError("--enable-public-fqdn should only be used with --enable-private-cluster")
if enable_private_cluster:
if load_balancer_sku.lower() != "standard":
raise ArgumentUsageError(
"Please use standard load balancer for private cluster")
mc.api_server_access_profile = ManagedClusterAPIServerAccessProfile(
enable_private_cluster=True
)
if enable_public_fqdn:
mc.api_server_access_profile.enable_private_cluster_public_fqdn = True
if private_dns_zone:
if not enable_private_cluster:
raise ArgumentUsageError(
"Invalid private dns zone for public cluster. It should always be empty for public cluster")
mc.api_server_access_profile.private_dns_zone = private_dns_zone
from msrestazure.tools import is_valid_resource_id
if private_dns_zone.lower() != CONST_PRIVATE_DNS_ZONE_SYSTEM and private_dns_zone.lower() != CONST_PRIVATE_DNS_ZONE_NONE:
if is_valid_resource_id(private_dns_zone):
use_custom_private_dns_zone = True
else:
raise ResourceNotFoundError(private_dns_zone + " is not a valid Azure resource ID.")
if fqdn_subdomain:
if not use_custom_private_dns_zone:
raise ArgumentUsageError(
"--fqdn-subdomain should only be used for private cluster with custom private dns zone")
mc.fqdn_subdomain = fqdn_subdomain
if http_proxy_config:
mc.http_proxy_config = _get_http_proxy_config(http_proxy_config)
if uptime_sla:
mc.sku = ManagedClusterSKU(
name="Basic",
tier="Paid"
)
headers = get_aks_custom_headers(aks_custom_headers)
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
if monitoring and enable_msi_auth_for_monitoring:
# Creating a DCR Association (for the monitoring addon) requires waiting for cluster creation to finish
no_wait = False
created_cluster = _put_managed_cluster_ensuring_permission(
cmd,
client,
subscription_id,
resource_group_name,
name,
mc,
monitoring,
ingress_appgw_addon_enabled,
enable_virtual_node,
need_post_creation_vnet_permission_granting,
vnet_subnet_id,
enable_managed_identity,
attach_acr,
headers,
no_wait)
if monitoring and enable_msi_auth_for_monitoring:
# Create the DCR Association here
_ensure_container_insights_for_monitoring(cmd,
addon_profiles[CONST_MONITORING_ADDON_NAME], subscription_id,
resource_group_name, name, location,
aad_route=enable_msi_auth_for_monitoring, create_dcr=False,
create_dcra=True)
return created_cluster
except CloudError as ex:
retry_exception = ex
if 'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def aks_update(cmd, # pylint: disable=too-many-statements,too-many-branches,too-many-locals
client,
resource_group_name,
name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
min_count=None, max_count=None, no_wait=False,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
api_server_authorized_ip_ranges=None,
enable_pod_security_policy=False,
disable_pod_security_policy=False,
attach_acr=None,
detach_acr=None,
uptime_sla=False,
no_uptime_sla=False,
enable_aad=False,
aad_tenant_id=None,
aad_admin_group_object_ids=None,
enable_ahub=False,
disable_ahub=False,
aks_custom_headers=None,
auto_upgrade_channel=None,
enable_managed_identity=False,
assign_identity=None,
enable_pod_identity=False,
enable_pod_identity_with_kubenet=False,
disable_pod_identity=False,
enable_secret_rotation=False,
disable_secret_rotation=False,
disable_local_accounts=False,
enable_local_accounts=False,
enable_public_fqdn=False,
disable_public_fqdn=False,
yes=False,
tags=None,
windows_admin_password=None,
enable_azure_rbac=False,
disable_azure_rbac=False):
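    """Update settings of an existing managed cluster.

    Requires at least one mutating flag, fetches the current ManagedCluster,
    applies the requested changes in place and PUTs the updated model back.
    """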
update_autoscaler = enable_cluster_autoscaler or disable_cluster_autoscaler or update_cluster_autoscaler
update_acr = attach_acr is not None or detach_acr is not None
update_pod_security = enable_pod_security_policy or disable_pod_security_policy
update_lb_profile = is_load_balancer_profile_provided(load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
update_aad_profile = not (
aad_tenant_id is None and aad_admin_group_object_ids is None and not enable_azure_rbac and not disable_azure_rbac)
# pylint: disable=too-many-boolean-expressions
if not update_autoscaler and \
cluster_autoscaler_profile is None and \
not update_acr and \
        not update_lb_profile and \
        api_server_authorized_ip_ranges is None and \
        not update_pod_security and \
not uptime_sla and \
not no_uptime_sla and \
not enable_aad and \
not update_aad_profile and \
not enable_ahub and \
not disable_ahub and \
not auto_upgrade_channel and \
not enable_managed_identity and \
not assign_identity and \
not enable_pod_identity and \
not disable_pod_identity and \
not enable_secret_rotation and \
not disable_secret_rotation and \
not tags and \
not windows_admin_password and \
not enable_local_accounts and \
not disable_local_accounts and \
not enable_public_fqdn and \
not disable_public_fqdn:
raise CLIError('Please specify "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--cluster-autoscaler-profile" or '
'"--enable-pod-security-policy" or '
'"--disable-pod-security-policy" or '
'"--api-server-authorized-ip-ranges" or '
'"--attach-acr" or '
'"--detach-acr" or '
'"--uptime-sla" or '
'"--no-uptime-sla" or '
'"--load-balancer-managed-outbound-ip-count" or '
'"--load-balancer-outbound-ips" or '
'"--load-balancer-outbound-ip-prefixes" or '
'"--enable-aad" or '
'"--aad-tenant-id" or '
'"--aad-admin-group-object-ids" or '
'"--enable-ahub" or '
'"--disable-ahub" or '
'"--enable-managed-identity" or '
'"--enable-pod-identity" or '
'"--disable-pod-identity" or '
'"--auto-upgrade-channel" or '
'"--enable-secret-rotation" or '
'"--disable-secret-rotation" or '
'"--tags" or '
'"--windows-admin-password" or '
'"--enable-azure-rbac" or '
'"--disable-azure-rbac" or '
'"--enable-local-accounts" or '
'"--disable-local-accounts" or '
'"--enable-public-fqdn" or '
'"--disable-public-fqdn"')
instance = client.get(resource_group_name, name)
if update_autoscaler and len(instance.agent_pool_profiles) > 1:
raise CLIError('There is more than one node pool in the cluster. Please use "az aks nodepool" command '
'to update per node pool auto scaler settings')
if min_count is None or max_count is None:
if enable_cluster_autoscaler or update_cluster_autoscaler:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
                           '--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError(
'value of min-count should be less than or equal to value of max-count.')
if enable_cluster_autoscaler:
if instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning('Cluster autoscaler is already enabled for this managed cluster.\n'
'Please run "az aks update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
instance.agent_pool_profiles[0].enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
raise CLIError('Cluster autoscaler is not enabled for this managed cluster.\n'
'Run "az aks update --enable-cluster-autoscaler" '
'to enable cluster with min-count and max-count.')
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
if disable_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning(
'Cluster autoscaler is already disabled for this managed cluster.')
return None
instance.agent_pool_profiles[0].enable_auto_scaling = False
instance.agent_pool_profiles[0].min_count = None
instance.agent_pool_profiles[0].max_count = None
# if intention is to clear profile
if cluster_autoscaler_profile == {}:
instance.auto_scaler_profile = {}
# else profile is provided, update instance profile if it exists
elif cluster_autoscaler_profile:
instance.auto_scaler_profile = _update_dict(instance.auto_scaler_profile.__dict__,
dict((key.replace("-", "_"), value)
for (key, value) in cluster_autoscaler_profile.items())) \
if instance.auto_scaler_profile else cluster_autoscaler_profile
if enable_pod_security_policy and disable_pod_security_policy:
raise CLIError('Cannot specify --enable-pod-security-policy and --disable-pod-security-policy '
'at the same time.')
if enable_pod_security_policy:
instance.enable_pod_security_policy = True
if disable_pod_security_policy:
instance.enable_pod_security_policy = False
if disable_local_accounts and enable_local_accounts:
raise CLIError('Cannot specify --disable-local-accounts and --enable-local-accounts '
'at the same time.')
if disable_local_accounts:
instance.disable_local_accounts = True
if enable_local_accounts:
instance.disable_local_accounts = False
if update_lb_profile:
instance.network_profile.load_balancer_profile = update_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout,
instance.network_profile.load_balancer_profile)
if attach_acr and detach_acr:
raise CLIError(
'Cannot specify "--attach-acr" and "--detach-acr" at the same time.')
if uptime_sla and no_uptime_sla:
raise CLIError(
'Cannot specify "--uptime-sla" and "--no-uptime-sla" at the same time.')
if uptime_sla:
instance.sku = ManagedClusterSKU(
name="Basic",
tier="Paid"
)
if no_uptime_sla:
instance.sku = ManagedClusterSKU(
name="Basic",
tier="Free"
)
subscription_id = get_subscription_id(cmd.cli_ctx)
client_id = ""
if _is_msi_cluster(instance):
if instance.identity_profile is None or instance.identity_profile["kubeletidentity"] is None:
raise CLIError('Unexpected error getting kubelet\'s identity for the cluster. '
'Please do not set --attach-acr or --detach-acr. '
'You can manually grant or revoke permission to the identity named '
                           '<CLUSTER_NAME>-agentpool in MC_ resource group to access ACR.')
client_id = instance.identity_profile["kubeletidentity"].client_id
else:
client_id = instance.service_principal_profile.client_id
if not client_id:
raise CLIError('Cannot get the AKS cluster\'s service principal.')
if attach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
if detach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=detach_acr,
subscription_id=subscription_id,
detach=True)
# empty string is valid as it disables ip whitelisting
if api_server_authorized_ip_ranges is not None:
instance.api_server_access_profile = \
_populate_api_server_access_profile(
api_server_authorized_ip_ranges, instance)
if enable_aad:
if instance.aad_profile is not None and instance.aad_profile.managed:
raise CLIError(
'Cannot specify "--enable-aad" if managed AAD is already enabled')
instance.aad_profile = ManagedClusterAADProfile(
managed=True
)
if update_aad_profile:
if instance.aad_profile is None or not instance.aad_profile.managed:
raise CLIError('Cannot specify "--aad-tenant-id/--aad-admin-group-object-ids/--enable-azure-rbac/--disable-azure-rbac"'
' if managed AAD is not enabled')
if aad_tenant_id is not None:
instance.aad_profile.tenant_id = aad_tenant_id
if aad_admin_group_object_ids is not None:
# ids -> i_ds due to track 2 naming issue
instance.aad_profile.admin_group_object_i_ds = _parse_comma_separated_list(
aad_admin_group_object_ids)
if enable_azure_rbac and disable_azure_rbac:
raise CLIError(
'Cannot specify "--enable-azure-rbac" and "--disable-azure-rbac" at the same time')
if enable_azure_rbac:
instance.aad_profile.enable_azure_rbac = True
if disable_azure_rbac:
instance.aad_profile.enable_azure_rbac = False
if enable_ahub and disable_ahub:
raise CLIError(
'Cannot specify "--enable-ahub" and "--disable-ahub" at the same time')
if enable_ahub:
instance.windows_profile.license_type = 'Windows_Server'
if disable_ahub:
instance.windows_profile.license_type = 'None'
if enable_public_fqdn and disable_public_fqdn:
raise MutuallyExclusiveArgumentError(
'Cannot specify "--enable-public-fqdn" and "--disable-public-fqdn" at the same time')
is_private_cluster = instance.api_server_access_profile is not None and instance.api_server_access_profile.enable_private_cluster
if enable_public_fqdn:
if not is_private_cluster:
raise ArgumentUsageError('--enable-public-fqdn can only be used for private cluster')
instance.api_server_access_profile.enable_private_cluster_public_fqdn = True
if disable_public_fqdn:
if not is_private_cluster:
raise ArgumentUsageError('--disable-public-fqdn can only be used for private cluster')
if instance.api_server_access_profile.private_dns_zone.lower() == CONST_PRIVATE_DNS_ZONE_NONE:
raise ArgumentUsageError('--disable-public-fqdn cannot be applied for none mode private dns zone cluster')
instance.api_server_access_profile.enable_private_cluster_public_fqdn = False
if instance.auto_upgrade_profile is None:
instance.auto_upgrade_profile = ManagedClusterAutoUpgradeProfile()
if auto_upgrade_channel is not None:
instance.auto_upgrade_profile.upgrade_channel = auto_upgrade_channel
if not enable_managed_identity and assign_identity:
raise CLIError(
'--assign-identity can only be specified when --enable-managed-identity is specified')
current_identity_type = "spn"
if instance.identity is not None:
current_identity_type = instance.identity.type.casefold()
goal_identity_type = current_identity_type
if enable_managed_identity:
if not assign_identity:
goal_identity_type = "systemassigned"
else:
goal_identity_type = "userassigned"
if current_identity_type != goal_identity_type:
from knack.prompting import prompt_y_n
msg = ""
if current_identity_type == "spn":
msg = ('Your cluster is using service principal, and you are going to update the cluster to use {} managed identity.\n'
'After updating, your cluster\'s control plane and addon pods will switch to use managed identity, but kubelet '
'will KEEP USING SERVICE PRINCIPAL until you upgrade your agentpool.\n '
'Are you sure you want to perform this operation?').format(goal_identity_type)
else:
msg = ('Your cluster is already using {} managed identity, and you are going to update the cluster to use {} managed identity. \n'
'Are you sure you want to perform this operation?').format(current_identity_type, goal_identity_type)
if not yes and not prompt_y_n(msg, default="n"):
return None
if goal_identity_type == "systemassigned":
instance.identity = ManagedClusterIdentity(
type="SystemAssigned"
)
elif goal_identity_type == "userassigned":
user_assigned_identity = {
assign_identity: Components1Umhcm8SchemasManagedclusteridentityPropertiesUserassignedidentitiesAdditionalproperties()
}
instance.identity = ManagedClusterIdentity(
type="UserAssigned",
user_assigned_identities=user_assigned_identity
)
if enable_pod_identity:
if not _is_pod_identity_addon_enabled(instance):
# we only rebuild the pod identity profile if it's disabled before
_update_addon_pod_identity(
instance, enable=True,
allow_kubenet_consent=enable_pod_identity_with_kubenet,
)
if disable_pod_identity:
_update_addon_pod_identity(instance, enable=False)
    azure_keyvault_secrets_provider_addon_profile = None
    azure_keyvault_secrets_provider_enabled = False
    monitoring_addon_enabled = False
ingress_appgw_addon_enabled = False
virtual_node_addon_enabled = False
if instance.addon_profiles is not None:
azure_keyvault_secrets_provider_addon_profile = instance.addon_profiles.get(CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME, None)
azure_keyvault_secrets_provider_enabled = CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME in instance.addon_profiles and \
instance.addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME].enabled
monitoring_addon_enabled = CONST_MONITORING_ADDON_NAME in instance.addon_profiles and \
instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled
ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles and \
instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
virtual_node_addon_enabled = CONST_VIRTUAL_NODE_ADDON_NAME + 'Linux' in instance.addon_profiles and \
instance.addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + 'Linux'].enabled
if enable_secret_rotation:
if not azure_keyvault_secrets_provider_enabled:
raise CLIError(
'--enable-secret-rotation can only be specified when azure-keyvault-secrets-provider is enabled')
azure_keyvault_secrets_provider_addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"
if disable_secret_rotation:
if not azure_keyvault_secrets_provider_enabled:
raise CLIError(
'--disable-secret-rotation can only be specified when azure-keyvault-secrets-provider is enabled')
azure_keyvault_secrets_provider_addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "false"
if tags:
instance.tags = tags
if windows_admin_password:
instance.windows_profile.admin_password = windows_admin_password
headers = get_aks_custom_headers(aks_custom_headers)
return _put_managed_cluster_ensuring_permission(cmd,
client,
subscription_id,
resource_group_name,
name,
instance,
monitoring_addon_enabled,
ingress_appgw_addon_enabled,
virtual_node_addon_enabled,
False,
instance.agent_pool_profiles[0].vnet_subnet_id,
_is_msi_cluster(instance),
attach_acr,
headers,
no_wait)
def aks_show(cmd, client, resource_group_name, name): # pylint: disable=unused-argument
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
    This works around a quirk of the Python SDK's behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
if managed_cluster.agent_pool_profiles is not None:
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def aks_get_credentials(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
name,
admin=False,
user='clusterUser',
path=os.path.join(os.path.expanduser(
'~'), '.kube', 'config'),
overwrite_existing=False,
context_name=None,
public_fqdn=False):
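    """Fetch a kubeconfig for the cluster and print it or merge it into the local kubeconfig file.

    Admin, clusterUser and clusterMonitoringUser credentials are supported; passing
    public_fqdn requests the 'public' server type from the credential APIs.
    """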
credentialResults = None
serverType = None
if public_fqdn:
serverType = 'public'
if admin:
credentialResults = client.list_cluster_admin_credentials(
resource_group_name, name, serverType)
else:
if user.lower() == 'clusteruser':
credentialResults = client.list_cluster_user_credentials(
resource_group_name, name, serverType)
elif user.lower() == 'clustermonitoringuser':
credentialResults = client.list_cluster_monitoring_user_credentials(
resource_group_name, name, serverType)
else:
raise CLIError("The user is invalid.")
if not credentialResults:
raise CLIError("No Kubernetes credentials found.")
try:
kubeconfig = credentialResults.kubeconfigs[0].value.decode(
encoding='UTF-8')
_print_or_merge_credentials(
path, kubeconfig, overwrite_existing, context_name)
except (IndexError, ValueError):
raise CLIError("Fail to find kubeconfig file.")
# pylint: disable=line-too-long
def aks_kollect(cmd, # pylint: disable=too-many-statements,too-many-locals
client,
resource_group_name,
name,
storage_account=None,
sas_token=None,
container_logs=None,
kube_objects=None,
node_logs=None):
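    """Collect diagnostic information from the cluster using aks-periscope.

    Deploys the aks-periscope daemon set via kubectl and uploads the collected
    logs to the given (or discovered) storage account using a SAS token.
    """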
colorama.init()
mc = client.get(resource_group_name, name)
if not which('kubectl'):
        raise CLIError('Cannot find kubectl executable in PATH')
storage_account_id = None
if storage_account is None:
print("No storage account specified. Try getting storage account from diagnostic settings")
storage_account_id = get_storage_account_from_diag_settings(
cmd.cli_ctx, resource_group_name, name)
if storage_account_id is None:
raise CLIError(
"A storage account must be specified, since there isn't one in the diagnostic settings.")
from msrestazure.tools import is_valid_resource_id, parse_resource_id, resource_id
if storage_account_id is None:
if not is_valid_resource_id(storage_account):
storage_account_id = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Storage', type='storageAccounts',
name=storage_account
)
else:
storage_account_id = storage_account
if is_valid_resource_id(storage_account_id):
try:
parsed_storage_account = parse_resource_id(storage_account_id)
except CloudError as ex:
raise CLIError(ex.message)
else:
raise CLIError("Invalid storage account id %s" % storage_account_id)
storage_account_name = parsed_storage_account['name']
readonly_sas_token = None
if sas_token is None:
storage_client = cf_storage(
cmd.cli_ctx, parsed_storage_account['subscription'])
storage_account_keys = storage_client.storage_accounts.list_keys(parsed_storage_account['resource_group'],
storage_account_name)
kwargs = {
'account_name': storage_account_name,
'account_key': storage_account_keys.keys[0].value
}
cloud_storage_client = cloud_storage_account_service_factory(
cmd.cli_ctx, kwargs)
sas_token = cloud_storage_client.generate_shared_access_signature(
'b',
'sco',
'rwdlacup',
datetime.datetime.utcnow() + datetime.timedelta(days=1))
readonly_sas_token = cloud_storage_client.generate_shared_access_signature(
'b',
'sco',
'rl',
datetime.datetime.utcnow() + datetime.timedelta(days=1))
readonly_sas_token = readonly_sas_token.strip('?')
from knack.prompting import prompt_y_n
print()
print('This will deploy a daemon set to your cluster to collect logs and diagnostic information and '
f'save them to the storage account '
f'{colorama.Style.BRIGHT}{colorama.Fore.GREEN}{storage_account_name}{colorama.Style.RESET_ALL} as '
f'outlined in {format_hyperlink("http://aka.ms/AKSPeriscope")}.')
print()
    print('If you share access to that storage account with Azure support, you consent to the terms outlined'
f' in {format_hyperlink("http://aka.ms/DiagConsent")}.')
print()
if not prompt_y_n('Do you confirm?', default="n"):
return
print()
print("Getting credentials for cluster %s " % name)
_, temp_kubeconfig_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name,
name, admin=True, path=temp_kubeconfig_path)
print()
print("Starts collecting diag info for cluster %s " % name)
sas_token = sas_token.strip('?')
deployment_yaml = urlopen(
"https://raw.githubusercontent.com/Azure/aks-periscope/latest/deployment/aks-periscope.yaml").read().decode()
deployment_yaml = deployment_yaml.replace("# <accountName, base64 encoded>",
(base64.b64encode(bytes(storage_account_name, 'ascii'))).decode('ascii'))
deployment_yaml = deployment_yaml.replace("# <saskey, base64 encoded>",
(base64.b64encode(bytes("?" + sas_token, 'ascii'))).decode('ascii'))
yaml_lines = deployment_yaml.splitlines()
for index, line in enumerate(yaml_lines):
if "DIAGNOSTIC_CONTAINERLOGS_LIST" in line and container_logs is not None:
yaml_lines[index] = line + ' ' + container_logs
if "DIAGNOSTIC_KUBEOBJECTS_LIST" in line and kube_objects is not None:
yaml_lines[index] = line + ' ' + kube_objects
if "DIAGNOSTIC_NODELOGS_LIST" in line and node_logs is not None:
yaml_lines[index] = line + ' ' + node_logs
deployment_yaml = '\n'.join(yaml_lines)
fd, temp_yaml_path = tempfile.mkstemp()
temp_yaml_file = os.fdopen(fd, 'w+t')
try:
temp_yaml_file.write(deployment_yaml)
temp_yaml_file.flush()
temp_yaml_file.close()
try:
print()
print("Cleaning up aks-periscope resources if existing")
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"serviceaccount,configmap,daemonset,secret",
"--all", "-n", "aks-periscope", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRoleBinding",
"aks-periscope-role-binding", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRoleBinding",
"aks-periscope-role-binding-view", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"ClusterRole",
"aks-periscope-role", "--ignore-not-found"],
stderr=subprocess.STDOUT)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"--all",
"apd", "-n", "aks-periscope", "--ignore-not-found"],
stderr=subprocess.DEVNULL)
subprocess.call(["kubectl", "--kubeconfig", temp_kubeconfig_path, "delete",
"CustomResourceDefinition",
"diagnostics.aks-periscope.azure.github.com", "--ignore-not-found"],
stderr=subprocess.STDOUT)
print()
print("Deploying aks-periscope")
subprocess.check_output(["kubectl", "--kubeconfig", temp_kubeconfig_path, "apply", "-f",
temp_yaml_path, "-n", "aks-periscope"], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
raise CLIError(err.output)
finally:
os.remove(temp_yaml_path)
print()
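    # Private clusters expose only private_fqdn, so fall back to it when fqdn is unset.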
fqdn = mc.fqdn if mc.fqdn is not None else mc.private_fqdn
normalized_fqdn = fqdn.replace('.', '-')
token_in_storage_account_url = readonly_sas_token if readonly_sas_token is not None else sas_token
log_storage_account_url = f"https://{storage_account_name}.blob.core.windows.net/" \
f"{_trim_fqdn_name_containing_hcp(normalized_fqdn)}?{token_in_storage_account_url}"
print(f'{colorama.Fore.GREEN}Your logs are being uploaded to storage account {format_bright(storage_account_name)}')
print()
    print(f'You can download Azure Storage Explorer here '
f'{format_hyperlink("https://azure.microsoft.com/en-us/features/storage-explorer/")}'
f' to check the logs by adding the storage account using the following URL:')
print(f'{format_hyperlink(log_storage_account_url)}')
print()
if not prompt_y_n('Do you want to see analysis results now?', default="n"):
print(f"You can run 'az aks kanalyze -g {resource_group_name} -n {name}' "
f"anytime to check the analysis results.")
else:
display_diagnostics_report(temp_kubeconfig_path)
def aks_kanalyze(cmd, client, resource_group_name, name):
colorama.init()
client.get(resource_group_name, name)
_, temp_kubeconfig_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name,
name, admin=True, path=temp_kubeconfig_path)
display_diagnostics_report(temp_kubeconfig_path)
def aks_scale(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
name,
node_count,
nodepool_name="",
no_wait=False):
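    """Scale the node count of a node pool; pools with the cluster autoscaler enabled cannot be scaled manually."""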
instance = client.get(resource_group_name, name)
if len(instance.agent_pool_profiles) > 1 and nodepool_name == "":
        raise CLIError('There is more than one node pool in the cluster. '
                       'Please specify the nodepool name, or use the "az aks nodepool" command to scale a specific node pool.')
for agent_profile in instance.agent_pool_profiles:
if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
if agent_profile.enable_auto_scaling:
raise CLIError(
"Cannot scale cluster autoscaler enabled node pool.")
agent_profile.count = int(node_count) # pylint: disable=no-member
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
def aks_upgrade(cmd, # pylint: disable=unused-argument, too-many-return-statements
client,
resource_group_name,
name,
kubernetes_version='',
control_plane_only=False,
no_wait=False,
node_image_only=False,
aks_custom_headers=None,
yes=False):
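    """Upgrade the cluster control plane and, unless control_plane_only is set, all node pools.

    With node_image_only, only the node image version of every node pool is upgraded.
    """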
from knack.prompting import prompt_y_n
msg = 'Kubernetes may be unavailable during cluster upgrades.\n Are you sure you want to perform this operation?'
if not yes and not prompt_y_n(msg, default="n"):
return None
instance = client.get(resource_group_name, name)
vmas_cluster = False
for agent_profile in instance.agent_pool_profiles:
if agent_profile.type.lower() == "availabilityset":
vmas_cluster = True
break
if kubernetes_version != '' and node_image_only:
raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version. '
                       'If you only want to upgrade the node image version, please use the "--node-image-only" option.')
if node_image_only:
msg = "This node image upgrade operation will run across every node pool in the cluster" \
"and might take a while, do you wish to continue?"
if not yes and not prompt_y_n(msg, default="n"):
return None
        # This only provides convenience for the customer at the client side so they can run 'az aks upgrade'
        # to upgrade all nodepools of a cluster. The SDK only supports upgrading a single nodepool at a time.
for agent_pool_profile in instance.agent_pool_profiles:
if vmas_cluster:
                raise CLIError('This cluster is not using VirtualMachineScaleSets. The node image upgrade only operation '
                               'can only be applied to VirtualMachineScaleSets clusters.')
agent_pool_client = cf_agent_pools(cmd.cli_ctx)
_upgrade_single_nodepool_image_version(
True, agent_pool_client, resource_group_name, name, agent_pool_profile.name)
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
if instance.kubernetes_version == kubernetes_version:
if instance.provisioning_state == "Succeeded":
logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
"will occur when upgrading to the same version if the cluster is not in a failed state.",
instance.kubernetes_version)
elif instance.provisioning_state == "Failed":
logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
"attempt resolution of failed cluster state.", instance.kubernetes_version)
upgrade_all = False
instance.kubernetes_version = kubernetes_version
# for legacy clusters, we always upgrade node pools with CCP.
if instance.max_agent_pools < 8 or vmas_cluster:
if control_plane_only:
msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
"upgraded to {} as well. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
if not control_plane_only:
msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
"AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
"Node pool will not change. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
if upgrade_all:
for agent_profile in instance.agent_pool_profiles:
agent_profile.orchestrator_version = kubernetes_version
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance, headers=headers)
def aks_runcommand(cmd, client, resource_group_name, name, command_string="", command_files=None):
colorama.init()
mc = client.get(resource_group_name, name)
if not command_string:
raise CLIError('Command cannot be empty.')
request_payload = RunCommandRequest(command=command_string)
request_payload.context = _get_command_context(command_files)
if mc.aad_profile is not None and mc.aad_profile.managed:
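        # 6dae42f8-4368-4678-94ff-3960e28e3630 is the well-known AKS AAD server application ID;
        # AAD-enabled clusters need a dataplane token scoped to it for the run-command request.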
request_payload.cluster_token = _get_dataplane_aad_token(
cmd.cli_ctx, "6dae42f8-4368-4678-94ff-3960e28e3630")
commandResultFuture = client.begin_run_command(
resource_group_name, name, request_payload, polling_interval=5, retry_total=0)
return _print_command_result(cmd.cli_ctx, commandResultFuture.result(300))
def aks_command_result(cmd, client, resource_group_name, name, command_id=""):
if not command_id:
raise CLIError('CommandID cannot be empty.')
commandResult = client.get_command_result(
resource_group_name, name, command_id)
return _print_command_result(cmd.cli_ctx, commandResult)
def _print_command_result(cli_ctx, commandResult):
    # cli_ctx.data['safe_params'] contains the list of parameter names the user typed in, without values.
    # cli core also uses this to calculate the ParameterSetName header for every http request from the cli.
if cli_ctx.data['safe_params'] is None or "-o" in cli_ctx.data['safe_params'] or "--output" in cli_ctx.data['safe_params']:
# user specified output format, honor their choice, return object to render pipeline
return commandResult
else:
        # user didn't specify any format, so we can customize the print for the best experience
if commandResult.provisioning_state == "Succeeded":
# succeed, print exitcode, and logs
print(f"{colorama.Fore.GREEN}command started at {commandResult.started_at}, finished at {commandResult.finished_at}, with exitcode={commandResult.exit_code}{colorama.Style.RESET_ALL}")
print(commandResult.logs)
return
if commandResult.provisioning_state == "Failed":
# failed, print reason in error
print(
f"{colorama.Fore.RED}command failed with reason: {commandResult.reason}{colorama.Style.RESET_ALL}")
return
        # still in a transitional (*-ing) state
        print(f"{colorama.Fore.BLUE}command is in {commandResult.provisioning_state} state{colorama.Style.RESET_ALL}")
return None
def _get_command_context(command_files):
if not command_files:
return ""
filesToAttach = {}
    # '.' means attach the current folder; it cannot be combined with other files (at least for now).
if len(command_files) == 1 and command_files[0] == ".":
# current folder
cwd = os.getcwd()
for filefolder, _, files in os.walk(cwd):
for file in files:
# retain folder structure
rel = os.path.relpath(filefolder, cwd)
filesToAttach[os.path.join(
filefolder, file)] = os.path.join(rel, file)
else:
for file in command_files:
if file == ".":
                raise CLIError(
                    ". is used to attach the current folder, not expecting other attachments.")
if os.path.isfile(file):
# for individual attached file, flatten them to same folder
filesToAttach[file] = os.path.basename(file)
else:
raise CLIError(f"{file} is not valid file, or not accessable.")
if len(filesToAttach) < 1:
logger.debug("no files to attach!")
return ""
zipStream = io.BytesIO()
zipFile = zipfile.ZipFile(zipStream, "w")
for _, (osfile, zipEntry) in enumerate(filesToAttach.items()):
zipFile.write(osfile, zipEntry)
    # zipFile.printdir()  # uncomment to inspect the archive contents when debugging
zipFile.close()
return str(base64.encodebytes(zipStream.getbuffer()), "utf-8")
def _get_dataplane_aad_token(cli_ctx, serverAppId):
# this function is mostly copied from keyvault cli
import adal
try:
return Profile(cli_ctx=cli_ctx).get_raw_token(resource=serverAppId)[0][2].get('accessToken')
except adal.AdalError as err:
# pylint: disable=no-member
if (hasattr(err, 'error_response') and
('error_description' in err.error_response) and
('AADSTS70008:' in err.error_response['error_description'])):
raise CLIError(
"Credentials have expired due to inactivity. Please run 'az login'")
raise CLIError(err)
def _upgrade_single_nodepool_image_version(no_wait, client, resource_group_name, cluster_name, nodepool_name):
return sdk_no_wait(no_wait, client.begin_upgrade_node_image_version, resource_group_name, cluster_name, nodepool_name)
def _handle_addons_args(cmd, # pylint: disable=too-many-statements
addons_str,
subscription_id,
resource_group_name,
addon_profiles=None,
workspace_resource_id=None,
enable_msi_auth_for_monitoring=False,
appgw_name=None,
appgw_subnet_prefix=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
aci_subnet_name=None,
vnet_subnet_id=None,
enable_secret_rotation=False):
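    """Translate the comma-separated --enable-addons value and related flags into ManagedClusterAddonProfile entries."""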
if not addon_profiles:
addon_profiles = {}
addons = addons_str.split(',') if addons_str else []
if 'http_application_routing' in addons:
addon_profiles[CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True)
addons.remove('http_application_routing')
if 'kube-dashboard' in addons:
addon_profiles[CONST_KUBE_DASHBOARD_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True)
addons.remove('kube-dashboard')
# TODO: can we help the user find a workspace resource ID?
if 'monitoring' in addons:
if not workspace_resource_id:
# use default workspace if exists else create default workspace
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd, subscription_id, resource_group_name)
workspace_resource_id = _sanitize_loganalytics_ws_resource_id(workspace_resource_id)
addon_profiles[CONST_MONITORING_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True,
config={CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id,
CONST_MONITORING_USING_AAD_MSI_AUTH: enable_msi_auth_for_monitoring})
addons.remove('monitoring')
elif workspace_resource_id:
raise CLIError(
'"--workspace-resource-id" requires "--enable-addons monitoring".')
if 'azure-policy' in addons:
addon_profiles[CONST_AZURE_POLICY_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True)
addons.remove('azure-policy')
if 'gitops' in addons:
addon_profiles['gitops'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('gitops')
if 'ingress-appgw' in addons:
addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
if appgw_name is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
if appgw_subnet_prefix is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_prefix
if appgw_subnet_cidr is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
if appgw_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
if appgw_subnet_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
if appgw_watch_namespace is not None:
addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME] = addon_profile
addons.remove('ingress-appgw')
if 'open-service-mesh' in addons:
addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
addon_profiles[CONST_OPEN_SERVICE_MESH_ADDON_NAME] = addon_profile
addons.remove('open-service-mesh')
if 'azure-keyvault-secrets-provider' in addons:
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_SECRET_ROTATION_ENABLED: "false"})
if enable_secret_rotation:
addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"
addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME] = addon_profile
addons.remove('azure-keyvault-secrets-provider')
if 'confcom' in addons:
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
if enable_sgxquotehelper:
addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
addon_profiles[CONST_CONFCOM_ADDON_NAME] = addon_profile
addons.remove('confcom')
if 'virtual-node' in addons:
if not aci_subnet_name or not vnet_subnet_id:
raise CLIError(
'"--enable-addons virtual-node" requires "--aci-subnet-name" and "--vnet-subnet-id".')
# TODO: how about aciConnectorwindows, what is its addon name?
os_type = 'Linux'
addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + os_type] = ManagedClusterAddonProfile(
enabled=True,
config={CONST_VIRTUAL_NODE_SUBNET_NAME: aci_subnet_name}
)
addons.remove('virtual-node')
# error out if any (unrecognized) addons remain
if addons:
raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
",".join(addons), "are" if len(addons) > 1 else "is"))
return addon_profiles
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
# mapping for azure public cloud
# log analytics workspaces cannot be created in WCUS region due to capacity limits
# so mapped to EUS per discussion with log analytics team
AzureCloudLocationToOmsRegionCodeMap = {
"australiasoutheast": "ASE",
"australiaeast": "EAU",
"australiacentral": "CAU",
"canadacentral": "CCA",
"centralindia": "CIN",
"centralus": "CUS",
"eastasia": "EA",
"eastus": "EUS",
"eastus2": "EUS2",
"eastus2euap": "EAP",
"francecentral": "PAR",
"japaneast": "EJP",
"koreacentral": "SE",
"northeurope": "NEU",
"southcentralus": "SCUS",
"southeastasia": "SEA",
"uksouth": "SUK",
"usgovvirginia": "USGV",
"westcentralus": "EUS",
"westeurope": "WEU",
"westus": "WUS",
"westus2": "WUS2",
"brazilsouth": "CQ",
"brazilsoutheast": "BRSE",
"norwayeast": "NOE",
"southafricanorth": "JNB",
"northcentralus": "NCUS",
"uaenorth": "DXB",
"germanywestcentral": "DEWC",
"ukwest": "WUK",
"switzerlandnorth": "CHN",
"switzerlandwest": "CHW",
"uaecentral": "AUH"
}
AzureCloudRegionToOmsRegionMap = {
"australiacentral": "australiacentral",
"australiacentral2": "australiacentral",
"australiaeast": "australiaeast",
"australiasoutheast": "australiasoutheast",
"brazilsouth": "brazilsouth",
"canadacentral": "canadacentral",
"canadaeast": "canadacentral",
"centralus": "centralus",
"centralindia": "centralindia",
"eastasia": "eastasia",
"eastus": "eastus",
"eastus2": "eastus2",
"francecentral": "francecentral",
"francesouth": "francecentral",
"japaneast": "japaneast",
"japanwest": "japaneast",
"koreacentral": "koreacentral",
"koreasouth": "koreacentral",
"northcentralus": "northcentralus",
"northeurope": "northeurope",
"southafricanorth": "southafricanorth",
"southafricawest": "southafricanorth",
"southcentralus": "southcentralus",
"southeastasia": "southeastasia",
"southindia": "centralindia",
"uksouth": "uksouth",
"ukwest": "ukwest",
"westcentralus": "eastus",
"westeurope": "westeurope",
"westindia": "centralindia",
"westus": "westus",
"westus2": "westus2",
"norwayeast": "norwayeast",
"norwaywest": "norwayeast",
"switzerlandnorth": "switzerlandnorth",
"switzerlandwest": "switzerlandwest",
"uaenorth": "uaenorth",
"germanywestcentral": "germanywestcentral",
"germanynorth": "germanywestcentral",
"uaecentral": "uaecentral",
"eastus2euap": "eastus2euap",
"brazilsoutheast": "brazilsoutheast"
}
# mapping for azure china cloud
# log analytics only support China East2 region
AzureChinaLocationToOmsRegionCodeMap = {
"chinaeast": "EAST2",
"chinaeast2": "EAST2",
"chinanorth": "EAST2",
"chinanorth2": "EAST2"
}
AzureChinaRegionToOmsRegionMap = {
"chinaeast": "chinaeast2",
"chinaeast2": "chinaeast2",
"chinanorth": "chinaeast2",
"chinanorth2": "chinaeast2"
}
    # mapping for azure us government cloud
AzureFairfaxLocationToOmsRegionCodeMap = {
"usgovvirginia": "USGV",
"usgovarizona": "PHX"
}
AzureFairfaxRegionToOmsRegionMap = {
"usgovvirginia": "usgovvirginia",
"usgovtexas": "usgovvirginia",
"usgovarizona": "usgovarizona"
}
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
cloud_name = cmd.cli_ctx.cloud.name
if cloud_name.lower() == 'azurecloud':
workspace_region = AzureCloudRegionToOmsRegionMap.get(
rg_location, "eastus")
workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(
workspace_region, "EUS")
elif cloud_name.lower() == 'azurechinacloud':
workspace_region = AzureChinaRegionToOmsRegionMap.get(
rg_location, "chinaeast2")
workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(
workspace_region, "EAST2")
elif cloud_name.lower() == 'azureusgovernment':
workspace_region = AzureFairfaxRegionToOmsRegionMap.get(
rg_location, "usgovvirginia")
workspace_region_code = AzureFairfaxLocationToOmsRegionCodeMap.get(
workspace_region, "USGV")
    else:
        logger.error(
            "AKS Monitoring addon is not supported in cloud: %s", cloud_name)
        # workspace_region and workspace_region_code would be undefined past this point, so fail fast
        raise CLIError(
            'AKS Monitoring addon is not supported in cloud: {}'.format(cloud_name))
default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(
subscription_id, workspace_region_code)
default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
'/workspaces/{2}'.format(subscription_id,
default_workspace_resource_group, default_workspace_name)
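    # Illustrative example (placeholder values): for subscription 00000000-0000-0000-0000-000000000000
    # with a resource group located in westus2, this resolves to workspace
    # 'DefaultWorkspace-00000000-0000-0000-0000-000000000000-WUS2' in resource group 'DefaultResourceGroup-WUS2'.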
resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
resources = cf_resources(cmd.cli_ctx, subscription_id)
from azure.cli.core.profiles import ResourceType
# check if default RG exists
if resource_groups.check_existence(default_workspace_resource_group):
from azure.core.exceptions import HttpResponseError
try:
resource = resources.get_by_id(
default_workspace_resource_id, '2015-11-01-preview')
return resource.id
except HttpResponseError as ex:
if ex.status_code != 404:
raise ex
else:
ResourceGroup = cmd.get_models('ResourceGroup', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
resource_group = ResourceGroup(location=workspace_region)
resource_groups.create_or_update(default_workspace_resource_group, resource_group)
GenericResource = cmd.get_models('GenericResource', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
generic_resource = GenericResource(location=workspace_region, properties={'sku': {'name': 'standalone'}})
async_poller = resources.begin_create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
generic_resource)
ws_resource_id = ''
while True:
result = async_poller.result(15)
if async_poller.done():
ws_resource_id = result.id
break
return ws_resource_id
def _sanitize_loganalytics_ws_resource_id(workspace_resource_id):
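    # Normalize a Log Analytics workspace resource ID: trim whitespace, ensure a single leading '/',
    # and drop any trailing '/'. Illustrative (placeholders):
    #   'subscriptions/<sub>/.../workspaces/<ws>/' -> '/subscriptions/<sub>/.../workspaces/<ws>'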
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
return workspace_resource_id
def _ensure_container_insights_for_monitoring(cmd,
addon,
cluster_subscription,
cluster_resource_group_name,
cluster_name,
cluster_region,
remove_monitoring=False,
aad_route=False,
create_dcr=False,
create_dcra=False):
"""
Either adds the ContainerInsights solution to a LA Workspace OR sets up a DCR (Data Collection Rule) and DCRA
(Data Collection Rule Association). Both let the monitoring addon send data to a Log Analytics Workspace.
Set aad_route == True to set up the DCR data route. Otherwise the solution route will be used. Create_dcr and
create_dcra have no effect if aad_route == False.
Set remove_monitoring to True and create_dcra to True to remove the DCRA from a cluster. The association makes
it very hard to delete either the DCR or cluster. (It is not obvious how to even navigate to the association from
the portal, and it prevents the cluster and DCR from being deleted individually).
"""
if not addon.enabled:
return None
# workaround for this addon key which has been seen lowercased in the wild
for key in list(addon.config):
if key.lower() == CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID.lower() and key != CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID:
addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID] = addon.config.pop(
key)
workspace_resource_id = addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID].strip(
)
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
# extract subscription ID and resource group from workspace_resource_id URL
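    # Illustrative layout of the ID being split (placeholders):
    #   /subscriptions/<sub-id>/resourceGroups/<rg>/providers/Microsoft.OperationalInsights/workspaces/<workspace>
    # so index 2 = subscription ID, index 4 = resource group, index 8 = workspace name.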
try:
subscription_id = workspace_resource_id.split('/')[2]
resource_group = workspace_resource_id.split('/')[4]
workspace_name = workspace_resource_id.split('/')[8]
except IndexError:
raise CLIError(
'Could not locate resource group in workspace-resource-id URL.')
# region of workspace can be different from region of RG so find the location of the workspace_resource_id
if not remove_monitoring:
resources = cf_resources(cmd.cli_ctx, subscription_id)
from azure.core.exceptions import HttpResponseError
try:
resource = resources.get_by_id(
workspace_resource_id, '2015-11-01-preview')
location = resource.location
except HttpResponseError as ex:
raise ex
if aad_route:
cluster_resource_id = f"/subscriptions/{cluster_subscription}/resourceGroups/{cluster_resource_group_name}/providers/Microsoft.ContainerService/managedClusters/{cluster_name}"
dataCollectionRuleName = f"DCR-{workspace_name}"
dcr_resource_id = f"/subscriptions/{subscription_id}/resourceGroups/{resource_group}/providers/Microsoft.Insights/dataCollectionRules/{dataCollectionRuleName}"
from azure.cli.core.util import send_raw_request
from azure.cli.core.profiles import ResourceType
if create_dcr:
# first get the association between region display names and region IDs (because for some reason
# the "which RPs are available in which regions" check returns region display names)
region_names_to_id = {}
# retry the request up to two times
for _ in range(3):
try:
location_list_url = f"https://management.azure.com/subscriptions/{subscription_id}/locations?api-version=2019-11-01"
r = send_raw_request(cmd.cli_ctx, "GET", location_list_url)
                    # this assignment is only here to satisfy static analysis: the for-else below runs
                    # only when every attempt raised (the loop never hit 'break'), and flake8 would
                    # otherwise flag 'error' as possibly undefined.
error = None
break
except CLIError as e:
error = e
else:
# This will run if the above for loop was not broken out of. This means all three requests failed
raise error
json_response = json.loads(r.text)
for region_data in json_response["value"]:
region_names_to_id[region_data["displayName"]] = region_data["name"]
# check if region supports DCRs and DCR-A
for _ in range(3):
try:
feature_check_url = f"https://management.azure.com/subscriptions/{subscription_id}/providers/Microsoft.Insights?api-version=2020-10-01"
r = send_raw_request(cmd.cli_ctx, "GET", feature_check_url)
error = None
break
except CLIError as e:
error = e
else:
raise error
json_response = json.loads(r.text)
for resource in json_response["resourceTypes"]:
region_ids = map(lambda x: region_names_to_id[x], resource["locations"]) # map is lazy, so doing this for every region isn't slow
if resource["resourceType"].lower() == "datacollectionrules" and location not in region_ids:
raise ClientRequestError(f'Data Collection Rules are not supported for LA workspace region {location}')
elif resource["resourceType"].lower() == "datacollectionruleassociations" and cluster_region not in region_ids:
                    raise ClientRequestError(f'Data Collection Rule Associations are not supported for cluster region {cluster_region}')
# create the DCR
dcr_creation_body = json.dumps({"location": location,
"properties": {
"dataSources": {
"extensions": [
{
"name": "ContainerInsightsExtension",
"streams": [
"Microsoft-Perf",
"Microsoft-ContainerInventory",
"Microsoft-ContainerLog",
"Microsoft-ContainerLogV2",
"Microsoft-ContainerNodeInventory",
"Microsoft-KubeEvents",
"Microsoft-KubeHealth",
"Microsoft-KubeMonAgentEvents",
"Microsoft-KubeNodeInventory",
"Microsoft-KubePodInventory",
"Microsoft-KubePVInventory",
"Microsoft-KubeServices",
"Microsoft-InsightsMetrics"
],
"extensionName": "ContainerInsights"
}
]
},
"dataFlows": [
{
"streams": [
"Microsoft-Perf",
"Microsoft-ContainerInventory",
"Microsoft-ContainerLog",
"Microsoft-ContainerLogV2",
"Microsoft-ContainerNodeInventory",
"Microsoft-KubeEvents",
"Microsoft-KubeHealth",
"Microsoft-KubeMonAgentEvents",
"Microsoft-KubeNodeInventory",
"Microsoft-KubePodInventory",
"Microsoft-KubePVInventory",
"Microsoft-KubeServices",
"Microsoft-InsightsMetrics"
],
"destinations": [
"la-workspace"
]
}
],
"destinations": {
"logAnalytics": [
{
"workspaceResourceId": workspace_resource_id,
"name": "la-workspace"
}
]
}
}})
dcr_url = f"https://management.azure.com/{dcr_resource_id}?api-version=2019-11-01-preview"
for _ in range(3):
try:
send_raw_request(cmd.cli_ctx, "PUT", dcr_url, body=dcr_creation_body)
error = None
break
except CLIError as e:
error = e
else:
raise error
if create_dcra:
# only create or delete the association between the DCR and cluster
association_body = json.dumps({"location": cluster_region,
"properties": {
"dataCollectionRuleId": dcr_resource_id,
"description": "routes monitoring data to a Log Analytics workspace"
}})
association_url = f"https://management.azure.com/{cluster_resource_id}/providers/Microsoft.Insights/dataCollectionRuleAssociations/send-to-{workspace_name}?api-version=2019-11-01-preview"
for _ in range(3):
try:
send_raw_request(cmd.cli_ctx, "PUT" if not remove_monitoring else "DELETE", association_url, body=association_body)
error = None
break
except CLIError as e:
error = e
else:
raise error
else:
# legacy auth with LA workspace solution
unix_time_in_millis = int(
(datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
solution_deployment_name = 'ContainerInsights-{}'.format(
unix_time_in_millis)
# pylint: disable=line-too-long
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"workspaceResourceId": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics Resource ID"
}
},
"workspaceRegion": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics workspace region"
}
},
"solutionDeploymentName": {
"type": "string",
"metadata": {
"description": "Name of the solution deployment"
}
}
},
"resources": [
{
"type": "Microsoft.Resources/deployments",
"name": "[parameters('solutionDeploymentName')]",
"apiVersion": "2017-05-10",
"subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
"resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
"properties": {
"mode": "Incremental",
"template": {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {},
"variables": {},
"resources": [
{
"apiVersion": "2015-11-01-preview",
"type": "Microsoft.OperationsManagement/solutions",
"location": "[parameters('workspaceRegion')]",
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"properties": {
"workspaceResourceId": "[parameters('workspaceResourceId')]"
},
"plan": {
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"product": "[Concat('OMSGallery/', 'ContainerInsights')]",
"promotionCode": "",
"publisher": "Microsoft"
}
}
]
},
"parameters": {}
}
}
]
}
params = {
"workspaceResourceId": {
"value": workspace_resource_id
},
"workspaceRegion": {
"value": location
},
"solutionDeploymentName": {
"value": solution_deployment_name
}
}
deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
# publish the Container Insights solution to the Log Analytics workspace
return _invoke_deployment(cmd, resource_group, deployment_name, template, params,
validate=False, no_wait=False, subscription_id=subscription_id)
def _ensure_aks_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
fqdn_subdomain=None,
location=None,
name=None):
file_name_aks = 'aksServicePrincipal.json'
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, try to load it from local disk
principal_obj = load_acs_service_principal(
subscription_id, file_name=file_name_aks)
if principal_obj:
service_principal = principal_obj.get('service_principal')
client_secret = principal_obj.get('client_secret')
else:
# Nothing to load, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
if dns_name_prefix:
url = 'http://{}.{}.{}.cloudapp.azure.com'.format(
salt, dns_name_prefix, location)
else:
url = 'http://{}.{}.{}.cloudapp.azure.com'.format(
salt, fqdn_subdomain, location)
service_principal = _build_service_principal(
rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# We don't need to add role assignment for this created SPN
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError(
'--client-secret is required if --service-principal is specified')
store_acs_service_principal(
subscription_id, client_secret, service_principal, file_name=file_name_aks)
return load_acs_service_principal(subscription_id, file_name=file_name_aks)
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
groups = cf_resource_groups(ctx, subscription_id=subscription_id)
    # Just do the get; it will error out if the group doesn't exist, and we only need its location.
rg = groups.get(resource_group_name)
return rg.location
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise CLIError(
'Please specify both min-count and max-count when --enable-cluster-autoscaler enabled')
if int(min_count) > int(max_count):
raise CLIError(
'value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError(
'node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
raise CLIError(
                'min-count and max-count are only used with --enable-cluster-autoscaler; please specify that flag or remove them')
def _create_client_secret():
    # Add a special character to satisfy AAD SP secret requirements
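    # Illustrative output only: something like '0123456789abcdef0123$' (20 hex chars from 10 random bytes, plus '$')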
special_char = '$'
client_secret = binascii.b2a_hex(
os.urandom(10)).decode('utf-8') + special_char
return client_secret
def _ensure_aks_acr(cli_ctx,
client_id,
acr_name_or_id,
subscription_id, # pylint: disable=unused-argument
detach=False):
from msrestazure.tools import is_valid_resource_id, parse_resource_id
# Check if the ACR exists by resource ID.
if is_valid_resource_id(acr_name_or_id):
try:
parsed_registry = parse_resource_id(acr_name_or_id)
acr_client = cf_container_registry_service(
cli_ctx, subscription_id=parsed_registry['subscription'])
registry = acr_client.registries.get(
parsed_registry['resource_group'], parsed_registry['name'])
except CloudError as ex:
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(
cli_ctx, client_id, registry.id, detach)
return
    # Check if the ACR exists by name across all resource groups.
registry_name = acr_name_or_id
registry_resource = 'Microsoft.ContainerRegistry/registries'
try:
registry = get_resource_by_name(
cli_ctx, registry_name, registry_resource)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError(
"ACR {} not found. Have you provided the right ACR name?".format(registry_name))
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
def _ensure_aks_acr_role_assignment(cli_ctx,
client_id,
registry_id,
detach=False):
if detach:
if not _delete_role_assignments(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not delete role assignments for ACR. '
'Are you an Owner on this subscription?')
return
if not _add_role_assignment(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not create a role assignment for ACR. '
'Are you an Owner on this subscription?')
return
def _add_virtual_node_role_assignment(cmd, result, vnet_subnet_id):
# Remove trailing "/subnets/<SUBNET_NAME>" to get the vnet id
vnet_id = vnet_subnet_id.rpartition('/')[0]
vnet_id = vnet_id.rpartition('/')[0]
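    # Illustrative (placeholders): '.../virtualNetworks/<vnet>/subnets/<subnet>' -> '.../virtualNetworks/<vnet>'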
service_principal_msi_id = None
is_service_principal = False
os_type = 'Linux'
addon_name = CONST_VIRTUAL_NODE_ADDON_NAME + os_type
# Check if service principal exists, if it does, assign permissions to service principal
# Else, provide permissions to MSI
if (
hasattr(result, 'service_principal_profile') and
hasattr(result.service_principal_profile, 'client_id') and
result.service_principal_profile.client_id.lower() != 'msi'
):
logger.info('valid service principal exists, using it')
service_principal_msi_id = result.service_principal_profile.client_id
is_service_principal = True
elif (
(hasattr(result, 'addon_profiles')) and
(addon_name in result.addon_profiles) and
(hasattr(result.addon_profiles[addon_name], 'identity')) and
(hasattr(result.addon_profiles[addon_name].identity, 'object_id'))
):
logger.info('virtual node MSI exists, using it')
service_principal_msi_id = result.addon_profiles[addon_name].identity.object_id
is_service_principal = False
if service_principal_msi_id is not None:
if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
service_principal_msi_id, is_service_principal, scope=vnet_id):
logger.warning('Could not create a role assignment for virtual node addon. '
'Are you an Owner on this subscription?')
else:
        logger.warning('Could not find service principal or user assigned MSI for role '
                       'assignment')
def aks_agentpool_show(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
return instance
def aks_agentpool_list(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name):
return client.list(resource_group_name, cluster_name)
def aks_agentpool_add(cmd, # pylint: disable=unused-argument,too-many-locals
client,
resource_group_name,
cluster_name,
nodepool_name,
tags=None,
kubernetes_version=None,
node_zones=None,
enable_node_public_ip=False,
node_public_ip_prefix_id=None,
node_vm_size=None,
node_osdisk_type=None,
node_osdisk_size=0,
node_count=3,
vnet_subnet_id=None,
pod_subnet_id=None,
ppg=None,
max_pods=0,
os_type="Linux",
os_sku=None,
enable_fips_image=False,
min_count=None,
max_count=None,
enable_cluster_autoscaler=False,
node_taints=None,
priority=CONST_SCALE_SET_PRIORITY_REGULAR,
eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE,
spot_max_price=float('nan'),
labels=None,
max_surge=None,
mode="User",
aks_custom_headers=None,
kubelet_config=None,
linux_os_config=None,
enable_encryption_at_host=False,
enable_ultra_ssd=False,
no_wait=False):
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name == nodepool_name:
raise CLIError("Node pool {} already exists, please try a different name, "
"use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
upgradeSettings = AgentPoolUpgradeSettings()
taints_array = []
if node_taints is not None:
for taint in node_taints.split(','):
try:
taint = taint.strip()
taints_array.append(taint)
except ValueError:
raise CLIError(
'Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')
if node_vm_size is None:
if os_type == "Windows":
node_vm_size = "Standard_D2s_v3"
else:
node_vm_size = "Standard_DS2_v2"
if max_surge:
upgradeSettings.max_surge = max_surge
agent_pool = AgentPool(
name=nodepool_name,
tags=tags,
node_labels=labels,
count=int(node_count),
vm_size=node_vm_size,
os_type=os_type,
os_sku=os_sku,
enable_fips=enable_fips_image,
storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
vnet_subnet_id=vnet_subnet_id,
pod_subnet_id=pod_subnet_id,
proximity_placement_group_id=ppg,
agent_pool_type="VirtualMachineScaleSets",
max_pods=int(max_pods) if max_pods else None,
orchestrator_version=kubernetes_version,
availability_zones=node_zones,
enable_node_public_ip=enable_node_public_ip,
node_public_ip_prefix_id=node_public_ip_prefix_id,
node_taints=taints_array,
scale_set_priority=priority,
upgrade_settings=upgradeSettings,
enable_encryption_at_host=enable_encryption_at_host,
enable_ultra_ssd=enable_ultra_ssd,
mode=mode
)
if priority == CONST_SCALE_SET_PRIORITY_SPOT:
agent_pool.scale_set_eviction_policy = eviction_policy
if isnan(spot_max_price):
spot_max_price = -1
agent_pool.spot_max_price = spot_max_price
_check_cluster_autoscaler_flag(
enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
if node_osdisk_size:
agent_pool.os_disk_size_gb = int(node_osdisk_size)
if node_osdisk_type:
agent_pool.os_disk_type = node_osdisk_type
if kubelet_config:
agent_pool.kubelet_config = _get_kubelet_config(kubelet_config)
if linux_os_config:
agent_pool.linux_os_config = _get_linux_os_config(linux_os_config)
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool, headers=headers)
def aks_agentpool_scale(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
node_count=3,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
new_node_count = int(node_count)
if instance.enable_auto_scaling:
raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
if new_node_count == instance.count:
raise CLIError(
"The new node count is the same as the current node count.")
instance.count = new_node_count # pylint: disable=no-member
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_upgrade(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
kubernetes_version='',
no_wait=False,
node_image_only=False,
max_surge=None,
aks_custom_headers=None):
if kubernetes_version != '' and node_image_only:
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade the node image version. '
                       'If you only want to upgrade the node image version, please use the "--node-image-only" option only.')
if node_image_only:
return _upgrade_single_nodepool_image_version(no_wait,
client,
resource_group_name,
cluster_name,
nodepool_name)
instance = client.get(resource_group_name, cluster_name, nodepool_name)
instance.orchestrator_version = kubernetes_version
if not instance.upgrade_settings:
instance.upgrade_settings = AgentPoolUpgradeSettings()
if max_surge:
instance.upgrade_settings.max_surge = max_surge
headers = get_aks_custom_headers(aks_custom_headers)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance, headers=headers)
def aks_agentpool_get_upgrade_profile(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name):
return client.get_upgrade_profile(resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_update(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
tags=None,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
min_count=None, max_count=None,
max_surge=None,
mode=None,
no_wait=False):
update_autoscaler = enable_cluster_autoscaler + \
disable_cluster_autoscaler + update_cluster_autoscaler
if (update_autoscaler != 1 and not tags and not mode and not max_surge):
raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--tags" or "--mode" or "--max-surge"')
instance = client.get(resource_group_name, cluster_name, nodepool_name)
if min_count is None or max_count is None:
if enable_cluster_autoscaler or update_cluster_autoscaler:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
'--update-cluster-autoscaler set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError(
'value of min-count should be less than or equal to value of max-count.')
if enable_cluster_autoscaler:
if instance.enable_auto_scaling:
logger.warning('Autoscaler is already enabled for this node pool.\n'
'Please run "az aks nodepool update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.min_count = int(min_count)
instance.max_count = int(max_count)
instance.enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.enable_auto_scaling:
raise CLIError('Autoscaler is not enabled for this node pool.\n'
'Run "az aks nodepool update --enable-cluster-autoscaler" '
'to enable cluster with min-count and max-count.')
instance.min_count = int(min_count)
instance.max_count = int(max_count)
if not instance.upgrade_settings:
instance.upgrade_settings = AgentPoolUpgradeSettings()
if max_surge:
instance.upgrade_settings.max_surge = max_surge
if disable_cluster_autoscaler:
if not instance.enable_auto_scaling:
logger.warning(
'Autoscaler is already disabled for this node pool.')
return None
instance.enable_auto_scaling = False
instance.min_count = None
instance.max_count = None
instance.tags = tags
if mode is not None:
instance.mode = mode
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_delete(cmd, # pylint: disable=unused-argument
client,
resource_group_name,
cluster_name,
nodepool_name,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise CLIError("Node pool {} doesnt exist, "
"use 'aks nodepool list' to get current node pool list".format(nodepool_name))
return sdk_no_wait(no_wait, client.begin_delete, resource_group_name, cluster_name, nodepool_name)
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
try:
if addons == "monitoring" and CONST_MONITORING_ADDON_NAME in instance.addon_profiles and \
instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled and \
CONST_MONITORING_USING_AAD_MSI_AUTH in instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config and \
str(instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config[CONST_MONITORING_USING_AAD_MSI_AUTH]).lower() == 'true':
# remove the DCR association because otherwise the DCR can't be deleted
_ensure_container_insights_for_monitoring(
cmd,
instance.addon_profiles[CONST_MONITORING_ADDON_NAME],
subscription_id,
resource_group_name,
name,
instance.location,
remove_monitoring=True,
aad_route=True,
create_dcr=False,
create_dcra=True
)
except TypeError:
pass
instance = _update_addons(
cmd,
instance,
subscription_id,
resource_group_name,
name,
addons,
enable=False,
no_wait=no_wait
)
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
subnet_name=None, appgw_name=None, appgw_subnet_prefix=None, appgw_subnet_cidr=None, appgw_id=None, appgw_subnet_id=None,
appgw_watch_namespace=None, enable_sgxquotehelper=False, enable_secret_rotation=False, no_wait=False, enable_msi_auth_for_monitoring=False):
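    # Illustrative CLI usage (assumed typical invocation, not exhaustive):
    #   az aks enable-addons -g <resource-group> -n <cluster-name> -a monitoring --workspace-resource-id <workspace-id>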
instance = client.get(resource_group_name, name)
msi_auth = True if instance.service_principal_profile.client_id == "msi" else False # this is overwritten by _update_addons(), so the value needs to be recorded here
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable=True,
workspace_resource_id=workspace_resource_id, enable_msi_auth_for_monitoring=enable_msi_auth_for_monitoring, subnet_name=subnet_name,
appgw_name=appgw_name, appgw_subnet_prefix=appgw_subnet_prefix, appgw_subnet_cidr=appgw_subnet_cidr, appgw_id=appgw_id, appgw_subnet_id=appgw_subnet_id, appgw_watch_namespace=appgw_watch_namespace,
enable_sgxquotehelper=enable_sgxquotehelper, enable_secret_rotation=enable_secret_rotation, no_wait=no_wait)
if CONST_MONITORING_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled:
if CONST_MONITORING_USING_AAD_MSI_AUTH in instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config and \
str(instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config[CONST_MONITORING_USING_AAD_MSI_AUTH]).lower() == 'true':
if not msi_auth:
raise ArgumentUsageError("--enable-msi-auth-for-monitoring can not be used on clusters with service principal auth.")
else:
# create a Data Collection Rule (DCR) and associate it with the cluster
_ensure_container_insights_for_monitoring(cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME], subscription_id, resource_group_name, name, instance.location, aad_route=True, create_dcr=True, create_dcra=True)
else:
# monitoring addon will use legacy path
_ensure_container_insights_for_monitoring(cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME], subscription_id, resource_group_name, name, instance.location, aad_route=False)
monitoring = CONST_MONITORING_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[
CONST_MONITORING_ADDON_NAME].enabled
ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles and instance.addon_profiles[
CONST_INGRESS_APPGW_ADDON_NAME].enabled
os_type = 'Linux'
enable_virtual_node = False
if CONST_VIRTUAL_NODE_ADDON_NAME + os_type in instance.addon_profiles:
enable_virtual_node = True
need_post_creation_role_assignment = monitoring or ingress_appgw_addon_enabled or enable_virtual_node
if need_post_creation_role_assignment:
# adding a wait here since we rely on the result for role assignment
result = LongRunningOperation(cmd.cli_ctx)(
client.begin_create_or_update(resource_group_name, name, instance))
cloud_name = cmd.cli_ctx.cloud.name
# mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
if monitoring and cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
_add_monitoring_role_assignment(result, cluster_resource_id, cmd)
if ingress_appgw_addon_enabled:
_add_ingress_appgw_addon_role_assignment(result, cmd)
if enable_virtual_node:
            # All agent pools reside in the same vnet, so we grant the vnet-level Contributor role
            # in a later function; using any agent pool here is therefore OK
random_agent_pool = result.agent_pool_profiles[0]
if random_agent_pool.vnet_subnet_id != "":
_add_virtual_node_role_assignment(
cmd, result, random_agent_pool.vnet_subnet_id)
# Else, the cluster is not using custom VNet, the permission is already granted in AKS RP,
# we don't need to handle it in client side in this case.
else:
result = sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, name, instance)
return result
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True): # pylint: disable=unused-argument
return sdk_no_wait(no_wait, client.begin_rotate_cluster_certificates, resource_group_name, name)
def _update_addons(cmd, # pylint: disable=too-many-branches,too-many-statements
instance,
subscription_id,
resource_group_name,
name,
addons,
enable,
workspace_resource_id=None,
enable_msi_auth_for_monitoring=False,
subnet_name=None,
appgw_name=None,
appgw_subnet_prefix=None,
appgw_subnet_cidr=None,
appgw_id=None,
appgw_subnet_id=None,
appgw_watch_namespace=None,
enable_sgxquotehelper=False,
enable_secret_rotation=False,
no_wait=False): # pylint: disable=unused-argument
# parse the comma-separated addons argument
addon_args = addons.split(',')
addon_profiles = instance.addon_profiles or {}
os_type = 'Linux'
# for each addons argument
for addon_arg in addon_args:
if addon_arg not in ADDONS:
raise CLIError("Invalid addon name: {}.".format(addon_arg))
addon = ADDONS[addon_arg]
if addon == CONST_VIRTUAL_NODE_ADDON_NAME:
# only linux is supported for now, in the future this will be a user flag
addon += os_type
# honor addon names defined in Azure CLI
for key in list(addon_profiles):
if key.lower() == addon.lower() and key != addon:
addon_profiles[addon] = addon_profiles.pop(key)
if enable:
# add new addons or update existing ones and enable them
addon_profile = addon_profiles.get(
addon, ManagedClusterAddonProfile(enabled=False))
# special config handling for certain addons
if addon == CONST_MONITORING_ADDON_NAME:
logAnalyticsConstName = CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID
if addon_profile.enabled:
                    raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring" '
                                   'before enabling it again.')
if not workspace_resource_id:
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd,
subscription_id,
resource_group_name)
workspace_resource_id = _sanitize_loganalytics_ws_resource_id(workspace_resource_id)
addon_profile.config = {logAnalyticsConstName: workspace_resource_id}
addon_profile.config[CONST_MONITORING_USING_AAD_MSI_AUTH] = enable_msi_auth_for_monitoring
elif addon == (CONST_VIRTUAL_NODE_ADDON_NAME + os_type):
if addon_profile.enabled:
raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
'To change virtual-node configuration, run '
'"az aks disable-addons -a virtual-node -g {resource_group_name}" '
'before enabling it again.')
if not subnet_name:
raise CLIError(
'The aci-connector addon requires setting a subnet name.')
addon_profile.config = {
CONST_VIRTUAL_NODE_SUBNET_NAME: subnet_name}
elif addon == CONST_INGRESS_APPGW_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The ingress-appgw addon is already enabled for this managed cluster.\n'
'To change ingress-appgw configuration, run '
f'"az aks disable-addons -a ingress-appgw -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={})
if appgw_name is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
if appgw_subnet_prefix is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_prefix
if appgw_subnet_cidr is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
if appgw_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
if appgw_subnet_id is not None:
addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
if appgw_watch_namespace is not None:
addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
elif addon == CONST_OPEN_SERVICE_MESH_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The open-service-mesh addon is already enabled for this managed cluster.\n'
'To change open-service-mesh configuration, run '
f'"az aks disable-addons -a open-service-mesh -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={})
elif addon == CONST_CONFCOM_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The confcom addon is already enabled for this managed cluster.\n'
'To change confcom configuration, run '
f'"az aks disable-addons -a confcom -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
if enable_sgxquotehelper:
addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
elif addon == CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME:
if addon_profile.enabled:
raise CLIError('The azure-keyvault-secrets-provider addon is already enabled for this managed cluster.\n'
'To change azure-keyvault-secrets-provider configuration, run '
f'"az aks disable-addons -a azure-keyvault-secrets-provider -n {name} -g {resource_group_name}" '
'before enabling it again.')
addon_profile = ManagedClusterAddonProfile(
enabled=True, config={CONST_SECRET_ROTATION_ENABLED: "false"})
if enable_secret_rotation:
addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"
addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME] = addon_profile
addon_profiles[addon] = addon_profile
else:
if addon not in addon_profiles:
if addon == CONST_KUBE_DASHBOARD_ADDON_NAME:
addon_profiles[addon] = ManagedClusterAddonProfile(
enabled=False)
else:
raise CLIError(
"The addon {} is not installed.".format(addon))
addon_profiles[addon].config = None
addon_profiles[addon].enabled = enable
instance.addon_profiles = addon_profiles
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return instance
def aks_get_versions(cmd, client, location): # pylint: disable=unused-argument
return client.list_orchestrators(location, resource_type='managedClusters')
def aks_get_os_options(cmd, client, location): # pylint: disable=unused-argument
return client.get_os_options(location, resource_type='managedClusters')
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
"""Merge an unencrypted kubeconfig into the file at the specified path, or print it to
stdout if the path is "-".
"""
# Special case for printing to stdout
if path == "-":
print(kubeconfig)
return
# ensure that at least an empty ~/.kube/config exists
directory = os.path.dirname(path)
if directory and not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if not os.path.exists(path):
with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
pass
# merge the new kubeconfig into the existing one
fd, temp_path = tempfile.mkstemp()
additional_file = os.fdopen(fd, 'w+t')
try:
additional_file.write(kubeconfig)
additional_file.flush()
merge_kubernetes_configurations(
path, temp_path, overwrite_existing, context_name)
except yaml.YAMLError as ex:
logger.warning(
'Failed to merge credentials to kube config file: %s', ex)
finally:
additional_file.close()
os.remove(temp_path)
def _handle_merge(existing, addition, key, replace):
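    # Merges a list-valued kubeconfig section ('clusters', 'users' or 'contexts') from 'addition' into
    # 'existing': identical or force-replaced entries overwrite the old one; a name clash with different
    # content prompts for confirmation (or raises when no TTY is available).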
if not addition[key]:
return
if existing[key] is None:
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
from knack.prompting import prompt_y_n
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
try:
with open(filename) as stream:
return yaml.safe_load(stream)
except (IOError, OSError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
raise CLIError('{} does not exist'.format(filename))
except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
existing = load_kubernetes_configuration(existing_file)
addition = load_kubernetes_configuration(addition_file)
if context_name is not None:
addition['contexts'][0]['name'] = context_name
addition['contexts'][0]['context']['cluster'] = context_name
addition['clusters'][0]['name'] = context_name
addition['current-context'] = context_name
# rename the admin context so it doesn't overwrite the user context
for ctx in addition.get('contexts', []):
try:
if ctx['context']['user'].startswith('clusterAdmin'):
admin_name = ctx['name'] + '-admin'
addition['current-context'] = ctx['name'] = admin_name
break
except (KeyError, TypeError):
continue
if addition is None:
raise CLIError(
'failed to load additional configuration from {}'.format(addition_file))
if existing is None:
existing = addition
else:
_handle_merge(existing, addition, 'clusters', replace)
_handle_merge(existing, addition, 'users', replace)
_handle_merge(existing, addition, 'contexts', replace)
existing['current-context'] = addition['current-context']
# check that ~/.kube/config is only read- and writable by its owner
if platform.system() != 'Windows':
existing_file_perms = "{:o}".format(
stat.S_IMODE(os.lstat(existing_file).st_mode))
if not existing_file_perms.endswith('600'):
logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
existing_file, existing_file_perms)
with open(existing_file, 'w+') as stream:
yaml.safe_dump(existing, stream, default_flow_style=False)
current_context = addition.get('current-context', 'UNKNOWN')
msg = 'Merged "{}" as current context in {}'.format(
current_context, existing_file)
print(msg)
def cloud_storage_account_service_factory(cli_ctx, kwargs):
from azure.cli.core.profiles import ResourceType, get_sdk
t_cloud_storage_account = get_sdk(
cli_ctx, ResourceType.DATA_STORAGE, 'common#CloudStorageAccount')
account_name = kwargs.pop('account_name', None)
account_key = kwargs.pop('account_key', None)
sas_token = kwargs.pop('sas_token', None)
kwargs.pop('connection_string', None)
return t_cloud_storage_account(account_name, account_key, sas_token)
def get_storage_account_from_diag_settings(cli_ctx, resource_group_name, name):
from azure.mgmt.monitor import MonitorManagementClient
diag_settings_client = get_mgmt_service_client(
cli_ctx, MonitorManagementClient).diagnostic_settings
subscription_id = get_subscription_id(cli_ctx)
aks_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService' \
'/managedClusters/{2}'.format(subscription_id,
resource_group_name, name)
diag_settings = diag_settings_client.list(aks_resource_id)
if diag_settings.value:
return diag_settings.value[0].storage_account_id
print("No diag settings specified")
return None
def display_diagnostics_report(temp_kubeconfig_path): # pylint: disable=too-many-statements
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
nodes = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path,
"get", "node", "--no-headers"],
universal_newlines=True)
logger.debug(nodes)
node_lines = nodes.splitlines()
ready_nodes = {}
for node_line in node_lines:
columns = node_line.split()
logger.debug(node_line)
if columns[1] != "Ready":
logger.warning(
"Node %s is not Ready. Current state is: %s.", columns[0], columns[1])
else:
ready_nodes[columns[0]] = False
logger.debug('There are %s ready nodes in the cluster',
str(len(ready_nodes)))
if not ready_nodes:
logger.warning(
'No nodes are ready in the current cluster. Diagnostics info might not be available.')
network_config_array = []
network_status_array = []
apds_created = False
max_retry = 10
for retry in range(0, max_retry):
if not apds_created:
apd = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path, "get",
"apd", "-n", "aks-periscope", "--no-headers"],
universal_newlines=True
)
apd_lines = apd.splitlines()
if apd_lines and 'No resources found' in apd_lines[0]:
apd_lines.pop(0)
print("Got {} diagnostic results for {} ready nodes{}\r".format(len(apd_lines),
len(ready_nodes),
'.' * retry), end='')
if len(apd_lines) < len(ready_nodes):
time.sleep(3)
else:
apds_created = True
print()
else:
for node_name in ready_nodes:
if ready_nodes[node_name]:
continue
apdName = "aks-periscope-diagnostic-" + node_name
try:
network_config = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path,
"get", "apd", apdName, "-n",
"aks-periscope", "-o=jsonpath={.spec.networkconfig}"],
universal_newlines=True)
logger.debug('Dns status for node %s is %s',
node_name, network_config)
network_status = subprocess.check_output(
["kubectl", "--kubeconfig", temp_kubeconfig_path,
"get", "apd", apdName, "-n",
"aks-periscope", "-o=jsonpath={.spec.networkoutbound}"],
universal_newlines=True)
logger.debug('Network status for node %s is %s',
node_name, network_status)
if not network_config or not network_status:
print("The diagnostics information for node {} is not ready yet. "
"Will try again in 10 seconds.".format(node_name))
time.sleep(10)
break
network_config_array += json.loads(
'[' + network_config + ']')
network_status_object = json.loads(network_status)
network_status_array += format_diag_status(
network_status_object)
ready_nodes[node_name] = True
except subprocess.CalledProcessError as err:
raise CLIError(err.output)
print()
if network_config_array:
print("Below are the network configuration for each node: ")
print()
print(tabulate(network_config_array, headers="keys", tablefmt='simple'))
print()
else:
logger.warning("Could not get network config. "
"Please run 'az aks kanalyze' command later to get the analysis results.")
if network_status_array:
print("Below are the network connectivity results for each node:")
print()
print(tabulate(network_status_array, headers="keys", tablefmt='simple'))
else:
logger.warning("Could not get networking status. "
"Please run 'az aks kanalyze' command later to get the analysis results.")
def format_diag_status(diag_status):
for diag in diag_status:
if diag["Status"]:
if "Error:" in diag["Status"]:
diag["Status"] = f'{colorama.Fore.RED}{diag["Status"]}{colorama.Style.RESET_ALL}'
else:
diag["Status"] = f'{colorama.Fore.GREEN}{diag["Status"]}{colorama.Style.RESET_ALL}'
return diag_status
def format_bright(msg):
return f'\033[1m{colorama.Style.BRIGHT}{msg}{colorama.Style.RESET_ALL}'
def format_hyperlink(the_link):
return f'\033[1m{colorama.Style.BRIGHT}{colorama.Fore.BLUE}{the_link}{colorama.Style.RESET_ALL}'
def get_aks_custom_headers(aks_custom_headers=None):
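    # Expected format (illustrative): 'Key1=Value1,Key2=Value2' -> {'Key1': 'Value1', 'Key2': 'Value2'}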
headers = {}
if aks_custom_headers is not None:
if aks_custom_headers != "":
for pair in aks_custom_headers.split(','):
parts = pair.split('=')
if len(parts) != 2:
raise CLIError('custom headers format is incorrect')
headers[parts[0]] = parts[1]
return headers
def _put_managed_cluster_ensuring_permission(
cmd, # pylint: disable=too-many-locals,too-many-statements,too-many-branches
client,
subscription_id,
resource_group_name,
name,
managed_cluster,
monitoring_addon_enabled,
ingress_appgw_addon_enabled,
virtual_node_addon_enabled,
need_grant_vnet_permission_to_cluster_identity,
vnet_subnet_id,
enable_managed_identity,
attach_acr,
headers,
no_wait
):
    # some addons require post cluster creation role assignment
need_post_creation_role_assignment = (monitoring_addon_enabled or
ingress_appgw_addon_enabled or
(enable_managed_identity and attach_acr) or
virtual_node_addon_enabled or
need_grant_vnet_permission_to_cluster_identity)
if need_post_creation_role_assignment:
# adding a wait here since we rely on the result for role assignment
cluster = LongRunningOperation(cmd.cli_ctx)(client.begin_create_or_update(
resource_group_name=resource_group_name,
resource_name=name,
parameters=managed_cluster,
headers=headers))
cloud_name = cmd.cli_ctx.cloud.name
# add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM
# mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud
if monitoring_addon_enabled and cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
_add_monitoring_role_assignment(cluster, cluster_resource_id, cmd)
if ingress_appgw_addon_enabled:
_add_ingress_appgw_addon_role_assignment(cluster, cmd)
if virtual_node_addon_enabled:
_add_virtual_node_role_assignment(cmd, cluster, vnet_subnet_id)
if need_grant_vnet_permission_to_cluster_identity:
if not _create_role_assignment(cmd.cli_ctx, 'Network Contributor',
cluster.identity.principal_id, scope=vnet_subnet_id,
resolve_assignee=False):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
if enable_managed_identity and attach_acr:
# Attach ACR to cluster enabled managed identity
if cluster.identity_profile is None or \
cluster.identity_profile["kubeletidentity"] is None:
                logger.warning('Your cluster is successfully created, but we failed to attach '
                               'the ACR to it. You can manually grant permission to the identity '
                               'named <CLUSTER_NAME>-agentpool in the MC_ resource group to give '
                               'it permission to pull from ACR.')
else:
kubelet_identity_client_id = cluster.identity_profile["kubeletidentity"].client_id
_ensure_aks_acr(cmd.cli_ctx,
client_id=kubelet_identity_client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
else:
cluster = sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name=resource_group_name,
resource_name=name,
parameters=managed_cluster,
headers=headers)
return cluster
def _is_msi_cluster(managed_cluster):
return (managed_cluster and managed_cluster.identity and
(managed_cluster.identity.type.casefold() == "systemassigned" or managed_cluster.identity.type.casefold() == "userassigned"))
def _get_kubelet_config(file_path):
if not os.path.isfile(file_path):
raise CLIError("{} is not valid file, or not accessable.".format(file_path))
kubelet_config = get_file_json(file_path)
if not isinstance(kubelet_config, dict):
raise CLIError(
"Error reading kubelet configuration at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
config_object = KubeletConfig()
config_object.cpu_manager_policy = kubelet_config.get(
"cpuManagerPolicy", None)
config_object.cpu_cfs_quota = kubelet_config.get("cpuCfsQuota", None)
config_object.cpu_cfs_quota_period = kubelet_config.get(
"cpuCfsQuotaPeriod", None)
config_object.image_gc_high_threshold = kubelet_config.get(
"imageGcHighThreshold", None)
config_object.image_gc_low_threshold = kubelet_config.get(
"imageGcLowThreshold", None)
config_object.topology_manager_policy = kubelet_config.get(
"topologyManagerPolicy", None)
config_object.allowed_unsafe_sysctls = kubelet_config.get(
"allowedUnsafeSysctls", None)
config_object.fail_swap_on = kubelet_config.get("failSwapOn", None)
config_object.container_log_max_files = kubelet_config.get(
"containerLogMaxFiles", None)
config_object.container_log_max_size_mb = kubelet_config.get(
"containerLogMaxSizeMb", None)
return config_object
def _get_linux_os_config(file_path):
if not os.path.isfile(file_path):
raise CLIError("{} is not valid file, or not accessable.".format(file_path))
os_config = get_file_json(file_path)
if not isinstance(os_config, dict):
raise CLIError(
"Error reading Linux OS configuration at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
config_object = LinuxOSConfig()
config_object.transparent_huge_page_enabled = os_config.get(
"transparentHugePageEnabled", None)
config_object.transparent_huge_page_defrag = os_config.get(
"transparentHugePageDefrag", None)
config_object.swap_file_size_mb = os_config.get("swapFileSizeMB", None)
# sysctl settings
sysctls = os_config.get("sysctls", None)
if not isinstance(sysctls, dict):
raise CLIError(
"Error reading Sysctl settings at {}. Please see https://aka.ms/CustomNodeConfig for correct format.".format(file_path))
config_object.sysctls = SysctlConfig()
config_object.sysctls.net_core_somaxconn = sysctls.get(
"netCoreSomaxconn", None)
config_object.sysctls.net_core_netdev_max_backlog = sysctls.get(
"netCoreNetdevMaxBacklog", None)
config_object.sysctls.net_core_rmem_max = sysctls.get(
"netCoreRmemMax", None)
config_object.sysctls.net_core_wmem_max = sysctls.get(
"netCoreWmemMax", None)
config_object.sysctls.net_core_optmem_max = sysctls.get(
"netCoreOptmemMax", None)
config_object.sysctls.net_ipv4_tcp_max_syn_backlog = sysctls.get(
"netIpv4TcpMaxSynBacklog", None)
config_object.sysctls.net_ipv4_tcp_max_tw_buckets = sysctls.get(
"netIpv4TcpMaxTwBuckets", None)
config_object.sysctls.net_ipv4_tcp_fin_timeout = sysctls.get(
"netIpv4TcpFinTimeout", None)
config_object.sysctls.net_ipv4_tcp_keepalive_time = sysctls.get(
"netIpv4TcpKeepaliveTime", None)
config_object.sysctls.net_ipv4_tcp_keepalive_probes = sysctls.get(
"netIpv4TcpKeepaliveProbes", None)
config_object.sysctls.net_ipv4_tcpkeepalive_intvl = sysctls.get(
"netIpv4TcpkeepaliveIntvl", None)
config_object.sysctls.net_ipv4_tcp_rmem = sysctls.get(
"netIpv4TcpRmem", None)
config_object.sysctls.net_ipv4_tcp_wmem = sysctls.get(
"netIpv4TcpWmem", None)
config_object.sysctls.net_ipv4_tcp_tw_reuse = sysctls.get(
"netIpv4TcpTwReuse", None)
config_object.sysctls.net_ipv4_ip_local_port_range = sysctls.get(
"netIpv4IpLocalPortRange", None)
config_object.sysctls.net_ipv4_neigh_default_gc_thresh1 = sysctls.get(
"netIpv4NeighDefaultGcThresh1", None)
config_object.sysctls.net_ipv4_neigh_default_gc_thresh2 = sysctls.get(
"netIpv4NeighDefaultGcThresh2", None)
config_object.sysctls.net_ipv4_neigh_default_gc_thresh3 = sysctls.get(
"netIpv4NeighDefaultGcThresh3", None)
config_object.sysctls.net_netfilter_nf_conntrack_max = sysctls.get(
"netNetfilterNfConntrackMax", None)
config_object.sysctls.net_netfilter_nf_conntrack_buckets = sysctls.get(
"netNetfilterNfConntrackBuckets", None)
config_object.sysctls.fs_inotify_max_user_watches = sysctls.get(
"fsInotifyMaxUserWatches", None)
config_object.sysctls.fs_file_max = sysctls.get("fsFileMax", None)
config_object.sysctls.fs_aio_max_nr = sysctls.get("fsAioMaxNr", None)
config_object.sysctls.fs_nr_open = sysctls.get("fsNrOpen", None)
config_object.sysctls.kernel_threads_max = sysctls.get(
"kernelThreadsMax", None)
config_object.sysctls.vm_max_map_count = sysctls.get("vmMaxMapCount", None)
config_object.sysctls.vm_swappiness = sysctls.get("vmSwappiness", None)
config_object.sysctls.vm_vfs_cache_pressure = sysctls.get(
"vmVfsCachePressure", None)
return config_object
def _get_http_proxy_config(file_path):
if not os.path.isfile(file_path):
raise CLIError("{} is not valid file, or not accessable.".format(file_path))
hp_config = get_file_json(file_path)
if not isinstance(hp_config, dict):
raise CLIError(
"Error reading Http Proxy Config at {}. Please see https://aka.ms/HttpProxyConfig for correct format.".format(file_path))
config_object = ManagedClusterHTTPProxyConfig()
config_object.http_proxy = hp_config.get("httpProxy", None)
config_object.https_proxy = hp_config.get("httpsProxy", None)
config_object.no_proxy = hp_config.get("noProxy", None)
config_object.trusted_ca = hp_config.get("trustedCa", None)
return config_object
def _is_pod_identity_addon_enabled(instance):
if not instance:
return False
if not instance.pod_identity_profile:
return False
return bool(instance.pod_identity_profile.enabled)
def _ensure_pod_identity_addon_is_enabled(instance):
if not _is_pod_identity_addon_enabled(instance):
raise CLIError('The pod identity addon is not enabled for this managed cluster yet.\n'
'To enable, run "az aks update --enable-pod-identity')
def _ensure_pod_identity_kubenet_consent(network_profile, pod_identity_profile, customer_consent):
if not network_profile or not network_profile.network_plugin:
# invalid data
return
if network_profile.network_plugin.lower() != 'kubenet':
# not kubenet, no need to check
return
if customer_consent is None:
# not set this time, read the previous value
customer_consent = bool(
pod_identity_profile.allow_network_plugin_kubenet)
if not customer_consent:
raise CLIError(
'--enable-pod-identity-with-kubenet is required for enabling pod identity addon when using Kubenet network plugin')
pod_identity_profile.allow_network_plugin_kubenet = True
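# Disabling the addon below resets the profile and drops any previously saved pod identities and exceptions;
# enabling it replaces the stored lists with the ones supplied here after the kubenet consent check.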
def _update_addon_pod_identity(instance, enable, pod_identities=None, pod_identity_exceptions=None, allow_kubenet_consent=None):
if not enable:
# when disable, remove previous saved value
instance.pod_identity_profile = ManagedClusterPodIdentityProfile(
enabled=False)
return
if not instance.pod_identity_profile:
# not set before
instance.pod_identity_profile = ManagedClusterPodIdentityProfile(
enabled=enable,
user_assigned_identities=pod_identities,
user_assigned_identity_exceptions=pod_identity_exceptions,
)
_ensure_pod_identity_kubenet_consent(
instance.network_profile, instance.pod_identity_profile, allow_kubenet_consent)
instance.pod_identity_profile.enabled = enable
instance.pod_identity_profile.user_assigned_identities = pod_identities or []
instance.pod_identity_profile.user_assigned_identity_exceptions = pod_identity_exceptions or []
def _ensure_managed_identity_operator_permission(cli_ctx, instance, scope):
cluster_identity_object_id = None
if instance.identity.type.lower() == 'userassigned':
for identity in instance.identity.user_assigned_identities.values():
cluster_identity_object_id = identity.principal_id
break
elif instance.identity.type.lower() == 'systemassigned':
cluster_identity_object_id = instance.identity.principal_id
else:
raise CLIError('unsupported identity type: {}'.format(
instance.identity.type))
if cluster_identity_object_id is None:
raise CLIError('unable to resolve cluster identity')
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
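# If the cluster identity already holds the Managed Identity Operator role at this exact scope,
# there is nothing to do; otherwise create the role assignment below and wait for it to propagate.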
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope.lower() != scope.lower():
continue
if not i.role_definition_id.lower().endswith(CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID):
continue
if i.principal_id.lower() != cluster_identity_object_id.lower():
continue
# already assigned
return
if not _add_role_assignment(cli_ctx, CONST_MANAGED_IDENTITY_OPERATOR_ROLE, cluster_identity_object_id,
is_service_principal=False, scope=scope):
raise CLIError(
'Could not grant Managed Identity Operator permission for cluster')
# need more time to propagate this assignment...
print()
print('Wait 30 seconds for identity role assignment propagation.')
time.sleep(30)
def aks_pod_identity_add(cmd, client, resource_group_name, cluster_name,
identity_name, identity_namespace, identity_resource_id,
binding_selector=None,
no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
user_assigned_identity = _get_user_assigned_identity(
cmd.cli_ctx, identity_resource_id)
_ensure_managed_identity_operator_permission(
cmd.cli_ctx, instance, user_assigned_identity.id)
pod_identities = []
if instance.pod_identity_profile.user_assigned_identities:
pod_identities = instance.pod_identity_profile.user_assigned_identities
pod_identity = ManagedClusterPodIdentity(
name=identity_name,
namespace=identity_namespace,
identity=UserAssignedIdentity(
resource_id=user_assigned_identity.id,
client_id=user_assigned_identity.client_id,
object_id=user_assigned_identity.principal_id,
)
)
if binding_selector is not None:
pod_identity.binding_selector = binding_selector
pod_identities.append(pod_identity)
_update_addon_pod_identity(
instance, enable=True,
pod_identities=pod_identities,
pod_identity_exceptions=instance.pod_identity_profile.user_assigned_identity_exceptions,
)
# send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_delete(cmd, client, resource_group_name, cluster_name,
identity_name, identity_namespace,
no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
pod_identities = []
if instance.pod_identity_profile.user_assigned_identities:
for pod_identity in instance.pod_identity_profile.user_assigned_identities:
if pod_identity.name == identity_name and pod_identity.namespace == identity_namespace:
# to remove
continue
pod_identities.append(pod_identity)
_update_addon_pod_identity(
instance, enable=True,
pod_identities=pod_identities,
pod_identity_exceptions=instance.pod_identity_profile.user_assigned_identity_exceptions,
)
# send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_list(cmd, client, resource_group_name, cluster_name): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
return _remove_nulls([instance])[0]
def aks_pod_identity_exception_add(cmd, client, resource_group_name, cluster_name,
exc_name, exc_namespace, pod_labels, no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
pod_identity_exceptions = []
if instance.pod_identity_profile.user_assigned_identity_exceptions:
pod_identity_exceptions = instance.pod_identity_profile.user_assigned_identity_exceptions
exc = ManagedClusterPodIdentityException(
name=exc_name, namespace=exc_namespace, pod_labels=pod_labels)
pod_identity_exceptions.append(exc)
_update_addon_pod_identity(
instance, enable=True,
pod_identities=instance.pod_identity_profile.user_assigned_identities,
pod_identity_exceptions=pod_identity_exceptions,
)
# send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_delete(cmd, client, resource_group_name, cluster_name,
exc_name, exc_namespace, no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
pod_identity_exceptions = []
if instance.pod_identity_profile.user_assigned_identity_exceptions:
for exc in instance.pod_identity_profile.user_assigned_identity_exceptions:
if exc.name == exc_name and exc.namespace == exc_namespace:
# to remove
continue
pod_identity_exceptions.append(exc)
_update_addon_pod_identity(
instance, enable=True,
pod_identities=instance.pod_identity_profile.user_assigned_identities,
pod_identity_exceptions=pod_identity_exceptions,
)
# send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_update(cmd, client, resource_group_name, cluster_name,
exc_name, exc_namespace, pod_labels, no_wait=False): # pylint: disable=unused-argument
instance = client.get(resource_group_name, cluster_name)
_ensure_pod_identity_addon_is_enabled(instance)
found_target = False
updated_exc = ManagedClusterPodIdentityException(
name=exc_name, namespace=exc_namespace, pod_labels=pod_labels)
pod_identity_exceptions = []
if instance.pod_identity_profile.user_assigned_identity_exceptions:
for exc in instance.pod_identity_profile.user_assigned_identity_exceptions:
if exc.name == exc_name and exc.namespace == exc_namespace:
found_target = True
pod_identity_exceptions.append(updated_exc)
else:
pod_identity_exceptions.append(exc)
if not found_target:
raise CLIError(
'pod identity exception {}/{} not found'.format(exc_namespace, exc_name))
_update_addon_pod_identity(
instance, enable=True,
pod_identities=instance.pod_identity_profile.user_assigned_identities,
pod_identity_exceptions=pod_identity_exceptions,
)
# send the managed cluster representation to update the pod identity addon
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, cluster_name, instance)
def aks_pod_identity_exception_list(cmd, client, resource_group_name, cluster_name):
instance = client.get(resource_group_name, cluster_name)
return _remove_nulls([instance])[0]
def _ensure_cluster_identity_permission_on_kubelet_identity(cli_ctx, cluster_identity_object_id, scope):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope.lower() != scope.lower():
continue
if not i.role_definition_id.lower().endswith(CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID):
continue
if i.principal_id.lower() != cluster_identity_object_id.lower():
continue
# already assigned
return
if not _add_role_assignment(cli_ctx, CONST_MANAGED_IDENTITY_OPERATOR_ROLE, cluster_identity_object_id,
is_service_principal=False, scope=scope):
raise CLIError('Could not grant Managed Identity Operator permission to cluster identity at scope {}'.format(scope))
def aks_egress_endpoints_list(cmd, client, resource_group_name, name): # pylint: disable=unused-argument
return client.list_outbound_network_dependencies_endpoints(resource_group_name, name)
|
[] |
[] |
[
"ACC_TERM_ID",
"PATH"
] |
[]
|
["ACC_TERM_ID", "PATH"]
|
python
| 2 | 0 | |
jazminleon/jazminleon/wsgi.py
|
"""
WSGI config for jazminleon project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "jazminleon.settings")
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
app/service/main/thumbup/dao/dao_test.go
|
package dao
import (
"flag"
"os"
"testing"
"go-common/app/service/main/thumbup/conf"
)
var d *Dao
func TestMain(m *testing.M) {
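// When DEPLOY_ENV is set (e.g. in a CI/UAT deployment), pull configuration from the remote config center;
// otherwise fall back to the local test TOML file.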
if os.Getenv("DEPLOY_ENV") != "" {
flag.Set("app_id", "main.web-svr.thumbup-service")
flag.Set("conf_token", "VhnSEtd0oymsNQaDUYuEknoWu2mVOOVK")
flag.Set("tree_id", "7720")
flag.Set("conf_version", "docker-1")
flag.Set("deploy_env", "uat")
flag.Set("conf_host", "config.bilibili.co")
flag.Set("conf_path", "/tmp")
flag.Set("region", "sh")
flag.Set("zone", "sh001")
} else {
flag.Set("conf", "../cmd/thumbup-test.toml")
}
flag.Parse()
if err := conf.Init(); err != nil {
panic(err)
}
d = New(conf.Conf)
m.Run()
os.Exit(0)
}
// INSERT INTO `bilibili_likes`.`counts_01`(`id`, `mtime`, `ctime`, `business_id`, `origin_id`, `message_id`, `likes_count`, `dislikes_count`, `likes_change`, `dislikes_change`) VALUES (98, '2018-06-13 14:51:11', '2018-06-13 14:51:11', 1, 99901, 8888, 1, 2, 3, 4);
// INSERT INTO `bilibili_likes`.`counts_01`(`id`, `mtime`, `ctime`, `business_id`, `origin_id`, `message_id`, `likes_count`, `dislikes_count`, `likes_change`, `dislikes_change`) VALUES (99, '2018-06-13 14:56:56', '2018-06-13 14:56:56', 1, 101, 8888, 0, 0, 0, 0);
// INSERT INTO `bilibili_likes`.`likes`(`id`, `mtime`, `ctime`, `business_id`, `origin_id`, `message_id`, `mid`, `type`) VALUES (0, '2018-11-01 18:03:28', '2018-11-01 18:03:28', 1, 1, 1, 1, 1);
// INSERT INTO `bilibili_likes`.`counts`(`id`, `mtime`, `ctime`, `business_id`, `origin_id`, `message_id`, `likes_count`, `dislikes_count`, `likes_change`, `dislikes_change`, `up_mid`) VALUES (0, '2018-11-03 12:16:25', '2018-11-03 12:16:25', 1, 1, 1, 1, 1, 0, 0, 0);
|
[
"\"DEPLOY_ENV\""
] |
[] |
[
"DEPLOY_ENV"
] |
[]
|
["DEPLOY_ENV"]
|
go
| 1 | 0 | |
tools/ci_build/build.py
|
#!/usr/bin/env python3
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import argparse
import contextlib
import glob
import os
import re
import shlex
import shutil
import subprocess
import sys
import platform
from amd_hipify import amd_hipify
from distutils.version import LooseVersion
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
REPO_DIR = os.path.normpath(os.path.join(SCRIPT_DIR, "..", ".."))
sys.path.insert(0, os.path.join(REPO_DIR, "tools", "python"))
from util import ( # noqa: E402
run,
is_windows, is_macOS, is_linux,
get_logger)
import util.android as android # noqa: E402
log = get_logger("build")
class BaseError(Exception):
"""Base class for errors originating from build.py."""
pass
class BuildError(BaseError):
"""Error from running build steps."""
def __init__(self, *messages):
super().__init__("\n".join(messages))
class UsageError(BaseError):
"""Usage related error."""
def __init__(self, message):
super().__init__(message)
def _check_python_version():
# According to BUILD.md, python 3.6+ is required:
# Python 2 is definitely not supported, and it is safer to assume
# it won't run with python 4:
if sys.version_info[0] != 3:
raise BuildError(
"Bad python major version: expecting python 3, found version "
"'{}'".format(sys.version))
if sys.version_info[1] < 6:
raise BuildError(
"Bad python minor version: expecting python 3.6+, found version "
"'{}'".format(sys.version))
def _str_to_bool(s):
"""Convert string to bool (in argparse context)."""
if s.lower() not in ['true', 'false']:
raise ValueError('Need bool; got %r' % s)
return {'true': True, 'false': False}[s.lower()]
_check_python_version()
def _openvino_verify_device_type(device_read):
choices = ["CPU_FP32", "GPU_FP32", "GPU_FP16", "VAD-M_FP16", "MYRIAD_FP16", "VAD-F_FP32"]
choices1 = ["CPU_FP32_NO_PARTITION", "GPU_FP32_NO_PARTITION", "GPU_FP16_NO_PARTITION",
"VAD-M_FP16_NO_PARTITION", "MYRIAD_FP16_NO_PARTITION", "VAD-F_FP32_NO_PARTITION"]
status_hetero = True
res = False
if (device_read in choices):
res = True
elif (device_read in choices1):
res = True
elif (device_read.startswith("HETERO:") or device_read.startswith("MULTI:")):
res = True
comma_separated_devices = device_read.split(":")
comma_separated_devices = comma_separated_devices[1].split(',')
if (len(comma_separated_devices) < 2):
print("At least two devices required in Hetero Mode")
status_hetero = False
dev_options = ["CPU", "GPU", "MYRIAD", "FPGA", "HDDL"]
for dev in comma_separated_devices:
if (dev not in dev_options):
status_hetero = False
break
def invalid_hetero_build():
print("\n" + "If trying to build Hetero or Multi, specifiy the supported devices along with it." + + "\n")
print("specify the keyword HETERO or MULTI followed by the devices ")
print("in the order of priority you want to build" + "\n")
print("The different hardware devices that can be added in HETERO or MULTI")
print("are ['CPU','GPU','MYRIAD','FPGA','HDDL']" + "\n")
print("An example of how to specify the hetero build type. Ex: HETERO:GPU,CPU" + "\n")
print("An example of how to specify the MULTI build type. Ex: MULTI:MYRIAD,CPU" + "\n")
sys.exit("Wrong Build Type selected")
if (res is False):
print("\n" + "You have selcted wrong configuration for the build.")
print("pick the build type for specific Hardware Device from following options: ", choices)
print("(or) from the following options with graph partitioning disabled: ", choices1)
print("\n")
if not (device_read.startswith("HETERO:") or device_read.startswith("MULTI:")):
invalid_hetero_build()
sys.exit("Wrong Build Type selected")
if (status_hetero is False):
invalid_hetero_build()
return device_read
def parse_arguments():
parser = argparse.ArgumentParser(
description="ONNXRuntime CI build driver.",
usage=""" # noqa
Default behavior is --update --build --test for native architecture builds.
Default behavior is --update --build for cross-compiled builds.
The Update phase will update git submodules, and run cmake to generate makefiles.
The Build phase will build all projects.
The Test phase will run all unit tests, and optionally the ONNX tests.
Use the individual flags to only run the specified stages.
""")
# Main arguments
parser.add_argument(
"--build_dir", required=True, help="Path to the build directory.")
parser.add_argument(
"--config", nargs="+", default=["Debug"],
choices=["Debug", "MinSizeRel", "Release", "RelWithDebInfo"],
help="Configuration(s) to build.")
parser.add_argument(
"--update", action='store_true', help="Update makefiles.")
parser.add_argument("--build", action='store_true', help="Build.")
parser.add_argument(
"--clean", action='store_true',
help="Run 'cmake --build --target clean' for the selected config/s.")
parser.add_argument(
"--parallel", nargs='?', const='0', default='1', type=int,
help="Use parallel build. The optional value specifies the maximum number of parallel jobs. "
"If the optional value is 0 or unspecified, it is interpreted as the number of CPUs.")
parser.add_argument("--test", action='store_true', help="Run unit tests.")
parser.add_argument("--skip_tests", action='store_true', help="Skip all tests.")
# Training options
parser.add_argument(
"--enable_nvtx_profile", action='store_true', help="Enable NVTX profile in ORT.")
parser.add_argument(
"--enable_memory_profile", action='store_true', help="Enable memory profile in ORT.")
parser.add_argument(
"--enable_training", action='store_true', help="Enable training in ORT.")
parser.add_argument(
"--enable_training_ops", action='store_true', help="Enable training ops in inference graph.")
parser.add_argument(
"--enable_training_torch_interop", action='store_true', help="Enable training kernels interop with torch.")
parser.add_argument(
"--disable_nccl", action='store_true', help="Disable Nccl.")
parser.add_argument(
"--mpi_home", help="Path to MPI installation dir")
parser.add_argument(
"--nccl_home", help="Path to NCCL installation dir")
parser.add_argument(
"--use_mpi", nargs='?', default=True, const=True, type=_str_to_bool)
# enable ONNX tests
parser.add_argument(
"--enable_onnx_tests", action='store_true',
help="""When running the Test phase, run onnx_test_running against
available test data directories.""")
parser.add_argument("--path_to_protoc_exe", help="Path to protoc exe.")
parser.add_argument(
"--fuzz_testing", action='store_true', help="Enable Fuzz testing of the onnxruntime.")
parser.add_argument(
"--enable_symbolic_shape_infer_tests", action='store_true',
help="""When running the Test phase, run symbolic shape inference against
available test data directories.""")
# generate documentation
parser.add_argument("--gen_doc", nargs='?', const='yes', type=str,
help="Generate documentation listing standard ONNX operators and types implemented by "
"various execution providers and contrib operator schemas. "
"Use `--gen_doc validate` to validate these match the current contents in /docs.")
parser.add_argument(
"--gen-api-doc", action='store_true',
help="Generate API documentation for PyTorch frontend")
# CUDA related
parser.add_argument("--use_cuda", action='store_true', help="Enable CUDA.")
parser.add_argument(
"--cuda_version", help="The version of CUDA toolkit to use. "
"Auto-detect if not specified. e.g. 9.0")
parser.add_argument(
"--cuda_home", help="Path to CUDA home."
"Read from CUDA_HOME environment variable if --use_cuda is true and "
"--cuda_home is not specified.")
parser.add_argument(
"--cudnn_home", help="Path to CUDNN home. "
"Read from CUDNN_HOME environment variable if --use_cuda is true and "
"--cudnn_home is not specified.")
parser.add_argument(
"--enable_cuda_line_info", action='store_true', help="Enable CUDA line info.")
# Python bindings
parser.add_argument(
"--enable_pybind", action='store_true', help="Enable Python Bindings.")
parser.add_argument(
"--build_wheel", action='store_true', help="Build Python Wheel.")
parser.add_argument(
"--wheel_name_suffix", help="Suffix to append to created wheel names. "
"This value is currently only used for nightly builds.")
parser.add_argument(
"--numpy_version", help="Installs a specific version of numpy "
"before building the python binding.")
parser.add_argument(
"--skip-keras-test", action='store_true',
help="Skip tests with Keras if keras is installed")
# C-Sharp bindings
parser.add_argument(
"--build_csharp", action='store_true',
help="Build C#.Net DLL and NuGet package. This should be only used in CI pipelines. "
"For building C# bindings and packaging them into nuget package use --build_nuget arg.")
parser.add_argument(
"--build_nuget", action='store_true',
help="Build C#.Net DLL and NuGet package on the local machine. "
"Currently only Windows and Linux platforms are supported.")
# Java bindings
parser.add_argument(
"--build_java", action='store_true', help="Build Java bindings.")
# Node.js binding
parser.add_argument(
"--build_nodejs", action='store_true',
help="Build Node.js binding and NPM package.")
# Objective-C binding
parser.add_argument(
"--build_objc", action='store_true',
help="Build Objective-C binding.")
# Build a shared lib
parser.add_argument(
"--build_shared_lib", action='store_true',
help="Build a shared library for the ONNXRuntime.")
# Build a shared lib
parser.add_argument(
"--build_apple_framework", action='store_true',
help="Build a macOS/iOS framework for the ONNXRuntime.")
# Build options
parser.add_argument(
"--cmake_extra_defines", nargs="+",
help="Extra definitions to pass to CMake during build system "
"generation. These are just CMake -D options without the leading -D.")
parser.add_argument(
"--target",
help="Build a specific target, e.g. winml_dll")
# This flag is needed when :
# 1. The OS is 64 bits Windows
# 2. And the target binary is for 32 bits Windows
# 3. And the python used for running this script is 64 bits.
# But if you can get a 32 bits python, the build will run better and you won't need this flag.
parser.add_argument(
"--x86", action='store_true',
help="[cross-compiling] Create Windows x86 makefiles. Requires --update and no existing cache "
"CMake setup. Delete CMakeCache.txt if needed")
parser.add_argument(
"--arm", action='store_true',
help="[cross-compiling] Create ARM makefiles. Requires --update and no existing cache "
"CMake setup. Delete CMakeCache.txt if needed")
parser.add_argument(
"--arm64", action='store_true',
help="[cross-compiling] Create ARM64 makefiles. Requires --update and no existing cache "
"CMake setup. Delete CMakeCache.txt if needed")
parser.add_argument(
"--arm64ec", action='store_true',
help="[cross-compiling] Create ARM64EC makefiles. Requires --update and no existing cache "
"CMake setup. Delete CMakeCache.txt if needed")
parser.add_argument(
"--msvc_toolset", help="MSVC toolset to use. e.g. 14.11")
parser.add_argument("--android", action='store_true', help='Build for Android')
parser.add_argument(
"--android_abi", default="arm64-v8a",
choices=["armeabi-v7a", "arm64-v8a", "x86", "x86_64"],
help="Specify the target Android Application Binary Interface (ABI)")
parser.add_argument("--android_api", type=int, default=27, help='Android API Level, e.g. 21')
parser.add_argument(
"--android_sdk_path", type=str, default=os.environ.get("ANDROID_HOME", ""),
help="Path to the Android SDK")
parser.add_argument(
"--android_ndk_path", type=str, default=os.environ.get("ANDROID_NDK_HOME", ""),
help="Path to the Android NDK")
parser.add_argument("--android_cpp_shared", action="store_true",
help="Build with shared libc++ instead of the default static libc++.")
parser.add_argument("--android_run_emulator", action="store_true",
help="Start up an Android emulator if needed.")
parser.add_argument("--ios", action='store_true', help="build for ios")
parser.add_argument(
"--ios_sysroot", default="",
help="Specify the location name of the macOS platform SDK to be used")
parser.add_argument(
"--ios_toolchain_dir", default="",
help="Path to ios toolchain binaries")
parser.add_argument(
"--ios_toolchain_file", default="",
help="Path to ios toolchain file, "
"or cmake/onnxruntime_ios.toolchain.cmake will be used")
parser.add_argument(
"--xcode_code_signing_team_id", default="",
help="The development team ID used for code signing in Xcode")
parser.add_argument(
"--xcode_code_signing_identity", default="",
help="The development identity used for code signing in Xcode")
parser.add_argument(
"--use_xcode", action='store_true',
help="Use Xcode as cmake generator, this is only supported on MacOS.")
parser.add_argument(
"--osx_arch",
default="arm64" if platform.machine() == "arm64" else "x86_64",
choices=["arm64", "arm64e", "x86_64"],
help="Specify the Target specific architectures for macOS and iOS, This is only supported on MacOS")
parser.add_argument(
"--apple_deploy_target", type=str,
help="Specify the minimum version of the target platform "
"(e.g. macOS or iOS)"
"This is only supported on MacOS")
# WebAssembly build
parser.add_argument("--build_wasm", action='store_true', help="Build for WebAssembly")
parser.add_argument("--enable_wasm_simd", action='store_true', help="Enable WebAssembly SIMD")
parser.add_argument(
"--disable_wasm_exception_catching", action='store_true',
help="Disable exception catching in WebAssembly.")
parser.add_argument(
"--enable_wasm_exception_throwing_override", action='store_true',
help="Enable exception throwing in WebAssembly, this will override default disabling exception throwing "
"behavior when disable exceptions.")
parser.add_argument(
"--enable_wasm_threads", action='store_true',
help="Enable WebAssembly multi-threads support")
parser.add_argument(
"--enable_wasm_profiling", action='store_true',
help="Enable WebAsselby profiling and preserve function names")
parser.add_argument(
"--enable_wasm_debug_info", action='store_true',
help="Build WebAssembly with DWARF format debug info")
parser.add_argument(
"--wasm_malloc", default="dlmalloc", help="Specify memory allocator for WebAssembly")
parser.add_argument(
"--emsdk_version", default="2.0.26", help="Specify version of emsdk")
# Enable onnxruntime-extensions
parser.add_argument(
"--use_extensions", action='store_true',
help="Enable custom operators in onnxruntime-extensions, use git submodule onnxruntime-extensions "
"in path cmake/external/onnxruntime-extensions by default.")
parser.add_argument(
"--extensions_overridden_path", type=str,
help="Path to pre-pulled onnxruntime-extensions, will override default onnxruntime-extensions path.")
# Arguments needed by CI
parser.add_argument(
"--cmake_path", default="cmake", help="Path to the CMake program.")
parser.add_argument(
"--ctest_path", default="ctest", help="Path to the CTest program. It can be an empty string. If it is empty, "
"we will use this script driving the test programs directly.")
parser.add_argument(
"--skip_submodule_sync", action='store_true', help="Don't do a "
"'git submodule update'. Makes the Update phase faster.")
parser.add_argument(
"--use_vstest", action='store_true',
help="Use use_vstest for running unitests.")
parser.add_argument(
"--use_mimalloc", default=['none'],
choices=['none', 'stl', 'arena', 'all'], help="Use mimalloc.")
parser.add_argument(
"--use_dnnl", action='store_true', help="Build with DNNL.")
parser.add_argument(
"--dnnl_gpu_runtime", action='store', default='', type=str.lower,
help="e.g. --dnnl_gpu_runtime ocl")
parser.add_argument(
"--dnnl_opencl_root", action='store', default='',
help="Path to OpenCL SDK. "
"e.g. --dnnl_opencl_root \"C:/Program Files (x86)/IntelSWTools/sw_dev_tools/OpenCL/sdk\"")
parser.add_argument(
"--use_openvino", nargs="?", const="CPU_FP32",
type=_openvino_verify_device_type,
help="Build with OpenVINO for specific hardware.")
parser.add_argument(
"--use_coreml", action='store_true', help="Build with CoreML support.")
parser.add_argument(
"--use_nnapi", action='store_true', help="Build with NNAPI support.")
parser.add_argument(
"--nnapi_min_api", type=int,
help="Minimum Android API level to enable NNAPI, should be no less than 27")
parser.add_argument(
"--use_rknpu", action='store_true', help="Build with RKNPU.")
parser.add_argument(
"--use_preinstalled_eigen", action='store_true',
help="Use pre-installed Eigen.")
parser.add_argument("--eigen_path", help="Path to pre-installed Eigen.")
parser.add_argument(
"--use_openmp", action='store_true', help="Build with OpenMP")
parser.add_argument(
"--enable_msinternal", action="store_true",
help="Enable for Microsoft internal builds only.")
parser.add_argument("--llvm_path", help="Path to llvm dir")
parser.add_argument(
"--use_vitisai", action='store_true', help="Build with Vitis-AI")
parser.add_argument(
"--use_nuphar", action='store_true', help="Build with nuphar")
parser.add_argument(
"--use_tensorrt", action='store_true', help="Build with TensorRT")
parser.add_argument(
"--tensorrt_home", help="Path to TensorRT installation dir")
parser.add_argument(
"--use_migraphx", action='store_true', help="Build with MIGraphX")
parser.add_argument(
"--migraphx_home", help="Path to MIGraphX installation dir")
parser.add_argument(
"--use_full_protobuf", action='store_true',
help="Use the full protobuf library")
parser.add_argument("--skip_onnx_tests", action='store_true',
help="Explicitly disable all onnx related tests. Note: Use --skip_tests to skip all tests.")
parser.add_argument("--skip_winml_tests", action='store_true', help="Explicitly disable all WinML related tests")
parser.add_argument("--skip_nodejs_tests", action='store_true', help="Explicitly disable all Node.js binding tests")
parser.add_argument(
"--enable_msvc_static_runtime", action='store_true',
help="Enable static linking of MSVC runtimes.")
parser.add_argument(
"--enable_language_interop_ops", action='store_true',
help="Enable operator implemented in language other than cpp")
parser.add_argument(
"--cmake_generator",
choices=['Visual Studio 15 2017', 'Visual Studio 16 2019', 'Visual Studio 17 2022', 'Ninja'],
default='Visual Studio 16 2019' if is_windows() else None,
help="Specify the generator that CMake invokes. "
"This is only supported on Windows")
parser.add_argument(
"--enable_multi_device_test", action='store_true',
help="Test with multi-device. Mostly used for multi-device GPU")
parser.add_argument(
"--use_dml", action='store_true', help="Build with DirectML.")
parser.add_argument(
"--use_winml", action='store_true', help="Build with WinML.")
parser.add_argument(
"--winml_root_namespace_override", type=str,
help="Specify the namespace that WinML builds into.")
parser.add_argument(
"--use_telemetry", action='store_true',
help="Only official builds can set this flag to enable telemetry.")
parser.add_argument(
"--enable_wcos", action='store_true',
help="Build for Windows Core OS.")
parser.add_argument(
"--enable_windows_store", action='store_true',
help="Build for Windows Store")
parser.add_argument(
"--enable_lto", action='store_true',
help="Enable Link Time Optimization")
parser.add_argument(
"--enable_transformers_tool_test", action='store_true',
help="Enable transformers tool test")
parser.add_argument(
"--use_acl", nargs="?", const="ACL_1905",
choices=["ACL_1902", "ACL_1905", "ACL_1908", "ACL_2002"],
help="Build with ACL for ARM architectures.")
parser.add_argument(
"--acl_home", help="Path to ACL home dir")
parser.add_argument(
"--acl_libs", help="Path to ACL libraries")
parser.add_argument(
"--use_armnn", action='store_true',
help="Enable ArmNN Execution Provider.")
parser.add_argument(
"--armnn_relu", action='store_true',
help="Use the Relu operator implementation from the ArmNN EP.")
parser.add_argument(
"--armnn_bn", action='store_true',
help="Use the Batch Normalization operator implementation from the ArmNN EP.")
parser.add_argument(
"--armnn_home", help="Path to ArmNN home dir")
parser.add_argument(
"--armnn_libs", help="Path to ArmNN libraries")
parser.add_argument(
"--build_micro_benchmarks", action='store_true',
help="Build ONNXRuntime micro-benchmarks.")
# options to reduce binary size
parser.add_argument("--minimal_build", default=None, nargs='*', type=str.lower,
help="Create a build that only supports ORT format models. "
"See /docs/ONNX_Runtime_Format_Model_Usage.md for more information. "
"RTTI is automatically disabled in a minimal build. "
"To enable execution providers that compile kernels at runtime (e.g. NNAPI) pass 'extended' "
"as a parameter. e.g. '--minimal_build extended'. "
"To enable support for custom operators pass 'custom_ops' as a parameter. "
"e.g. '--minimal_build custom_ops'. This can be combined with an 'extended' build by passing "
"'--minimal_build extended custom_ops'")
parser.add_argument("--include_ops_by_config", type=str,
help="Include ops from config file. "
"See /docs/Reduced_Operator_Kernel_build.md for more information.")
parser.add_argument("--enable_reduced_operator_type_support", action='store_true',
help='If --include_ops_by_config is specified, and the configuration file has type reduction '
'information, limit the types individual operators support where possible to further '
'reduce the build size. '
'See /docs/Reduced_Operator_Kernel_build.md for more information.')
parser.add_argument("--disable_contrib_ops", action='store_true',
help="Disable contrib ops (reduces binary size)")
parser.add_argument("--disable_ml_ops", action='store_true',
help="Disable traditional ML ops (reduces binary size)")
parser.add_argument("--disable_rtti", action='store_true', help="Disable RTTI (reduces binary size)")
parser.add_argument("--disable_exceptions", action='store_true',
help="Disable exceptions to reduce binary size. Requires --minimal_build.")
parser.add_argument("--disable_ort_format_load", action='store_true',
help='Disable support for loading ORT format models in a non-minimal build.')
parser.add_argument(
"--rocm_version", help="The version of ROCM stack to use. ")
parser.add_argument("--use_rocm", action='store_true', help="Build with ROCm")
parser.add_argument("--rocm_home", help="Path to ROCm installation dir")
# Code coverage
parser.add_argument("--code_coverage", action='store_true',
help="Generate code coverage when targetting Android (only).")
parser.add_argument(
"--ms_experimental", action='store_true', help="Build microsoft experimental operators.")\
# eager mode
parser.add_argument(
"--build_eager_mode", action='store_true',
help="Build ONNXRuntime micro-benchmarks.")
parser.add_argument('--eager_customop_module', default=None,
help='Module containing custom op mappings for eager mode.')
parser.add_argument('--eager_customop_header', default=None,
help='Header containing custom op definitions for eager mode.')
parser.add_argument(
"--enable_external_custom_op_schemas", action='store_true',
help="Enable registering user defined custom operation schemas at shared library load time.\
This feature is only supported/available on Ubuntu.")
return parser.parse_args()
def is_reduced_ops_build(args):
return args.include_ops_by_config is not None
def resolve_executable_path(command_or_path):
"""Returns the absolute path of an executable."""
if command_or_path and command_or_path.strip():
executable_path = shutil.which(command_or_path)
if executable_path is None:
raise BuildError("Failed to resolve executable path for "
"'{}'.".format(command_or_path))
return os.path.abspath(executable_path)
else:
return None
def get_linux_distro():
try:
with open('/etc/os-release', 'r') as f:
dist_info = dict(
line.strip().split('=', 1) for line in f.readlines())
return dist_info.get('NAME', '').strip('"'), dist_info.get(
'VERSION', '').strip('"')
except (IOError, ValueError):
return '', ''
def is_ubuntu_1604():
dist, ver = get_linux_distro()
return dist == 'Ubuntu' and ver.startswith('16.04')
def get_config_build_dir(build_dir, config):
# build directory per configuration
return os.path.join(build_dir, config)
def run_subprocess(args, cwd=None, capture_stdout=False, dll_path=None,
shell=False, env={}, python_path=None):
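# Build the child-process environment from a copy of the current one: prepend dll_path to PATH on Windows
# (or append it to LD_LIBRARY_PATH elsewhere), extend PYTHONPATH with python_path, then apply the caller's
# env overrides last so they take precedence.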
if isinstance(args, str):
raise ValueError("args should be a sequence of strings, not a string")
my_env = os.environ.copy()
if dll_path:
if is_windows():
if "PATH" in my_env:
my_env["PATH"] = dll_path + os.pathsep + my_env["PATH"]
else:
my_env["PATH"] = dll_path
else:
if "LD_LIBRARY_PATH" in my_env:
my_env["LD_LIBRARY_PATH"] += os.pathsep + dll_path
else:
my_env["LD_LIBRARY_PATH"] = dll_path
if python_path:
if "PYTHONPATH" in my_env:
my_env["PYTHONPATH"] += os.pathsep + python_path
else:
my_env["PYTHONPATH"] = python_path
my_env.update(env)
return run(*args, cwd=cwd, capture_stdout=capture_stdout, shell=shell, env=my_env)
def update_submodules(source_dir):
run_subprocess(["git", "submodule", "sync", "--recursive"], cwd=source_dir)
run_subprocess(["git", "submodule", "update", "--init", "--recursive"],
cwd=source_dir)
def is_docker():
path = '/proc/self/cgroup'
return (
os.path.exists('/.dockerenv') or
os.path.isfile(path) and any('docker' in line for line in open(path))
)
def install_python_deps(numpy_version=""):
dep_packages = ['setuptools', 'wheel', 'pytest']
dep_packages.append('numpy=={}'.format(numpy_version) if numpy_version
else 'numpy>=1.16.6')
dep_packages.append('sympy>=1.1')
dep_packages.append('packaging')
dep_packages.append('cerberus')
run_subprocess([sys.executable, '-m', 'pip', 'install'] + dep_packages)
def setup_test_data(build_dir, configs):
# create a shortcut for test models if there is a 'models'
# folder in build_dir
if is_windows():
src_model_dir = os.path.join(build_dir, 'models')
if os.path.exists('C:\\local\\models') and not os.path.exists(
src_model_dir):
log.debug("creating shortcut %s -> %s" % (
'C:\\local\\models', src_model_dir))
run_subprocess(['mklink', '/D', '/J', src_model_dir,
'C:\\local\\models'], shell=True)
for config in configs:
config_build_dir = get_config_build_dir(build_dir, config)
os.makedirs(config_build_dir, exist_ok=True)
dest_model_dir = os.path.join(config_build_dir, 'models')
if os.path.exists('C:\\local\\models') and not os.path.exists(
dest_model_dir):
log.debug("creating shortcut %s -> %s" % (
'C:\\local\\models', dest_model_dir))
run_subprocess(['mklink', '/D', '/J', dest_model_dir,
'C:\\local\\models'], shell=True)
elif os.path.exists(src_model_dir) and not os.path.exists(
dest_model_dir):
log.debug("creating shortcut %s -> %s" % (
src_model_dir, dest_model_dir))
run_subprocess(['mklink', '/D', '/J', dest_model_dir,
src_model_dir], shell=True)
def use_dev_mode(args):
if args.use_acl:
return 'OFF'
if args.use_armnn:
return 'OFF'
if args.ios and is_macOS():
return 'OFF'
SYSTEM_COLLECTIONURI = os.getenv('SYSTEM_COLLECTIONURI')
if SYSTEM_COLLECTIONURI and not SYSTEM_COLLECTIONURI == 'https://dev.azure.com/onnxruntime/':
return 'OFF'
return 'ON'
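# add_cmake_define_without_override appends key=value only when the caller has not already supplied that key,
# so values passed on the command line via --cmake_extra_defines take precedence. For example,
# add_cmake_define_without_override(defines, "onnxruntime_DEV_MODE", "ON") is a no-op if the user already
# passed onnxruntime_DEV_MODE=OFF.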
def add_cmake_define_without_override(cmake_extra_defines, key, value):
for x in cmake_extra_defines:
if x.startswith(key + "="):
return cmake_extra_defines
cmake_extra_defines.append(key + "=" + value)
def generate_build_tree(cmake_path, source_dir, build_dir, cuda_home, cudnn_home, rocm_home,
mpi_home, nccl_home, tensorrt_home, migraphx_home, acl_home, acl_libs, armnn_home, armnn_libs,
path_to_protoc_exe, configs, cmake_extra_defines, args, cmake_extra_args):
log.info("Generating CMake build tree")
cmake_dir = os.path.join(source_dir, "cmake")
cmake_args = [
cmake_path, cmake_dir,
"-Donnxruntime_RUN_ONNX_TESTS=" + ("ON" if args.enable_onnx_tests else "OFF"),
"-Donnxruntime_BUILD_WINML_TESTS=" + ("OFF" if args.skip_winml_tests else "ON"),
"-Donnxruntime_GENERATE_TEST_REPORTS=ON",
# There are two ways of locating python C API header file. "find_package(PythonLibs 3.5 REQUIRED)"
# and "find_package(Python 3.5 COMPONENTS Development.Module)". The first one is deprecated and it
# depends on the "PYTHON_EXECUTABLE" variable. The second needs "Python_EXECUTABLE". Here we set both
# of them to get the best compatibility.
"-DPython_EXECUTABLE=" + sys.executable,
"-DPYTHON_EXECUTABLE=" + sys.executable,
"-Donnxruntime_ROCM_VERSION=" + (args.rocm_version if args.use_rocm else ""),
"-Donnxruntime_USE_MIMALLOC_STL_ALLOCATOR=" + (
"ON" if args.use_mimalloc == "stl" or args.use_mimalloc == "all" else "OFF"),
"-Donnxruntime_USE_MIMALLOC_ARENA_ALLOCATOR=" + (
"ON" if args.use_mimalloc == "arena" or args.use_mimalloc == "all" else "OFF"),
"-Donnxruntime_ENABLE_PYTHON=" + ("ON" if args.enable_pybind else "OFF"),
"-Donnxruntime_BUILD_CSHARP=" + ("ON" if args.build_csharp else "OFF"),
"-Donnxruntime_BUILD_JAVA=" + ("ON" if args.build_java else "OFF"),
"-Donnxruntime_BUILD_NODEJS=" + ("ON" if args.build_nodejs else "OFF"),
"-Donnxruntime_BUILD_OBJC=" + ("ON" if args.build_objc else "OFF"),
"-Donnxruntime_BUILD_SHARED_LIB=" + ("ON" if args.build_shared_lib else "OFF"),
"-Donnxruntime_BUILD_APPLE_FRAMEWORK=" + ("ON" if args.build_apple_framework else "OFF"),
"-Donnxruntime_USE_DNNL=" + ("ON" if args.use_dnnl else "OFF"),
"-Donnxruntime_DNNL_GPU_RUNTIME=" + (args.dnnl_gpu_runtime if args.use_dnnl else ""),
"-Donnxruntime_DNNL_OPENCL_ROOT=" + (args.dnnl_opencl_root if args.use_dnnl else ""),
"-Donnxruntime_USE_NNAPI_BUILTIN=" + ("ON" if args.use_nnapi else "OFF"),
"-Donnxruntime_USE_RKNPU=" + ("ON" if args.use_rknpu else "OFF"),
"-Donnxruntime_USE_OPENMP=" + (
"ON" if args.use_openmp and not (
args.use_nnapi or
args.android or (args.ios and is_macOS())
or args.use_rknpu)
else "OFF"),
"-Donnxruntime_USE_TVM=" + ("ON" if args.use_nuphar else "OFF"),
"-Donnxruntime_USE_LLVM=" + ("ON" if args.use_nuphar else "OFF"),
"-Donnxruntime_ENABLE_MICROSOFT_INTERNAL=" + ("ON" if args.enable_msinternal else "OFF"),
"-Donnxruntime_USE_VITISAI=" + ("ON" if args.use_vitisai else "OFF"),
"-Donnxruntime_USE_NUPHAR=" + ("ON" if args.use_nuphar else "OFF"),
"-Donnxruntime_USE_TENSORRT=" + ("ON" if args.use_tensorrt else "OFF"),
"-Donnxruntime_TENSORRT_HOME=" + (tensorrt_home if args.use_tensorrt else ""),
# set vars for migraphx
"-Donnxruntime_USE_MIGRAPHX=" + ("ON" if args.use_migraphx else "OFF"),
"-Donnxruntime_MIGRAPHX_HOME=" + (migraphx_home if args.use_migraphx else ""),
# By default - we currently support only cross compiling for ARM/ARM64
# (no native compilation supported through this script).
"-Donnxruntime_CROSS_COMPILING=" + ("ON" if args.arm64 or args.arm64ec or args.arm else "OFF"),
"-Donnxruntime_DISABLE_CONTRIB_OPS=" + ("ON" if args.disable_contrib_ops else "OFF"),
"-Donnxruntime_DISABLE_ML_OPS=" + ("ON" if args.disable_ml_ops else "OFF"),
"-Donnxruntime_DISABLE_RTTI=" + ("ON" if args.disable_rtti else "OFF"),
"-Donnxruntime_DISABLE_EXCEPTIONS=" + ("ON" if args.disable_exceptions else "OFF"),
"-Donnxruntime_DISABLE_ORT_FORMAT_LOAD=" + ("ON" if args.disable_ort_format_load else "OFF"),
# Need to use 'is not None' with minimal_build check as it could be an empty list.
"-Donnxruntime_MINIMAL_BUILD=" + ("ON" if args.minimal_build is not None else "OFF"),
"-Donnxruntime_EXTENDED_MINIMAL_BUILD=" + ("ON" if args.minimal_build and 'extended' in args.minimal_build
else "OFF"),
"-Donnxruntime_MINIMAL_BUILD_CUSTOM_OPS=" + ("ON" if (args.minimal_build is not None and ('custom_ops' in
args.minimal_build or args.use_extensions))
else "OFF"),
"-Donnxruntime_REDUCED_OPS_BUILD=" + ("ON" if is_reduced_ops_build(args) else "OFF"),
# enable pyop if it is nightly build
"-Donnxruntime_ENABLE_LANGUAGE_INTEROP_OPS=" + ("ON" if args.enable_language_interop_ops else "OFF"),
"-Donnxruntime_USE_DML=" + ("ON" if args.use_dml else "OFF"),
"-Donnxruntime_USE_WINML=" + ("ON" if args.use_winml else "OFF"),
"-Donnxruntime_BUILD_MS_EXPERIMENTAL_OPS=" + ("ON" if args.ms_experimental else "OFF"),
"-Donnxruntime_USE_TELEMETRY=" + ("ON" if args.use_telemetry else "OFF"),
"-Donnxruntime_ENABLE_LTO=" + ("ON" if args.enable_lto else "OFF"),
"-Donnxruntime_ENABLE_TRANSFORMERS_TOOL_TEST=" + ("ON" if args.enable_transformers_tool_test else "OFF"),
"-Donnxruntime_USE_ACL=" + ("ON" if args.use_acl else "OFF"),
"-Donnxruntime_USE_ACL_1902=" + ("ON" if args.use_acl == "ACL_1902" else "OFF"),
"-Donnxruntime_USE_ACL_1905=" + ("ON" if args.use_acl == "ACL_1905" else "OFF"),
"-Donnxruntime_USE_ACL_1908=" + ("ON" if args.use_acl == "ACL_1908" else "OFF"),
"-Donnxruntime_USE_ACL_2002=" + ("ON" if args.use_acl == "ACL_2002" else "OFF"),
"-Donnxruntime_USE_ARMNN=" + ("ON" if args.use_armnn else "OFF"),
"-Donnxruntime_ARMNN_RELU_USE_CPU=" + ("OFF" if args.armnn_relu else "ON"),
"-Donnxruntime_ARMNN_BN_USE_CPU=" + ("OFF" if args.armnn_bn else "ON"),
# Training related flags
"-Donnxruntime_ENABLE_NVTX_PROFILE=" + ("ON" if args.enable_nvtx_profile else "OFF"),
"-Donnxruntime_ENABLE_TRAINING=" + ("ON" if args.enable_training else "OFF"),
"-Donnxruntime_ENABLE_TRAINING_OPS=" + ("ON" if args.enable_training_ops else "OFF"),
"-Donnxruntime_ENABLE_TRAINING_TORCH_INTEROP=" + ("ON" if args.enable_training_torch_interop else "OFF"),
# Enable advanced computations such as AVX for some training-related ops.
"-Donnxruntime_ENABLE_CPU_FP16_OPS=" + ("ON" if args.enable_training else "OFF"),
"-Donnxruntime_USE_NCCL=" + ("OFF" if args.disable_nccl else "ON"),
"-Donnxruntime_BUILD_BENCHMARKS=" + ("ON" if args.build_micro_benchmarks else "OFF"),
"-Donnxruntime_USE_ROCM=" + ("ON" if args.use_rocm else "OFF"),
"-Donnxruntime_ROCM_HOME=" + (rocm_home if args.use_rocm else ""),
"-DOnnxruntime_GCOV_COVERAGE=" + ("ON" if args.code_coverage else "OFF"),
"-Donnxruntime_USE_MPI=" + ("ON" if args.use_mpi else "OFF"),
"-Donnxruntime_ENABLE_MEMORY_PROFILE=" + ("ON" if args.enable_memory_profile else "OFF"),
"-Donnxruntime_ENABLE_CUDA_LINE_NUMBER_INFO=" + ("ON" if args.enable_cuda_line_info else "OFF"),
"-Donnxruntime_BUILD_WEBASSEMBLY=" + ("ON" if args.build_wasm else "OFF"),
"-Donnxruntime_ENABLE_WEBASSEMBLY_SIMD=" + ("ON" if args.enable_wasm_simd else "OFF"),
"-Donnxruntime_ENABLE_WEBASSEMBLY_EXCEPTION_CATCHING=" + ("OFF" if args.disable_wasm_exception_catching
else "ON"),
"-Donnxruntime_ENABLE_WEBASSEMBLY_EXCEPTION_THROWING=" + ("ON" if args.enable_wasm_exception_throwing_override
else "OFF"),
"-Donnxruntime_ENABLE_WEBASSEMBLY_THREADS=" + ("ON" if args.enable_wasm_threads else "OFF"),
"-Donnxruntime_ENABLE_WEBASSEMBLY_DEBUG_INFO=" + ("ON" if args.enable_wasm_debug_info else "OFF"),
"-Donnxruntime_ENABLE_WEBASSEMBLY_PROFILING=" + ("ON" if args.enable_wasm_profiling else "OFF"),
"-Donnxruntime_WEBASSEMBLY_MALLOC=" + args.wasm_malloc,
"-Donnxruntime_ENABLE_EAGER_MODE=" + ("ON" if args.build_eager_mode else "OFF"),
"-Donnxruntime_ENABLE_EXTERNAL_CUSTOM_OP_SCHEMAS=" + ("ON" if args.enable_external_custom_op_schemas
else "OFF"),
]
# It should be default ON in CI build pipelines, and OFF in packaging pipelines.
# And OFF for the people who are not actively developing onnx runtime.
add_cmake_define_without_override(cmake_extra_defines, "onnxruntime_DEV_MODE", use_dev_mode(args))
if args.use_cuda:
add_cmake_define_without_override(cmake_extra_defines, "onnxruntime_USE_CUDA", "ON")
add_cmake_define_without_override(cmake_extra_defines, "onnxruntime_CUDA_VERSION", args.cuda_version)
# TODO: this variable is not really needed
add_cmake_define_without_override(cmake_extra_defines, "onnxruntime_CUDA_HOME", cuda_home)
add_cmake_define_without_override(cmake_extra_defines, "onnxruntime_CUDNN_HOME", cudnn_home)
if is_windows():
if args.enable_msvc_static_runtime:
add_cmake_define_without_override(cmake_extra_defines, "CMAKE_MSVC_RUNTIME_LIBRARY",
"MultiThreaded$<$<CONFIG:Debug>:Debug>")
add_cmake_define_without_override(cmake_extra_defines, "ONNX_USE_MSVC_STATIC_RUNTIME", "ON")
add_cmake_define_without_override(cmake_extra_defines, "protobuf_MSVC_STATIC_RUNTIME", "ON")
add_cmake_define_without_override(cmake_extra_defines, "gtest_force_shared_crt", "OFF")
else:
# CMAKE_MSVC_RUNTIME_LIBRARY is default to MultiThreaded$<$<CONFIG:Debug>:Debug>DLL
add_cmake_define_without_override(cmake_extra_defines, "ONNX_USE_MSVC_STATIC_RUNTIME", "OFF")
add_cmake_define_without_override(cmake_extra_defines, "protobuf_MSVC_STATIC_RUNTIME", "OFF")
add_cmake_define_without_override(cmake_extra_defines, "gtest_force_shared_crt", "ON")
if acl_home and os.path.exists(acl_home):
cmake_args += ["-Donnxruntime_ACL_HOME=" + acl_home]
if acl_libs and os.path.exists(acl_libs):
cmake_args += ["-Donnxruntime_ACL_LIBS=" + acl_libs]
if armnn_home and os.path.exists(armnn_home):
cmake_args += ["-Donnxruntime_ARMNN_HOME=" + armnn_home]
if armnn_libs and os.path.exists(armnn_libs):
cmake_args += ["-Donnxruntime_ARMNN_LIBS=" + armnn_libs]
if mpi_home and os.path.exists(mpi_home):
if args.use_mpi:
cmake_args += ["-Donnxruntime_MPI_HOME=" + mpi_home]
else:
log.warning("mpi_home is supplied but use_mpi is set to false."
" Build will continue without linking MPI libraries.")
if nccl_home and os.path.exists(nccl_home):
cmake_args += ["-Donnxruntime_NCCL_HOME=" + nccl_home]
if args.winml_root_namespace_override:
cmake_args += ["-Donnxruntime_WINML_NAMESPACE_OVERRIDE=" +
args.winml_root_namespace_override]
if args.use_openvino:
cmake_args += ["-Donnxruntime_USE_OPENVINO=ON",
"-Donnxruntime_USE_OPENVINO_MYRIAD=" + (
"ON" if args.use_openvino == "MYRIAD_FP16" else "OFF"),
"-Donnxruntime_USE_OPENVINO_GPU_FP32=" + (
"ON" if args.use_openvino == "GPU_FP32" else "OFF"),
"-Donnxruntime_USE_OPENVINO_GPU_FP16=" + (
"ON" if args.use_openvino == "GPU_FP16" else "OFF"),
"-Donnxruntime_USE_OPENVINO_CPU_FP32=" + (
"ON" if args.use_openvino == "CPU_FP32" else "OFF"),
"-Donnxruntime_USE_OPENVINO_VAD_M=" + (
"ON" if args.use_openvino == "VAD-M_FP16" else "OFF"),
"-Donnxruntime_USE_OPENVINO_VAD_F=" + (
"ON" if args.use_openvino == "VAD-F_FP32" else "OFF"),
"-Donnxruntime_USE_OPENVINO_MYRIAD_NP=" + (
"ON" if args.use_openvino == "MYRIAD_FP16_NO_PARTITION" else "OFF"),
"-Donnxruntime_USE_OPENVINO_GPU_FP32_NP=" + (
"ON" if args.use_openvino == "GPU_FP32_NO_PARTITION" else "OFF"),
"-Donnxruntime_USE_OPENVINO_GPU_FP16_NP=" + (
"ON" if args.use_openvino == "GPU_FP16_NO_PARTITION" else "OFF"),
"-Donnxruntime_USE_OPENVINO_CPU_FP32_NP=" + (
"ON" if args.use_openvino == "CPU_FP32_NO_PARTITION" else "OFF"),
"-Donnxruntime_USE_OPENVINO_VAD_M_NP=" + (
"ON" if args.use_openvino == "VAD-M_FP16_NO_PARTITION" else "OFF"),
"-Donnxruntime_USE_OPENVINO_VAD_F_NP=" + (
"ON" if args.use_openvino == "VAD-F_FP32_NO_PARTITION" else "OFF"),
"-Donnxruntime_USE_OPENVINO_HETERO=" + (
"ON" if args.use_openvino.startswith("HETERO") else "OFF"),
"-Donnxruntime_USE_OPENVINO_DEVICE=" + (args.use_openvino),
"-Donnxruntime_USE_OPENVINO_MULTI=" + (
"ON" if args.use_openvino.startswith("MULTI") else "OFF")]
# TensorRT and OpenVINO providers currently only support
# full_protobuf option.
if (args.use_full_protobuf or args.use_tensorrt or
args.use_openvino or args.use_vitisai or args.gen_doc):
cmake_args += [
"-Donnxruntime_USE_FULL_PROTOBUF=ON",
"-DProtobuf_USE_STATIC_LIBS=ON"
]
if args.use_nuphar and args.llvm_path is not None:
cmake_args += ["-DLLVM_DIR=%s" % args.llvm_path]
if args.use_cuda and not is_windows():
nvml_stub_path = cuda_home + "/lib64/stubs"
cmake_args += ["-DCUDA_CUDA_LIBRARY=" + nvml_stub_path]
if args.use_preinstalled_eigen:
cmake_args += ["-Donnxruntime_USE_PREINSTALLED_EIGEN=ON",
"-Deigen_SOURCE_PATH=" + args.eigen_path]
if args.nnapi_min_api:
cmake_args += ["-Donnxruntime_NNAPI_MIN_API=" + str(args.nnapi_min_api)]
if args.android:
if not args.android_ndk_path:
raise BuildError("android_ndk_path required to build for Android")
if not args.android_sdk_path:
raise BuildError("android_sdk_path required to build for Android")
cmake_args += [
"-DCMAKE_TOOLCHAIN_FILE=" + os.path.join(
args.android_ndk_path, 'build', 'cmake', 'android.toolchain.cmake'),
"-DANDROID_PLATFORM=android-" + str(args.android_api),
"-DANDROID_ABI=" + str(args.android_abi),
"-DANDROID_MIN_SDK=" + str(args.android_api),
]
if args.android_cpp_shared:
cmake_args += ["-DANDROID_STL=c++_shared"]
if is_macOS() and not args.android:
cmake_args += ["-DCMAKE_OSX_ARCHITECTURES=" + args.osx_arch]
# since cmake 3.19, it uses the xcode latest buildsystem, which is not supported by this project.
cmake_verstr = subprocess.check_output(['cmake', '--version']).decode('utf-8').split()[2]
if args.use_xcode and LooseVersion(cmake_verstr) >= LooseVersion('3.19.0'):
cmake_args += ["-T", "buildsystem=1"]
if args.apple_deploy_target:
cmake_args += ["-DCMAKE_OSX_DEPLOYMENT_TARGET=" + args.apple_deploy_target]
# Code sign the binaries, if the code signing development identity and/or team id are provided
if args.xcode_code_signing_identity:
cmake_args += ["-DCMAKE_XCODE_ATTRIBUTE_CODE_SIGN_IDENTITY=" + args.xcode_code_signing_identity]
if args.xcode_code_signing_team_id:
cmake_args += ["-DCMAKE_XCODE_ATTRIBUTE_DEVELOPMENT_TEAM=" + args.xcode_code_signing_team_id]
if args.use_coreml:
cmake_args += ["-Donnxruntime_USE_COREML=ON"]
if args.ios:
if is_macOS():
needed_args = [
args.use_xcode,
args.ios_sysroot,
args.apple_deploy_target,
]
arg_names = [
"--use_xcode " +
"<need use xcode to cross build iOS on MacOS>",
"--ios_sysroot " +
"<the location or name of the macOS platform SDK>",
"--apple_deploy_target " +
"<the minimum version of the target platform>",
]
if not all(needed_args):
raise BuildError(
"iOS build on MacOS canceled due to missing arguments: " +
', '.join(
val for val, cond in zip(arg_names, needed_args)
if not cond))
cmake_args += [
"-DCMAKE_SYSTEM_NAME=iOS",
"-Donnxruntime_BUILD_SHARED_LIB=ON",
"-DCMAKE_OSX_SYSROOT=" + args.ios_sysroot,
"-DCMAKE_OSX_DEPLOYMENT_TARGET=" + args.apple_deploy_target,
# we do not need protoc binary for ios cross build
"-Dprotobuf_BUILD_PROTOC_BINARIES=OFF",
"-DCMAKE_TOOLCHAIN_FILE=" + (
args.ios_toolchain_file if args.ios_toolchain_file
else "../cmake/onnxruntime_ios.toolchain.cmake")
]
else:
# TODO: the cross compiling on Linux is not officially supported by Apple
# and is already broken with the latest codebase, so it should be removed.
# We are cross compiling on Linux
needed_args = [
args.ios_sysroot,
args.arm64 or args.arm,
args.ios_toolchain_dir
]
arg_names = [
"--ios_sysroot <path to sysroot>",
"--arm or --arm64",
"--ios_toolchain_dir <path to toolchain>"
]
if not all(needed_args):
raise BuildError(
"iOS build canceled due to missing arguments: " +
', '.join(
val for val, cond in zip(arg_names, needed_args)
if not cond))
compilers = sorted(
glob.glob(args.ios_toolchain_dir + "/bin/*-clang*"))
os.environ["PATH"] = os.path.join(
args.ios_toolchain_dir, "bin") + os.pathsep + os.environ.get(
"PATH", "")
os.environ["LD_LIBRARY_PATH"] = os.path.join(
args.ios_toolchain_dir, "/lib") + os.pathsep + os.environ.get(
"LD_LIBRARY_PATH", "")
if len(compilers) != 2:
raise BuildError(
"error identifying compilers in ios_toolchain_dir")
cmake_args += [
"-DCMAKE_OSX_ARCHITECTURES=" +
("arm64" if args.arm64 else "arm"),
"-DCMAKE_SYSTEM_NAME=iOSCross",
"-Donnxruntime_BUILD_UNIT_TESTS=OFF",
"-DCMAKE_OSX_SYSROOT=" + args.ios_sysroot,
"-DCMAKE_C_COMPILER=" + compilers[0],
"-DCMAKE_CXX_COMPILER=" + compilers[1]
]
if args.build_wasm:
emsdk_dir = os.path.join(cmake_dir, "external", "emsdk")
emscripten_cmake_toolchain_file = os.path.join(emsdk_dir, "upstream", "emscripten", "cmake", "Modules",
"Platform", "Emscripten.cmake")
cmake_args += [
"-DCMAKE_TOOLCHAIN_FILE=" + emscripten_cmake_toolchain_file
]
if args.disable_wasm_exception_catching:
# The WebAssembly unit tests require exception catching to work. If this feature is disabled, we do not
# build the unit tests.
cmake_args += [
"-Donnxruntime_BUILD_UNIT_TESTS=OFF",
]
# Append onnxruntime-extensions cmake options
if args.use_extensions:
cmake_args += ["-Donnxruntime_USE_EXTENSIONS=ON"]
# default path of onnxruntime-extensions, using git submodule
onnxruntime_extensions_path = os.path.join(cmake_dir, "external", "onnxruntime-extensions")
if args.extensions_overridden_path and os.path.exists(args.extensions_overridden_path):
# use absolute path here because onnxruntime-extensions is outside onnxruntime
onnxruntime_extensions_path = os.path.abspath(args.extensions_overridden_path)
cmake_args += [
"-Donnxruntime_EXTENSIONS_PATH=" + onnxruntime_extensions_path]
print('[onnxruntime-extensions] onnxruntime_extensions_path: ', onnxruntime_extensions_path)
if is_reduced_ops_build(args):
operators_config_file = os.path.abspath(args.include_ops_by_config)
cmake_tool_dir = os.path.join(onnxruntime_extensions_path, 'tools')
# generate _selectedoplist.cmake by operators config file
run_subprocess([sys.executable, 'gen_selectedops.py', operators_config_file], cwd=cmake_tool_dir)
if path_to_protoc_exe:
cmake_args += [
"-DONNX_CUSTOM_PROTOC_EXECUTABLE=%s" % path_to_protoc_exe]
if args.fuzz_testing:
if not (args.build_shared_lib and
is_windows() and
args.cmake_generator == 'Visual Studio 16 2019' and
args.use_full_protobuf):
raise BuildError(
"Fuzz test has only be tested with build shared libs option using MSVC on windows")
cmake_args += [
"-Donnxruntime_BUILD_UNIT_TESTS=ON",
"-Donnxruntime_FUZZ_TEST=ON",
"-Donnxruntime_USE_FULL_PROTOBUF=ON"]
if args.gen_doc:
add_cmake_define_without_override(cmake_extra_defines, "onnxruntime_PYBIND_EXPORT_OPSCHEMA", "ON")
else:
add_cmake_define_without_override(cmake_extra_defines, "onnxruntime_PYBIND_EXPORT_OPSCHEMA", "OFF")
if args.build_eager_mode:
import torch
cmake_args += ["-Donnxruntime_PREBUILT_PYTORCH_PATH=%s" % os.path.dirname(torch.__file__)]
cmake_args += ["-D{}".format(define) for define in cmake_extra_defines]
cmake_args += cmake_extra_args
# ADO pipelines will store the pipeline build number
# (e.g. 191101-2300.1.master) and source version in environment
# variables. If present, use these values to define the
# WinML/ORT DLL versions.
build_number = os.getenv('Build_BuildNumber')
source_version = os.getenv('Build_SourceVersion')
if build_number and source_version:
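# The build number is expected to look like YYYYMMDD.<revision>; if it doesn't match, the version defines below are skipped.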
build_matches = re.fullmatch(
r"(\d\d)(\d\d)(\d\d)(\d\d)\.(\d+)", build_number)
if build_matches:
YY = build_matches.group(2)
MM = build_matches.group(3)
DD = build_matches.group(4)
# Get ORT major and minor number
with open(os.path.join(source_dir, 'VERSION_NUMBER')) as f:
first_line = f.readline()
ort_version_matches = re.match(r"(\d+).(\d+)", first_line)
if not ort_version_matches:
raise BuildError("Couldn't read version from VERSION_FILE")
ort_major = ort_version_matches.group(1)
ort_minor = ort_version_matches.group(2)
# Example (BuildNumber: 191101-2300.1.master,
# SourceVersion: 0bce7ae6755c792eda558e5d27ded701707dc404)
# MajorPart = 1
# MinorPart = 0
# BuildPart = 1911
# PrivatePart = 123
# String = 191101-2300.1.master.0bce7ae
cmake_args += [
"-DVERSION_MAJOR_PART={}".format(ort_major),
"-DVERSION_MINOR_PART={}".format(ort_minor),
"-DVERSION_BUILD_PART={}".format(YY),
"-DVERSION_PRIVATE_PART={}{}".format(MM, DD),
"-DVERSION_STRING={}.{}.{}.{}".format(
ort_major, ort_minor, build_number,
source_version[0:7])
]
for config in configs:
config_build_dir = get_config_build_dir(build_dir, config)
os.makedirs(config_build_dir, exist_ok=True)
if args.use_nuphar:
os.environ["PATH"] = os.path.join(
config_build_dir, "external", "tvm",
config) + os.pathsep + os.path.dirname(sys.executable) + os.pathsep + os.environ["PATH"]
run_subprocess(
cmake_args + [
"-Donnxruntime_ENABLE_MEMLEAK_CHECKER=" +
("ON" if config.lower() == 'debug' and not args.use_nuphar and not
args.use_openvino and not
args.enable_msvc_static_runtime
else "OFF"), "-DCMAKE_BUILD_TYPE={}".format(config)],
cwd=config_build_dir)
def clean_targets(cmake_path, build_dir, configs):
for config in configs:
log.info("Cleaning targets for %s configuration", config)
build_dir2 = get_config_build_dir(build_dir, config)
cmd_args = [cmake_path,
"--build", build_dir2,
"--config", config,
"--target", "clean"]
run_subprocess(cmd_args)
def build_targets(args, cmake_path, build_dir, configs, num_parallel_jobs, target=None):
for config in configs:
log.info("Building targets for %s configuration", config)
build_dir2 = get_config_build_dir(build_dir, config)
cmd_args = [cmake_path,
"--build", build_dir2,
"--config", config]
if target:
cmd_args.extend(['--target', target])
build_tool_args = []
if num_parallel_jobs != 1:
if is_windows() and args.cmake_generator != 'Ninja' and not args.build_wasm:
build_tool_args += [
"/maxcpucount:{}".format(num_parallel_jobs),
# if nodeReuse is true, msbuild processes will stay around for a bit after the build completes
"/nodeReuse:False",
]
elif (is_macOS() and args.use_xcode):
# CMake will generate correct build tool args for Xcode
cmd_args += ["--parallel", str(num_parallel_jobs)]
else:
build_tool_args += ["-j{}".format(num_parallel_jobs)]
if build_tool_args:
cmd_args += ["--"]
cmd_args += build_tool_args
env = {}
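# For Android builds, pass the SDK/NDK locations to the build step through the environment.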
if args.android:
env['ANDROID_SDK_ROOT'] = args.android_sdk_path
env['ANDROID_NDK_HOME'] = args.android_ndk_path
run_subprocess(cmd_args, env=env)
def add_dir_if_exists(directory, dir_list):
if os.path.isdir(directory):
dir_list.append(directory)
def setup_cuda_vars(args):
cuda_home = ""
cudnn_home = ""
if args.use_cuda:
cuda_home = args.cuda_home if args.cuda_home else os.getenv(
"CUDA_HOME")
cudnn_home = args.cudnn_home if args.cudnn_home else os.getenv(
"CUDNN_HOME")
cuda_home_valid = (cuda_home is not None and os.path.exists(cuda_home))
cudnn_home_valid = (cudnn_home is not None and os.path.exists(
cudnn_home))
if not cuda_home_valid or not cudnn_home_valid:
raise BuildError(
"cuda_home and cudnn_home paths must be specified and valid.",
"cuda_home='{}' valid={}. cudnn_home='{}' valid={}"
.format(
cuda_home, cuda_home_valid, cudnn_home, cudnn_home_valid))
return cuda_home, cudnn_home
def setup_tensorrt_vars(args):
tensorrt_home = ""
if args.use_tensorrt:
tensorrt_home = (args.tensorrt_home if args.tensorrt_home
else os.getenv("TENSORRT_HOME"))
tensorrt_home_valid = (tensorrt_home is not None and
os.path.exists(tensorrt_home))
if not tensorrt_home_valid:
raise BuildError(
"tensorrt_home paths must be specified and valid.",
"tensorrt_home='{}' valid={}."
.format(tensorrt_home, tensorrt_home_valid))
# Set maximum workspace size in byte for
# TensorRT (1GB = 1073741824 bytes).
os.environ["ORT_TENSORRT_MAX_WORKSPACE_SIZE"] = "1073741824"
# Set maximum number of iterations to detect unsupported nodes
# and partition the models for TensorRT.
os.environ["ORT_TENSORRT_MAX_PARTITION_ITERATIONS"] = "1000"
# Set minimum subgraph node size in graph partitioning
# for TensorRT.
os.environ["ORT_TENSORRT_MIN_SUBGRAPH_SIZE"] = "1"
# Set FP16 flag
os.environ["ORT_TENSORRT_FP16_ENABLE"] = "0"
return tensorrt_home
def setup_migraphx_vars(args):
migraphx_home = None
if (args.use_migraphx):
print("migraphx_home = {}".format(args.migraphx_home))
migraphx_home = args.migraphx_home or os.getenv("MIGRAPHX_HOME") or None
migraphx_home_not_valid = (migraphx_home and not os.path.exists(migraphx_home))
if (migraphx_home_not_valid):
raise BuildError("migraphx_home paths must be specified and valid.",
"migraphx_home='{}' valid={}."
.format(migraphx_home, migraphx_home_not_valid))
return migraphx_home or ''
def setup_dml_build(args, cmake_path, build_dir, configs):
if args.use_dml:
for config in configs:
# Run the RESTORE_PACKAGES target to perform the initial
# NuGet setup.
cmd_args = [cmake_path,
"--build", get_config_build_dir(build_dir, config),
"--config", config,
"--target", "RESTORE_PACKAGES"]
run_subprocess(cmd_args)
def setup_rocm_build(args, configs):
rocm_home = None
if (args.use_rocm):
print("rocm_home = {}".format(args.rocm_home))
rocm_home = args.rocm_home or None
rocm_home_not_valid = (rocm_home and not os.path.exists(rocm_home))
if (rocm_home_not_valid):
raise BuildError("rocm_home paths must be specified and valid.",
"rocm_home='{}' valid={}."
.format(rocm_home, rocm_home_not_valid))
for config in configs:
amd_hipify(get_config_build_dir(args.build_dir, config))
return rocm_home or ''
def run_android_tests(args, source_dir, build_dir, config, cwd):
sdk_tool_paths = android.get_sdk_tool_paths(args.android_sdk_path)
device_dir = '/data/local/tmp'
def adb_push(src, dest, **kwargs):
return run_subprocess([sdk_tool_paths.adb, 'push', src, dest], **kwargs)
def adb_shell(*args, **kwargs):
return run_subprocess([sdk_tool_paths.adb, 'shell', *args], **kwargs)
def adb_install(*args, **kwargs):
return run_subprocess([sdk_tool_paths.adb, 'install', *args], **kwargs)
def run_adb_shell(cmd):
# GCOV_PREFIX_STRIP specifies the depth of the directory hierarchy to strip and
# GCOV_PREFIX specifies the root directory
# for creating the runtime code coverage files.
if args.code_coverage:
adb_shell(
'cd {0} && GCOV_PREFIX={0} GCOV_PREFIX_STRIP={1} {2}'.format(
device_dir, cwd.count(os.sep) + 1, cmd))
else:
adb_shell('cd {} && {}'.format(device_dir, cmd))
if args.android_abi == 'x86_64':
with contextlib.ExitStack() as context_stack:
if args.android_run_emulator:
avd_name = "ort_android"
system_image = "system-images;android-{};google_apis;{}".format(
args.android_api, args.android_abi)
android.create_virtual_device(sdk_tool_paths, system_image, avd_name)
emulator_proc = context_stack.enter_context(
android.start_emulator(
sdk_tool_paths=sdk_tool_paths,
avd_name=avd_name,
extra_args=[
"-partition-size", "2047",
"-wipe-data"]))
context_stack.callback(android.stop_emulator, emulator_proc)
adb_push('testdata', device_dir, cwd=cwd)
adb_push(
os.path.join(source_dir, 'cmake', 'external', 'onnx', 'onnx', 'backend', 'test'),
device_dir, cwd=cwd)
adb_push('onnxruntime_test_all', device_dir, cwd=cwd)
adb_shell('chmod +x {}/onnxruntime_test_all'.format(device_dir))
adb_push('onnx_test_runner', device_dir, cwd=cwd)
adb_shell('chmod +x {}/onnx_test_runner'.format(device_dir))
run_adb_shell('{0}/onnxruntime_test_all'.format(device_dir))
if args.build_java:
gradle_executable = 'gradle'
# use the gradle wrapper if it exists; gradlew should be set up under <repo root>/java
gradlew_path = os.path.join(source_dir, 'java',
'gradlew.bat' if is_windows() else 'gradlew')
if os.path.exists(gradlew_path):
gradle_executable = gradlew_path
android_test_path = os.path.join(cwd, "java", "androidtest", "android")
run_subprocess([gradle_executable, '--no-daemon',
'-DminSdkVer={}'.format(args.android_api),
'clean', 'connectedDebugAndroidTest'],
cwd=android_test_path)
if args.use_nnapi:
adb_shell('cd {0} && {0}/onnx_test_runner -e nnapi {0}/test'.format(device_dir))
else:
adb_shell('cd {0} && {0}/onnx_test_runner {0}/test'.format(device_dir))
# run shared_lib_test if necessary
if args.build_shared_lib:
adb_push('libonnxruntime.so', device_dir, cwd=cwd)
adb_push('onnxruntime_shared_lib_test', device_dir, cwd=cwd)
adb_shell('chmod +x {}/onnxruntime_shared_lib_test'.format(device_dir))
run_adb_shell(
'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:{0} && {0}/onnxruntime_shared_lib_test'.format(
device_dir))
def run_ios_tests(args, source_dir, config, cwd):
run_subprocess(["xcodebuild", "test-without-building", "-project", "./onnxruntime.xcodeproj",
"-configuration", config,
"-scheme", "onnxruntime_test_all_xc", "-destination",
"platform=iOS Simulator,OS=latest,name=iPhone SE (2nd generation)"], cwd=cwd)
run_subprocess(["xcodebuild", "test-without-building", "-project", "./onnxruntime.xcodeproj",
"-configuration", config,
"-scheme", "onnxruntime_shared_lib_test_xc", "-destination",
"platform=iOS Simulator,OS=latest,name=iPhone SE (2nd generation)"], cwd=cwd)
if args.build_apple_framework:
package_test_py = os.path.join(source_dir, 'tools', 'ci_build', 'github', 'apple', 'test_ios_packages.py')
framework_info_file = os.path.join(cwd, 'framework_info.json')
dynamic_framework_dir = os.path.join(cwd, config + '-' + args.ios_sysroot)
static_framework_dir = os.path.join(cwd, config + '-' + args.ios_sysroot, 'static_framework')
# test dynamic framework
run_subprocess([sys.executable, package_test_py,
'--c_framework_dir', dynamic_framework_dir,
'--framework_info_file', framework_info_file], cwd=cwd)
# test static framework
run_subprocess([sys.executable, package_test_py,
'--c_framework_dir', static_framework_dir,
'--framework_info_file', framework_info_file], cwd=cwd)
def run_orttraining_test_orttrainer_frontend_separately(cwd):
class TestNameCollecterPlugin:
def __init__(self):
self.collected = set()
def pytest_collection_modifyitems(self, items):
for item in items:
print('item.name: ', item.name)
test_name = item.name
start = test_name.find('[')
if start > 0:
test_name = test_name[:start]
self.collected.add(test_name)
import pytest
plugin = TestNameCollecterPlugin()
test_script_filename = os.path.join(cwd, "orttraining_test_orttrainer_frontend.py")
pytest.main(['--collect-only', test_script_filename], plugins=[plugin])
for test_name in plugin.collected:
run_subprocess([
sys.executable, '-m', 'pytest',
'orttraining_test_orttrainer_frontend.py', '-v', '-k', test_name], cwd=cwd)
def run_training_python_frontend_tests(cwd):
# have to disable due to (with torchvision==0.9.1+cu102 which is required by ortmodule):
# Downloading http://yann.lecun.com/exdb/mnist/
# https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz
# Failed to download (trying next):
# HTTP Error 404: Not Found
# run_subprocess([sys.executable, 'onnxruntime_test_ort_trainer.py'], cwd=cwd)
run_subprocess([sys.executable, 'onnxruntime_test_training_unit_tests.py'], cwd=cwd)
run_subprocess([
sys.executable, 'orttraining_test_transformers.py',
'BertModelTest.test_for_pretraining_full_precision_list_input'], cwd=cwd)
run_subprocess([
sys.executable, 'orttraining_test_transformers.py',
'BertModelTest.test_for_pretraining_full_precision_dict_input'], cwd=cwd)
run_subprocess([
sys.executable, 'orttraining_test_transformers.py',
'BertModelTest.test_for_pretraining_full_precision_list_and_dict_input'], cwd=cwd)
# TODO: use run_orttraining_test_orttrainer_frontend_separately to work around a sporadic segfault.
# shall revert to run_subprocess call once the segfault issue is resolved.
run_orttraining_test_orttrainer_frontend_separately(cwd)
# run_subprocess([sys.executable, '-m', 'pytest', '-sv', 'orttraining_test_orttrainer_frontend.py'], cwd=cwd)
run_subprocess([sys.executable, '-m', 'pytest', '-sv', 'orttraining_test_orttrainer_bert_toy_onnx.py'], cwd=cwd)
run_subprocess([sys.executable, '-m', 'pytest', '-sv', 'orttraining_test_checkpoint_storage.py'], cwd=cwd)
run_subprocess([
sys.executable, '-m', 'pytest', '-sv', 'orttraining_test_orttrainer_checkpoint_functions.py'], cwd=cwd)
# Not technically training related, but it needs torch to be installed.
run_subprocess([
sys.executable, '-m', 'pytest', '-sv', 'test_pytorch_export_contrib_ops.py'], cwd=cwd)
def run_training_python_frontend_e2e_tests(cwd):
# frontend tests are to be added here:
log.info("Running python frontend e2e tests.")
run_subprocess(
[sys.executable, 'orttraining_run_frontend_batch_size_test.py', '-v'],
cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})
import torch
ngpus = torch.cuda.device_count()
if ngpus > 1:
bert_pretrain_script = 'orttraining_run_bert_pretrain.py'
# TODO: this test will be replaced with convergence test ported from backend
log.debug('RUN: mpirun -n {} -x NCCL_DEBUG=INFO {} {} {}'.format(
ngpus, sys.executable, bert_pretrain_script, 'ORTBertPretrainTest.test_pretrain_convergence'))
run_subprocess([
'mpirun', '-n', str(ngpus), '-x', 'NCCL_DEBUG=INFO', sys.executable,
bert_pretrain_script, 'ORTBertPretrainTest.test_pretrain_convergence'], cwd=cwd)
log.debug('RUN: mpirun -n {} {} orttraining_run_glue.py'.format(ngpus, sys.executable))
run_subprocess([
'mpirun', '-n', str(ngpus), '-x', 'NCCL_DEBUG=INFO', sys.executable, 'orttraining_run_glue.py'], cwd=cwd)
# with orttraining_run_glue.py.
# 1. we like to force to use single GPU (with CUDA_VISIBLE_DEVICES)
# for fine-tune tests.
# 2. need to run test separately (not to mix between fp16
# and full precision runs. this need to be investigated).
run_subprocess(
[sys.executable, 'orttraining_run_glue.py', 'ORTGlueTest.test_bert_with_mrpc', '-v'],
cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})
run_subprocess(
[sys.executable, 'orttraining_run_glue.py', 'ORTGlueTest.test_bert_fp16_with_mrpc', '-v'],
cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})
run_subprocess(
[sys.executable, 'orttraining_run_glue.py', 'ORTGlueTest.test_roberta_with_mrpc', '-v'],
cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})
run_subprocess(
[sys.executable, 'orttraining_run_glue.py', 'ORTGlueTest.test_roberta_fp16_with_mrpc', '-v'],
cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})
run_subprocess(
[sys.executable, 'orttraining_run_multiple_choice.py', 'ORTMultipleChoiceTest.test_bert_fp16_with_swag', '-v'],
cwd=cwd, env={'CUDA_VISIBLE_DEVICES': '0'})
run_subprocess([sys.executable, 'onnxruntime_test_ort_trainer_with_mixed_precision.py'], cwd=cwd)
run_subprocess([
sys.executable, 'orttraining_test_transformers.py',
'BertModelTest.test_for_pretraining_mixed_precision'], cwd=cwd)
def run_onnxruntime_tests(args, source_dir, ctest_path, build_dir, configs):
for config in configs:
log.info("Running tests for %s configuration", config)
cwd = get_config_build_dir(build_dir, config)
cwd = os.path.abspath(cwd)
if args.android:
run_android_tests(args, source_dir, build_dir, config, cwd)
continue
elif args.ios:
run_ios_tests(args, source_dir, config, cwd)
continue
dll_path_list = []
if args.use_nuphar:
dll_path_list.append(os.path.join(
build_dir, config, "external", "tvm", config))
if args.use_tensorrt:
dll_path_list.append(os.path.join(args.tensorrt_home, 'lib'))
dll_path = None
if len(dll_path_list) > 0:
dll_path = os.pathsep.join(dll_path_list)
if not ctest_path:
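# No ctest available: run the test executables directly (vstest.console.exe with the Google Test adapter on Windows).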
if is_windows():
# Get the "Google Test Adapter" for vstest.
if not os.path.exists(os.path.join(cwd,
'googletestadapter.0.17.1')):
run_subprocess(
['nuget.exe', 'restore',
os.path.join(source_dir, 'packages.config'),
'-ConfigFile', os.path.join(source_dir, 'NuGet.config'),
'-PackagesDirectory', cwd])
cwd2 = os.path.join(cwd, config)
executables = ['onnxruntime_test_all.exe', 'onnxruntime_mlas_test.exe']
if args.build_shared_lib:
executables.append('onnxruntime_shared_lib_test.exe')
executables.append('onnxruntime_global_thread_pools_test.exe')
executables.append('onnxruntime_api_tests_without_env.exe')
run_subprocess(
['vstest.console.exe', '--parallel',
'--TestAdapterPath:..\\googletestadapter.0.17.1\\build\\_common', # noqa
'/Logger:trx', '/Enablecodecoverage', '/Platform:x64',
"/Settings:%s" % os.path.join(
source_dir, 'cmake\\codeconv.runsettings')] + executables,
cwd=cwd2, dll_path=dll_path)
else:
executables = ['onnxruntime_test_all', 'onnxruntime_mlas_test']
if args.build_shared_lib:
executables.append('onnxruntime_shared_lib_test')
executables.append('onnxruntime_global_thread_pools_test')
executables.append('onnxruntime_api_tests_without_env')
for exe in executables:
run_subprocess([os.path.join(cwd, exe)], cwd=cwd, dll_path=dll_path)
else:
ctest_cmd = [ctest_path, "--build-config", config, "--verbose", "--timeout", "7200"]
run_subprocess(ctest_cmd, cwd=cwd, dll_path=dll_path)
if args.enable_pybind:
# Disable python tests for TensorRT because many tests are
# not supported yet.
if args.use_tensorrt:
return
# Disable python tests in a reduced build as we don't know which ops have been included and which
# models can run.
if is_reduced_ops_build(args) or args.minimal_build is not None:
return
if is_windows():
cwd = os.path.join(cwd, config)
run_subprocess([sys.executable, 'onnxruntime_test_python.py'], cwd=cwd, dll_path=dll_path)
if not args.disable_contrib_ops:
run_subprocess([sys.executable, 'onnxruntime_test_python_sparse_matmul.py'],
cwd=cwd, dll_path=dll_path)
if args.enable_symbolic_shape_infer_tests:
run_subprocess([sys.executable, 'onnxruntime_test_python_symbolic_shape_infer.py'],
cwd=cwd, dll_path=dll_path)
# For CUDA enabled builds test IOBinding feature
if args.use_cuda:
# We need to have Torch installed to test the IOBinding feature
# which currently uses Torch's allocator to allocate GPU memory for testing
log.info("Testing IOBinding feature")
run_subprocess([sys.executable, 'onnxruntime_test_python_iobinding.py'], cwd=cwd, dll_path=dll_path)
if not args.disable_ml_ops:
run_subprocess([sys.executable, 'onnxruntime_test_python_mlops.py'], cwd=cwd, dll_path=dll_path)
if args.enable_training and args.use_cuda:
# run basic frontend tests
run_training_python_frontend_tests(cwd=cwd)
if args.build_eager_mode:
# run eager mode test
args_list = [sys.executable, os.path.join(cwd, 'eager_test')]
run_subprocess(args_list, cwd=cwd, dll_path=dll_path, python_path=cwd)
try:
import onnx # noqa
onnx_test = True
except ImportError as error:
log.exception(error)
log.warning("onnx is not installed. The ONNX tests will be skipped.")
onnx_test = False
if onnx_test:
run_subprocess([sys.executable, 'onnxruntime_test_python_backend.py'], cwd=cwd, dll_path=dll_path)
if not args.disable_contrib_ops:
run_subprocess([sys.executable, '-m', 'unittest', 'discover', '-s', 'quantization'],
cwd=cwd, dll_path=dll_path)
if args.enable_transformers_tool_test:
import numpy
import google.protobuf
numpy_init_version = numpy.__version__
pb_init_version = google.protobuf.__version__
run_subprocess([sys.executable, '-m', 'pip', 'install', '-r', 'requirements.txt'],
cwd=SCRIPT_DIR)
run_subprocess([sys.executable, '-m', 'pytest', 'transformers'], cwd=cwd)
# Restore initial numpy/protobuf version in case other tests use it
run_subprocess([sys.executable, '-m', 'pip', 'install', 'numpy==' + numpy_init_version])
run_subprocess([sys.executable, '-m', 'pip', 'install', 'protobuf==' + pb_init_version])
if not args.disable_ml_ops:
run_subprocess([sys.executable, 'onnxruntime_test_python_backend_mlops.py'],
cwd=cwd, dll_path=dll_path)
run_subprocess([sys.executable,
os.path.join(source_dir, 'onnxruntime', 'test', 'onnx', 'gen_test_models.py'),
'--output_dir', 'test_models'], cwd=cwd)
if not args.skip_onnx_tests:
run_subprocess([os.path.join(cwd, 'onnx_test_runner'), 'test_models'], cwd=cwd)
if config != 'Debug':
run_subprocess([sys.executable, 'onnx_backend_test_series.py'], cwd=cwd, dll_path=dll_path)
if not args.skip_keras_test:
try:
import onnxmltools # noqa
import keras # noqa
onnxml_test = True
except ImportError:
log.warning(
"onnxmltools and keras are not installed. "
"The keras tests will be skipped.")
onnxml_test = False
if onnxml_test:
run_subprocess(
[sys.executable, 'onnxruntime_test_python_keras.py'],
cwd=cwd, dll_path=dll_path)
def nuphar_run_python_tests(build_dir, configs):
for config in configs:
if config == 'Debug':
continue
cwd = get_config_build_dir(build_dir, config)
if is_windows():
cwd = os.path.join(cwd, config)
dll_path = os.path.join(build_dir, config, "external", "tvm", config)
run_subprocess(
[sys.executable, 'onnxruntime_test_python_nuphar.py'],
cwd=cwd, dll_path=dll_path)
def run_nodejs_tests(nodejs_binding_dir):
args = ['npm', 'test', '--', '--timeout=10000']
if is_windows():
args = ['cmd', '/c'] + args
run_subprocess(args, cwd=nodejs_binding_dir)
def build_python_wheel(
source_dir, build_dir, configs, use_cuda, cuda_version, use_rocm, rocm_version, use_dnnl,
use_tensorrt, use_openvino, use_nuphar, use_vitisai, use_acl, use_armnn, use_dml,
wheel_name_suffix, enable_training, nightly_build=False, default_training_package_device=False,
use_ninja=False, build_eager_mode=False):
for config in configs:
cwd = get_config_build_dir(build_dir, config)
if is_windows() and not use_ninja:
cwd = os.path.join(cwd, config)
args = [sys.executable, os.path.join(source_dir, 'setup.py'),
'bdist_wheel']
# Any combination of the following arguments can be applied
if nightly_build:
args.append('--nightly_build')
if default_training_package_device:
args.append('--default_training_package_device')
if wheel_name_suffix:
args.append('--wheel_name_suffix={}'.format(wheel_name_suffix))
if enable_training:
args.append("--enable_training")
if build_eager_mode:
args.append("--disable_auditwheel_repair")
# The following arguments are mutually exclusive
if use_tensorrt:
args.append('--use_tensorrt')
elif use_cuda:
# The following line assumes no other EP is enabled
args.append('--wheel_name_suffix=gpu')
if cuda_version:
args.append('--cuda_version={}'.format(cuda_version))
elif use_rocm:
args.append('--use_rocm')
if rocm_version:
args.append('--rocm_version={}'.format(rocm_version))
elif use_openvino:
args.append('--use_openvino')
elif use_dnnl:
args.append('--use_dnnl')
elif use_nuphar:
args.append('--use_nuphar')
elif use_vitisai:
args.append('--use_vitisai')
elif use_acl:
args.append('--use_acl')
elif use_armnn:
args.append('--use_armnn')
elif use_dml:
args.append('--wheel_name_suffix=directml')
run_subprocess(args, cwd=cwd)
def derive_linux_build_property():
if is_windows():
return "/p:IsLinuxBuild=\"false\""
else:
return "/p:IsLinuxBuild=\"true\""
def build_nuget_package(source_dir, build_dir, configs, use_cuda, use_openvino, use_tensorrt, use_dnnl, use_nuphar):
if not (is_windows() or is_linux()):
raise BuildError(
'Currently csharp builds and nuget package creation are only supported '
'on Windows and Linux platforms.')
csharp_build_dir = os.path.join(source_dir, 'csharp')
is_linux_build = derive_linux_build_property()
# derive package name and execution provider based on the build args
execution_provider = "/p:ExecutionProvider=\"None\""
package_name = "/p:OrtPackageId=\"Microsoft.ML.OnnxRuntime\""
if use_openvino:
execution_provider = "/p:ExecutionProvider=\"openvino\""
package_name = "/p:OrtPackageId=\"Microsoft.ML.OnnxRuntime.OpenVino\""
elif use_tensorrt:
execution_provider = "/p:ExecutionProvider=\"tensorrt\""
package_name = "/p:OrtPackageId=\"Microsoft.ML.OnnxRuntime.TensorRT\""
elif use_dnnl:
execution_provider = "/p:ExecutionProvider=\"dnnl\""
package_name = "/p:OrtPackageId=\"Microsoft.ML.OnnxRuntime.DNNL\""
elif use_cuda:
package_name = "/p:OrtPackageId=\"Microsoft.ML.OnnxRuntime.Gpu\""
elif use_nuphar:
package_name = "/p:OrtPackageId=\"Microsoft.ML.OnnxRuntime.Nuphar\""
else:
pass
# set build directory based on build_dir arg
native_dir = os.path.normpath(os.path.join(source_dir, build_dir))
ort_build_dir = "/p:OnnxRuntimeBuildDirectory=\"" + native_dir + "\""
# dotnet restore
cmd_args = ["dotnet", "restore", "OnnxRuntime.CSharp.sln", "--configfile", "Nuget.CSharp.config"]
run_subprocess(cmd_args, cwd=csharp_build_dir)
# build csharp bindings and create nuget package for each config
for config in configs:
if is_linux():
native_build_dir = os.path.join(native_dir, config)
cmd_args = ["make", "install", "DESTDIR=.//nuget-staging"]
run_subprocess(cmd_args, cwd=native_build_dir)
configuration = "/p:Configuration=\"" + config + "\""
cmd_args = ["dotnet", "msbuild", "OnnxRuntime.CSharp.sln", configuration, package_name, is_linux_build,
ort_build_dir]
run_subprocess(cmd_args, cwd=csharp_build_dir)
cmd_args = [
"dotnet", "msbuild", "OnnxRuntime.CSharp.proj", "/t:CreatePackage",
package_name, configuration, execution_provider, is_linux_build, ort_build_dir]
run_subprocess(cmd_args, cwd=csharp_build_dir)
def run_csharp_tests(source_dir, build_dir, use_cuda, use_openvino, use_tensorrt, use_dnnl):
# Currently only running tests on windows.
if not is_windows():
return
csharp_source_dir = os.path.join(source_dir, 'csharp')
is_linux_build = derive_linux_build_property()
# define macros based on build args
macros = ""
if use_openvino:
macros += "USE_OPENVINO;"
if use_tensorrt:
macros += "USE_TENSORRT;"
if use_dnnl:
macros += "USE_DNNL;"
if use_cuda:
macros += "USE_CUDA;"
define_constants = ""
if macros != "":
define_constants = "/p:DefineConstants=\"" + macros + "\""
# set build directory based on build_dir arg
native_build_dir = os.path.normpath(os.path.join(source_dir, build_dir))
ort_build_dir = "/p:OnnxRuntimeBuildDirectory=\"" + native_build_dir + "\""
# Skip pretrained models test. Only run unit tests as part of the build
# add "--verbosity", "detailed" to this command if required
cmd_args = ["dotnet", "test", "test\\Microsoft.ML.OnnxRuntime.Tests\\Microsoft.ML.OnnxRuntime.Tests.csproj",
"--filter", "FullyQualifiedName!=Microsoft.ML.OnnxRuntime.Tests.InferenceTest.TestPreTrainedModels",
is_linux_build, define_constants, ort_build_dir]
run_subprocess(cmd_args, cwd=csharp_source_dir)
def is_cross_compiling_on_apple(args):
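# On macOS, building for iOS or for a CPU architecture other than the host's counts as cross-compiling.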
if not is_macOS():
return False
if args.ios:
return True
if args.osx_arch != platform.machine():
return True
return False
def build_protoc_for_host(cmake_path, source_dir, build_dir, args):
if (args.arm or args.arm64 or args.arm64ec or args.enable_windows_store) and \
not (is_windows() or is_cross_compiling_on_apple(args)):
raise BuildError(
'Building protoc for the host is currently only supported when '
'cross-compiling for ARM/ARM64/Store on a Windows host, or when cross-compiling on an Apple host')
log.info(
"Building protoc for host to be used in cross-compiled build process")
protoc_build_dir = os.path.join(os.getcwd(), build_dir, 'host_protoc')
os.makedirs(protoc_build_dir, exist_ok=True)
# Generate step
cmd_args = [
cmake_path,
os.path.join(source_dir, 'cmake', 'external', 'protobuf', 'cmake'),
'-Dprotobuf_BUILD_TESTS=OFF',
'-Dprotobuf_WITH_ZLIB_DEFAULT=OFF',
'-Dprotobuf_BUILD_SHARED_LIBS=OFF'
]
is_ninja = args.cmake_generator == 'Ninja'
if args.cmake_generator is not None and not (is_macOS() and args.use_xcode):
cmd_args += ['-G', args.cmake_generator]
if is_windows():
if not is_ninja:
cmd_args += ['-T', 'host=x64']
elif is_macOS():
if args.use_xcode:
cmd_args += ['-G', 'Xcode']
# CMake < 3.18 has a bug setting system arch to arm64 (if not specified) for Xcode 12,
# protoc for host should be built using host architecture
# Explicitly specify the CMAKE_OSX_ARCHITECTURES for x86_64 Mac.
cmd_args += ["-DCMAKE_OSX_ARCHITECTURES={}".format(
'arm64' if platform.machine() == 'arm64' else 'x86_64')]
run_subprocess(cmd_args, cwd=protoc_build_dir)
# Build step
cmd_args = [cmake_path,
"--build", protoc_build_dir,
"--config", "Release",
"--target", "protoc"]
run_subprocess(cmd_args)
# Absolute protoc path is needed for cmake
config_dir = ''
suffix = ''
if (is_windows() and not is_ninja) or (is_macOS() and args.use_xcode):
config_dir = 'Release'
if is_windows():
suffix = '.exe'
expected_protoc_path = os.path.join(protoc_build_dir, config_dir, 'protoc' + suffix)
if not os.path.exists(expected_protoc_path):
raise BuildError("Couldn't find {}. Host build of protoc failed.".format(expected_protoc_path))
return expected_protoc_path
def generate_documentation(source_dir, build_dir, configs, validate):
# Randomly choose one build config
config = next(iter(configs))
cwd = get_config_build_dir(build_dir, config)
if is_windows():
cwd = os.path.join(cwd, config)
contrib_op_doc_path = os.path.join(source_dir, 'docs', 'ContribOperators.md')
opkernel_doc_path = os.path.join(source_dir, 'docs', 'OperatorKernels.md')
shutil.copy(os.path.join(source_dir, 'tools', 'python', 'gen_contrib_doc.py'), cwd)
shutil.copy(os.path.join(source_dir, 'tools', 'python', 'gen_opkernel_doc.py'), cwd)
# limit to just com.microsoft (excludes purely internal stuff like com.microsoft.nchwc).
run_subprocess([sys.executable, 'gen_contrib_doc.py', '--output_path', contrib_op_doc_path,
'--domains', 'com.microsoft'], cwd=cwd)
# we currently limit the documentation created by a build to the CPU and CUDA EPs.
# Run get_opkernel_doc.py directly if you need/want documentation from other EPs that are enabled in the build.
run_subprocess([sys.executable, 'gen_opkernel_doc.py', '--output_path', opkernel_doc_path,
'--providers', 'CPU', 'CUDA'], cwd=cwd)
if validate:
try:
have_diff = False
def diff_file(path, regenerate_qualifiers=''):
diff = subprocess.check_output(['git', 'diff', path], cwd=source_dir)
if diff:
nonlocal have_diff
have_diff = True
log.warning('The updated document {} is different from the checked in version. '
'Please regenerate the file{}, or copy the updated version from the '
'CI build\'s published artifacts if applicable.'.format(path, regenerate_qualifiers))
log.debug('diff:\n' + str(diff))
diff_file(opkernel_doc_path, ' with CPU and CUDA execution providers enabled')
diff_file(contrib_op_doc_path)
if have_diff:
# Output for the CI to publish the updated md files as an artifact
print('##vso[task.setvariable variable=DocUpdateNeeded]true')
raise BuildError('Generated documents have diffs. Check build output for details.')
except subprocess.CalledProcessError:
raise BuildError('git diff returned non-zero error code')
def main():
log.debug("Command line arguments:\n {}".format(" ".join(shlex.quote(arg) for arg in sys.argv[1:])))
args = parse_arguments()
cmake_extra_defines = (args.cmake_extra_defines
if args.cmake_extra_defines else [])
cross_compiling = args.arm or args.arm64 or args.arm64ec or args.android
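# Cross-compiled builds can only run tests by default when targeting an Android ABI the test setup can execute (x86_64 or arm64-v8a).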
# If there was no explicit argument saying what to do, default
# to update, build and test (for native builds).
if not (args.update or args.clean or args.build or args.test):
log.debug("Defaulting to running update, build [and test for native builds].")
args.update = True
args.build = True
if cross_compiling:
args.test = args.android_abi == 'x86_64' or args.android_abi == 'arm64-v8a'
else:
args.test = True
if args.skip_tests:
args.test = False
if is_reduced_ops_build(args) and args.update:
from reduce_op_kernels import reduce_ops
reduce_ops(
config_path=args.include_ops_by_config,
enable_type_reduction=args.enable_reduced_operator_type_support,
use_cuda=args.use_cuda)
if args.use_tensorrt:
args.use_cuda = True
if args.build_wheel or args.gen_doc:
args.enable_pybind = True
if args.build_csharp or args.build_nuget or args.build_java or args.build_nodejs:
args.build_shared_lib = True
if args.build_nuget and cross_compiling:
raise BuildError('Currently nuget package creation is not supported while cross-compiling')
if args.enable_pybind and args.disable_rtti:
raise BuildError("Python bindings use typeid so you can't disable RTTI")
if args.enable_pybind and args.disable_exceptions:
raise BuildError('Python bindings require exceptions to be enabled.')
if args.minimal_build is not None and args.disable_ort_format_load:
raise BuildError('Minimal build requires loading ORT format models.')
if args.nnapi_min_api:
if not args.use_nnapi:
raise BuildError("Using --nnapi_min_api requires --use_nnapi")
if args.nnapi_min_api < 27:
raise BuildError("--nnapi_min_api should be 27+")
if args.build_wasm:
if not args.disable_wasm_exception_catching and args.disable_exceptions:
# When '--disable_exceptions' is set, we set '--disable_wasm_exception_catching' as well
args.disable_wasm_exception_catching = True
if args.test and args.disable_wasm_exception_catching and not args.minimal_build:
raise BuildError("WebAssembly tests need exception catching enabled to run if it's not minimal build")
if args.test and args.enable_wasm_debug_info:
# With flag --enable_wasm_debug_info, onnxruntime_test_all.wasm will be very huge (>1GB). This will fail
# Node.js when trying to load the .wasm file.
# To debug ONNX Runtime WebAssembly, use ONNX Runtime Web to debug ort-wasm.wasm in browsers.
raise BuildError("WebAssembly tests cannot be enabled with flag --enable_wasm_debug_info")
if args.code_coverage and not args.android:
raise BuildError("Using --code_coverage requires --android")
if args.gen_api_doc and len(args.config) != 1:
raise BuildError('Using --gen_api_doc requires a single build config')
# Disabling unit tests for VAD-F as FPGA only supports
# models with NCHW layout
if args.use_openvino == "VAD-F_FP32":
args.test = False
# Disabling unit tests for GPU and MYRIAD on nuget creation
if args.use_openvino != "CPU_FP32" and args.build_nuget:
args.test = False
configs = set(args.config)
# setup paths and directories
# cmake_path and ctest_path can be None. For example, someone who only wants to run the tests doesn't need
# to have cmake/ctest.
cmake_path = resolve_executable_path(args.cmake_path)
ctest_path = None if args.use_vstest else resolve_executable_path(
args.ctest_path)
build_dir = args.build_dir
script_dir = os.path.realpath(os.path.dirname(__file__))
source_dir = os.path.normpath(os.path.join(script_dir, "..", ".."))
# if using cuda, setup cuda paths and env vars
cuda_home, cudnn_home = setup_cuda_vars(args)
mpi_home = args.mpi_home
nccl_home = args.nccl_home
acl_home = args.acl_home
acl_libs = args.acl_libs
armnn_home = args.armnn_home
armnn_libs = args.armnn_libs
# if using tensorrt, setup tensorrt paths
tensorrt_home = setup_tensorrt_vars(args)
# if using migraphx, setup migraphx paths
migraphx_home = setup_migraphx_vars(args)
# if using rocm, setup rocm paths
rocm_home = setup_rocm_build(args, configs)
if args.update or args.build:
os.makedirs(build_dir, exist_ok=True)
log.info("Build started")
if args.update:
cmake_extra_args = []
path_to_protoc_exe = args.path_to_protoc_exe
if not args.skip_submodule_sync:
update_submodules(source_dir)
if is_windows():
cpu_arch = platform.architecture()[0]
if args.build_wasm:
cmake_extra_args = ['-G', 'Ninja']
elif args.cmake_generator == 'Ninja':
if cpu_arch == '32bit' or args.arm or args.arm64 or args.arm64ec:
raise BuildError(
"To cross-compile with Ninja, load the toolset "
"environment for the target processor (e.g. Cross "
"Tools Command Prompt for VS)")
cmake_extra_args = ['-G', args.cmake_generator]
elif args.arm or args.arm64 or args.arm64ec:
# Cross-compiling for ARM(64) architecture
# First build protoc for host to use during cross-compilation
if path_to_protoc_exe is None:
path_to_protoc_exe = build_protoc_for_host(
cmake_path, source_dir, build_dir, args)
if args.arm:
cmake_extra_args = ['-A', 'ARM']
elif args.arm64:
cmake_extra_args = ['-A', 'ARM64']
elif args.arm64ec:
cmake_extra_args = ['-A', 'ARM64EC']
cmake_extra_args += ['-G', args.cmake_generator]
# Cannot test on host build machine for cross-compiled
# builds (Override any user-defined behaviour for test if any)
if args.test:
log.warning(
"Cannot test on host build machine for cross-compiled "
"ARM(64) builds. Will skip test running after build.")
args.test = False
elif cpu_arch == '32bit' or args.x86:
cmake_extra_args = [
'-A', 'Win32', '-T', 'host=x64', '-G', args.cmake_generator
]
else:
if args.msvc_toolset:
toolset = 'host=x64,version=' + args.msvc_toolset
else:
toolset = 'host=x64'
if args.cuda_version:
toolset += ',cuda=' + args.cuda_version
cmake_extra_args = [
'-A', 'x64', '-T', toolset, '-G', args.cmake_generator
]
if args.enable_windows_store:
cmake_extra_defines.append(
'CMAKE_TOOLCHAIN_FILE=' + os.path.join(
source_dir, 'cmake', 'store_toolchain.cmake'))
if args.enable_wcos:
cmake_extra_defines.append('CMAKE_USER_MAKE_RULES_OVERRIDE=wcos_rules_override.cmake')
elif args.cmake_generator is not None and not (is_macOS() and args.use_xcode):
cmake_extra_args += ['-G', args.cmake_generator]
elif is_macOS():
if args.use_xcode:
cmake_extra_args += ['-G', 'Xcode']
if not args.ios and not args.android and \
args.osx_arch == 'arm64' and platform.machine() == 'x86_64':
if args.test:
log.warning(
"Cannot test ARM64 build on X86_64. Will skip test running after build.")
args.test = False
if args.build_wasm:
emsdk_version = args.emsdk_version
emsdk_dir = os.path.join(source_dir, "cmake", "external", "emsdk")
emsdk_file = os.path.join(emsdk_dir, "emsdk.bat") if is_windows() else os.path.join(emsdk_dir, "emsdk")
log.info("Installing emsdk...")
run_subprocess([emsdk_file, "install", emsdk_version], cwd=emsdk_dir)
log.info("Activating emsdk...")
run_subprocess([emsdk_file, "activate", emsdk_version], cwd=emsdk_dir)
if (args.android or args.ios or args.enable_windows_store or args.build_wasm
or is_cross_compiling_on_apple(args)) and args.path_to_protoc_exe is None:
# Cross-compiling for Android, iOS, and WebAssembly
path_to_protoc_exe = build_protoc_for_host(
cmake_path, source_dir, build_dir, args)
if is_ubuntu_1604():
if (args.arm or args.arm64):
raise BuildError(
"Only Windows ARM(64) cross-compiled builds supported "
"currently through this script")
if not is_docker() and not args.use_acl and not args.use_armnn:
install_python_deps()
if args.enable_pybind and is_windows():
install_python_deps(args.numpy_version)
if args.enable_onnx_tests:
setup_test_data(build_dir, configs)
if args.use_cuda and args.cuda_version is None:
if is_windows():
# cuda_version is used while generating version_info.py on Windows.
raise BuildError("cuda_version must be specified on Windows.")
else:
args.cuda_version = ""
if args.use_rocm and args.rocm_version is None:
args.rocm_version = ""
if args.build_eager_mode:
eager_root_dir = os.path.join(source_dir, "orttraining", "orttraining", "eager")
if args.eager_customop_module and not args.eager_customop_header:
raise Exception('eager_customop_header must be provided when eager_customop_module is')
elif args.eager_customop_header and not args.eager_customop_module:
raise Exception('eager_customop_module must be provided when eager_customop_header is')
def gen_ops(gen_cpp_name: str, header_file: str, ops_module: str, custom_ops: bool):
gen_cpp_scratch_name = gen_cpp_name + '.working'
print(f'Generating ORT ATen overrides (output_file: {gen_cpp_name}, header_file: {header_file},'
f'ops_module: {ops_module}), custom_ops: {custom_ops}')
cmd = [sys.executable, os.path.join(os.path.join(eager_root_dir, 'opgen', 'opgen.py')),
'--output_file', gen_cpp_scratch_name,
'--ops_module', ops_module,
'--header_file', header_file]
if custom_ops:
cmd += ["--custom_ops"]
subprocess.check_call(cmd)
import filecmp
if (not os.path.isfile(gen_cpp_name) or
not filecmp.cmp(gen_cpp_name, gen_cpp_scratch_name, shallow=False)):
os.rename(gen_cpp_scratch_name, gen_cpp_name)
else:
os.remove(gen_cpp_scratch_name)
def gen_ort_ops():
# generate native aten ops
import torch
regdecs_path = os.path.join(os.path.dirname(torch.__file__), 'include/ATen/RegistrationDeclarations.h')
ops_module = os.path.join(eager_root_dir, 'opgen/opgen/atenops.py')
gen_ops(os.path.join(eager_root_dir, 'ort_aten.g.cpp'), regdecs_path, ops_module, False)
# generate custom ops
if not args.eager_customop_header:
args.eager_customop_header = os.path.realpath(os.path.join(
eager_root_dir,
"opgen",
"CustomOpDeclarations.h"))
if not args.eager_customop_module:
args.eager_customop_module = os.path.join(eager_root_dir, 'opgen/opgen/custom_ops.py')
gen_ops(os.path.join(eager_root_dir, 'ort_customops.g.cpp'),
args.eager_customop_header, args.eager_customop_module, True)
gen_ort_ops()
if args.enable_external_custom_op_schemas and not is_linux():
raise BuildError("Registering external custom op schemas is only supported on Linux.")
generate_build_tree(
cmake_path, source_dir, build_dir, cuda_home, cudnn_home, rocm_home, mpi_home, nccl_home,
tensorrt_home, migraphx_home, acl_home, acl_libs, armnn_home, armnn_libs,
path_to_protoc_exe, configs, cmake_extra_defines, args, cmake_extra_args)
if args.clean:
clean_targets(cmake_path, build_dir, configs)
# if using DML, perform initial nuget package restore
setup_dml_build(args, cmake_path, build_dir, configs)
if args.build:
if args.parallel < 0:
raise BuildError("Invalid parallel job count: {}".format(args.parallel))
num_parallel_jobs = os.cpu_count() if args.parallel == 0 else args.parallel
build_targets(args, cmake_path, build_dir, configs, num_parallel_jobs, args.target)
if args.test:
run_onnxruntime_tests(args, source_dir, ctest_path, build_dir, configs)
if args.enable_pybind and not args.skip_onnx_tests and args.use_nuphar:
nuphar_run_python_tests(build_dir, configs)
# run node.js binding tests
if args.build_nodejs and not args.skip_nodejs_tests:
nodejs_binding_dir = os.path.normpath(os.path.join(source_dir, "js", "node"))
run_nodejs_tests(nodejs_binding_dir)
# Build packages after running the tests.
# NOTE: if you have a test that relies on a file which only gets copied/generated during the packaging step, it could
# fail unexpectedly. Similarly, if your packaging step forgot to copy a file into the package, we won't know about it
# either.
if args.build:
if args.build_wheel:
nightly_build = bool(os.getenv('NIGHTLY_BUILD') == '1')
default_training_package_device = bool(os.getenv('DEFAULT_TRAINING_PACKAGE_DEVICE') == '1')
build_python_wheel(
source_dir,
build_dir,
configs,
args.use_cuda,
args.cuda_version,
args.use_rocm,
args.rocm_version,
args.use_dnnl,
args.use_tensorrt,
args.use_openvino,
args.use_nuphar,
args.use_vitisai,
args.use_acl,
args.use_armnn,
args.use_dml,
args.wheel_name_suffix,
args.enable_training,
nightly_build=nightly_build,
default_training_package_device=default_training_package_device,
use_ninja=(args.cmake_generator == 'Ninja'),
build_eager_mode=args.build_eager_mode
)
if args.build_nuget:
build_nuget_package(
source_dir,
build_dir,
configs,
args.use_cuda,
args.use_openvino,
args.use_tensorrt,
args.use_dnnl,
args.use_nuphar
)
if args.test and args.build_nuget:
run_csharp_tests(
source_dir,
build_dir,
args.use_cuda,
args.use_openvino,
args.use_tensorrt,
args.use_dnnl)
if args.gen_doc and (args.build or args.test):
generate_documentation(source_dir, build_dir, configs, args.gen_doc == 'validate')
if args.gen_api_doc and (args.build or args.test):
print('Generating Python doc for ORTModule...')
docbuild_dir = os.path.join(source_dir, 'tools', 'doc')
run_subprocess(['bash', 'builddoc.sh', os.path.dirname(sys.executable),
source_dir, build_dir, args.config[0]], cwd=docbuild_dir)
log.info("Build complete")
if __name__ == "__main__":
try:
sys.exit(main())
except BaseError as e:
log.error(str(e))
sys.exit(1)
|
[] |
[] |
[
"ANDROID_NDK_HOME",
"MIGRAPHX_HOME",
"DEFAULT_TRAINING_PACKAGE_DEVICE",
"SYSTEM_COLLECTIONURI",
"CUDA_HOME",
"ORT_TENSORRT_MAX_WORKSPACE_SIZE",
"LD_LIBRARY_PATH",
"ANDROID_HOME",
"ORT_TENSORRT_MIN_SUBGRAPH_SIZE",
"CUDNN_HOME",
"TENSORRT_HOME",
"NIGHTLY_BUILD",
"Build_SourceVersion",
"ORT_TENSORRT_MAX_PARTITION_ITERATIONS",
"ORT_TENSORRT_FP16_ENABLE",
"Build_BuildNumber",
"PATH"
] |
[]
|
["ANDROID_NDK_HOME", "MIGRAPHX_HOME", "DEFAULT_TRAINING_PACKAGE_DEVICE", "SYSTEM_COLLECTIONURI", "CUDA_HOME", "ORT_TENSORRT_MAX_WORKSPACE_SIZE", "LD_LIBRARY_PATH", "ANDROID_HOME", "ORT_TENSORRT_MIN_SUBGRAPH_SIZE", "CUDNN_HOME", "TENSORRT_HOME", "NIGHTLY_BUILD", "Build_SourceVersion", "ORT_TENSORRT_MAX_PARTITION_ITERATIONS", "ORT_TENSORRT_FP16_ENABLE", "Build_BuildNumber", "PATH"]
|
python
| 17 | 0 | |
tests/dovecot_sasl_test.go
|
//+build integration
//+build darwin dragonfly freebsd linux netbsd openbsd solaris
// only posix systems ^
package tests_test
import (
"bufio"
"errors"
"flag"
"io/ioutil"
"os"
"os/exec"
"os/user"
"path/filepath"
"strings"
"syscall"
"testing"
"github.com/foxcpp/maddy/tests"
)
var DovecotExecutable string
func init() {
flag.StringVar(&DovecotExecutable, "integration.dovecot", "dovecot", "path to dovecot executable for interop tests")
}
const dovecotConf = `base_dir = $ROOT/run/
log_path = /dev/stderr
ssl = no
default_internal_user = $USER
default_internal_group = $GROUP
default_login_user = $USER
passdb {
driver = passwd-file
args = $ROOT/passwd
}
userdb {
driver = passwd-file
args = $ROOT/passwd
}
service auth {
unix_listener auth {
mode = 0666
}
}
# Dovecot refuses to start without protocols, so we need to give it one.
protocols = imap
service imap-login {
chroot =
inet_listener imap {
address = 127.0.0.1
port = 0
}
}
service anvil {
chroot =
}
# Turn on debugging information, to help troubleshooting issues.
auth_verbose = yes
auth_debug = yes
auth_debug_passwords = yes
auth_verbose_passwords = yes
mail_debug = yes
`
const dovecotPasswd = `tester:{plain}123456:1000:1000::/home/user`
func runDovecot(t *testing.T) (string, *exec.Cmd) {
dovecotExec, err := exec.LookPath(DovecotExecutable)
if err != nil {
if errors.Is(err, exec.ErrNotFound) {
t.Skip("No Dovecot executable found, skipping interop. tests")
}
t.Fatal(err)
}
tempDir, err := ioutil.TempDir("", "maddy-dovecot-interop-")
if err != nil {
t.Fatal(err)
}
curUser, err := user.Current()
if err != nil {
t.Fatal(err)
}
curGroup, err := user.LookupGroupId(curUser.Gid)
if err != nil {
t.Fatal(err)
}
dovecotConf := strings.NewReplacer(
"$ROOT", tempDir,
"$USER", curUser.Username,
"$GROUP", curGroup.Name).Replace(dovecotConf)
err = ioutil.WriteFile(filepath.Join(tempDir, "dovecot.conf"), []byte(dovecotConf), os.ModePerm)
if err != nil {
t.Fatal(err)
}
err = ioutil.WriteFile(filepath.Join(tempDir, "passwd"), []byte(dovecotPasswd), os.ModePerm)
if err != nil {
t.Fatal(err)
}
cmd := exec.Command(dovecotExec, "-F", "-c", filepath.Join(tempDir, "dovecot.conf"))
stderr, err := cmd.StderrPipe()
if err != nil {
t.Fatal(err)
}
if err := cmd.Start(); err != nil {
t.Fatal(err)
}
ready := make(chan struct{}, 1)
go func() {
scnr := bufio.NewScanner(stderr)
for scnr.Scan() {
line := scnr.Text()
// One of the messages printed near the end of initialization.
if strings.Contains(line, "master: Error: file_dotlock_open(/var/lib/dovecot/instances) failed: Permission denied") {
ready <- struct{}{}
}
t.Log("dovecot:", line)
}
if err := scnr.Err(); err != nil {
t.Log("stderr I/O error:", err)
}
}()
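// Block until Dovecot's log output contains the readiness marker scanned for above.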
<-ready
return tempDir, cmd
}
func cleanDovecot(t *testing.T, tempDir string, cmd *exec.Cmd) {
cmd.Process.Signal(syscall.SIGTERM)
if !t.Failed() {
os.RemoveAll(tempDir)
} else {
t.Log("Dovecot directory is not deleted:", tempDir)
}
}
func TestDovecotSASLClient(tt *testing.T) {
tt.Parallel()
dovecotDir, cmd := runDovecot(tt)
defer cleanDovecot(tt, dovecotDir, cmd)
t := tests.NewT(tt)
t.DNS(nil)
t.Port("smtp")
t.Env("DOVECOT_SASL_SOCK=" + filepath.Join(dovecotDir, "run", "auth-client"))
t.Config(`
smtp tcp://127.0.0.1:{env:TEST_PORT_smtp} {
hostname mx.maddy.test
tls off
auth dovecot_sasl unix://{env:DOVECOT_SASL_SOCK}
deliver_to dummy
}`)
t.Run(1)
defer t.Close()
c := t.Conn("smtp")
defer c.Close()
c.SMTPNegotation("localhost", nil, nil)
c.Writeln("AUTH PLAIN AHRlc3QAMTIzNDU2") // 0x00 test 0x00 123456 (invalid user)
c.ExpectPattern("535 *")
c.Writeln("AUTH PLAIN AHRlc3RlcgAxMjM0NQ==") // 0x00 tester 0x00 12345 (invalid password)
c.ExpectPattern("535 *")
c.Writeln("AUTH PLAIN AHRlc3RlcgAxMjM0NTY=") // 0x00 tester 0x00 123456
c.ExpectPattern("235 *")
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
backend/wsgi.py
|
"""
WSGI config for backend project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "backend.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
api/insights/insights/infrastructure/mysql/orm/mapper_base.py
|
import os
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from .base import Base
class MapperBase():
user = os.getenv("MYSQL_USER")
key = os.getenv("MYSQL_KEY")
host = os.getenv("MYSQL_HOST")
port = os.getenv("MYSQL_PORT")
def __init__(self, database):
self.db = database
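# The special database name 'test' selects an in-memory SQLite engine; any other name builds a MySQL connection URL from the settings above.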
if database == 'test':
self.url = 'sqlite:///:memory:'
else:
self.url = \
'mysql+mysqlconnector://{}:{}@{}:{}/{}'.format(
self.user,
self.key,
self.host,
self.port,
self.db,
)
self.engine = create_engine(
self.url,
connect_args={'use_pure': True}
)
self.session = sessionmaker(bind=self.engine)
self.base = Base
def get_base(self):
return self.base
def get_engine(self):
return self.engine
def get_session(self):
return self.session()
|
[] |
[] |
[
"MYSQL_USER",
"MYSQL_KEY",
"MYSQL_PORT",
"MYSQL_HOST"
] |
[]
|
["MYSQL_USER", "MYSQL_KEY", "MYSQL_PORT", "MYSQL_HOST"]
|
python
| 4 | 0 | |
predefinedImages/tasks/slack-message/main.go
|
package main
import (
"fmt"
"log"
"os"
"github.com/slack-go/slack"
)
func main() {
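// The bot access token and message parameters are provided through environment variables by the task runner.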
access_token := os.Getenv("INTEGRATION_ACCESS_TOKEN")
text := os.Getenv("text")
target := os.Getenv("target_id")
err := SendSlackMessage(text, target, access_token)
if err != nil {
log.Println(err)
return
}
fmt.Println("message sent")
}
func SendSlackMessage(text, targetId, botAccessToken string) error {
api := slack.New(botAccessToken)
_, _, err := api.PostMessage(
targetId,
slack.MsgOptionText(text, false),
)
if err != nil {
return err
}
return nil
}
|
[
"\"INTEGRATION_ACCESS_TOKEN\"",
"\"text\"",
"\"target_id\""
] |
[] |
[
"INTEGRATION_ACCESS_TOKEN",
"target_id",
"text"
] |
[]
|
["INTEGRATION_ACCESS_TOKEN", "target_id", "text"]
|
go
| 3 | 0 | |
hack/macros.py
|
import os
def define_env(env):
@env.macro
def feature(alpha="", beta="", stable=""):
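# Builds a collapsible "??? info" admonition (MkDocs Material syntax) listing when the feature reached each maturity level.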
versions = []
descriptions = []
if alpha != "":
versions.append('<span class="feature-alpha">alpha</span> since Knative v{version}'.format(version=alpha))
descriptions.append(' - <span class="feature-alpha">alpha</span> features are experimental, and may change or be removed without notice.')
if beta != "":
versions.append('<span class="feature-beta">beta</span> since Knative v{version}'.format(version=beta))
descriptions.append(' - <span class="feature-beta">beta</span> features are well-tested and enabling them is considered safe. Support for the overall feature will not be dropped, though details may change in incompatible ways.')
if stable != "":
versions.append('<span class="feature-stable">stable</span> since Knative v{version}'.format(version=stable))
descriptions.append(' - <span class="feature-stable">stable</span> features will be maintained for many future versions.')
return '??? info "Feature Availability: ' + ', '.join(versions) + '"\n' + '\n'.join(descriptions)
@env.macro
def artifact(repo, file, org="knative"):
"""Generates a download link for the current release version.
When the version in the KNATIVE_VERSION environment variable is
empty this links to googlestorage, otherwise it links via
the matching release in github.
"""
version = os.environ.get("KNATIVE_VERSION")
if not version:
return 'https://storage.googleapis.com/{org}-nightly/{repo}/latest/{file}'.format(
repo=repo,
file=file,
org=org)
else:
return 'https://github.com/{org}/{repo}/releases/download/{version}/{file}'.format(
repo=repo,
file=file,
version=version,
org=org)
|
[] |
[] |
[
"KNATIVE_VERSION"
] |
[]
|
["KNATIVE_VERSION"]
|
python
| 1 | 0 | |
tests/test_filesystem.py
|
import os
import boto3
import fsspec
import pytest
from moto import mock_s3
from datasets.filesystems import (
COMPRESSION_FILESYSTEMS,
HfFileSystem,
S3FileSystem,
extract_path_from_uri,
is_remote_filesystem,
)
from .utils import require_lz4, require_zstandard
@pytest.fixture(scope="function")
def aws_credentials():
"""Mocked AWS Credentials for moto."""
os.environ["AWS_ACCESS_KEY_ID"] = "fake_access_key"
os.environ["AWS_SECRET_ACCESS_KEY"] = "fake_secret_key"
os.environ["AWS_SECURITY_TOKEN"] = "fake_secrurity_token"
os.environ["AWS_SESSION_TOKEN"] = "fake_session_token"
@pytest.fixture(scope="function")
def s3(aws_credentials):
with mock_s3():
yield boto3.client("s3", region_name="us-east-1")
def test_extract_path_from_uri(s3):
mock_bucket = "moto-mock-s3-bucket"
# We need to create the bucket since this is all in Moto's 'virtual' AWS account
s3.create_bucket(Bucket=mock_bucket)
dataset_path = f"s3://{mock_bucket}"
dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path.startswith("s3://") is False
dataset_path = "./local/path"
new_dataset_path = extract_path_from_uri(dataset_path)
assert dataset_path == new_dataset_path
def test_is_remote_filesystem():
fs = S3FileSystem(key="fake_access_key", secret="fake_secret")
is_remote = is_remote_filesystem(fs)
assert is_remote is True
fs = fsspec.filesystem("file")
is_remote = is_remote_filesystem(fs)
assert is_remote is False
@require_zstandard
@require_lz4
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
input_path = str(input_paths[compression_fs_class.protocol])
fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
assert isinstance(fs, compression_fs_class)
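# The compression filesystem exposes a single file named after the archive with the compression extension stripped.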
expected_filename = os.path.basename(input_path)
expected_filename = expected_filename[: expected_filename.rindex(".")]
assert fs.ls("/") == [expected_filename]
with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
assert f.read() == expected_file.read()
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
assert sorted(hffs.glob("*")) == [".gitattributes", "data.txt"]
with open(text_file) as f:
assert hffs.open("data.txt", "r").read() == f.read()
|
[] |
[] |
[
"AWS_SESSION_TOKEN",
"AWS_SECURITY_TOKEN",
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY"
] |
[]
|
["AWS_SESSION_TOKEN", "AWS_SECURITY_TOKEN", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"]
|
python
| 4 | 0 | |
web/server/codechecker_server/server.py
|
# -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
"""
Main server starts a http server which handles Thrift client
and browser requests.
"""
import atexit
import datetime
import errno
from hashlib import sha256
from multiprocessing.pool import ThreadPool
import os
import posixpath
from random import sample
import shutil
import signal
import socket
import ssl
import sys
import stat
import urllib
from http.server import HTTPServer, BaseHTTPRequestHandler, \
SimpleHTTPRequestHandler
from sqlalchemy.orm import sessionmaker
from thrift.protocol import TJSONProtocol
from thrift.transport import TTransport
from thrift.Thrift import TApplicationException
from thrift.Thrift import TMessageType
from codechecker_api_shared.ttypes import DBStatus
from codechecker_api.Authentication_v6 import \
codeCheckerAuthentication as AuthAPI_v6
from codechecker_api.Configuration_v6 import \
configurationService as ConfigAPI_v6
from codechecker_api.codeCheckerDBAccess_v6 import \
codeCheckerDBAccess as ReportAPI_v6
from codechecker_api.ProductManagement_v6 import \
codeCheckerProductService as ProductAPI_v6
from codechecker_common.logger import get_logger
from codechecker_web.shared.version import get_version_str
from . import instance_manager
from . import permissions
from . import routing
from . import session_manager
from .tmp import get_tmp_dir_hash
from .api.authentication import ThriftAuthHandler as AuthHandler_v6
from .api.config_handler import ThriftConfigHandler as ConfigHandler_v6
from .api.product_server import ThriftProductHandler as ProductHandler_v6
from .api.report_server import ThriftRequestHandler as ReportHandler_v6
from .database import database, db_cleanup
from .database.config_db_model import Product as ORMProduct, \
Configuration as ORMConfiguration
from .database.database import DBSession
from .database.run_db_model import IDENTIFIER as RUN_META, Run, RunLock
LOG = get_logger('server')
class RequestHandler(SimpleHTTPRequestHandler):
"""
Handle thrift and browser requests
Simply modified and extended version of SimpleHTTPRequestHandler
"""
auth_session = None
def __init__(self, request, client_address, server):
BaseHTTPRequestHandler.__init__(self,
request,
client_address,
server)
def log_message(self, msg_format, *args):
""" Silencing http server. """
return
def send_thrift_exception(self, error_msg, iprot, oprot, otrans):
"""
Send an exception response to the client in a proper format which can
be parsed by the Thrift clients expecting JSON responses.
"""
ex = TApplicationException(TApplicationException.INTERNAL_ERROR,
error_msg)
fname, _, seqid = iprot.readMessageBegin()
oprot.writeMessageBegin(fname, TMessageType.EXCEPTION, seqid)
ex.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
result = otrans.getvalue()
self.send_response(200)
self.send_header("content-type", "application/x-thrift")
self.send_header("Content-Length", len(result))
self.end_headers()
self.wfile.write(result)
def __check_session_cookie(self):
"""
Check the CodeChecker privileged access cookie in the request headers.
:returns: A session_manager._Session object if a correct, valid session
cookie was found in the headers. None, otherwise.
"""
if not self.server.manager.is_enabled:
return None
session = None
# Check if the user has presented a privileged access cookie.
cookies = self.headers.get("Cookie")
if cookies:
split = cookies.split("; ")
for cookie in split:
values = cookie.split("=")
if len(values) == 2 and \
values[0] == session_manager.SESSION_COOKIE_NAME:
session = self.server.manager.get_session(values[1])
if session and session.is_alive:
# If a valid session token was found and it can still be used,
# mark that the user's last access to the server was the
# request that resulted in the execution of this function.
session.revalidate()
return session
else:
# If the user's access cookie is no longer usable (invalid),
# present an error.
client_host, client_port, is_ipv6 = \
RequestHandler._get_client_host_port(self.client_address)
LOG.debug("%s:%s Invalid access, credentials not found - "
"session refused",
client_host if not is_ipv6 else '[' + client_host + ']',
str(client_port))
return None
def __has_access_permission(self, product):
"""
Returns True if the currently authenticated user has access permission
on the given product.
"""
with DBSession(self.server.config_session) as session:
perm_args = {'productID': product.id,
'config_db_session': session}
return permissions.require_permission(
permissions.PRODUCT_ACCESS,
perm_args,
self.auth_session)
def __handle_readiness(self):
""" Handle readiness probe. """
try:
cfg_sess = self.server.config_session()
cfg_sess.query(ORMConfiguration).count()
self.send_response(200)
self.end_headers()
self.wfile.write(b'CODECHECKER_SERVER_IS_READY')
except Exception:
self.send_response(500)
self.end_headers()
self.wfile.write(b'CODECHECKER_SERVER_IS_NOT_READY')
finally:
if cfg_sess:
cfg_sess.close()
cfg_sess.commit()
def __handle_liveness(self):
""" Handle liveness probe. """
self.send_response(200)
self.end_headers()
self.wfile.write(b'CODECHECKER_SERVER_IS_LIVE')
def end_headers(self):
# Sending the authentication cookie
# in every response if any.
        # This will update the session cookie
# on the clients to the newest.
if self.auth_session:
token = self.auth_session.token
if token:
self.send_header(
"Set-Cookie",
"{0}={1}; Path=/".format(
session_manager.SESSION_COOKIE_NAME,
token))
# Set the current user name in the header.
user_name = self.auth_session.user
if user_name:
self.send_header("X-User", user_name)
SimpleHTTPRequestHandler.end_headers(self)
@staticmethod
def _get_client_host_port(address):
"""
Returns the host and port of the request's address, and whether it
was an IPv6 address.
"""
if len(address) == 2:
return address[0], address[1], False
if len(address) == 4:
return address[0], address[1], True
raise IndexError("Invalid address tuple given.")
def do_GET(self):
""" Handles the SPA browser access (GET requests).
It will do the following steps:
- for requests for index.html ('/'), just respond with the file.
- if the requested path contains a product endpoint name
('/prod/app.js', '/prod/runs'), remove the endpoint from the path.
- if the requested path is a valid file (e.g: 'app.js'), respond with
the file.
- otherwise (e.g: 'runs') respond with index.html.
"""
client_host, client_port, is_ipv6 = \
RequestHandler._get_client_host_port(self.client_address)
self.auth_session = self.__check_session_cookie()
username = self.auth_session.user if self.auth_session else 'Anonymous'
LOG.debug("%s:%s -- [%s] GET %s",
client_host if not is_ipv6 else '[' + client_host + ']',
client_port, username, self.path)
if self.path == '/':
self.path = 'index.html'
SimpleHTTPRequestHandler.do_GET(self)
return
if self.path == '/live':
self.__handle_liveness()
return
if self.path == '/ready':
self.__handle_readiness()
return
product_endpoint, _ = routing.split_client_GET_request(self.path)
# Check that path contains a product endpoint.
if product_endpoint is not None and product_endpoint != '':
self.path = self.path.replace(
"{0}/".format(product_endpoint), "", 1)
if self.path == '/':
self.path = "index.html"
# Check that the given path is a file.
if not os.path.exists(self.translate_path(self.path)):
self.path = 'index.html'
SimpleHTTPRequestHandler.do_GET(self)
def __check_prod_db(self, product_endpoint):
"""
Check the product database status.
Try to reconnect in some cases.
Returns if everything is ok with the database or throw an exception
with the error message if something is wrong with the database.
"""
product = self.server.get_product(product_endpoint)
if not product:
raise ValueError(
"The product with the given endpoint '{0}' does "
"not exist!".format(product_endpoint))
if product.db_status == DBStatus.OK:
# No reconnect needed.
return product
# Try to reconnect in these cases.
# Do not try to reconnect if there is a schema mismatch.
# If the product is not connected, try reconnecting...
if product.db_status in [DBStatus.FAILED_TO_CONNECT,
DBStatus.MISSING,
DBStatus.SCHEMA_INIT_ERROR]:
LOG.error("Request's product '%s' is not connected! "
"Attempting reconnect...", product.endpoint)
product.connect()
if product.db_status != DBStatus.OK:
# If the reconnection fails send an error to the user.
LOG.debug("Product reconnection failed.")
error_msg = "'{0}' database connection " \
"failed!".format(product.endpoint)
LOG.error(error_msg)
raise ValueError(error_msg)
else:
# Send an error to the user.
db_stat = DBStatus._VALUES_TO_NAMES.get(product.db_status)
error_msg = "'{0}' database connection " \
"failed. DB status: {1}".format(product.endpoint,
str(db_stat))
LOG.error(error_msg)
raise ValueError(error_msg)
return product
def do_POST(self):
"""
Handles POST queries, which are usually Thrift messages.
"""
client_host, client_port, is_ipv6 = \
RequestHandler._get_client_host_port(self.client_address)
self.auth_session = self.__check_session_cookie()
LOG.info("%s:%s -- [%s] POST %s",
client_host if not is_ipv6 else '[' + client_host + ']',
client_port,
self.auth_session.user if self.auth_session else "Anonymous",
self.path)
# Create new thrift handler.
checker_md_docs = self.server.checker_md_docs
checker_md_docs_map = self.server.checker_md_docs_map
version = self.server.version
protocol_factory = TJSONProtocol.TJSONProtocolFactory()
input_protocol_factory = protocol_factory
output_protocol_factory = protocol_factory
itrans = TTransport.TFileObjectTransport(self.rfile)
itrans = TTransport.TBufferedTransport(itrans,
int(self.headers[
'Content-Length']))
otrans = TTransport.TMemoryBuffer()
iprot = input_protocol_factory.getProtocol(itrans)
oprot = output_protocol_factory.getProtocol(otrans)
if self.server.manager.is_enabled and \
not self.path.endswith(('/Authentication',
'/Configuration')) and \
not self.auth_session:
# Bail out if the user is not authenticated...
# This response has the possibility of melting down Thrift clients,
# but the user is expected to properly authenticate first.
LOG.debug("%s:%s Invalid access, credentials not found "
"- session refused.",
client_host if not is_ipv6 else '[' + client_host + ']',
str(client_port))
self.send_thrift_exception("Error code 401: Unauthorized!", iprot,
oprot, otrans)
return
# Authentication is handled, we may now respond to the user.
try:
product_endpoint, api_ver, request_endpoint = \
routing.split_client_POST_request(self.path)
if product_endpoint is None and api_ver is None and\
request_endpoint is None:
raise Exception("Invalid request endpoint path.")
product = None
if product_endpoint:
# The current request came through a product route, and not
# to the main endpoint.
product = self.__check_prod_db(product_endpoint)
version_supported = routing.is_supported_version(api_ver)
if version_supported:
major_version, _ = version_supported
if major_version == 6:
if request_endpoint == 'Authentication':
auth_handler = AuthHandler_v6(
self.server.manager,
self.auth_session,
self.server.config_session)
processor = AuthAPI_v6.Processor(auth_handler)
elif request_endpoint == 'Configuration':
conf_handler = ConfigHandler_v6(
self.auth_session,
self.server.config_session)
processor = ConfigAPI_v6.Processor(conf_handler)
elif request_endpoint == 'Products':
prod_handler = ProductHandler_v6(
self.server,
self.auth_session,
self.server.config_session,
product,
version)
processor = ProductAPI_v6.Processor(prod_handler)
elif request_endpoint == 'CodeCheckerService':
# This endpoint is a product's report_server.
if not product:
error_msg = "Requested CodeCheckerService on a " \
"nonexistent product: '{0}'." \
.format(product_endpoint)
LOG.error(error_msg)
raise ValueError(error_msg)
if product_endpoint:
# The current request came through a
# product route, and not to the main endpoint.
product = self.__check_prod_db(product_endpoint)
acc_handler = ReportHandler_v6(
self.server.manager,
product.session_factory,
product,
self.auth_session,
self.server.config_session,
checker_md_docs,
checker_md_docs_map,
version,
self.server.context)
processor = ReportAPI_v6.Processor(acc_handler)
else:
LOG.debug("This API endpoint does not exist.")
error_msg = "No API endpoint named '{0}'." \
.format(self.path)
raise ValueError(error_msg)
else:
error_msg = "The API version you are using is not supported " \
"by this server (server API version: {0})!".format(
get_version_str())
self.send_thrift_exception(error_msg, iprot, oprot, otrans)
return
processor.process(iprot, oprot)
result = otrans.getvalue()
self.send_response(200)
self.send_header("content-type", "application/x-thrift")
self.send_header("Content-Length", len(result))
self.end_headers()
self.wfile.write(result)
return
except Exception as exn:
LOG.warning(str(exn))
import traceback
traceback.print_exc()
cstringio_buf = itrans.cstringio_buf.getvalue()
if cstringio_buf:
itrans = TTransport.TMemoryBuffer(cstringio_buf)
iprot = input_protocol_factory.getProtocol(itrans)
self.send_thrift_exception(str(exn), iprot, oprot, otrans)
return
def list_directory(self, path):
""" Disable directory listing. """
self.send_error(405, "No permission to list directory")
return None
def translate_path(self, path):
"""
Modified version from SimpleHTTPRequestHandler.
Path is set to www_root.
"""
# Abandon query parameters.
path = path.split('?', 1)[0]
path = path.split('#', 1)[0]
path = posixpath.normpath(urllib.parse.unquote(path))
words = path.split('/')
words = [_f for _f in words if _f]
path = self.server.www_root
for word in words:
_, word = os.path.splitdrive(word)
_, word = os.path.split(word)
if word in (os.curdir, os.pardir):
continue
path = os.path.join(path, word)
return path
class Product(object):
"""
Represents a product, which is a distinct storage of analysis reports in
a separate database (and database connection) with its own access control.
"""
# The amount of SECONDS that need to pass after the last unsuccessful
# connect() call so the next could be made.
CONNECT_RETRY_TIMEOUT = 300
def __init__(self, orm_object, context, check_env):
"""
Set up a new managed product object for the configuration given.
"""
self.__id = orm_object.id
self.__endpoint = orm_object.endpoint
self.__connection_string = orm_object.connection
self.__display_name = orm_object.display_name
self.__driver_name = None
self.__context = context
self.__check_env = check_env
self.__engine = None
self.__session = None
self.__db_status = DBStatus.MISSING
self.__last_connect_attempt = None
@property
def id(self):
return self.__id
@property
def endpoint(self):
"""
Returns the accessible URL endpoint of the product.
"""
return self.__endpoint
@property
def name(self):
"""
Returns the display name of the product.
"""
return self.__display_name
@property
def session_factory(self):
"""
Returns the session maker on this product's database engine which
can be used to initiate transactional connections.
"""
return self.__session
@property
def driver_name(self):
"""
Returns the name of the sql driver (sqlite, postgres).
"""
return self.__driver_name
@property
def db_status(self):
"""
Returns the status of the database which belongs to this product.
Call connect to update it.
"""
return self.__db_status
@property
def last_connection_failure(self):
"""
Returns the reason behind the last executed connection attempt's
failure.
"""
return self.__last_connect_attempt[1] if self.__last_connect_attempt \
else None
def connect(self, init_db=False):
"""
Initiates the actual connection to the database configured for the
product.
Each time the connect is called the db_status is updated.
"""
LOG.debug("Checking '%s' database.", self.endpoint)
sql_server = database.SQLServer.from_connection_string(
self.__connection_string,
RUN_META,
self.__context.run_migration_root,
interactive=False,
env=self.__check_env)
if isinstance(sql_server, database.PostgreSQLServer):
self.__driver_name = 'postgresql'
elif isinstance(sql_server, database.SQLiteDatabase):
self.__driver_name = 'sqlite'
try:
LOG.debug("Trying to connect to the database")
# Create the SQLAlchemy engine.
self.__engine = sql_server.create_engine()
LOG.debug(self.__engine)
self.__session = sessionmaker(bind=self.__engine)
self.__engine.execute('SELECT 1')
self.__db_status = sql_server.check_schema()
self.__last_connect_attempt = None
if self.__db_status == DBStatus.SCHEMA_MISSING and init_db:
LOG.debug("Initializing new database schema.")
self.__db_status = sql_server.connect(init_db)
except Exception as ex:
LOG.exception("The database for product '%s' cannot be"
" connected to.", self.endpoint)
self.__db_status = DBStatus.FAILED_TO_CONNECT
self.__last_connect_attempt = (datetime.datetime.now(), str(ex))
def get_details(self):
"""
Get details for a product from the database.
It may throw different error messages depending on the used SQL driver
adapter in case of connection error.
"""
with DBSession(self.session_factory) as run_db_session:
run_locks = run_db_session.query(RunLock.name) \
.filter(RunLock.locked_at.isnot(None)) \
.all()
runs_in_progress = set([run_lock[0] for run_lock in run_locks])
num_of_runs = run_db_session.query(Run).count()
latest_store_to_product = ""
if num_of_runs:
last_updated_run = run_db_session.query(Run) \
.order_by(Run.date.desc()) \
.limit(1) \
.one_or_none()
latest_store_to_product = last_updated_run.date
return num_of_runs, runs_in_progress, latest_store_to_product
def teardown(self):
"""
Disposes the database connection to the product's backend.
"""
if self.__db_status == DBStatus.FAILED_TO_CONNECT:
return
self.__engine.dispose()
self.__session = None
self.__engine = None
def cleanup_run_db(self):
"""
Cleanup the run database which belongs to this product.
"""
LOG.info("Garbage collection for product '%s' started...",
self.endpoint)
db_cleanup.remove_expired_run_locks(self.session_factory)
db_cleanup.remove_unused_files(self.session_factory)
db_cleanup.upgrade_severity_levels(self.session_factory,
self.__context.severity_map)
LOG.info("Garbage collection finished.")
return True
class CCSimpleHttpServer(HTTPServer):
"""
Simple http server to handle requests from the clients.
"""
daemon_threads = False
address_family = socket.AF_INET # IPv4
def __init__(self,
server_address,
RequestHandlerClass,
config_directory,
product_db_sql_server,
skip_db_cleanup,
pckg_data,
context,
check_env,
manager):
LOG.debug("Initializing HTTP server...")
self.config_directory = config_directory
self.www_root = pckg_data['www_root']
self.doc_root = pckg_data['doc_root']
self.checker_md_docs = pckg_data['checker_md_docs']
self.checker_md_docs_map = pckg_data['checker_md_docs_map']
self.version = pckg_data['version']
self.context = context
self.check_env = check_env
self.manager = manager
self.__products = {}
# Create a database engine for the configuration database.
LOG.debug("Creating database engine for CONFIG DATABASE...")
self.__engine = product_db_sql_server.create_engine()
self.config_session = sessionmaker(bind=self.__engine)
self.manager.set_database_connection(self.config_session)
# Load the initial list of products and set up the server.
cfg_sess = self.config_session()
permissions.initialise_defaults('SYSTEM', {
'config_db_session': cfg_sess
})
products = cfg_sess.query(ORMProduct).all()
for product in products:
self.add_product(product)
permissions.initialise_defaults('PRODUCT', {
'config_db_session': cfg_sess,
'productID': product.id
})
cfg_sess.commit()
cfg_sess.close()
if not skip_db_cleanup:
for endpoint, product in self.__products.items():
if not product.cleanup_run_db():
LOG.warning("Cleaning database for %s Failed.", endpoint)
worker_processes = self.manager.worker_processes
self.__request_handlers = ThreadPool(processes=worker_processes)
try:
HTTPServer.__init__(self, server_address,
RequestHandlerClass,
bind_and_activate=True)
ssl_key_file = os.path.join(config_directory, "key.pem")
ssl_cert_file = os.path.join(config_directory, "cert.pem")
if os.path.isfile(ssl_key_file) and os.path.isfile(ssl_cert_file):
LOG.info("Initiating SSL. Server listening on secure socket.")
LOG.debug("Using cert file: %s", ssl_cert_file)
LOG.debug("Using key file: %s", ssl_key_file)
self.socket = ssl.wrap_socket(self.socket, server_side=True,
keyfile=ssl_key_file,
certfile=ssl_cert_file)
else:
LOG.info("Searching for SSL key at %s, cert at %s, "
"not found...", ssl_key_file, ssl_cert_file)
LOG.info("Falling back to simple, insecure HTTP.")
except Exception as e:
LOG.error("Couldn't start the server: %s", e.__str__())
raise
def terminate(self):
"""
Terminating the server.
"""
try:
self.server_close()
self.__engine.dispose()
self.__request_handlers.terminate()
self.__request_handlers.join()
except Exception as ex:
LOG.error("Failed to shut down the WEB server!")
LOG.error(str(ex))
sys.exit(1)
def process_request_thread(self, request, client_address):
try:
# Finish_request instantiates request handler class.
self.finish_request(request, client_address)
self.shutdown_request(request)
except socket.error as serr:
if serr.errno == errno.EPIPE:
LOG.debug("Broken pipe")
LOG.debug(serr)
self.shutdown_request(request)
except Exception as ex:
LOG.debug(ex)
self.handle_error(request, client_address)
self.shutdown_request(request)
def process_request(self, request, client_address):
self.__request_handlers.apply_async(self.process_request_thread,
(request, client_address))
def add_product(self, orm_product, init_db=False):
"""
Adds a product to the list of product databases connected to
by the server.
Checks the database connection for the product databases.
"""
if orm_product.endpoint in self.__products:
LOG.debug("This product is already configured!")
return
LOG.debug("Setting up product '%s'", orm_product.endpoint)
prod = Product(orm_product,
self.context,
self.check_env)
# Update the product database status.
prod.connect()
if prod.db_status == DBStatus.SCHEMA_MISSING and init_db:
LOG.debug("Schema was missing in the database. Initializing new")
prod.connect(init_db=True)
self.__products[prod.endpoint] = prod
@property
def num_products(self):
"""
Returns the number of products currently mounted by the server.
"""
return len(self.__products)
def get_product(self, endpoint):
"""
Get the product connection object for the given endpoint, or None.
"""
if endpoint in self.__products:
return self.__products.get(endpoint)
LOG.debug("Product with the given endpoint '%s' does not exist in "
"the local cache. Try to get it from the database.",
endpoint)
        # If the product isn't found in the cache, try to get it from the
# database.
try:
cfg_sess = self.config_session()
product = cfg_sess.query(ORMProduct) \
.filter(ORMProduct.endpoint == endpoint) \
.limit(1).one_or_none()
if not product:
return None
self.add_product(product)
permissions.initialise_defaults('PRODUCT', {
'config_db_session': cfg_sess,
'productID': product.id
})
return self.__products.get(endpoint, None)
finally:
if cfg_sess:
cfg_sess.close()
cfg_sess.commit()
def get_only_product(self):
"""
Returns the Product object for the only product connected to by the
server, or None, if there are 0 or >= 2 products managed.
"""
return list(self.__products.items())[0][1] if self.num_products == 1 \
else None
def remove_product(self, endpoint):
product = self.get_product(endpoint)
if not product:
raise ValueError("The product with the given endpoint '{0}' does "
"not exist!".format(endpoint))
LOG.info("Disconnecting product '%s'", endpoint)
product.teardown()
del self.__products[endpoint]
def remove_products_except(self, endpoints_to_keep):
"""
Removes EVERY product connection from the server except those
endpoints specified in :endpoints_to_keep.
"""
[self.remove_product(ep)
for ep in list(self.__products)
if ep not in endpoints_to_keep]
class CCSimpleHttpServerIPv6(CCSimpleHttpServer):
"""
CodeChecker HTTP simple server that listens over an IPv6 socket.
"""
address_family = socket.AF_INET6
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __make_root_file(root_file):
"""
Generate a root username and password SHA. This hash is saved to the
given file path, and is also returned.
"""
LOG.debug("Generating initial superuser (root) credentials...")
username = ''.join(sample("ABCDEFGHIJKLMNOPQRSTUVWXYZ", 6))
password = get_tmp_dir_hash()[:8]
LOG.info("A NEW superuser credential was generated for the server. "
"This information IS SAVED, thus subsequent server starts "
"WILL use these credentials. You WILL NOT get to see "
"the credentials again, so MAKE SURE YOU REMEMBER THIS "
"LOGIN!")
# Highlight the message a bit more, as the server owner configuring the
# server must know this root access initially.
credential_msg = "The superuser's username is '{0}' with the " \
"password '{1}'".format(username, password)
LOG.info("-" * len(credential_msg))
LOG.info(credential_msg)
LOG.info("-" * len(credential_msg))
sha = sha256((username + ':' + password).encode('utf-8')).hexdigest()
with open(root_file, 'w', encoding="utf-8", errors="ignore") as f:
LOG.debug("Save root SHA256 '%s'", sha)
f.write(sha)
    # This file should be readable only by the process owner, and no one else.
os.chmod(root_file, stat.S_IRUSR)
return sha
def start_server(config_directory, package_data, port, config_sql_server,
listen_address, force_auth, skip_db_cleanup,
context, check_env):
"""
Start http server to handle web client and thrift requests.
"""
LOG.debug("Starting CodeChecker server...")
server_addr = (listen_address, port)
root_file = os.path.join(config_directory, 'root.user')
if not os.path.exists(root_file):
LOG.warning("Server started without 'root.user' present in "
"CONFIG_DIRECTORY!")
root_sha = __make_root_file(root_file)
else:
LOG.debug("Root file was found. Loading...")
try:
with open(root_file, 'r', encoding="utf-8", errors="ignore") as f:
root_sha = f.read()
LOG.debug("Root digest is '%s'", root_sha)
except IOError:
LOG.info("Cannot open root file '%s' even though it exists",
root_file)
root_sha = __make_root_file(root_file)
# Check whether configuration file exists, create an example if not.
server_cfg_file = os.path.join(config_directory, 'server_config.json')
if not os.path.exists(server_cfg_file):
# For backward compatibility reason if the session_config.json file
# exists we rename it to server_config.json.
session_cfg_file = os.path.join(config_directory,
'session_config.json')
example_cfg_file = os.path.join(os.environ['CC_PACKAGE_ROOT'],
'config', 'server_config.json')
if os.path.exists(session_cfg_file):
LOG.info("Renaming '%s' to '%s'. Please check the example "
"configuration file ('%s') or the user guide for more "
"information.", session_cfg_file,
server_cfg_file, example_cfg_file)
os.rename(session_cfg_file, server_cfg_file)
else:
LOG.info("CodeChecker server's example configuration file "
"created at '%s'", server_cfg_file)
shutil.copyfile(example_cfg_file, server_cfg_file)
try:
manager = session_manager.SessionManager(
server_cfg_file,
root_sha,
force_auth)
except IOError as ioerr:
LOG.debug(ioerr)
LOG.error("The server's configuration file "
"is missing or can not be read!")
sys.exit(1)
except ValueError as verr:
LOG.debug(verr)
LOG.error("The server's configuration file is invalid!")
sys.exit(1)
server_clazz = CCSimpleHttpServer
if ':' in server_addr[0]:
# IPv6 address specified for listening.
# FIXME: Python>=3.8 automatically handles IPv6 if ':' is in the bind
# address, see https://bugs.python.org/issue24209.
server_clazz = CCSimpleHttpServerIPv6
http_server = server_clazz(server_addr,
RequestHandler,
config_directory,
config_sql_server,
skip_db_cleanup,
package_data,
context,
check_env,
manager)
def signal_handler(signum, frame):
"""
Handle SIGTERM to stop the server running.
"""
LOG.info("Shutting down the WEB server on [%s:%d]",
'[' + listen_address + ']'
if server_clazz is CCSimpleHttpServerIPv6 else listen_address,
port)
http_server.terminate()
sys.exit(128 + signum)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
def reload_signal_handler(*args, **kwargs):
"""
Reloads server configuration file.
"""
manager.reload_config()
signal.signal(signal.SIGHUP, reload_signal_handler)
try:
instance_manager.register(os.getpid(),
os.path.abspath(
context.codechecker_workspace),
port)
except IOError as ex:
LOG.debug(ex.strerror)
LOG.info("Server waiting for client requests on [%s:%d]",
'[' + listen_address + ']'
if server_clazz is CCSimpleHttpServerIPv6 else listen_address,
port)
def unregister_handler(pid):
"""
Handle errors during instance unregistration.
The workspace might be removed so updating the
config content might fail.
"""
try:
instance_manager.unregister(pid)
except IOError as ex:
LOG.debug(ex.strerror)
atexit.register(unregister_handler, os.getpid())
http_server.serve_forever()
LOG.info("Webserver quit.")
def add_initial_run_database(config_sql_server, product_connection):
"""
Create a default run database as SQLite in the config directory,
and add it to the list of products in the config database specified by
db_conn_string.
"""
# Connect to the configuration database
LOG.debug("Creating database engine for CONFIG DATABASE...")
__engine = config_sql_server.create_engine()
product_session = sessionmaker(bind=__engine)
# Load the initial list of products and create the connections.
sess = product_session()
products = sess.query(ORMProduct).all()
if products:
raise ValueError("Called create_initial_run_database on non-empty "
"config database -- you shouldn't have done this!")
LOG.debug("Adding default product to the config db...")
product = ORMProduct('Default', product_connection, 'Default',
"Default product created at server start.")
sess.add(product)
sess.commit()
sess.close()
LOG.debug("Default product set up.")
|
[] |
[] |
[
"CC_PACKAGE_ROOT"
] |
[]
|
["CC_PACKAGE_ROOT"]
|
python
| 1 | 0 | |
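
A small sketch of the credential-hashing scheme used by __make_root_file in the server module above: only the SHA-256 digest of 'username:password' is written to root.user, never the plain credentials. The sample values below are placeholders.

from hashlib import sha256

def root_digest(username, password):
    # Same digest construction as __make_root_file; compare it against the stored hash at login.
    return sha256((username + ':' + password).encode('utf-8')).hexdigest()

print(root_digest("ABCDEF", "12ab34cd"))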
cnpsdk/utils.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Vantiv eCommerce
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
from __future__ import absolute_import, print_function, unicode_literals
import json
import os
import pyxb
import xmltodict
from cnpsdk import fields_chargeback
from . import version
class Configuration(object):
"""Setup Configuration variables.
Attributes:
        username (Str): authentication.user
password (Str): authentication.password
merchant_id (Str): The unique string to identify the merchant within the system.
url (Str): Url for server.
proxy (Str): Https proxy server address. Must start with "https://"
print_xml (Str): Whether print request and response xml
"""
VERSION = version.VERSION
RELEASE = version.RELEASE
_CONFIG_FILE_PATH = os.path.join(os.environ['CNP_CHARGEBACK_SDK_CONFIG'], ".cnp_chargeback_sdk.conf") \
if 'CNP_CHARGEBACK_SDK_CONFIG' in os.environ else os.path.join(os.path.expanduser("~"), ".cnp_chargeback_sdk.conf")
def __init__(self, conf_dict=dict()):
attr_dict = {
'username': '',
'password': '',
'merchant_id': '',
'url': 'http://www.testvantivcnp.com/sandbox/new',
'proxy': '',
'print_xml': False,
'neuter_xml': False,
}
# set default values
for k in attr_dict:
setattr(self, k, attr_dict[k])
# override values by loading saved conf
try:
with open(self._CONFIG_FILE_PATH, 'r') as config_file:
config_json = json.load(config_file)
for k in attr_dict:
if k in config_json and config_json[k]:
setattr(self, k, config_json[k])
except:
            # If any exception is raised, just pass.
pass
# override values by args
if conf_dict:
for k in conf_dict:
if k in attr_dict:
setattr(self, k, conf_dict[k])
else:
raise ChargebackError('"%s" is NOT an attribute of conf' % k)
def save(self):
"""Save Class Attributes to .cnp_chargeback_sdk.conf
Returns:
full path for configuration file.
Raises:
IOError: An error occurred
"""
with open(self._CONFIG_FILE_PATH, 'w') as config_file:
json.dump(vars(self), config_file)
return self._CONFIG_FILE_PATH
def obj_to_xml(obj):
"""Convert object to xml string without namespaces
Args:
obj: Object
Returns:
Xml string
Raises:
pyxb.ValidationError
"""
# TODO convert object to xml without default namespace gracefully.
try:
xml = obj.toxml('utf-8')
except pyxb.ValidationError as e:
raise ChargebackError(e.details())
xml = xml.replace(b'ns1:', b'')
xml = xml.replace(b':ns1', b'')
return xml
def generate_retrieval_response(http_response, return_format='dict'):
return convert_to_format(http_response.text, "chargebackRetrievalResponse", return_format)
def generate_update_response(http_response, return_format='dict'):
return convert_to_format(http_response.text, "chargebackUpdateResponse", return_format)
def generate_document_response(http_response, return_format='dict'):
return convert_to_format(http_response.text, "chargebackDocumentUploadResponse", return_format)
def generate_error_response(http_response, return_format='dict'):
return convert_to_format(http_response.text, "errorResponse", return_format)
def convert_to_format(http_response, response_type, return_format='dict'):
return_format = return_format.lower()
if return_format == 'xml':
        response_xml = http_response
return response_xml
elif return_format == 'object':
        return convert_to_obj(http_response)
else:
return convert_to_dict(http_response, response_type)
def convert_to_obj(xml_response):
return fields_chargeback.CreateFromDocument(xml_response)
def convert_to_dict(xml_response, response_type):
response_dict = xmltodict.parse(xml_response)[response_type]
if response_dict['@xmlns'] != "":
_create_lists(response_dict)
return response_dict
else:
raise ChargebackError("Invalid Format")
def _create_lists(response_dict):
if "chargebackCase" in response_dict:
_create_list("chargebackCase", response_dict)
for case in response_dict["chargebackCase"]:
if "activity" in case:
_create_list("activity", case)
if "errors" in response_dict:
_create_list("error", response_dict["errors"])
# if there is only one element for the given key in container, create a list for it
def _create_list(element_key, container):
element_value = container[element_key]
if element_value != "" and not isinstance(element_value, list):
container[element_key] = [element_value]
class ChargebackError(Exception):
def __init__(self, message):
self.message = message
class ChargebackWebError(Exception):
def __init__(self, message, code, error_list=None):
self.message = message
self.code = code
self.error_list = error_list
class ChargebackDocumentError(Exception):
def __init__(self, message, code):
self.message = message
self.code = code
|
[] |
[] |
[
"CNP_CHARGEBACK_SDK_CONFIG"
] |
[]
|
["CNP_CHARGEBACK_SDK_CONFIG"]
|
python
| 1 | 0 | |
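
A hedged usage sketch of the Configuration class from the SDK module above. Pointing CNP_CHARGEBACK_SDK_CONFIG at a temporary directory is only for illustration and the credential values are placeholders; note the environment variable must be set before the module is imported, since _CONFIG_FILE_PATH is resolved at import time.

import os
import tempfile

# Resolve the config file into a throwaway directory (illustrative only).
os.environ['CNP_CHARGEBACK_SDK_CONFIG'] = tempfile.mkdtemp()

from cnpsdk import utils

conf = utils.Configuration({'username': 'merchant_user', 'password': 'secret', 'merchant_id': '1234'})
path = conf.save()  # persists the settings to .cnp_chargeback_sdk.conf
print(path)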
orderer/common/server/main.go
|
/*
Copyright IBM Corp. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package server
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"net"
"net/http"
_ "net/http/pprof" // This is essentially the main package for the orderer
"os"
"os/signal"
"syscall"
"time"
"github.com/golang/protobuf/proto"
"github.com/hyperledger/fabric-lib-go/healthz"
"github.com/hyperledger/fabric/common/channelconfig"
"github.com/hyperledger/fabric/common/crypto"
"github.com/hyperledger/fabric/common/flogging"
floggingmetrics "github.com/hyperledger/fabric/common/flogging/metrics"
"github.com/hyperledger/fabric/common/grpclogging"
"github.com/hyperledger/fabric/common/grpcmetrics"
"github.com/hyperledger/fabric/common/ledger/blockledger"
"github.com/hyperledger/fabric/common/localmsp"
"github.com/hyperledger/fabric/common/metrics"
"github.com/hyperledger/fabric/common/metrics/disabled"
"github.com/hyperledger/fabric/common/tools/configtxgen/encoder"
genesisconfig "github.com/hyperledger/fabric/common/tools/configtxgen/localconfig"
"github.com/hyperledger/fabric/common/tools/protolator"
"github.com/hyperledger/fabric/common/util"
"github.com/hyperledger/fabric/core/comm"
"github.com/hyperledger/fabric/core/operations"
"github.com/hyperledger/fabric/msp"
mspmgmt "github.com/hyperledger/fabric/msp/mgmt"
"github.com/hyperledger/fabric/orderer/common/bootstrap/file"
"github.com/hyperledger/fabric/orderer/common/cluster"
"github.com/hyperledger/fabric/orderer/common/localconfig"
"github.com/hyperledger/fabric/orderer/common/metadata"
"github.com/hyperledger/fabric/orderer/common/multichannel"
"github.com/hyperledger/fabric/orderer/consensus"
"github.com/hyperledger/fabric/orderer/consensus/etcdraft"
"github.com/hyperledger/fabric/orderer/consensus/kafka"
"github.com/hyperledger/fabric/orderer/consensus/solo"
cb "github.com/hyperledger/fabric/protos/common"
ab "github.com/hyperledger/fabric/protos/orderer"
"github.com/hyperledger/fabric/protos/utils"
"go.uber.org/zap/zapcore"
"google.golang.org/grpc"
"gopkg.in/alecthomas/kingpin.v2"
)
var logger = flogging.MustGetLogger("orderer.common.server")
// command line flags
var (
app = kingpin.New("orderer", "Hyperledger Fabric orderer node")
start = app.Command("start", "Start the orderer node").Default()
version = app.Command("version", "Show version information")
benchmark = app.Command("benchmark", "Run orderer in benchmark mode")
clusterTypes = map[string]struct{}{"etcdraft": {}}
)
// Main is the entry point of orderer process
func Main() {
	fullCmd := kingpin.MustParse(app.Parse(os.Args[1:])) // parse the command-line arguments used to start the orderer
// "version" command
	if fullCmd == version.FullCommand() { // "orderer version": print version information and return
fmt.Println(metadata.GetVersionInfo())
return
}
	conf, err := localconfig.Load() // load the configuration file
if err != nil {
logger.Error("failed to parse config: ", err)
os.Exit(1)
}
	initializeLogging()      // initialize logging level, format, etc.
	initializeLocalMsp(conf) // load the local MSP information
	prettyPrintStruct(conf)  // log every configuration value from orderer.yaml
Start(fullCmd, conf)
}
// Start provides a layer of abstraction for benchmark test
func Start(cmd string, conf *localconfig.TopLevel) {
	bootstrapBlock := extractBootstrapBlock(conf) // locate and read the genesis block at the configured path and unmarshal it into a block structure
	if err := ValidateBootstrapBlock(bootstrapBlock); err != nil { // validate the bootstrap block
logger.Panicf("Failed validating bootstrap block: %v", err)
}
	opsSystem := newOperationsSystem(conf.Operations, conf.Metrics) // build the operations service handler for the orderer node: runtime metrics, health checks, logging, etc.
	err := opsSystem.Start() // start the operations service (runs in its own thread)
if err != nil {
logger.Panicf("failed to initialize operations subsystem: %s", err)
}
defer opsSystem.Stop()
metricsProvider := opsSystem.Provider
	lf, _ := createLedgerFactory(conf, metricsProvider) // create the ledger factory and its storage path
	sysChanLastConfigBlock := extractSysChanLastConfig(lf, bootstrapBlock) // fetch the system channel's last config block
	clusterBootBlock := selectClusterBootBlock(bootstrapBlock, sysChanLastConfigBlock) // pick the boot block: use the last config block if it is newer than the genesis block (orderer restarted after a system-channel config change), otherwise use the genesis block
	signer := localmsp.NewSigner() // construct a new MSP signer instance
	logObserver := floggingmetrics.NewObserver(metricsProvider) // logging observer configured from the metrics options in orderer.yaml
flogging.Global.SetObserver(logObserver)
	serverConfig := initializeServerConfig(conf, metricsProvider) // configure the server: TLS and other options
	grpcServer := initializeGrpcServer(conf, serverConfig) // build the gRPC server
caSupport := &comm.CredentialSupport{
AppRootCAsByChain: make(map[string]comm.CertificateBundle),
OrdererRootCAsByChainAndOrg: make(comm.OrgRootCAs),
ClientRootCAs: serverConfig.SecOpts.ClientRootCAs,
}
var r *replicationInitiator
clusterServerConfig := serverConfig
clusterGRPCServer := grpcServer // by default, cluster shares the same grpc server
clusterClientConfig := comm.ClientConfig{SecOpts: &comm.SecureOptions{}, KaOpts: &comm.KeepaliveOptions{}}
var clusterDialer *cluster.PredicateDialer
var reuseGrpcListener bool
	typ := consensusType(bootstrapBlock) // determine the consensus type
var serversToUpdate []*comm.GRPCServer
	clusterType := isClusterType(clusterBootBlock) // false if the consensus type is not raft
if clusterType {
logger.Infof("Setting up cluster for orderer type %s", typ)
clusterClientConfig = initializeClusterClientConfig(conf)
clusterDialer = &cluster.PredicateDialer{
ClientConfig: clusterClientConfig,
}
r = createReplicator(lf, bootstrapBlock, conf, clusterClientConfig.SecOpts, signer)
// Only clusters that are equipped with a recent config block can replicate.
if conf.General.GenesisMethod == "file" {
r.replicateIfNeeded(bootstrapBlock)
}
if reuseGrpcListener = reuseListener(conf, typ); !reuseGrpcListener {
clusterServerConfig, clusterGRPCServer = configureClusterListener(conf, serverConfig, ioutil.ReadFile)
}
// If we have a separate gRPC server for the cluster,
// we need to update its TLS CA certificate pool.
serversToUpdate = append(serversToUpdate, clusterGRPCServer)
}
// if cluster is reusing client-facing server, then it is already
// appended to serversToUpdate at this point.
if grpcServer.MutualTLSRequired() && !reuseGrpcListener {
serversToUpdate = append(serversToUpdate, grpcServer)
}
tlsCallback := func(bundle *channelconfig.Bundle) {
logger.Debug("Executing callback to update root CAs")
		updateTrustedRoots(caSupport, bundle, serversToUpdate...) // update the channel's TLS root CA certificates
if clusterType {
updateClusterDialer(caSupport, clusterDialer, clusterClientConfig.SecOpts.ServerRootCAs)
}
}
	sigHdr, err := signer.NewSignatureHeader() // build the signature header: creator identity plus 24 random bytes
if err != nil {
logger.Panicf("Failed creating a signature header: %v", err)
}
expirationLogger := flogging.MustGetLogger("certmonitor")
	crypto.TrackExpiration( // monitor certificate expiration
serverConfig.SecOpts.UseTLS,
serverConfig.SecOpts.Certificate,
[][]byte{clusterClientConfig.SecOpts.Certificate},
sigHdr.Creator,
expirationLogger.Warnf, // This can be used to piggyback a metric event in the future
time.Now(),
time.AfterFunc)
manager := initializeMultichannelRegistrar(clusterBootBlock, r, clusterDialer, clusterServerConfig, clusterGRPCServer, conf, signer, metricsProvider, opsSystem, lf, tlsCallback)
mutualTLS := serverConfig.SecOpts.UseTLS && serverConfig.SecOpts.RequireClientCert //false
expiration := conf.General.Authentication.NoExpirationChecks //false
	server := NewServer(manager, metricsProvider, &conf.Debug, conf.General.Authentication.TimeWindow, mutualTLS, expiration) // provides the Broadcast and Deliver services
logger.Infof("Starting %s", metadata.GetVersionInfo())
go handleSignals(addPlatformSignals(map[os.Signal]func(){
syscall.SIGTERM: func() {
grpcServer.Stop()
if clusterGRPCServer != grpcServer {
clusterGRPCServer.Stop()
}
},
}))
if !reuseGrpcListener && clusterType { //false
logger.Info("Starting cluster listener on", clusterGRPCServer.Address())
go clusterGRPCServer.Start()
}
initializeProfilingService(conf)
ab.RegisterAtomicBroadcastServer(grpcServer.Server(), server)
logger.Info("Beginning to serve requests")
	grpcServer.Start() // start serving
}
func reuseListener(conf *localconfig.TopLevel, typ string) bool {
clusterConf := conf.General.Cluster
// If listen address is not configured, and the TLS certificate isn't configured,
// it means we use the general listener of the node.
if clusterConf.ListenPort == 0 && clusterConf.ServerCertificate == "" && clusterConf.ListenAddress == "" && clusterConf.ServerPrivateKey == "" {
logger.Info("Cluster listener is not configured, defaulting to use the general listener on port", conf.General.ListenPort)
if !conf.General.TLS.Enabled {
logger.Panicf("TLS is required for running ordering nodes of type %s.", typ)
}
return true
}
// Else, one of the above is defined, so all 4 properties should be defined.
if clusterConf.ListenPort == 0 || clusterConf.ServerCertificate == "" || clusterConf.ListenAddress == "" || clusterConf.ServerPrivateKey == "" {
logger.Panic("Options: General.Cluster.ListenPort, General.Cluster.ListenAddress, General.Cluster.ServerCertificate," +
" General.Cluster.ServerPrivateKey, should be defined altogether.")
}
return false
}
// Extract system channel last config block
func extractSysChanLastConfig(lf blockledger.Factory, bootstrapBlock *cb.Block) *cb.Block {
// Are we bootstrapping?
	chainCount := len(lf.ChainIDs()) // i.e. count how many channels exist under /chains/
if chainCount == 0 {
logger.Info("Bootstrapping because no existing channels")
return nil
}
logger.Infof("Not bootstrapping because of %d existing channels", chainCount)
	systemChannelName, err := utils.GetChainIDFromBlock(bootstrapBlock) // extract the (system) channel name from the block
if err != nil {
logger.Panicf("Failed extracting system channel name from bootstrap block: %v", err)
}
	systemChannelLedger, err := lf.GetOrCreate(systemChannelName) // check whether the ledger exists and create it if missing; see common/ledger/blockledger/file/factory.go line 35 for the implementation
if err != nil {
logger.Panicf("Failed getting system channel ledger: %v", err)
}
	height := systemChannelLedger.Height() // ledger height
	lastConfigBlock := multichannel.ConfigBlock(systemChannelLedger) // last config block
logger.Infof("System channel: name=%s, height=%d, last config block number=%d",
systemChannelName, height, lastConfigBlock.Header.Number)
return lastConfigBlock
}
// Select cluster boot block
func selectClusterBootBlock(bootstrapBlock, sysChanLastConfig *cb.Block) *cb.Block {
if sysChanLastConfig == nil {
logger.Debug("Selected bootstrap block, because system channel last config block is nil")
return bootstrapBlock
}
	if sysChanLastConfig.Header.Number > bootstrapBlock.Header.Number { // is the last config block newer than the bootstrap block?
logger.Infof("Cluster boot block is system channel last config block; Blocks Header.Number system-channel=%d, bootstrap=%d",
sysChanLastConfig.Header.Number, bootstrapBlock.Header.Number)
return sysChanLastConfig
}
logger.Infof("Cluster boot block is bootstrap (genesis) block; Blocks Header.Number system-channel=%d, bootstrap=%d",
sysChanLastConfig.Header.Number, bootstrapBlock.Header.Number)
return bootstrapBlock
}
func createReplicator(
lf blockledger.Factory,
bootstrapBlock *cb.Block,
conf *localconfig.TopLevel,
secOpts *comm.SecureOptions,
signer crypto.LocalSigner,
) *replicationInitiator {
logger := flogging.MustGetLogger("orderer.common.cluster")
vl := &verifierLoader{
verifierFactory: &cluster.BlockVerifierAssembler{Logger: logger},
onFailure: func(block *cb.Block) {
protolator.DeepMarshalJSON(os.Stdout, block)
},
ledgerFactory: lf,
logger: logger,
}
systemChannelName, err := utils.GetChainIDFromBlock(bootstrapBlock)
if err != nil {
logger.Panicf("Failed extracting system channel name from bootstrap block: %v", err)
}
// System channel is not verified because we trust the bootstrap block
// and use backward hash chain verification.
verifiersByChannel := vl.loadVerifiers()
verifiersByChannel[systemChannelName] = &cluster.NoopBlockVerifier{}
vr := &cluster.VerificationRegistry{
LoadVerifier: vl.loadVerifier,
Logger: logger,
VerifiersByChannel: verifiersByChannel,
VerifierFactory: &cluster.BlockVerifierAssembler{Logger: logger},
}
ledgerFactory := &ledgerFactory{
Factory: lf,
onBlockCommit: vr.BlockCommitted,
}
return &replicationInitiator{
registerChain: vr.RegisterVerifier,
verifierRetriever: vr,
logger: logger,
secOpts: secOpts,
conf: conf,
lf: ledgerFactory,
signer: signer,
}
}
func initializeLogging() {
loggingSpec := os.Getenv("FABRIC_LOGGING_SPEC")
loggingFormat := os.Getenv("FABRIC_LOGGING_FORMAT")
flogging.Init(flogging.Config{
Format: loggingFormat,
Writer: os.Stderr,
LogSpec: loggingSpec,
})
}
// Start the profiling service if enabled.
func initializeProfilingService(conf *localconfig.TopLevel) {
if conf.General.Profile.Enabled { //false
go func() {
logger.Info("Starting Go pprof profiling service on:", conf.General.Profile.Address)
// The ListenAndServe() call does not return unless an error occurs.
logger.Panic("Go pprof service failed:", http.ListenAndServe(conf.General.Profile.Address, nil))
}()
}
}
func handleSignals(handlers map[os.Signal]func()) {
var signals []os.Signal
for sig := range handlers {
signals = append(signals, sig)
}
signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan, signals...)
for sig := range signalChan {
logger.Infof("Received signal: %d (%s)", sig, sig)
handlers[sig]()
}
}
type loadPEMFunc func(string) ([]byte, error)
// configureClusterListener returns a new ServerConfig and a new gRPC server (with its own TLS listener).
func configureClusterListener(conf *localconfig.TopLevel, generalConf comm.ServerConfig, loadPEM loadPEMFunc) (comm.ServerConfig, *comm.GRPCServer) {
clusterConf := conf.General.Cluster
cert, err := loadPEM(clusterConf.ServerCertificate)
if err != nil {
logger.Panicf("Failed to load cluster server certificate from '%s' (%s)", clusterConf.ServerCertificate, err)
}
key, err := loadPEM(clusterConf.ServerPrivateKey)
if err != nil {
logger.Panicf("Failed to load cluster server key from '%s' (%s)", clusterConf.ServerPrivateKey, err)
}
port := fmt.Sprintf("%d", clusterConf.ListenPort)
bindAddr := net.JoinHostPort(clusterConf.ListenAddress, port)
var clientRootCAs [][]byte
for _, serverRoot := range conf.General.Cluster.RootCAs {
rootCACert, err := loadPEM(serverRoot)
if err != nil {
logger.Panicf("Failed to load CA cert file '%s' (%s)",
				serverRoot, err)
}
clientRootCAs = append(clientRootCAs, rootCACert)
}
serverConf := comm.ServerConfig{
StreamInterceptors: generalConf.StreamInterceptors,
UnaryInterceptors: generalConf.UnaryInterceptors,
ConnectionTimeout: generalConf.ConnectionTimeout,
ServerStatsHandler: generalConf.ServerStatsHandler,
Logger: generalConf.Logger,
KaOpts: generalConf.KaOpts,
SecOpts: &comm.SecureOptions{
TimeShift: conf.General.Cluster.TLSHandshakeTimeShift,
CipherSuites: comm.DefaultTLSCipherSuites,
ClientRootCAs: clientRootCAs,
RequireClientCert: true,
Certificate: cert,
UseTLS: true,
Key: key,
},
}
srv, err := comm.NewGRPCServer(bindAddr, serverConf)
if err != nil {
logger.Panicf("Failed creating gRPC server on %s:%d due to %v", clusterConf.ListenAddress, clusterConf.ListenPort, err)
}
return serverConf, srv
}
func initializeClusterClientConfig(conf *localconfig.TopLevel) comm.ClientConfig {
cc := comm.ClientConfig{
AsyncConnect: true,
KaOpts: comm.DefaultKeepaliveOptions,
Timeout: conf.General.Cluster.DialTimeout,
SecOpts: &comm.SecureOptions{},
}
if conf.General.Cluster.ClientCertificate == "" {
return cc
}
certFile := conf.General.Cluster.ClientCertificate
certBytes, err := ioutil.ReadFile(certFile)
if err != nil {
logger.Fatalf("Failed to load client TLS certificate file '%s' (%s)", certFile, err)
}
keyFile := conf.General.Cluster.ClientPrivateKey
keyBytes, err := ioutil.ReadFile(keyFile)
if err != nil {
logger.Fatalf("Failed to load client TLS key file '%s' (%s)", keyFile, err)
}
var serverRootCAs [][]byte
for _, serverRoot := range conf.General.Cluster.RootCAs {
rootCACert, err := ioutil.ReadFile(serverRoot)
if err != nil {
logger.Fatalf("Failed to load ServerRootCAs file '%s' (%s)",
				serverRoot, err)
}
serverRootCAs = append(serverRootCAs, rootCACert)
}
cc.SecOpts = &comm.SecureOptions{
TimeShift: conf.General.Cluster.TLSHandshakeTimeShift,
RequireClientCert: true,
CipherSuites: comm.DefaultTLSCipherSuites,
ServerRootCAs: serverRootCAs,
Certificate: certBytes,
Key: keyBytes,
UseTLS: true,
}
return cc
}
func initializeServerConfig(conf *localconfig.TopLevel, metricsProvider metrics.Provider) comm.ServerConfig {
// secure server config
secureOpts := &comm.SecureOptions{
UseTLS: conf.General.TLS.Enabled, //true
RequireClientCert: conf.General.TLS.ClientAuthRequired, //false
}
// check to see if TLS is enabled
	if secureOpts.UseTLS { // TLS is used in almost all deployments
msg := "TLS"
// load crypto material from files
		serverCertificate, err := ioutil.ReadFile(conf.General.TLS.Certificate) // read the certificate
if err != nil {
logger.Fatalf("Failed to load server Certificate file '%s' (%s)",
conf.General.TLS.Certificate, err)
}
serverKey, err := ioutil.ReadFile(conf.General.TLS.PrivateKey)
if err != nil {
logger.Fatalf("Failed to load PrivateKey file '%s' (%s)",
conf.General.TLS.PrivateKey, err)
}
var serverRootCAs, clientRootCAs [][]byte
		for _, serverRoot := range conf.General.TLS.RootCAs { // usually contains only a single element
root, err := ioutil.ReadFile(serverRoot)
if err != nil {
logger.Fatalf("Failed to load ServerRootCAs file '%s' (%s)",
					serverRoot, err)
}
serverRootCAs = append(serverRootCAs, root)
}
		if secureOpts.RequireClientCert { // false in most deployments
for _, clientRoot := range conf.General.TLS.ClientRootCAs {
root, err := ioutil.ReadFile(clientRoot)
if err != nil {
logger.Fatalf("Failed to load ClientRootCAs file '%s' (%s)",
err, clientRoot)
}
clientRootCAs = append(clientRootCAs, root)
}
msg = "mutual TLS"
}
secureOpts.Key = serverKey
secureOpts.Certificate = serverCertificate
secureOpts.ClientRootCAs = clientRootCAs
logger.Infof("Starting orderer with %s enabled", msg)
}
	kaOpts := comm.DefaultKeepaliveOptions // keepalive intervals for the various service clients
// keepalive settings
// ServerMinInterval must be greater than 0
if conf.General.Keepalive.ServerMinInterval > time.Duration(0) {
		kaOpts.ServerMinInterval = conf.General.Keepalive.ServerMinInterval // default 60s
}
kaOpts.ServerInterval = conf.General.Keepalive.ServerInterval
kaOpts.ServerTimeout = conf.General.Keepalive.ServerTimeout //20s
commLogger := flogging.MustGetLogger("core.comm").With("server", "Orderer")
if metricsProvider == nil {
metricsProvider = &disabled.Provider{}
}
return comm.ServerConfig{
SecOpts: secureOpts,
KaOpts: kaOpts,
Logger: commLogger,
ServerStatsHandler: comm.NewServerStatsHandler(metricsProvider),
ConnectionTimeout: conf.General.ConnectionTimeout,
StreamInterceptors: []grpc.StreamServerInterceptor{
grpcmetrics.StreamServerInterceptor(grpcmetrics.NewStreamMetrics(metricsProvider)),
grpclogging.StreamServerInterceptor(flogging.MustGetLogger("comm.grpc.server").Zap()),
},
UnaryInterceptors: []grpc.UnaryServerInterceptor{
grpcmetrics.UnaryServerInterceptor(grpcmetrics.NewUnaryMetrics(metricsProvider)),
grpclogging.UnaryServerInterceptor(
flogging.MustGetLogger("comm.grpc.server").Zap(),
grpclogging.WithLeveler(grpclogging.LevelerFunc(grpcLeveler)),
),
},
}
}
func grpcLeveler(ctx context.Context, fullMethod string) zapcore.Level {
switch fullMethod {
case "/orderer.Cluster/Step":
return flogging.DisabledLevel
default:
return zapcore.InfoLevel
}
}
func extractBootstrapBlock(conf *localconfig.TopLevel) *cb.Block {
var bootstrapBlock *cb.Block
// Select the bootstrapping mechanism
	switch conf.General.GenesisMethod { // usually set to "file", using the tool-generated genesis.block file, i.e. the genesis block
case "provisional":
		bootstrapBlock = encoder.New(genesisconfig.Load(conf.General.GenesisProfile)).GenesisBlockForChannel(conf.General.SystemChannel) // build the genesis block from the configtx.yaml profile, including the policies and rules of every module
case "file":
		bootstrapBlock = file.New(conf.General.GenesisFile).GenesisBlock() // locate the genesis block file at the configured path and unmarshal it into a block structure; GenesisBlock() is implemented in orderer/common/bootstrap/file/bootstrap.go line 36
default:
logger.Panic("Unknown genesis method:", conf.General.GenesisMethod)
}
	// For the "file" option the construction details are not shown here; refer to the "provisional" case, which mirrors how the configtxgen tool builds the genesis block, or read the configtxgen source itself.
return bootstrapBlock
}
func initializeBootstrapChannel(genesisBlock *cb.Block, lf blockledger.Factory) {
	chainID, err := utils.GetChainIDFromBlock(genesisBlock) // extract the channel ID from the block
if err != nil {
logger.Fatal("Failed to parse channel ID from genesis block:", err)
}
	gl, err := lf.GetOrCreate(chainID) // get or create the ledger for this channel; implemented in common/ledger/blockledger/file/factory.go line 35
if err != nil {
logger.Fatal("Failed to create the system channel:", err)
}
if err := gl.Append(genesisBlock); err != nil {
logger.Fatal("Could not write genesis block to ledger:", err)
}
}
func isClusterType(genesisBlock *cb.Block) bool {
_, exists := clusterTypes[consensusType(genesisBlock)]
return exists
}
func consensusType(genesisBlock *cb.Block) string {
if genesisBlock.Data == nil || len(genesisBlock.Data.Data) == 0 {
logger.Fatalf("Empty genesis block")
}
env := &cb.Envelope{}
if err := proto.Unmarshal(genesisBlock.Data.Data[0], env); err != nil {
logger.Fatalf("Failed to unmarshal the genesis block's envelope: %v", err)
}
bundle, err := channelconfig.NewBundleFromEnvelope(env)
if err != nil {
logger.Fatalf("Failed creating bundle from the genesis block: %v", err)
}
	ordConf, exists := bundle.OrdererConfig() // fetch the orderer configuration
if !exists {
logger.Fatalf("Orderer config doesn't exist in bundle derived from genesis block")
}
	return ordConf.ConsensusType() // return the consensus type; implemented in common/channelconfig/orderer.go line 147
}
func initializeGrpcServer(conf *localconfig.TopLevel, serverConfig comm.ServerConfig) *comm.GRPCServer {
lis, err := net.Listen("tcp", fmt.Sprintf("%s:%d", conf.General.ListenAddress, conf.General.ListenPort))
if err != nil {
logger.Fatal("Failed to listen:", err)
}
// Create GRPC server - return if an error occurs
	grpcServer, err := comm.NewGRPCServerFromListener(lis, serverConfig) // create the gRPC server with the configured server options
if err != nil {
logger.Fatal("Failed to return new GRPC server:", err)
}
return grpcServer
}
func initializeLocalMsp(conf *localconfig.TopLevel) {
// Load local MSP
	err := mspmgmt.LoadLocalMsp(conf.General.LocalMSPDir, conf.General.BCCSP, conf.General.LocalMSPID) // load everything under the local MSP directory and populate the MSP structures
if err != nil { // Handle errors reading the config file
logger.Fatal("Failed to initialize local MSP:", err)
}
}
//go:generate counterfeiter -o mocks/health_checker.go -fake-name HealthChecker . healthChecker
// HealthChecker defines the contract for health checker
type healthChecker interface {
RegisterChecker(component string, checker healthz.HealthChecker) error
}
func initializeMultichannelRegistrar(
bootstrapBlock *cb.Block,
ri *replicationInitiator,
clusterDialer *cluster.PredicateDialer,
srvConf comm.ServerConfig,
srv *comm.GRPCServer,
conf *localconfig.TopLevel,
signer crypto.LocalSigner,
metricsProvider metrics.Provider,
healthChecker healthChecker,
lf blockledger.Factory,
callbacks ...channelconfig.BundleActor,
) *multichannel.Registrar {
	genesisBlock := extractBootstrapBlock(conf) // build the bootstrap (genesis) block from the configtx.yaml configuration
// Are we bootstrapping?
if len(lf.ChainIDs()) == 0 {
initializeBootstrapChannel(genesisBlock, lf)
} else {
logger.Info("Not bootstrapping because of existing channels")
}
consenters := make(map[string]consensus.Consenter)
registrar := multichannel.NewRegistrar(*conf, lf, signer, metricsProvider, callbacks...)
var icr etcdraft.InactiveChainRegistry
	if isClusterType(bootstrapBlock) { // only entered when the consensus type is a cluster type such as etcdraft
etcdConsenter := initializeEtcdraftConsenter(consenters, conf, lf, clusterDialer, bootstrapBlock, ri, srvConf, srv, registrar, metricsProvider)
icr = etcdConsenter.InactiveChainRegistry
}
consenters["solo"] = solo.New()
var kafkaMetrics *kafka.Metrics
consenters["kafka"], kafkaMetrics = kafka.New(conf.Kafka, metricsProvider, healthChecker, icr, registrar.CreateChain)
// Note, we pass a 'nil' channel here, we could pass a channel that
// closes if we wished to cleanup this routine on exit.
	go kafkaMetrics.PollGoMetricsUntilStop(time.Minute, nil) // start a goroutine that polls the metrics once per minute
registrar.Initialize(consenters)
return registrar
}
func initializeEtcdraftConsenter(
consenters map[string]consensus.Consenter,
conf *localconfig.TopLevel,
lf blockledger.Factory,
clusterDialer *cluster.PredicateDialer,
bootstrapBlock *cb.Block,
ri *replicationInitiator,
srvConf comm.ServerConfig,
srv *comm.GRPCServer,
registrar *multichannel.Registrar,
metricsProvider metrics.Provider,
) *etcdraft.Consenter {
replicationRefreshInterval := conf.General.Cluster.ReplicationBackgroundRefreshInterval
if replicationRefreshInterval == 0 {
replicationRefreshInterval = defaultReplicationBackgroundRefreshInterval
}
systemChannelName, err := utils.GetChainIDFromBlock(bootstrapBlock)
if err != nil {
ri.logger.Panicf("Failed extracting system channel name from bootstrap block: %v", err)
}
systemLedger, err := lf.GetOrCreate(systemChannelName)
if err != nil {
ri.logger.Panicf("Failed obtaining system channel (%s) ledger: %v", systemChannelName, err)
}
getConfigBlock := func() *cb.Block {
return multichannel.ConfigBlock(systemLedger)
}
exponentialSleep := exponentialDurationSeries(replicationBackgroundInitialRefreshInterval, replicationRefreshInterval)
ticker := newTicker(exponentialSleep)
icr := &inactiveChainReplicator{
logger: logger,
scheduleChan: ticker.C,
quitChan: make(chan struct{}),
replicator: ri,
chains2CreationCallbacks: make(map[string]chainCreation),
retrieveLastSysChannelConfigBlock: getConfigBlock,
registerChain: ri.registerChain,
}
// Use the inactiveChainReplicator as a channel lister, since it has knowledge
// of all inactive chains.
// This is to prevent us pulling the entire system chain when attempting to enumerate
// the channels in the system.
ri.channelLister = icr
go icr.run()
raftConsenter := etcdraft.New(clusterDialer, conf, srvConf, srv, registrar, icr, metricsProvider)
consenters["etcdraft"] = raftConsenter
return raftConsenter
}
func newOperationsSystem(ops localconfig.Operations, metrics localconfig.Metrics) *operations.System {
return operations.NewSystem(operations.Options{
Logger: flogging.MustGetLogger("orderer.operations"),
ListenAddress: ops.ListenAddress,
Metrics: operations.MetricsOptions{
Provider: metrics.Provider,
Statsd: &operations.Statsd{
Network: metrics.Statsd.Network,
Address: metrics.Statsd.Address,
WriteInterval: metrics.Statsd.WriteInterval,
Prefix: metrics.Statsd.Prefix,
},
},
TLS: operations.TLS{
Enabled: ops.TLS.Enabled,
CertFile: ops.TLS.Certificate,
KeyFile: ops.TLS.PrivateKey,
ClientCertRequired: ops.TLS.ClientAuthRequired,
ClientCACertFiles: ops.TLS.ClientRootCAs,
},
Version: metadata.Version,
})
}
func updateTrustedRoots(rootCASupport *comm.CredentialSupport, cm channelconfig.Resources, servers ...*comm.GRPCServer) {
rootCASupport.Lock()
defer rootCASupport.Unlock()
appOrgMSPs := make(map[string]struct{})
ordOrgMSPs := make(map[string]struct{})
if ac, ok := cm.ApplicationConfig(); ok {
// loop through app orgs and build map of MSPIDs
for _, appOrg := range ac.Organizations() {
appOrgMSPs[appOrg.MSPID()] = struct{}{}
}
}
if ac, ok := cm.OrdererConfig(); ok {
// loop through orderer orgs and build map of MSPIDs
for _, ordOrg := range ac.Organizations() {
ordOrgMSPs[ordOrg.MSPID()] = struct{}{}
}
}
if cc, ok := cm.ConsortiumsConfig(); ok {
for _, consortium := range cc.Consortiums() {
// loop through consortium orgs and build map of MSPIDs
for _, consortiumOrg := range consortium.Organizations() {
appOrgMSPs[consortiumOrg.MSPID()] = struct{}{}
}
}
}
cid := cm.ConfigtxValidator().ChainID()
logger.Debugf("updating root CAs for channel [%s]", cid)
msps, err := cm.MSPManager().GetMSPs()
if err != nil {
logger.Errorf("Error getting root CAs for channel %s (%s)", cid, err)
return
}
var appRootCAs comm.CertificateBundle
ordererRootCAsPerOrg := make(map[string]comm.CertificateBundle)
for k, v := range msps {
var ordererRootCAs comm.CertificateBundle
// check to see if this is a FABRIC MSP
if v.GetType() == msp.FABRIC {
for _, root := range v.GetTLSRootCerts() {
				// check to see if this is an app org MSP
if _, ok := appOrgMSPs[k]; ok {
logger.Debugf("adding app root CAs for MSP [%s]", k)
appRootCAs = append(appRootCAs, root)
}
				// check to see if this is an orderer org MSP
if _, ok := ordOrgMSPs[k]; ok {
logger.Debugf("adding orderer root CAs for MSP [%s]", k)
ordererRootCAs = append(ordererRootCAs, root)
}
}
for _, intermediate := range v.GetTLSIntermediateCerts() {
				// check to see if this is an app org MSP
if _, ok := appOrgMSPs[k]; ok {
logger.Debugf("adding app root CAs for MSP [%s]", k)
appRootCAs = append(appRootCAs, intermediate)
}
				// check to see if this is an orderer org MSP
if _, ok := ordOrgMSPs[k]; ok {
logger.Debugf("adding orderer root CAs for MSP [%s]", k)
ordererRootCAs = append(ordererRootCAs, intermediate)
}
}
ordererRootCAsPerOrg[k] = ordererRootCAs
}
}
rootCASupport.AppRootCAsByChain[cid] = appRootCAs
rootCASupport.OrdererRootCAsByChainAndOrg[cid] = ordererRootCAsPerOrg
// now iterate over all roots for all app and orderer chains
trustedRoots := [][]byte{}
for _, roots := range rootCASupport.AppRootCAsByChain {
trustedRoots = append(trustedRoots, roots...)
}
// add all root CAs from all channels to the trusted roots
for _, orgRootCAs := range rootCASupport.OrdererRootCAsByChainAndOrg {
for _, roots := range orgRootCAs {
trustedRoots = append(trustedRoots, roots...)
}
}
// also need to append statically configured root certs
if len(rootCASupport.ClientRootCAs) > 0 {
trustedRoots = append(trustedRoots, rootCASupport.ClientRootCAs...)
}
// now update the client roots for the gRPC server
for _, srv := range servers {
err = srv.SetClientRootCAs(trustedRoots)
if err != nil {
msg := "Failed to update trusted roots for orderer from latest config " +
"block. This orderer may not be able to communicate " +
"with members of channel %s (%s)"
logger.Warningf(msg, cm.ConfigtxValidator().ChainID(), err)
}
}
}
func updateClusterDialer(rootCASupport *comm.CredentialSupport, clusterDialer *cluster.PredicateDialer, localClusterRootCAs [][]byte) {
rootCASupport.Lock()
defer rootCASupport.Unlock()
// Iterate over all orderer root CAs for all chains and add them
// to the root CAs
var clusterRootCAs [][]byte
for _, orgRootCAs := range rootCASupport.OrdererRootCAsByChainAndOrg {
for _, roots := range orgRootCAs {
clusterRootCAs = append(clusterRootCAs, roots...)
}
}
// Add the local root CAs too
clusterRootCAs = append(clusterRootCAs, localClusterRootCAs...)
// Update the cluster config with the new root CAs
clusterDialer.UpdateRootCAs(clusterRootCAs)
}
func prettyPrintStruct(i interface{}) {
params := util.Flatten(i)
var buffer bytes.Buffer
for i := range params {
buffer.WriteString("\n\t")
buffer.WriteString(params[i])
}
logger.Infof("Orderer config values:%s\n", buffer.String())
}
|
[
"\"FABRIC_LOGGING_SPEC\"",
"\"FABRIC_LOGGING_FORMAT\""
] |
[] |
[
"FABRIC_LOGGING_SPEC",
"FABRIC_LOGGING_FORMAT"
] |
[]
|
["FABRIC_LOGGING_SPEC", "FABRIC_LOGGING_FORMAT"]
|
go
| 2 | 0 | |
pyhanko_tests/test_pkcs11.py
|
"""
Tests for PKCS#11 functionality.
NOTE: these are not run in CI, due to lack of testing setup.
"""
import asyncio
import binascii
import logging
import os
from io import BytesIO
import pytest
from asn1crypto.algos import SignedDigestAlgorithm
from certomancer.registry import CertLabel
from freezegun import freeze_time
from pkcs11 import NoSuchKey, PKCS11Error
from pyhanko_certvalidator.registry import SimpleCertificateStore
from pyhanko.config import PKCS11SignatureConfig
from pyhanko.pdf_utils.incremental_writer import IncrementalPdfFileWriter
from pyhanko.pdf_utils.reader import PdfFileReader
from pyhanko.sign import general, pkcs11, signers
from pyhanko.sign.general import SigningError
from pyhanko.sign.pkcs11 import PKCS11SigningContext
from pyhanko_tests.samples import MINIMAL, TESTING_CA
from pyhanko_tests.signing_commons import (
SIMPLE_DSA_V_CONTEXT,
SIMPLE_ECC_V_CONTEXT,
async_val_trusted,
val_trusted,
)
logger = logging.getLogger(__name__)
SKIP_PKCS11 = False
pkcs11_test_module = os.environ.get('PKCS11_TEST_MODULE', None)
if not pkcs11_test_module:
    logger.warning("Skipping PKCS#11 tests --- no PKCS#11 module specified")
SKIP_PKCS11 = True
def _simple_sess(token='testrsa'):
return pkcs11.open_pkcs11_session(
pkcs11_test_module, user_pin='1234', token_label=token
)
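# Illustrative usage (the module path is an assumption; adjust for your platform): with
# SoftHSM2 the suite can be pointed at the PKCS#11 module like
#   PKCS11_TEST_MODULE=/usr/lib/softhsm/libsofthsm2.so pytest pyhanko_tests/test_pkcs11.py
# assuming tokens 'testrsa', 'testdsa' and 'testecdsa' exist with user PIN 1234.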
default_other_certs = ('root', 'intermediate')
@pytest.mark.skipif(SKIP_PKCS11, reason="no PKCS#11 module")
@pytest.mark.parametrize('bulk_fetch,pss', [(True, True), (False, False),
                                            (True, False), (False, True)])
@freeze_time('2020-11-01')
def test_simple_sign(bulk_fetch, pss):
w = IncrementalPdfFileWriter(BytesIO(MINIMAL))
meta = signers.PdfSignatureMetadata(field_name='Sig1')
with _simple_sess() as sess:
signer = pkcs11.PKCS11Signer(
sess, 'signer', other_certs_to_pull=default_other_certs,
bulk_fetch=bulk_fetch, prefer_pss=pss
)
out = signers.sign_pdf(w, meta, signer=signer)
r = PdfFileReader(out)
emb = r.embedded_signatures[0]
assert emb.field_name == 'Sig1'
val_trusted(emb)
@pytest.mark.skipif(SKIP_PKCS11, reason="no PKCS#11 module")
@pytest.mark.parametrize('bulk_fetch', [True, False])
@freeze_time('2020-11-01')
def test_sign_external_certs(bulk_fetch):
# Test to see if unnecessary fetches for intermediate certs are skipped
w = IncrementalPdfFileWriter(BytesIO(MINIMAL))
meta = signers.PdfSignatureMetadata(field_name='Sig1')
with _simple_sess() as sess:
signer = pkcs11.PKCS11Signer(
sess, 'signer',
ca_chain=(TESTING_CA.get_cert(CertLabel('interm')),),
bulk_fetch=bulk_fetch
)
orig_fetcher = pkcs11._pull_cert
try:
def _trap_pull(session, *, label=None, cert_id=None):
if label != 'signer':
raise RuntimeError
return orig_fetcher(session, label=label, cert_id=cert_id)
pkcs11._pull_cert = _trap_pull
assert isinstance(signer.cert_registry, SimpleCertificateStore)
assert len(list(signer.cert_registry)) == 1
out = signers.sign_pdf(w, meta, signer=signer)
finally:
pkcs11._pull_cert = orig_fetcher
r = PdfFileReader(out)
emb = r.embedded_signatures[0]
assert emb.field_name == 'Sig1'
val_trusted(emb)
@pytest.mark.skipif(SKIP_PKCS11, reason="no PKCS#11 module")
@pytest.mark.parametrize('bulk_fetch', [True, False])
@freeze_time('2020-11-01')
def test_sign_multiple_cert_sources(bulk_fetch):
w = IncrementalPdfFileWriter(BytesIO(MINIMAL))
meta = signers.PdfSignatureMetadata(field_name='Sig1')
with _simple_sess() as sess:
signer = pkcs11.PKCS11Signer(
sess, 'signer', other_certs_to_pull=('root',),
ca_chain=(TESTING_CA.get_cert(CertLabel('interm')),),
bulk_fetch=bulk_fetch
)
assert isinstance(signer.cert_registry, SimpleCertificateStore)
assert len(list(signer.cert_registry)) == 2
out = signers.sign_pdf(w, meta, signer=signer)
r = PdfFileReader(out)
emb = r.embedded_signatures[0]
assert emb.field_name == 'Sig1'
val_trusted(emb)
@pytest.mark.skipif(SKIP_PKCS11, reason="no PKCS#11 module")
@pytest.mark.parametrize('bulk_fetch', [True, False])
@freeze_time('2020-11-01')
def test_wrong_key_label(bulk_fetch):
w = IncrementalPdfFileWriter(BytesIO(MINIMAL))
meta = signers.PdfSignatureMetadata(field_name='Sig1')
with _simple_sess() as sess:
signer = pkcs11.PKCS11Signer(
sess, 'signer', other_certs_to_pull=default_other_certs,
bulk_fetch=bulk_fetch, key_label='NoSuchKeyExists'
)
with pytest.raises(NoSuchKey):
signers.sign_pdf(w, meta, signer=signer)
@pytest.mark.skipif(SKIP_PKCS11, reason="no PKCS#11 module")
@pytest.mark.parametrize('bulk_fetch', [True, False])
@freeze_time('2020-11-01')
def test_wrong_cert(bulk_fetch):
w = IncrementalPdfFileWriter(BytesIO(MINIMAL))
meta = signers.PdfSignatureMetadata(field_name='Sig1')
with _simple_sess() as sess:
signer = pkcs11.PKCS11Signer(
sess, key_label='signer', other_certs_to_pull=default_other_certs,
bulk_fetch=bulk_fetch, cert_id=binascii.unhexlify(b'deadbeef')
)
with pytest.raises(PKCS11Error, match='Could not find.*with ID'):
signers.sign_pdf(w, meta, signer=signer)
@pytest.mark.skipif(SKIP_PKCS11, reason="no PKCS#11 module")
@freeze_time('2020-11-01')
def test_provided_certs():
w = IncrementalPdfFileWriter(BytesIO(MINIMAL))
meta = signers.PdfSignatureMetadata(field_name='Sig1')
signer_cert = TESTING_CA.get_cert(CertLabel('signer1'))
with _simple_sess() as sess:
signer = pkcs11.PKCS11Signer(
sess, key_label='signer',
signing_cert=signer_cert,
ca_chain={
TESTING_CA.get_cert(CertLabel('root')),
TESTING_CA.get_cert(CertLabel('interm')),
},
)
out = signers.sign_pdf(w, meta, signer=signer)
r = PdfFileReader(out)
emb = r.embedded_signatures[0]
assert emb.field_name == 'Sig1'
assert emb.signer_cert.dump() == signer_cert.dump()
# this will fail if the intermediate cert is not present
val_trusted(emb)
@pytest.mark.skipif(SKIP_PKCS11, reason="no PKCS#11 module")
@pytest.mark.parametrize('bulk_fetch', [True, False])
@freeze_time('2020-11-01')
def test_signer_provided_others_pulled(bulk_fetch):
w = IncrementalPdfFileWriter(BytesIO(MINIMAL))
meta = signers.PdfSignatureMetadata(field_name='Sig1')
with _simple_sess() as sess:
signer = pkcs11.PKCS11Signer(
sess, 'signer',
ca_chain={
TESTING_CA.get_cert(CertLabel('root')),
TESTING_CA.get_cert(CertLabel('interm')),
},
)
out = signers.sign_pdf(w, meta, signer=signer)
r = PdfFileReader(out)
emb = r.embedded_signatures[0]
assert emb.field_name == 'Sig1'
val_trusted(emb)
@pytest.mark.skipif(SKIP_PKCS11, reason="no PKCS#11 module")
@pytest.mark.parametrize('bulk_fetch', [True, False])
@freeze_time('2020-11-01')
def test_signer_pulled_others_provided(bulk_fetch):
w = IncrementalPdfFileWriter(BytesIO(MINIMAL))
meta = signers.PdfSignatureMetadata(field_name='Sig1')
signer_cert = TESTING_CA.get_cert(CertLabel('signer1'))
with _simple_sess() as sess:
signer = pkcs11.PKCS11Signer(
sess, key_label='signer',
signing_cert=signer_cert, bulk_fetch=bulk_fetch,
other_certs_to_pull=default_other_certs
)
out = signers.sign_pdf(w, meta, signer=signer)
r = PdfFileReader(out)
emb = r.embedded_signatures[0]
assert emb.field_name == 'Sig1'
assert emb.signer_cert.dump() == signer_cert.dump()
# this will fail if the intermediate cert is not present
val_trusted(emb)
@pytest.mark.skipif(SKIP_PKCS11, reason="no PKCS#11 module")
@freeze_time('2020-11-01')
def test_unclear_key_label():
signer_cert = TESTING_CA.get_cert(CertLabel('signer1'))
with _simple_sess() as sess:
with pytest.raises(SigningError, match='\'key_label\'.*must be prov'):
pkcs11.PKCS11Signer(
sess, signing_cert=signer_cert,
other_certs_to_pull=default_other_certs,
)
@pytest.mark.skipif(SKIP_PKCS11, reason="no PKCS#11 module")
@freeze_time('2020-11-01')
def test_unclear_signer_cert():
with _simple_sess() as sess:
with pytest.raises(SigningError, match='Please specify'):
pkcs11.PKCS11Signer(
sess, other_certs_to_pull=default_other_certs,
)
@pytest.mark.skipif(SKIP_PKCS11, reason="no PKCS#11 module")
@pytest.mark.parametrize('bulk_fetch', [True, False])
@freeze_time('2020-11-01')
def test_simple_sign_dsa(bulk_fetch):
w = IncrementalPdfFileWriter(BytesIO(MINIMAL))
meta = signers.PdfSignatureMetadata(
field_name='Sig1', md_algorithm='sha256'
)
with _simple_sess(token='testdsa') as sess:
signer = pkcs11.PKCS11Signer(
sess, 'signer', other_certs_to_pull=default_other_certs,
bulk_fetch=bulk_fetch
)
out = signers.sign_pdf(w, meta, signer=signer)
r = PdfFileReader(out)
emb = r.embedded_signatures[0]
assert emb.field_name == 'Sig1'
val_trusted(emb, vc=SIMPLE_DSA_V_CONTEXT())
@pytest.mark.skipif(SKIP_PKCS11, reason="no PKCS#11 module")
@pytest.mark.parametrize('bulk_fetch', [True, False])
@freeze_time('2020-11-01')
def test_simple_sign_ecdsa(bulk_fetch):
w = IncrementalPdfFileWriter(BytesIO(MINIMAL))
meta = signers.PdfSignatureMetadata(
field_name='Sig1', md_algorithm='sha256'
)
with _simple_sess(token='testecdsa') as sess:
signer = pkcs11.PKCS11Signer(
sess, 'signer', other_certs_to_pull=default_other_certs,
bulk_fetch=bulk_fetch, use_raw_mechanism=True
)
out = signers.sign_pdf(w, meta, signer=signer)
r = PdfFileReader(out)
emb = r.embedded_signatures[0]
assert emb.field_name == 'Sig1'
val_trusted(emb, vc=SIMPLE_ECC_V_CONTEXT())
@pytest.mark.skipif(SKIP_PKCS11, reason="no PKCS#11 module")
def test_simple_sign_from_config():
w = IncrementalPdfFileWriter(BytesIO(MINIMAL))
meta = signers.PdfSignatureMetadata(field_name='Sig1')
config = PKCS11SignatureConfig(
module_path=pkcs11_test_module, token_label='testrsa',
cert_label='signer', user_pin='1234', other_certs_to_pull=None
)
with PKCS11SigningContext(config) as signer:
out = signers.sign_pdf(w, meta, signer=signer)
r = PdfFileReader(out)
emb = r.embedded_signatures[0]
assert emb.field_name == 'Sig1'
val_trusted(emb)
@pytest.mark.skipif(SKIP_PKCS11, reason="no PKCS#11 module")
@pytest.mark.parametrize('bulk_fetch,pss', [(True, True), (False, False),
                                            (True, False), (False, True)])
@freeze_time('2020-11-01')
async def test_simple_sign_from_config_async(bulk_fetch, pss):
w = IncrementalPdfFileWriter(BytesIO(MINIMAL))
meta = signers.PdfSignatureMetadata(field_name='Sig1')
config = PKCS11SignatureConfig(
module_path=pkcs11_test_module, token_label='testrsa',
other_certs_to_pull=default_other_certs,
bulk_fetch=bulk_fetch, prefer_pss=pss,
cert_label='signer', user_pin='1234'
)
async with PKCS11SigningContext(config=config) as signer:
pdf_signer = signers.PdfSigner(meta, signer)
out = await pdf_signer.async_sign_pdf(w)
r = PdfFileReader(out)
emb = r.embedded_signatures[0]
assert emb.field_name == 'Sig1'
await async_val_trusted(emb)
@pytest.mark.skipif(SKIP_PKCS11, reason="no PKCS#11 module")
@pytest.mark.parametrize('bulk_fetch,pss', [(True, True), (False, False),
                                            (True, False), (False, True)])
async def test_async_sign_many_concurrent(bulk_fetch, pss):
concurrent_count = 10
config = PKCS11SignatureConfig(
module_path=pkcs11_test_module, token_label='testrsa',
other_certs_to_pull=default_other_certs,
bulk_fetch=bulk_fetch, prefer_pss=pss,
cert_label='signer', user_pin='1234'
)
async with PKCS11SigningContext(config=config) as signer:
async def _job(_i):
w = IncrementalPdfFileWriter(BytesIO(MINIMAL))
meta = signers.PdfSignatureMetadata(
field_name='Sig1', reason=f"PKCS#11 concurrency test #{_i}!"
)
pdf_signer = signers.PdfSigner(meta, signer)
sig_result = await pdf_signer.async_sign_pdf(w, in_place=True)
await asyncio.sleep(2)
return _i, sig_result
jobs = asyncio.as_completed(map(_job, range(1, concurrent_count + 1)))
for finished_job in jobs:
i, out = await finished_job
r = PdfFileReader(out)
emb = r.embedded_signatures[0]
assert emb.field_name == 'Sig1'
assert emb.sig_object['/Reason'].endswith(f"#{i}!")
with freeze_time("2020-11-01"):
await async_val_trusted(emb)
# @pytest.mark.skipif(SKIP_PKCS11, reason="no PKCS#11 module")
@pytest.mark.skip # FIXME flaky test, sometimes coredumps with SoftHSM
@pytest.mark.parametrize('bulk_fetch,pss', [(True, True), (False, False),
                                            (True, False), (False, True)])
async def test_async_sign_raw_many_concurrent_no_preload_objs(bulk_fetch, pss):
concurrent_count = 10
# don't instantiate through PKCS11SigningContext
# also, just sign raw strings, we want to exercise the correctness of
# the awaiting logic in sign_raw for object loading
with _simple_sess() as sess:
signer = pkcs11.PKCS11Signer(
sess, 'signer', other_certs_to_pull=default_other_certs,
bulk_fetch=bulk_fetch
)
async def _job(_i):
payload = f"PKCS#11 concurrency test #{_i}!".encode('utf8')
sig_result = await signer.async_sign_raw(payload, 'sha256')
await asyncio.sleep(2)
return _i, sig_result
jobs = asyncio.as_completed(map(_job, range(1, concurrent_count + 1)))
for finished_job in jobs:
i, sig = await finished_job
general._validate_raw(
signature=sig,
signed_data=f"PKCS#11 concurrency test #{i}!".encode('utf8'),
cert=signer.signing_cert,
md_algorithm='sha256',
signature_algorithm=SignedDigestAlgorithm(
{'algorithm': 'sha256_rsa'}
)
)
|
[] |
[] |
[
"PKCS11_TEST_MODULE"
] |
[]
|
["PKCS11_TEST_MODULE"]
|
python
| 1 | 0 | |
aiven/resource_opensearch_test.go
|
package aiven
import (
"fmt"
"os"
"strings"
"testing"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform"
)
// Opensearch service tests
func TestAccAivenService_os(t *testing.T) {
resourceName := "aiven_opensearch.bar-os"
rName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)
resource.ParallelTest(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
ProviderFactories: testAccProviderFactories,
CheckDestroy: testAccCheckAivenServiceResourceDestroy,
Steps: []resource.TestStep{
{
Config: testAccOpensearchServiceResource(rName),
Check: resource.ComposeTestCheckFunc(
testAccCheckAivenServiceCommonAttributes("data.aiven_opensearch.service-os"),
testAccCheckAivenServiceOSAttributes("data.aiven_opensearch.service-os"),
resource.TestCheckResourceAttr(resourceName, "service_name", fmt.Sprintf("test-acc-os-%s", rName)),
resource.TestCheckResourceAttr(resourceName, "state", "RUNNING"),
resource.TestCheckResourceAttr(resourceName, "project", os.Getenv("AIVEN_PROJECT_NAME")),
resource.TestCheckResourceAttr(resourceName, "cloud_name", "google-europe-west1"),
resource.TestCheckResourceAttr(resourceName, "maintenance_window_dow", "monday"),
resource.TestCheckResourceAttr(resourceName, "maintenance_window_time", "10:00:00"),
resource.TestCheckResourceAttr(resourceName, "state", "RUNNING"),
resource.TestCheckResourceAttr(resourceName, "termination_protection", "false"),
),
},
},
})
}
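// Illustrative invocation (project name is a placeholder): acceptance tests require the target
// Aiven project to be exported, together with the provider's API credentials, e.g.
//   AIVEN_PROJECT_NAME=my-project TF_ACC=1 go test ./aiven/ -run TestAccAivenService_os -v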
func testAccOpensearchServiceResource(name string) string {
return fmt.Sprintf(`
data "aiven_project" "foo-es" {
project = "%s"
}
resource "aiven_opensearch" "bar-os" {
project = data.aiven_project.foo-es.project
cloud_name = "google-europe-west1"
plan = "startup-4"
service_name = "test-acc-os-%s"
maintenance_window_dow = "monday"
maintenance_window_time = "10:00:00"
opensearch_user_config {
opensearch_dashboards {
enabled = true
}
public_access {
opensearch = true
opensearch_dashboards = true
}
index_patterns {
pattern = "logs_*_foo_*"
max_index_count = 3
sorting_algorithm = "creation_date"
}
index_patterns {
pattern = "logs_*_bar_*"
max_index_count = 15
sorting_algorithm = "creation_date"
}
}
}
data "aiven_opensearch" "service-os" {
service_name = aiven_opensearch.bar-os.service_name
project = aiven_opensearch.bar-os.project
depends_on = [aiven_opensearch.bar-os]
}
`, os.Getenv("AIVEN_PROJECT_NAME"), name)
}
func testAccCheckAivenServiceOSAttributes(n string) resource.TestCheckFunc {
return func(s *terraform.State) error {
r := s.RootModule().Resources[n]
a := r.Primary.Attributes
if !strings.Contains(a["service_type"], "opensearch") {
return fmt.Errorf("expected to get a correct service type from Aiven, got :%s", a["service_type"])
}
		if a["opensearch_dashboards_uri"] == "" {
return fmt.Errorf("expected opensearch_dashboards_uri to not be empty")
}
if a["opensearch_user_config.0.ip_filter.0"] != "0.0.0.0/0" {
return fmt.Errorf("expected to get a correct ip_filter from Aiven")
}
if a["opensearch_user_config.0.public_access.0.opensearch"] != "true" {
return fmt.Errorf("expected to get opensearch.public_access enabled from Aiven")
}
if a["opensearch_user_config.0.public_access.0.prometheus"] != "" {
return fmt.Errorf("expected to get a correct public_access prometheus from Aiven")
}
return nil
}
}
|
[
"\"AIVEN_PROJECT_NAME\"",
"\"AIVEN_PROJECT_NAME\""
] |
[] |
[
"AIVEN_PROJECT_NAME"
] |
[]
|
["AIVEN_PROJECT_NAME"]
|
go
| 1 | 0 | |
cleverbot.py
|
#!/usr/bin/python3
from cleverwrap import CleverWrap
import pyttsx3 as pyttsx  # the pyttsx3 fork provides the same API on Python 3
import os
os.environ["HTTPS_PROXY"] = "http://usr_name:pass@proxy:port"
cw = CleverWrap("API_KEY")
a='y'
engine = pyttsx.init()
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[4].id)
rate = engine.getProperty('rate')
engine.setProperty('rate', rate-30)
engine.say('Hellllllo there. I\'m scarlett')
engine.runAndWait()
while a != 'n':
    ans = cw.say(input('You: '))
    print('Kiara: ' + ans)
    engine.say(ans)
    engine.runAndWait()
    #a = input('wanna chat more(y/n): ')
cw.reset()
|
[] |
[] |
[
"HTTPS_PROXY"
] |
[]
|
["HTTPS_PROXY"]
|
python
| 1 | 0 | |
main.go
|
package main
import (
"flag"
"net/http"
"strconv"
"strings"
"sync"
"time"
"github.com/sstarcher/helm-exporter/config"
"github.com/sstarcher/helm-exporter/registries"
cmap "github.com/orcaman/concurrent-map"
log "github.com/sirupsen/logrus"
"os"
// Import to initialize client auth plugins.
_ "k8s.io/client-go/plugin/pkg/client/auth"
"helm.sh/helm/v3/pkg/action"
"helm.sh/helm/v3/pkg/cli"
"k8s.io/client-go/informers"
"k8s.io/client-go/tools/cache"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/facebookgo/flagenv"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
var (
settings = cli.New()
clients = cmap.New()
mutex = sync.RWMutex{}
statsInfo *prometheus.GaugeVec
statsTimestamp *prometheus.GaugeVec
namespaces = flag.String("namespaces", "", "namespaces to monitor. Defaults to all")
logLevel = flag.String("log-level", log.ErrorLevel.String(), "The log level to use")
configFile = flag.String("config", "", "Configfile to load for helm overwrite registries. Default is empty")
	intervalDuration  = flag.String("interval-duration", "0", "Enable metrics gathering in the background at the given interval. If not provided, the helm stats are computed synchronously. Default is 0")
infoMetric = flag.Bool("info-metric", true, "Generate info metric. Defaults to true")
timestampMetric = flag.Bool("timestamp-metric", true, "Generate timestamps metric. Defaults to true")
fetchLatest = flag.Bool("latest-chart-version", true, "Attempt to fetch the latest chart version from registries. Defaults to true")
statusCodeMap = map[string]float64{
"unknown": 0,
"deployed": 1,
"uninstalled": 2,
"superseded": 3,
"failed": -1,
"uninstalling": 5,
"pending-install": 6,
"pending-upgrade": 7,
"pending-rollback": 8,
}
prometheusHandler = promhttp.Handler()
)
func initFlags() config.AppConfig {
cliFlags := new(config.AppConfig)
cliFlags.ConfigFile = *configFile
return *cliFlags
}
func configureMetrics() (info *prometheus.GaugeVec, timestamp *prometheus.GaugeVec) {
	if *infoMetric {
info = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Name: "helm_chart_info",
Help: "Information on helm releases",
}, []string{
"chart",
"release",
"version",
"appVersion",
"updated",
"namespace",
"latestVersion",
})
}
	if *timestampMetric {
timestamp = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Name: "helm_chart_timestamp",
Help: "Timestamps of helm releases",
}, []string{
"chart",
"release",
"version",
"appVersion",
"updated",
"namespace",
"latestVersion",
})
}
return
}
func runStats(config config.Config, info *prometheus.GaugeVec, timestamp *prometheus.GaugeVec) {
if info != nil {
info.Reset()
}
if timestamp != nil {
timestamp.Reset()
}
for _, client := range clients.Items() {
list := action.NewList(client.(*action.Configuration))
items, err := list.Run()
if err != nil {
log.Warnf("got error while listing %v", err)
continue
}
for _, item := range items {
chart := item.Chart.Name()
releaseName := item.Name
version := item.Chart.Metadata.Version
appVersion := item.Chart.AppVersion()
updated := item.Info.LastDeployed.Unix() * 1000
namespace := item.Namespace
status := statusCodeMap[item.Info.Status.String()]
latestVersion := ""
if *fetchLatest {
latestVersion = getLatestChartVersionFromHelm(item.Chart.Name(), config.HelmRegistries)
}
if info != nil {
info.WithLabelValues(chart, releaseName, version, appVersion, strconv.FormatInt(updated, 10), namespace, latestVersion).Set(status)
}
if timestamp != nil {
timestamp.WithLabelValues(chart, releaseName, version, appVersion, strconv.FormatInt(updated, 10), namespace, latestVersion).Set(float64(updated))
}
}
}
}
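// Illustrative scrape output produced by runStats (label values are placeholders):
//
//   helm_chart_info{chart="nginx",release="my-nginx",version="1.2.3",appVersion="1.19",updated="1600000000000",namespace="default",latestVersion="1.2.4"} 1
//
// where the gauge value is the status code from statusCodeMap (1 == deployed).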
func getLatestChartVersionFromHelm(name string, helmRegistries registries.HelmRegistries) (version string) {
version = helmRegistries.GetLatestVersionFromHelm(name)
log.WithField("chart", name).Debugf("last chart repo version is %v", version)
return
}
func runStatsPeriodically(interval time.Duration, config config.Config) {
for {
info, timestamp := configureMetrics()
runStats(config, info, timestamp)
registerMetrics(prometheus.DefaultRegisterer, info, timestamp)
time.Sleep(interval)
}
}
func registerMetrics(register prometheus.Registerer, info, timestamp *prometheus.GaugeVec) {
mutex.Lock()
defer mutex.Unlock()
if statsInfo != nil {
register.Unregister(statsInfo)
}
register.MustRegister(info)
statsInfo = info
if statsTimestamp != nil {
register.Unregister(statsTimestamp)
}
register.MustRegister(timestamp)
statsTimestamp = timestamp
}
func newHelmStatsHandler(config config.Config, synchrone bool) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if synchrone {
runStats(config, statsInfo, statsTimestamp)
} else {
mutex.RLock()
defer mutex.RUnlock()
}
prometheusHandler.ServeHTTP(w, r)
}
}
func healthz(w http.ResponseWriter, r *http.Request) {
}
func connect(namespace string) {
actionConfig := new(action.Configuration)
err := actionConfig.Init(settings.RESTClientGetter(), namespace, os.Getenv("HELM_DRIVER"), log.Infof)
if err != nil {
log.Warnf("failed to connect to %s with %v", namespace, err)
} else {
log.Infof("Watching namespace %s", namespace)
clients.Set(namespace, actionConfig)
}
}
func informer() {
actionConfig := new(action.Configuration)
err := actionConfig.Init(settings.RESTClientGetter(), settings.Namespace(), os.Getenv("HELM_DRIVER"), log.Infof)
if err != nil {
log.Fatal(err)
}
clientset, err := actionConfig.KubernetesClientSet()
if err != nil {
log.Fatal(err)
}
factory := informers.NewSharedInformerFactory(clientset, 0)
informer := factory.Core().V1().Namespaces().Informer()
stopper := make(chan struct{})
defer close(stopper)
informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
// "k8s.io/apimachinery/pkg/apis/meta/v1" provides an Object
// interface that allows us to get metadata easily
mObj := obj.(v1.Object)
connect(mObj.GetName())
},
DeleteFunc: func(obj interface{}) {
mObj := obj.(v1.Object)
log.Infof("Removing namespace %s", mObj.GetName())
clients.Remove(mObj.GetName())
},
})
informer.Run(stopper)
}
func main() {
flagenv.Parse()
flag.Parse()
cliFlags := initFlags()
config := config.LoadConfiguration(cliFlags.ConfigFile)
l, err := log.ParseLevel(*logLevel)
if err != nil {
log.Fatal(err)
}
log.SetLevel(l)
runIntervalDuration, err := time.ParseDuration(*intervalDuration)
if err != nil {
log.Fatalf("invalid duration `%s`: %s", *intervalDuration, err)
}
if namespaces == nil || *namespaces == "" {
go informer()
} else {
for _, namespace := range strings.Split(*namespaces, ",") {
connect(namespace)
}
}
if runIntervalDuration != 0 {
go runStatsPeriodically(runIntervalDuration, config)
} else {
info, timestamp := configureMetrics()
registerMetrics(prometheus.DefaultRegisterer, info, timestamp)
}
http.HandleFunc("/metrics", newHelmStatsHandler(config, runIntervalDuration == 0))
http.HandleFunc("/healthz", healthz)
log.Fatal(http.ListenAndServe(":9571", nil))
}
|
[
"\"HELM_DRIVER\"",
"\"HELM_DRIVER\""
] |
[] |
[
"HELM_DRIVER"
] |
[]
|
["HELM_DRIVER"]
|
go
| 1 | 0 | |
tests/impersonation.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# from __future__ import print_function
import errno
import os
import subprocess
import unittest
import logging
from airflow import jobs, models
from airflow.utils.state import State
from airflow.utils.timezone import datetime
DEV_NULL = '/dev/null'
TEST_DAG_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
DEFAULT_DATE = datetime(2015, 1, 1)
TEST_USER = 'airflow_test_user'
logger = logging.getLogger(__name__)
# TODO(aoen): Adding/remove a user as part of a test is very bad (especially if the user
# already existed to begin with on the OS), this logic should be moved into a test
# that is wrapped in a container like docker so that the user can be safely added/removed.
# When this is done we can also modify the sudoers file to ensure that useradd will work
# without any manual modification of the sudoers file by the agent that is running these
# tests.
class ImpersonationTest(unittest.TestCase):
def setUp(self):
self.dagbag = models.DagBag(
dag_folder=TEST_DAG_FOLDER,
include_examples=False,
)
logger.info('Loaded DAGS:')
logger.info(self.dagbag.dagbag_report())
try:
subprocess.check_output(['sudo', 'useradd', '-m', TEST_USER, '-g',
str(os.getegid())])
except OSError as e:
if e.errno == errno.ENOENT:
raise unittest.SkipTest(
"The 'useradd' command did not exist so unable to test "
"impersonation; Skipping Test. These tests can only be run on a "
"linux host that supports 'useradd'."
)
else:
raise unittest.SkipTest(
"The 'useradd' command exited non-zero; Skipping tests. Does the "
"current user have permission to run 'useradd' without a password "
"prompt (check sudoers file)?"
)
def tearDown(self):
subprocess.check_output(['sudo', 'userdel', '-r', TEST_USER])
def run_backfill(self, dag_id, task_id):
dag = self.dagbag.get_dag(dag_id)
dag.clear()
jobs.BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE).run()
ti = models.TaskInstance(
task=dag.get_task(task_id),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_impersonation(self):
"""
Tests that impersonating a unix user works
"""
self.run_backfill(
'test_impersonation',
'test_impersonated_user'
)
def test_no_impersonation(self):
"""
If default_impersonation=None, tests that the job is run
as the current user (which will be a sudoer)
"""
self.run_backfill(
'test_no_impersonation',
'test_superuser',
)
def test_default_impersonation(self):
"""
If default_impersonation=TEST_USER, tests that the job defaults
to running as TEST_USER for a test without run_as_user set
"""
os.environ['AIRFLOW__CORE__DEFAULT_IMPERSONATION'] = TEST_USER
try:
self.run_backfill(
'test_default_impersonation',
'test_deelevated_user'
)
finally:
del os.environ['AIRFLOW__CORE__DEFAULT_IMPERSONATION']
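    # Illustrative equivalent: the environment variable above maps to the airflow.cfg option
    #   [core]
    #   default_impersonation = airflow_test_user
    # so the same behaviour could be configured statically instead of via the environment.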
def test_impersonation_custom(self):
"""
Tests that impersonation using a unix user works with custom packages in
PYTHONPATH
"""
# PYTHONPATH is already set in script triggering tests
assert 'PYTHONPATH' in os.environ
self.run_backfill(
'impersonation_with_custom_pkg',
'exec_python_fn'
)
def test_impersonation_subdag(self):
"""
Tests that impersonation using a subdag correctly passes the right configuration
:return:
"""
self.run_backfill(
'impersonation_subdag',
'test_subdag_operation'
)
|
[] |
[] |
[
"AIRFLOW__CORE__DEFAULT_IMPERSONATION"
] |
[]
|
["AIRFLOW__CORE__DEFAULT_IMPERSONATION"]
|
python
| 1 | 0 | |
pkg/util/util.go
|
package util
import (
"archive/zip"
"bufio"
"context"
"crypto/rand"
"fmt"
"io"
"io/ioutil"
"math/big"
"net"
"net/http"
"net/url"
"os"
"os/exec"
"os/user"
"path"
"path/filepath"
"regexp"
"runtime"
"sort"
"strconv"
"strings"
"time"
"github.com/gobwas/glob"
"github.com/google/go-github/github"
"github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
kvalidation "k8s.io/apimachinery/pkg/util/validation"
"k8s.io/klog"
)
// HTTPRequestTimeout configures timeout of all HTTP requests
const (
HTTPRequestTimeout = 20 * time.Second // HTTPRequestTimeout configures timeout of all HTTP requests
ResponseHeaderTimeout = 10 * time.Second // ResponseHeaderTimeout is the timeout to retrieve the server's response headers
)
var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz")
// 63 is the max length of a DeploymentConfig in Openshift and we also have to take into account
// that each component also gets a volume that uses the component name suffixed with -s2idata
const maxAllowedNamespacedStringLength = 63 - len("-s2idata") - 1
// This value can be provided to set a separate directory for the user's 'homedir' resolution
// note: for mocking purposes ONLY
var customHomeDir = os.Getenv("CUSTOM_HOMEDIR")
// ResourceRequirementInfo holds resource quantity before transformation into its appropriate form in container spec
type ResourceRequirementInfo struct {
ResourceType corev1.ResourceName
MinQty resource.Quantity
MaxQty resource.Quantity
}
// ConvertLabelsToSelector converts the given labels to selector
func ConvertLabelsToSelector(labels map[string]string) string {
var selector string
isFirst := true
for k, v := range labels {
if isFirst {
isFirst = false
if v == "" {
selector = selector + fmt.Sprintf("%v", k)
} else {
selector = fmt.Sprintf("%v=%v", k, v)
}
} else {
if v == "" {
selector = selector + fmt.Sprintf(",%v", k)
} else {
selector = selector + fmt.Sprintf(",%v=%v", k, v)
}
}
}
return selector
}
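// Example (illustrative): ConvertLabelsToSelector(map[string]string{"app": "myapp", "tier": "backend"})
// yields a selector such as "app=myapp,tier=backend" (map iteration order is not guaranteed).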
// GenerateRandomString generates a random string of lower case characters of
// the given size
func GenerateRandomString(n int) string {
b := make([]rune, n)
for i := range b {
		// this error is ignored because it fails only when the 2nd arg of Int() is less than 0,
		// which won't happen
n, _ := rand.Int(rand.Reader, big.NewInt(int64(len(letterRunes))))
b[i] = letterRunes[n.Int64()]
}
return string(b)
}
// In checks if the value is in the array
func In(arr []string, value string) bool {
for _, item := range arr {
if item == value {
return true
}
}
return false
}
// NamespaceOpenShiftObject joins componentName and applicationName with a hyphen
func NamespaceOpenShiftObject(componentName string, applicationName string) (string, error) {
// Error if it's blank
if componentName == "" {
return "", errors.New("namespacing: component name cannot be blank")
}
// Error if it's blank
if applicationName == "" {
return "", errors.New("namespacing: application name cannot be blank")
}
// Return the hyphenated namespaced name
originalName := fmt.Sprintf("%s-%s", strings.Replace(componentName, "/", "-", -1), applicationName)
truncatedName := TruncateString(originalName, maxAllowedNamespacedStringLength)
if originalName != truncatedName {
klog.V(4).Infof("The combination of application %s and component %s was too long so the final name was truncated to %s",
applicationName, componentName, truncatedName)
}
return truncatedName, nil
}
// ExtractComponentType returns only the component type part of the passed component type (unqualified, fully qualified, versioned, etc. and their combinations) for use as the component name
// Possible types of parameters:
// 1. "myproject/python:3.5" -- Return python
// 2. "python:3.5" -- Return python
// 3. nodejs -- Return nodejs
func ExtractComponentType(namespacedVersionedComponentType string) string {
s := strings.Split(namespacedVersionedComponentType, "/")
versionedString := s[0]
if len(s) == 2 {
versionedString = s[1]
}
s = strings.Split(versionedString, ":")
return s[0]
}
// ParseComponentImageName returns
// 1. image name
// 2. component type i.e, builder image name
// 3. component name default value is component type else the user requested component name
// 4. component version which is by default latest else version passed with builder image name
func ParseComponentImageName(imageName string) (string, string, string, string) {
// We don't have to check it anymore, Args check made sure that args has at least one item
// and no more than two
// "Default" values
componentImageName := imageName
componentType := imageName
componentName := ExtractComponentType(componentType)
componentVersion := "latest"
	// Check if componentType includes ":"; if so, we need to split it to extract the version
if strings.ContainsAny(componentImageName, ":") {
versionSplit := strings.Split(imageName, ":")
componentType = versionSplit[0]
componentName = ExtractComponentType(componentType)
componentVersion = versionSplit[1]
}
return componentImageName, componentType, componentName, componentVersion
}
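// Example (illustrative): ParseComponentImageName("myproject/python:3.5") returns
// ("myproject/python:3.5", "myproject/python", "python", "3.5"), while
// ParseComponentImageName("nodejs") returns ("nodejs", "nodejs", "nodejs", "latest").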
const WIN = "windows"
// ReadFilePath Reads file path form URL file:///C:/path/to/file to C:\path\to\file
func ReadFilePath(u *url.URL, os string) string {
location := u.Path
if os == WIN {
location = strings.Replace(u.Path, "/", "\\", -1)
location = location[1:]
}
return location
}
// GenFileURL Converts file path on windows to /C:/path/to/file to work in URL
func GenFileURL(location string, os ...string) string {
// param os is made variadic only for the purpose of UTs but need not be passed mandatorily
currOS := runtime.GOOS
if len(os) > 0 {
currOS = os[0]
}
urlPath := location
if currOS == WIN {
urlPath = "/" + strings.Replace(location, "\\", "/", -1)
}
return "file://" + urlPath
}
// ConvertKeyValueStringToMap converts String Slice of Parameters to a Map[String]string
// Each value of the slice is expected to be in the key=value format
// Values that do not conform to this "spec" trigger a fatal error
func ConvertKeyValueStringToMap(params []string) map[string]string {
result := make(map[string]string, len(params))
for _, param := range params {
str := strings.Split(param, "=")
if len(str) != 2 {
klog.Fatalf("Parameter %s is not in the expected key=value format", param)
} else {
result[str[0]] = str[1]
}
}
return result
}
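// Example (illustrative): ConvertKeyValueStringToMap([]string{"type=backend", "app=myapp"})
// returns map[string]string{"type": "backend", "app": "myapp"}.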
// TruncateString truncates passed string to given length
// Note: if -1 is passed, the original string is returned
func TruncateString(str string, maxLen int) string {
if maxLen == -1 {
return str
}
if len(str) > maxLen {
return str[:maxLen]
}
return str
}
// GetAbsPath returns the absolute path of the passed file path, resolving even ~ to the user home dir;
// any other symbols that are only shell expanded could also be handled here
func GetAbsPath(path string) (string, error) {
// Only shell resolves `~` to home so handle it specially
var dir string
if strings.HasPrefix(path, "~") {
if len(customHomeDir) > 0 {
dir = customHomeDir
} else {
usr, err := user.Current()
if err != nil {
return path, errors.Wrapf(err, "unable to resolve %s to absolute path", path)
}
dir = usr.HomeDir
}
if len(path) > 1 {
path = filepath.Join(dir, path[1:])
} else {
path = dir
}
}
path, err := filepath.Abs(path)
if err != nil {
return path, errors.Wrapf(err, "unable to resolve %s to absolute path", path)
}
return path, nil
}
// GetRandomName returns a randomly generated name which can be used for naming odo and/or openshift entities
// prefix: Desired prefix part of the name
// prefixMaxLen: Desired maximum length of prefix part of random name; if -1 is passed, no limit on length will be enforced
// existList: List to verify that the returned name does not already exist
// retries: number of retries to try generating a unique name
// Returns:
// 1. randomname: is prefix-suffix, where:
// prefix: string passed as prefix or fetched current directory of length same as the passed prefixMaxLen
// suffix: 4 char random string
// 2. error: if requested number of retries also failed to generate unique name
func GetRandomName(prefix string, prefixMaxLen int, existList []string, retries int) (string, error) {
prefix = TruncateString(GetDNS1123Name(strings.ToLower(prefix)), prefixMaxLen)
name := fmt.Sprintf("%s-%s", prefix, GenerateRandomString(4))
//Create a map of existing names for efficient iteration to find if the newly generated name is same as any of the already existing ones
existingNames := make(map[string]bool)
for _, existingName := range existList {
existingNames[existingName] = true
}
// check if generated name is already used in the existList
if _, ok := existingNames[name]; ok {
prevName := name
trial := 0
		// keep generating names until the generated name is unique; the loop terminates when the name is unique and hence the for condition is false
for ok {
trial = trial + 1
prevName = name
			// Attempt unique name generation by concatenating the previous name with a random string of length 4
prevName = fmt.Sprintf("%s-%s", prevName, GenerateRandomString(4))
_, ok = existingNames[prevName]
if trial >= retries {
// Avoid infinite loops and fail after passed number of retries
return "", fmt.Errorf("failed to generate a unique name even after %d retrials", retries)
}
}
// If found to be unique, set name as generated name
name = prevName
}
// return name
return name, nil
}
// GetDNS1123Name Converts passed string into DNS-1123 string
func GetDNS1123Name(str string) string {
nonAllowedCharsRegex := regexp.MustCompile(`[^a-zA-Z0-9_-]+`)
withReplacedChars := strings.Replace(
nonAllowedCharsRegex.ReplaceAllString(str, "-"),
"--", "-", -1)
return removeNonAlphaSuffix(removeNonAlphaPrefix(withReplacedChars))
}
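// Example (illustrative): GetDNS1123Name("nodejs-ex.git") returns "nodejs-ex-git", and
// GetDNS1123Name("-hello world-") returns "hello-world".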
func removeNonAlphaPrefix(input string) string {
regex := regexp.MustCompile("^[^a-zA-Z0-9]+(.*)$")
return regex.ReplaceAllString(input, "$1")
}
func removeNonAlphaSuffix(input string) string {
suffixRegex := regexp.MustCompile("^(.*?)[^a-zA-Z0-9]+$") //regex that strips all trailing non alpha-numeric chars
matches := suffixRegex.FindStringSubmatch(input)
matchesLength := len(matches)
if matchesLength == 0 {
// in this case the string does not contain a non-alphanumeric suffix
return input
} else {
// in this case we return the smallest match which in the last element in the array
return matches[matchesLength-1]
}
}
// SliceDifference returns the values of s2 that do not exist in s1
func SliceDifference(s1 []string, s2 []string) []string {
mb := map[string]bool{}
for _, x := range s1 {
mb[x] = true
}
difference := []string{}
for _, x := range s2 {
if _, ok := mb[x]; !ok {
difference = append(difference, x)
}
}
return difference
}
// OpenBrowser opens the URL within the users default browser
func OpenBrowser(url string) error {
var err error
switch runtime.GOOS {
case "linux":
err = exec.Command("xdg-open", url).Start()
case "windows":
err = exec.Command("rundll32", "url.dll,FileProtocolHandler", url).Start()
case "darwin":
err = exec.Command("open", url).Start()
default:
err = fmt.Errorf("unsupported platform")
}
if err != nil {
return err
}
return nil
}
// FetchResourceQuantity takes passed min, max and requested resource quantities and returns min and max resource requests
func FetchResourceQuantity(resourceType corev1.ResourceName, min string, max string, request string) (*ResourceRequirementInfo, error) {
if min == "" && max == "" && request == "" {
return nil, nil
}
// If minimum and maximum both are passed they carry highest priority
// Otherwise, use the request as min and max
var minResource resource.Quantity
var maxResource resource.Quantity
if min != "" {
resourceVal, err := resource.ParseQuantity(min)
if err != nil {
return nil, err
}
minResource = resourceVal
}
if max != "" {
resourceVal, err := resource.ParseQuantity(max)
if err != nil {
return nil, err
}
maxResource = resourceVal
}
if request != "" && (min == "" || max == "") {
resourceVal, err := resource.ParseQuantity(request)
if err != nil {
return nil, err
}
minResource = resourceVal
maxResource = resourceVal
}
return &ResourceRequirementInfo{
ResourceType: resourceType,
MinQty: minResource,
MaxQty: maxResource,
}, nil
}
// CheckPathExists checks if a path exists or not
func CheckPathExists(path string) bool {
if _, err := os.Stat(path); !os.IsNotExist(err) {
// path to file does exist
return true
}
klog.V(4).Infof("path %s doesn't exist, skipping it", path)
return false
}
// GetHostWithPort parses the provided url and returns a string formatted as
// host:port even if the port was not explicitly specified in the original url.
// If the port is not specified, the standard port corresponding to the url scheme is used.
// example: for url https://example.com the function will return "example.com:443"
// for url https://example.com:8443 the function will return "example.com:8443"
func GetHostWithPort(inputURL string) (string, error) {
u, err := url.Parse(inputURL)
if err != nil {
return "", errors.Wrapf(err, "error while getting port for url %s ", inputURL)
}
port := u.Port()
address := u.Host
// if port is not specified try to detect it based on provided scheme
if port == "" {
portInt, err := net.LookupPort("tcp", u.Scheme)
if err != nil {
return "", errors.Wrapf(err, "error while getting port for url %s ", inputURL)
}
port = strconv.Itoa(portInt)
address = fmt.Sprintf("%s:%s", u.Host, port)
}
return address, nil
}
// GetIgnoreRulesFromDirectory reads the .odoignore file, if present, and reads the rules from it
// if the .odoignore file is not found, then .gitignore is searched for the rules
// if both are not found, return the default rules (just ".git")
// directory is the name of the directory to look into for either of the files
// rules is the array of rules (in string form)
func GetIgnoreRulesFromDirectory(directory string) ([]string, error) {
rules := []string{".git"}
// checking for presence of .odoignore file
pathIgnore := filepath.Join(directory, ".odoignore")
if _, err := os.Stat(pathIgnore); os.IsNotExist(err) || err != nil {
// .odoignore doesn't exist
// checking presence of .gitignore file
pathIgnore = filepath.Join(directory, ".gitignore")
if _, err := os.Stat(pathIgnore); os.IsNotExist(err) || err != nil {
			// neither exists, return the default rules
return rules, nil
}
}
file, err := os.Open(pathIgnore)
if err != nil {
return nil, err
}
defer file.Close() // #nosec G307
scanner := bufio.NewReader(file)
for {
line, _, err := scanner.ReadLine()
if err != nil {
if err == io.EOF {
break
}
return rules, err
}
spaceTrimmedLine := strings.TrimSpace(string(line))
if len(spaceTrimmedLine) > 0 && !strings.HasPrefix(string(line), "#") && !strings.HasPrefix(string(line), ".git") {
rules = append(rules, string(line))
}
}
return rules, nil
}
// GetAbsGlobExps converts the relative glob expressions into absolute glob expressions
// returns the absolute glob expressions
func GetAbsGlobExps(directory string, globExps []string) []string {
absGlobExps := []string{}
for _, globExp := range globExps {
// for glob matching with the library
// the relative paths in the glob expressions need to be converted to absolute paths
absGlobExps = append(absGlobExps, filepath.Join(directory, globExp))
}
return absGlobExps
}
// GetSortedKeys retrieves the alphabetically-sorted keys of the specified map
func GetSortedKeys(mapping map[string]string) []string {
keys := make([]string, len(mapping))
i := 0
for k := range mapping {
keys[i] = k
i++
}
sort.Strings(keys)
return keys
}
// GetSplitValuesFromStr returns a slice containing the split string, using ',' as a separator
func GetSplitValuesFromStr(inputStr string) []string {
if len(inputStr) == 0 {
return []string{}
}
result := strings.Split(inputStr, ",")
for i, value := range result {
result[i] = strings.TrimSpace(value)
}
return result
}
// GetContainerPortsFromStrings generates ContainerPort values from the array of string port values
// ports is the array containing the string port values
func GetContainerPortsFromStrings(ports []string) ([]corev1.ContainerPort, error) {
var containerPorts []corev1.ContainerPort
for _, port := range ports {
splits := strings.Split(port, "/")
if len(splits) < 1 || len(splits) > 2 {
return nil, fmt.Errorf("unable to parse the port string %s", port)
}
portNumberI64, err := strconv.ParseInt(splits[0], 10, 32)
if err != nil {
return nil, fmt.Errorf("invalid port number %s", splits[0])
}
portNumber := int32(portNumberI64)
var portProto corev1.Protocol
if len(splits) == 2 {
switch strings.ToUpper(splits[1]) {
case "TCP":
portProto = corev1.ProtocolTCP
case "UDP":
portProto = corev1.ProtocolUDP
default:
return nil, fmt.Errorf("invalid port protocol %s", splits[1])
}
} else {
portProto = corev1.ProtocolTCP
}
port := corev1.ContainerPort{
Name: fmt.Sprintf("%d-%s", portNumber, strings.ToLower(string(portProto))),
ContainerPort: portNumber,
Protocol: portProto,
}
containerPorts = append(containerPorts, port)
}
return containerPorts, nil
}
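// Example (illustrative): GetContainerPortsFromStrings([]string{"8080/TCP", "9100/udp"})
// returns ports named "8080-tcp" and "9100-udp"; a bare "3000" defaults to TCP.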
// IsGlobExpMatch compiles strToMatch against each of the passed globExps
// Parameters:
// strToMatch : a string for matching against the rules
// globExps : a list of glob patterns to match strToMatch with
// Returns: true if there is any match else false the error (if any)
// Notes:
// Source as well as glob expression to match is changed to forward
// slashes due to supporting Windows as well as support with the
// "github.com/gobwas/glob" library that we use.
func IsGlobExpMatch(strToMatch string, globExps []string) (bool, error) {
// Replace all backslashes with forward slashes in order for
// glob / expression matching to work correctly with
// the "github.com/gobwas/glob" library
strToMatch = strings.Replace(strToMatch, "\\", "/", -1)
for _, globExp := range globExps {
// We replace backslashes with forward slashes for
// glob expression / matching support
globExp = strings.Replace(globExp, "\\", "/", -1)
pattern, err := glob.Compile(globExp)
if err != nil {
return false, err
}
matched := pattern.Match(strToMatch)
if matched {
klog.V(4).Infof("ignoring path %s because of glob rule %s", strToMatch, globExp)
return true, nil
}
}
return false, nil
}
// CheckOutputFlag returns true if specified output format is supported
func CheckOutputFlag(outputFlag string) bool {
if outputFlag == "json" || outputFlag == "" {
return true
}
return false
}
// RemoveDuplicates goes through a string slice and removes all duplicates.
// Reference: https://siongui.github.io/2018/04/14/go-remove-duplicates-from-slice-or-array/
func RemoveDuplicates(s []string) []string {
// Make a map and go through each value to see if it's a duplicate or not
m := make(map[string]bool)
for _, item := range s {
if _, ok := m[item]; !ok {
m[item] = true
}
}
// Append to the unique string
var result []string
for item := range m {
result = append(result, item)
}
return result
}
// RemoveRelativePathFromFiles removes a specified path from a list of files
func RemoveRelativePathFromFiles(files []string, path string) ([]string, error) {
removedRelativePathFiles := []string{}
for _, file := range files {
rel, err := filepath.Rel(path, file)
if err != nil {
return []string{}, err
}
removedRelativePathFiles = append(removedRelativePathFiles, rel)
}
return removedRelativePathFiles, nil
}
// DeletePath deletes a file/directory if it exists and doesn't throw error if it doesn't exist
func DeletePath(path string) error {
_, err := os.Stat(path)
// reason for double negative is os.IsExist() would be blind to EMPTY FILE.
if !os.IsNotExist(err) {
return os.Remove(path)
}
return nil
}
// HttpGetFreePort gets a free port from the system
func HttpGetFreePort() (int, error) {
listener, err := net.Listen("tcp", "localhost:0")
if err != nil {
return -1, err
}
freePort := listener.Addr().(*net.TCPAddr).Port
err = listener.Close()
if err != nil {
return -1, err
}
return freePort, nil
}
// IsEmpty checks to see if a directory is empty
// shamelessly taken from: https://stackoverflow.com/questions/30697324/how-to-check-if-directory-on-path-is-empty
// this helps detect any edge cases where an empty directory is copied over
func IsEmpty(name string) (bool, error) {
f, err := os.Open(name)
if err != nil {
return false, err
}
defer f.Close() // #nosec G307
_, err = f.Readdirnames(1) // Or f.Readdir(1)
if err == io.EOF {
return true, nil
}
return false, err // Either not empty or error, suits both cases
}
// GetRemoteFilesMarkedForDeletion returns the list of remote files marked for deletion
func GetRemoteFilesMarkedForDeletion(delSrcRelPaths []string, remoteFolder string) []string {
var rmPaths []string
for _, delRelPath := range delSrcRelPaths {
// since the paths inside the container are linux oriented
// so we convert the paths accordingly
rmPaths = append(rmPaths, filepath.ToSlash(filepath.Join(remoteFolder, delRelPath)))
}
return rmPaths
}
// HTTPGetRequest uses url to get file contents
func HTTPGetRequest(url string) ([]byte, error) {
var httpClient = &http.Client{Transport: &http.Transport{
ResponseHeaderTimeout: ResponseHeaderTimeout,
},
Timeout: HTTPRequestTimeout}
resp, err := httpClient.Get(url)
if err != nil {
return nil, err
}
defer resp.Body.Close()
// return an error for any status code above 300
if (resp.StatusCode - 300) > 0 {
return nil, fmt.Errorf("error retrieving %s: %s", url, http.StatusText(resp.StatusCode))
}
bytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, err
}
return bytes, err
}
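// Illustrative usage (not part of the original source): fetch a small remote file
// into memory using the timeouts configured above. The URL is a placeholder.
//
//  data, err := HTTPGetRequest("https://example.com/devfile.yaml")
//  if err != nil {
//      return err
//  }
//  fmt.Printf("downloaded %d bytes\n", len(data))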
// FilterIgnores applies the glob rules on the filesChanged and filesDeleted and filters them
// returns the filtered results which do not match any of the glob (ignore) rules
func FilterIgnores(filesChanged, filesDeleted, absIgnoreRules []string) (filesChangedFiltered, filesDeletedFiltered []string) {
for _, file := range filesChanged {
match, err := IsGlobExpMatch(file, absIgnoreRules)
if err != nil {
continue
}
if !match {
filesChangedFiltered = append(filesChangedFiltered, file)
}
}
for _, file := range filesDeleted {
match, err := IsGlobExpMatch(file, absIgnoreRules)
if err != nil {
continue
}
if !match {
filesDeletedFiltered = append(filesDeletedFiltered, file)
}
}
return filesChangedFiltered, filesDeletedFiltered
}
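// Illustrative usage (not part of the original source): drop editor swap files from
// the change sets before syncing. The file names and the ignore rule are hypothetical.
//
//  changed := []string{"/src/main.go", "/src/.main.go.swp"}
//  deleted := []string{"/src/old.go"}
//  changedFiltered, deletedFiltered := FilterIgnores(changed, deleted, []string{"*.swp"})
//  // changedFiltered == ["/src/main.go"], deletedFiltered == ["/src/old.go"]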
// IsValidProjectDir checks that the folder to download the project from the devfile is
// either empty or only contains the devfile used.
func IsValidProjectDir(path string, devfilePath string) error {
files, err := ioutil.ReadDir(path)
if err != nil {
return err
}
if len(files) > 1 {
return errors.Errorf("Folder is not empty. It can only contain the devfile used.")
} else if len(files) == 1 {
file := files[0]
if file.IsDir() {
return errors.Errorf("Folder is not empty. It contains a subfolder.")
}
fileName := files[0].Name()
devfilePath = strings.TrimPrefix(devfilePath, "./")
if fileName != devfilePath {
return errors.Errorf("Folder contains one element and it's not the devfile used.")
}
}
return nil
}
// ConvertGitSSHRemoteToHTTPS converts a Git SSH remote URL to its HTTPS equivalent
func ConvertGitSSHRemoteToHTTPS(remote string) string {
remote = strings.Replace(remote, ":", "/", 1)
remote = strings.Replace(remote, "git@", "https://", 1)
return remote
}
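// Illustrative usage (not part of the original source); the repository path is an example:
//
//  ConvertGitSSHRemoteToHTTPS("git@github.com:owner/repo.git")
//  // -> "https://github.com/owner/repo.git"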
// GetGitHubZipURL resolves the zip archive download URL for a GitHub repository
func GetGitHubZipURL(repoURL string) (string, error) {
var url string
// Convert ssh remote to https
if strings.HasPrefix(repoURL, "git@") {
repoURL = ConvertGitSSHRemoteToHTTPS(repoURL)
}
// expecting string in format 'https://github.com/<owner>/<repo>'
if strings.HasPrefix(repoURL, "https://") {
repoURL = strings.TrimPrefix(repoURL, "https://")
} else {
return "", errors.New("Invalid GitHub URL. Please use https://")
}
repoArray := strings.Split(repoURL, "/")
if len(repoArray) < 3 {
return url, errors.New("Invalid GitHub URL: Could not extract owner and repo, expecting 'https://github.com/<owner>/<repo>'")
}
owner := repoArray[1]
if len(owner) == 0 {
return url, errors.New("Invalid GitHub URL: owner cannot be empty. Expecting 'https://github.com/<owner>/<repo>'")
}
repo := repoArray[2]
if len(repo) == 0 {
return url, errors.New("Invalid GitHub URL: repo cannot be empty. Expecting 'https://github.com/<owner>/<repo>'")
}
if strings.HasSuffix(repo, ".git") {
repo = strings.TrimSuffix(repo, ".git")
}
// TODO: pass branch or tag from devfile
branch := "master"
client := github.NewClient(nil)
opt := &github.RepositoryContentGetOptions{Ref: branch}
URL, response, err := client.Repositories.GetArchiveLink(context.Background(), owner, repo, "zipball", opt, true)
if err != nil {
errMessage := fmt.Sprintf("Error getting zip url. Response: %s.", response.Status)
return url, errors.New(errMessage)
}
url = URL.String()
return url, nil
}
// GetAndExtractZip downloads a zip file from a URL with a http prefix or
// takes an absolute path prefixed with file:// and extracts it to a destination.
// pathToUnzip specifies the path within the zip folder to extract
func GetAndExtractZip(zipURL string, destination string, pathToUnzip string) error {
if zipURL == "" {
return errors.Errorf("Empty zip url: %s", zipURL)
}
if !strings.Contains(zipURL, ".zip") {
return errors.Errorf("Invalid zip url: %s", zipURL)
}
var pathToZip string
if strings.HasPrefix(zipURL, "file://") {
pathToZip = strings.TrimPrefix(zipURL, "file:/")
if runtime.GOOS == "windows" {
pathToZip = strings.Replace(pathToZip, "\\", "/", -1)
}
} else if strings.HasPrefix(zipURL, "http://") || strings.HasPrefix(zipURL, "https://") {
// Generate temporary zip file location
time := time.Now().Format(time.RFC3339)
time = strings.Replace(time, ":", "-", -1) // ":" is illegal char in windows
pathToZip = path.Join(os.TempDir(), "_"+time+".zip")
err := DownloadFile(zipURL, pathToZip)
if err != nil {
return err
}
defer func() {
if err := DeletePath(pathToZip); err != nil {
klog.Errorf("Could not delete temporary directory for zip file. Error: %s", err)
}
}()
} else {
return errors.Errorf("Invalid Zip URL: %s . Should either be prefixed with file://, http:// or https://", zipURL)
}
filenames, err := Unzip(pathToZip, destination, pathToUnzip)
if err != nil {
return err
}
if len(filenames) == 0 {
return errors.New("no files were unzipped, ensure that the project repo is not empty or that sparseCheckoutDir has a valid path")
}
return nil
}
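// Illustrative usage (not part of the original source): extract only the
// "springboot" folder of a starter-project zip into the current directory.
// The URL and the folder name are placeholders.
//
//  if err := GetAndExtractZip("https://example.com/starter.zip", ".", "springboot"); err != nil {
//      return err
//  }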
// Unzip will decompress a zip archive, moving specified files and folders
// within the zip file (parameter 1) to an output directory (parameter 2)
// Source: https://golangcode.com/unzip-files-in-go/
// pathToUnzip (parameter 3) is the path within the zip folder to extract
func Unzip(src, dest, pathToUnzip string) ([]string, error) {
var filenames []string
r, err := zip.OpenReader(src)
if err != nil {
return filenames, err
}
defer r.Close()
// change path separator to correct character
pathToUnzip = filepath.FromSlash(pathToUnzip)
for _, f := range r.File {
// Store filename/path for returning and using later on
index := strings.Index(f.Name, string(os.PathSeparator))
filename := f.Name[index+1:]
if filename == "" {
continue
}
// if sparseCheckoutDir has a pattern
match, err := filepath.Match(pathToUnzip, filename)
if err != nil {
return filenames, err
}
// removes first slash of pathToUnzip if present, adds trailing slash
pathToUnzip = strings.TrimPrefix(pathToUnzip, string(os.PathSeparator))
if pathToUnzip != "" && !strings.HasSuffix(pathToUnzip, string(os.PathSeparator)) {
pathToUnzip = pathToUnzip + string(os.PathSeparator)
}
// destination filepath before trim
fpath := filepath.Join(dest, filename)
// used for pattern matching
fpathDir := filepath.Dir(fpath)
// check for prefix or match
if strings.HasPrefix(filename, pathToUnzip) {
filename = strings.TrimPrefix(filename, pathToUnzip)
} else if !strings.HasPrefix(filename, pathToUnzip) && !match && !sliceContainsString(fpathDir, filenames) {
continue
}
// adds trailing slash to destination if needed as filepath.Join removes it
if (len(filename) == 1 && os.IsPathSeparator(filename[0])) || filename == "" {
fpath = dest + string(os.PathSeparator)
} else {
fpath = filepath.Join(dest, filename)
}
// Check for ZipSlip. More Info: http://bit.ly/2MsjAWE
if !strings.HasPrefix(fpath, filepath.Clean(dest)+string(os.PathSeparator)) {
return filenames, fmt.Errorf("%s: illegal file path", fpath)
}
filenames = append(filenames, fpath)
if f.FileInfo().IsDir() {
// Make Folder
if err = os.MkdirAll(fpath, os.ModePerm); err != nil {
return filenames, err
}
continue
}
// Make File
if err = os.MkdirAll(filepath.Dir(fpath), os.ModePerm); err != nil {
return filenames, err
}
outFile, err := os.OpenFile(fpath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
if err != nil {
return filenames, err
}
rc, err := f.Open()
if err != nil {
return filenames, err
}
// limit the number of bytes copied from a file
// This is set to the limit of file size in Github
// which is 100MB
limited := io.LimitReader(rc, 100*1024*1024)
_, err = io.Copy(outFile, limited)
// Close the file without defer to close before next iteration of loop
outFile.Close()
rc.Close()
if err != nil {
return filenames, err
}
}
return filenames, nil
}
// DownloadFile uses the url to download the file to the filepath
func DownloadFile(url string, filepath string) error {
// Create the file
out, err := os.Create(filepath)
if err != nil {
return err
}
defer out.Close() // #nosec G307
// Get the data
data, err := DownloadFileInMemory(url)
if err != nil {
return errors.Wrapf(err, "Failed to download devfile.yaml for devfile component: %s", filepath)
}
// Write the data to file
_, err = out.Write(data)
if err != nil {
return err
}
return nil
}
// DownloadFileInMemory uses the url to download the file and return bytes
func DownloadFileInMemory(url string) ([]byte, error) {
var httpClient = &http.Client{Transport: &http.Transport{
ResponseHeaderTimeout: ResponseHeaderTimeout,
}, Timeout: HTTPRequestTimeout}
resp, err := httpClient.Get(url)
if err != nil {
return nil, err
}
defer resp.Body.Close()
return ioutil.ReadAll(resp.Body)
}
// ValidateK8sResourceName validates a kubernetes resource name against the following requirements:
// - Contain at most 63 characters
// - Contain only lowercase alphanumeric characters or ‘-’
// - Start with an alphanumeric character
// - End with an alphanumeric character
// - Must not contain all numeric values
func ValidateK8sResourceName(key string, value string) error {
requirements := `
- Contain at most 63 characters
- Contain only lowercase alphanumeric characters or ‘-’
- Start with an alphanumeric character
- End with an alphanumeric character
- Must not contain all numeric values
`
err1 := kvalidation.IsDNS1123Label(value)
_, err2 := strconv.ParseFloat(value, 64)
if err1 != nil || err2 == nil {
return errors.Errorf("%s \"%s\" is not valid, %s should conform the following requirements: %s", key, value, key, requirements)
}
return nil
}
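// Illustrative usage (not part of the original source); the values are hypothetical:
//
//  ValidateK8sResourceName("component name", "my-app") // nil
//  ValidateK8sResourceName("component name", "MyApp")  // error: uppercase is not allowed
//  ValidateK8sResourceName("component name", "1234")   // error: all-numeric names are rejected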
// CheckKubeConfigExist checks for existence of kubeconfig
func CheckKubeConfigExist() bool {
var kubeconfig string
if os.Getenv("KUBECONFIG") != "" {
kubeconfig = os.Getenv("KUBECONFIG")
} else {
home, _ := os.UserHomeDir()
kubeconfig = fmt.Sprintf("%s/.kube/config", home)
}
if CheckPathExists(kubeconfig) {
return true
}
return false
}
// ValidateURL validates the URL
func ValidateURL(sourceURL string) error {
u, err := url.Parse(sourceURL)
if err != nil {
return err
}
if len(u.Host) == 0 || len(u.Scheme) == 0 {
return errors.New("URL is invalid")
}
return nil
}
// ValidateFile validates the file
func ValidateFile(filePath string) error {
// Check if the file path exist
file, err := os.Stat(filePath)
if err != nil {
return err
}
if file.IsDir() {
return errors.Errorf("%s exists but it's not a file", filePath)
}
return nil
}
// CopyFile copies file from source path to destination path
func CopyFile(srcPath string, dstPath string, info os.FileInfo) error {
// Check if the source file path exists
err := ValidateFile(srcPath)
if err != nil {
return err
}
// Open source file
srcFile, err := os.Open(srcPath)
if err != nil {
return err
}
defer srcFile.Close() // #nosec G307
// Create destination file
dstFile, err := os.Create(dstPath)
if err != nil {
return err
}
defer dstFile.Close() // #nosec G307
// Ensure destination file has the same file mode with source file
err = os.Chmod(dstFile.Name(), info.Mode())
if err != nil {
return err
}
// Copy file
_, err = io.Copy(dstFile, srcFile)
if err != nil {
return err
}
return nil
}
// PathEqual compares the paths to determine if they are equal
func PathEqual(firstPath string, secondPath string) bool {
firstAbsPath, _ := GetAbsPath(firstPath)
secondAbsPath, _ := GetAbsPath(secondPath)
return firstAbsPath == secondAbsPath
}
// sliceContainsString checks for existence of given string in given slice
func sliceContainsString(str string, slice []string) bool {
for _, b := range slice {
if b == str {
return true
}
}
return false
}
|
[
"\"CUSTOM_HOMEDIR\"",
"\"KUBECONFIG\"",
"\"KUBECONFIG\""
] |
[] |
[
"CUSTOM_HOMEDIR",
"KUBECONFIG"
] |
[]
|
["CUSTOM_HOMEDIR", "KUBECONFIG"]
|
go
| 2 | 0 | |
powertools-idempotency/src/main/java/software/amazon/lambda/powertools/idempotency/internal/IdempotentAspect.java
|
/*
* Copyright 2022 Amazon.com, Inc. or its affiliates.
* Licensed under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package software.amazon.lambda.powertools.idempotency.internal;
import com.fasterxml.jackson.databind.JsonNode;
import org.aspectj.lang.ProceedingJoinPoint;
import org.aspectj.lang.annotation.Around;
import org.aspectj.lang.annotation.Aspect;
import org.aspectj.lang.annotation.Pointcut;
import org.aspectj.lang.reflect.MethodSignature;
import software.amazon.lambda.powertools.idempotency.Constants;
import software.amazon.lambda.powertools.idempotency.IdempotencyKey;
import software.amazon.lambda.powertools.idempotency.Idempotent;
import software.amazon.lambda.powertools.idempotency.exceptions.IdempotencyConfigurationException;
import software.amazon.lambda.powertools.utilities.JsonConfig;
import java.lang.annotation.Annotation;
import java.lang.reflect.Method;
import static software.amazon.lambda.powertools.core.internal.LambdaHandlerProcessor.isHandlerMethod;
import static software.amazon.lambda.powertools.core.internal.LambdaHandlerProcessor.placedOnRequestHandler;
/**
* Aspect that handles the {@link Idempotent} annotation.
* It uses the {@link IdempotencyHandler} to actually do the job.
*/
@Aspect
public class IdempotentAspect {
@SuppressWarnings({"EmptyMethod"})
@Pointcut("@annotation(idempotent)")
public void callAt(Idempotent idempotent) {
}
@Around(value = "callAt(idempotent) && execution(@Idempotent * *.*(..))", argNames = "pjp,idempotent")
public Object around(ProceedingJoinPoint pjp,
Idempotent idempotent) throws Throwable {
String idempotencyDisabledEnv = System.getenv().get(Constants.IDEMPOTENCY_DISABLED_ENV);
if (idempotencyDisabledEnv != null && !idempotencyDisabledEnv.equalsIgnoreCase("false")) {
return pjp.proceed(pjp.getArgs());
}
Method method = ((MethodSignature) pjp.getSignature()).getMethod();
if (method.getReturnType().equals(void.class)) {
throw new IdempotencyConfigurationException("The annotated method doesn't return anything. Unable to perform idempotency on void return type");
}
JsonNode payload = getPayload(pjp, method);
if (payload == null) {
throw new IdempotencyConfigurationException("Unable to get payload from the method. Ensure there is at least one parameter or that you use @IdempotencyKey");
}
IdempotencyHandler idempotencyHandler = new IdempotencyHandler(pjp, method.getName(), payload);
return idempotencyHandler.handle();
}
/**
* Retrieve the payload from the annotated method parameters
* @param pjp joinPoint
* @param method the annotated method
* @return the payload used for idempotency
*/
private JsonNode getPayload(ProceedingJoinPoint pjp, Method method) {
JsonNode payload = null;
// handleRequest or method with one parameter: get the first one
if ((isHandlerMethod(pjp) && placedOnRequestHandler(pjp))
|| pjp.getArgs().length == 1) {
payload = JsonConfig.get().getObjectMapper().valueToTree(pjp.getArgs()[0]);
} else {
// Look for a parameter annotated with @IdempotencyKey
Annotation[][] annotations = method.getParameterAnnotations();
for (int i = 0; i < annotations.length && payload == null; i++) {
Annotation[] annotationsRow = annotations[i];
for (int j = 0; j < annotationsRow.length && payload == null; j++) {
if (annotationsRow[j].annotationType().equals(IdempotencyKey.class)) {
payload = JsonConfig.get().getObjectMapper().valueToTree(pjp.getArgs()[i]);
}
}
}
}
return payload;
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
cloudformation/resources/aws-codedeploy-deploymentgroup_loadbalancerinfo.go
|
package resources
import "github.com/awslabs/goformation/cloudformation/policies"
// AWSCodeDeployDeploymentGroup_LoadBalancerInfo AWS CloudFormation Resource (AWS::CodeDeploy::DeploymentGroup.LoadBalancerInfo)
// See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codedeploy-deploymentgroup-loadbalancerinfo.html
type AWSCodeDeployDeploymentGroup_LoadBalancerInfo struct {
// ElbInfoList AWS CloudFormation Property
// Required: false
// See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codedeploy-deploymentgroup-loadbalancerinfo.html#cfn-codedeploy-deploymentgroup-loadbalancerinfo-elbinfolist
ElbInfoList []AWSCodeDeployDeploymentGroup_ELBInfo `json:"ElbInfoList,omitempty"`
// TargetGroupInfoList AWS CloudFormation Property
// Required: false
// See: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-codedeploy-deploymentgroup-loadbalancerinfo.html#cfn-codedeploy-deploymentgroup-loadbalancerinfo-targetgroupinfolist
TargetGroupInfoList []AWSCodeDeployDeploymentGroup_TargetGroupInfo `json:"TargetGroupInfoList,omitempty"`
// _deletionPolicy represents a CloudFormation DeletionPolicy
_deletionPolicy policies.DeletionPolicy
// _dependsOn stores the logical ID of the resources to be created before this resource
_dependsOn []string
// _metadata stores structured data associated with this resource
_metadata map[string]interface{}
}
// AWSCloudFormationType returns the AWS CloudFormation resource type
func (r *AWSCodeDeployDeploymentGroup_LoadBalancerInfo) AWSCloudFormationType() string {
return "AWS::CodeDeploy::DeploymentGroup.LoadBalancerInfo"
}
// DependsOn returns a slice of logical ID names this resource depends on.
// see: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-dependson.html
func (r *AWSCodeDeployDeploymentGroup_LoadBalancerInfo) DependsOn() []string {
return r._dependsOn
}
// SetDependsOn specify that the creation of this resource follows another.
// see: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-dependson.html
func (r *AWSCodeDeployDeploymentGroup_LoadBalancerInfo) SetDependsOn(dependencies []string) {
r._dependsOn = dependencies
}
// Metadata returns the metadata associated with this resource.
// see: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-metadata.html
func (r *AWSCodeDeployDeploymentGroup_LoadBalancerInfo) Metadata() map[string]interface{} {
return r._metadata
}
// SetMetadata enables you to associate structured data with this resource.
// see: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-metadata.html
func (r *AWSCodeDeployDeploymentGroup_LoadBalancerInfo) SetMetadata(metadata map[string]interface{}) {
r._metadata = metadata
}
// DeletionPolicy returns the AWS CloudFormation DeletionPolicy to this resource
// see: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-deletionpolicy.html
func (r *AWSCodeDeployDeploymentGroup_LoadBalancerInfo) DeletionPolicy() policies.DeletionPolicy {
return r._deletionPolicy
}
// SetDeletionPolicy applies an AWS CloudFormation DeletionPolicy to this resource
// see: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-attribute-deletionpolicy.html
func (r *AWSCodeDeployDeploymentGroup_LoadBalancerInfo) SetDeletionPolicy(policy policies.DeletionPolicy) {
r._deletionPolicy = policy
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
pkg/reconciler/knativeeventing/controller.go
|
/*
Copyright 2019 The Knative Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package knativeeventing
import (
"context"
"flag"
"os"
"path/filepath"
mf "github.com/manifestival/manifestival"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/cache"
"knative.dev/eventing-operator/pkg/apis/eventing/v1alpha1"
knativeEventinginformer "knative.dev/eventing-operator/pkg/client/injection/informers/eventing/v1alpha1/knativeeventing"
rbase "knative.dev/eventing-operator/pkg/reconciler"
deploymentinformer "knative.dev/pkg/client/injection/kube/informers/apps/v1/deployment"
"knative.dev/pkg/configmap"
"knative.dev/pkg/controller"
"knative.dev/pkg/injection/sharedmain"
)
const (
controllerAgentName = "knativeeventing-controller"
reconcilerName = "KnativeEventing"
)
var (
recursive = flag.Bool("recursive", false, "If filename is a directory, process all manifests recursively")
MasterURL = flag.String("master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
Kubeconfig = flag.String("kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
)
// NewController initializes the controller and is called by the generated code
// Registers eventhandlers to enqueue events
func NewController(
ctx context.Context,
cmw configmap.Watcher,
) *controller.Impl {
knativeEventingInformer := knativeEventinginformer.Get(ctx)
deploymentInformer := deploymentinformer.Get(ctx)
c := &Reconciler{
Base: rbase.NewBase(ctx, controllerAgentName, cmw),
knativeEventingLister: knativeEventingInformer.Lister(),
eventings: sets.String{},
}
koDataDir := os.Getenv("KO_DATA_PATH")
cfg, err := sharedmain.GetConfig(*MasterURL, *Kubeconfig)
if err != nil {
c.Logger.Error(err, "Error building kubeconfig")
}
config, err := mf.NewManifest(filepath.Join(koDataDir, "knative-eventing/"), *recursive, cfg)
if err != nil {
c.Logger.Error(err, "Error creating the Manifest for knative-eventing")
os.Exit(1)
}
c.config = config
impl := controller.NewImpl(c, c.Logger, reconcilerName)
c.Logger.Infof("Setting up event handlers for %s", reconcilerName)
knativeEventingInformer.Informer().AddEventHandler(controller.HandleAll(impl.Enqueue))
deploymentInformer.Informer().AddEventHandler(cache.FilteringResourceEventHandler{
FilterFunc: controller.Filter(v1alpha1.SchemeGroupVersion.WithKind("KnativeEventing")),
Handler: controller.HandleAll(impl.EnqueueControllerOf),
})
return impl
}
|
[
"\"KO_DATA_PATH\""
] |
[] |
[
"KO_DATA_PATH"
] |
[]
|
["KO_DATA_PATH"]
|
go
| 1 | 0 | |
pkg/providers/s5/aes.go
|
package s5
import (
"os"
"github.com/mvisonneau/s5/pkg/cipher"
"github.com/mvisonneau/tfcw/pkg/schemas"
)
func (c *Client) getCipherEngineAES(v *schemas.S5) (cipher.Engine, error) {
if v.CipherEngineAES != nil && v.CipherEngineAES.Key != nil {
return cipher.NewAESClient(*v.CipherEngineAES.Key)
}
if c.CipherEngineAES != nil && c.CipherEngineAES.Key != nil {
return cipher.NewAESClient(*c.CipherEngineAES.Key)
}
return cipher.NewAESClient(os.Getenv("S5_AES_KEY"))
}
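// Illustrative note (not part of the original source): the key is resolved in order
// from the variable-level CipherEngineAES.Key, then the provider-level key, and
// finally the S5_AES_KEY environment variable. The call below is a sketch only.
//
//  engine, err := c.getCipherEngineAES(&schemas.S5{}) // falls back to S5_AES_KEY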
|
[
"\"S5_AES_KEY\""
] |
[] |
[
"S5_AES_KEY"
] |
[]
|
["S5_AES_KEY"]
|
go
| 1 | 0 | |
basket/admin.py
|
from django.contrib import admin
from .models import *
class TicketInline(admin.StackedInline):
model = Ticket
extra = 0
can_delete = False
readonly_fields = ('flight', 'flight_seat', 'first_name', 'last_name', 'national_code', 'birthday')
def has_add_permission(self, request, obj):
return False
@admin.register(Cart)
class CartAdmin(admin.ModelAdmin):
list_display = ('id', 'user', 'is_paid', 'created_time', 'modified_time')
list_filter = ('is_paid',)
search_fields = ('user__username',)
readonly_fields = ('user', 'is_paid')
inlines = (TicketInline,)
@admin.register(Ticket)
class TicketAdmin(admin.ModelAdmin):
list_display = (
'id', 'flight', 'cart', 'first_name', 'last_name',
'national_code', 'birthday', 'created_time', 'modified_time'
)
search_fields = ('first_name', 'last_name', 'national_code')
readonly_fields = ('cart', 'flight', 'flight_seat', 'first_name', 'last_name', 'national_code', 'birthday')
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
test_init_final.py
|
# -*- coding: utf-8 -*-
################ Server Ver. 28 (2021. 6. 23.) #####################
import sys, os, ctypes
import asyncio, discord, aiohttp
import random, re, datetime, time, logging
from discord.ext import tasks, commands
from discord.ext.commands import CommandNotFound, MissingRequiredArgument
from gtts import gTTS
from github import Github
import base64
import gspread, boto3
from oauth2client.service_account import ServiceAccountCredentials # settlement (Google Sheets)
from io import StringIO
import urllib.request
from math import ceil, floor
##################### Logging ###########################
log_stream = StringIO()
logging.basicConfig(stream=log_stream, level=logging.WARNING)
#ilsanglog = logging.getLogger('discord')
#ilsanglog.setLevel(level = logging.WARNING)
#handler = logging.StreamHandler()
#handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
#ilsanglog.addHandler(handler)
#####################################################
if not discord.opus.is_loaded():
discord.opus.load_opus(ctypes.util.find_library('opus'))
print("opus_loaded")
basicSetting = []
bossData = []
fixed_bossData = []
bossNum = 0
fixed_bossNum = 0
chkvoicechannel = 0
chkrelogin = 0
chflg = 0
LoadChk = 0
bossTime = []
tmp_bossTime = []
fixed_bossTime = []
bossTimeString = []
bossDateString = []
tmp_bossTimeString = []
tmp_bossDateString = []
bossFlag = []
bossFlag0 = []
fixed_bossFlag = []
fixed_bossFlag0 = []
bossMungFlag = []
bossMungCnt = []
channel_info = []
channel_name = []
channel_id = []
channel_voice_name = []
channel_voice_id = []
channel_type = []
FixedBossDateData = []
indexFixedBossname = []
endTime = None
gc = None
credentials = None
regenembed = None
command = None
kill_Data = None
kill_Time = None
item_Data = None
tmp_racing_unit = None
setting_channel_name = None
boss_nick = {}
access_token = os.environ["BOT_TOKEN"]
git_access_token = os.environ["GIT_TOKEN"]
git_access_repo = os.environ["GIT_REPO"]
git_access_repo_restart = os.environ["GIT_REPO_RESTART"]
try:
aws_key = os.environ["AWS_KEY"]
aws_secret_key = os.environ["AWS_SECRET_KEY"]
except:
aws_key = ""
aws_secret_key = ""
g = Github(git_access_token)
repo = g.get_repo(git_access_repo)
repo_restart = g.get_repo(git_access_repo_restart)
# Hangul initial-consonant (choseong) extraction function
def convertToInitialLetters(text):
CHOSUNG_START_LETTER = 4352
JAMO_START_LETTER = 44032
JAMO_END_LETTER = 55203
JAMO_CYCLE = 588
def isHangul(ch):
return ord(ch) >= JAMO_START_LETTER and ord(ch) <= JAMO_END_LETTER
def isBlankOrNumber(ch):
return ord(ch) == 32 or ord(ch) >= 48 and ord(ch) <= 57
def convertNomalInitialLetter(ch):
dic_InitalLetter = {4352:"ㄱ"
,4353:"ㄲ"
,4354:"ㄴ"
,4355:"ㄷ"
,4356:"ㄸ"
,4357:"ㄹ"
,4358:"ㅁ"
,4359:"ㅂ"
,4360:"ㅃ"
,4361:"ㅅ"
,4362:"ㅆ"
,4363:"ㅇ"
,4364:"ㅈ"
,4365:"ㅉ"
,4366:"ㅊ"
,4367:"ㅋ"
,4368:"ㅌ"
,4369:"ㅍ"
,4370:"ㅎ"
,32:" "
,48:"0"
,49:"1"
,50:"2"
,51:"3"
,52:"4"
,53:"5"
,54:"6"
,55:"7"
,56:"8"
,57:"9"
}
return dic_InitalLetter[ord(ch)]
result = ""
for ch in text:
if isHangul(ch): # filter out characters that are not Hangul
result += convertNomalInitialLetter(chr((int((ord(ch)-JAMO_START_LETTER)/JAMO_CYCLE))+CHOSUNG_START_LETTER))
elif isBlankOrNumber(ch):
result += convertNomalInitialLetter(chr(int(ord(ch))))
return result
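# Illustrative example (not part of the original source): the function maps each
# Hangul syllable to its initial consonant, keeping spaces and digits, e.g. for a
# hypothetical boss name:
#   convertToInitialLetters("가디언 타워 1층")  ->  "ㄱㄷㅇ ㅌㅇ 1ㅊ"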
def init():
global basicSetting
global bossData
global fixed_bossData
global bossNum
global fixed_bossNum
global chkvoicechannel
global chkrelogin
global bossTime
global tmp_bossTime
global fixed_bossTime
global bossTimeString
global bossDateString
global tmp_bossTimeString
global tmp_bossDateString
global bossFlag
global bossFlag0
global fixed_bossFlag
global fixed_bossFlag0
global bossMungFlag
global bossMungCnt
global channel_info
global channel_name
global channel_voice_name
global channel_voice_id
global channel_id
global channel_type
global LoadChk
global indexFixedBossname
global FixedBossDateData
global endTime
global gc # settlement
global credentials # settlement
global regenembed
global command
global kill_Data
global kill_Time
global item_Data
global tmp_racing_unit
global boss_nick
command = []
tmp_bossData = []
tmp_fixed_bossData = []
FixedBossDateData = []
indexFixedBossname = []
kill_Data = {}
tmp_kill_Data = []
item_Data = {}
tmp_item_Data = []
f = []
fb = []
fk = []
fc = []
fi = []
tmp_racing_unit = []
boss_nick = {}
inidata = repo.get_contents("test_setting.ini")
file_data1 = base64.b64decode(inidata.content)
file_data1 = file_data1.decode('utf-8')
inputData = file_data1.split('\n')
command_inidata = repo.get_contents("command.ini")
file_data4 = base64.b64decode(command_inidata.content)
file_data4 = file_data4.decode('utf-8')
command_inputData = file_data4.split('\n')
boss_inidata = repo.get_contents("boss.ini")
file_data3 = base64.b64decode(boss_inidata.content)
file_data3 = file_data3.decode('utf-8')
boss_inputData = file_data3.split('\n')
fixed_inidata = repo.get_contents("fixed_boss.ini")
file_data2 = base64.b64decode(fixed_inidata.content)
file_data2 = file_data2.decode('utf-8')
fixed_inputData = file_data2.split('\n')
kill_inidata = repo.get_contents("kill_list.ini")
file_data5 = base64.b64decode(kill_inidata.content)
file_data5 = file_data5.decode('utf-8')
kill_inputData = file_data5.split('\n')
item_inidata = repo.get_contents("item_list.ini")
file_data6 = base64.b64decode(item_inidata.content)
file_data6 = file_data6.decode('utf-8')
item_inputData = file_data6.split('\n')
for i in range(len(fixed_inputData)):
FixedBossDateData.append(fixed_inputData[i])
index_fixed = 0
for value in FixedBossDateData:
if value.find('bossname') != -1:
indexFixedBossname.append(index_fixed)
index_fixed = index_fixed + 1
for i in range(inputData.count('\r')):
inputData.remove('\r')
for i in range(command_inputData.count('\r')):
command_inputData.remove('\r')
for i in range(boss_inputData.count('\r')):
boss_inputData.remove('\r')
for i in range(fixed_inputData.count('\r')):
fixed_inputData.remove('\r')
for i in range(kill_inputData.count('\r')):
kill_inputData.remove('\r')
for i in range(item_inputData.count('\r')):
item_inputData.remove('\r')
del(command_inputData[0])
del(boss_inputData[0])
del(fixed_inputData[0])
del(kill_inputData[0])
del(item_inputData[0])
for data in boss_inputData:
if "kakaoOnOff" in data:
raise Exception("[boss.ini] 파일에서 [kakaoOnOff]를 지워주세요.")
for data in fixed_inputData:
if "kakaoOnOff" in data:
raise Exception("[fixed_boss.ini] 파일에서 [kakaoOnOff]를 지워주세요.")
############## Boss-timer bot initial settings list #####################
try:
basicSetting.append(inputData[0][11:]) #basicSetting[0] : timezone
basicSetting.append(inputData[8][15:]) #basicSetting[1] : before_alert
basicSetting.append(inputData[10][11:]) #basicSetting[2] : mungChk1
basicSetting.append(inputData[9][16:]) #basicSetting[3] : before_alert1
basicSetting.append(inputData[14][14:16]) #basicSetting[4] : restarttime hour
basicSetting.append(inputData[14][17:]) #basicSetting[5] : restarttime minute
basicSetting.append(inputData[1][15:]) #basicSetting[6] : voice channel ID
basicSetting.append(inputData[2][14:]) #basicSetting[7] : text channel ID
basicSetting.append(inputData[3][16:]) #basicSetting[8] : ladder channel ID
basicSetting.append(inputData[13][14:]) #basicSetting[9] : number of entries printed by !ㅂ
basicSetting.append(inputData[17][11:]) #basicSetting[10] : json filename
basicSetting.append(inputData[4][17:]) #basicSetting[11] : settlement channel ID
basicSetting.append(inputData[16][12:]) #basicSetting[12] : sheet name
basicSetting.append(inputData[15][16:]) #basicSetting[13] : restart interval
basicSetting.append(inputData[18][12:]) #basicSetting[14] : worksheet name
basicSetting.append(inputData[19][12:]) #basicSetting[15] : input cell
basicSetting.append(inputData[20][13:]) #basicSetting[16] : output cell
basicSetting.append(inputData[12][13:]) #basicSetting[17] : mung (missed spawn) delete count
basicSetting.append(inputData[5][14:]) #basicSetting[18] : kill channel ID
basicSetting.append(inputData[6][16:]) #basicSetting[19] : racing channel ID
basicSetting.append(inputData[7][14:]) #basicSetting[20] : item channel ID
basicSetting.append(inputData[21][12:]) #basicSetting[21] : voice_use
basicSetting.append(inputData[11][11:]) #basicSetting[22] : mungChk2
except:
raise Exception("[test_setting.ini] 파일 양식을 확인하세요.")
############## Boss-timer bot command list #####################
for i in range(len(command_inputData)):
tmp_command = command_inputData[i][12:].rstrip('\r')
fc = tmp_command.split(', ')
command.append(fc)
fc = []
#command.append(command_inputData[i][12:].rstrip('\r')) #command[0] ~ [24] : 명령어
################## Kill-on-sight list ###########################
for i in range(len(kill_inputData)):
tmp_kill_Data.append(kill_inputData[i].rstrip('\r'))
fk.append(tmp_kill_Data[i][:tmp_kill_Data[i].find(' ')])
fk.append(tmp_kill_Data[i][tmp_kill_Data[i].find(' ')+1:])
try:
kill_Data[fk[0]] = int(fk[1])
except:
pass
fk = []
for i in range(len(item_inputData)):
tmp_item_Data.append(item_inputData[i].rstrip('\r'))
fi.append(tmp_item_Data[i][:tmp_item_Data[i].find(' ')])
fi.append(tmp_item_Data[i][tmp_item_Data[i].find(' ')+1:])
try:
item_Data[fi[0]] = int(fi[1])
except:
pass
fi = []
tmp_killtime = datetime.datetime.now().replace(hour=int(5), minute=int(0), second = int(0))
kill_Time = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
if tmp_killtime < kill_Time :
kill_Time = tmp_killtime + datetime.timedelta(days=int(1))
else:
kill_Time = tmp_killtime
for i in range(len(basicSetting)):
basicSetting[i] = basicSetting[i].strip()
try:
if basicSetting[6] != "":
basicSetting[6] = int(basicSetting[6])
if basicSetting[7] != "":
basicSetting[7] = int(basicSetting[7])
if basicSetting[8] != "":
basicSetting[8] = int(basicSetting[8])
if basicSetting[11] != "":
basicSetting[11] = int(basicSetting[11])
if basicSetting[18] != "":
basicSetting[18] = int(basicSetting[18])
if basicSetting[19] != "":
basicSetting[19] = int(basicSetting[19])
if basicSetting[20] != "":
basicSetting[20] = int(basicSetting[20])
except ValueError:
raise Exception("[test_setting.ini] 파일 양식을 확인하세요.")
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
if int(basicSetting[13]) == 0 :
endTime = tmp_now.replace(hour=int(basicSetting[4]), minute=int(basicSetting[5]), second = int(0))
endTime = endTime + datetime.timedelta(days=int(1000))
else :
endTime = tmp_now.replace(hour=int(basicSetting[4]), minute=int(basicSetting[5]), second = int(0))
if endTime < tmp_now :
endTime = endTime + datetime.timedelta(days=int(basicSetting[13]))
bossNum = int(len(boss_inputData)/6)
fixed_bossNum = int(len(fixed_inputData)/6)
for i in range(bossNum):
tmp_bossData.append(boss_inputData[i*6:i*6+6])
for i in range(fixed_bossNum):
tmp_fixed_bossData.append(fixed_inputData[i*6:i*6+6])
for j in range(bossNum):
for i in range(len(tmp_bossData[j])):
tmp_bossData[j][i] = tmp_bossData[j][i].strip()
for j in range(fixed_bossNum):
for i in range(len(tmp_fixed_bossData[j])):
tmp_fixed_bossData[j][i] = tmp_fixed_bossData[j][i].strip()
tmp_boss_name_list : list = []
tmp_nick : list = []
############## Normal boss info list #####################
for j in range(bossNum):
tmp_nick = []
tmp_len = tmp_bossData[j][1].find(':')
tmp_boss_name_list = tmp_bossData[j][0][11:].split(", ")
f.append(tmp_boss_name_list[0]) #bossData[0] : 보스명
if len(tmp_boss_name_list) > 1:
for nick in tmp_boss_name_list[1:]:
tmp_nick.append(nick)
tmp_nick.append(convertToInitialLetters(nick))
boss_nick[tmp_boss_name_list[0]] = tmp_nick
f.append(tmp_bossData[j][1][10:tmp_len]) #bossData[1] : 시
f.append(tmp_bossData[j][2][13:]) #bossData[2] : 멍/미입력
f.append(tmp_bossData[j][3][20:]) #bossData[3] : 분전 알림멘트
f.append(tmp_bossData[j][4][13:]) #bossData[4] : 젠 알림멘트
f.append(tmp_bossData[j][1][tmp_len+1:]) #bossData[5] : 분
f.append('') #bossData[6] : 메세지
f.append(tmp_bossData[j][5][11:]) #bossData[8] : 멍체크시간종류
bossData.append(f)
f = []
bossTime.append(datetime.datetime.now()+datetime.timedelta(days=365, hours = int(basicSetting[0])))
tmp_bossTime.append(datetime.datetime.now()+datetime.timedelta(days=365, hours = int(basicSetting[0])))
bossTimeString.append('99:99:99')
bossDateString.append('9999-99-99')
tmp_bossTimeString.append('99:99:99')
tmp_bossDateString.append('9999-99-99')
bossFlag.append(False)
bossFlag0.append(False)
bossMungFlag.append(False)
bossMungCnt.append(0)
tmp_fixed_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
############## Fixed boss info list #####################
for j in range(fixed_bossNum):
try:
tmp_fixed_len = tmp_fixed_bossData[j][1].find(':')
tmp_fixedGen_len = tmp_fixed_bossData[j][2].find(':')
fb.append(tmp_fixed_bossData[j][0][11:]) #fixed_bossData[0] : 보스명
fb.append(tmp_fixed_bossData[j][1][11:tmp_fixed_len]) #fixed_bossData[1] : 시
fb.append(tmp_fixed_bossData[j][1][tmp_fixed_len+1:]) #fixed_bossData[2] : 분
fb.append(tmp_fixed_bossData[j][4][20:]) #fixed_bossData[3] : 분전 알림멘트
fb.append(tmp_fixed_bossData[j][5][13:]) #fixed_bossData[4] : 젠 알림멘트
fb.append(tmp_fixed_bossData[j][2][12:tmp_fixedGen_len]) #fixed_bossData[5] : 젠주기-시
fb.append(tmp_fixed_bossData[j][2][tmp_fixedGen_len+1:]) #fixed_bossData[6] : 젠주기-분
fb.append(tmp_fixed_bossData[j][3][12:16]) #fixed_bossData[7] : 시작일-년
fb.append(tmp_fixed_bossData[j][3][17:19]) #fixed_bossData[8] : 시작일-월
fb.append(tmp_fixed_bossData[j][3][20:22]) #fixed_bossData[9] : 시작일-일
fixed_bossData.append(fb)
fb = []
fixed_bossFlag.append(False)
fixed_bossFlag0.append(False)
fixed_bossTime.append(tmp_fixed_now.replace(year = int(fixed_bossData[j][7]), month = int(fixed_bossData[j][8]), day = int(fixed_bossData[j][9]), hour=int(fixed_bossData[j][1]), minute=int(fixed_bossData[j][2]), second = int(0)))
if fixed_bossTime[j] < tmp_fixed_now :
while fixed_bossTime[j] < tmp_fixed_now :
fixed_bossTime[j] = fixed_bossTime[j] + datetime.timedelta(hours=int(fixed_bossData[j][5]), minutes=int(fixed_bossData[j][6]), seconds = int(0))
if tmp_fixed_now + datetime.timedelta(minutes=int(basicSetting[1])) <= fixed_bossTime[j] < tmp_fixed_now + datetime.timedelta(minutes=int(basicSetting[3])):
fixed_bossFlag0[j] = True
if fixed_bossTime[j] < tmp_fixed_now + datetime.timedelta(minutes=int(basicSetting[1])):
fixed_bossFlag[j] = True
fixed_bossFlag0[j] = True
except:
raise Exception(f"[fixed_boss.ini] 파일 {tmp_fixed_bossData[j][0][11:]} 부분 양식을 확인하세요.")
################# Load emojis ######################
emo_inidata = repo.get_contents("emoji.ini")
emoji_data1 = base64.b64decode(emo_inidata.content)
emoji_data1 = emoji_data1.decode('utf-8')
emo_inputData = emoji_data1.split('\n')
for i in range(len(emo_inputData)):
tmp_emo = emo_inputData[i][8:].rstrip('\r')
if tmp_emo != "":
tmp_racing_unit.append(tmp_emo)
################# Sort boss respawn times ######################
regenData = []
regenTime = []
regenbossName = []
outputTimeHour = []
outputTimeMin = []
for i in range(bossNum):
if bossData[i][2] == "1":
f.append(bossData[i][0] + "R")
else:
f.append(bossData[i][0])
f.append(bossData[i][1] + bossData[i][5])
regenData.append(f)
regenTime.append(bossData[i][1] + bossData[i][5])
f = []
regenTime = sorted(list(set(regenTime)))
for j in range(len(regenTime)):
for i in range(len(regenData)):
if regenTime[j] == regenData[i][1] :
f.append(regenData[i][0])
regenbossName.append(f)
try:
outputTimeHour.append(int(regenTime[j][:2]))
outputTimeMin.append(int(regenTime[j][2:]))
except ValueError:
raise Exception(f"[boss.ini] 파일 {f} gentime을 확인하시기 바랍니다.")
f = []
regenembed = discord.Embed(
title='----- 보스별 리스폰 시간 -----',
description= ' ')
for i in range(len(regenTime)):
if outputTimeMin[i] == 0 :
regenembed.add_field(name=str(outputTimeHour[i]) + '시간', value= '```'+ ', '.join(map(str, sorted(regenbossName[i]))) + '```', inline=False)
else :
regenembed.add_field(name=str(outputTimeHour[i]) + '시간' + str(outputTimeMin[i]) + '분', value= '```' + ','.join(map(str, sorted(regenbossName[i]))) + '```', inline=False)
regenembed.set_footer(text = 'R : 멍 보스')
##########################################################
if basicSetting[10] !="":
scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive'] # settlement
credentials = ServiceAccountCredentials.from_json_keyfile_name(basicSetting[10], scope) # settlement
init()
channel = ''
#mp3 file creation function (using gTTS, male voice)
async def MakeSound(saveSTR, filename):
if aws_key != "" and aws_secret_key != "":
polly = boto3.client("polly", aws_access_key_id = aws_key, aws_secret_access_key = aws_secret_key, region_name = "eu-west-1")
s = '<speak><prosody rate="' + str(95) + '%">' + saveSTR + '</prosody></speak>'
response = polly.synthesize_speech(
TextType = "ssml",
Text=s,
OutputFormat="mp3",
VoiceId="Seoyeon")
stream = response.get("AudioStream")
with open(f"./{filename}.mp3", "wb") as mp3file:
data = stream.read()
mp3file.write(data)
else:
tts = gTTS(saveSTR, lang = 'ko')
tts.save(f"./{filename}.wav")
#mp3 file playback function
async def PlaySound(voiceclient, filename):
if basicSetting[21] != "1":
return
# source = discord.FFmpegPCMAudio(filename)
source = discord.FFmpegOpusAudio(filename)
try:
voiceclient.play(source)
except discord.errors.ClientException:
while voiceclient.is_playing():
await asyncio.sleep(1)
while voiceclient.is_playing():
await asyncio.sleep(1)
voiceclient.stop()
# source.cleanup()
return
#save my_bot.db
async def dbSave():
global bossData
global bossNum
global bossTime
global bossTimeString
global bossDateString
global bossMungFlag
global bossMungCnt
for i in range(bossNum):
for j in range(bossNum):
if bossTimeString[i] and bossTimeString[j] != '99:99:99':
if bossTimeString[i] == bossTimeString[j] and i != j:
tmp_time1 = bossTimeString[j][:6]
tmp_time2 = (int(bossTimeString[j][6:]) + 1)%100
if tmp_time2 < 10 :
tmp_time22 = '0' + str(tmp_time2)
elif tmp_time2 == 60 :
tmp_time22 = '00'
else :
tmp_time22 = str(tmp_time2)
bossTimeString[j] = tmp_time1 + tmp_time22
datelist1 = bossTime
datelist = list(set(datelist1))
information1 = '----- 보스탐 정보 -----\n'
for timestring in sorted(datelist):
for i in range(bossNum):
if timestring == bossTime[i]:
if bossTimeString[i] != '99:99:99' or bossMungFlag[i] == True :
if bossMungFlag[i] == True :
if bossData[i][2] == '0' :
information1 += ' - ' + bossData[i][0] + '(' + bossData[i][1] + '.' + bossData[i][5] + ') : ' + tmp_bossTime[i].strftime('%H:%M:%S') + ' @ ' + tmp_bossTime[i].strftime('%Y-%m-%d') + ' (미입력 ' + str(bossMungCnt[i]) + '회)' + ' * ' + bossData[i][6] + '\n'
else :
information1 += ' - ' + bossData[i][0] + '(' + bossData[i][1] + '.' + bossData[i][5] + ') : ' + tmp_bossTime[i].strftime('%H:%M:%S') + ' @ ' + tmp_bossTime[i].strftime('%Y-%m-%d') + ' (멍 ' + str(bossMungCnt[i]) + '회)' + ' * ' + bossData[i][6] + '\n'
else:
if bossData[i][2] == '0' :
information1 += ' - ' + bossData[i][0] + '(' + bossData[i][1] + '.' + bossData[i][5] + ') : ' + bossTimeString[i] + ' @ ' + bossDateString[i] + ' (미입력 ' + str(bossMungCnt[i]) + '회)' + ' * ' + bossData[i][6] + '\n'
else :
information1 += ' - ' + bossData[i][0] + '(' + bossData[i][1] + '.' + bossData[i][5] + ') : ' + bossTimeString[i] + ' @ ' + bossDateString[i] + ' (멍 ' + str(bossMungCnt[i]) + '회)' + ' * ' + bossData[i][6] + '\n'
try :
contents = repo.get_contents("my_bot.db")
repo.update_file(contents.path, "bossDB", information1, contents.sha)
except Exception as e :
print ('save error!!')
print(e.args[1]['message']) # output: This repository is empty.
errortime = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
print (errortime)
pass
#load my_bot.db
async def dbLoad():
global LoadChk
contents1 = repo.get_contents("my_bot.db")
file_data = base64.b64decode(contents1.content)
file_data = file_data.decode('utf-8')
beforeBossData = file_data.split('\n')
if len(beforeBossData) > 1:
for i in range(len(beforeBossData)-1):
for j in range(bossNum):
startPos = beforeBossData[i+1].find('-')
endPos = beforeBossData[i+1].find('(')
if beforeBossData[i+1][startPos+2:endPos] == bossData[j][0] :
#if beforeBossData[i+1].find(bossData[j][0]) != -1 :
tmp_mungcnt = 0
tmp_len = beforeBossData[i+1].find(':')
tmp_datelen = beforeBossData[i+1].find('@')
tmp_msglen = beforeBossData[i+1].find('*')
years1 = beforeBossData[i+1][tmp_datelen+2:tmp_datelen+6]
months1 = beforeBossData[i+1][tmp_datelen+7:tmp_datelen+9]
days1 = beforeBossData[i+1][tmp_datelen+10:tmp_datelen+12]
hours1 = beforeBossData[i+1][tmp_len+2:tmp_len+4]
minutes1 = beforeBossData[i+1][tmp_len+5:tmp_len+7]
seconds1 = beforeBossData[i+1][tmp_len+8:tmp_len+10]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(year = int(years1), month = int(months1), day = int(days1), hour=int(hours1), minute=int(minutes1), second = int(seconds1))
if bossData[j][7] == "1":
tmp_now_chk = tmp_now + datetime.timedelta(minutes = int(basicSetting[2]))
else:
tmp_now_chk = tmp_now + datetime.timedelta(minutes = int(basicSetting[22]))
if tmp_now_chk < now2 :
deltaTime = datetime.timedelta(hours = int(bossData[j][1]), minutes = int(bossData[j][5]))
while tmp_now_chk < now2 :
tmp_now_chk = tmp_now_chk + deltaTime
tmp_now = tmp_now + deltaTime
tmp_mungcnt = tmp_mungcnt + 1
if tmp_now_chk > now2 > tmp_now: # spawn window in progress
bossMungFlag[j] = True
tmp_bossTime[j] = tmp_now
tmp_bossTimeString[j] = tmp_bossTime[j].strftime('%H:%M:%S')
tmp_bossDateString[j] = tmp_bossTime[j].strftime('%Y-%m-%d')
bossTimeString[j] = '99:99:99'
bossDateString[j] = '9999-99-99'
bossTime[j] = tmp_bossTime[j] + datetime.timedelta(days=365)
else:
tmp_bossTime[j] = bossTime[j] = tmp_now
tmp_bossTimeString[j] = bossTimeString[j] = bossTime[j].strftime('%H:%M:%S')
tmp_bossDateString[j] = bossDateString[j] = bossTime[j].strftime('%Y-%m-%d')
if now2 + datetime.timedelta(minutes=int(basicSetting[1])) <= tmp_bossTime[j] < now2 + datetime.timedelta(minutes=int(basicSetting[3])):
bossFlag0[j] = True
if tmp_bossTime[j] < now2 + datetime.timedelta(minutes=int(basicSetting[1])):
bossFlag[j] = True
bossFlag0[j] = True
bossData[j][6] = beforeBossData[i+1][tmp_msglen+2:len(beforeBossData[i+1])]
if beforeBossData[i+1][tmp_msglen-4:tmp_msglen-3] != 0 and beforeBossData[i+1][tmp_msglen-5:tmp_msglen-4] == ' ':
bossMungCnt[j] = int(beforeBossData[i+1][tmp_msglen-4:tmp_msglen-3]) + tmp_mungcnt
elif beforeBossData[i+1][tmp_msglen-5:tmp_msglen-4] != ' ':
bossMungCnt[j] = int(beforeBossData[i+1][tmp_msglen-5:tmp_msglen-4] + beforeBossData[i+1][tmp_msglen-4:tmp_msglen-3]) + tmp_mungcnt
else:
bossMungCnt[j] = 0
global FixedBossDateData
global fixed_bossFlag
global fixed_bossFlag0
global fixed_bossTime
global fixed_bossData
FixedBossDateData = []
fixed_bossFlag = []
fixed_bossFlag0 = []
fixed_bossTime = []
fixed_bossData = []
tmp_fixed_bossData = []
fb = []
fixed_inidata = repo.get_contents("fixed_boss.ini")
file_data2 = base64.b64decode(fixed_inidata.content)
file_data2 = file_data2.decode('utf-8')
fixed_inputData = file_data2.split('\n')
for i in range(len(fixed_inputData)):
FixedBossDateData.append(fixed_inputData[i])
del(fixed_inputData[0])
for i in range(fixed_inputData.count('\r')):
fixed_inputData.remove('\r')
fixed_bossNum = int(len(fixed_inputData)/6)
for i in range(fixed_bossNum):
tmp_fixed_bossData.append(fixed_inputData[i*6:i*6+6])
for j in range(fixed_bossNum):
for i in range(len(tmp_fixed_bossData[j])):
tmp_fixed_bossData[j][i] = tmp_fixed_bossData[j][i].strip()
tmp_fixed_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
############## Fixed boss info list #####################
for j in range(fixed_bossNum):
try:
tmp_fixed_len = tmp_fixed_bossData[j][1].find(':')
tmp_fixedGen_len = tmp_fixed_bossData[j][2].find(':')
fb.append(tmp_fixed_bossData[j][0][11:]) #fixed_bossData[0] : 보스명
fb.append(tmp_fixed_bossData[j][1][11:tmp_fixed_len]) #fixed_bossData[1] : 시
fb.append(tmp_fixed_bossData[j][1][tmp_fixed_len+1:]) #fixed_bossData[2] : 분
fb.append(tmp_fixed_bossData[j][4][20:]) #fixed_bossData[3] : 분전 알림멘트
fb.append(tmp_fixed_bossData[j][5][13:]) #fixed_bossData[4] : 젠 알림멘트
fb.append(tmp_fixed_bossData[j][2][12:tmp_fixedGen_len]) #fixed_bossData[5] : 젠주기-시
fb.append(tmp_fixed_bossData[j][2][tmp_fixedGen_len+1:]) #fixed_bossData[6] : 젠주기-분
fb.append(tmp_fixed_bossData[j][3][12:16]) #fixed_bossData[7] : 시작일-년
fb.append(tmp_fixed_bossData[j][3][17:19]) #fixed_bossData[8] : 시작일-월
fb.append(tmp_fixed_bossData[j][3][20:22]) #fixed_bossData[9] : 시작일-일
fixed_bossData.append(fb)
fb = []
fixed_bossFlag.append(False)
fixed_bossFlag0.append(False)
fixed_bossTime.append(tmp_fixed_now.replace(year = int(fixed_bossData[j][7]), month = int(fixed_bossData[j][8]), day = int(fixed_bossData[j][9]), hour=int(fixed_bossData[j][1]), minute=int(fixed_bossData[j][2]), second = int(0)))
if fixed_bossTime[j] < tmp_fixed_now :
while fixed_bossTime[j] < tmp_fixed_now :
fixed_bossTime[j] = fixed_bossTime[j] + datetime.timedelta(hours=int(fixed_bossData[j][5]), minutes=int(fixed_bossData[j][6]), seconds = int(0))
if tmp_fixed_now + datetime.timedelta(minutes=int(basicSetting[1])) <= fixed_bossTime[j] < tmp_fixed_now + datetime.timedelta(minutes=int(basicSetting[3])):
fixed_bossFlag0[j] = True
if fixed_bossTime[j] < tmp_fixed_now + datetime.timedelta(minutes=int(basicSetting[1])):
fixed_bossFlag[j] = True
fixed_bossFlag0[j] = True
except:
raise Exception(f"[fixed_boss.ini] 파일 {tmp_fixed_bossData[j][0]} 부분 양식을 확인하세요.")
LoadChk = 0
print ("<불러오기 완료>")
else:
LoadChk = 1
print ("보스타임 정보가 없습니다.")
#save fixed boss dates
async def FixedBossDateSave():
global fixed_bossData
global fixed_bossTime
global fixed_bossNum
global FixedBossDateData
global indexFixedBossname
for i in range(fixed_bossNum):
FixedBossDateData[indexFixedBossname[i] + 3] = 'startDate = '+ fixed_bossTime[i].strftime('%Y-%m-%d') + '\n'
FixedBossDateDataSTR = ""
for j in range(len(FixedBossDateData)):
pos = len(FixedBossDateData[j])
tmpSTR = FixedBossDateData[j][:pos-1] + '\r\n'
FixedBossDateDataSTR += tmpSTR
contents = repo.get_contents("fixed_boss.ini")
repo.update_file(contents.path, "bossDB", FixedBossDateDataSTR, contents.sha)
#ladder (lottery) function
async def LadderFunc(number, ladderlist, channelVal):
result_ladder = random.sample(ladderlist, number)
lose_member = [item for item in ladderlist if item not in result_ladder]
result_ladderSTR = ','.join(map(str, result_ladder))
embed = discord.Embed(title = "🎲 사다리! 묻고 더블로 가!",color=0x00ff00)
embed.add_field(name = "👥 참가자", value = f"```fix\n{', '.join(ladderlist)}```", inline=False)
embed.add_field(name = "😍 당첨", value = f"```fix\n{', '.join(result_ladder)}```")
embed.add_field(name = "😭 낙첨", value = f"```{', '.join(lose_member)}```")
await channelVal.send(embed=embed, tts=False)
#initialize data
async def init_data_list(filename, first_line : str = "-----------"):
try :
contents = repo.get_contents(filename)
repo.update_file(contents.path, "deleted list " + str(filename), first_line, contents.sha)
print ('< 데이터 초기화 >')
except Exception as e :
print ('save error!!')
print(e.args[1]['message']) # output: This repository is empty.
errortime = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
print (errortime)
pass
#save data
async def data_list_Save(filename, first_line : str = "-----------", save_data : dict = {}):
output_list = first_line+ '\n'
for key, value in save_data.items():
output_list += str(key) + ' ' + str(value) + '\n'
try :
contents = repo.get_contents(filename)
repo.update_file(contents.path, "updated " + str(filename), output_list, contents.sha)
except Exception as e :
print ('save error!!')
print(e.args[1]['message']) # output: This repository is empty.
errortime = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
print (errortime)
pass
#server (guild) info
async def get_guild_channel_info(bot):
text_channel_name : list = []
text_channel_id : list = []
voice_channel_name : list = []
voice_channel_id : list = []
for guild in bot.guilds:
for text_channel in guild.text_channels:
text_channel_name.append(text_channel.name)
text_channel_id.append(str(text_channel.id))
for voice_channel in guild.voice_channels:
voice_channel_name.append(voice_channel.name)
voice_channel_id.append(str(voice_channel.id))
return text_channel_name, text_channel_id, voice_channel_name, voice_channel_id
class taskCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.checker = True
self.main_task.start()
@tasks.loop(seconds=1.0, count=1)
async def main_task(self):
boss_task = asyncio.get_event_loop().create_task(self.boss_check())
await boss_task
@main_task.before_loop
async def before_tast(self):
await self.bot.wait_until_ready()
################ Force restart command ################
@commands.command(name=command[8][0], aliases=command[8][1:])
async def command_task_list(self, ctx : commands.Context):
if ctx.message.channel.id != basicSetting[7]:
return
for t in asyncio.Task.all_tasks():
# print(t._coro.__name__)
if t._coro.__name__ == f"boss_check":
if t.done():
try:
t.exception()
except asyncio.CancelledError:
continue
continue
t.cancel()
# await ctx.send( '< 보탐봇 명치 맞고 숨 고르기 중! 잠시만요! >', tts=False)
try:
file = discord.File("./명치.JPG")
await ctx.send(file = file)
except:
await ctx.send( '< 보탐봇 명치 맞고 숨 고르기 중! 잠시만요! >', tts=False)
print("명치!")
await dbSave()
await data_list_Save("kill_list.ini", "-----척살명단-----", kill_Data)
await data_list_Save("item_list.ini", "-----아이템목록-----", item_Data)
for vc in self.bot.voice_clients:
if vc.guild.id == int(ctx.guild.id):
if vc.is_playing():
vc.stop()
await vc.disconnect(force=True)
if basicSetting[21] != "1":
print("명치복구완료!")
await dbLoad()
await self.bot.get_channel(channel).send( '< 다시 왔습니다!(보이스 미사용) >', tts=False)
self.checker = True
boss_task = asyncio.Task(self.boss_check())
return
async def boss_check(self):
await self.bot.wait_until_ready()
global channel
global endTime
global basicSetting
global bossData
global fixed_bossData
global bossNum
global fixed_bossNum
global chkvoicechannel
global chkrelogin
global bossTime
global tmp_bossTime
global fixed_bossTime
global bossTimeString
global bossDateString
global tmp_bossTimeString
global tmp_bossDateString
global bossFlag
global bossFlag0
global fixed_bossFlag
global fixed_bossFlag0
global bossMungFlag
global bossMungCnt
global channel_info
global channel_name
global channel_id
global channel_voice_name
global channel_voice_id
global channel_type
global endTime
global kill_Time
if chflg == 1 :
if len(self.bot.voice_clients) == 0 :
if basicSetting[21] == "1":
try:
await self.bot.get_channel(basicSetting[6]).connect(reconnect=True, timeout=5)
if self.bot.voice_clients[0].is_connected() :
await self.bot.get_channel(channel).send( '< 다시 왔습니다! >', tts=False)
self.checker = True
print("명치복구완료!")
except:
await self.bot.get_channel(channel).send( '< 음성채널 접속 에러! >', tts=False)
self.checker = False
print("명치복구실패!")
pass
await dbLoad()
while True:
############ Catch warnings! ############
if log_stream.getvalue().find("Awaiting") != -1:
log_stream.truncate(0)
log_stream.seek(0)
await self.bot.get_channel(channel).send( '< 디코접속에러! 잠깐 나갔다 올께요! >', tts=False)
await dbSave()
break
log_stream.truncate(0)
log_stream.seek(0)
##################################
now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
priv0 = now+datetime.timedelta(minutes=int(basicSetting[3]))
priv = now+datetime.timedelta(minutes=int(basicSetting[1]))
tmp_aftr1 = now+datetime.timedelta(minutes=int(0-int(basicSetting[2])))
tmp_aftr2 = now+datetime.timedelta(minutes=int(0-int(basicSetting[22])))
if channel != '':
################ Bot restart ################
if endTime.strftime('%Y-%m-%d ') + endTime.strftime('%H:%M:%S') == now.strftime('%Y-%m-%d ') + now.strftime('%H:%M:%S'):
await dbSave()
await FixedBossDateSave()
await data_list_Save("kill_list.ini", "-----척살명단-----", kill_Data)
await data_list_Save("item_list.ini", "-----아이템목록-----", item_Data)
print("보탐봇재시작!")
endTime = endTime + datetime.timedelta(days = int(basicSetting[13]))
for voice_client in self.bot.voice_clients:
if voice_client.is_playing():
voice_client.stop()
await voice_client.disconnect(force=True)
await asyncio.sleep(2)
inidata_restart = repo_restart.get_contents("restart.txt")
file_data_restart = base64.b64decode(inidata_restart.content)
file_data_restart = file_data_restart.decode('utf-8')
inputData_restart = file_data_restart.split('\n')
if len(inputData_restart) < 3:
contents12 = repo_restart.get_contents("restart.txt")
repo_restart.update_file(contents12.path, "restart_0", "restart\nrestart\nrestrat\n", contents12.sha)
else:
contents12 = repo_restart.get_contents("restart.txt")
repo_restart.update_file(contents12.path, "restart_1", "", contents12.sha)
############# 음성접속! ###########
if len(self.bot.voice_clients) == 0 and self.checker and basicSetting[21] == "1":
try:
await self.bot.get_channel(basicSetting[6]).connect(reconnect=True, timeout=5)
print(f"{now.strftime('%Y-%m-%d %H:%M:%S')} : 음성 채널 자동 재접속완료!")
except discord.errors.ClientException as e:
print(f"{now.strftime('%Y-%m-%d %H:%M:%S')} : 음성 자동 접속 부분에서 서버 음성 채널 이미 접속 에러 : {e}")
self.checker = False
pass
except Exception as e:
print(f"{now.strftime('%Y-%m-%d %H:%M:%S')} : 음성 자동 접속 부분에서 서버 음성 채널 타임아웃 에러 : {e}")
self.checker = False
pass
                    if len(self.bot.voice_clients) == 0 or not self.bot.voice_clients[0].is_connected():
print(f"{now.strftime('%Y-%m-%d %H:%M:%S')} : 음성 채널 자동 복구실패!")
await self.bot.get_channel(channel).send( '< 음성 채널 접속에 실패하였습니다. 잠시 후 음성 채널 접속을 시도해주세요! >')
self.checker = False
pass
################ 킬 목록 초기화 ################
if kill_Time.strftime('%Y-%m-%d ') + kill_Time.strftime('%H:%M') == now.strftime('%Y-%m-%d ') + now.strftime('%H:%M'):
kill_Time = kill_Time + datetime.timedelta(days=int(1))
await init_data_list('kill_list.ini', '-----척살명단-----')
################ 고정 보스 확인 ################
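                # Fixed (고정) bosses: fixed_bossFlag0 gates the basicSetting[3]-minute alert and
                # fixed_bossFlag the basicSetting[1]-minute alert. Setting either value to 0
                # pre-sets the flag so that alert is skipped.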
for i in range(fixed_bossNum):
if int(basicSetting[3]) == 0:
fixed_bossFlag0[i] = True
if int(basicSetting[1]) == 0:
fixed_bossFlag[i] = True
################ before_alert1 ################
if fixed_bossTime[i] <= priv0 and fixed_bossTime[i] > priv:
if basicSetting[3] != '0':
if fixed_bossFlag0[i] == False:
fixed_bossFlag0[i] = True
await self.bot.get_channel(channel).send("```" + fixed_bossData[i][0] + ' ' + basicSetting[3] + '분 전 ' + fixed_bossData[i][3] +' [' + fixed_bossTime[i].strftime('%H:%M:%S') + ']```', tts=False)
try:
if basicSetting[21] == "1":
await PlaySound(self.bot.voice_clients[0], './sound/' + fixed_bossData[i][0] + '알림1.mp3')
except:
pass
################ before_alert ################
if fixed_bossTime[i] <= priv and fixed_bossTime[i] > now and fixed_bossFlag0[i] == True :
if basicSetting[1] != '0' :
if fixed_bossFlag[i] == False:
fixed_bossFlag[i] = True
await self.bot.get_channel(channel).send("```" + fixed_bossData[i][0] + ' ' + basicSetting[1] + '분 전 ' + fixed_bossData[i][3] +' [' + fixed_bossTime[i].strftime('%H:%M:%S') + ']```', tts=False)
try:
if basicSetting[21] == "1":
await PlaySound(self.bot.voice_clients[0], './sound/' + fixed_bossData[i][0] + '알림.mp3')
except:
pass
################ 보스 젠 시간 확인 ################
if fixed_bossTime[i] <= now and fixed_bossFlag[i] == True and fixed_bossFlag0[i] == True :
fixed_bossTime[i] = fixed_bossTime[i]+datetime.timedelta(hours=int(fixed_bossData[i][5]), minutes=int(fixed_bossData[i][6]), seconds = int(0))
fixed_bossFlag0[i] = False
fixed_bossFlag[i] = False
embed = discord.Embed(
description= "```" + fixed_bossData[i][0] + fixed_bossData[i][4] + "```" ,
color=0x00ff00
)
await self.bot.get_channel(channel).send(embed=embed, tts=False)
try:
if basicSetting[21] == "1":
await PlaySound(self.bot.voice_clients[0], './sound/' + fixed_bossData[i][0] + '젠.mp3')
except:
pass
################ 일반 보스 확인 ################
for i in range(bossNum):
if int(basicSetting[3]) == 0:
bossFlag0[i] = True
if int(basicSetting[1]) == 0:
bossFlag[i] = True
################ before_alert1 ################
if bossTime[i] <= priv0 and bossTime[i] > priv:
if basicSetting[3] != '0':
if bossFlag0[i] == False:
bossFlag0[i] = True
if bossData[i][6] != '' :
await self.bot.get_channel(channel).send("```" + bossData[i][0] + ' ' + basicSetting[3] + '분 전 ' + bossData[i][3] + " [" + bossTimeString[i] + "]" + '\n<' + bossData[i][6] + '>```', tts=False)
else :
await self.bot.get_channel(channel).send("```" + bossData[i][0] + ' ' + basicSetting[3] + '분 전 ' + bossData[i][3] + " [" + bossTimeString[i] + "]```", tts=False)
try:
if basicSetting[21] == "1":
await PlaySound(self.bot.voice_clients[0], './sound/' + bossData[i][0] + '알림1.mp3')
except:
pass
################ before_alert ################
if bossTime[i] <= priv and bossTime[i] > now and bossFlag0[i] == True:
if basicSetting[1] != '0' :
if bossFlag[i] == False:
bossFlag[i] = True
if bossData[i][6] != '' :
await self.bot.get_channel(channel).send("```" + bossData[i][0] + ' ' + basicSetting[1] + '분 전 ' + bossData[i][3] + " [" + bossTimeString[i] + "]" + '\n<' + bossData[i][6] + '>```', tts=False)
else :
await self.bot.get_channel(channel).send("```" + bossData[i][0] + ' ' + basicSetting[1] + '분 전 ' + bossData[i][3] + " [" + bossTimeString[i] + "]```", tts=False)
try:
if basicSetting[21] == "1":
await PlaySound(self.bot.voice_clients[0], './sound/' + bossData[i][0] + '알림.mp3')
except:
pass
################ 보스 젠 시간 확인 ################
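                    # Spawn reached: keep the real spawn time in tmp_bossTime/tmp_*String, blank
                    # the visible strings to 99:99:99 / 9999-99-99, and push bossTime a year
                    # ahead so this branch cannot re-fire; bossMungFlag marks the boss as spawned
                    # and waiting for a user cut/멍 entry.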
if bossTime[i] <= now and bossFlag0[i] == True and bossFlag[i] == True :
#print ('if ', bossTime[i])
bossMungFlag[i] = True
tmp_bossTime[i] = bossTime[i]
tmp_bossTimeString[i] = tmp_bossTime[i].strftime('%H:%M:%S')
tmp_bossDateString[i] = tmp_bossTime[i].strftime('%Y-%m-%d')
bossTimeString[i] = '99:99:99'
bossDateString[i] = '9999-99-99'
bossTime[i] = now+datetime.timedelta(days=365)
if bossData[i][6] != '' :
embed = discord.Embed(
description= "```" + bossData[i][0] + bossData[i][4] + '\n<' + bossData[i][6] + '>```' ,
color=0x00ff00
)
else :
embed = discord.Embed(
description= "```" + bossData[i][0] + bossData[i][4] + "```" ,
color=0x00ff00
)
await self.bot.get_channel(channel).send(embed=embed, tts=False)
try:
if basicSetting[21] == "1":
await PlaySound(self.bot.voice_clients[0], './sound/' + bossData[i][0] + '젠.mp3')
except:
pass
################ 보스 자동 멍 처리 ################
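                    # Automatic 멍/미입력 handling: bossData[i][7] selects the grace period
                    # (basicSetting[2] vs basicSetting[22] minutes after spawn). Once exceeded,
                    # the boss is either dropped entirely (miss counter reached basicSetting[17])
                    # or rescheduled one full respawn interval later with the counter increased.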
if bossMungFlag[i] == True:
if bossData[i][7] == "1":
aftr = tmp_aftr1
else:
aftr = tmp_aftr2
if (bossTime[i]+datetime.timedelta(days=-365)) <= aftr:
if basicSetting[2] != '0' and basicSetting[22] != '0' and bossFlag[i] == True and bossFlag0[i] == True and bossMungFlag[i] == True :
if int(basicSetting[17]) <= bossMungCnt[i] and int(basicSetting[17]) != 0:
bossTime[i] = datetime.datetime.now()+datetime.timedelta(days=365, hours = int(basicSetting[0]))
tmp_bossTime[i] = datetime.datetime.now()+datetime.timedelta(days=365, hours = int(basicSetting[0]))
bossTimeString[i] = '99:99:99'
bossDateString[i] = '9999-99-99'
tmp_bossTimeString[i] = '99:99:99'
tmp_bossDateString[i] = '9999-99-99'
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = 0
if bossData[i][2] == '0':
await self.bot.get_channel(channel).send(f'```자동 미입력 횟수 {basicSetting[17]}회 초과! [{bossData[i][0]}] 삭제!```', tts=False)
print ('자동미입력 횟수초과 <' + bossData[i][0] + ' 삭제완료>')
else:
await self.bot.get_channel(channel).send(f'```자동 멍처리 횟수 {basicSetting[17]}회 초과! [{bossData[i][0]}] 삭제!```', tts=False)
print ('자동멍처리 횟수초과 <' + bossData[i][0] + ' 삭제완료>')
#await dbSave()
else:
################ 미입력 보스 ################
if bossData[i][2] == '0':
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = bossMungCnt[i] + 1
tmp_bossTime[i] = bossTime[i] = nextTime = tmp_bossTime[i]+datetime.timedelta(hours=int(bossData[i][1]), minutes=int(bossData[i][5]))
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
await self.bot.get_channel(channel).send("```" + bossData[i][0] + ' 미입력 됐습니다.```', tts=False)
embed = discord.Embed(
description= '```다음 ' + bossData[i][0] + ' ' + bossTimeString[i] + '입니다.```',
color=0xff0000
)
await self.bot.get_channel(channel).send(embed=embed, tts=False)
try:
if basicSetting[21] == "1":
await PlaySound(self.bot.voice_clients[0], './sound/' + bossData[i][0] + '미입력.mp3')
except:
pass
################ 멍 보스 ################
else :
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = bossMungCnt[i] + 1
tmp_bossTime[i] = bossTime[i] = nextTime = tmp_bossTime[i]+datetime.timedelta(hours=int(bossData[i][1]), minutes=int(bossData[i][5]))
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
await self.bot.get_channel(channel).send("```" + bossData[i][0] + ' 멍 입니다.```')
embed = discord.Embed(
description= '```다음 ' + bossData[i][0] + ' ' + bossTimeString[i] + '입니다.```',
color=0xff0000
)
await self.bot.get_channel(channel).send(embed=embed, tts=False)
try:
if basicSetting[21] == "1":
await PlaySound(self.bot.voice_clients[0], './sound/' + bossData[i][0] + '멍.mp3')
except:
pass
await asyncio.sleep(1) # task runs every 60 seconds
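        # Reached only when the while-loop breaks (the "Awaiting" gateway warning was seen in
        # log_stream): drop voice, cancel the old boss_check task, persist the data and schedule
        # a fresh boss_check task so monitoring resumes.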
self.checker = False
for voice_client in self.bot.voice_clients:
if voice_client.is_playing():
voice_client.stop()
await voice_client.disconnect(force=True)
for t in asyncio.Task.all_tasks():
if t._coro.__name__ == f"boss_check":
print("-------------")
if t.done():
try:
t.exception()
except asyncio.CancelledError:
continue
continue
t.cancel()
await dbSave()
await data_list_Save("kill_list.ini", "-----척살명단-----", kill_Data)
await data_list_Save("item_list.ini", "-----아이템목록-----", item_Data)
boss_task = asyncio.Task(self.boss_check())
class mainCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
################ 보탐봇 입장 ################
@commands.has_permissions(manage_messages=True)
@commands.command(name=command[0][0], aliases=command[0][1:])
async def join_(self, ctx):
global basicSetting
global chflg
if basicSetting[7] == "":
channel = ctx.message.channel.id #메세지가 들어온 채널 ID
print ('[ ', basicSetting[7], ' ]')
print ('] ', ctx.message.channel.name, ' [')
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
for i in range(len(inputData_textCH)):
if inputData_textCH[i].startswith("textchannel ="):
inputData_textCH[i] = 'textchannel = ' + str(channel) + '\r'
basicSetting[7] = channel
#print ('======', inputData_text[i])
result_textCH = '\n'.join(inputData_textCH)
#print (result_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
await ctx.send(f"< 텍스트채널 [{ctx.message.channel.name}] 접속완료 >\n< 음성채널 접속 후 [{command[5][0]}] 명령을 사용 하세요 >", tts=False)
print('< 텍스트채널 [' + ctx.guild.get_channel(basicSetting[7]).name + '] 접속완료>')
if basicSetting[6] != "" and basicSetting[21] == "1":
try:
await ctx.guild.get_channel(basicSetting[6]).connect(reconnect=True, timeout=5)
print('< 음성채널 [' + ctx.guild.get_channel(basicSetting[6]).name + '] 접속완료>')
except:
print('< 음성채널 [' + ctx.guild.get_channel(basicSetting[6]).name + '] 접속에러! >')
pass
if basicSetting[8] != "":
if str(basicSetting[8]) in channel_id:
print('< 사다리채널 [' + ctx.guild.get_channel(int(basicSetting[8])).name + '] 접속완료 >')
else:
basicSetting[8] = ""
print(f"사다리채널 ID 오류! [{command[28][0]} 사다리] 명령으로 재설정 바랍니다.")
if basicSetting[11] != "":
if str(basicSetting[11]) in channel_id:
print('< 정산채널 [' + ctx.guild.get_channel(int(basicSetting[11])).name + '] 접속완료>')
else:
basicSetting[11] = ""
print(f"정산채널 ID 오류! [{command[28][0]} 정산] 명령으로 재설정 바랍니다.")
if basicSetting[18] != "":
if str(basicSetting[18]) in channel_id:
print('< 척살채널 [' + ctx.guild.get_channel(int(basicSetting[18])).name + '] 접속완료>')
else:
basicSetting[18] = ""
print(f"척살채널 ID 오류! [{command[28][0]} 척살] 명령으로 재설정 바랍니다.")
if basicSetting[19] != "":
if str(basicSetting[19]) in channel_id:
print('< 경주채널 [' + ctx.guild.get_channel(int(basicSetting[19])).name + '] 접속완료>')
else:
basicSetting[19] = ""
print(f"경주채널 ID 오류! [{command[28][0]} 경주] 명령으로 재설정 바랍니다.")
if basicSetting[20] != "":
if str(basicSetting[20]) in channel_id:
print('< 아이템채널 [' + ctx.guild.get_channel(int(basicSetting[20])).name + '] 접속완료>')
else:
basicSetting[20] = ""
print(f"아이템채널 ID 오류! [{command[28][0]} 아이템] 명령으로 재설정 바랍니다.")
if int(basicSetting[13]) != 0 :
print('< 보탐봇 재시작 시간 ' + endTime.strftime('%Y-%m-%d ') + endTime.strftime('%H:%M:%S') + ' >')
print('< 보탐봇 재시작 주기 ' + basicSetting[13] + '일 >')
else :
print('< 보탐봇 재시작 설정안됨 >')
chflg = 1
else:
curr_guild_info = None
for guild in self.bot.guilds:
for text_channel in guild.text_channels:
if basicSetting[7] == text_channel.id:
curr_guild_info = guild
emoji_list : list = ["⭕", "❌"]
guild_error_message = await ctx.send(f"이미 **[{curr_guild_info.name}]** 서버 **[{setting_channel_name}]** 채널이 명령어 채널로 설정되어 있습니다.\n해당 채널로 명령어 채널을 변경 하시려면 ⭕ 그대로 사용하시려면 ❌ 를 눌러주세요.\n(10초이내 미입력시 기존 설정 그대로 설정됩니다.)", tts=False)
for emoji in emoji_list:
await guild_error_message.add_reaction(emoji)
def reaction_check(reaction, user):
return (reaction.message.id == guild_error_message.id) and (user.id == ctx.author.id) and (str(reaction) in emoji_list)
try:
reaction, user = await self.bot.wait_for('reaction_add', check = reaction_check, timeout = 10)
except asyncio.TimeoutError:
return await ctx.send(f"시간이 초과됐습니다. **[{curr_guild_info.name}]** 서버 **[{setting_channel_name}]** 채널에서 사용해주세요!")
if str(reaction) == "⭕":
if ctx.voice_client is not None:
await ctx.voice_client.disconnect(force=True)
basicSetting[6] = ""
basicSetting[7] = int(ctx.message.channel.id)
print ('[ ', basicSetting[7], ' ]')
print ('] ', ctx.message.channel.name, ' [')
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
for i in range(len(inputData_textCH)):
if inputData_textCH[i].startswith("textchannel ="):
inputData_textCH[i] = 'textchannel = ' + str(basicSetting[7]) + '\r'
result_textCH = '\n'.join(inputData_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
return await ctx.send(f"명령어 채널이 **[{ctx.author.guild.name}]** 서버 **[{ctx.message.channel.name}]** 채널로 새로 설정되었습니다.\n< 음성채널 접속 후 [{command[5][0]}] 명령을 사용 하세요 >")
else:
return await ctx.send(f"명령어 채널 설정이 취소되었습니다.\n**[{curr_guild_info.name}]** 서버 **[{setting_channel_name}]** 채널에서 사용해주세요!")
################ 보탐봇 메뉴 출력 ################
@commands.command(name=command[1][0], aliases=command[1][1:])
async def menu_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
command_list = ''
command_list += ','.join(command[2]) + '\n' #!설정확인
command_list += ','.join(command[3]) + '\n' #!채널확인
command_list += ','.join(command[4]) + ' [채널명]\n' #!채널이동
command_list += ','.join(command[5]) + ' ※ 관리자만 실행 가능\n' #!소환
command_list += ','.join(command[6]) + '\n' #!불러오기
command_list += ','.join(command[7]) + '\n' #!초기화
command_list += ','.join(command[8]) + '\n' #!명치
command_list += ','.join(command[9]) + '\n' #!재시작
command_list += ','.join(command[10]) + '\n' #!미예약
command_list += ','.join(command[11]) + ' [인원] [금액]\n' #!분배
command_list += ','.join(command[12]) + ' [뽑을인원수] [아이디1] [아이디2]...\n' #!사다리
command_list += ','.join(command[27]) + ' [아이디1] [아이디2]...(최대 12명)\n' #!경주
command_list += ','.join(command[41]) + ' [추첨인원] (대기시간/초) *(메모)\n' #!럭키박스
command_list += ','.join(command[35]) + ' [판매금액] (거래소세금)\n' #!수수료
command_list += ','.join(command[36]) + ' [거래소금액] [실거래금액] (거래소세금)\n' #!페이백
command_list += ','.join(command[13]) + ' [아이디]\n' #!정산
command_list += ','.join(command[14]) + ' 또는 ' + ','.join(command[14]) + ' 0000, 00:00\n' #!보스일괄
command_list += ','.join(command[40]) + ' 또는 ' + ','.join(command[40]) + ' 0000, 00:00\n' #!멍일괄
command_list += ','.join(command[43]) + f' [00:00:00 : 보스명(엔터) ...]\n※ 보스탐 결과 복붙 가능\nex){command[43][0]} + 12:34:00 : {bossData[0][0]}\n+ 10:56:00 : {bossData[1][0]}\n+ (+1d) 12:12:00 : {bossData[2][0]}...\n' #!컷등록
command_list += ','.join(command[44]) + f' [00:00:00 : 보스명(엔터) ...]\n※ [00:00:00 보스명] 형태로 여러줄(엔터)로 구분하여 등록\nex){command[44][0]} + 12:34:00 : {bossData[0][0]}\n10:56:00 : {bossData[1][0]}\n+ (+1d) 12:12:00 : {bossData[2][0]}...\n' #!예상등록
command_list += ','.join(command[45]) + ' [시간(00:00)] [추가시간(숫자)] [보스명1] [보스명2] [보스명3] ...\n' #!추가등록
command_list += ','.join(command[15]) + '\n' #!q
command_list += ','.join(command[16]) + ' [할말]\n' #!v
command_list += ','.join(command[17]) + '\n' #!리젠
command_list += ','.join(command[18]) + '\n' #!현재시간
command_list += ','.join(command[24]) + '\n' #!킬초기화
command_list += ','.join(command[25]) + '\n' #!킬횟수 확인
command_list += ','.join(command[25]) + ' [아이디]\n' #!킬
command_list += ','.join(command[26]) + ' [아이디]\n' #!킬삭제
command_list += ','.join(command[33]) + ' [아이디] 또는 ' + ','.join(command[33]) + ' [아이디] [횟수]\n' #!킬차감
command_list += ','.join(command[29]) + '\n' #!아이템 목록 초기화
command_list += ','.join(command[30]) + '\n' #!아이템 목록 확인
command_list += ','.join(command[30]) + ' [아이템] 또는 ' + ','.join(command[30]) + ' [아이템] [개수]\n' #!아이템 목록 입력
command_list += ','.join(command[31]) + ' [아이템]\n' #!아이템 목록에서 삭제
command_list += ','.join(command[32]) + ' [아이템] 또는 ' + ','.join(command[32]) + ' [아이템] [개수]\n' #!아이템 차감
command_list += ','.join(command[19]) + '\n' #!공지
command_list += ','.join(command[19]) + ' [공지내용]\n' #!공지
command_list += ','.join(command[20]) + '\n' #!공지삭제
command_list += ','.join(command[21]) + ' [할말]\n' #!상태
command_list += ','.join(command[28]) + ' 사다리, 정산, 척살, 경주, 아이템\n' #!채널설정
command_list += ','.join(command[42]) + ' 사다리, 정산, 척살, 경주, 아이템\n' #!채널삭제
command_list += ','.join(command[34]) + ' ※ 관리자만 실행 가능\n\n' #서버나가기
command_list += ','.join(command[22]) + '\n' #보스탐
command_list += ','.join(command[23]) + '\n' #!보스탐
command_list += '[보스명]컷 또는 [보스명]컷 0000, 00:00\n'
command_list += '[보스명] 컷 또는 [보스명] 컷 0000, 00:00\n'
command_list += '[보스명]멍 또는 [보스명]멍 0000, 00:00\n'
command_list += '[보스명]예상 또는 [보스명]예상 0000, 00:00\n'
command_list += '[보스명]삭제\n'
command_list += '[보스명]메모 [할말]\n'
embed = discord.Embed(
title = "----- 명령어 -----",
description= '```' + command_list + '```',
color=0xff00ff
)
embed.add_field(
name="----- 추가기능 -----",
value= '```- [보스명]컷/멍/예상 [할말] : 보스시간 입력 후 빈칸 두번!! 메모 가능\n- [보스명]컷 명령어는 초성으로 입력가능합니다.\n ex)' + bossData[0][0] + '컷 => ' + convertToInitialLetters(bossData[0][0] +'컷') + ', ' + bossData[0][0] + ' 컷 => ' + convertToInitialLetters(bossData[0][0] +' 컷') + '```'
)
await ctx.send( embed=embed, tts=False)
else:
return
################ 보탐봇 기본 설정확인 ################
@commands.command(name=command[2][0], aliases=command[2][1:])
async def setting_(self, ctx):
#print (ctx.message.channel.id)
if ctx.message.channel.id == basicSetting[7]:
setting_val = '보탐봇버전 : Server Ver. 28 (2021. 6. 23.)\n'
if basicSetting[6] != "" :
setting_val += '음성채널 : ' + self.bot.get_channel(basicSetting[6]).name + '\n'
setting_val += '텍스트채널 : ' + self.bot.get_channel(basicSetting[7]).name +'\n'
if basicSetting[8] != "" :
setting_val += '사다리채널 : ' + self.bot.get_channel(int(basicSetting[8])).name + '\n'
if basicSetting[11] != "" :
setting_val += '정산채널 : ' + self.bot.get_channel(int(basicSetting[11])).name + '\n'
if basicSetting[18] != "" :
setting_val += '척살채널 : ' + self.bot.get_channel(int(basicSetting[18])).name + '\n'
if basicSetting[19] != "" :
setting_val += '경주채널 : ' + self.bot.get_channel(int(basicSetting[19])).name + '\n'
if basicSetting[20] != "" :
setting_val += '아이템채널 : ' + self.bot.get_channel(int(basicSetting[20])).name + '\n'
setting_val += '보스젠알림시간1 : ' + basicSetting[1] + ' 분 전\n'
setting_val += '보스젠알림시간2 : ' + basicSetting[3] + ' 분 전\n'
setting_val += '보스멍확인시간1 : ' + basicSetting[2] + ' 분 후\n'
setting_val += '보스멍확인시간2 : ' + basicSetting[22] + ' 분 후\n'
if basicSetting[21] == "0":
setting_val += '보이스사용여부 : 사용안함\n'
else:
setting_val += '보이스사용여부 : 사용중\n'
embed = discord.Embed(
title = "----- 설정내용 -----",
description= f'```{setting_val}```',
color=0xff00ff
)
embed.add_field(
name="----- Special Thanks to. -----",
value= '```총무, 옹님, 공부중, 꽃신, 별빛, 크마, D.H.Kim, K.H.Sim, 쿠쿠, 오브로드, D.H.Oh, Bit, 팥빵, 천려, 이파리, 도미, 일깡, B.Park```'
)
await ctx.send(embed=embed, tts=False)
else:
return
################ 서버 채널 확인 ################
@commands.command(name=command[3][0], aliases=command[3][1:])
async def chChk_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
channel_name, channel_id, channel_voice_name, channel_voice_id = await get_guild_channel_info(self.bot)
ch_information = []
cnt = 0
ch_information.append("")
ch_voice_information = []
cntV = 0
ch_voice_information.append("")
for guild in self.bot.guilds:
ch_information[cnt] = f"{ch_information[cnt]}👑 {guild.name} 👑\n"
for i in range(len(channel_name)):
for text_channel in guild.text_channels:
if channel_id[i] == str(text_channel.id):
if len(ch_information[cnt]) > 900 :
ch_information.append("")
cnt += 1
ch_information[cnt] = f"{ch_information[cnt]}[{channel_id[i]}] {channel_name[i]}\n"
ch_voice_information[cntV] = f"{ch_voice_information[cntV]}👑 {guild.name} 👑\n"
for i in range(len(channel_voice_name)):
for voice_channel in guild.voice_channels:
if channel_voice_id[i] == str(voice_channel.id):
if len(ch_voice_information[cntV]) > 900 :
ch_voice_information.append("")
cntV += 1
ch_voice_information[cntV] = f"{ch_voice_information[cntV]}[{channel_voice_id[i]}] {channel_voice_name[i]}\n"
######################
if len(ch_information) == 1 and len(ch_voice_information) == 1:
embed = discord.Embed(
title = "----- 채널 정보 -----",
description = '',
color=0xff00ff
)
embed.add_field(
name="< 택스트 채널 >",
value= '```' + ch_information[0] + '```',
inline = False
)
embed.add_field(
name="< 보이스 채널 >",
value= '```' + ch_voice_information[0] + '```',
inline = False
)
await ctx.send( embed=embed, tts=False)
else :
embed = discord.Embed(
title = "----- 채널 정보 -----\n< 택스트 채널 >",
description= '```' + ch_information[0] + '```',
color=0xff00ff
)
await ctx.send( embed=embed, tts=False)
for i in range(len(ch_information)-1):
embed = discord.Embed(
title = '',
description= '```' + ch_information[i+1] + '```',
color=0xff00ff
)
await ctx.send( embed=embed, tts=False)
embed = discord.Embed(
title = "< 음성 채널 >",
description= '```' + ch_voice_information[0] + '```',
color=0xff00ff
)
await ctx.send( embed=embed, tts=False)
for i in range(len(ch_voice_information)-1):
embed = discord.Embed(
title = '',
description= '```' + ch_voice_information[i+1] + '```',
color=0xff00ff
)
await ctx.send( embed=embed, tts=False)
else:
return
################ 텍스트채널이동 ################
@commands.command(name=command[4][0], aliases=command[4][1:])
async def chMove_(self, ctx):
global basicSetting
if ctx.message.channel.id == basicSetting[7]:
msg = ctx.message.content[len(ctx.invoked_with)+1:]
channel = None
for i in range(len(channel_name)):
if channel_name[i] == msg:
channel = int(channel_id[i])
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
for i in range(len(inputData_textCH)):
if inputData_textCH[i].startswith('textchannel ='):
inputData_textCH[i] = 'textchannel = ' + str(channel) + '\r'
basicSetting[7] = int(channel)
result_textCH = '\n'.join(inputData_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
await ctx.send( f"명령어 채널이 < {ctx.message.channel.name} >에서 < {self.bot.get_channel(channel).name} > 로 이동되었습니다.", tts=False)
await self.bot.get_channel(channel).send( f"< {self.bot.get_channel(channel).name} 이동완료 >", tts=False)
else:
return
################ 보탐봇 음성채널 소환 ################
@commands.has_permissions(manage_messages=True)
@commands.command(name=command[5][0], aliases=command[5][1:])
async def connectVoice_(self, ctx):
global basicSetting
if ctx.message.channel.id == basicSetting[7]:
if basicSetting[21] != "1":
return await ctx.send('```보이스를 사용하지 않도록 설정되어 있습니다.```', tts=False)
if ctx.voice_client is None:
if ctx.author.voice:
try:
await ctx.author.voice.channel.connect(reconnect=True, timeout=5)
except:
await ctx.send('음성채널에 접속에 실패하였습니다.', tts=False)
pass
else:
await ctx.send('음성채널에 먼저 들어가주세요.', tts=False)
return
else:
if ctx.voice_client.is_playing():
ctx.voice_client.stop()
await ctx.voice_client.move_to(ctx.author.voice.channel)
voice_channel = ctx.author.voice.channel
print ('< ', basicSetting[6], ' >')
print ('> ', self.bot.get_channel(voice_channel.id).name, ' <')
if basicSetting[6] == "":
inidata_voiceCH = repo.get_contents("test_setting.ini")
file_data_voiceCH = base64.b64decode(inidata_voiceCH.content)
file_data_voiceCH = file_data_voiceCH.decode('utf-8')
inputData_voiceCH = file_data_voiceCH.split('\n')
for i in range(len(inputData_voiceCH)):
if inputData_voiceCH[i].startswith('voicechannel ='):
inputData_voiceCH[i] = 'voicechannel = ' + str(voice_channel.id) + '\r'
basicSetting[6] = int(voice_channel.id)
result_voiceCH = '\n'.join(inputData_voiceCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_voiceCH, contents.sha)
elif basicSetting[6] != int(voice_channel.id):
inidata_voiceCH = repo.get_contents("test_setting.ini")
file_data_voiceCH = base64.b64decode(inidata_voiceCH.content)
file_data_voiceCH = file_data_voiceCH.decode('utf-8')
inputData_voiceCH = file_data_voiceCH.split('\n')
for i in range(len(inputData_voiceCH)):
if inputData_voiceCH[i].startswith('voicechannel ='):
inputData_voiceCH[i] = 'voicechannel = ' + str(voice_channel.id) + '\r'
basicSetting[6] = int(voice_channel.id)
result_voiceCH = '\n'.join(inputData_voiceCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_voiceCH, contents.sha)
await ctx.send('< 음성채널 [' + self.bot.get_channel(voice_channel.id).name + '] 접속완료>', tts=False)
else:
return
################ my_bot.db에 저장된 보스타임 불러오기 ################
@commands.command(name=command[6][0], aliases=command[6][1:])
async def loadDB_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
await dbLoad()
if LoadChk == 0:
await ctx.send('<불러오기 완료>', tts=False)
else:
await ctx.send('<보스타임 정보가 없습니다.>', tts=False)
else:
return
################ 저장된 정보 초기화 ################
@commands.command(name=command[7][0], aliases=command[7][1:])
async def initVal_(self, ctx):
global basicSetting
global bossData
global fixed_bossData
global bossTime
global tmp_bossTime
global fixed_bossTime
global bossTimeString
global bossDateString
global tmp_bossTimeString
global tmp_bossDateString
global bossFlag
global bossFlag0
global fixed_bossFlag
global fixed_bossFlag0
global bossMungFlag
global bossMungCnt
global FixedBossDateData
global indexFixedBossname
if ctx.message.channel.id == basicSetting[7]:
basicSetting = []
bossData = []
fixed_bossData = []
bossTime = []
tmp_bossTime = []
fixed_bossTime = []
bossTimeString = []
bossDateString = []
tmp_bossTimeString = []
tmp_bossDateString = []
bossFlag = []
bossFlag0 = []
fixed_bossFlag = []
fixed_bossFlag0 = []
bossMungFlag = []
bossMungCnt = []
FixedBossDateData = []
indexFixedBossname = []
init()
await dbSave()
await ctx.send('< 초기화 완료 >', tts=False)
print ("< 초기화 완료 >")
else:
return
################ 보탐봇 재시작 ################
@commands.command(name=command[9][0], aliases=command[9][1:])
async def restart_(self, ctx):
global basicSetting
global bossTimeString
global bossDateString
if ctx.message.channel.id == basicSetting[7]:
if basicSetting[2] != '0' and basicSetting[22] != '0':
for i in range(bossNum):
if bossMungFlag[i] == True:
bossTimeString[i] = tmp_bossTime[i].strftime('%H:%M:%S')
bossDateString[i] = tmp_bossTime[i].strftime('%Y-%m-%d')
await dbSave()
await data_list_Save("kill_list.ini", "-----척살명단-----", kill_Data)
await data_list_Save("item_list.ini", "-----아이템목록-----", item_Data)
for voice_client in self.bot.voice_clients:
if voice_client.is_playing():
voice_client.stop()
await voice_client.disconnect(force=True)
print("보탐봇강제재시작!")
await asyncio.sleep(2)
inidata_restart = repo_restart.get_contents("restart.txt")
file_data_restart = base64.b64decode(inidata_restart.content)
file_data_restart = file_data_restart.decode('utf-8')
inputData_restart = file_data_restart.split('\n')
if len(inputData_restart) < 3:
contents12 = repo_restart.get_contents("restart.txt")
repo_restart.update_file(contents12.path, "restart_0", "restart\nrestart\nrestrat\n", contents12.sha)
else:
contents12 = repo_restart.get_contents("restart.txt")
repo_restart.update_file(contents12.path, "restart_1", "", contents12.sha)
else:
return
################ 미예약 보스타임 출력 ################
@commands.command(name=command[10][0], aliases=command[10][1:])
async def nocheckBoss_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
tmp_boss_information = []
tmp_cnt = 0
tmp_boss_information.append('')
for i in range(bossNum):
if bossTimeString[i] == '99:99:99' and bossMungFlag[i] != True :
if len(tmp_boss_information[tmp_cnt]) > 1800 :
tmp_boss_information.append('')
tmp_cnt += 1
tmp_boss_information[tmp_cnt] = tmp_boss_information[tmp_cnt] + bossData[i][0] + ','
if len(tmp_boss_information) == 1:
if len(tmp_boss_information[0]) != 0:
tmp_boss_information[0] = "```fix\n" + tmp_boss_information[0][:len(tmp_boss_information[0])-1] + "\n```"
else :
tmp_boss_information[0] = '``` ```'
embed = discord.Embed(
title = "----- 미예약 보스 -----",
description= tmp_boss_information[0],
color=0x0000ff
)
await ctx.send( embed=embed, tts=False)
else:
if len(tmp_boss_information[0]) != 0:
if len(tmp_boss_information) == 1 :
tmp_boss_information[0] = "```fix\n" + tmp_boss_information[0][:len(tmp_boss_information[0])-1] + "\n```"
else:
tmp_boss_information[0] = "```fix\n" + tmp_boss_information[0] + "\n```"
else :
tmp_boss_information[0] = '``` ```'
embed = discord.Embed(
title = "----- 미예약 보스 -----",
description= tmp_boss_information[0],
color=0x0000ff
)
await ctx.send( embed=embed, tts=False)
for i in range(len(tmp_boss_information)-1):
if len(tmp_boss_information[i+1]) != 0:
if i == len(tmp_boss_information)-2:
tmp_boss_information[i+1] = "```fix\n" + tmp_boss_information[i+1][:len(tmp_boss_information[i+1])-1] + "\n```"
else:
tmp_boss_information[i+1] = "```fix\n" + tmp_boss_information[i+1] + "\n```"
else :
tmp_boss_information[i+1] = '``` ```'
embed = discord.Embed(
title = '',
description= tmp_boss_information[i+1],
color=0x0000ff
)
await ctx.send( embed=embed, tts=False)
else:
return
################ 분배 결과 출력 ################
@commands.command(name=command[11][0], aliases=command[11][1:])
async def bunbae_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
msg = ctx.message.content[len(ctx.invoked_with)+1:]
separate_money = []
separate_money = msg.split(" ")
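            # Loot-split maths: a flat 5% exchange tax is applied twice, once on the total sale
            # and once again when each member lists their own share, so the per-person payout is
            # floor(floor(real_money / num_sep) * 0.95).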
num_sep = floor(int(separate_money[0]))
cal_tax1 = floor(float(separate_money[1])*0.05)
real_money = floor(floor(float(separate_money[1])) - cal_tax1)
cal_tax2 = floor(real_money/num_sep) - floor(float(floor(real_money/num_sep))*0.95)
if num_sep == 0 :
await ctx.send('```분배 인원이 0입니다. 재입력 해주세요.```', tts=False)
else :
embed = discord.Embed(
title = "----- 분배결과! -----",
description= '```1차 세금 : ' + str(cal_tax1) + '\n1차 수령액 : ' + str(real_money) + '\n분배자 거래소등록금액 : ' + str(floor(real_money/num_sep)) + '\n2차 세금 : ' + str(cal_tax2) + '\n인당 실수령액 : ' + str(floor(float(floor(real_money/num_sep))*0.95)) + '```',
color=0xff00ff
)
await ctx.send(embed=embed, tts=False)
else:
return
################ 사다리 결과 출력 ################
@commands.command(name=command[12][0], aliases=command[12][1:])
async def ladder_(self, ctx : commands.Context, *, args : str = None):
if basicSetting[8] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[8]:
if not args:
                return await ctx.send(f'```명령어 [인원] [아이디1] [아이디2] ... 형태로 입력해주시기 바랍니다.```')
ladder = args.split()
try:
num_cong = int(ladder[0]) # 뽑을 인원
del(ladder[0])
except ValueError:
return await ctx.send(f'```뽑을 인원은 숫자로 입력바랍니다\nex)!사다리 1 가 나 다 ...```')
if num_cong >= len(ladder):
return await ctx.send(f'```추첨인원이 총 인원과 같거나 많습니다. 재입력 해주세요```')
if len(ladder) > 20:
await LadderFunc(num_cong, ladder, ctx)
return
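            # Inline 사다리 (ghost-leg / Amidakuji) draw for up to 20 names: build a random
            # lattice of rungs ("-"), shuffle the o/x prize column onto the bottom row, then walk
            # the rows bottom-up and swap the two outcomes each rung connects. This reproduces
            # the path each participant would take walking down the ladder.
            # Example: 3 names with outcomes o x x at the bottom and one rung between columns 1
            # and 2 ends with x o x at the top, so name 2 takes the prize.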
input_dict : dict = {}
ladder_description : list = []
ladder_data : list = []
output_list : list = []
result :dict = {}
for i in range(len(ladder)):
input_dict[f"{i+1}"] = ladder[i]
if i < num_cong:
output_list.append("o")
else:
output_list.append("x")
for i in range(len(ladder)+1):
tmp_list = []
if i%2 != 0:
sample_list = ["| |-", "| | "]
else:
sample_list = ["| | ", "|-| "]
for i in range(len(ladder)//2):
value = random.choice(sample_list)
tmp_list.append(value)
ladder_description.append(tmp_list)
tmp_result = list(input_dict.keys())
input_data : str = ""
for i in range(len(tmp_result)):
if int(tmp_result[i]) < 9:
input_data += f"{tmp_result[i]} "
else:
input_data += f"{tmp_result[i]}"
input_value_data = " ".join(list(input_dict.values()))
for i in range(len(ladder_description)):
if (len(ladder) % 2) != 0:
ladder_data.append(f"{''.join(ladder_description[i])}|\n")
else:
ladder_data.append(f"{''.join(ladder_description[i])[:-1]}\n")
random.shuffle(output_list)
output_data = list(" ".join(output_list))
for line in reversed(ladder_data):
for i, x in enumerate(line):
if i % 2 == 1 and x == '-':
output_data[i-1], output_data[i+1] = output_data[i+1], output_data[i-1]
for i in range(output_data.count(" ")):
output_data.remove(" ")
for i in range(len(tmp_result)):
result[tmp_result[i]] = output_data[i]
result_str : str = ""
join_member : list = []
win_member : list = []
lose_member : list = []
for x, y in result.items():
join_member.append(f"{x}:{input_dict[f'{x}']}")
if y == "o":
win_member.append(f"{input_dict[f'{x}']}")
else :
lose_member.append(f"{input_dict[f'{x}']}")
embed = discord.Embed(title = "🎲 사다리! 묻고 더블로 가!",
color=0x00ff00
)
embed.description = f"||```{input_data}\n{''.join(ladder_data)}{' '.join(output_list)}```||"
embed.add_field(name = "👥 참가자", value = f"```fix\n{', '.join(join_member)}```", inline=False)
embed.add_field(name = "😍 당첨", value = f"```fix\n{', '.join(win_member)}```")
embed.add_field(name = "😭 낙첨", value = f"```{', '.join(lose_member)}```")
return await ctx.send(embed = embed)
else:
return
################ 정산확인 ################
@commands.command(name=command[13][0], aliases=command[13][1:])
async def jungsan_(self, ctx):
if basicSetting[11] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[11]:
msg = ctx.message.content[len(ctx.invoked_with)+1:]
if basicSetting[10] !="" and basicSetting[12] !="" and basicSetting[14] !="" and basicSetting[15] !="" and basicSetting[16] !="" :
SearchID = msg
gc = gspread.authorize(credentials)
wks = gc.open(basicSetting[12]).worksheet(basicSetting[14])
wks.update_acell(basicSetting[15], SearchID)
result = wks.acell(basicSetting[16]).value
embed = discord.Embed(
description= '```' + SearchID + ' 님이 받을 다이야는 ' + result + ' 다이야 입니다.```',
color=0xff00ff
)
await ctx.send(embed=embed, tts=False)
else:
return
################ 보스타임 일괄 설정 ################
@commands.command(name=command[14][0], aliases=command[14][1:])
async def allBossInput_(self, ctx):
global basicSetting
global bossData
global fixed_bossData
global bossTime
global tmp_bossTime
global fixed_bossTime
global bossTimeString
global bossDateString
global tmp_bossTimeString
global tmp_bossDateString
global bossFlag
global bossFlag0
global bossMungFlag
global bossMungCnt
if ctx.message.channel.id == basicSetting[7]:
msg = ctx.message.content[len(ctx.invoked_with)+1:]
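            # Bulk entry: the argument may be "HHMM" or "HH:MM". It is treated as the last cut
            # time (rolled back one day if it lies in the future), then the respawn interval
            # (bossData[i][1] h + bossData[i][5] m) is added until the result is in the future,
            # counting the skipped cycles in bossMungCnt. With no argument the next spawn is
            # simply one interval from now.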
for i in range(bossNum):
if bossTimeString[i] == '99:99:99':
tmp_msg = msg
if len(tmp_msg) > 3 :
if tmp_msg.find(':') != -1 :
chkpos = tmp_msg.find(':')
hours1 = tmp_msg[chkpos-2:chkpos]
minutes1 = tmp_msg[chkpos+1:chkpos+3]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
chkpos = len(tmp_msg)-2
hours1 = tmp_msg[chkpos-2:chkpos]
minutes1 = tmp_msg[chkpos:chkpos+2]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = now2
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = 1
if tmp_now > now2 :
tmp_now = tmp_now + datetime.timedelta(days=int(-1))
if tmp_now < now2 :
deltaTime = datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
while now2 > tmp_now :
tmp_now = tmp_now + deltaTime
bossMungCnt[i] = bossMungCnt[i] + 1
now2 = tmp_now
bossMungCnt[i] = bossMungCnt[i] - 1
else :
now2 = now2 + datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
tmp_bossTime[i] = bossTime[i] = nextTime = now2
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
await dbSave()
await dbLoad()
await dbSave()
await ctx.send('<보스 일괄 입력 완료>', tts=False)
print ("<보스 일괄 입력 완료>")
else:
return
################ 멍보스타임 일괄 설정 ################
@commands.command(name=command[40][0], aliases=command[40][1:])
async def mungBossInput_(self, ctx):
global basicSetting
global bossData
global fixed_bossData
global bossTime
global tmp_bossTime
global fixed_bossTime
global bossTimeString
global bossDateString
global tmp_bossTimeString
global tmp_bossDateString
global bossFlag
global bossFlag0
global bossMungFlag
global bossMungCnt
if ctx.message.channel.id == basicSetting[7]:
msg = ctx.message.content[len(ctx.invoked_with)+1:]
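            # Same bulk-entry parsing as the 보스일괄 command above, but restricted to bosses
            # flagged as 멍 bosses (bossData[i][2] == "1") that have no time registered yet.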
for i in range(bossNum):
if bossData[i][2] == "1" and bossTimeString[i] == '99:99:99':
tmp_msg = msg
if len(tmp_msg) > 3 :
if tmp_msg.find(':') != -1 :
chkpos = tmp_msg.find(':')
hours1 = tmp_msg[chkpos-2:chkpos]
minutes1 = tmp_msg[chkpos+1:chkpos+3]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
chkpos = len(tmp_msg)-2
hours1 = tmp_msg[chkpos-2:chkpos]
minutes1 = tmp_msg[chkpos:chkpos+2]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = now2
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = 1
if tmp_now > now2 :
tmp_now = tmp_now + datetime.timedelta(days=int(-1))
if tmp_now < now2 :
deltaTime = datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
while now2 > tmp_now :
tmp_now = tmp_now + deltaTime
bossMungCnt[i] = bossMungCnt[i] + 1
now2 = tmp_now
bossMungCnt[i] = bossMungCnt[i] - 1
else :
now2 = now2 + datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
tmp_bossTime[i] = bossTime[i] = nextTime = now2
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
await dbSave()
await dbLoad()
await dbSave()
await ctx.send('<멍보스 일괄 입력 완료>', tts=False)
print ("<멍보스 일괄 입력 완료>")
else:
return
################ 가장 근접한 보스타임 출력 ################
@commands.command(name=command[15][0], aliases=command[15][1:])
async def nearTimeBoss_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
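            # Upcoming spawns: collect pending boss times up to one day ahead (fixed bosses only
            # up to about 3 hours ahead), sort them, and report at most basicSetting[9] entries
            # with the remaining hh:mm:ss until each spawn.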
checkTime = datetime.datetime.now() + datetime.timedelta(days=1, hours = int(basicSetting[0]))
datelist = []
datelist2 = []
ouput_bossData = []
aa = []
sorted_datelist = []
for i in range(bossNum):
if bossMungFlag[i] != True and bossTimeString[i] != '99:99:99' :
datelist2.append(bossTime[i])
for i in range(fixed_bossNum):
if fixed_bossTime[i] < datetime.datetime.now() + datetime.timedelta(hours=int(basicSetting[0])+3):
datelist2.append(fixed_bossTime[i])
datelist = list(set(datelist2))
for i in range(bossNum):
if bossMungFlag[i] != True :
aa.append(bossData[i][0]) #output_bossData[0] : 보스명
aa.append(bossTime[i]) #output_bossData[1] : 시간
aa.append(bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : 시간(00:00:00)
ouput_bossData.append(aa)
aa = []
for i in range(fixed_bossNum):
aa.append(fixed_bossData[i][0]) #output_bossData[0] : 보스명
aa.append(fixed_bossTime[i]) #output_bossData[1] : 시간
aa.append(fixed_bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : 시간(00:00:00)
ouput_bossData.append(aa)
aa = []
tmp_sorted_datelist = sorted(datelist)
for i in range(len(tmp_sorted_datelist)):
if checkTime > tmp_sorted_datelist[i]:
sorted_datelist.append(tmp_sorted_datelist[i])
if len(sorted_datelist) == 0:
await ctx.send( '<보스타임 정보가 없습니다.>', tts=False)
else :
result_lefttime = ''
if len(sorted_datelist) > int(basicSetting[9]):
for j in range(int(basicSetting[9])):
for i in range(len(ouput_bossData)):
if sorted_datelist[j] == ouput_bossData[i][1]:
leftTime = ouput_bossData[i][1] - (datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0])))
total_seconds = int(leftTime.total_seconds())
hours, remainder = divmod(total_seconds,60*60)
minutes, seconds = divmod(remainder,60)
result_lefttime += '다음 ' + ouput_bossData[i][0] + '탐까지 %02d:%02d:%02d 남았습니다. ' % (hours,minutes,seconds) + '[' + ouput_bossData[i][2] + ']\n'
else :
for j in range(len(sorted_datelist)):
for i in range(len(ouput_bossData)):
if sorted_datelist[j] == ouput_bossData[i][1]:
leftTime = ouput_bossData[i][1] - (datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0])))
total_seconds = int(leftTime.total_seconds())
hours, remainder = divmod(total_seconds,60*60)
minutes, seconds = divmod(remainder,60)
result_lefttime += '다음 ' + ouput_bossData[i][0] + '탐까지 %02d:%02d:%02d 남았습니다. ' % (hours,minutes,seconds) + '[' + ouput_bossData[i][2] + ']\n'
embed = discord.Embed(
description= result_lefttime,
color=0xff0000
)
await ctx.send( embed=embed, tts=False)
else:
return
################ 음성파일 생성 후 재생 ################
@commands.command(name=command[16][0], aliases=command[16][1:])
async def playText_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
if basicSetting[21] != "1":
return await ctx.send('```보이스를 사용하지 않도록 설정되어 있습니다.```', tts=False)
msg = ctx.message.content[len(ctx.invoked_with)+1:]
sayMessage = msg
try:
await MakeSound(ctx.message.author.display_name +'님이, ' + sayMessage, './sound/say')
except:
await ctx.send( f"```음성파일 생성에 실패하였습니다.!(amazon polly 사용시 키 값을 확인하세요!)```")
return
await ctx.send("```< " + ctx.author.display_name + " >님이 \"" + sayMessage + "\"```", tts=False)
try:
if aws_key != "" and aws_secret_key != "":
await PlaySound(ctx.voice_client, './sound/say.mp3')
else:
await PlaySound(ctx.voice_client, './sound/say.wav')
except:
await ctx.send( f"```음성파일 재생에 실패하였습니다. 접속에 문제가 있거나 음성채널에 접속 되지 않은 상태입니다.!```")
return
else:
return
################ 리젠시간 출력 ################
@commands.command(name=command[17][0], aliases=command[17][1:])
async def regenTime_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
await ctx.send(embed=regenembed, tts=False)
else:
return
################ 현재시간 확인 ################
@commands.command(name=command[18][0], aliases=command[18][1:])
async def currentTime_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
curruntTime = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
embed = discord.Embed(
title = '현재시간은 ' + curruntTime.strftime('%H') + '시 ' + curruntTime.strftime('%M') + '분 ' + curruntTime.strftime('%S')+ '초 입니다.',
color=0xff00ff
)
await ctx.send( embed=embed, tts=False)
else:
return
################ 공지 등록/확인 ################
@commands.command(name=command[19][0], aliases=command[19][1:])
async def notice_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
msg = ctx.message.content.split(" ")
if len(msg) > 1:
sayMessage = " ".join(msg[1:])
contents = repo.get_contents("notice.ini")
repo.update_file(contents.path, "notice 등록", sayMessage, contents.sha)
await ctx.send( '< 공지 등록완료 >', tts=False)
else:
notice_initdata = repo.get_contents("notice.ini")
notice = base64.b64decode(notice_initdata.content)
notice = notice.decode('utf-8')
if notice != '' :
embed = discord.Embed(
description= str(notice),
color=0xff00ff
)
else :
embed = discord.Embed(
description= '```등록된 공지가 없습니다.```',
color=0xff00ff
)
await ctx.send(embed=embed, tts=False)
else:
return
################ 공지 삭제 ################
@commands.command(name=command[20][0], aliases=command[20][1:])
async def noticeDel_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
contents = repo.get_contents("notice.ini")
repo.update_file(contents.path, "notice 삭제", '', contents.sha)
await ctx.send( '< 공지 삭제완료 >', tts=False)
else:
return
################ 봇 상태메세지 변경 ################
@commands.command(name=command[21][0], aliases=command[21][1:])
async def botStatus_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
msg = ctx.message.content[len(ctx.invoked_with)+1:]
sayMessage = msg
await self.bot.change_presence(status=discord.Status.online, activity=discord.Game(name=sayMessage, type=1), afk = False)
await ctx.send( '< 상태메세지 변경완료 >', tts=False)
else:
return
################ 보스타임 출력 ################
@commands.command(name=command[22][0], aliases=command[22][1:])
async def bossTime_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
datelist = []
datelist2 = []
ouput_bossData = []
aa = []
for i in range(bossNum):
if bossMungFlag[i] == True :
datelist2.append(tmp_bossTime[i])
else :
datelist2.append(bossTime[i])
for i in range(fixed_bossNum):
if fixed_bossTime[i] < datetime.datetime.now() + datetime.timedelta(hours=int(basicSetting[0])+3):
datelist2.append(fixed_bossTime[i])
datelist = list(set(datelist2))
tmp_boss_information = []
tmp_cnt = 0
tmp_time_delta = 0
tmp_boss_information.append('')
for i in range(bossNum):
if bossTimeString[i] == '99:99:99' and bossMungFlag[i] != True :
if len(tmp_boss_information[tmp_cnt]) > 1000 :
tmp_boss_information.append('')
tmp_cnt += 1
tmp_boss_information[tmp_cnt] = tmp_boss_information[tmp_cnt] + bossData[i][0] + ','
else :
aa.append(bossData[i][0]) #output_bossData[0] : 보스명
if bossMungFlag[i] == True :
aa.append(tmp_bossTime[i]) #output_bossData[1] : 시간
tmp_time_delta = (tmp_bossTime[i].date() - (datetime.datetime.now() + datetime.timedelta(hours=int(basicSetting[0]))).date()).days
if tmp_time_delta == 0:
aa.append(tmp_bossTime[i].strftime('%H:%M:%S'))
else:
if tmp_time_delta > 0:
aa.append(f"(+{tmp_time_delta}d) {tmp_bossTime[i].strftime('%H:%M:%S')}")
else:
aa.append(f"({tmp_time_delta}d) {tmp_bossTime[i].strftime('%H:%M:%S')}")
tmp_time_delta = 0
# aa.append(tmp_bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : 시간(00:00:00) -> 초빼기 : aa.append(tmp_bossTime[i].strftime('%H:%M'))
aa.append('-') #output_bossData[3] : -
else :
aa.append(bossTime[i]) #output_bossData[1] : 시간
tmp_time_delta = (tmp_bossTime[i].date() - (datetime.datetime.now() + datetime.timedelta(hours=int(basicSetting[0]))).date()).days
if tmp_time_delta == 0:
aa.append(tmp_bossTime[i].strftime('%H:%M:%S'))
else:
if tmp_time_delta > 0:
aa.append(f"(+{tmp_time_delta}d) {tmp_bossTime[i].strftime('%H:%M:%S')}")
else:
aa.append(f"({tmp_time_delta}d) {tmp_bossTime[i].strftime('%H:%M:%S')}")
tmp_time_delta = 0
# aa.append(bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : 시간(00:00:00) -> 초빼기 : aa.append(bossTime[i].strftime('%H:%M'))
aa.append('+') #output_bossData[3] : +
aa.append(bossData[i][2]) #output_bossData[4] : 멍/미입력 보스
aa.append(bossMungCnt[i]) #output_bossData[5] : 멍/미입력횟수
aa.append(bossData[i][6]) #output_bossData[6] : 메세지
ouput_bossData.append(aa)
aa = []
for i in range(fixed_bossNum):
aa.append(fixed_bossData[i][0]) #output_bossData[0] : 보스명
aa.append(fixed_bossTime[i]) #output_bossData[1] : 시간
aa.append(fixed_bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : 시간(00:00:00) -> 초빼기 : aa.append(fixed_bossTime[i].strftime('%H:%M'))
aa.append('@') #output_bossData[3] : @
aa.append(0) #output_bossData[4] : 멍/미입력 보스
aa.append(0) #output_bossData[5] : 멍/미입력횟수
aa.append("") #output_bossData[6] : 메세지
ouput_bossData.append(aa)
aa = []
boss_information = []
cnt = 0
boss_information.append('')
for timestring in sorted(datelist):
if len(boss_information[cnt]) > 1800 :
boss_information.append('')
cnt += 1
for i in range(len(ouput_bossData)):
if timestring == ouput_bossData[i][1]:
if ouput_bossData[i][4] == '0' :
if ouput_bossData[i][5] == 0 :
boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' ' + ouput_bossData[i][6] + '\n'
else :
boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' (미 ' + str(ouput_bossData[i][5]) + '회)' + ' ' + ouput_bossData[i][6] + '\n'
else :
if ouput_bossData[i][5] == 0 :
boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' ' + ouput_bossData[i][6] + '\n'
else :
boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' (멍 ' + str(ouput_bossData[i][5]) + '회)' + ' ' + ouput_bossData[i][6] + '\n'
if len(boss_information) == 1 and len(tmp_boss_information) == 1:
###########################
if len(boss_information[0]) != 0:
boss_information[0] = "```diff\n" + boss_information[0] + "\n```"
else :
boss_information[0] = '``` ```'
if len(tmp_boss_information[0]) != 0:
tmp_boss_information[0] = "```fix\n" + tmp_boss_information[0][:len(tmp_boss_information[0])-1] + "\n```"
else :
tmp_boss_information[0] = '``` ```'
embed = discord.Embed(
title = "----- 보스탐 정보 -----",
description= boss_information[0],
color=0x0000ff
)
embed.add_field(
name="----- 미예약 보스 -----",
value= tmp_boss_information[0],
inline = False
)
await ctx.send( embed=embed, tts=False)
else :
###########################일반보스출력
if len(boss_information[0]) != 0:
boss_information[0] = "```diff\n" + boss_information[0] + "\n```"
else :
boss_information[0] = '``` ```'
embed = discord.Embed(
title = "----- 보스탐 정보 -----",
description= boss_information[0],
color=0x0000ff
)
await ctx.send( embed=embed, tts=False)
for i in range(len(boss_information)-1):
if len(boss_information[i+1]) != 0:
boss_information[i+1] = "```diff\n" + boss_information[i+1] + "\n```"
else :
boss_information[i+1] = '``` ```'
embed = discord.Embed(
title = '',
description= boss_information[i+1],
color=0x0000ff
)
await ctx.send( embed=embed, tts=False)
###########################미예약보스출력
if len(tmp_boss_information[0]) != 0:
if len(tmp_boss_information) == 1 :
tmp_boss_information[0] = "```fix\n" + tmp_boss_information[0][:len(tmp_boss_information[0])-1] + "\n```"
else:
tmp_boss_information[0] = "```fix\n" + tmp_boss_information[0] + "\n```"
else :
tmp_boss_information[0] = '``` ```'
embed = discord.Embed(
title = "----- 미예약 보스 -----",
description= tmp_boss_information[0],
color=0x0000ff
)
await ctx.send( embed=embed, tts=False)
for i in range(len(tmp_boss_information)-1):
if len(tmp_boss_information[i+1]) != 0:
if i == len(tmp_boss_information)-2:
tmp_boss_information[i+1] = "```fix\n" + tmp_boss_information[i+1][:len(tmp_boss_information[i+1])-1] + "\n```"
else:
tmp_boss_information[i+1] = "```fix\n" + tmp_boss_information[i+1] + "\n```"
else :
tmp_boss_information[i+1] = '``` ```'
embed = discord.Embed(
title = '',
description= tmp_boss_information[i+1],
color=0x0000ff
)
await ctx.send( embed=embed, tts=False)
await dbSave()
await data_list_Save("kill_list.ini", "-----척살명단-----", kill_Data)
await data_list_Save("item_list.ini", "-----아이템목록-----", item_Data)
else:
return
################ 보스타임 출력(고정보스포함) ################
@commands.command(name=command[23][0], aliases=command[23][1:])
async def bossTime_fixed_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
datelist = []
datelist2 = []
ouput_bossData = []
aa = []
fixed_datelist = []
for i in range(bossNum):
if bossMungFlag[i] == True :
datelist2.append(tmp_bossTime[i])
else :
datelist2.append(bossTime[i])
datelist = list(set(datelist2))
tmp_boss_information = []
tmp_cnt = 0
tmp_boss_information.append('')
for i in range(bossNum):
if bossTimeString[i] == '99:99:99' and bossMungFlag[i] != True :
if len(tmp_boss_information[tmp_cnt]) > 1800 :
tmp_boss_information.append('')
tmp_cnt += 1
tmp_boss_information[tmp_cnt] = tmp_boss_information[tmp_cnt] + bossData[i][0] + ','
else :
aa.append(bossData[i][0]) #output_bossData[0] : 보스명
if bossMungFlag[i] == True :
aa.append(tmp_bossTime[i]) #output_bossData[1] : 시간
if (datetime.datetime.now() + datetime.timedelta(hours=int(basicSetting[0]))).strftime('%Y-%m-%d') == tmp_bossTime[i].strftime('%Y-%m-%d'):
aa.append(tmp_bossTime[i].strftime('%H:%M:%S'))
else:
aa.append(f"[{tmp_bossTime[i].strftime('%Y-%m-%d')}] {tmp_bossTime[i].strftime('%H:%M:%S')}")
# aa.append(tmp_bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : 시간(00:00:00) -> 초빼기 : aa.append(tmp_bossTime[i].strftime('%H:%M'))
aa.append('-') #output_bossData[3] : -
else :
aa.append(bossTime[i]) #output_bossData[1] : 시간
if (datetime.datetime.now() + datetime.timedelta(hours=int(basicSetting[0]))).strftime('%Y-%m-%d') == bossTime[i].strftime('%Y-%m-%d'):
aa.append(bossTime[i].strftime('%H:%M:%S'))
else:
aa.append(f"[{bossTime[i].strftime('%Y-%m-%d')}] {bossTime[i].strftime('%H:%M:%S')}")
# aa.append(bossTime[i].strftime('%H:%M:%S')) #output_bossData[2] : 시간(00:00:00) -> 초빼기 : aa.append(bossTime[i].strftime('%H:%M'))
aa.append('+') #output_bossData[3] : +
aa.append(bossData[i][2]) #output_bossData[4] : 멍/미입력 보스
aa.append(bossMungCnt[i]) #output_bossData[5] : 멍/미입력횟수
aa.append(bossData[i][6]) #output_bossData[6] : 메세지
ouput_bossData.append(aa)
aa = []
for i in range(fixed_bossNum):
fixed_datelist.append(fixed_bossTime[i])
fixed_datelist = list(set(fixed_datelist))
fixedboss_information = []
cntF = 0
fixedboss_information.append('')
for timestring1 in sorted(fixed_datelist):
if len(fixedboss_information[cntF]) > 1800 :
fixedboss_information.append('')
cntF += 1
for i in range(fixed_bossNum):
if timestring1 == fixed_bossTime[i]:
if (datetime.datetime.now() + datetime.timedelta(hours=int(basicSetting[0]))).strftime('%Y-%m-%d') == fixed_bossTime[i].strftime('%Y-%m-%d'):
tmp_timeSTR = fixed_bossTime[i].strftime('%H:%M:%S') #초빼기 : tmp_timeSTR = fixed_bossTime[i].strftime('%H:%M')
else:
tmp_timeSTR = '[' + fixed_bossTime[i].strftime('%Y-%m-%d') + '] ' + fixed_bossTime[i].strftime('%H:%M:%S') #초빼기 : tmp_timeSTR = '[' + fixed_bossTime[i].strftime('%Y-%m-%d') + '] ' + fixed_bossTime[i].strftime('%H:%M')
fixedboss_information[cntF] = fixedboss_information[cntF] + tmp_timeSTR + ' : ' + fixed_bossData[i][0] + '\n'
boss_information = []
cnt = 0
boss_information.append('')
for timestring in sorted(datelist):
if len(boss_information[cnt]) > 1800 :
boss_information.append('')
cnt += 1
for i in range(len(ouput_bossData)):
if timestring == ouput_bossData[i][1]:
if ouput_bossData[i][4] == '0' :
if ouput_bossData[i][5] == 0 :
boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' ' + ouput_bossData[i][6] + '\n'
else :
boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' (미 ' + str(ouput_bossData[i][5]) + '회)' + ' ' + ouput_bossData[i][6] + '\n'
else :
if ouput_bossData[i][5] == 0 :
boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' ' + ouput_bossData[i][6] + '\n'
else :
boss_information[cnt] = boss_information[cnt] + ouput_bossData[i][3] + ' ' + ouput_bossData[i][2] + ' : ' + ouput_bossData[i][0] + ' (멍 ' + str(ouput_bossData[i][5]) + '회)' + ' ' + ouput_bossData[i][6] + '\n'
###########################고정보스출력
if len(fixedboss_information[0]) != 0:
fixedboss_information[0] = "```diff\n" + fixedboss_information[0] + "\n```"
else :
fixedboss_information[0] = '``` ```'
embed = discord.Embed(
title = "----- 고 정 보 스 -----",
description= fixedboss_information[0],
color=0x0000ff
)
await ctx.send( embed=embed, tts=False)
for i in range(len(fixedboss_information)-1):
if len(fixedboss_information[i+1]) != 0:
fixedboss_information[i+1] = "```diff\n" + fixedboss_information[i+1] + "\n```"
else :
fixedboss_information[i+1] = '``` ```'
embed = discord.Embed(
title = '',
description= fixedboss_information[i+1],
color=0x0000ff
)
await ctx.send( embed=embed, tts=False)
###########################일반보스출력
if len(boss_information[0]) != 0:
boss_information[0] = "```diff\n" + boss_information[0] + "\n```"
else :
boss_information[0] = '``` ```'
embed = discord.Embed(
title = "----- 보스탐 정보 -----",
description= boss_information[0],
color=0x0000ff
)
await ctx.send( embed=embed, tts=False)
for i in range(len(boss_information)-1):
if len(boss_information[i+1]) != 0:
boss_information[i+1] = "```diff\n" + boss_information[i+1] + "\n```"
else :
boss_information[i+1] = '``` ```'
embed = discord.Embed(
title = '',
description= boss_information[i+1],
color=0x0000ff
)
await ctx.send( embed=embed, tts=False)
###########################미예약보스출력
if len(tmp_boss_information[0]) != 0:
if len(tmp_boss_information) == 1 :
tmp_boss_information[0] = "```fix\n" + tmp_boss_information[0][:len(tmp_boss_information[0])-1] + "\n```"
else:
tmp_boss_information[0] = "```fix\n" + tmp_boss_information[0] + "\n```"
else :
tmp_boss_information[0] = '``` ```'
embed = discord.Embed(
title = "----- 미예약 보스 -----",
description= tmp_boss_information[0],
color=0x0000ff
)
await ctx.send( embed=embed, tts=False)
for i in range(len(tmp_boss_information)-1):
if len(tmp_boss_information[i+1]) != 0:
if i == len(tmp_boss_information)-2:
tmp_boss_information[i+1] = "```fix\n" + tmp_boss_information[i+1][:len(tmp_boss_information[i+1])-1] + "\n```"
else:
tmp_boss_information[i+1] = "```fix\n" + tmp_boss_information[i+1] + "\n```"
else :
tmp_boss_information[i+1] = '``` ```'
embed = discord.Embed(
title = '',
description= tmp_boss_information[i+1],
color=0x0000ff
)
await ctx.send( embed=embed, tts=False)
await dbSave()
await data_list_Save("kill_list.ini", "-----척살명단-----", kill_Data)
await data_list_Save("item_list.ini", "-----아이템목록-----", item_Data)
else:
return
################ 킬초기화 ################
@commands.command(name=command[24][0], aliases=command[24][1:])
async def killInit_(self, ctx):
if basicSetting[18] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[18]:
global kill_Data
kill_Data = {}
await init_data_list('kill_list.ini', '-----척살명단-----')
return await ctx.send( '< 킬 목록 초기화완료 >', tts=False)
else:
return
################ 킬명단 확인 및 추가################
@commands.command(name=command[25][0], aliases=command[25][1:])
async def killList_(self, ctx, *, args : str = None):
if basicSetting[18] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[18]:
global kill_Data
if not args:
kill_output = ''
for key, value in kill_Data.items():
kill_output += ':skull_crossbones: ' + str(key) + ' : ' + str(value) + '번 따히!\n'
if kill_output != '' :
embed = discord.Embed(
description= str(kill_output),
color=0xff00ff
)
else :
embed = discord.Embed(
description= '등록된 킬 목록이 없습니다. 분발하세요!',
color=0xff00ff
)
return await ctx.send(embed=embed, tts=False)
if args in kill_Data:
kill_Data[args] += 1
else:
kill_Data[args] = 1
embed = discord.Embed(
description= ':skull_crossbones: ' + args + ' 따히! [' + str(kill_Data[args]) + '번]\n',
color=0xff00ff
)
return await ctx.send(embed=embed, tts=False)
else:
return
################ 킬삭제 ################
@commands.command(name=command[26][0], aliases=command[26][1:])
async def killDel_(self, ctx, *, args : str = None):
if basicSetting[18] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[18]:
global kill_Data
if not args:
return await ctx.send( '```제대로 된 아이디를 입력해주세요!\n```', tts=False)
if args in kill_Data:
del kill_Data[args]
return await ctx.send( ':angel: ' + args + ' 삭제완료!', tts=False)
else :
return await ctx.send( '```킬 목록에 등록되어 있지 않습니다!\n```', tts=False)
else:
return
################ 킬 차감 ################
@commands.command(name=command[33][0], aliases=command[33][1:])
async def killSubtract_(self, ctx, *, args : str = None):
if basicSetting[18] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[18]:
global kill_Data
if not args:
return await ctx.send(f'{command[33][0]} [아이디] 혹은 {command[33][0]} [아이디] [횟수] 양식에 맞춰 입력해주세요!', tts = False)
input_data = args.split()
if len(input_data) == 1:
kill_name = args
count = 1
elif len(input_data) == 2:
kill_name = input_data[0]
try:
count = int(input_data[1])
except ValueError:
return await ctx.send(f'[횟수]는 숫자로 입력바랍니다')
else:
return await ctx.send(f'{command[33][0]} [아이디] 혹은 {command[33][0]} [아이디] [횟수] 양식에 맞춰 입력해주세요!', tts = False)
if kill_name in kill_Data:
if kill_Data[kill_name] < int(count):
return await ctx.send( f"등록된 킬 횟수[{str(kill_Data[kill_name])}번]보다 차감 횟수[{str(count)}번]가 많습니다. 킬 횟수에 맞게 재입력 바랍니다.", tts=False)
else:
kill_Data[kill_name] -= int(count)
else:
return await ctx.send( '```킬 목록에 등록되어 있지 않습니다!\n```', tts=False)
embed = discord.Embed(
description= f':angel: [{kill_name}] [{str(count)}번] 차감 완료! [잔여 : {str(kill_Data[kill_name])}번]\n',
color=0xff00ff
)
if kill_Data[kill_name] == 0:
del kill_Data[kill_name]
return await ctx.send(embed=embed, tts=False)
else:
return
################ 경주 ################
@commands.command(name=command[27][0], aliases=command[27][1:])
async def race_(self, ctx):
if basicSetting[19] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[19]:
msg = ctx.message.content[len(ctx.invoked_with)+1:]
race_info = []
fr = []
racing_field = []
str_racing_field = []
cur_pos = []
race_val = []
random_pos = []
racing_result = []
output = ':camera: :camera: :camera: 신나는 레이싱! :camera: :camera: :camera:\n'
#racing_unit = [':giraffe:', ':elephant:', ':tiger2:', ':hippopotamus:', ':crocodile:',':leopard:',':ox:', ':sheep:', ':pig2:',':dromedary_camel:',':dragon:',':rabbit2:'] #동물스킨
#racing_unit = [':red_car:', ':taxi:', ':bus:', ':trolleybus:', ':race_car:', ':police_car:', ':ambulance:', ':fire_engine:', ':minibus:', ':truck:', ':articulated_lorry:', ':tractor:', ':scooter:', ':manual_wheelchair:', ':motor_scooter:', ':auto_rickshaw:', ':blue_car:', ':bike:', ':helicopter:', ':steam_locomotive:'] #탈것스킨
#random.shuffle(racing_unit)
racing_member = msg.split(" ")
racing_unit = []
emoji = discord.Emoji
emoji = ctx.message.guild.emojis
for j in range(len(tmp_racing_unit)):
racing_unit.append(':' + tmp_racing_unit[j] + ':')
for i in range(len(emoji)):
if emoji[i].name == tmp_racing_unit[j].strip(":"):
racing_unit[j] = '<:' + tmp_racing_unit[j] + ':' + str(emoji[i].id) + '>'
random.shuffle(racing_unit)
field_size = 60
tmp_race_tab = 35 - len(racing_member)
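# Race mechanics: each racer gets a distinct race_val; a smaller value means fewer sampled stops,
# so that racer reaches the left edge (finish line) in an earlier frame.
# The final rank is derived directly as race_val - tmp_race_tab + 1.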
if len(racing_member) <= 1:
await ctx.send('레이스 인원이 2명보다 작습니다.')
return
elif len(racing_member) >= 13:
await ctx.send('레이스 인원이 12명 초과입니다.')
return
else :
race_val = random.sample(range(tmp_race_tab, tmp_race_tab+len(racing_member)), len(racing_member))
random.shuffle(race_val)
for i in range(len(racing_member)):
fr.append(racing_member[i])
fr.append(racing_unit[i])
fr.append(race_val[i])
race_info.append(fr)
fr = []
for i in range(field_size):
fr.append(" ")
racing_field.append(fr)
fr = []
for i in range(len(racing_member)):
racing_field[i][0] = "|"
racing_field[i][field_size-2] = race_info[i][1]
if len(race_info[i][0]) > 5:
racing_field[i][field_size-1] = "| " + race_info[i][0][:5] + '..'
else:
racing_field[i][field_size-1] = "| " + race_info[i][0]
str_racing_field.append("".join(racing_field[i]))
cur_pos.append(field_size-2)
for i in range(len(racing_member)):
output += str_racing_field[i] + '\n'
result_race = await ctx.send(output + ':traffic_light: 3초 후 경주가 시작됩니다!')
await asyncio.sleep(1)
await result_race.edit(content = output + ':traffic_light: 2초 후 경주가 시작됩니다!')
await asyncio.sleep(1)
await result_race.edit(content = output + ':traffic_light: 1초 후 경주가 시작됩니다!')
await asyncio.sleep(1)
await result_race.edit(content = output + ':checkered_flag: 경주 시작!')
for i in range(len(racing_member)):
test = random.sample(range(2,field_size-2), race_info[i][2])
while len(test) != tmp_race_tab + len(racing_member)-1 :
test.append(1)
test.append(1)
test.sort(reverse=True)
random_pos.append(test)
for j in range(len(random_pos[0])):
if j%2 == 0:
output = ':camera: :camera_with_flash: :camera: 신나는 레이싱! :camera_with_flash: :camera: :camera_with_flash:\n'
else :
output = ':camera_with_flash: :camera: :camera_with_flash: 신나는 레이싱! :camera: :camera_with_flash: :camera:\n'
str_racing_field = []
for i in range(len(racing_member)):
temp_pos = cur_pos[i]
racing_field[i][random_pos[i][j]], racing_field[i][temp_pos] = racing_field[i][temp_pos], racing_field[i][random_pos[i][j]]
cur_pos[i] = random_pos[i][j]
str_racing_field.append("".join(racing_field[i]))
await asyncio.sleep(1)
for i in range(len(racing_member)):
output += str_racing_field[i] + '\n'
await result_race.edit(content = output + ':checkered_flag: 경주 시작!')
for i in range(len(racing_field)):
fr.append(race_info[i][0])
fr.append((race_info[i][2]) - tmp_race_tab + 1)
racing_result.append(fr)
fr = []
result = sorted(racing_result, key=lambda x: x[1])
result_str = ''
for i in range(len(result)):
if result[i][1] == 1:
result[i][1] = ':first_place:'
elif result[i][1] == 2:
result[i][1] = ':second_place:'
elif result[i][1] == 3:
result[i][1] = ':third_place:'
elif result[i][1] == 4:
result[i][1] = ':four:'
elif result[i][1] == 5:
result[i][1] = ':five:'
elif result[i][1] == 6:
result[i][1] = ':six:'
elif result[i][1] == 7:
result[i][1] = ':seven:'
elif result[i][1] == 8:
result[i][1] = ':eight:'
elif result[i][1] == 9:
result[i][1] = ':nine:'
elif result[i][1] == 10:
result[i][1] = ':keycap_ten:'
else:
result[i][1] = ':x:'
result_str += result[i][1] + " " + result[i][0] + " "
#print(result)
await asyncio.sleep(1)
return await result_race.edit(content = output + ':tada: 경주 종료!\n' + result_str)
else:
return
################ 채널설정 ################
@commands.command(name=command[28][0], aliases=command[28][1:])
async def set_channel_(self, ctx):
global basicSetting
msg = ctx.message.content[len(ctx.invoked_with)+1:]
channel = ctx.message.channel.id #메세지가 들어온 채널 ID
if channel == basicSetting[7] and msg in ["사다리", "정산", "척살", "경주", "아이템"]:
return await ctx.send(f'명령어 채널은 `{msg} 채널`로 `설정`할 수 없습니다.', tts=False)
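# Channel settings are persisted by rewriting test_setting.ini in the linked GitHub repository (repo.update_file), so they survive bot restarts.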
if msg == '사다리' : #사다리 채널 설정
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
for i in range(len(inputData_textCH)):
if inputData_textCH[i].startswith('ladderchannel'):
inputData_textCH[i] = 'ladderchannel = ' + str(channel) + '\r'
basicSetting[8] = channel
result_textCH = '\n'.join(inputData_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
print(f'< 사다리채널 [{ctx.message.channel.name}] 설정완료 >')
return await ctx.send(f'< 사다리채널 [{ctx.message.channel.name}] 설정완료 >', tts=False)
elif msg == '정산' :
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
for i in range(len(inputData_textCH)):
if inputData_textCH[i].startswith('jungsanchannel'):
inputData_textCH[i] = 'jungsanchannel = ' + str(channel) + '\r'
basicSetting[11] = channel
result_textCH = '\n'.join(inputData_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
print(f'< 정산채널 [{ctx.message.channel.name}] 설정완료 >')
return await ctx.send(f'< 정산채널 [{ctx.message.channel.name}] 설정완료 >', tts=False)
elif msg == '척살' :
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
for i in range(len(inputData_textCH)):
if inputData_textCH[i].startswith('killchannel'):
inputData_textCH[i] = 'killchannel = ' + str(channel) + '\r'
basicSetting[18] = channel
result_textCH = '\n'.join(inputData_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
print(f'< 척살채널 [{ctx.message.channel.name}] 설정완료 >')
return await ctx.send(f'< 척살채널 [{ctx.message.channel.name}] 설정완료 >', tts=False)
elif msg == '경주' :
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
for i in range(len(inputData_textCH)):
if inputData_textCH[i].startswith('racingchannel'):
inputData_textCH[i] = 'racingchannel = ' + str(channel) + '\r'
basicSetting[19] = channel
result_textCH = '\n'.join(inputData_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
print(f'< 경주채널 [{ctx.message.channel.name}] 설정완료 >')
return await ctx.send(f'< 경주채널 [{ctx.message.channel.name}] 설정완료 >', tts=False)
elif msg == '아이템' :
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
for i in range(len(inputData_textCH)):
if inputData_textCH[i].startswith('itemchannel'):
inputData_textCH[i] = 'itemchannel = ' + str(channel) + '\r'
basicSetting[20] = channel
result_textCH = '\n'.join(inputData_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
print(f'< 아이템채널 [{ctx.message.channel.name}] 설정완료 >')
return await ctx.send(f'< 아이템채널 [{ctx.message.channel.name}] 설정완료 >', tts=False)
else :
return await ctx.send(f'```올바른 명령어를 입력해주세요.```', tts=False)
################ 채널삭제 ################
@commands.command(name=command[42][0], aliases=command[42][1:])
async def remove_channel_(self, ctx):
global basicSetting
if ctx.message.channel.id != basicSetting[7]:
return
msg = ctx.message.content[len(ctx.invoked_with)+1:]
channel = ctx.message.channel.id #메세지가 들어온 채널 ID
if msg == '사다리' : #사다리 채널 설정
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
ch_name = ctx.guild.get_channel(int(basicSetting[8]))
for i in range(len(inputData_textCH)):
if inputData_textCH[i].startswith('ladderchannel'):
inputData_textCH[i] = 'ladderchannel = \r'
basicSetting[8] = ""
result_textCH = '\n'.join(inputData_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
print(f'< 사다리채널 [{ch_name}] 삭제완료 >')
return await ctx.send(f'< 사다리채널 [{ch_name}] 삭제완료 >', tts=False)
elif msg == '정산' :
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
ch_name = ctx.guild.get_channel(int(basicSetting[11]))
for i in range(len(inputData_textCH)):
if inputData_textCH[i].startswith('jungsanchannel'):
inputData_textCH[i] = 'jungsanchannel = \r'
basicSetting[11] = ""
result_textCH = '\n'.join(inputData_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
print(f'< 정산채널 [{ch_name}] 삭제완료 >')
return await ctx.send(f'< 정산채널 [{ch_name}] 삭제완료 >', tts=False)
elif msg == '척살' :
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
ch_name = ctx.guild.get_channel(int(basicSetting[18]))
for i in range(len(inputData_textCH)):
if inputData_textCH[i].startswith('killchannel'):
inputData_textCH[i] = 'killchannel = \r'
basicSetting[18] = ""
result_textCH = '\n'.join(inputData_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
print(f'< 척살채널 [{ch_name}] 삭제완료 >')
return await ctx.send(f'< 척살채널 [{ch_name}] 삭제완료 >', tts=False)
elif msg == '경주' :
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
ch_name = ctx.guild.get_channel(int(basicSetting[19]))
for i in range(len(inputData_textCH)):
if inputData_textCH[i].startswith('racingchannel'):
inputData_textCH[i] = 'racingchannel = \r'
basicSetting[19] = ""
result_textCH = '\n'.join(inputData_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
print(f'< 경주채널 [{ch_name}] 삭제완료 >')
return await ctx.send(f'< 경주채널 [{ch_name}] 삭제완료 >', tts=False)
elif msg == '아이템' :
inidata_textCH = repo.get_contents("test_setting.ini")
file_data_textCH = base64.b64decode(inidata_textCH.content)
file_data_textCH = file_data_textCH.decode('utf-8')
inputData_textCH = file_data_textCH.split('\n')
ch_name = ctx.guild.get_channel(int(basicSetting[20]))
for i in range(len(inputData_textCH)):
if inputData_textCH[i].startswith('itemchannel'):
inputData_textCH[i] = 'itemchannel = \r'
basicSetting[20] = ""
result_textCH = '\n'.join(inputData_textCH)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_textCH, contents.sha)
print(f'< 아이템채널 [{ch_name}] 삭제완료 >')
return await ctx.send(f'< 아이템채널 [{ch_name}] 삭제완료 >', tts=False)
else :
return await ctx.send(f'```올바른 명령어를 입력해주세요.```', tts=False)
################ 아이템초기화 확인 ################
@commands.command(name=command[29][0], aliases=command[29][1:])
async def itemInit_(self, ctx):
if basicSetting[20] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[20]:
global item_Data
item_Data = {}
await init_data_list('item_list.ini', '-----아이템 목록-----')
return await ctx.send( '< 아이템 목록 초기화완료 >', tts=False)
else:
return
################ 아이템 목록 확인 및 추가 ################
@commands.command(name=command[30][0], aliases=command[30][1:])
async def itemList_(self, ctx, *, args : str = None):
if basicSetting[20] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[20]:
global item_Data
if not args:
sorted_item_list = sorted(item_Data.items(), key=lambda x: x[0])
embed_list : list = []
embed_index : int = 0
embed_cnt : int = 0
embed = discord.Embed(title = '', description = f'`{self.bot.user.name}\'s 창고`', color = 0x00ff00)
embed_list.append(embed)
if len(sorted_item_list) > 0 :
for item_id, count in sorted_item_list:
embed_cnt += 1
if embed_cnt > 24 :
embed_cnt = 0
embed_index += 1
tmp_embed = discord.Embed(
title = "",
description = "",
color=0x00ff00
)
embed_list.append(tmp_embed)
embed_list[embed_index].add_field(name = item_id, value = count)
embed_list[len(embed_list)-1].set_footer(text = f"전체 아이템 종류 : {len(item_Data)}개")
if len(embed_list) > 1:
for embed_data in embed_list:
await asyncio.sleep(0.1)
await ctx.send(embed = embed_data)
return
else:
return await ctx.send(embed=embed, tts=False)
else :
embed.add_field(name = '\u200b\n', value = '창고가 비었습니다.\n\u200b')
return await ctx.send(embed=embed, tts=False)
input_data = args.split()
if len(input_data) == 1:
item_name = args
count = 1
elif len(input_data) == 2:
item_name = input_data[0]
try:
count = int(input_data[1])
except ValueError:
return await ctx.send(f'아이템 [개수]는 숫자로 입력바랍니다')
else:
return await ctx.send(f'{command[30][0]} [아이템명] 혹은 {command[30][0]} [아이템명] [개수] 양식에 맞춰 입력해주세요!', tts = False)
if item_name in item_Data:
item_Data[item_name] += int(count)
else:
item_Data[item_name] = int(count)
embed = discord.Embed(
description= f':inbox_tray: **[{item_name}] [{str(count)}개]** 등록 완료! [잔여 : {str(item_Data[item_name])}개]\n',
color=0xff00ff
)
return await ctx.send(embed=embed, tts=False)
else:
return
################ 아이템 삭제 ################
@commands.command(name=command[31][0], aliases=command[31][1:])
async def itemDel_(self, ctx, *, args : str = None):
if basicSetting[20] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[20]:
global item_Data
if not args:
return await ctx.send( f'{command[31][0]} [아이템명] 양식에 맞춰 입력해주세요!', tts = False)
if args in item_Data:
del item_Data[args]
embed = discord.Embed(
description= ':outbox_tray: ' + args + ' 삭제완료!',
color=0xff00ff
)
return await ctx.send(embed=embed, tts=False)
else :
return await ctx.send( '```아이템 목록에 등록되어 있지 않습니다!\n```', tts=False)
else:
return
################ 아이템 차감 ################
@commands.command(name=command[32][0], aliases=command[32][1:])
async def itemSubtract_(self, ctx, *, args : str = None):
if basicSetting[20] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[20]:
global item_Data
if not args:
return await ctx.send(f'{command[32][0]} [아이템명] 혹은 {command[32][0]} [아이템명] [개수] 양식에 맞춰 입력해주세요!', tts = False)
input_data = args.split()
if len(input_data) == 1:
item_name = args
count = 1
elif len(input_data) == 2:
item_name = input_data[0]
try:
count = int(input_data[1])
except ValueError:
return await ctx.send(f'아이템 [개수]는 숫자로 입력바랍니다')
else:
return await ctx.send(f'{command[32][0]} [아이템명] 혹은 {command[32][0]} [아이템명] [개수] 양식에 맞춰 입력해주세요!', tts = False)
if item_name in item_Data:
if item_Data[item_name] < int(count):
return await ctx.send( f"등록된 아이템 개수[{str(item_Data[item_name])}개]보다 차감 개수[{str(count)}개]가 많습니다. 등록 개수에 맞게 재입력 바랍니다.", tts=False)
else:
item_Data[item_name] -= int(count)
else:
return await ctx.send( '```아이템 목록에 등록되어 있지 않습니다!\n```', tts=False)
embed = discord.Embed(
description= f':outbox_tray: **[{item_name}] [{str(count)}개]** 차감 완료! [잔여 : {str(item_Data[item_name])}개]\n',
color=0xff00ff
)
if item_Data[item_name] == 0:
del item_Data[item_name]
return await ctx.send(embed=embed, tts=False)
else:
return
################ 서버 나가기 ################
@commands.has_permissions(manage_messages=True)
@commands.command(name=command[34][0], aliases=command[34][1:])
async def leaveGuild_(self, ctx):
if ctx.message.channel.id == basicSetting[7]:
guild_list : str = ""
guild_name : str = ""
for i, guild in enumerate(self.bot.guilds):
guild_list += f"`{i+1}.` {guild.name}\n"
embed = discord.Embed(
title = "----- 서버 목록 -----",
description = guild_list,
color=0x00ff00
)
await ctx.send(embed = embed)
try:
await ctx.send(f"```떠나고 싶은 서버의 [숫자]를 입력하여 선택해 주세요```")
message_result : discord.Message = await self.bot.wait_for("message", timeout = 10, check=(lambda message: message.channel == ctx.message.channel and message.author == ctx.message.author))
except asyncio.TimeoutError:
return await ctx.send(f"```서버 선택 시간이 초과됐습니다! 필요시 명령어를 재입력해 주세요```")
try:
guild_name = self.bot.guilds[int(message_result.content)-1].name
await self.bot.get_guild(self.bot.guilds[int(message_result.content)-1].id).leave()
return await ctx.send(f"```[{guild_name}] 서버에서 떠났습니다.!```")
except ValueError:
return
################ 수수료 계산기 ################
@commands.command(name=command[35][0], aliases=command[35][1:])
async def tax_check(self, ctx, *, args : str = None):
if basicSetting[20] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[20]:
if not args:
return await ctx.send(f"**{command[35][0]} [판매금액] (거래소세금)** 양식으로 입력 해주세요\n※ 거래소세금은 미입력시 5%입니다.")
input_money_data : list = args.split()
len_input_money_data = len(input_money_data)
try:
for i in range(len_input_money_data):
input_money_data[i] = int(input_money_data[i])
except ValueError:
return await ctx.send(f"**[판매금액] (거래소세금)**은 숫자로 입력 해주세요.")
if len_input_money_data < 1 or len_input_money_data > 3:
return await ctx.send(f"**{command[35][0]} [판매금액] (거래소세금)** 양식으로 입력 해주세요\n※ 거래소세금은 미입력시 5%입니다.")
elif len_input_money_data == 2:
tax = input_money_data[1]
else:
tax = 5
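# price_first_tax / price_second_tax: proceeds after one / two exchange sales at the given tax rate.
# price_rev_tax: listing price needed so the seller still receives the entered amount after tax (rounded to the nearest whole number).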
price_first_tax = int(input_money_data[0] * ((100-tax)/100))
price_second_tax = int(price_first_tax * ((100-tax)/100))
price_rev_tax = int((input_money_data[0] * 100)/(100-tax)+0.5)
embed = discord.Embed(
title = f"🧮 수수료 계산결과 (세율 {tax}% 기준) ",
description = f"",
color=0x00ff00
)
embed.add_field(name = "⚖️ 수수료 지원", value = f"```등록가 : {price_rev_tax}\n수령가 : {input_money_data[0]}\n세 금 : {price_rev_tax-input_money_data[0]}```")
embed.add_field(name = "⚖️ 1차 거래", value = f"```등록가 : {input_money_data[0]}\n정산가 : {price_first_tax}\n세 금 : {input_money_data[0]-price_first_tax}```")
embed.add_field(name = "⚖️ 2차 거래", value = f"```등록가 : {price_first_tax}\n정산가 : {price_second_tax}\n세 금 : {price_first_tax-price_second_tax}```")
return await ctx.send(embed = embed)
else:
return
################ 페이백 계산기 ################
@commands.command(name=command[36][0], aliases=command[36][1:])
async def payback_check(self, ctx, *, args : str = None):
if basicSetting[20] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id == basicSetting[7] or ctx.message.channel.id == basicSetting[20]:
if not args:
return await ctx.send(f"**{command[36][0]} [거래소가격] [실거래가] (거래소세금)** 양식으로 입력 해주세요\n※ 거래소세금은 미입력시 5%입니다.")
input_money_data : list = args.split()
len_input_money_data = len(input_money_data)
try:
for i in range(len_input_money_data):
input_money_data[i] = int(input_money_data[i])
except ValueError:
return await ctx.send(f"**[판매금액] (거래소세금)**은 숫자로 입력 해주세요.")
if len_input_money_data < 2 or len_input_money_data > 4:
return await ctx.send(f"**{command[36][0]} [거래소가격] [실거래가] (거래소세금)** 양식으로 입력 해주세요\n※ 거래소세금은 미입력시 5%입니다.")
elif len_input_money_data == 3:
tax = input_money_data[2]
else:
tax = 5
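# result_payback compares the after-tax proceeds of the exchange price and the real-trade price;
# result_payback1 compares the exchange proceeds against the raw real-trade price.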
price_reg_tax = int(input_money_data[0] * ((100-tax)/100))
price_real_tax = int(input_money_data[1] * ((100-tax)/100))
result_payback = price_reg_tax - price_real_tax
result_payback1 = price_reg_tax - input_money_data[1]
embed = discord.Embed(
title = f"🧮 페이백 계산결과1 (세율 {tax}% 기준) ",
description = f"**```fix\n{result_payback}```**",
color=0x00ff00
)
embed.add_field(name = "⚖️ 거래소", value = f"```등록가 : {input_money_data[0]}\n정산가 : {price_reg_tax}\n세 금 : {input_money_data[0]-price_reg_tax}```")
embed.add_field(name = "🕵️ 실거래", value = f"```등록가 : {input_money_data[1]}\n정산가 : {price_real_tax}\n세 금 : {input_money_data[1]-price_real_tax}```")
await ctx.send(embed = embed)
embed2 = discord.Embed(
title = f"🧮 페이백 계산결과2 (세율 {tax}% 기준) ",
description = f"**```fix\n{result_payback1}```**",
color=0x00ff00
)
embed2.add_field(name = "⚖️ 거래소", value = f"```등록가 : {input_money_data[0]}\n정산가 : {price_reg_tax}\n세 금 : {input_money_data[0]-price_reg_tax}```")
embed2.add_field(name = "🕵️ 실거래", value = f"```내판가 : {input_money_data[1]}```")
return await ctx.send(embed = embed2)
else:
return
@commands.command(name=command[37][0], aliases=command[37][1:])
async def command_rock_paper_scissors_game(self, ctx : commands.Context):
if basicSetting[19] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id != basicSetting[7] and ctx.message.channel.id != basicSetting[19]:
return
message_rock_paper_scissors : discord.message.Message = await ctx.send("안내면 진거 가위바위..")
reaction_emoji : list = ["✌️", "✊", "✋"]
for emoji in reaction_emoji:
await message_rock_paper_scissors.add_reaction(emoji)
def reaction_check(reaction, user):
return (reaction.message.id == message_rock_paper_scissors.id) and (user.id == ctx.author.id) and (str(reaction) in reaction_emoji)
try:
reaction_result, user = await self.bot.wait_for('reaction_add', check = reaction_check, timeout = int(basicSetting[5]))
except asyncio.TimeoutError:
return await ctx.send(f"시간이 초과됐습니다. ")
bot_result : str = random.choice(reaction_emoji)
result_rock_paper_scissors : str = ""
if reaction_result is None:
result_rock_paper_scissors = f"왜 안냄?"
elif str(reaction_result) == bot_result:
result_rock_paper_scissors = f"봇 {bot_result} : {reaction_result} {ctx.author.mention}\n🤔비겼다!"
elif str(reaction_result) == "✌️" and bot_result == "✋":
result_rock_paper_scissors = f"봇 {bot_result} : {reaction_result} {ctx.author.mention}\n👍짝짝짝"
elif str(reaction_result) == "✊" and bot_result == "✌️":
result_rock_paper_scissors = f"봇 {bot_result} : {reaction_result} {ctx.author.mention}\n👍짝짝짝"
elif str(reaction_result) == "✋" and bot_result == "✊":
result_rock_paper_scissors = f"봇 {bot_result} : {reaction_result} {ctx.author.mention}\n👍짝짝짝"
else:
result_rock_paper_scissors = f"봇 {bot_result} : {reaction_result} {ctx.author.mention}\n🤪저런.."
return await ctx.send(result_rock_paper_scissors)
################ 보이스사용 ################
@commands.command(name=command[38][0], aliases=command[38][1:])
async def command_voice_use(self, ctx : commands.Context):
if ctx.message.channel.id != basicSetting[7]:
return
inidata_voice_use = repo.get_contents("test_setting.ini")
file_data_voice_use = base64.b64decode(inidata_voice_use.content)
file_data_voice_use = file_data_voice_use.decode('utf-8')
inputData_voice_use = file_data_voice_use.split('\n')
for i in range(len(inputData_voice_use)):
if inputData_voice_use[i].startswith("voice_use ="):
inputData_voice_use[i] = f"voice_use = 1\r"
basicSetting[21] = "1"
result_voice_use = '\n'.join(inputData_voice_use)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_voice_use, contents.sha)
if basicSetting[6] != "":
try:
await self.bot.get_channel(basicSetting[6]).connect(reconnect=True, timeout=5)
except:
await ctx.send( '< 음성채널 접속 에러! >', tts=False)
pass
if self.bot.voice_clients[0].is_connected() :
print("보이스 사용 설정 완료!")
return await ctx.send(f"```보이스를 사용하도록 설정하였습니다.!```")
return await ctx.send(f"```보이스 사용 설정이 완료 되었습니다!\n< 음성채널 접속 후 [{command[5][0]}] 명령을 사용 하세요 >```")
################ 보이스미사용 ################
@commands.command(name=command[39][0], aliases=command[39][1:])
async def command_voice_not_use(self, ctx : commands.Context):
if ctx.message.channel.id != basicSetting[7]:
return
for vc in self.bot.voice_clients:
if vc.guild.id == int(ctx.guild.id):
if vc.is_playing():
vc.stop()
await vc.disconnect(force=True)
inidata_voice_use = repo.get_contents("test_setting.ini")
file_data_voice_use = base64.b64decode(inidata_voice_use.content)
file_data_voice_use = file_data_voice_use.decode('utf-8')
inputData_voice_use = file_data_voice_use.split('\n')
for i in range(len(inputData_voice_use)):
if inputData_voice_use[i].startswith("voice_use ="):
inputData_voice_use[i] = f"voice_use = 0\r"
basicSetting[21] = "0"
result_voice_use = '\n'.join(inputData_voice_use)
contents = repo.get_contents("test_setting.ini")
repo.update_file(contents.path, "test_setting", result_voice_use, contents.sha)
return await ctx.send(f"```보이스를 사용하지 않도록 설정하였습니다.!```")
################ 럭키박스 ################
@commands.command(name=command[41][0], aliases=command[41][1:])
async def command_randombox_game(self, ctx : commands.Context, *, args : str = None):
if basicSetting[19] != "" and ctx.message.channel.id == basicSetting[7]:
return
if ctx.message.channel.id != basicSetting[7] and ctx.message.channel.id != basicSetting[19]:
return
if not args:
return await ctx.send(f'```명령어 [추첨인원] (대기시간/초) *(메모) 형태로 입력해주시기 바랍니다.```')
memo_data : str = ""
waiting_time : int = 30
if args.find("*") == -1:
input_game_data = args.split()
else:
input_game_data = args[:args.find("*")-1].split()
memo_data = args[args.find("*")+1:]
try:
num_cong = int(input_game_data[0]) # 뽑을 인원
if num_cong <= 0:
return await ctx.send(f'```추첨인원이 0보다 작거나 같습니다. 재입력 해주세요```')
except ValueError:
return await ctx.send('```추첨인원은 숫자로 입력 바랍니다\nex)!럭키박스 1```')
if len(input_game_data) >= 2:
waiting_time : int = 30
try:
waiting_time = int(input_game_data[1]) # 대기시간
if waiting_time <= 0 :
return await ctx.send(f'```대기시간이 0보다 작거나 같습니다. 재입력 해주세요```')
except ValueError:
return await ctx.send(f'```대기시간(초)는 숫자로 입력 바랍니다\nex)!럭키박스 1 60```')
reaction_emoji : list = ["✅", "❌"]
embed = discord.Embed(title = f"📦 럭키박스! 묻고 더블로 가! (잔여시간 : {waiting_time}초)", description = f"참가를 원하시면 ✅를 클릭해주세요!", timestamp =datetime.datetime.now(datetime.timezone(datetime.timedelta(hours=int(basicSetting[0])))),
color=0x00ff00
)
if memo_data != "":
embed.add_field(name = "📜 메모", value = f"```{memo_data}```", inline=False)
game_message : discord.message.Message = await ctx.send(embed = embed)
for emoji in reaction_emoji:
await game_message.add_reaction(emoji)
cache_msg = await ctx.fetch_message(game_message.id)
for i in range(waiting_time):
embed.title = f"📦 럭키박스! 묻고 더블로 가! (잔여시간 : {waiting_time - i}초)"
await game_message.edit(embed=embed)
cache_msg = await ctx.fetch_message(game_message.id)
if cache_msg.reactions[1].count >= 2:
tmp_users = await cache_msg.reactions[1].users().flatten()
for user in tmp_users:
if user.id == ctx.author.id:
embed.title = f"😫 럭키박스! 취소! 😱"
embed.description = ""
await game_message.edit(embed=embed)
return await ctx.send(f"```게임이 취소되었습니다.!```")
await asyncio.sleep(1)
if cache_msg.reactions[0].count == 1:
embed.title = f"😫 럭키박스! 추첨 실패! 😱"
embed.description = ""
await game_message.edit(embed=embed)
return await ctx.send(f"```참여자가 없어 게임이 취소되었습니다.!```")
if num_cong >= cache_msg.reactions[0].count-1:
embed.title = f"😫 럭키박스! 추첨 실패! 😱"
embed.description = ""
await game_message.edit(embed=embed)
return await ctx.send(f'```추첨인원이 참여인원과 같거나 많습니다. 재입력 해주세요```')
participant_users = await cache_msg.reactions[0].users().flatten()
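# Drop the bot's own ✅ reaction so it is not counted as a participant.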
del_index : int = 0
for i, user in enumerate(participant_users):
if self.bot.user.id == user.id:
del_index = i
del participant_users[del_index]
user_name_list : list = []
for user in participant_users:
user_name_list.append(user.mention)
for _ in range(num_cong + 5):
random.shuffle(user_name_list)
result_users = None
for _ in range(num_cong + 5):
result_users = random.sample(user_name_list, num_cong)
lose_user = list(set(user_name_list)-set(result_users))
embed.title = f"🎉 럭키박스! 결과발표! 🎉"
embed.description = ""
embed.add_field(name = f"👥 참가자 ({len(user_name_list)}명)", value = f"{', '.join(user_name_list)}", inline=False)
embed.add_field(name = f"😍 당첨 ({num_cong}명)", value = f"{', '.join(result_users)}")
if len(lose_user) != 0:
embed.add_field(name = f"😭 낙첨 ({len(lose_user)}명)", value = f"{', '.join(lose_user)}")
return await game_message.edit(embed=embed)
################ 컷등록 ################
@commands.command(name=command[43][0], aliases=command[43][1:])
async def multi_boss_cut(self, ctx, *, args : str = None):
if ctx.message.channel.id != basicSetting[7]:
return
if not args:
return await ctx.send('```보스타임 정보를 입력해주세요```', tts=False)
boss_data_list : list = args.split("\n")
boss_data_dict : dict = {}
result_boss_name : list = []
for boss_data in boss_data_list:
tmp_boss_name = boss_data[boss_data.rfind(": ")+1:].strip()
if tmp_boss_name.find(" ") != -1:
tmp_boss_name = tmp_boss_name[:tmp_boss_name.find(" ")].strip()
tmp_boss_time = boss_data[:boss_data.rfind(" : ")].strip()
try:
if list(tmp_boss_time).count(":") > 1:
tmp_hour = int(tmp_boss_time[tmp_boss_time.find(":")-2:tmp_boss_time.find(":")])
tmp_minute = int(tmp_boss_time[tmp_boss_time.find(":")+1:tmp_boss_time.rfind(":")])
tmp_second = int(tmp_boss_time[tmp_boss_time.rfind(":")+1:])
else:
tmp_hour = int(tmp_boss_time[tmp_boss_time.find(":")-2:tmp_boss_time.find(":")])
tmp_minute = int(tmp_boss_time[tmp_boss_time.rfind(":")+1:])
tmp_second = 0
if tmp_hour > 23 or tmp_hour < 0 or tmp_minute > 59 or tmp_second > 59:
return await ctx.send(f"**[{tmp_boss_name}]**의 올바른 시간(00:00:00 ~ 23:59:59)을 입력해주세요.")
except:
return await ctx.send(f"**[{tmp_boss_name}]**의 올바른 시간(00:00:00 ~ 23:59:59)을 입력해주세요. ")
if "@" != boss_data[0]:
boss_data_dict[tmp_boss_name] = {"hour" : tmp_hour, "minute" : tmp_minute, "second" : tmp_second}
for i in range(bossNum):
if bossData[i][0] in boss_data_dict:
curr_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(boss_data_dict[bossData[i][0]]["hour"]), minute=int(boss_data_dict[bossData[i][0]]["minute"]), second=int(boss_data_dict[bossData[i][0]]["second"]))
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = 0
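# If the entered cut time is in the past, roll the next spawn forward by the boss respawn
# interval until it passes the current time, counting each skipped spawn as a 멍.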
if tmp_now > now2 :
tmp_now = tmp_now + datetime.timedelta(days=int(-1))
if tmp_now < now2 :
deltaTime = datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
while now2 > tmp_now :
tmp_now = tmp_now + deltaTime
bossMungCnt[i] = bossMungCnt[i] + 1
now2 = tmp_now
bossMungCnt[i] = bossMungCnt[i] - 1
else :
now2 = now2 + datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
tmp_bossTime[i] = bossTime[i] = nextTime = now2
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
if curr_now + datetime.timedelta(minutes=int(basicSetting[1])) <= tmp_bossTime[i] < curr_now + datetime.timedelta(minutes=int(basicSetting[3])):
bossFlag0[i] = True
if tmp_bossTime[i] < curr_now + datetime.timedelta(minutes=int(basicSetting[1])):
bossFlag[i] = True
bossFlag0[i] = True
result_boss_name.append(bossData[i][0])
return await ctx.send(f"```[{', '.join(result_boss_name)}] 보스 [컷등록]이 완료되었습니다. [{command[22][0]}]으로 등록시간을 확인해보세요```", tts=False)
################ 예상등록 ################
@commands.command(name=command[44][0], aliases=command[44][1:])
async def multi_boss_predict(self, ctx, *, args : str = None):
if ctx.message.channel.id != basicSetting[7]:
return
if not args:
return await ctx.send('```보스타임 정보를 입력해주세요```', tts=False)
boss_data_list : list = args.split("\n")
boss_data_dict : dict = {}
result_boss_name : list = []
for boss_data in boss_data_list:
tmp_boss_name = boss_data[boss_data.rfind(": ")+1:].strip()
if tmp_boss_name.find(" ") != -1:
tmp_boss_name = tmp_boss_name[:tmp_boss_name.find(" ")].strip()
tmp_boss_time = boss_data[:boss_data.rfind(" : ")].strip()
try:
if list(tmp_boss_time).count(":") > 1:
tmp_hour = int(tmp_boss_time[tmp_boss_time.find(":")-2:tmp_boss_time.find(":")])
tmp_minute = int(tmp_boss_time[tmp_boss_time.find(":")+1:tmp_boss_time.rfind(":")])
tmp_second = int(tmp_boss_time[tmp_boss_time.rfind(":")+1:])
else:
tmp_hour = int(tmp_boss_time[tmp_boss_time.find(":")-2:tmp_boss_time.find(":")])
tmp_minute = int(tmp_boss_time[tmp_boss_time.rfind(":")+1:])
tmp_second = 0
if tmp_hour > 23 or tmp_hour < 0 or tmp_minute > 59 or tmp_second > 59:
return await ctx.send(f"**[{tmp_boss_name}]**의 올바른 시간(00:00:00 ~ 23:59:59)을 입력해주세요. ")
except:
return await ctx.send(f"**[{tmp_boss_name}]**의 올바른 시간(00:00:00 ~ 23:59:59)을 입력해주세요. ")
if "@" != boss_data[0]:
boss_data_dict[tmp_boss_name] = {"hour" : tmp_hour, "minute" : tmp_minute, "second" : tmp_second}
for i in range(bossNum):
if bossData[i][0] in boss_data_dict:
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(boss_data_dict[bossData[i][0]]["hour"]), minute=int(boss_data_dict[bossData[i][0]]["minute"]), second=int(boss_data_dict[bossData[i][0]]["second"]))
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = 0
if tmp_now < now2 :
tmp_now = tmp_now + datetime.timedelta(days=int(1))
tmp_bossTime[i] = bossTime[i] = nextTime = tmp_now
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
if now2 + datetime.timedelta(minutes=int(basicSetting[1])) <= tmp_bossTime[i] < now2 + datetime.timedelta(minutes=int(basicSetting[3])):
bossFlag0[i] = True
if tmp_bossTime[i] < now2 + datetime.timedelta(minutes=int(basicSetting[1])):
bossFlag[i] = True
bossFlag0[i] = True
result_boss_name.append(bossData[i][0])
return await ctx.send(f"```[{', '.join(result_boss_name)}] 보스 [예상등록]이 완료되었습니다. [{command[22][0]}]으로 등록시간을 확인해보세요```", tts=False)
################ 추가등록 ################
@commands.command(name=command[45][0], aliases=command[45][1:])
async def multi_boss_delta_add(self, ctx, *, args : str = None):
if ctx.message.channel.id != basicSetting[7]:
return
if not args:
return await ctx.send(f"```[{command[45][0]}] [시간(00:00)] [추가시간(숫자)] [보스명1] [보스명2] [보스명3] ... 양식으로 입력해주세요```", tts=False)
input_data_list : list = []
input_data_list = args.split()
result_boss_name : list = []
if len(input_data_list) < 3:
return await ctx.send(f"```[{command[45][0]}] [시간(00:00)] [추가시간(숫자)] [보스명1] [보스명2] [보스명3] ... 양식으로 입력해주세요```", tts=False)
try:
input_hour = int(input_data_list[0][:input_data_list[0].find(":")])
input_minute = int(input_data_list[0][input_data_list[0].find(":")+1:])
input_delta_time = int(input_data_list[1])
except:
return await ctx.send(f"시간 및 추가시간은 숫자로 입력해주세요. ")
boss_name_list : list = input_data_list[2:]
if input_hour > 23 or input_hour < 0 or input_minute > 59:
return await ctx.send(f"올바른 시간(00:00:00 ~ 23:59:59)을 입력해주세요.")
for i in range(bossNum):
if bossData[i][0] in boss_name_list:
curr_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(input_hour), minute=int(input_minute), second=0) + datetime.timedelta(hours=int(input_delta_time))
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = 0
if tmp_now < now2 :
deltaTime = datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
while now2 > tmp_now :
tmp_now = tmp_now + deltaTime
bossMungCnt[i] = bossMungCnt[i] + 1
now2 = tmp_now
bossMungCnt[i] = bossMungCnt[i] - 1
else :
now2 = tmp_now
tmp_bossTime[i] = bossTime[i] = nextTime = now2
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
if curr_now + datetime.timedelta(minutes=int(basicSetting[1])) <= tmp_bossTime[i] < curr_now + datetime.timedelta(minutes=int(basicSetting[3])):
bossFlag0[i] = True
if tmp_bossTime[i] < curr_now + datetime.timedelta(minutes=int(basicSetting[1])):
bossFlag[i] = True
bossFlag0[i] = True
result_boss_name.append(bossData[i][0])
return await ctx.send(f"```[{', '.join(list(result_boss_name))}] 보스 [추가등록]이 완료되었습니다. [{command[22][0]}]으로 등록시간을 확인해보세요```", tts=False)
################ ?????????????? ################
@commands.command(name='!오빠')
async def brother1_(self, ctx):
if basicSetting[21] != "1":
return await ctx.send('```보이스를 사용하지 않도록 설정되어 있습니다.```', tts=False)
return await PlaySound(ctx.voice_client, './sound/오빠.mp3')
@commands.command(name='!언니')
async def sister_(self, ctx):
if basicSetting[21] != "1":
return await ctx.send('```보이스를 사용하지 않도록 설정되어 있습니다.```', tts=False)
return await PlaySound(ctx.voice_client, './sound/언니.mp3')
@commands.command(name='!형')
async def brother2_(self, ctx):
if basicSetting[21] != "1":
return await ctx.send('```보이스를 사용하지 않도록 설정되어 있습니다.```', tts=False)
return await PlaySound(ctx.voice_client, './sound/형.mp3')
@commands.command(name='!TJ', aliases=['!tj'])
async def TJ_(self, ctx):
if basicSetting[21] != "1":
return await ctx.send('```보이스를 사용하지 않도록 설정되어 있습니다.```', tts=False)
resultTJ = random.randrange(1,9)
return await PlaySound(ctx.voice_client, './sound/TJ' + str(resultTJ) +'.mp3')
class IlsangDistributionBot(commands.AutoShardedBot):
def __init__(self):
super().__init__(command_prefix=[""], help_command=None)
def run(self):
super().run(access_token, reconnect=True)
async def on_ready(self):
global basicSetting
global channel
global channel_info
global channel_name
global channel_id
global channel_voice_name
global channel_voice_id
global channel_type
global chkvoicechannel
global chflg
global endTime
global setting_channel_name
print("Logged in as ") #화면에 봇의 아이디, 닉네임이 출력됩니다.
print(self.user.name)
print(self.user.id)
print("===========")
channel_name, channel_id, channel_voice_name, channel_voice_id = await get_guild_channel_info(self)
await dbLoad()
if str(basicSetting[7]) in channel_id:
channel = basicSetting[7]
setting_channel_name = self.get_channel(basicSetting[7]).name
now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
print('< 접속시간 [' + now.strftime('%Y-%m-%d ') + now.strftime('%H:%M:%S') + '] >')
print('< 텍스트채널 [' + self.get_channel(basicSetting[7]).name + '] 접속완료>')
if basicSetting[21] == "1" and str(basicSetting[6]) in channel_voice_id:
try:
await self.get_channel(basicSetting[6]).connect(reconnect=True, timeout=5)
print('< 음성채널 [' + self.get_channel(basicSetting[6]).name + '] 접속완료 >')
except:
print('< 음성채널 [' + self.get_channel(basicSetting[6]).name + '] 접속에러 >')
pass
elif basicSetting[21] == "1" and str(basicSetting[6]) not in channel_voice_id:
print(f"설정된 음성채널 값이 없거나 잘못 됐습니다. 음성채널 접속 후 **[{command[5][0]}]** 명령어 먼저 입력하여 사용해주시기 바랍니다.")
await self.get_channel(int(basicSetting[7])).send(f"설정된 음성채널 값이 없거나 잘못 됐습니다. 음성채널 접속 후 **[{command[5][0]}]** 명령어 먼저 입력하여 사용해주시기 바랍니다.")
if basicSetting[8] != "":
if str(basicSetting[8]) in channel_id:
print('< 사다리채널 [' + self.get_channel(int(basicSetting[8])).name + '] 접속완료 >')
else:
basicSetting[8] = ""
print(f"사다리채널 ID 오류! [{command[28][0]} 사다리] 명령으로 재설정 바랍니다.")
if basicSetting[11] != "":
if str(basicSetting[11]) in channel_id:
print('< 정산채널 [' + self.get_channel(int(basicSetting[11])).name + '] 접속완료>')
else:
basicSetting[11] = ""
print(f"정산채널 ID 오류! [{command[28][0]} 정산] 명령으로 재설정 바랍니다.")
if basicSetting[18] != "":
if str(basicSetting[18]) in channel_id:
print('< 척살채널 [' + self.get_channel(int(basicSetting[18])).name + '] 접속완료>')
else:
basicSetting[18] = ""
print(f"척살채널 ID 오류! [{command[28][0]} 척살] 명령으로 재설정 바랍니다.")
if basicSetting[19] != "":
if str(basicSetting[19]) in channel_id:
print('< 경주채널 [' + self.get_channel(int(basicSetting[19])).name + '] 접속완료>')
else:
basicSetting[19] = ""
print(f"경주채널 ID 오류! [{command[28][0]} 경주] 명령으로 재설정 바랍니다.")
if basicSetting[20] != "":
if str(basicSetting[20]) in channel_id:
print('< 아이템채널 [' + self.get_channel(int(basicSetting[20])).name + '] 접속완료>')
else:
basicSetting[20] = ""
print(f"아이템채널 ID 오류! [{command[28][0]} 아이템] 명령으로 재설정 바랍니다.")
if int(basicSetting[13]) != 0 :
print('< 보탐봇 재시작 시간 ' + endTime.strftime('%Y-%m-%d ') + endTime.strftime('%H:%M:%S') + ' >')
print('< 보탐봇 재시작 주기 ' + basicSetting[13] + '일 >')
else :
print('< 보탐봇 재시작 설정안됨 >')
chflg = 1
else:
basicSetting[6] = ""
basicSetting[7] = ""
print(f"설정된 채널 값이 없거나 잘못 됐습니다. **[{command[0][0]}]** 명령어 먼저 입력하여 사용해주시기 바랍니다.")
# 디스코드에는 현재 본인이 어떤 게임을 플레이하는지 보여주는 기능이 있습니다.
# 이 기능을 사용하여 봇의 상태를 간단하게 출력해줄 수 있습니다.
await self.change_presence(status=discord.Status.online, activity=discord.Game(name=command[1][0], type=1), afk=False)
async def on_message(self, msg):
await self.wait_until_ready()
if msg.author.bot: #만약 메시지를 보낸사람이 봇일 경우에는
return None #동작하지 않고 무시합니다.
ori_msg = msg
global channel
global basicSetting
global bossData
global fixed_bossData
global bossNum
global fixed_bossNum
global chkvoicechannel
global chkrelogin
global bossTime
global tmp_bossTime
global fixed_bossTime
global bossTimeString
global bossDateString
global tmp_bossTimeString
global tmp_bossDateString
global bossFlag
global bossFlag0
global bossMungFlag
global bossMungCnt
global channel_info
global channel_name
global channel_id
global channel_voice_name
global channel_voice_id
global channel_type
global chflg
global LoadChk
global indexFixedBossname
global FixedBossDateData
global gc #정산
global credentials #정산
global regenembed
global command
global kill_Data
id = msg.author.id #id라는 변수에는 메시지를 보낸사람의 ID를 담습니다.
if chflg == 1 :
if self.get_channel(basicSetting[7]).id == msg.channel.id:
channel = basicSetting[7]
message = msg
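# Replace boss nicknames (boss_nick) with their canonical boss names before the 컷/멍/예상/삭제/메모 parsing below.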
for command_str in ["컷", "멍", "예상", "삭제", "메모", "카톡켬", "카톡끔"]:
if command_str in message.content:
tmp_msg : str = ""
for key, value in boss_nick.items():
if message.content[:message.content.find(command_str)].strip() in value:
message.content = message.content.replace(message.content[:message.content.find(command_str)], key)
hello = message.content
for i in range(bossNum):
################ 보스 컷처리 ################
if message.content.startswith(bossData[i][0] +'컷') or message.content.startswith(convertToInitialLetters(bossData[i][0] +'컷')) or message.content.startswith(bossData[i][0] +' 컷') or message.content.startswith(convertToInitialLetters(bossData[i][0] +' 컷')):
if hello.find(' ') != -1 :
bossData[i][6] = hello[hello.find(' ')+2:]
hello = hello[:hello.find(' ')]
else:
bossData[i][6] = ''
curr_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_msg = bossData[i][0] +'컷'
if len(hello) > len(tmp_msg) + 3 :
if hello.find(':') != -1 :
chkpos = hello.find(':')
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos+1:chkpos+3]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
chkpos = len(hello)-2
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos:chkpos+2]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = now2
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = 0
if tmp_now > now2 :
tmp_now = tmp_now + datetime.timedelta(days=int(-1))
if tmp_now < now2 :
deltaTime = datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
while now2 > tmp_now :
tmp_now = tmp_now + deltaTime
bossMungCnt[i] = bossMungCnt[i] + 1
now2 = tmp_now
bossMungCnt[i] = bossMungCnt[i] - 1
else :
now2 = now2 + datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
tmp_bossTime[i] = bossTime[i] = nextTime = now2
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
if curr_now + datetime.timedelta(minutes=int(basicSetting[1])) <= tmp_bossTime[i] < curr_now + datetime.timedelta(minutes=int(basicSetting[3])):
bossFlag0[i] = True
if tmp_bossTime[i] < curr_now + datetime.timedelta(minutes=int(basicSetting[1])):
bossFlag[i] = True
bossFlag0[i] = True
embed = discord.Embed(
description= '```다음 ' + bossData[i][0] + ' ' + bossTimeString[i] + '입니다.```',
color=0xff0000
)
await self.get_channel(channel).send(embed=embed, tts=False)
################ 보스 멍 처리 ################
if message.content.startswith(bossData[i][0] +'멍') or message.content.startswith(bossData[i][0] +' 멍'):
if hello.find(' ') != -1 :
bossData[i][6] = hello[hello.find(' ')+2:]
hello = hello[:hello.find(' ')]
else:
bossData[i][6] = ''
tmp_msg = bossData[i][0] +'멍'
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
if len(hello) > len(tmp_msg) + 3 :
temptime = tmp_now
if hello.find(':') != -1 :
chkpos = hello.find(':')
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos+1:chkpos+3]
temptime = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
chkpos = len(hello)-2
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos:chkpos+2]
temptime = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
bossMungCnt[i] = 0
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
if temptime > tmp_now :
temptime = temptime + datetime.timedelta(days=int(-1))
if temptime < tmp_now :
deltaTime = datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
while temptime < tmp_now :
temptime = temptime + deltaTime
bossMungCnt[i] = bossMungCnt[i] + 1
tmp_bossTime[i] = bossTime[i] = temptime
tmp_bossTimeString[i] = bossTimeString[i] = temptime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = temptime.strftime('%Y-%m-%d')
if tmp_now + datetime.timedelta(minutes=int(basicSetting[1])) <= tmp_bossTime[i] < tmp_now + datetime.timedelta(minutes=int(basicSetting[3])):
bossFlag0[i] = True
if tmp_bossTime[i] < tmp_now + datetime.timedelta(minutes=int(basicSetting[1])):
bossFlag[i] = True
bossFlag0[i] = True
embed = discord.Embed(
description= '```다음 ' + bossData[i][0] + ' ' + bossTimeString[i] + '입니다.```',
color=0xff0000
)
await self.get_channel(channel).send(embed=embed, tts=False)
else:
if tmp_bossTime[i] < tmp_now :
nextTime = tmp_bossTime[i] + datetime.timedelta(hours = int(bossData[i][1]), minutes = int(bossData[i][5]))
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = bossMungCnt[i] + 1
tmp_bossTime[i] = bossTime[i] = nextTime
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
if tmp_now + datetime.timedelta(minutes=int(basicSetting[1])) <= tmp_bossTime[i] < tmp_now + datetime.timedelta(minutes=int(basicSetting[3])):
bossFlag0[i] = True
if tmp_bossTime[i] < tmp_now + datetime.timedelta(minutes=int(basicSetting[1])):
bossFlag[i] = True
bossFlag0[i] = True
embed = discord.Embed(
description= '```다음 ' + bossData[i][0] + ' ' + bossTimeString[i] + '입니다.```',
color=0xff0000
)
await self.get_channel(channel).send(embed=embed, tts=False)
else:
await self.get_channel(channel).send('```' + bossData[i][0] + '탐이 아직 안됐습니다. 다음 ' + bossData[i][0] + '탐 [' + tmp_bossTimeString[i] + '] 입니다```', tts=False)
################ 예상 보스 타임 입력 ################
if message.content.startswith(bossData[i][0] +'예상') or message.content.startswith(bossData[i][0] +' 예상'):
if hello.find(' ') != -1 :
bossData[i][6] = hello[hello.find(' ')+2:]
hello = hello[:hello.find(' ')]
else:
bossData[i][6] = ''
tmp_msg = bossData[i][0] +'예상'
if len(hello) > len(tmp_msg) + 4 :
if hello.find(':') != -1 :
chkpos = hello.find(':')
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos+1:chkpos+3]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
else:
chkpos = len(hello)-2
hours1 = hello[chkpos-2:chkpos]
minutes1 = hello[chkpos:chkpos+2]
now2 = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = datetime.datetime.now() + datetime.timedelta(hours = int(basicSetting[0]))
tmp_now = tmp_now.replace(hour=int(hours1), minute=int(minutes1))
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = 0
if tmp_now < now2 :
tmp_now = tmp_now + datetime.timedelta(days=int(1))
tmp_bossTime[i] = bossTime[i] = nextTime = tmp_now
tmp_bossTimeString[i] = bossTimeString[i] = nextTime.strftime('%H:%M:%S')
tmp_bossDateString[i] = bossDateString[i] = nextTime.strftime('%Y-%m-%d')
if now2 + datetime.timedelta(minutes=int(basicSetting[1])) <= tmp_bossTime[i] < now2 + datetime.timedelta(minutes=int(basicSetting[3])):
bossFlag0[i] = True
if tmp_bossTime[i] < now2 + datetime.timedelta(minutes=int(basicSetting[1])):
bossFlag[i] = True
bossFlag0[i] = True
embed = discord.Embed(
description= '```다음 ' + bossData[i][0] + ' ' + bossTimeString[i] + '입니다.```',
color=0xff0000
)
await self.get_channel(channel).send(embed=embed, tts=False)
else:
await self.get_channel(channel).send('```' + bossData[i][0] +' 예상 시간을 입력해주세요.```', tts=False)
################ 보스타임 삭제 ################
if message.content == bossData[i][0] +'삭제' or message.content == bossData[i][0] +' 삭제':
bossTime[i] = datetime.datetime.now()+datetime.timedelta(days=365, hours = int(basicSetting[0]))
tmp_bossTime[i] = datetime.datetime.now()+datetime.timedelta(days=365, hours = int(basicSetting[0]))
bossTimeString[i] = '99:99:99'
bossDateString[i] = '9999-99-99'
tmp_bossTimeString[i] = '99:99:99'
tmp_bossDateString[i] = '9999-99-99'
bossFlag[i] = False
bossFlag0[i] = False
bossMungFlag[i] = False
bossMungCnt[i] = 0
await self.get_channel(channel).send('<' + bossData[i][0] + ' 삭제완료>', tts=False)
await dbSave()
print ('<' + bossData[i][0] + ' 삭제완료>')
################ 보스별 메모 ################
if message.content.startswith(bossData[i][0] +'메모 '):
tmp_msg = bossData[i][0] +'메모 '
bossData[i][6] = hello[len(tmp_msg):]
await self.get_channel(channel).send('< ' + bossData[i][0] + ' [ ' + bossData[i][6] + ' ] 메모등록 완료>', tts=False)
if message.content.startswith(bossData[i][0] +'메모삭제'):
bossData[i][6] = ''
await self.get_channel(channel).send('< ' + bossData[i][0] + ' 메모삭제 완료>', tts=False)
await self.process_commands(ori_msg)
async def on_command_error(self, ctx : commands.Context, error : commands.CommandError):
if isinstance(error, CommandNotFound):
return
elif isinstance(error, MissingRequiredArgument):
return
elif isinstance(error, discord.ext.commands.MissingPermissions):
return await ctx.send(f"**[{ctx.message.content.split()[0]}]** 명령을 사용할 권한이 없습니다.!")
elif isinstance(error, discord.ext.commands.CheckFailure):
return await ctx.send(f"**[{ctx.message.content.split()[0]}]** 명령을 사용할 권한이 없습니다.!")
raise error
async def close(self):
await super().close()
print("iter디코봇 종료 완료.")
ilsang_distribution_bot : IlsangDistributionBot = IlsangDistributionBot()
ilsang_distribution_bot.add_cog(mainCog(ilsang_distribution_bot))
ilsang_distribution_bot.add_cog(taskCog(ilsang_distribution_bot))
ilsang_distribution_bot.run()
|
[] |
[] |
[
"BOT_TOKEN",
"GIT_REPO_RESTART",
"GIT_REPO",
"AWS_KEY",
"GIT_TOKEN",
"AWS_SECRET_KEY"
] |
[]
|
["BOT_TOKEN", "GIT_REPO_RESTART", "GIT_REPO", "AWS_KEY", "GIT_TOKEN", "AWS_SECRET_KEY"]
|
python
| 6 | 0 | |
admin/admin_test.go
|
// Copyright 2017 Alexander Zaytsev <[email protected]>.
// All rights reserved. Use of this source code is governed
// by a BSD-style license that can be found in the LICENSE file.
package admin
import (
"context"
"os"
"path"
"strings"
"testing"
"github.com/z0rr0/lruss/conf"
)
const (
programRepo = "github.com/z0rr0/lruss"
testConfigName = "config.example.json"
)
type fataler interface {
Fatalf(format string, args ...interface{})
Fatal(args ...interface{})
}
func getConfig() string {
dirs := []string{os.Getenv("GOPATH"), "src"}
dirs = append(dirs, strings.Split(programRepo, "/")...)
dirs = append(dirs, testConfigName)
return path.Join(dirs...)
}
func initConfig(f fataler) *conf.Cfg {
cfgFile := getConfig()
cfg, err := conf.New(cfgFile)
if err != nil {
f.Fatal(err)
}
cfg.Redis.Db = 0
err = cfg.SetRedisPool()
if err != nil {
f.Fatalf("set redis pool error: %v", err)
}
c := cfg.GetConn()
defer c.Close()
_, err = c.Do("FLUSHDB")
if err != nil {
f.Fatalf("flushdb error: %v", err)
}
return cfg
}
func cleanDb(ctx context.Context) error {
cfg, err := conf.GetContext(ctx)
if err != nil {
return err
}
c := cfg.GetConn()
defer c.Close()
_, err = c.Do("FLUSHDB")
return err
}
func TestCheckCSRF(t *testing.T) {
cfg := initConfig(t)
ctx := conf.SetContext(context.Background(), cfg)
ctx = SetContext(ctx, "test")
defer cleanDb(ctx)
token, err := GetCSRF(ctx)
if err != nil {
t.Errorf("failed token, error: %v", err)
}
t.Logf("csrf token=%v\n", token)
if l := len(token); l != csrfLen*2 {
t.Errorf("invalid token length %d", l)
}
isValid, err := CheckCSRF(ctx, "bad token")
if err != nil {
t.Errorf("failed token check: %v", err)
}
if isValid {
t.Error("bad token can't be valid")
}
isValid, err = CheckCSRF(ctx, token)
if err != nil {
t.Errorf("failed token check: %v", err)
}
if !isValid {
t.Error("failed response for valid token")
}
}
func BenchmarkGetCSRF(b *testing.B) {
cfg := initConfig(b)
ctx := conf.SetContext(context.Background(), cfg)
ctx = SetContext(ctx, "test")
defer cleanDb(ctx)
for i := 0; i < b.N; i++ {
token, err := GetCSRF(ctx)
if err != nil {
b.Errorf("failed token, error: %v", err)
}
if l := len(token); l != csrfLen*2 {
b.Errorf("invalid token length %d", l)
}
}
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
src/tools/python/build_carrier_curl_requests.py
|
import os
import easypost
# Builds a file containing every cURL request to add a Carrier Account via EasyPost
# USAGE: API_KEY=123... venv/bin/python build_carrier_curl_requests.py > carrier_curl_requests.sh
URL = os.getenv('URL', 'https://api.easypost.com/v2')
API_KEY = os.getenv('API_KEY')
def main():
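    """Print a cURL request for every available carrier account type, sorted by type."""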
carrier_types = get_carrier_types()
# TODO: this may have a side effect of ordering the items inside each object too
for carrier in sorted(carrier_types, key=lambda x: x['type']):
curl_request = build_carrier_curl_request(carrier)
print(curl_request)
def get_carrier_types():
"""Get the carrier_types from the EasyPost API."""
easypost.api_key = API_KEY
easypost.api_base = URL
carrier_accounts = easypost.CarrierAccount.types()
return carrier_accounts
def build_carrier_curl_request(carrier):
"""Builds a cURL request for a carrier via EasyPost."""
fedex_custom_workflow_carriers = ['FedexAccount', 'FedexSmartpostAccount']
ups_custom_workflow_carriers = ['UpsAccount', 'UpsDapAccount']
canadapost_custom_workflow_carriers = ['CanadaPostAccount'] # noqa
# Add carrier account title comment
carrier_output = f'# {carrier.get("type")}\n'
# Add curl command and registration url
    if carrier.get('type') in (fedex_custom_workflow_carriers + ups_custom_workflow_carriers):
carrier_output += 'curl -X POST https://api.easypost.com/v2/carrier_accounts/register \\\n'
else:
carrier_output += 'curl -X POST https://api.easypost.com/v2/carrier_accounts \\\n'
# Add authentication, carrier account type and description
carrier_output += "-u 'API_KEY': \\\n"
carrier_output += f"-d 'carrier_account[type]={carrier.get('type')}' \\\n"
carrier_output += f"-d 'carrier_account[description]={carrier.get('type')}' \\\n"
# Iterate over the carrier fields and print the credential structure
carrier_fields = carrier.get('fields').to_dict()
if carrier.get('type') in fedex_custom_workflow_carriers:
for category in carrier_fields['creation_fields']:
for item in carrier_fields['creation_fields'][category]:
carrier_output += f"-d 'carrier_account[registration_data][{item}]=VALUE' \\\n"
carrier_output += '| json_pp\n'
    elif carrier.get('type') in (ups_custom_workflow_carriers + canadapost_custom_workflow_carriers):
# TODO: Fix UPS carrier account
# TODO: Fix CanadaPost carrier account
pass
else:
end = '| json_pp\n'
for top_level in carrier_fields:
# If there is a custom_workflow such as 3rd party auth or a similar flow
# we should warn about that here. The credential structure will differ from
# a normal carrier account and is currently not automated
if top_level == 'custom_workflow':
end += '## REQUIRES CUSTOM WORKFLOW ##\n'
else:
for item in carrier_fields[top_level]:
carrier_output += f"-d 'carrier_account[{top_level}][{item}]=VALUE' \\\n"
carrier_output += end
return carrier_output
if __name__ == '__main__':
main()
|
[] |
[] |
[
"URL",
"API_KEY"
] |
[]
|
["URL", "API_KEY"]
|
python
| 2 | 0 | |
ansible/plugins/callback/foreman.py
|
# -*- coding: utf-8 -*-
# (c) 2015, 2016 Daniel Lobato <[email protected]>
# (c) 2016 Guido Günther <[email protected]>
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: foreman
type: notification
short_description: Sends events to Foreman
description:
- This callback will report facts and task events to Foreman https://theforeman.org/
version_added: "2.2"
requirements:
- whitelisting in configuration
- requests (python library)
options:
url:
description: URL to the Foreman server
env:
- name: FOREMAN_URL
required: True
ssl_cert:
description: X509 certificate to authenticate to Foreman if https is used
env:
- name: FOREMAN_SSL_CERT
ssl_key:
description: the corresponding private key
env:
- name: FOREMAN_SSL_KEY
verify_certs:
description:
          - Toggle to decide whether to verify the Foreman certificate.
- It can be set to '1' to verify SSL certificates using the installed CAs or to a path pointing to a CA bundle.
- Set to '0' to disable certificate checking.
env:
- name: FOREMAN_SSL_VERIFY
'''
import os
from datetime import datetime
from collections import defaultdict
import json
import time
try:
import requests
HAS_REQUESTS = True
except ImportError:
HAS_REQUESTS = False
from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
"""
This callback will report facts and reports to Foreman https://theforeman.org/
It makes use of the following environment variables:
FOREMAN_URL: URL to the Foreman server
FOREMAN_SSL_CERT: X509 certificate to authenticate to Foreman if
https is used
FOREMAN_SSL_KEY: the corresponding private key
FOREMAN_SSL_VERIFY: whether to verify the Foreman certificate
It can be set to '1' to verify SSL certificates using the
installed CAs or to a path pointing to a CA bundle. Set to '0'
to disable certificate checking.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'notification'
CALLBACK_NAME = 'foreman'
CALLBACK_NEEDS_WHITELIST = True
FOREMAN_URL = os.getenv('FOREMAN_URL', "http://localhost:3000")
FOREMAN_SSL_CERT = (os.getenv('FOREMAN_SSL_CERT',
"/etc/foreman/client_cert.pem"),
os.getenv('FOREMAN_SSL_KEY',
"/etc/foreman/client_key.pem"))
FOREMAN_SSL_VERIFY = os.getenv('FOREMAN_SSL_VERIFY', "1")
FOREMAN_HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json"
}
TIME_FORMAT = "%Y-%m-%d %H:%M:%S %f"
def __init__(self):
super(CallbackModule, self).__init__()
self.items = defaultdict(list)
self.start_time = int(time.time())
if HAS_REQUESTS:
requests_major = int(requests.__version__.split('.')[0])
if requests_major >= 2:
self.ssl_verify = self._ssl_verify()
else:
self._disable_plugin('The `requests` python module is too old.')
else:
self._disable_plugin('The `requests` python module is not installed.')
if self.FOREMAN_URL.startswith('https://'):
if not os.path.exists(self.FOREMAN_SSL_CERT[0]):
self._disable_plugin('FOREMAN_SSL_CERT %s not found.' % self.FOREMAN_SSL_CERT[0])
if not os.path.exists(self.FOREMAN_SSL_CERT[1]):
self._disable_plugin('FOREMAN_SSL_KEY %s not found.' % self.FOREMAN_SSL_CERT[1])
def _disable_plugin(self, msg):
self.disabled = True
self._display.warning(msg + ' Disabling the Foreman callback plugin.')
def _ssl_verify(self):
if self.FOREMAN_SSL_VERIFY.lower() in ["1", "true", "on"]:
verify = True
elif self.FOREMAN_SSL_VERIFY.lower() in ["0", "false", "off"]:
requests.packages.urllib3.disable_warnings()
self._display.warning("SSL verification of %s disabled" %
self.FOREMAN_URL)
verify = False
else: # Set to a CA bundle:
verify = self.FOREMAN_SSL_VERIFY
return verify
def send_facts(self, host, data):
"""
Sends facts to Foreman, to be parsed by foreman_ansible fact
parser. The default fact importer should import these facts
properly.
"""
data["_type"] = "ansible"
data["_timestamp"] = datetime.now().strftime(self.TIME_FORMAT)
facts = {"name": host,
"facts": data,
}
requests.post(url=self.FOREMAN_URL + '/api/v2/hosts/facts',
data=json.dumps(facts),
headers=self.FOREMAN_HEADERS,
cert=self.FOREMAN_SSL_CERT,
verify=self.ssl_verify)
def _build_log(self, data):
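        """Convert the (task name, result) tuples collected for a host into Foreman report log entries."""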
logs = []
for entry in data:
source, msg = entry
if 'failed' in msg:
level = 'err'
else:
level = 'notice' if 'changed' in msg and msg['changed'] else 'info'
logs.append({
"log": {
'sources': {
'source': source
},
'messages': {
'message': json.dumps(msg)
},
'level': level
}
})
return logs
def send_reports(self, stats):
"""
Send reports to Foreman to be parsed by its config report
        importer. The data is in a format that Foreman can handle
without writing another report importer.
"""
status = defaultdict(lambda: 0)
metrics = {}
for host in stats.processed.keys():
sum = stats.summarize(host)
status["applied"] = sum['changed']
status["failed"] = sum['failures'] + sum['unreachable']
status["skipped"] = sum['skipped']
log = self._build_log(self.items[host])
metrics["time"] = {"total": int(time.time()) - self.start_time}
now = datetime.now().strftime(self.TIME_FORMAT)
report = {
"report": {
"host": host,
"reported_at": now,
"metrics": metrics,
"status": status,
"logs": log,
}
}
# To be changed to /api/v2/config_reports in 1.11. Maybe we
# could make a GET request to get the Foreman version & do
# this automatically.
requests.post(url=self.FOREMAN_URL + '/api/v2/reports',
data=json.dumps(report),
headers=self.FOREMAN_HEADERS,
cert=self.FOREMAN_SSL_CERT,
verify=self.ssl_verify)
self.items[host] = []
def append_result(self, result):
name = result._task.get_name()
host = result._host.get_name()
self.items[host].append((name, result._result))
# Ansible callback API
def v2_runner_on_failed(self, result, ignore_errors=False):
self.append_result(result)
def v2_runner_on_unreachable(self, result):
self.append_result(result)
def v2_runner_on_async_ok(self, result, jid):
self.append_result(result)
def v2_runner_on_async_failed(self, result, jid):
self.append_result(result)
def v2_playbook_on_stats(self, stats):
self.send_reports(stats)
def v2_runner_on_ok(self, result):
res = result._result
module = result._task.action
if module == 'setup' or 'ansible_facts' in res:
host = result._host.get_name()
self.send_facts(host, res)
else:
self.append_result(result)
|
[] |
[] |
[
"FOREMAN_URL",
"FOREMAN_SSL_KEY",
"FOREMAN_SSL_CERT",
"FOREMAN_SSL_VERIFY"
] |
[]
|
["FOREMAN_URL", "FOREMAN_SSL_KEY", "FOREMAN_SSL_CERT", "FOREMAN_SSL_VERIFY"]
|
python
| 4 | 0 | |
pkg/vm/stackitem/json.go
|
package stackitem
import (
"bytes"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
gio "io"
"math"
"math/big"
"github.com/nspcc-dev/neo-go/pkg/io"
)
// decoder is a wrapper around json.Decoder helping to mimic C# json decoder behaviour.
type decoder struct {
json.Decoder
depth int
}
// MaxAllowedInteger is the maximum integer allowed to be encoded.
const MaxAllowedInteger = 2<<53 - 1
// maxJSONDepth is a maximum allowed depth-level of decoded JSON.
const maxJSONDepth = 10
// ToJSON encodes Item to JSON.
// It behaves as follows:
// ByteArray -> base64 string
// BigInteger -> number
// Bool -> bool
// Null -> null
// Array, Struct -> array
// Map -> map with keys as UTF-8 bytes
func ToJSON(item Item) ([]byte, error) {
buf := io.NewBufBinWriter()
toJSON(buf, item)
if buf.Err != nil {
return nil, buf.Err
}
return buf.Bytes(), nil
}
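// toJSON writes the JSON encoding of item to buf, setting an error on the
// writer if the serialized result would exceed MaxSize.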
func toJSON(buf *io.BufBinWriter, item Item) {
w := buf.BinWriter
if w.Err != nil {
return
} else if buf.Len() > MaxSize {
w.Err = errors.New("item is too big")
}
switch it := item.(type) {
case *Array, *Struct:
w.WriteB('[')
items := it.Value().([]Item)
for i, v := range items {
toJSON(buf, v)
if i < len(items)-1 {
w.WriteB(',')
}
}
w.WriteB(']')
case *Map:
w.WriteB('{')
for i := range it.value {
			// a map key can always be converted to []byte,
			// but it is not always valid UTF-8.
key, err := ToString(it.value[i].Key)
if err != nil {
if buf.Err == nil {
buf.Err = err
}
return
}
w.WriteB('"')
w.WriteBytes([]byte(key))
w.WriteBytes([]byte(`":`))
toJSON(buf, it.value[i].Value)
if i < len(it.value)-1 {
w.WriteB(',')
}
}
w.WriteB('}')
case *BigInteger:
if it.value.CmpAbs(big.NewInt(MaxAllowedInteger)) == 1 {
w.Err = errors.New("too big integer")
return
}
w.WriteBytes([]byte(it.value.String()))
case *ByteArray:
w.WriteB('"')
val := it.Value().([]byte)
b := make([]byte, base64.StdEncoding.EncodedLen(len(val)))
base64.StdEncoding.Encode(b, val)
w.WriteBytes(b)
w.WriteB('"')
case *Bool:
if it.value {
w.WriteBytes([]byte("true"))
} else {
w.WriteBytes([]byte("false"))
}
case Null:
w.WriteBytes([]byte("null"))
default:
w.Err = fmt.Errorf("invalid item: %s", it.String())
return
}
if w.Err == nil && buf.Len() > MaxSize {
w.Err = errors.New("item is too big")
}
}
// FromJSON decodes Item from JSON.
// It behaves as follows:
// string -> ByteArray from base64
// number -> BigInteger
// bool -> Bool
// null -> Null
// array -> Array
// map -> Map, keys are UTF-8
func FromJSON(data []byte) (Item, error) {
d := decoder{Decoder: *json.NewDecoder(bytes.NewReader(data))}
if item, err := d.decode(); err != nil {
return nil, err
} else if _, err := d.Token(); err != gio.EOF {
return nil, errors.New("unexpected items")
} else {
return item, nil
}
}
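// decode reads the next JSON value from the underlying decoder and converts
// it to the corresponding stack item, recursing into arrays and maps.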
func (d *decoder) decode() (Item, error) {
tok, err := d.Token()
if err != nil {
return nil, err
}
switch t := tok.(type) {
case json.Delim:
switch t {
case json.Delim('{'), json.Delim('['):
if d.depth == maxJSONDepth {
return nil, errors.New("JSON depth limit exceeded")
}
d.depth++
var item Item
if t == json.Delim('{') {
item, err = d.decodeMap()
} else {
item, err = d.decodeArray()
}
d.depth--
return item, err
default:
// no error above means corresponding closing token
// was encountered for map or array respectively
return nil, nil
}
case string:
b, err := base64.StdEncoding.DecodeString(t)
if err != nil {
return nil, err
}
return NewByteArray(b), nil
case float64:
if math.Floor(t) != t {
return nil, fmt.Errorf("real value is not allowed: %v", t)
}
return NewBigInteger(big.NewInt(int64(t))), nil
case bool:
return NewBool(t), nil
default:
// it can be only `nil`
return Null{}, nil
}
}
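// decodeArray collects decoded elements until the closing bracket is reached.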
func (d *decoder) decodeArray() (*Array, error) {
items := []Item{}
for {
item, err := d.decode()
if err != nil {
return nil, err
}
if item == nil {
return NewArray(items), nil
}
items = append(items, item)
}
}
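// decodeMap collects key/value pairs until the closing brace is reached;
// keys are stored as byte arrays.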
func (d *decoder) decodeMap() (*Map, error) {
m := NewMap()
for {
key, err := d.Token()
if err != nil {
return nil, err
}
k, ok := key.(string)
if !ok {
return m, nil
}
val, err := d.decode()
if err != nil {
return nil, err
}
m.Add(NewByteArray([]byte(k)), val)
}
}
// ToJSONWithTypes serializes any stackitem to JSON in a lossless way.
func ToJSONWithTypes(item Item) ([]byte, error) {
result, err := toJSONWithTypes(item, make(map[Item]bool))
if err != nil {
return nil, err
}
return json.Marshal(result)
}
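// toJSONWithTypes builds a map-based representation of item, using seen to
// detect recursive arrays, structs and maps.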
func toJSONWithTypes(item Item, seen map[Item]bool) (interface{}, error) {
typ := item.Type()
result := map[string]interface{}{
"type": typ.String(),
}
var value interface{}
switch it := item.(type) {
case *Array, *Struct:
if seen[item] {
return "", errors.New("recursive structures can't be serialized to json")
}
seen[item] = true
arr := []interface{}{}
for _, elem := range it.Value().([]Item) {
s, err := toJSONWithTypes(elem, seen)
if err != nil {
return "", err
}
arr = append(arr, s)
}
value = arr
case *Bool:
value = it.value
case *Buffer, *ByteArray:
value = base64.StdEncoding.EncodeToString(it.Value().([]byte))
case *BigInteger:
value = it.value.String()
case *Map:
if seen[item] {
return "", errors.New("recursive structures can't be serialized to json")
}
seen[item] = true
arr := []interface{}{}
for i := range it.value {
// map keys are primitive types and can always be converted to json
key, _ := toJSONWithTypes(it.value[i].Key, seen)
val, err := toJSONWithTypes(it.value[i].Value, seen)
if err != nil {
return "", err
}
arr = append(arr, map[string]interface{}{
"key": key,
"value": val,
})
}
value = arr
case *Pointer:
value = it.pos
}
if value != nil {
result["value"] = value
}
return result, nil
}
type (
rawItem struct {
Type string `json:"type"`
Value json.RawMessage `json:"value,omitempty"`
}
rawMapElement struct {
Key json.RawMessage `json:"key"`
Value json.RawMessage `json:"value"`
}
)
// FromJSONWithTypes deserializes an item from typed-json representation.
func FromJSONWithTypes(data []byte) (Item, error) {
raw := new(rawItem)
if err := json.Unmarshal(data, raw); err != nil {
return nil, err
}
typ, err := FromString(raw.Type)
if err != nil {
return nil, errors.New("invalid type")
}
switch typ {
case AnyT:
return Null{}, nil
case PointerT:
var pos int
if err := json.Unmarshal(raw.Value, &pos); err != nil {
return nil, err
}
return NewPointer(pos, nil), nil
case BooleanT:
var b bool
if err := json.Unmarshal(raw.Value, &b); err != nil {
return nil, err
}
return NewBool(b), nil
case IntegerT:
var s string
if err := json.Unmarshal(raw.Value, &s); err != nil {
return nil, err
}
val, ok := new(big.Int).SetString(s, 10)
if !ok {
return nil, errors.New("invalid integer")
}
return NewBigInteger(val), nil
case ByteArrayT, BufferT:
var s string
if err := json.Unmarshal(raw.Value, &s); err != nil {
return nil, err
}
val, err := base64.StdEncoding.DecodeString(s)
if err != nil {
return nil, err
}
if typ == ByteArrayT {
return NewByteArray(val), nil
}
return NewBuffer(val), nil
case ArrayT, StructT:
var arr []json.RawMessage
if err := json.Unmarshal(raw.Value, &arr); err != nil {
return nil, err
}
items := make([]Item, len(arr))
for i := range arr {
it, err := FromJSONWithTypes(arr[i])
if err != nil {
return nil, err
}
items[i] = it
}
if typ == ArrayT {
return NewArray(items), nil
}
return NewStruct(items), nil
case MapT:
var arr []rawMapElement
if err := json.Unmarshal(raw.Value, &arr); err != nil {
return nil, err
}
m := NewMap()
for i := range arr {
key, err := FromJSONWithTypes(arr[i].Key)
if err != nil {
return nil, err
} else if !IsValidMapKey(key) {
return nil, fmt.Errorf("invalid map key of type %s", key.Type())
}
value, err := FromJSONWithTypes(arr[i].Value)
if err != nil {
return nil, err
}
m.Add(key, value)
}
return m, nil
case InteropT:
return NewInterop(nil), nil
default:
return nil, errors.New("unexpected type")
}
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| null | null | null |
post/src/lambda_post/utils.py
|
import logging
import os
import time
import requests
import re
import random
import praw
import dotenv
from boto3.dynamodb.conditions import Key
from bs4 import BeautifulSoup
dotenv.load_dotenv()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
TIMEOUT_SECONDS = 5
def post_from_dynamodb(reddit_creds, dynamodb_resource, dynamodb_client, table_name):
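    """Pick one of the least-posted entries from DynamoDB, post it to Reddit and update its counters."""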
all_entries = dynamodb_scan(dynamodb_resource, table_name)
times_posted = [
dict.get("timesPosted")
for dict in all_entries
if dict.get("timesPosted") is not None
]
least_posted_entries = [
entry for entry in all_entries if entry.get("timesPosted") == min(times_posted)
]
chosen_content = random.choice(least_posted_entries)
is_valid_link = validate_link(chosen_content["url"], chosen_content["description"])
if is_valid_link:
post_youtube(chosen_content, reddit_creds)
updated_times_posted = str(chosen_content.get("timesPosted") + 1)
try:
dynamodb_client.update_item(
TableName=table_name,
Key={"url": {"S": chosen_content["url"]}},
UpdateExpression="set timesPosted=:t, timestampLastPosted=:s",
ExpressionAttributeValues={
":t": {"N": updated_times_posted},
":s": {"N": str(int(time.time()))},
},
ReturnValues="UPDATED_NEW",
)
except Exception as e:
logger.info(e)
logger.info(chosen_content["url"])
def smart_truncate(content, length=300, suffix="..."):
# from https://stackoverflow.com/questions/250357/truncate-a-string-without-ending-in-the-middle-of-a-word
if len(content) <= length:
return content
else:
return " ".join(content[: length + 1].split(" ")[0:-1]) + suffix
def post_youtube(content_dict, reddit_creds):
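    """Submit the video link to r/learnASL using the supplied Reddit credentials."""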
title = clean_description(content_dict["description"])
url = content_dict["url"]
session = requests.Session()
    session.verify = False  # Disable SSL certificate verification
reddit = praw.Reddit(
user_agent="test",
client_id=reddit_creds["CLIENT_ID"],
client_secret=reddit_creds["CLIENT_SECRET"],
username=reddit_creds["USERNAME"],
password=reddit_creds["PASSWORD"],
requestor_kwargs={"session": session},
)
reddit.validate_on_submit = True
reddit.subreddit("learnASL").submit(title=title, url=url)
logger.info("YouTube embedded video posted to Reddit")
def load_creds_env():
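    """Load Reddit and AWS settings from environment variables."""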
creds = {}
creds["CLIENT_ID"] = os.environ["CLIENT_ID"]
creds["CLIENT_SECRET"] = os.environ["CLIENT_SECRET"]
creds["USERNAME"] = os.environ["USERNAME"]
creds["PASSWORD"] = os.environ["PASSWORD"]
creds["AWS_REGION"] = os.environ["AWS_REGION"]
creds["DYNAMODB_TABLE_NAME"] = os.environ["DYNAMODB_TABLE_NAME"]
return creds
# TODO:
## COMMON FUNCTIONS
def validate_link(url, description):
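    """Return True if the URL looks like a postable YouTube video with a usable description."""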
if "youtube" not in url.lower():
return False
if "youtube.com/billvicars" in url.lower():
return False
if "video coming soon" in description.lower():
return False
if not " " in description.lower():
return False
if "playlist" in description.lower():
return False
if "quiz" in description.lower():
return False
return True
def clean_description(title):
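    """Normalize whitespace and numbering in a video title and truncate it to a postable length."""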
title = re.sub("(\n|\t|\r)+", "", title) # remove newlines
title = re.sub("\s\s+", " ", title) # remove double spaces
title = re.sub("^\d+\.", "", title) # remove preceding numbers like 09.
title = re.sub("\:$", "", title) # remove : at the end
title = title.strip()
title = smart_truncate(title)
return title
def dynamodb_scan(dynamodb_resource, table_name):
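    """Return the items from a scan of the given DynamoDB table."""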
table = dynamodb_resource.Table(table_name)
scan = table.scan()
return scan["Items"]
def verify():
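    """Return True if the VERIFY environment variable is set to "True"."""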
verify_str = os.environ["VERIFY"]
if verify_str == "True":
return True
else:
return False
|
[] |
[] |
[
"USERNAME",
"PASSWORD",
"AWS_REGION",
"CLIENT_ID",
"VERIFY",
"CLIENT_SECRET",
"DYNAMODB_TABLE_NAME"
] |
[]
|
["USERNAME", "PASSWORD", "AWS_REGION", "CLIENT_ID", "VERIFY", "CLIENT_SECRET", "DYNAMODB_TABLE_NAME"]
|
python
| 7 | 0 | |
src/inventory_client/retry_roundtripper.go
|
package inventory_client
import (
"net/http"
"os"
"time"
"github.com/jpillora/backoff"
"github.com/sirupsen/logrus"
)
// This type implements the http.RoundTripper interface
type RetryRoundTripper struct {
Proxied http.RoundTripper
log *logrus.Logger
delay time.Duration
maxDelay time.Duration
maxRetries uint
}
func (rrt RetryRoundTripper) RoundTrip(req *http.Request) (res *http.Response, e error) {
b := &backoff.Backoff{
//These are the defaults
Min: rrt.delay,
Max: rrt.maxDelay,
Factor: 2,
Jitter: false,
}
return rrt.retry(rrt.maxRetries, b, rrt.Proxied.RoundTrip, req)
}
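// retry calls fn up to maxRetries times, sleeping for an exponentially
// growing backoff interval after each failed or non-2xx attempt.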
func (rrt RetryRoundTripper) retry(maxRetries uint, backoff *backoff.Backoff, fn func(req *http.Request) (res *http.Response, e error), req *http.Request) (res *http.Response, err error) {
var i uint
for i = 1; i <= maxRetries; i++ {
		res, err = fn(req)
		// when the wrapped RoundTripper returns an error the response may be nil,
		// so read the status code defensively before using it below
		statusCode := 0
		if res != nil {
			statusCode = res.StatusCode
		}
		if err != nil || statusCode < 200 || statusCode >= 300 {
			if i <= maxRetries {
				delay := backoff.Duration()
				rrt.log.WithError(err).Warnf("Failed executing HTTP call: %s %s status code %d, attempt number %d, Going to retry in: %s, request sent with: HTTP_PROXY: %s, http_proxy: %s, HTTPS_PROXY: %s, https_proxy: %s, NO_PROXY: %s, no_proxy: %s",
					req.Method, req.URL, statusCode, i, delay, os.Getenv("HTTP_PROXY"), os.Getenv("http_proxy"), os.Getenv("HTTPS_PROXY"), os.Getenv("https_proxy"), os.Getenv("NO_PROXY"), os.Getenv("no_proxy"))
time.Sleep(delay)
}
} else {
break
}
}
return res, err
}
|
[
"\"HTTP_PROXY\"",
"\"http_proxy\"",
"\"HTTPS_PROXY\"",
"\"https_proxy\"",
"\"NO_PROXY\"",
"\"no_proxy\""
] |
[] |
[
"NO_PROXY",
"https_proxy",
"HTTP_PROXY",
"HTTPS_PROXY",
"http_proxy",
"no_proxy"
] |
[]
|
["NO_PROXY", "https_proxy", "HTTP_PROXY", "HTTPS_PROXY", "http_proxy", "no_proxy"]
|
go
| 6 | 0 |