filename (string, 4–198 chars) | content (string, 25–939k chars) | environment (list) | variablearg (list) | constarg (list) | variableargjson (string, 1 class) | constargjson (string, 2–3.9k chars) | lang (string, 3 classes) | constargcount (float64, 0–129, ⌀ allowed) | variableargcount (float64, 0–0, ⌀ allowed) | sentence (string, 1 class)
---|---|---|---|---|---|---|---|---|---|---|
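For orientation, here is a minimal Python sketch of iterating over rows with the schema above. It assumes the rows are exported as JSON Lines with the column names listed in the header; the file name "rows.jsonl" is hypothetical.

# Minimal sketch: iterate dataset rows with the schema above.
# Assumptions: rows exported as JSON Lines, one object per row;
# "rows.jsonl" is a hypothetical file name.
import json

def iter_rows(path):
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            if line.strip():
                yield json.loads(line)

if __name__ == "__main__":
    per_lang = {}
    for row in iter_rows("rows.jsonl"):
        per_lang.setdefault(row["lang"], []).append(row["filename"])
    # e.g. {'go': 2, 'python': 2} for the four rows shown below
    print({lang: len(files) for lang, files in per_lang.items()})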
main.go
|
package main
import (
"bytes"
"encoding/binary"
"errors"
"flag"
"fmt"
"os"
"strconv"
)
func init() {
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
fmt.Fprintf(os.Stderr, "numencode <numerical value> -dtype <dtype>\n")
flag.PrintDefaults()
}
}
func main() {
if err := run(); err != nil {
fmt.Fprintln(os.Stderr, err)
flag.Usage()
os.Exit(1)
}
}
func run() error {
dtype := flag.String("dtype", "", "data type of argument. accept: [short, float, double]")
flag.Parse()
if flag.NArg() != 1 || *dtype == "" {
return errors.New("invalid arguments")
}
// read the value to encode from the single positional argument
input := flag.Arg(0)
b, err := encode(input, *dtype)
if err != nil {
return err
}
fmt.Println(b)
return nil
}
func encode(s string, dtype string) ([]byte, error) {
switch dtype {
case "short":
v, err := strconv.ParseInt(s, 10, 16)
if err != nil {
return nil, err
}
return int16ToBytes(int16(v))
case "float":
v, err := strconv.ParseFloat(s, 32)
if err != nil {
return nil, err
}
return float32ToBytes(float32(v))
case "double":
v, err := strconv.ParseFloat(s, 64)
if err != nil {
return nil, err
}
return float64ToBytes(v)
default:
return nil, errors.New("unknown dtype")
}
}
func decode(b []byte, dtype string) (string, error) {
switch dtype {
case "short":
v, err := bytesToInt16(b)
if err != nil {
return "", nil
}
return fmt.Sprint(v), nil
case "float":
v, err := bytesToFloat32(b)
if err != nil {
return "", nil
}
return fmt.Sprint(v), nil
case "double":
v, err := bytesToFloat64(b)
if err != nil {
return "", nil
}
return fmt.Sprint(v), nil
default:
return "", errors.New("unknown dtype")
}
}
func bytesToFloat64(b []byte) (float64, error) {
var v float64
buf := bytes.NewReader(b)
err := binary.Read(buf, binary.LittleEndian, &v)
return v, err
}
func float64ToBytes(v float64) ([]byte, error) {
buf := &bytes.Buffer{}
err := binary.Write(buf, binary.LittleEndian, v)
return buf.Bytes(), err
}
func bytesToFloat32(b []byte) (float32, error) {
var v float32
buf := bytes.NewReader(b)
err := binary.Read(buf, binary.LittleEndian, &v)
return v, err
}
func float32ToBytes(v float32) ([]byte, error) {
buf := &bytes.Buffer{}
err := binary.Write(buf, binary.LittleEndian, v)
return buf.Bytes(), err
}
func bytesToInt16(b []byte) (int16, error) {
var v int16
buf := bytes.NewReader(b)
err := binary.Read(buf, binary.LittleEndian, &v)
return v, err
}
func int16ToBytes(v int16) ([]byte, error) {
buf := &bytes.Buffer{}
err := binary.Write(buf, binary.LittleEndian, v)
return buf.Bytes(), err
}
| [] | [] | [] | [] | [] | go | null | null | null |
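As a cross-check of what encode() in main.go above produces, here is a small Python sketch of the same little-endian packing (short = int16, float = float32, double = float64) using the standard struct module; it is illustrative and not part of the dataset row.

# Illustrative Python equivalent of encode() in main.go above:
# little-endian byte encoding via struct (matches binary.LittleEndian in Go).
import struct

_FORMATS = {"short": "<h", "float": "<f", "double": "<d"}

def encode(value, dtype):
    if dtype not in _FORMATS:
        raise ValueError("unknown dtype")
    num = int(value) if dtype == "short" else float(value)
    # struct.error is raised if a "short" value does not fit in int16
    return struct.pack(_FORMATS[dtype], num)

# encode("3.14", "float") == b'\xc3\xf5H@'  (the Go tool prints [195 245 72 64])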
Python/3-DevOps/week1/hello_django_docker_compose/app/hello_django/settings.py
|
"""
Django settings for hello_django project.
Generated by 'django-admin startproject' using Django 3.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-rd_q8=#5%)xlhl8!b8o410apqror1(3^2j6m3jr#xr&!+!8%84'
#SECRET_KEY = os.environ.get("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['localhost', '127.0.0.1']
#ALLOWED_HOSTS = os.environ.get("DJANGO_ALLOWED_HOSTS").split(" ")
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'helloapp',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'hello_django.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'hello_django.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [] | [] | ["SECRET_KEY", "DJANGO_ALLOWED_HOSTS"] | [] | ["SECRET_KEY", "DJANGO_ALLOWED_HOSTS"] | python | 2 | 0 | |
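The settings.py above keeps its environment-driven alternatives commented out; below is a minimal sketch of that variant, using the same SECRET_KEY and DJANGO_ALLOWED_HOSTS variables named in those comments (the fallback values are illustrative only).

# Sketch of the env-driven settings hinted at by the commented-out lines above.
# The variable names come from those comments; the defaults are illustrative only.
import os

SECRET_KEY = os.environ.get("SECRET_KEY", "dev-only-insecure-key")
ALLOWED_HOSTS = os.environ.get("DJANGO_ALLOWED_HOSTS", "localhost 127.0.0.1").split(" ")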
cmd/server/server.go
|
package main
import (
"log"
"os"
"github.com/leozhantw/rate-limit/internal/server"
)
func main() {
port := os.Getenv("PORT")
if len(port) == 0 {
port = "8080"
}
srv := server.New().Start()
if err := srv.Run(":" + port); err != nil {
log.Fatalln(err)
}
}
| ["\"PORT\""] | [] | ["PORT"] | [] | ["PORT"] | go | 1 | 0 | |
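For comparison, the PORT-with-default lookup in cmd/server/server.go can be sketched in Python as follows; resolve_port is a hypothetical helper, not part of the repository.

# Sketch of the same PORT-with-default pattern as in server.go above.
import os

def resolve_port(default="8080"):
    port = os.environ.get("PORT", "")
    return port if port else default

# resolve_port() -> "8080" when PORT is unset or empty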
lib/rucio/tests/test_bin_rucio.py
|
# -*- coding: utf-8 -*-
# Copyright 2012-2021 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <[email protected]>, 2012-2018
# - Mario Lassnig <[email protected]>, 2012-2019
# - Angelos Molfetas <[email protected]>, 2012
# - Thomas Beermann <[email protected]>, 2012-2021
# - Joaquín Bogado <[email protected]>, 2014-2018
# - Cheng-Hsi Chao <[email protected]>, 2014
# - Cedric Serfon <[email protected]>, 2015
# - Martin Barisits <[email protected]>, 2015-2019
# - Frank Berghaus <[email protected]>, 2017-2018
# - Tobias Wegner <[email protected]>, 2018
# - Hannes Hansen <[email protected]>, 2018-2019
# - Andrew Lister <[email protected]>, 2019
# - Benedikt Ziemons <[email protected]>, 2020-2021
# - Eli Chadwick <[email protected]>, 2020
# - Patrick Austin <[email protected]>, 2020
# - Tomas Javurek <[email protected]>, 2020
# - Radu Carpa <[email protected]>, 2021
from __future__ import print_function
import os
import re
import unittest
from datetime import datetime, timedelta
from os import remove, unlink, listdir, rmdir, stat, path, environ
import pytest
from rucio.client.accountlimitclient import AccountLimitClient
from rucio.client.didclient import DIDClient
from rucio.client.replicaclient import ReplicaClient
from rucio.client.rseclient import RSEClient
from rucio.client.ruleclient import RuleClient
from rucio.common.config import config_get, config_get_bool
from rucio.common.types import InternalScope, InternalAccount
from rucio.common.utils import generate_uuid, get_tmp_dir, md5, render_json
from rucio.rse import rsemanager as rsemgr
from rucio.tests.common import execute, account_name_generator, rse_name_generator, file_generator, scope_name_generator
class TestBinRucio(unittest.TestCase):
def setUp(self):
if config_get_bool('common', 'multi_vo', raise_exception=False, default=False):
self.vo = {'vo': config_get('client', 'vo', raise_exception=False, default='tst')}
try:
remove(get_tmp_dir() + '/.rucio_root@%s/auth_token_root' % self.vo['vo'])
except OSError as error:
if error.args[0] != 2:
raise error
else:
self.vo = {}
try:
remove(get_tmp_dir() + '/.rucio_root/auth_token_root')
except OSError as e:
if e.args[0] != 2:
raise e
self.marker = '$> '
self.host = config_get('client', 'rucio_host')
self.auth_host = config_get('client', 'auth_host')
self.user = 'data13_hip'
self.def_rse = 'MOCK4'
self.rse_client = RSEClient()
self.def_rse_id = self.rse_client.get_rse(rse=self.def_rse)['id']
self.did_client = DIDClient()
self.replica_client = ReplicaClient()
self.rule_client = RuleClient()
self.account_client = AccountLimitClient()
self.account_client.set_local_account_limit('root', self.def_rse, -1)
self.rse_client.add_rse_attribute(self.def_rse, 'istape', 'False')
self.upload_success_str = 'Successfully uploaded file %s'
def test_rucio_version(self):
"""CLIENT(USER): Rucio version"""
cmd = 'bin/rucio --version'
exitcode, out, err = execute(cmd)
assert 'rucio' in out or 'rucio' in err
def test_rucio_ping(self):
"""CLIENT(USER): Rucio ping"""
cmd = 'rucio --host %s ping' % self.host
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
def test_rucio_config_arg(self):
"""CLIENT(USER): Rucio config argument"""
cmd = 'rucio --config errconfig ping'
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert 'Could not load Rucio configuration file' in err and re.match('.*errconfig.*$', err, re.DOTALL)
def test_add_account(self):
"""CLIENT(ADMIN): Add account"""
tmp_val = account_name_generator()
cmd = 'rucio-admin account add %s' % tmp_val
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, )
assert 'Added new account: %s\n' % tmp_val == out
def test_whoami(self):
"""CLIENT(USER): Rucio whoami"""
cmd = 'rucio whoami'
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, )
assert 'account' in out
def test_add_identity(self):
"""CLIENT(ADMIN): Add identity"""
tmp_val = account_name_generator()
cmd = 'rucio-admin account add %s' % tmp_val
exitcode, out, err = execute(cmd)
assert 'Added new account: %s\n' % tmp_val == out
cmd = 'rucio-admin identity add --account %s --type GSS --id [email protected] --email [email protected]' % tmp_val
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, )
assert 'Added new identity to account: [email protected]%s\n' % tmp_val == out
def test_del_identity(self):
"""CLIENT(ADMIN): Test del identity"""
tmp_acc = account_name_generator()
# create account
cmd = 'rucio-admin account add %s' % tmp_acc
exitcode, out, err = execute(cmd)
# add identity to account
cmd = 'rucio-admin identity add --account %s --type GSS --id [email protected] --email [email protected]' % tmp_acc
exitcode, out, err = execute(cmd)
# delete identity from account
cmd = 'rucio-admin identity delete --account %s --type GSS --id [email protected]' % tmp_acc
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert 'Deleted identity: [email protected]\n' == out
# list identities for account
cmd = 'rucio-admin account list-identities %s' % (tmp_acc)
print(self.marker + cmd)
print(cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert '' == out
def test_attributes(self):
"""CLIENT(ADMIN): Add/List/Delete attributes"""
tmp_acc = account_name_generator()
# create account
cmd = 'rucio-admin account add %s' % tmp_acc
exitcode, out, err = execute(cmd)
# add attribute to the account
cmd = 'rucio-admin account add-attribute {0} --key test_attribute_key --value true'.format(tmp_acc)
exitcode, out, err = execute(cmd)
print(out)
print(err)
assert exitcode == 0
# list attributes
cmd = 'rucio-admin account list-attributes {0}'.format(tmp_acc)
exitcode, out, err = execute(cmd)
print(out)
print(err)
assert exitcode == 0
# delete attribute from the account
cmd = 'rucio-admin account delete-attribute {0} --key test_attribute_key'.format(tmp_acc)
exitcode, out, err = execute(cmd)
print(out)
print(err)
assert exitcode == 0
def test_add_scope(self):
"""CLIENT(ADMIN): Add scope"""
tmp_scp = scope_name_generator()
tmp_acc = account_name_generator()
cmd = 'rucio-admin account add %s' % tmp_acc
exitcode, out, err = execute(cmd)
cmd = 'rucio-admin scope add --account %s --scope %s' % (tmp_acc, tmp_scp)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert 'Added new scope to account: %s-%s\n' % (tmp_scp, tmp_acc) == out
def test_add_rse(self):
"""CLIENT(ADMIN): Add RSE"""
tmp_val = rse_name_generator()
cmd = 'rucio-admin rse add %s' % tmp_val
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, )
assert 'Added new deterministic RSE: %s\n' % tmp_val == out
def test_add_rse_nondet(self):
"""CLIENT(ADMIN): Add non-deterministic RSE"""
tmp_val = rse_name_generator()
cmd = 'rucio-admin rse add --non-deterministic %s' % tmp_val
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, )
assert 'Added new non-deterministic RSE: %s\n' % tmp_val == out
def test_list_rses(self):
"""CLIENT(ADMIN): List RSEs"""
tmp_val = rse_name_generator()
cmd = 'rucio-admin rse add %s' % tmp_val
exitcode, out, err = execute(cmd)
cmd = 'rucio-admin rse list'
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, )
assert tmp_val in out
def test_rse_add_distance(self):
"""CLIENT (ADMIN): Add distance to RSE"""
# add RSEs
temprse1 = rse_name_generator()
cmd = 'rucio-admin rse add %s' % temprse1
exitcode, out, err = execute(cmd)
print(out, err)
assert exitcode == 0
temprse2 = rse_name_generator()
cmd = 'rucio-admin rse add %s' % temprse2
exitcode, out, err = execute(cmd)
print(out, err)
assert exitcode == 0
# add distance between the RSEs
cmd = 'rucio-admin rse add-distance --distance 1 --ranking 1 %s %s' % (temprse1, temprse2)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert exitcode == 0
cmd = 'rucio-admin rse add-distance --distance 1 --ranking 1 %s %s' % (temprse2, temprse1)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert exitcode == 0
# add duplicate distance
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err, exitcode)
assert exitcode != 0
assert 'Distance from %s to %s already exists!' % (temprse2, temprse1) in err
def test_upload(self):
"""CLIENT(USER): Upload"""
tmp_val = rse_name_generator()
cmd = 'rucio-admin rse add %s' % tmp_val
exitcode, out, err = execute(cmd)
cmd = 'rucio upload'
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, )
def test_download(self):
"""CLIENT(USER): Download"""
cmd = 'rucio download'
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, )
def test_upload_file(self):
"""CLIENT(USER): Rucio upload files"""
tmp_file1 = file_generator()
tmp_file2 = file_generator()
tmp_file3 = file_generator()
cmd = 'rucio -v upload --rse {0} --scope {1} {2} {3} {4}'.format(self.def_rse, self.user, tmp_file1, tmp_file2, tmp_file3)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
remove(tmp_file1)
remove(tmp_file2)
remove(tmp_file3)
upload_string_1 = (self.upload_success_str % path.basename(tmp_file1))
upload_string_2 = (self.upload_success_str % path.basename(tmp_file2))
upload_string_3 = (self.upload_success_str % path.basename(tmp_file3))
assert upload_string_1 in out or upload_string_1 in err
assert upload_string_2 in out or upload_string_2 in err
assert upload_string_3 in out or upload_string_3 in err
def test_upload_file_register_after_upload(self):
"""CLIENT(USER): Rucio upload files with registration after upload"""
# normal upload
tmp_file1 = file_generator()
tmp_file2 = file_generator()
tmp_file3 = file_generator()
tmp_file1_name = path.basename(tmp_file1)
cmd = 'rucio -v upload --rse {0} --scope {1} {2} {3} {4} --register-after-upload'.format(self.def_rse, self.user, tmp_file1, tmp_file2, tmp_file3)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
remove(tmp_file1)
remove(tmp_file2)
remove(tmp_file3)
upload_string_1 = (self.upload_success_str % path.basename(tmp_file1))
upload_string_2 = (self.upload_success_str % path.basename(tmp_file2))
upload_string_3 = (self.upload_success_str % path.basename(tmp_file3))
assert upload_string_1 in out or upload_string_1 in err
assert upload_string_2 in out or upload_string_2 in err
assert upload_string_3 in out or upload_string_3 in err
# removing replica -> file on RSE should be overwritten
# (simulating an upload error, where a part of the file is uploaded but the replica is not registered)
if environ.get('SUITE', 'all') != 'client':
from rucio.db.sqla import session, models
db_session = session.get_session()
internal_scope = InternalScope(self.user, **self.vo)
db_session.query(models.RSEFileAssociation).filter_by(name=tmp_file1_name, scope=internal_scope).delete()
db_session.query(models.ReplicaLock).filter_by(name=tmp_file1_name, scope=internal_scope).delete()
db_session.query(models.ReplicationRule).filter_by(name=tmp_file1_name, scope=internal_scope).delete()
db_session.query(models.DidMeta).filter_by(name=tmp_file1_name, scope=internal_scope).delete()
db_session.query(models.DataIdentifier).filter_by(name=tmp_file1_name, scope=internal_scope).delete()
db_session.commit()
tmp_file4 = file_generator()
checksum_tmp_file4 = md5(tmp_file4)
cmd = 'rucio -v upload --rse {0} --scope {1} --name {2} {3} --register-after-upload'.format(self.def_rse, self.user, tmp_file1_name, tmp_file4)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
assert (self.upload_success_str % path.basename(tmp_file4)) in out or (self.upload_success_str % path.basename(tmp_file4)) in err
assert checksum_tmp_file4 == [replica for replica in self.replica_client.list_replicas(dids=[{'name': tmp_file1_name, 'scope': self.user}])][0]['md5']
# try to upload file that already exists on RSE and is already registered -> no overwrite
cmd = 'rucio -v upload --rse {0} --scope {1} --name {2} {3} --register-after-upload'.format(self.def_rse, self.user, tmp_file1_name, tmp_file4)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
remove(tmp_file4)
assert 'File already registered' in out or 'File already registered' in err
def test_upload_file_guid(self):
"""CLIENT(USER): Rucio upload file with guid"""
tmp_file1 = file_generator()
tmp_guid = generate_uuid()
cmd = 'rucio -v upload --rse {0} --guid {1} --scope {2} {3}'.format(self.def_rse, tmp_guid, self.user, tmp_file1)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
remove(tmp_file1)
upload_string_1 = (self.upload_success_str % path.basename(tmp_file1))
assert upload_string_1 in out or upload_string_1 in err
def test_upload_repeated_file(self):
"""CLIENT(USER): Rucio upload repeated files"""
# One of the files to upload is already catalogued but was removed
tmp_file1 = file_generator()
tmp_file2 = file_generator()
tmp_file3 = file_generator()
tmp_file1_name = path.basename(tmp_file1)
cmd = 'rucio -v upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
# get the rule for the file
cmd = r"rucio list-rules {0}:{1} | grep {0}:{1} | cut -f1 -d\ ".format(self.user, tmp_file1_name)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
rule = out
# delete the file from the catalog
cmd = "rucio delete-rule {0}".format(rule)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# delete the physical file
cmd = "find /tmp/rucio_rse/ -name {0} |xargs rm".format(tmp_file1_name)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
cmd = 'rucio -v upload --rse {0} --scope {1} {2} {3} {4}'.format(self.def_rse, self.user, tmp_file1, tmp_file2, tmp_file3)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
remove(tmp_file1)
remove(tmp_file2)
remove(tmp_file3)
upload_string_1 = (self.upload_success_str % tmp_file1_name)
assert upload_string_1 in out or upload_string_1 in err
def test_upload_repeated_file_dataset(self):
"""CLIENT(USER): Rucio upload repeated files to dataset"""
# One of the files to upload is already in the dataset
tmp_file1 = file_generator()
tmp_file2 = file_generator()
tmp_file3 = file_generator()
tmp_file1_name = path.basename(tmp_file1)
tmp_file3_name = path.basename(tmp_file3)
tmp_dsn = self.user + ':DSet' + rse_name_generator() # something like mock:DSetMOCK_S0M37HING
# Adding files to a new dataset
cmd = 'rucio -v upload --rse {0} --scope {1} {2} {3}'.format(self.def_rse, self.user, tmp_file1, tmp_dsn)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
# upload the files to the dataset
cmd = 'rucio -v upload --rse {0} --scope {1} {2} {3} {4} {5}'.format(self.def_rse, self.user, tmp_file1, tmp_file2, tmp_file3, tmp_dsn)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
remove(tmp_file1)
remove(tmp_file2)
remove(tmp_file3)
# searching for the file in the new dataset
cmd = 'rucio list-files {0}'.format(tmp_dsn)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
# tmp_file1 must be in the dataset
assert re.search("{0}:{1}".format(self.user, tmp_file1_name), out) is not None
# tmp_file3 must be in the dataset
assert re.search("{0}:{1}".format(self.user, tmp_file3_name), out) is not None
def test_upload_file_dataset(self):
"""CLIENT(USER): Rucio upload files to dataset"""
tmp_file1 = file_generator()
tmp_file2 = file_generator()
tmp_file3 = file_generator()
tmp_file1_name = path.basename(tmp_file1)
tmp_dsn = self.user + ':DSet' + rse_name_generator() # something like mock:DSetMOCK_S0M37HING
# Adding files to a new dataset
cmd = 'rucio -v upload --rse {0} --scope {1} {2} {3} {4} {5}'.format(self.def_rse, self.user, tmp_file1, tmp_file2, tmp_file3, tmp_dsn)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
remove(tmp_file1)
remove(tmp_file2)
remove(tmp_file3)
# searching for the file in the new dataset
cmd = 'rucio list-files {0}'.format(tmp_dsn)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
assert re.search("{0}:{1}".format(self.user, tmp_file1_name), out) is not None
def test_upload_file_dataset_register_after_upload(self):
"""CLIENT(USER): Rucio upload files to dataset with file registration after upload"""
tmp_file1 = file_generator()
tmp_file2 = file_generator()
tmp_file3 = file_generator()
tmp_file1_name = path.basename(tmp_file1)
tmp_dsn = self.user + ':DSet' + rse_name_generator() # something like mock:DSetMOCK_S0M37HING
# Adding files to a new dataset
cmd = 'rucio -v upload --register-after-upload --rse {0} --scope {1} {2} {3} {4} {5}'.format(self.def_rse, self.user, tmp_file1, tmp_file2, tmp_file3, tmp_dsn)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
remove(tmp_file1)
remove(tmp_file2)
remove(tmp_file3)
# searching for the file in the new dataset
cmd = 'rucio list-files {0}'.format(tmp_dsn)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
assert re.search("{0}:{1}".format(self.user, tmp_file1_name), out) is not None
def test_upload_adds_md5digest(self):
"""CLIENT(USER): Upload Checksums"""
# user has a file to upload
filename = file_generator()
tmp_file1_name = path.basename(filename)
file_md5 = md5(filename)
# user uploads file
cmd = 'rucio -v upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, filename)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
# When inspecting the metadata of the new file the user finds the md5 checksum
meta = self.did_client.get_metadata(scope=self.user, name=tmp_file1_name)
assert 'md5' in meta
assert meta['md5'] == file_md5
remove(filename)
def test_create_dataset(self):
"""CLIENT(USER): Rucio add dataset"""
tmp_name = self.user + ':DSet' + rse_name_generator() # something like mock:DSetMOCK_S0M37HING
cmd = 'rucio add-dataset ' + tmp_name
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert re.search('Added ' + tmp_name, out) is not None
def test_add_files_to_dataset(self):
"""CLIENT(USER): Rucio add files to dataset"""
tmp_file1 = file_generator()
tmp_file2 = file_generator()
tmp_dataset = self.user + ':DSet' + rse_name_generator() # something like mock:DSetMOCK_S0M37HING
# add files
cmd = 'rucio upload --rse {0} --scope {1} {2} {3}'.format(self.def_rse, self.user, tmp_file1, tmp_file2)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# create dataset
cmd = 'rucio add-dataset ' + tmp_dataset
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# add files to dataset
cmd = 'rucio attach {0} {3}:{1} {3}:{2}'.format(tmp_dataset, tmp_file1[5:], tmp_file2[5:], self.user) # trimming '/tmp/' from filename
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# find the added files
cmd = 'rucio list-files ' + tmp_dataset
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert re.search(tmp_file1[5:], out) is not None
def test_download_file(self):
"""CLIENT(USER): Rucio download files"""
tmp_file1 = file_generator()
# add files
cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# download files
cmd = 'rucio -v download --dir /tmp {0}:{1}'.format(self.user, tmp_file1[5:]) # trimming '/tmp/' from filename
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# search for the files with ls
cmd = 'ls /tmp/' # search in /tmp/
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert re.search(tmp_file1[5:], out) is not None
tmp_file1 = file_generator()
# add files
cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# download files
cmd = 'rucio -v download --dir /tmp {0}:{1}'.format(self.user, tmp_file1[5:-2] + '*') # trimming '/tmp/' from filename
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# search for the files with ls
cmd = 'ls /tmp/' # search in /tmp/
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert re.search(tmp_file1[5:], out) is not None
try:
for i in listdir('data13_hip'):
unlink('data13_hip/%s' % i)
rmdir('data13_hip')
except Exception:
pass
@pytest.mark.noparallel(reason='fails when run in parallel')
def test_download_no_subdir(self):
"""CLIENT(USER): Rucio download files with --no-subdir and check that files already found locally are not replaced"""
tmp_file = file_generator()
# add files
cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert exitcode == 0
# download files with --no-subdir
cmd = 'rucio -v download --no-subdir --dir /tmp {0}:{1}'.format(self.user, tmp_file[5:]) # trimming '/tmp/' from filename
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert exitcode == 0
# search for the files with ls
cmd = 'ls /tmp/' # search in /tmp/
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert tmp_file[5:] in out
# download again with --no-subdir
cmd = 'rucio -v download --no-subdir --dir /tmp {0}:{1}'.format(self.user, tmp_file[5:]) # trimming '/tmp/' from filename
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert exitcode == 0
assert re.search(r'Downloaded files:\s+0', out) is not None
assert re.search(r'Files already found locally:\s+1', out) is not None
try:
for i in listdir('data13_hip'):
unlink('data13_hip/%s' % i)
rmdir('data13_hip')
except Exception:
pass
def test_download_filter(self):
"""CLIENT(USER): Rucio download with filter options"""
# Use filter option to download file with wildcarded name
tmp_file1 = file_generator()
uuid = generate_uuid()
cmd = 'rucio upload --rse {0} --scope {1} --guid {2} {3}'.format(self.def_rse, self.user, uuid, tmp_file1)
exitcode, out, err = execute(cmd)
print(out, err)
remove(tmp_file1)
wrong_guid = generate_uuid()
cmd = 'rucio -v download --dir /tmp {0}:{1} --filter guid={2}'.format(self.user, '*', wrong_guid)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
cmd = 'ls /tmp/{0}'.format(self.user)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert re.search(tmp_file1[5:], out) is None
cmd = 'rucio -v download --dir /tmp {0}:{1} --filter guid={2}'.format(self.user, '*', uuid)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
cmd = 'ls /tmp/{0}'.format(self.user)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert re.search(tmp_file1[5:], out) is not None
# Only use filter option to download file
tmp_file1 = file_generator()
uuid = generate_uuid()
cmd = 'rucio upload --rse {0} --scope {1} --guid {2} {3}'.format(self.def_rse, self.user, uuid, tmp_file1)
exitcode, out, err = execute(cmd)
print(out, err)
remove(tmp_file1)
wrong_guid = generate_uuid()
cmd = 'rucio -v download --dir /tmp --scope {0} --filter guid={1}'.format(self.user, wrong_guid)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
cmd = 'ls /tmp/{0}'.format(self.user)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert re.search(tmp_file1[5:], out) is None
cmd = 'rucio -v download --dir /tmp --scope {0} --filter guid={1}'.format(self.user, uuid)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
cmd = 'ls /tmp/{0}'.format(self.user)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert re.search(tmp_file1[5:], out) is not None
# Only use filter option to download dataset
tmp_file1 = file_generator()
dataset_name = 'dataset_%s' % generate_uuid()
cmd = 'rucio upload --rse {0} --scope {1} {2} {1}:{3}'.format(self.def_rse, self.user, tmp_file1, dataset_name)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
remove(tmp_file1)
cmd = 'rucio download --dir /tmp --scope {0} --filter created_before=1900-01-01T00:00:00.000Z'.format(self.user)
exitcode, out, err = execute(cmd)
cmd = 'ls /tmp/{0}'.format(dataset_name)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert re.search(tmp_file1[5:], out) is None
cmd = 'rucio download --dir /tmp --scope {0} --filter created_after=1900-01-01T00:00:00.000Z'.format(self.user)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
cmd = 'ls /tmp/{0}'.format(dataset_name)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# TODO: https://github.com/rucio/rucio/issues/2926 !
# assert re.search(tmp_file1[5:], out) is not None
# Use filter option to download dataset with wildcarded name
tmp_file1 = file_generator()
cmd = 'rucio upload --rse {0} --scope {1} {2} {1}:{3}'.format(self.def_rse, self.user, tmp_file1, dataset_name)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
remove(tmp_file1)
cmd = 'rucio download --dir /tmp {0}:{1} --filter created_before=1900-01-01T00:00:00.000Z'.format(self.user, dataset_name[0:-1] + '*')
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
cmd = 'ls /tmp/{0}'.format(dataset_name)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert re.search(tmp_file1[5:], out) is None
cmd = 'rucio download --dir /tmp {0}:{1} --filter created_after=1900-01-01T00:00:00.000Z'.format(self.user, dataset_name[0:-1] + '*')
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
cmd = 'ls /tmp/{0}'.format(dataset_name)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert re.search(tmp_file1[5:], out) is not None
def test_download_timeout_options_accepted(self):
"""CLIENT(USER): Rucio download timeout options """
tmp_file1 = file_generator()
# add files
cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# download files
cmd = 'rucio download --dir /tmp --transfer-timeout 3 --transfer-speed-timeout 1000 {0}:{1}'.format(self.user, tmp_file1[5:]) # trimming '/tmp/' from filename
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert not err
# search for the files with ls
cmd = 'ls /tmp/' # search in /tmp/
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# Check that the transfer-speed-timeout option is not accepted together with --pfn
cmd = 'rucio -v download --rse {0} --transfer-speed-timeout 1 --pfn http://a.b.c/ {1}:{2}'.format(self.def_rse, self.user, tmp_file1)
exitcode, out, err = execute(cmd)
print(out, err)
assert "Download with --pfn doesn't support --transfer-speed-timeout" in err
def test_download_metalink_file(self):
"""CLIENT(USER): Rucio download with metalink file"""
metalink_file_path = generate_uuid()
scope = self.user
# Use filter and metalink option
cmd = 'rucio download --scope mock --filter size=1 --metalink=test'
exitcode, out, err = execute(cmd)
print(out, err)
assert 'Arguments filter and metalink cannot be used together' in err
# Use did and metalink option
cmd = 'rucio download --metalink=test mock:test'
exitcode, out, err = execute(cmd)
print(out, err)
assert 'Arguments dids and metalink cannot be used together' in err
# Download only with metalink file
tmp_file = file_generator()
tmp_file_name = tmp_file[5:]
cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, scope, tmp_file)
exitcode, out, err = execute(cmd)
print(out, err)
replica_file = ReplicaClient().list_replicas([{'scope': scope, 'name': tmp_file_name}], metalink=True)
with open(metalink_file_path, 'w+') as metalink_file:
metalink_file.write(replica_file)
cmd = 'rucio download --dir /tmp --metalink {0}'.format(metalink_file_path)
exitcode, out, err = execute(cmd)
print(out, err)
remove(metalink_file_path)
cmd = 'ls /tmp/{0}'.format(scope)
exitcode, out, err = execute(cmd)
print(out, err)
assert re.search(tmp_file_name, out) is not None
def test_download_succeeds_md5only(self):
"""CLIENT(USER): Rucio download succeeds MD5 only"""
# user has a file to upload
filename = file_generator()
file_md5 = md5(filename)
filesize = stat(filename).st_size
lfn = {'name': filename[5:], 'scope': self.user, 'bytes': filesize, 'md5': file_md5}
# user uploads file
self.replica_client.add_replicas(files=[lfn], rse=self.def_rse)
rse_settings = rsemgr.get_rse_info(rse=self.def_rse, **self.vo)
protocol = rsemgr.create_protocol(rse_settings, 'write')
protocol.connect()
pfn = list(protocol.lfns2pfns(lfn).values())[0]
protocol.put(filename[5:], pfn, filename[:5])
protocol.close()
remove(filename)
# download files
cmd = 'rucio -v download --dir /tmp {0}:{1}'.format(self.user, filename[5:])
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# search for the files with ls
cmd = 'ls /tmp/{0}'.format(self.user) # search in /tmp/
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert re.search(filename[5:], out) is not None
try:
for i in listdir('data13_hip'):
unlink('data13_hip/%s' % i)
rmdir('data13_hip')
except Exception:
pass
def test_download_fails_badmd5(self):
"""CLIENT(USER): Rucio download fails on MD5 mismatch"""
# user has a file to upload
filename = file_generator()
file_md5 = md5(filename)
filesize = stat(filename).st_size
lfn = {'name': filename[5:], 'scope': self.user, 'bytes': filesize, 'md5': '0123456789abcdef0123456789abcdef'}
# user uploads file
self.replica_client.add_replicas(files=[lfn], rse=self.def_rse)
rse_settings = rsemgr.get_rse_info(rse=self.def_rse, **self.vo)
protocol = rsemgr.create_protocol(rse_settings, 'write')
protocol.connect()
pfn = list(protocol.lfns2pfns(lfn).values())[0]
protocol.put(filename[5:], pfn, filename[:5])
protocol.close()
remove(filename)
# download file
cmd = 'rucio -v download --dir /tmp {0}:{1}'.format(self.user, filename[5:])
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
report = r'Local\ checksum\:\ {0},\ Rucio\ checksum\:\ 0123456789abcdef0123456789abcdef'.format(file_md5)
print('searching', report, 'in', err)
assert re.search(report, err) is not None
# The file should not exist
cmd = 'ls /tmp/' # search in /tmp/
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert re.search(filename[5:], out) is None
try:
for i in listdir('data13_hip'):
unlink('data13_hip/%s' % i)
rmdir('data13_hip')
except Exception:
pass
def test_download_dataset(self):
"""CLIENT(USER): Rucio download dataset"""
tmp_file1 = file_generator()
tmp_dataset = self.user + ':DSet' + rse_name_generator() # something like mock:DSetMOCK_S0M37HING
# add files
cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# create dataset
cmd = 'rucio add-dataset ' + tmp_dataset
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# add files to dataset
cmd = 'rucio attach {0} {1}:{2}'.format(tmp_dataset, self.user, tmp_file1[5:]) # trimming '/tmp/' from filename
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# download dataset
cmd = 'rucio -v download --dir /tmp {0}'.format(tmp_dataset)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
search = '{0} successfully downloaded'.format(tmp_file1[5:]) # trimming '/tmp/' from filename
assert re.search(search, err) is not None
def test_list_blacklisted_replicas(self):
"""CLIENT(USER): Rucio list replicas"""
# add rse
tmp_rse = rse_name_generator()
cmd = 'rucio-admin rse add {0}'.format(tmp_rse)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
cmd = 'rucio-admin rse add-protocol --hostname blacklistreplica --scheme file --prefix /rucio --port 0 --impl rucio.rse.protocols.posix.Default ' \
'--domain-json \'{"wan": {"read": 1, "write": 1, "delete": 1, "third_party_copy": 1}}\' %s' % tmp_rse
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# add files
tmp_file1 = file_generator()
file_name = tmp_file1[5:] # trimming '/tmp/' from filename
cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(tmp_rse, self.user, tmp_file1)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# create dataset
tmp_dataset = self.user + ':DSet' + rse_name_generator()
cmd = 'rucio add-dataset ' + tmp_dataset
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# add files to dataset
cmd = 'rucio attach {0} {1}:{2}'.format(tmp_dataset, self.user, file_name)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# Listing the replica should work before blacklisting the RSE
cmd = 'rucio list-file-replicas {}'.format(tmp_dataset)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert tmp_rse in out
# Blacklist the rse
cmd = 'rucio-admin rse update --rse {} --setting availability_read --value False'.format(tmp_rse)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert not err
# list-file-replicas should, by default, list replicas from blacklisted rses
cmd = 'rucio list-file-replicas {}'.format(tmp_dataset)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert tmp_rse in out
def test_create_rule(self):
"""CLIENT(USER): Rucio add rule"""
tmp_file1 = file_generator()
# add files
cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# add rse
tmp_rse = rse_name_generator()
cmd = 'rucio-admin rse add {0}'.format(tmp_rse)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
# add quota
self.account_client.set_local_account_limit('root', tmp_rse, -1)
# add rse attributes
cmd = 'rucio-admin rse set-attribute --rse {0} --key spacetoken --value ATLASSCRATCHDISK'.format(tmp_rse)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# add rse
tmp_rse = rse_name_generator()
cmd = 'rucio-admin rse add {0}'.format(tmp_rse)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# add quota
self.account_client.set_local_account_limit('root', tmp_rse, -1)
# add rse attributes
cmd = 'rucio-admin rse set-attribute --rse {0} --key spacetoken --value ATLASSCRATCHDISK'.format(tmp_rse)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# add rse
tmp_rse = rse_name_generator()
cmd = 'rucio-admin rse add {0}'.format(tmp_rse)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# add quota
self.account_client.set_local_account_limit('root', tmp_rse, -1)
# add rse attributes
cmd = 'rucio-admin rse set-attribute --rse {0} --key spacetoken --value ATLASSCRATCHDISK'.format(tmp_rse)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# add rules
cmd = "rucio add-rule {0}:{1} 3 'spacetoken=ATLASSCRATCHDISK'".format(self.user, tmp_file1[5:])
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
assert not err
rule = out[:-1] # trimming the newline character
assert re.match(r'^\w+$', rule)
# check if rule exist for the file
cmd = "rucio list-rules {0}:{1}".format(self.user, tmp_file1[5:])
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert re.search(rule, out) is not None
def test_create_rule_delayed(self):
"""CLIENT(USER): Rucio add rule delayed"""
tmp_file1 = file_generator()
# add files
cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# add rse
tmp_rse = rse_name_generator()
cmd = 'rucio-admin rse add {0}'.format(tmp_rse)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# add quota
self.account_client.set_local_account_limit('root', tmp_rse, -1)
# add rse attributes
cmd = 'rucio-admin rse set-attribute --rse {0} --key spacetoken --value ATLASRULEDELAYED'.format(tmp_rse)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# try adding rule with an incorrect delay-injection. Must fail
cmd = "rucio add-rule --delay-injection asdsaf {0}:{1} 1 'spacetoken=ATLASRULEDELAYED'".format(self.user, tmp_file1[5:])
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
assert err
# Add a correct rule
cmd = "rucio add-rule --delay-injection 3600 {0}:{1} 1 'spacetoken=ATLASRULEDELAYED'".format(self.user, tmp_file1[5:])
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert not err
rule = out[:-1] # trimming the newline character
cmd = "rucio rule-info {0}".format(rule)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
out_lines = out.splitlines()
assert any(re.match(r'State:.* INJECT', line) for line in out_lines)
assert any(re.match(r'Locks OK/REPLICATING/STUCK:.* 0/0/0', line) for line in out_lines)
# Check that "Created at" is approximately 3600 seconds in the future
[created_at_line] = filter(lambda x: "Created at" in x, out_lines)
created_at = re.search(r'Created at:\s+(\d.*\d)$', created_at_line).group(1)
created_at = datetime.strptime(created_at, "%Y-%m-%d %H:%M:%S")
assert datetime.utcnow() + timedelta(seconds=3550) < created_at < datetime.utcnow() + timedelta(seconds=3650)
def test_delete_rule(self):
"""CLIENT(USER): rule deletion"""
self.account_client.set_local_account_limit('root', self.def_rse, -1)
tmp_file1 = file_generator()
# add files
cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# add rse
tmp_rse = rse_name_generator()
cmd = 'rucio-admin rse add {0}'.format(tmp_rse)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
self.account_client.set_local_account_limit('root', tmp_rse, -1)
# add rse attributes
cmd = 'rucio-admin rse set-attribute --rse {0} --key spacetoken --value ATLASDELETERULE'.format(tmp_rse)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# add rules
cmd = "rucio add-rule {0}:{1} 1 'spacetoken=ATLASDELETERULE'".format(self.user, tmp_file1[5:])
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(err)
print(out)
# get the rules for the file
cmd = r"rucio list-rules {0}:{1} | grep {0}:{1} | cut -f1 -d\ ".format(self.user, tmp_file1[5:])
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
(rule1, rule2) = out.split()
# delete the rules for the file
cmd = "rucio delete-rule {0}".format(rule1)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
cmd = "rucio delete-rule {0}".format(rule2)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# search for the file
cmd = "rucio list-dids --filter type=all {0}:{1}".format(self.user, tmp_file1[5:])
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert 5 == len(out.splitlines())
def test_add_file_twice(self):
"""CLIENT(USER): Add file twice"""
tmp_file1 = file_generator()
# add file twice
cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
assert re.search("File {0}:{1} successfully uploaded on the storage".format(self.user, tmp_file1[5:]), out) is None
def test_add_delete_add_file(self):
"""CLIENT(USER): Add/Delete/Add"""
tmp_file1 = file_generator()
# add file
cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# get the rule for the file
cmd = r"rucio list-rules {0}:{1} | grep {0}:{1} | cut -f1 -d\ ".format(self.user, tmp_file1[5:])
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
rule = out
# delete the file from the catalog
cmd = "rucio delete-rule {0}".format(rule)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# delete the physical file
cmd = "find /tmp/rucio_rse/ -name {0} |xargs rm".format(tmp_file1[5:])
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# modify the file to avoid same checksum
cmd = "echo 'delta' >> {0}".format(tmp_file1)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
# add the same file
cmd = 'rucio upload --rse {0} --scope {1} {2}'.format(self.def_rse, self.user, tmp_file1)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out, err)
assert re.search("File {0}:{1} successfully uploaded on the storage".format(self.user, tmp_file1[5:]), out) is None
def test_attach_files_dataset(self):
"""CLIENT(USER): Rucio attach files to dataset"""
# Attach files to a dataset using the attach method
tmp_file1 = file_generator()
tmp_file2 = file_generator()
tmp_file3 = file_generator()
tmp_dsn = self.user + ':DSet' + rse_name_generator() # something like mock:DSetMOCK_S0M37HING
# Adding files to a new dataset
cmd = 'rucio upload --rse {0} --scope {1} {2} {3}'.format(self.def_rse, self.user, tmp_file1, tmp_dsn)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
# upload the files
cmd = 'rucio upload --rse {0} --scope {1} {2} {3}'.format(self.def_rse, self.user, tmp_file2, tmp_file3)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
remove(tmp_file1)
remove(tmp_file2)
remove(tmp_file3)
# attach the files to the dataset
cmd = 'rucio attach {0} {1}:{2} {1}:{3}'.format(tmp_dsn, self.user, tmp_file2[5:], tmp_file3[5:]) # trimming '/tmp/' from filenames
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
# searching for the file in the new dataset
cmd = 'rucio list-files {0}'.format(tmp_dsn)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
# tmp_file2 must be in the dataset
assert re.search("{0}:{1}".format(self.user, tmp_file2[5:]), out) is not None
# tmp_file3 must be in the dataset
assert re.search("{0}:{1}".format(self.user, tmp_file3[5:]), out) is not None
def test_detach_files_dataset(self):
"""CLIENT(USER): Rucio detach files to dataset"""
# Attach files to a dataset using the attach method
tmp_file1 = file_generator()
tmp_file2 = file_generator()
tmp_file3 = file_generator()
tmp_dsn = self.user + ':DSet' + rse_name_generator() # something like mock:DSetMOCK_S0M37HING
# Adding files to a new dataset
cmd = 'rucio upload --rse {0} --scope {1} {2} {3} {4} {5}'.format(self.def_rse, self.user, tmp_file1, tmp_file2, tmp_file3, tmp_dsn)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
remove(tmp_file1)
remove(tmp_file2)
remove(tmp_file3)
# detach the files from the dataset
cmd = 'rucio detach {0} {1}:{2} {1}:{3}'.format(tmp_dsn, self.user, tmp_file2[5:], tmp_file3[5:]) # trimming '/tmp/' from filenames
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
# searching for the file in the new dataset
cmd = 'rucio list-files {0}'.format(tmp_dsn)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
# tmp_file1 must be in the dataset
assert re.search("{0}:{1}".format(self.user, tmp_file1[5:]), out) is not None
# tmp_file3 must no longer be in the dataset
assert re.search("{0}:{1}".format(self.user, tmp_file3[5:]), out) is None
def test_attach_file_twice(self):
"""CLIENT(USER): Rucio attach a file twice"""
# Attach files to a dataset using the attach method
tmp_file1 = file_generator()
tmp_dsn = self.user + ':DSet' + rse_name_generator() # something like mock:DSetMOCK_S0M37HING
# Adding files to a new dataset
cmd = 'rucio upload --rse {0} --scope {1} {2} {3}'.format(self.def_rse, self.user, tmp_file1, tmp_dsn)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
remove(tmp_file1)
# attach the files to the dataset
cmd = 'rucio attach {0} {1}:{2}'.format(tmp_dsn, self.user, tmp_file1[5:]) # trimming '/tmp/' from filenames
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
assert re.search("The file already exists", err) is not None
def test_attach_dataset_twice(self):
""" CLIENT(USER): Rucio attach a dataset twice """
container = 'container_%s' % generate_uuid()
dataset = 'dataset_%s' % generate_uuid()
self.did_client.add_container(scope=self.user, name=container)
self.did_client.add_dataset(scope=self.user, name=dataset)
# Attach dataset to container
cmd = 'rucio attach {0}:{1} {0}:{2}'.format(self.user, container, dataset)
exitcode, out, err = execute(cmd)
# Attach again
cmd = 'rucio attach {0}:{1} {0}:{2}'.format(self.user, container, dataset)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
assert re.search("Data identifier already added to the destination content", err) is not None
def test_detach_non_existing_file(self):
"""CLIENT(USER): Rucio detach a non existing file"""
tmp_file1 = file_generator()
tmp_dsn = self.user + ':DSet' + rse_name_generator() # something like mock:DSetMOCK_S0M37HING
# Adding files to a new dataset
cmd = 'rucio upload --rse {0} --scope {1} {2} {3}'.format(self.def_rse, self.user, tmp_file1, tmp_dsn)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
remove(tmp_file1)
# detach a non-existing file from the dataset
cmd = 'rucio detach {0} {1}:{2}'.format(tmp_dsn, self.user, 'file_ghost')
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
assert re.search("Data identifier not found.", err) is not None
@pytest.mark.dirty
def test_list_did_recursive(self):
""" CLIENT(USER): List did recursive """
# Setup nested collections
tmp_scope = 'mock'
tmp_container_1 = 'container_%s' % generate_uuid()
cmd = 'rucio add-container {0}:{1}'.format(tmp_scope, tmp_container_1)
exitcode, out, err = execute(cmd)
tmp_container_2 = 'container_%s' % generate_uuid()
cmd = 'rucio add-container {0}:{1}'.format(tmp_scope, tmp_container_2)
exitcode, out, err = execute(cmd)
tmp_container_3 = 'container_%s' % generate_uuid()
cmd = 'rucio add-container {0}:{1}'.format(tmp_scope, tmp_container_3)
exitcode, out, err = execute(cmd)
cmd = 'rucio attach {0}:{1} {0}:{2}'.format(tmp_scope, tmp_container_1, tmp_container_2)
exitcode, out, err = execute(cmd)
cmd = 'rucio attach {0}:{1} {0}:{2}'.format(tmp_scope, tmp_container_2, tmp_container_3)
exitcode, out, err = execute(cmd)
# All attached DIDs are expected
cmd = 'rucio list-dids {0}:{1} --recursive'.format(tmp_scope, tmp_container_1)
exitcode, out, err = execute(cmd)
assert re.search(tmp_container_1, out) is not None
assert re.search(tmp_container_2, out) is not None
assert re.search(tmp_container_3, out) is not None
# Wildcards cannot be used with --recursive
cmd = 'rucio list-dids {0}:* --recursive'.format(tmp_scope)
exitcode, out, err = execute(cmd)
assert re.search("Option recursive cannot be used with wildcards", err) is not None
@pytest.mark.dirty
def test_attach_many_dids(self):
""" CLIENT(USER): Rucio attach many (>1000) DIDs """
# Setup data for CLI check
tmp_dsn_name = 'Container' + rse_name_generator()
tmp_dsn_did = self.user + ':' + tmp_dsn_name
self.did_client.add_did(scope=self.user, name=tmp_dsn_name, type='CONTAINER')
files = [{'name': 'dsn_%s' % generate_uuid(), 'scope': self.user, 'type': 'DATASET'} for i in range(0, 1500)]
self.did_client.add_dids(files[:1000])
self.did_client.add_dids(files[1000:])
# Attaching over 1000 DIDs with CLI
cmd = 'rucio attach {0}'.format(tmp_dsn_did)
for tmp_file in files:
cmd += ' {0}:{1}'.format(tmp_file['scope'], tmp_file['name'])
exitcode, out, err = execute(cmd)
print(out)
print(err)
# Checking if the execution was successful and if the DIDs belong together
assert re.search('DIDs successfully attached', out) is not None
cmd = 'rucio list-content {0}'.format(tmp_dsn_did)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
# first dataset must be in the container
assert re.search("{0}:{1}".format(self.user, files[0]['name']), out) is not None
# last dataset must be in the container
assert re.search("{0}:{1}".format(self.user, files[-1]['name']), out) is not None
# Setup data with file
did_file_path = 'list_dids.txt'
files = [{'name': 'dsn_%s' % generate_uuid(), 'scope': self.user, 'type': 'DATASET'} for i in range(0, 1500)]
self.did_client.add_dids(files[:1000])
self.did_client.add_dids(files[1000:])
with open(did_file_path, 'w') as did_file:
for file in files:
did_file.write(file['scope'] + ':' + file['name'] + '\n')
did_file.close()
# Attaching over 1000 files per file
cmd = 'rucio attach {0} -f {1}'.format(tmp_dsn_did, did_file_path)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
print(out)
print(err)
remove(did_file_path)
# Checking if the execution was successful and if the DIDs belong together
assert re.search('DIDs successfully attached', out) is not None
cmd = 'rucio list-content {0}'.format(tmp_dsn_did)
print(self.marker + cmd)
exitcode, out, err = execute(cmd)
# first file must be in the dataset
assert re.search("{0}:{1}".format(self.user, files[0]['name']), out) is not None
# last file must be in the dataset
assert re.search("{0}:{1}".format(self.user, files[-1]['name']), out) is not None
@pytest.mark.dirty
def test_attach_many_dids_twice(self):
""" CLIENT(USER): Attach many (>1000) DIDs twice """
# Setup data for CLI check
container_name = 'container' + generate_uuid()
container = self.user + ':' + container_name
self.did_client.add_did(scope=self.user, name=container_name, type='CONTAINER')
datasets = [{'name': 'dsn_%s' % generate_uuid(), 'scope': self.user, 'type': 'DATASET'} for i in range(0, 1500)]
self.did_client.add_dids(datasets[:1000])
self.did_client.add_dids(datasets[1000:])
# Attaching over 1000 DIDs with CLI
cmd = 'rucio attach {0}'.format(container)
for dataset in datasets:
cmd += ' {0}:{1}'.format(dataset['scope'], dataset['name'])
exitcode, out, err = execute(cmd)
# Attaching twice
cmd = 'rucio attach {0}'.format(container)
for dataset in datasets:
cmd += ' {0}:{1}'.format(dataset['scope'], dataset['name'])
exitcode, out, err = execute(cmd)
assert re.search("DIDs successfully attached", out) is not None
# Attaching twice plus one DID that is not already attached
new_dataset = {'name': 'dsn_%s' % generate_uuid(), 'scope': self.user, 'type': 'DATASET'}
datasets.append(new_dataset)
self.did_client.add_did(scope=self.user, name=new_dataset['name'], type='DATASET')
cmd = 'rucio attach {0}'.format(container)
for dataset in datasets:
cmd += ' {0}:{1}'.format(dataset['scope'], dataset['name'])
exitcode, out, err = execute(cmd)
assert re.search("DIDs successfully attached", out) is not None
cmd = 'rucio list-content {0}'.format(container)
exitcode, out, err = execute(cmd)
assert re.search("{0}:{1}".format(self.user, new_dataset['name']), out) is not None
@pytest.mark.noparallel(reason='might override global RSE settings')
def test_import_data(self):
""" CLIENT(ADMIN): Import data into rucio"""
file_path = 'data_import.json'
rses = {rse['rse']: rse for rse in self.rse_client.list_rses()}
rses[rse_name_generator()] = {'country_name': 'test'}
data = {'rses': rses}
with open(file_path, 'w+') as file:
file.write(render_json(**data))
cmd = 'rucio-admin data import {0}'.format(file_path)
exitcode, out, err = execute(cmd)
assert re.search('Data successfully imported', out) is not None
remove(file_path)
@pytest.mark.noparallel(reason='fails when run in parallel')
def test_export_data(self):
""" CLIENT(ADMIN): Export data from rucio"""
file_path = 'data_export.json'
cmd = 'rucio-admin data export {0}'.format(file_path)
exitcode, out, err = execute(cmd)
print(out, err)
assert re.search('Data successfully exported', out) is not None
remove(file_path)
@pytest.mark.dirty
@pytest.mark.noparallel(reason='fails when run in parallel')
def test_set_tombstone(self):
""" CLIENT(ADMIN): set a tombstone on a replica. """
# Set tombstone on one replica
rse = 'MOCK4'
scope = 'mock'
name = generate_uuid()
self.replica_client.add_replica(rse, scope, name, 4, 'aaaaaaaa')
cmd = 'rucio-admin replicas set-tombstone {0}:{1} --rse {2}'.format(scope, name, rse)
exitcode, out, err = execute(cmd)
assert re.search('Set tombstone successfully', err) is not None
# Set tombstone on locked replica
name = generate_uuid()
self.replica_client.add_replica(rse, scope, name, 4, 'aaaaaaaa')
self.rule_client.add_replication_rule([{'name': name, 'scope': scope}], 1, rse, locked=True)
cmd = 'rucio-admin replicas set-tombstone {0}:{1} --rse {2}'.format(scope, name, rse)
exitcode, out, err = execute(cmd)
assert re.search('Replica is locked', err) is not None
# Set tombstone on not found replica
name = generate_uuid()
cmd = 'rucio-admin replicas set-tombstone {0}:{1} --rse {2}'.format(scope, name, rse)
exitcode, out, err = execute(cmd)
assert re.search('Replica not found', err) is not None
@pytest.mark.noparallel(reason='modifies account limit on pre-defined RSE')
def test_list_account_limits(self):
""" CLIENT (USER): list account limits. """
rse = 'MOCK4'
rse_exp = 'MOCK3|MOCK4'
account = 'root'
local_limit = 10
global_limit = 20
self.account_client.set_local_account_limit(account, rse, local_limit)
self.account_client.set_global_account_limit(account, rse_exp, global_limit)
cmd = 'rucio list-account-limits {0}'.format(account)
exitcode, out, err = execute(cmd)
assert re.search('.*{0}.*{1}.*'.format(rse, local_limit), out) is not None
assert re.search('.*{0}.*{1}.*'.format(rse_exp, global_limit), out) is not None
cmd = 'rucio list-account-limits --rse {0} {1}'.format(rse, account)
exitcode, out, err = execute(cmd)
assert re.search('.*{0}.*{1}.*'.format(rse, local_limit), out) is not None
assert re.search('.*{0}.*{1}.*'.format(rse_exp, global_limit), out) is not None
self.account_client.set_local_account_limit(account, rse, -1)
self.account_client.set_global_account_limit(account, rse_exp, -1)
@pytest.mark.noparallel(reason='modifies account limit on pre-defined RSE')
@pytest.mark.skipif('SUITE' in os.environ and os.environ['SUITE'] == 'client', reason='uses abacus daemon and core functions')
def test_list_account_usage(self):
""" CLIENT (USER): list account usage. """
from rucio.db.sqla import session, models
from rucio.core.account_counter import increase
from rucio.daemons.abacus import account as abacus_account
db_session = session.get_session()
db_session.query(models.AccountUsage).delete()
db_session.query(models.AccountLimit).delete()
db_session.query(models.AccountGlobalLimit).delete()
db_session.query(models.UpdatedAccountCounter).delete()
db_session.commit()
rse = 'MOCK4'
rse_id = self.rse_client.get_rse(rse)['id']
rse_exp = 'MOCK|MOCK4'
account = 'root'
usage = 4
local_limit = 10
local_left = local_limit - usage
global_limit = 20
global_left = global_limit - usage
self.account_client.set_local_account_limit(account, rse, local_limit)
self.account_client.set_global_account_limit(account, rse_exp, global_limit)
increase(rse_id, InternalAccount(account, **self.vo), 1, usage)
abacus_account.run(once=True)
cmd = 'rucio list-account-usage {0}'.format(account)
exitcode, out, err = execute(cmd)
assert re.search('.*{0}.*{1}.*{2}.*{3}'.format(rse, usage, local_limit, local_left), out) is not None
assert re.search('.*{0}.*{1}.*{2}.*{3}'.format(rse_exp, usage, global_limit, global_left), out) is not None
cmd = 'rucio list-account-usage --rse {0} {1}'.format(rse, account)
exitcode, out, err = execute(cmd)
assert re.search('.*{0}.*{1}.*{2}.*{3}'.format(rse, usage, local_limit, local_left), out) is not None
assert re.search('.*{0}.*{1}.*{2}.*{3}'.format(rse_exp, usage, global_limit, global_left), out) is not None
self.account_client.set_local_account_limit(account, rse, -1)
self.account_client.set_global_account_limit(account, rse_exp, -1)
|
[] |
[] |
[
"SUITE"
] |
[]
|
["SUITE"]
|
python
| 1 | 0 | |
ndmg/scripts/ndmg_cloud.py
|
#!/usr/bin/env python
# Copyright 2016 NeuroData (http://neurodata.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ndmg_cloud.py
# Created by Greg Kiar on 2017-02-02.
# Edited a ton by Alex Loftus
# Email: [email protected], [email protected]
#%%
import subprocess
import ast
import csv
import re
import os
import sys
import json
from copy import deepcopy
from collections import OrderedDict
from argparse import ArgumentParser
import warnings
import shutil
import time
from pathlib import Path
import boto3
import ndmg
import ndmg.utils as mgu
from ndmg.utils.s3_utils import get_credentials, get_matching_s3_objects, s3_client
participant_templ = "https://raw.githubusercontent.com/neurodata/ndmg/staging/templates/ndmg_cloud_participant.json"
def batch_submit(
bucket,
path,
jobdir,
credentials=None,
state="participant",
debug=False,
dataset=None,
log=False,
bg=False,
modif="",
reg_style="",
mod_type="",
):
"""
Searches through an S3 bucket, gets all subject-ids, creates json files
for each, and submits a batch job for every subject/session found.
"""
print(("Getting list from s3://{}/{}/...".format(bucket, path)))
threads = crawl_bucket(bucket, path, jobdir)
print("Generating job for each subject...")
jobs = create_json(
bucket,
path,
threads,
jobdir,
credentials,
debug,
dataset,
bg,
modif=modif,
reg_style=reg_style,
)
print("Submitting jobs to the queue...")
ids = submit_jobs(jobs, jobdir)
def crawl_bucket(bucket, path, jobdir):
"""
Gets subject list for a given S3 bucket and path
"""
# if jobdir has seshs info file in it, use that instead
sesh_path = "{}/seshs.json".format(jobdir)
if os.path.isfile(sesh_path):
print("seshs.json found -- loading bucket info from there")
with open(sesh_path, "r") as f:
seshs = json.load(f)
print("Information obtained from s3.")
return seshs
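# Regexes to pull subject and session IDs out of BIDS-style S3 keys (.../sub-<id>/ses-<id>/...).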
subj_pattern = r"(?<=sub-)(\w*)(?=/ses)"
sesh_pattern = r"(?<=ses-)(\d*)"
all_subfiles = get_matching_s3_objects(bucket, path + "/sub-")
subjs = list(set(re.findall(subj_pattern, obj)[0] for obj in all_subfiles))
# cmd = "aws s3 ls s3://{}/{}/".format(bucket, path)
# try:
# ACCESS, SECRET = get_credentials()
# os.environ["AWS_ACCESS_KEY_ID"] = ACCESS
# os.environ["AWS_SECRET_ACCESS_KEY"] = SECRET
# except:
# cmd += " --no-sign-request"
# out = subprocess.check_output(cmd, shell=True)
# pattern = r"(?<=sub-)(\w*)(?=/ses)"
# subjs = re.findall(pattern, out.decode("utf-8"))
# cmd = "aws s3 ls s3://{}/{}/sub-{}/"
# if not ACCESS:
# cmd += " --no-sign-request"
seshs = OrderedDict()
# TODO : use boto3 for this.
for subj in subjs:
prefix = path + "/sub-{}/".format(subj)
all_seshfiles = get_matching_s3_objects(bucket, prefix)
sesh = list(set([re.findall(sesh_pattern, obj)[0] for obj in all_seshfiles]))
# cmd = cmd.format(bucket, path, subj)
# out = subprocess.check_output(cmd, shell=True) # TODO: get this information outside of a loop
# sesh = re.findall("ses-(.+)/", out.decode("utf-8"))
if sesh != []:
seshs[subj] = sesh
print("{} added to seshs.".format(subj))
else:
seshs[subj] = None
print("{} not added (no sessions).".format(subj))
# seshs[subj] = sesh if sesh != [] else [None]
print(
(
"Session IDs: "
+ ", ".join(
[
subj + "-" + sesh if sesh is not None else subj
for subj in subjs
for sesh in seshs[subj]
]
)
)
)
with open(sesh_path, "w") as f:
json.dump(seshs, f)
print("{} created.".format(sesh_path))
print("Information obtained from s3.")
return seshs
def create_json(
bucket,
path,
threads,
jobdir,
credentials=None,
debug=False,
dataset=None,
bg=False,
modif="",
reg_style="",
mod_type="",
):
"""
Builds one AWS Batch job JSON per subject/session and returns the list of job file paths.
"""
jobsjson = "{}/jobs.json".format(jobdir)
if os.path.isfile(jobsjson):
with open(jobsjson, "r") as f:
jobs = json.load(f)
return jobs
# set up infrastructure
out = subprocess.check_output("mkdir -p {}".format(jobdir), shell=True)
out = subprocess.check_output("mkdir -p {}/jobs/".format(jobdir), shell=True)
out = subprocess.check_output("mkdir -p {}/ids/".format(jobdir), shell=True)
template = participant_templ
seshs = threads
# make template
if not os.path.isfile("{}/{}".format(jobdir, template.split("/")[-1])):
cmd = "wget --quiet -P {} {}".format(jobdir, template)
subprocess.check_output(cmd, shell=True)
with open("{}/{}".format(jobdir, template.split("/")[-1]), "r") as inf:
template = json.load(inf)
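# The template is an AWS Batch job definition; pull out the container command
# (which carries placeholder tokens such as <BUCKET>, <PATH> and <SUBJ>) and its
# environment so they can be filled in per subject/session below.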
cmd = template["containerOverrides"]["command"]
env = template["containerOverrides"]["environment"]
# TODO : This checks for any credentials csv file, rather than `/.aws/credentials`.
# modify template
if credentials is not None:
env[0]["value"], env[1]["value"] = get_credentials()
else:
env = []
template["containerOverrides"]["environment"] = env
# edit non-defaults
jobs = []
cmd[cmd.index("<BUCKET>")] = bucket
cmd[cmd.index("<PATH>")] = path
# edit defaults if necessary
if reg_style:
cmd[cmd.index("--sp") + 1] = reg_style
if mod_type:
cmd[cmd.index("--mod") + 1] = reg_style
if bg:
cmd.append("--big")
if modif:
cmd.insert(cmd.index("--push_data") + 1, u"--modif")
cmd.insert(cmd.index("--push_data") + 2, modif)
# edit participant-specific values
# loop over every session of every participant
for subj in seshs.keys():
print("... Generating job for sub-{}".format(subj))
# and for each session of that subject,
for sesh in seshs[subj]:
# add format-specific commands,
job_cmd = deepcopy(cmd)
job_cmd[job_cmd.index("<SUBJ>")] = subj
if sesh is not None:
job_cmd.insert(7, u"--session_label")
job_cmd.insert(8, u"{}".format(sesh))
if debug:
job_cmd += [u"--debug"]
# then, grab the template,
# add additional parameters,
# make the json file for this iteration,
# and add the path to its json file to `jobs`.
job_json = deepcopy(template)
ver = ndmg.VERSION.replace(".", "-")
if dataset:
name = "ndmg_{}_{}_sub-{}".format(ver, dataset, subj)
else:
name = "ndmg_{}_sub-{}".format(ver, subj)
if sesh is not None:
name = "{}_ses-{}".format(name, sesh)
print(job_cmd)
job_json["jobName"] = name
job_json["containerOverrides"]["command"] = job_cmd
job = os.path.join(jobdir, "jobs", name + ".json")
with open(job, "w") as outfile:
json.dump(job_json, outfile)
jobs += [job]
# return list of job jsons
with open(jobsjson, "w") as f:
json.dump(jobs, f)
return jobs
def submit_jobs(jobs, jobdir):
"""
Given a list of job JSON files, submits them to AWS Batch
"""
batch = s3_client(service="batch")
cmd_template = "--cli-input-json file://{}"
# cmd_template = batch.submit_jobs
for job in jobs:
# use this to start wherever
# if jobs.index(job) >= jobs.index('/jobs/jobs/ndmg_0-1-2_SWU4_sub-0025768_ses-1.json'):
with open(job, "r") as f:
kwargs = json.load(f)
print(("... Submitting job {}...".format(job)))
submission = batch.submit_job(**kwargs)
# out = subprocess.check_output(cmd, shell=True)
# time.sleep(0.1) # jobs sometimes hang, seeing if this helps
# submission = ast.literal_eval(out.decode("utf-8"))
print(
(
"Job Name: {}, Job ID: {}".format(
submission["jobName"], submission["jobId"]
)
)
)
sub_file = os.path.join(jobdir, "ids", submission["jobName"] + ".json")
with open(sub_file, "w") as outfile:
json.dump(submission, outfile)
print("Submitted.")
return 0
def get_status(jobdir, jobid=None):
"""
Given list of jobs, returns status of each.
"""
cmd_template = "aws batch describe-jobs --jobs {}"
if jobid is None:
print(("Describing jobs in {}/ids/...".format(jobdir)))
jobs = os.listdir(jobdir + "/ids/")
for job in jobs:
with open("{}/ids/{}".format(jobdir, job), "r") as inf:
submission = json.load(inf)
cmd = cmd_template.format(submission["jobId"])
print(("... Checking job {}...".format(submission["jobName"])))
out = subprocess.check_output(cmd, shell=True)
status = re.findall('"status": "([A-Za-z]+)",', out.decode("utf-8"))[0]
print(("... ... Status: {}".format(status)))
return 0
else:
print(("Describing job id {}...".format(jobid)))
cmd = cmd_template.format(jobid)
out = subprocess.check_output(cmd, shell=True)
status = re.findall('"status": "([A-Za-z]+)",', out.decode("utf-8"))[0]
print(("... Status: {}".format(status)))
return status
def kill_jobs(jobdir, reason='"Killing job"'):
"""
Given a list of jobs, kills them all.
"""
cmd_template1 = "aws batch cancel-job --job-id {} --reason {}"
cmd_template2 = "aws batch terminate-job --job-id {} --reason {}"
print(("Canelling/Terminating jobs in {}/ids/...".format(jobdir)))
jobs = os.listdir(jobdir + "/ids/")
batch = s3_client(service="batch")
jids = []
names = []
# grab info about all the jobs
for job in jobs:
with open("{}/ids/{}".format(jobdir, job), "r") as inf:
submission = json.load(inf)
jid = submission["jobId"]
name = submission["jobName"]
jids.append(jid)
names.append(name)
for jid in jids:
print("Terminating job {}".format(jid))
batch.terminate_job(jobId=jid, reason=reason)
# status = get_status(jobdir, jid)
# if status in ["SUCCEEDED", "FAILED"]:
# print(("... No action needed for {}...".format(name)))
# elif status in ["SUBMITTED", "PENDING", "RUNNABLE"]:
# cmd = cmd_template1.format(jid, reason)
# print(("... Cancelling job {}...".format(name)))
# out = subprocess.check_output(cmd, shell=True)
# elif status in ["STARTING", "RUNNING"]:
# cmd = cmd_template2.format(jid, reason)
# print(("... Terminating job {}...".format(name)))
# out = subprocess.check_output(cmd, shell=True)
# else:
# print("... Unknown status??")
#%%
def main():
parser = ArgumentParser(
description="This is a pipeline for running BIDs-formatted diffusion MRI datasets through AWS S3 to produce connectomes."
)
parser.add_argument(
"state",
choices=["participant", "status", "kill"],
default="participant",
help="determines the function to be performed by " "this function.",
)
parser.add_argument(
"--bucket",
help="The S3 bucket with the input dataset"
" formatted according to the BIDS standard.",
)
parser.add_argument(
"--bidsdir",
help="The directory where the dataset"
" lives on the S3 bucket should be stored. If you"
" level analysis this folder should be prepopulated"
" with the results of the participant level analysis.",
)
parser.add_argument(
"--jobdir", action="store", help="Dir of batch jobs to" " generate/check up on."
)
parser.add_argument(
"--credentials", action="store", help="AWS formatted" " csv of credentials."
)
parser.add_argument(
"--log",
action="store_true",
help="flag to indicate" " log plotting in group analysis.",
default=False,
)
parser.add_argument(
"--debug",
action="store_true",
help="flag to store " "temp files along the path of processing.",
default=False,
)
parser.add_argument("--dataset", action="store", help="Dataset name")
parser.add_argument(
"-b",
"--big",
action="store",
default="False",
help="whether or not to produce voxelwise big graph",
)
parser.add_argument(
"--modif",
action="store",
help="Name of folder on s3 to push to. If empty, push to a folder with ndmg's version number.",
default="",
)
parser.add_argument(
"--sp",
action="store",
help="Space for tractography. Default is native.",
default="native",
)
parser.add_argument(
"--mod",
action="store",
help="Determinstic (det) or probabilistic (prob) tracking. Default is det.",
default="det",
)
result = parser.parse_args()
bucket = result.bucket
path = result.bidsdir
path = path.strip("/") if path is not None else path
debug = result.debug
state = result.state
creds = result.credentials
jobdir = result.jobdir
dset = result.dataset
log = result.log
bg = result.big != "False"
modif = result.modif
reg_style = result.sp
mod_type = result.mod
if jobdir is None:
jobdir = "./"
if (bucket is None or path is None) and (state != "status" and state != "kill"):
sys.exit(
"Requires either path to bucket and data, or the status flag"
" and job IDs to query.\n Try:\n ndmg_cloud --help"
)
if state == "status":
print("Checking job status...")
get_status(jobdir)
elif state == "kill":
print("Killing jobs...")
kill_jobs(jobdir)
elif state == "participant":
print("Beginning batch submission process...")
if not os.path.exists(jobdir):
print("job directory not found. Creating...")
Path(jobdir).mkdir(parents=True)
batch_submit(
bucket,
path,
jobdir,
creds,
state,
debug,
dset,
log,
bg,
modif=modif,
reg_style=reg_style,
mod_type=mod_type,
)
sys.exit(0)
if __name__ == "__main__":
main()
|
[] |
[] |
[
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY"
] |
[]
|
["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"]
|
python
| 2 | 0 | |
okitweb/okitImport.py
|
#!/usr/bin/python
"""Provide Module Description
"""
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
__author__ = ["Andrew Hopkinson (Oracle Cloud Solutions A-Team)"]
__version__ = "1.0.0.0"
__module__ = "okitImport"
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
import os
import urllib
from flask import Blueprint
from flask import request
import json
from werkzeug.utils import secure_filename
from common.okitCommon import logJson
from common.okitLogging import getLogger
from parsers.okitHclJsonParser import OkitHclJsonParser
from parsers.okitCceJsonParser import OkitCceJsonParser
from parsers.okitCd3ExcelParser import OkitCd3ExcelParser
# Configure logging
logger = getLogger()
bp = Blueprint('parsers', __name__, url_prefix='/okit/parse', static_folder='static/okit')
# Interpret DEBUG_MODE as a boolean; bool() of any non-empty string is always True.
debug_mode = str(os.getenv('DEBUG_MODE', 'False')).title() == 'True'
@bp.route('hcljson', methods=(['GET']))
def parseHclJson():
#logger.debug('JSON : {0:s}'.format(str(request.json)))
if request.method == 'GET':
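# The JSON document arrives URL-encoded in the query string; decode it before parsing.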
query_string = request.query_string
parsed_query_string = urllib.parse.unquote(query_string.decode())
query_json = json.loads(parsed_query_string)
logJson(query_json)
# Import HCL
parser = OkitHclJsonParser()
response_json = parser.parse(query_json)
logJson(response_json)
return json.dumps(response_json, sort_keys=False, indent=2, separators=(',', ': '))
else:
return '404'
@bp.route('ccejson', methods=(['GET']))
def parseCceJson():
#logger.debug('JSON : {0:s}'.format(str(request.json)))
if request.method == 'GET':
query_string = request.query_string
parsed_query_string = urllib.parse.unquote(query_string.decode())
query_json = json.loads(parsed_query_string)
logJson(query_json)
# Import CCE
parser = OkitCceJsonParser()
response_json = parser.parse(query_json)
logJson(response_json)
return json.dumps(response_json, sort_keys=False, indent=2, separators=(',', ': '))
else:
return '404'
@bp.route('cd3xlsx', methods=(['POST']))
def parseCd3Xlsx():
if request.method == 'POST':
if 'file' in request.files:
file = request.files['file']
if file and file.filename != '':
filename = os.path.join('/okit/workspace', secure_filename(file.filename))
logger.info("Saving Xlsx File {0!s:s}".format(filename))
file.save(filename)
# Import CD3
parser = OkitCd3ExcelParser()
response_json = parser.parse(filename)
logJson(response_json)
else:
response_json = {}
return json.dumps(response_json, sort_keys=False, indent=2, separators=(',', ': '))
else:
return '404'
|
[] |
[] |
[
"DEBUG_MODE"
] |
[]
|
["DEBUG_MODE"]
|
python
| 1 | 0 | |
src/server/server/celery.py
|
from celery import Celery
import requests
import os
from django.conf import settings
import django
# Configure the Django settings module before the Celery app and Django are initialised.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'CentralServer.settings')
app = Celery('tasks', backend='redis://localhost/0', broker='pyamqp://guest@localhost//',
             include=['server.tasks'])
app.autodiscover_tasks()
django.setup()
if __name__ == '__main__':
app.start()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
runtests.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os, sys, warnings
parent = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, parent)
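# Fall back to the bundled test project's settings unless DJANGO_SETTINGS_MODULE is already set.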
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test_project.settings")
import django
from django.core.management import execute_from_command_line
if django.VERSION < (1, 6):
default_test_apps = [
'sortedm2m_tests',
'test_south_support',
]
else:
default_test_apps = [
'sortedm2m_tests',
]
# Only test south support for Django 1.6 and lower.
if django.VERSION < (1, 7):
default_test_apps += [
'test_south_support',
]
def runtests(*args):
if django.VERSION > (1, 8):
warnings.simplefilter("error", Warning)
warnings.filterwarnings("ignore", module="distutils")
try:
warnings.filterwarnings("ignore", category=ResourceWarning)
except NameError:
pass
warnings.filterwarnings("ignore", "invalid escape sequence", DeprecationWarning)
# Ignore a python 3.6 DeprecationWarning in ModelBase.__new__ that isn't
# fixed in Django 1.x
if sys.version_info > (3, 6) and django.VERSION < (2,):
warnings.filterwarnings(
"ignore", "__class__ not set defining", DeprecationWarning)
test_apps = list(args or default_test_apps)
execute_from_command_line([sys.argv[0], 'test', '--verbosity=1'] + test_apps)
if __name__ == '__main__':
runtests(*sys.argv[1:])
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
main_test.go
|
package orm_test
import (
"database/sql"
"database/sql/driver"
"fmt"
"os"
"path/filepath"
"reflect"
"strconv"
"testing"
"time"
"github.com/erikstmartin/go-testdb"
"ireul.com/now"
"ireul.com/orm"
_ "ireul.com/orm/dialects/mssql"
_ "ireul.com/orm/dialects/mysql"
"ireul.com/orm/dialects/postgres"
_ "ireul.com/orm/dialects/sqlite"
)
var (
DB *orm.DB
t1, t2, t3, t4, t5 time.Time
)
func init() {
var err error
if DB, err = OpenTestConnection(); err != nil {
panic(fmt.Sprintf("No error should happen when connecting to test database, but got err=%+v", err))
}
runMigration()
}
func OpenTestConnection() (db *orm.DB, err error) {
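// The dialect under test is selected via the orm_DIALECT environment variable;
// orm_DSN, if set, overrides the default connection string for that dialect.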
dbDSN := os.Getenv("orm_DSN")
switch os.Getenv("orm_DIALECT") {
case "mysql":
fmt.Println("testing mysql...")
if dbDSN == "" {
dbDSN = "orm:orm@tcp(localhost:9910)/orm?charset=utf8&parseTime=True"
}
db, err = orm.Open("mysql", dbDSN)
case "postgres":
fmt.Println("testing postgres...")
if dbDSN == "" {
dbDSN = "user=orm password=orm DB.name=orm port=9920 sslmode=disable"
}
db, err = orm.Open("postgres", dbDSN)
case "mssql":
// CREATE LOGIN orm WITH PASSWORD = 'LoremIpsum86';
// CREATE DATABASE orm;
// USE orm;
// CREATE USER orm FROM LOGIN orm;
// sp_changedbowner 'orm';
fmt.Println("testing mssql...")
if dbDSN == "" {
dbDSN = "sqlserver://orm:LoremIpsum86@localhost:9930?database=orm"
}
db, err = orm.Open("mssql", dbDSN)
default:
fmt.Println("testing sqlite3...")
db, err = orm.Open("sqlite3", filepath.Join(os.TempDir(), "orm.db"))
}
// db.SetLogger(Logger{log.New(os.Stdout, "\r\n", 0)})
// db.SetLogger(log.New(os.Stdout, "\r\n", 0))
if debug := os.Getenv("DEBUG"); debug == "true" {
db.LogMode(true)
} else if debug == "false" {
db.LogMode(false)
}
db.DB().SetMaxIdleConns(10)
return
}
func TestStringPrimaryKey(t *testing.T) {
type UUIDStruct struct {
ID string `orm:"primary_key"`
Name string
}
DB.DropTable(&UUIDStruct{})
DB.AutoMigrate(&UUIDStruct{})
data := UUIDStruct{ID: "uuid", Name: "hello"}
if err := DB.Save(&data).Error; err != nil || data.ID != "uuid" || data.Name != "hello" {
t.Errorf("string primary key should not be populated")
}
data = UUIDStruct{ID: "uuid", Name: "hello world"}
if err := DB.Save(&data).Error; err != nil || data.ID != "uuid" || data.Name != "hello world" {
t.Errorf("string primary key should not be populated")
}
}
func TestExceptionsWithInvalidSql(t *testing.T) {
var columns []string
if DB.Where("sdsd.zaaa = ?", "sd;;;aa").Pluck("aaa", &columns).Error == nil {
t.Errorf("Should got error with invalid SQL")
}
if DB.Model(&User{}).Where("sdsd.zaaa = ?", "sd;;;aa").Pluck("aaa", &columns).Error == nil {
t.Errorf("Should got error with invalid SQL")
}
if DB.Where("sdsd.zaaa = ?", "sd;;;aa").Find(&User{}).Error == nil {
t.Errorf("Should got error with invalid SQL")
}
var count1, count2 int64
DB.Model(&User{}).Count(&count1)
if count1 <= 0 {
t.Errorf("Should find some users")
}
if DB.Where("name = ?", "jinzhu; delete * from users").First(&User{}).Error == nil {
t.Errorf("Should got error with invalid SQL")
}
DB.Model(&User{}).Count(&count2)
if count1 != count2 {
t.Errorf("No user should not be deleted by invalid SQL")
}
}
func TestSetTable(t *testing.T) {
DB.Create(getPreparedUser("pluck_user1", "pluck_user"))
DB.Create(getPreparedUser("pluck_user2", "pluck_user"))
DB.Create(getPreparedUser("pluck_user3", "pluck_user"))
if err := DB.Table("users").Where("role = ?", "pluck_user").Pluck("age", &[]int{}).Error; err != nil {
t.Error("No errors should happen if set table for pluck", err)
}
var users []User
if DB.Table("users").Find(&[]User{}).Error != nil {
t.Errorf("No errors should happen if set table for find")
}
if DB.Table("invalid_table").Find(&users).Error == nil {
t.Errorf("Should got error when table is set to an invalid table")
}
DB.Exec("drop table deleted_users;")
if DB.Table("deleted_users").CreateTable(&User{}).Error != nil {
t.Errorf("Create table with specified table")
}
DB.Table("deleted_users").Save(&User{Name: "DeletedUser"})
var deletedUsers []User
DB.Table("deleted_users").Find(&deletedUsers)
if len(deletedUsers) != 1 {
t.Errorf("Query from specified table")
}
DB.Save(getPreparedUser("normal_user", "reset_table"))
DB.Table("deleted_users").Save(getPreparedUser("deleted_user", "reset_table"))
var user1, user2, user3 User
DB.Where("role = ?", "reset_table").First(&user1).Table("deleted_users").First(&user2).Table("").First(&user3)
if (user1.Name != "normal_user") || (user2.Name != "deleted_user") || (user3.Name != "normal_user") {
t.Errorf("unset specified table with blank string")
}
}
type Order struct {
}
type Cart struct {
}
func (c Cart) TableName() string {
return "shopping_cart"
}
func TestHasTable(t *testing.T) {
type Foo struct {
Id int
Stuff string
}
DB.DropTable(&Foo{})
// Table should not exist at this point, HasTable should return false
if ok := DB.HasTable("foos"); ok {
t.Errorf("Table should not exist, but does")
}
if ok := DB.HasTable(&Foo{}); ok {
t.Errorf("Table should not exist, but does")
}
// We create the table
if err := DB.CreateTable(&Foo{}).Error; err != nil {
t.Errorf("Table should be created")
}
// And now it should exits, and HasTable should return true
if ok := DB.HasTable("foos"); !ok {
t.Errorf("Table should exist, but HasTable informs it does not")
}
if ok := DB.HasTable(&Foo{}); !ok {
t.Errorf("Table should exist, but HasTable informs it does not")
}
}
func TestTableName(t *testing.T) {
DB := DB.Model("")
if DB.NewScope(Order{}).TableName() != "orders" {
t.Errorf("Order's table name should be orders")
}
if DB.NewScope(&Order{}).TableName() != "orders" {
t.Errorf("&Order's table name should be orders")
}
if DB.NewScope([]Order{}).TableName() != "orders" {
t.Errorf("[]Order's table name should be orders")
}
if DB.NewScope(&[]Order{}).TableName() != "orders" {
t.Errorf("&[]Order's table name should be orders")
}
DB.SingularTable(true)
if DB.NewScope(Order{}).TableName() != "order" {
t.Errorf("Order's singular table name should be order")
}
if DB.NewScope(&Order{}).TableName() != "order" {
t.Errorf("&Order's singular table name should be order")
}
if DB.NewScope([]Order{}).TableName() != "order" {
t.Errorf("[]Order's singular table name should be order")
}
if DB.NewScope(&[]Order{}).TableName() != "order" {
t.Errorf("&[]Order's singular table name should be order")
}
if DB.NewScope(&Cart{}).TableName() != "shopping_cart" {
t.Errorf("&Cart's singular table name should be shopping_cart")
}
if DB.NewScope(Cart{}).TableName() != "shopping_cart" {
t.Errorf("Cart's singular table name should be shopping_cart")
}
if DB.NewScope(&[]Cart{}).TableName() != "shopping_cart" {
t.Errorf("&[]Cart's singular table name should be shopping_cart")
}
if DB.NewScope([]Cart{}).TableName() != "shopping_cart" {
t.Errorf("[]Cart's singular table name should be shopping_cart")
}
DB.SingularTable(false)
}
func TestNullValues(t *testing.T) {
DB.DropTable(&NullValue{})
DB.AutoMigrate(&NullValue{})
if err := DB.Save(&NullValue{
Name: sql.NullString{String: "hello", Valid: true},
Gender: &sql.NullString{String: "M", Valid: true},
Age: sql.NullInt64{Int64: 18, Valid: true},
Male: sql.NullBool{Bool: true, Valid: true},
Height: sql.NullFloat64{Float64: 100.11, Valid: true},
AddedAt: NullTime{Time: time.Now(), Valid: true},
}).Error; err != nil {
t.Errorf("Not error should raise when test null value")
}
var nv NullValue
DB.First(&nv, "name = ?", "hello")
if nv.Name.String != "hello" || nv.Gender.String != "M" || nv.Age.Int64 != 18 || nv.Male.Bool != true || nv.Height.Float64 != 100.11 || nv.AddedAt.Valid != true {
t.Errorf("Should be able to fetch null value")
}
if err := DB.Save(&NullValue{
Name: sql.NullString{String: "hello-2", Valid: true},
Gender: &sql.NullString{String: "F", Valid: true},
Age: sql.NullInt64{Int64: 18, Valid: false},
Male: sql.NullBool{Bool: true, Valid: true},
Height: sql.NullFloat64{Float64: 100.11, Valid: true},
AddedAt: NullTime{Time: time.Now(), Valid: false},
}).Error; err != nil {
t.Errorf("Not error should raise when test null value")
}
var nv2 NullValue
DB.First(&nv2, "name = ?", "hello-2")
if nv2.Name.String != "hello-2" || nv2.Gender.String != "F" || nv2.Age.Int64 != 0 || nv2.Male.Bool != true || nv2.Height.Float64 != 100.11 || nv2.AddedAt.Valid != false {
t.Errorf("Should be able to fetch null value")
}
if err := DB.Save(&NullValue{
Name: sql.NullString{String: "hello-3", Valid: false},
Gender: &sql.NullString{String: "M", Valid: true},
Age: sql.NullInt64{Int64: 18, Valid: false},
Male: sql.NullBool{Bool: true, Valid: true},
Height: sql.NullFloat64{Float64: 100.11, Valid: true},
AddedAt: NullTime{Time: time.Now(), Valid: false},
}).Error; err == nil {
t.Errorf("Can't save because of name can't be null")
}
}
func TestNullValuesWithFirstOrCreate(t *testing.T) {
var nv1 = NullValue{
Name: sql.NullString{String: "first_or_create", Valid: true},
Gender: &sql.NullString{String: "M", Valid: true},
}
var nv2 NullValue
result := DB.Where(nv1).FirstOrCreate(&nv2)
if result.RowsAffected != 1 {
t.Errorf("RowsAffected should be 1 after create some record")
}
if result.Error != nil {
t.Errorf("Should not raise any error, but got %v", result.Error)
}
if nv2.Name.String != "first_or_create" || nv2.Gender.String != "M" {
t.Errorf("first or create with nullvalues")
}
if err := DB.Where(nv1).Assign(NullValue{Age: sql.NullInt64{Int64: 18, Valid: true}}).FirstOrCreate(&nv2).Error; err != nil {
t.Errorf("Should not raise any error, but got %v", err)
}
if nv2.Age.Int64 != 18 {
t.Errorf("should update age to 18")
}
}
func TestTransaction(t *testing.T) {
tx := DB.Begin()
u := User{Name: "transcation"}
if err := tx.Save(&u).Error; err != nil {
t.Errorf("No error should raise")
}
if err := tx.First(&User{}, "name = ?", "transcation").Error; err != nil {
t.Errorf("Should find saved record")
}
if sqlTx, ok := tx.CommonDB().(*sql.Tx); !ok || sqlTx == nil {
t.Errorf("Should return the underlying sql.Tx")
}
tx.Rollback()
if err := tx.First(&User{}, "name = ?", "transcation").Error; err == nil {
t.Errorf("Should not find record after rollback")
}
tx2 := DB.Begin()
u2 := User{Name: "transcation-2"}
if err := tx2.Save(&u2).Error; err != nil {
t.Errorf("No error should raise")
}
if err := tx2.First(&User{}, "name = ?", "transcation-2").Error; err != nil {
t.Errorf("Should find saved record")
}
tx2.Commit()
if err := DB.First(&User{}, "name = ?", "transcation-2").Error; err != nil {
t.Errorf("Should be able to find committed record")
}
}
func TestRow(t *testing.T) {
user1 := User{Name: "RowUser1", Age: 1, Birthday: parseTime("2000-1-1")}
user2 := User{Name: "RowUser2", Age: 10, Birthday: parseTime("2010-1-1")}
user3 := User{Name: "RowUser3", Age: 20, Birthday: parseTime("2020-1-1")}
DB.Save(&user1).Save(&user2).Save(&user3)
row := DB.Table("users").Where("name = ?", user2.Name).Select("age").Row()
var age int64
row.Scan(&age)
if age != 10 {
t.Errorf("Scan with Row")
}
}
func TestRows(t *testing.T) {
user1 := User{Name: "RowsUser1", Age: 1, Birthday: parseTime("2000-1-1")}
user2 := User{Name: "RowsUser2", Age: 10, Birthday: parseTime("2010-1-1")}
user3 := User{Name: "RowsUser3", Age: 20, Birthday: parseTime("2020-1-1")}
DB.Save(&user1).Save(&user2).Save(&user3)
rows, err := DB.Table("users").Where("name = ? or name = ?", user2.Name, user3.Name).Select("name, age").Rows()
if err != nil {
t.Errorf("Not error should happen, got %v", err)
}
count := 0
for rows.Next() {
var name string
var age int64
rows.Scan(&name, &age)
count++
}
if count != 2 {
t.Errorf("Should found two records")
}
}
func TestScanRows(t *testing.T) {
user1 := User{Name: "ScanRowsUser1", Age: 1, Birthday: parseTime("2000-1-1")}
user2 := User{Name: "ScanRowsUser2", Age: 10, Birthday: parseTime("2010-1-1")}
user3 := User{Name: "ScanRowsUser3", Age: 20, Birthday: parseTime("2020-1-1")}
DB.Save(&user1).Save(&user2).Save(&user3)
rows, err := DB.Table("users").Where("name = ? or name = ?", user2.Name, user3.Name).Select("name, age").Rows()
if err != nil {
t.Errorf("Not error should happen, got %v", err)
}
type Result struct {
Name string
Age int
}
var results []Result
for rows.Next() {
var result Result
if err := DB.ScanRows(rows, &result); err != nil {
t.Errorf("should get no error, but got %v", err)
}
results = append(results, result)
}
if !reflect.DeepEqual(results, []Result{{Name: "ScanRowsUser2", Age: 10}, {Name: "ScanRowsUser3", Age: 20}}) {
t.Errorf("Should find expected results")
}
}
func TestScan(t *testing.T) {
user1 := User{Name: "ScanUser1", Age: 1, Birthday: parseTime("2000-1-1")}
user2 := User{Name: "ScanUser2", Age: 10, Birthday: parseTime("2010-1-1")}
user3 := User{Name: "ScanUser3", Age: 20, Birthday: parseTime("2020-1-1")}
DB.Save(&user1).Save(&user2).Save(&user3)
type result struct {
Name string
Age int
}
var res result
DB.Table("users").Select("name, age").Where("name = ?", user3.Name).Scan(&res)
if res.Name != user3.Name {
t.Errorf("Scan into struct should work")
}
var doubleAgeRes = &result{}
if err := DB.Table("users").Select("age + age as age").Where("name = ?", user3.Name).Scan(&doubleAgeRes).Error; err != nil {
t.Errorf("Scan to pointer of pointer")
}
if doubleAgeRes.Age != res.Age*2 {
t.Errorf("Scan double age as age")
}
var ress []result
DB.Table("users").Select("name, age").Where("name in (?)", []string{user2.Name, user3.Name}).Scan(&ress)
if len(ress) != 2 || ress[0].Name != user2.Name || ress[1].Name != user3.Name {
t.Errorf("Scan into struct map")
}
}
func TestRaw(t *testing.T) {
user1 := User{Name: "ExecRawSqlUser1", Age: 1, Birthday: parseTime("2000-1-1")}
user2 := User{Name: "ExecRawSqlUser2", Age: 10, Birthday: parseTime("2010-1-1")}
user3 := User{Name: "ExecRawSqlUser3", Age: 20, Birthday: parseTime("2020-1-1")}
DB.Save(&user1).Save(&user2).Save(&user3)
type result struct {
Name string
Email string
}
var ress []result
DB.Raw("SELECT name, age FROM users WHERE name = ? or name = ?", user2.Name, user3.Name).Scan(&ress)
if len(ress) != 2 || ress[0].Name != user2.Name || ress[1].Name != user3.Name {
t.Errorf("Raw with scan")
}
rows, _ := DB.Raw("select name, age from users where name = ?", user3.Name).Rows()
count := 0
for rows.Next() {
count++
}
if count != 1 {
t.Errorf("Raw with Rows should find one record with name 3")
}
DB.Exec("update users set name=? where name in (?)", "jinzhu", []string{user1.Name, user2.Name, user3.Name})
if DB.Where("name in (?)", []string{user1.Name, user2.Name, user3.Name}).First(&User{}).Error != orm.ErrRecordNotFound {
t.Error("Raw sql to update records")
}
}
func TestGroup(t *testing.T) {
rows, err := DB.Select("name").Table("users").Group("name").Rows()
if err == nil {
defer rows.Close()
for rows.Next() {
var name string
rows.Scan(&name)
}
} else {
t.Errorf("Should not raise any error")
}
}
func TestJoins(t *testing.T) {
var user = User{
Name: "joins",
CreditCard: CreditCard{Number: "411111111111"},
Emails: []Email{{Email: "[email protected]"}, {Email: "[email protected]"}},
}
DB.Save(&user)
var users1 []User
DB.Joins("left join emails on emails.user_id = users.id").Where("name = ?", "joins").Find(&users1)
if len(users1) != 2 {
t.Errorf("should find two users using left join")
}
var users2 []User
DB.Joins("left join emails on emails.user_id = users.id AND emails.email = ?", "[email protected]").Where("name = ?", "joins").First(&users2)
if len(users2) != 1 {
t.Errorf("should find one users using left join with conditions")
}
var users3 []User
DB.Joins("join emails on emails.user_id = users.id AND emails.email = ?", "[email protected]").Joins("join credit_cards on credit_cards.user_id = users.id AND credit_cards.number = ?", "411111111111").Where("name = ?", "joins").First(&users3)
if len(users3) != 1 {
t.Errorf("should find one users using multiple left join conditions")
}
var users4 []User
DB.Joins("join emails on emails.user_id = users.id AND emails.email = ?", "[email protected]").Joins("join credit_cards on credit_cards.user_id = users.id AND credit_cards.number = ?", "422222222222").Where("name = ?", "joins").First(&users4)
if len(users4) != 0 {
t.Errorf("should find no user when searching with unexisting credit card")
}
var users5 []User
db5 := DB.Joins("join emails on emails.user_id = users.id AND emails.email = ?", "[email protected]").Joins("join credit_cards on credit_cards.user_id = users.id AND credit_cards.number = ?", "411111111111").Where(User{Id: 1}).Where(Email{Id: 1}).Not(Email{Id: 10}).First(&users5)
if db5.Error != nil {
t.Errorf("Should not raise error for join where identical fields in different tables. Error: %s", db5.Error.Error())
}
}
func TestJoinsWithSelect(t *testing.T) {
type result struct {
Name string
Email string
}
user := User{
Name: "joins_with_select",
Emails: []Email{{Email: "[email protected]"}, {Email: "[email protected]"}},
}
DB.Save(&user)
var results []result
DB.Table("users").Select("name, emails.email").Joins("left join emails on emails.user_id = users.id").Where("name = ?", "joins_with_select").Scan(&results)
if len(results) != 2 || results[0].Email != "[email protected]" || results[1].Email != "[email protected]" {
t.Errorf("Should find all two emails with Join select")
}
}
func TestHaving(t *testing.T) {
rows, err := DB.Select("name, count(*) as total").Table("users").Group("name").Having("name IN (?)", []string{"2", "3"}).Rows()
if err == nil {
defer rows.Close()
for rows.Next() {
var name string
var total int64
rows.Scan(&name, &total)
if name == "2" && total != 1 {
t.Errorf("Should have one user having name 2")
}
if name == "3" && total != 2 {
t.Errorf("Should have two users having name 3")
}
}
} else {
t.Errorf("Should not raise any error")
}
}
func TestQueryBuilderSubselectInWhere(t *testing.T) {
user := User{Name: "query_expr_select_ruser1", Email: "[email protected]", Age: 32}
DB.Save(&user)
user = User{Name: "query_expr_select_ruser2", Email: "[email protected]", Age: 16}
DB.Save(&user)
user = User{Name: "query_expr_select_ruser3", Email: "[email protected]", Age: 64}
DB.Save(&user)
user = User{Name: "query_expr_select_ruser4", Email: "[email protected]", Age: 128}
DB.Save(&user)
var users []User
DB.Select("*").Where("name IN (?)", DB.
Select("name").Table("users").Where("name LIKE ?", "query_expr_select%").QueryExpr()).Find(&users)
if len(users) != 4 {
t.Errorf("Four users should be found, instead found %d", len(users))
}
DB.Select("*").Where("name LIKE ?", "query_expr_select%").Where("age >= (?)", DB.
Select("AVG(age)").Table("users").Where("name LIKE ?", "query_expr_select%").QueryExpr()).Find(&users)
if len(users) != 2 {
t.Errorf("Two users should be found, instead found %d", len(users))
}
}
func TestQueryBuilderRawQueryWithSubquery(t *testing.T) {
user := User{Name: "subquery_test_user1", Age: 10}
DB.Save(&user)
user = User{Name: "subquery_test_user2", Age: 11}
DB.Save(&user)
user = User{Name: "subquery_test_user3", Age: 12}
DB.Save(&user)
var count int
err := DB.Raw("select count(*) from (?) tmp",
DB.Table("users").
Select("name").
Where("age >= ? and name in (?)", 10, []string{"subquery_test_user1", "subquery_test_user2"}).
Group("name").
QueryExpr(),
).Count(&count).Error
if err != nil {
t.Errorf("Expected to get no errors, but got %v", err)
}
if count != 2 {
t.Errorf("Row count must be 2, instead got %d", count)
}
err = DB.Raw("select count(*) from (?) tmp",
DB.Table("users").
Select("name").
Where("name LIKE ?", "subquery_test%").
Not("age <= ?", 10).Not("name in (?)", []string{"subquery_test_user1", "subquery_test_user2"}).
Group("name").
QueryExpr(),
).Count(&count).Error
if err != nil {
t.Errorf("Expected to get no errors, but got %v", err)
}
if count != 1 {
t.Errorf("Row count must be 1, instead got %d", count)
}
}
func TestQueryBuilderSubselectInHaving(t *testing.T) {
user := User{Name: "query_expr_having_ruser1", Email: "[email protected]", Age: 64}
DB.Save(&user)
user = User{Name: "query_expr_having_ruser2", Email: "[email protected]", Age: 128}
DB.Save(&user)
user = User{Name: "query_expr_having_ruser3", Email: "[email protected]", Age: 64}
DB.Save(&user)
user = User{Name: "query_expr_having_ruser4", Email: "[email protected]", Age: 128}
DB.Save(&user)
var users []User
DB.Select("AVG(age) as avgage").Where("name LIKE ?", "query_expr_having_%").Group("email").Having("AVG(age) > (?)", DB.
Select("AVG(age)").Where("name LIKE ?", "query_expr_having_%").Table("users").QueryExpr()).Find(&users)
if len(users) != 1 {
t.Errorf("Two user group should be found, instead found %d", len(users))
}
}
func DialectHasTzSupport() bool {
// NB: mssql and FoundationDB do not support time zones.
if dialect := os.Getenv("orm_DIALECT"); dialect == "foundation" {
return false
}
return true
}
func TestTimeWithZone(t *testing.T) {
var format = "2006-01-02 15:04:05 -0700"
var times []time.Time
GMT8, _ := time.LoadLocation("Asia/Shanghai")
times = append(times, time.Date(2013, 02, 19, 1, 51, 49, 123456789, GMT8))
times = append(times, time.Date(2013, 02, 18, 17, 51, 49, 123456789, time.UTC))
for index, vtime := range times {
name := "time_with_zone_" + strconv.Itoa(index)
user := User{Name: name, Birthday: &vtime}
if !DialectHasTzSupport() {
// If our driver dialect doesn't support TZ's, just use UTC for everything here.
utcBirthday := user.Birthday.UTC()
user.Birthday = &utcBirthday
}
DB.Save(&user)
expectedBirthday := "2013-02-18 17:51:49 +0000"
foundBirthday := user.Birthday.UTC().Format(format)
if foundBirthday != expectedBirthday {
t.Errorf("User's birthday should not be changed after save for name=%s, expected bday=%+v but actual value=%+v", name, expectedBirthday, foundBirthday)
}
var findUser, findUser2, findUser3 User
DB.First(&findUser, "name = ?", name)
foundBirthday = findUser.Birthday.UTC().Format(format)
if foundBirthday != expectedBirthday {
t.Errorf("User's birthday should not be changed after find for name=%s, expected bday=%+v but actual value=%+v", name, expectedBirthday, foundBirthday)
}
if DB.Where("id = ? AND birthday >= ?", findUser.Id, user.Birthday.Add(-time.Minute)).First(&findUser2).RecordNotFound() {
t.Errorf("User should be found")
}
if !DB.Where("id = ? AND birthday >= ?", findUser.Id, user.Birthday.Add(time.Minute)).First(&findUser3).RecordNotFound() {
t.Errorf("User should not be found")
}
}
}
func TestHstore(t *testing.T) {
type Details struct {
Id int64
Bulk postgres.Hstore
}
if dialect := os.Getenv("orm_DIALECT"); dialect != "postgres" {
t.Skip()
}
if err := DB.Exec("CREATE EXTENSION IF NOT EXISTS hstore").Error; err != nil {
fmt.Println("\033[31mHINT: Must be superuser to create hstore extension (ALTER USER orm WITH SUPERUSER;)\033[0m")
panic(fmt.Sprintf("No error should happen when create hstore extension, but got %+v", err))
}
DB.Exec("drop table details")
if err := DB.CreateTable(&Details{}).Error; err != nil {
panic(fmt.Sprintf("No error should happen when create table, but got %+v", err))
}
bankAccountId, phoneNumber, opinion := "123456", "14151321232", "sharkbait"
bulk := map[string]*string{
"bankAccountId": &bankAccountId,
"phoneNumber": &phoneNumber,
"opinion": &opinion,
}
d := Details{Bulk: bulk}
DB.Save(&d)
var d2 Details
if err := DB.First(&d2).Error; err != nil {
t.Errorf("Got error when tried to fetch details: %+v", err)
}
for k := range bulk {
if r, ok := d2.Bulk[k]; ok {
if res, _ := bulk[k]; *res != *r {
t.Errorf("Details should be equal")
}
} else {
t.Errorf("Details should be existed")
}
}
}
func TestSetAndGet(t *testing.T) {
if value, ok := DB.Set("hello", "world").Get("hello"); !ok {
t.Errorf("Should be able to get setting after set")
} else {
if value.(string) != "world" {
t.Errorf("Setted value should not be changed")
}
}
if _, ok := DB.Get("non_existing"); ok {
t.Errorf("Get non existing key should return error")
}
}
func TestCompatibilityMode(t *testing.T) {
DB, _ := orm.Open("testdb", "")
testdb.SetQueryFunc(func(query string) (driver.Rows, error) {
columns := []string{"id", "name", "age"}
result := `
1,Tim,20
2,Joe,25
3,Bob,30
`
return testdb.RowsFromCSVString(columns, result), nil
})
var users []User
DB.Find(&users)
if (users[0].Name != "Tim") || len(users) != 3 {
t.Errorf("Unexcepted result returned")
}
}
func TestOpenExistingDB(t *testing.T) {
DB.Save(&User{Name: "jnfeinstein"})
dialect := os.Getenv("orm_DIALECT")
db, err := orm.Open(dialect, DB.DB())
if err != nil {
t.Errorf("Should have wrapped the existing DB connection")
}
var user User
if db.Where("name = ?", "jnfeinstein").First(&user).Error == orm.ErrRecordNotFound {
t.Errorf("Should have found existing record")
}
}
func TestDdlErrors(t *testing.T) {
var err error
if err = DB.Close(); err != nil {
t.Errorf("Closing DDL test db connection err=%s", err)
}
defer func() {
// Reopen DB connection.
if DB, err = OpenTestConnection(); err != nil {
t.Fatalf("Failed re-opening db connection: %s", err)
}
}()
if err := DB.Find(&User{}).Error; err == nil {
t.Errorf("Expected operation on closed db to produce an error, but err was nil")
}
}
func TestOpenWithOneParameter(t *testing.T) {
db, err := orm.Open("dialect")
if db != nil {
t.Error("Open with one parameter returned non nil for db")
}
if err == nil {
t.Error("Open with one parameter returned err as nil")
}
}
func TestBlockGlobalUpdate(t *testing.T) {
db := DB.New()
db.Create(&Toy{Name: "Stuffed Animal", OwnerType: "Nobody"})
err := db.Model(&Toy{}).Update("OwnerType", "Human").Error
if err != nil {
t.Error("Unexpected error on global update")
}
err = db.Delete(&Toy{}).Error
if err != nil {
t.Error("Unexpected error on global delete")
}
db.BlockGlobalUpdate(true)
db.Create(&Toy{Name: "Stuffed Animal", OwnerType: "Nobody"})
err = db.Model(&Toy{}).Update("OwnerType", "Human").Error
if err == nil {
t.Error("Expected error on global update")
}
err = db.Model(&Toy{}).Where(&Toy{OwnerType: "Martian"}).Update("OwnerType", "Astronaut").Error
if err != nil {
t.Error("Unxpected error on conditional update")
}
err = db.Delete(&Toy{}).Error
if err == nil {
t.Error("Expected error on global delete")
}
err = db.Where(&Toy{OwnerType: "Martian"}).Delete(&Toy{}).Error
if err != nil {
t.Error("Unexpected error on conditional delete")
}
}
func BenchmarkOrm(b *testing.B) {
b.N = 2000
for x := 0; x < b.N; x++ {
e := strconv.Itoa(x) + "[email protected]"
now := time.Now()
email := EmailWithIdx{Email: e, UserAgent: "pc", RegisteredAt: &now}
// Insert
DB.Save(&email)
// Query
DB.First(&EmailWithIdx{}, "email = ?", e)
// Update
DB.Model(&email).UpdateColumn("email", "new-"+e)
// Delete
DB.Delete(&email)
}
}
func BenchmarkRawSql(b *testing.B) {
DB, _ := sql.Open("postgres", "user=orm DB.ame=orm sslmode=disable")
DB.SetMaxIdleConns(10)
insertSql := "INSERT INTO emails (user_id,email,user_agent,registered_at,created_at,updated_at) VALUES ($1,$2,$3,$4,$5,$6) RETURNING id"
querySql := "SELECT * FROM emails WHERE email = $1 ORDER BY id LIMIT 1"
updateSql := "UPDATE emails SET email = $1, updated_at = $2 WHERE id = $3"
deleteSql := "DELETE FROM orders WHERE id = $1"
b.N = 2000
for x := 0; x < b.N; x++ {
var id int64
e := strconv.Itoa(x) + "[email protected]"
now := time.Now()
email := EmailWithIdx{Email: e, UserAgent: "pc", RegisteredAt: &now}
// Insert
DB.QueryRow(insertSql, email.UserId, email.Email, email.UserAgent, email.RegisteredAt, time.Now(), time.Now()).Scan(&id)
// Query
rows, _ := DB.Query(querySql, email.Email)
rows.Close()
// Update
DB.Exec(updateSql, "new-"+e, time.Now(), id)
// Delete
DB.Exec(deleteSql, id)
}
}
func parseTime(str string) *time.Time {
t := now.New(time.Now().UTC()).MustParse(str)
return &t
}
|
[
"\"orm_DSN\"",
"\"orm_DIALECT\"",
"\"DEBUG\"",
"\"orm_DIALECT\"",
"\"orm_DIALECT\"",
"\"orm_DIALECT\""
] |
[] |
[
"orm_DSN",
"orm_DIALECT",
"DEBUG"
] |
[]
|
["orm_DSN", "orm_DIALECT", "DEBUG"]
|
go
| 3 | 0 | |
matplotlib-3.4.3/matplotlib-3.4.3/doc/conf.py
|
# Matplotlib documentation build configuration file, created by
# sphinx-quickstart on Fri May 2 12:33:25 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import os
from pathlib import Path
import shutil
import subprocess
import sys
import warnings
import matplotlib
from matplotlib._api import MatplotlibDeprecationWarning
import sphinx
from datetime import datetime
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.append(os.path.abspath('.'))
sys.path.append('.')
# General configuration
# ---------------------
# Unless we catch the warning explicitly somewhere, a warning should cause the
# docs build to fail. This is especially useful for getting rid of deprecated
# usage in the gallery.
warnings.filterwarnings('error', append=True)
# Strip backslashes in function's signature
# To be removed when numpydoc > 0.9.x
strip_signature_backslash = True
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.intersphinx',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'IPython.sphinxext.ipython_console_highlighting',
'IPython.sphinxext.ipython_directive',
'numpydoc', # Needs to be loaded *after* autodoc.
'sphinx_gallery.gen_gallery',
'matplotlib.sphinxext.mathmpl',
'matplotlib.sphinxext.plot_directive',
'sphinxcontrib.inkscapeconverter',
'sphinxext.custom_roles',
'sphinxext.github',
'sphinxext.math_symbol_table',
'sphinxext.missing_references',
'sphinxext.mock_gui_toolkits',
'sphinxext.skip_deprecated',
'sphinxext.redirect_from',
'sphinx_copybutton',
]
exclude_patterns = [
'api/prev_api_changes/api_changes_*/*',
# Be sure to update users/whats_new.rst:
'users/prev_whats_new/whats_new_3.4.0.rst',
]
def _check_dependencies():
names = {
"colorspacious": 'colorspacious',
"IPython.sphinxext.ipython_console_highlighting": 'ipython',
"matplotlib": 'matplotlib',
"numpydoc": 'numpydoc',
"PIL.Image": 'pillow',
"sphinx_copybutton": 'sphinx_copybutton',
"sphinx_gallery": 'sphinx_gallery',
"sphinxcontrib.inkscapeconverter": 'sphinxcontrib-svg2pdfconverter',
}
missing = []
for name in names:
try:
__import__(name)
except ImportError:
missing.append(names[name])
if missing:
raise ImportError(
"The following dependencies are missing to build the "
"documentation: {}".format(", ".join(missing)))
if shutil.which('dot') is None:
raise OSError(
"No binary named dot - graphviz must be installed to build the "
"documentation")
_check_dependencies()
# Import only after checking for dependencies.
# gallery_order.py from the sphinxext folder provides the classes that
# allow custom ordering of sections and subsections of the gallery
import sphinxext.gallery_order as gallery_order
# The following import is only necessary to monkey patch the signature later on
from sphinx_gallery import gen_rst
# On Linux, prevent plt.show() from emitting a non-GUI backend warning.
os.environ.pop("DISPLAY", None)
autosummary_generate = True
# we should ignore warnings coming from importing deprecated modules for
# autodoc purposes, as this will disappear automatically when they are removed
warnings.filterwarnings('ignore', category=MatplotlibDeprecationWarning,
module='importlib', # used by sphinx.autodoc.importer
message=r'(\n|.)*module was deprecated.*')
autodoc_docstring_signature = True
autodoc_default_options = {'members': None, 'undoc-members': None}
# make sure to ignore warnings that stem from simply inspecting deprecated
# class-level attributes
warnings.filterwarnings('ignore', category=MatplotlibDeprecationWarning,
module='sphinx.util.inspect')
# missing-references names matches sphinx>=3 behavior, so we can't be nitpicky
# for older sphinxes.
nitpicky = sphinx.version_info >= (3,)
# change this to True to update the allowed failures
missing_references_write_json = False
missing_references_warn_unused_ignores = False
intersphinx_mapping = {
'Pillow': ('https://pillow.readthedocs.io/en/stable/', None),
'cycler': ('https://matplotlib.org/cycler/', None),
'dateutil': ('https://dateutil.readthedocs.io/en/stable/', None),
'ipykernel': ('https://ipykernel.readthedocs.io/en/latest/', None),
'numpy': ('https://numpy.org/doc/stable/', None),
'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
'pytest': ('https://pytest.org/en/stable/', None),
'python': ('https://docs.python.org/3/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
}
# Sphinx gallery configuration
sphinx_gallery_conf = {
'examples_dirs': ['../examples', '../tutorials'],
'filename_pattern': '^((?!sgskip).)*$',
'gallery_dirs': ['gallery', 'tutorials'],
'doc_module': ('matplotlib', 'mpl_toolkits'),
'reference_url': {
'matplotlib': None,
'numpy': 'https://docs.scipy.org/doc/numpy/',
'scipy': 'https://docs.scipy.org/doc/scipy/reference/',
},
'backreferences_dir': Path('api') / Path('_as_gen'),
'subsection_order': gallery_order.sectionorder,
'within_subsection_order': gallery_order.subsectionorder,
'remove_config_comments': True,
'min_reported_time': 1,
'thumbnail_size': (320, 224),
'compress_images': ('thumbnails', 'images'),
'matplotlib_animations': True,
}
plot_gallery = 'True'
# Monkey-patching gallery signature to include search keywords
gen_rst.SPHX_GLR_SIG = """\n
.. only:: html
.. rst-class:: sphx-glr-signature
Keywords: matplotlib code example, codex, python plot, pyplot
`Gallery generated by Sphinx-Gallery
<https://sphinx-gallery.readthedocs.io>`_\n"""
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# This is the default encoding, but it doesn't hurt to be explicit
source_encoding = "utf-8"
# The master toctree document.
master_doc = 'contents'
# General substitutions.
try:
SHA = subprocess.check_output(
['git', 'describe', '--dirty']).decode('utf-8').strip()
# Catch the case where git is not installed locally, and use the versioneer
# version number instead
except (subprocess.CalledProcessError, FileNotFoundError):
SHA = matplotlib.__version__
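# Illustrative values (assumed, not from this file): with git available,
# `git describe --dirty` yields something like 'v3.4.2-130-g1a2b3c4d' (plus a
# '-dirty' suffix for uncommitted edits); without git, SHA falls back to the
# plain matplotlib version string, e.g. '3.4.2'.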
html_context = {
'sha': SHA,
# This will disable any analytics in the HTML templates (currently Google
# Analytics.)
'include_analytics': False,
}
project = 'Matplotlib'
copyright = ('2002 - 2012 John Hunter, Darren Dale, Eric Firing, '
'Michael Droettboom and the Matplotlib development '
f'team; 2012 - {datetime.now().year} The Matplotlib development team')
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = matplotlib.__version__
# The full version, including alpha/beta/rc tags.
release = version
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
unused_docs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
default_role = 'obj'
# Plot directive configuration
# ----------------------------
plot_formats = [('png', 100), ('pdf', 100)]
# GitHub extension
github_project_url = "https://github.com/matplotlib/matplotlib/"
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
#html_style = 'matplotlib.css'
html_style = f'mpl.css?{SHA}'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
#html_logo = 'logo.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If nonempty, this is the file name suffix for generated HTML files. The
# default is ``".html"``.
html_file_suffix = '.html'
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# Content template for the index page.
html_index = 'index.html'
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Custom sidebar templates, maps page names to templates.
html_sidebars = {
'index': [
# 'sidebar_announcement.html',
'sidebar_versions.html',
'donate_sidebar.html'],
'**': ['localtoc.html', 'pagesource.html']
}
# If false, no module index is generated.
#html_use_modindex = True
html_domain_indices = ["py-modindex"]
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.
html_use_opensearch = 'False'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Matplotlibdoc'
# Use typographic quote characters.
smartquotes = False
# Path to favicon
html_favicon = '_static/favicon.ico'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'letter'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('contents', 'Matplotlib.tex', 'Matplotlib',
'John Hunter\\and Darren Dale\\and Eric Firing\\and Michael Droettboom'
'\\and and the matplotlib development team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = None
# Use Unicode aware LaTeX engine
latex_engine = 'xelatex' # or 'lualatex'
latex_elements = {}
# Keep babel usage also with xelatex (Sphinx default is polyglossia)
# If this key is removed or changed, latex build directory must be cleaned
latex_elements['babel'] = r'\usepackage{babel}'
# Font configuration
# Fix fontspec converting " into right curly quotes in PDF
# cf https://github.com/sphinx-doc/sphinx/pull/6888/
latex_elements['fontenc'] = r'''
\usepackage{fontspec}
\defaultfontfeatures[\rmfamily,\sffamily,\ttfamily]{}
'''
# Sphinx 2.0 adopts GNU FreeFont by default, but it does not have all
# the Unicode codepoints needed for the section about Mathtext
# "Writing mathematical expressions"
fontpkg = r"""
\IfFontExistsTF{XITS}{
\setmainfont{XITS}
}{
\setmainfont{XITS}[
Extension = .otf,
UprightFont = *-Regular,
ItalicFont = *-Italic,
BoldFont = *-Bold,
BoldItalicFont = *-BoldItalic,
]}
\IfFontExistsTF{FreeSans}{
\setsansfont{FreeSans}
}{
\setsansfont{FreeSans}[
Extension = .otf,
UprightFont = *,
ItalicFont = *Oblique,
BoldFont = *Bold,
BoldItalicFont = *BoldOblique,
]}
\IfFontExistsTF{FreeMono}{
\setmonofont{FreeMono}
}{
\setmonofont{FreeMono}[
Extension = .otf,
UprightFont = *,
ItalicFont = *Oblique,
BoldFont = *Bold,
BoldItalicFont = *BoldOblique,
]}
% needed for \mathbb (blackboard alphabet) to actually work
\usepackage{unicode-math}
\IfFontExistsTF{XITS Math}{
\setmathfont{XITS Math}
}{
\setmathfont{XITSMath-Regular}[
Extension = .otf,
]}
"""
latex_elements['fontpkg'] = fontpkg
# Sphinx <1.8.0 or >=2.0.0 does this by default, but the 1.8.x series
# did not for latex_engine = 'xelatex' (as it used Latin Modern font).
# We need this for code-blocks as FreeMono has wide glyphs.
latex_elements['fvset'] = r'\fvset{fontsize=\small}'
# Fix fancyhdr complaining about \headheight being too small
latex_elements['passoptionstopackages'] = r"""
\PassOptionsToPackage{headheight=14pt}{geometry}
"""
# Additional stuff for the LaTeX preamble.
latex_elements['preamble'] = r"""
% One line per author on title page
\DeclareRobustCommand{\and}%
{\end{tabular}\kern-\tabcolsep\\\begin{tabular}[t]{c}}%
\usepackage{etoolbox}
\AtBeginEnvironment{sphinxthebibliography}{\appendix\part{Appendices}}
\usepackage{expdlist}
\let\latexdescription=\description
\def\description{\latexdescription{}{} \breaklabel}
% But expdlist old LaTeX package requires fixes:
% 1) remove extra space
\makeatletter
\patchcmd\@item{{\@breaklabel} }{{\@breaklabel}}{}{}
\makeatother
% 2) fix bug in expdlist's way of breaking the line after long item label
\makeatletter
\def\breaklabel{%
\def\@breaklabel{%
\leavevmode\par
% now a hack because Sphinx inserts \leavevmode after term node
\def\leavevmode{\def\leavevmode{\unhbox\voidb@x}}%
}%
}
\makeatother
"""
# Sphinx 1.5 provides this to avoid "too deeply nested" LaTeX error
# and usage of "enumitem" LaTeX package is unneeded.
# Value can be increased but do not set it to something such as 2048
# which needlessly would trigger creation of thousands of TeX macros
latex_elements['maxlistdepth'] = '10'
latex_elements['pointsize'] = '11pt'
# Better looking general index in PDF
latex_elements['printindex'] = r'\footnotesize\raggedright\printindex'
# Documents to append as an appendix to all manuals.
latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = True
latex_toplevel_sectioning = 'part'
# Show both class-level docstring and __init__ docstring in class
# documentation
autoclass_content = 'both'
texinfo_documents = [
("contents", 'matplotlib', 'Matplotlib Documentation',
'John Hunter@*Darren Dale@*Eric Firing@*Michael Droettboom@*'
'The matplotlib development team',
'Matplotlib', "Python plotting package", 'Programming',
1),
]
# numpydoc config
numpydoc_show_class_members = False
html4_writer = True
inheritance_node_attrs = dict(fontsize=16)
graphviz_dot = shutil.which('dot')
# Still use PNG until SVG linking is fixed
# https://github.com/sphinx-doc/sphinx/issues/3176
# graphviz_output_format = 'svg'
def setup(app):
if any(st in version for st in ('post', 'alpha', 'beta')):
bld_type = 'dev'
else:
bld_type = 'rel'
app.add_config_value('releaselevel', bld_type, 'env')
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
internal/ns/nslogic.go
|
package ns
/*
Copyright 2019 - 2021 Crunchy Data Solutions, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"os"
"reflect"
"strings"
"text/template"
"time"
"github.com/qingcloud/postgres-operator/internal/config"
"github.com/qingcloud/postgres-operator/internal/kubeapi"
"github.com/qingcloud/postgres-operator/pkg/events"
log "github.com/sirupsen/logrus"
authv1 "k8s.io/api/authorization/v1"
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
)
const (
OPERATOR_SERVICE_ACCOUNT = "postgres-operator"
PGO_DEFAULT_SERVICE_ACCOUNT = "pgo-default"
)
const (
PGO_TARGET_ROLE = "pgo-target-role"
PGO_TARGET_ROLE_BINDING = "pgo-target-role-binding"
PGO_TARGET_SERVICE_ACCOUNT = "pgo-target"
)
const (
PGO_BACKREST_ROLE = "pgo-backrest-role"
PGO_BACKREST_SERVICE_ACCOUNT = "pgo-backrest"
PGO_BACKREST_ROLE_BINDING = "pgo-backrest-role-binding"
)
const (
PGO_PG_ROLE = "pgo-pg-role"
PGO_PG_ROLE_BINDING = "pgo-pg-role-binding"
PGO_PG_SERVICE_ACCOUNT = "pgo-pg"
)
// PgoServiceAccount is used to populate the following ServiceAccount templates:
// pgo-default-sa.json
// pgo-target-sa.json
// pgo-backrest-sa.json
// pgo-pg-sa.json
type PgoServiceAccount struct {
TargetNamespace string
}
// PgoRole is used to populate the following Role templates:
// pgo-target-role.json
// pgo-backrest-role.json
// pgo-pg-role.json
type PgoRole struct {
TargetNamespace string
}
// PgoRoleBinding is used to populate the following RoleBinding templates:
// pgo-target-role-binding.json
// pgo-backrest-role-binding.json
// pgo-pg-role-binding.json
type PgoRoleBinding struct {
TargetNamespace string
OperatorNamespace string
}
// NamespaceOperatingMode defines the different namespace operating modes for the Operator
type NamespaceOperatingMode string
const (
// NamespaceOperatingModeDynamic enables full dynamic namespace capabilities, in which the
// Operator can create, delete and update any namespaces within the Kubernetes cluster.
    // Additionally, it can listen for namespace events (e.g. namespace additions, updates
// and deletions), and then create or remove controllers for various namespaces as those
// namespaces are added or removed from the Kubernetes cluster.
NamespaceOperatingModeDynamic NamespaceOperatingMode = "dynamic"
// NamespaceOperatingModeReadOnly allows the Operator to listen for namespace events within the
    // Kubernetes cluster, and then create, run, and/or remove controllers as namespaces are
// added and deleted.
NamespaceOperatingModeReadOnly NamespaceOperatingMode = "readonly"
// NamespaceOperatingModeDisabled causes namespace capabilities to be disabled altogether. In
// this mode the Operator will simply attempt to work with the target namespaces specified
// during installation. If no target namespaces are specified, then it will be configured to
// work within the namespace in which the Operator is deployed.
NamespaceOperatingModeDisabled NamespaceOperatingMode = "disabled"
// DNS-1123 formatting and error message for validating namespace names
dns1123Fmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?"
    dns1123ErrMsg string = "A namespace name must consist of lower case " +
"alphanumeric characters or '-', and must start and end with an alphanumeric character"
)
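// Illustrative sketch (not part of the original file): callers typically branch on the
// operating mode documented above; the variable name below is hypothetical.
//
//	switch mode {
//	case NamespaceOperatingModeDynamic:
//		// may create, update and delete namespaces, and watch namespace events
//	case NamespaceOperatingModeReadOnly:
//		// may only watch namespace events; no create/update/delete
//	case NamespaceOperatingModeDisabled:
//		// works solely with the namespaces supplied at install time
//	}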
var (
// namespacePrivsCoreDynamic defines the privileges in the Core API group required for the
// Operator to run using the NamespaceOperatingModeDynamic namespace operating mode
namespacePrivsCoreDynamic = map[string][]string{
"namespaces": {"create", "update", "delete"},
}
    // namespacePrivsCoreReadOnly defines the privileges in the Core API group required for the
// Operator to run using the NamespaceOperatingModeReadOnly namespace operating mode
namespacePrivsCoreReadOnly = map[string][]string{
"namespaces": {"get", "list", "watch"},
}
// ErrInvalidNamespaceName defines the error that is thrown when a namespace does not meet the
// requirements for naming set by Kubernetes
ErrInvalidNamespaceName = errors.New(validation.RegexError(dns1123ErrMsg, dns1123Fmt,
"my-name", "123-abc"))
    // ErrNamespaceNotWatched defines the error that is thrown when a namespace is not being
    // watched by the current Operator installation
ErrNamespaceNotWatched = errors.New("The namespaces are not watched by the " +
"current PostgreSQL Operator installation")
)
// CreateFakeNamespaceClient creates a fake namespace client for use with the "disabled" namespace
// operating mode
func CreateFakeNamespaceClient(installationName string) (kubernetes.Interface, error) {
var namespaces []runtime.Object
for _, namespace := range getNamespacesFromEnv() {
namespaces = append(namespaces, &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: namespace,
Labels: map[string]string{
config.LABEL_VENDOR: config.LABEL_QINGCLOUD,
config.LABEL_PGO_INSTALLATION_NAME: installationName,
},
},
})
}
fakeClient := fake.NewSimpleClientset(namespaces...)
return fakeClient, nil
}
// CreateNamespace creates a new namespace that is owned by the Operator.
func CreateNamespace(clientset kubernetes.Interface, installationName, pgoNamespace,
createdBy, newNs string) error {
ctx := context.TODO()
log.Debugf("CreateNamespace %s %s %s", pgoNamespace, createdBy, newNs)
// define the new namespace
n := v1.Namespace{}
n.ObjectMeta.Labels = make(map[string]string)
n.ObjectMeta.Labels[config.LABEL_VENDOR] = config.LABEL_QINGCLOUD
n.ObjectMeta.Labels[config.LABEL_PGO_CREATED_BY] = createdBy
n.ObjectMeta.Labels[config.LABEL_PGO_INSTALLATION_NAME] = installationName
n.Name = newNs
if _, err := clientset.CoreV1().Namespaces().Create(ctx, &n, metav1.CreateOptions{}); err != nil {
log.Error(err)
return err
}
log.Debugf("CreateNamespace %s created by %s", newNs, createdBy)
// publish event
topics := make([]string, 1)
topics[0] = events.EventTopicPGO
f := events.EventPGOCreateNamespaceFormat{
EventHeader: events.EventHeader{
Namespace: pgoNamespace,
Username: createdBy,
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventPGOCreateNamespace,
},
CreatedNamespace: newNs,
}
return events.Publish(f)
}
// DeleteNamespace deletes the namespace specified.
func DeleteNamespace(clientset kubernetes.Interface, installationName, pgoNamespace, deletedBy, ns string) error {
ctx := context.TODO()
err := clientset.CoreV1().Namespaces().Delete(ctx, ns, metav1.DeleteOptions{})
if err != nil {
log.Error(err)
return err
}
log.Debugf("DeleteNamespace %s deleted by %s", ns, deletedBy)
// publish the namespace delete event
topics := make([]string, 1)
topics[0] = events.EventTopicPGO
f := events.EventPGODeleteNamespaceFormat{
EventHeader: events.EventHeader{
Namespace: pgoNamespace,
Username: deletedBy,
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventPGODeleteNamespace,
},
DeletedNamespace: ns,
}
return events.Publish(f)
}
// CopySecret copies a secret from the Operator namespace to target namespace
func CopySecret(clientset kubernetes.Interface, secretName, operatorNamespace, targetNamespace string) error {
ctx := context.TODO()
secret, err := clientset.CoreV1().Secrets(operatorNamespace).Get(ctx, secretName, metav1.GetOptions{})
if err == nil {
secret.ObjectMeta = metav1.ObjectMeta{
Annotations: secret.ObjectMeta.Annotations,
Labels: secret.ObjectMeta.Labels,
Name: secret.ObjectMeta.Name,
}
_, err = clientset.CoreV1().Secrets(targetNamespace).Create(ctx, secret, metav1.CreateOptions{})
if kerrors.IsAlreadyExists(err) {
_, err = clientset.CoreV1().Secrets(targetNamespace).Update(ctx, secret, metav1.UpdateOptions{})
}
}
if !kubeapi.IsNotFound(err) {
return err
}
return nil
}
// ReconcileRole reconciles a Role required by the operator in a target namespace
func ReconcileRole(clientset kubernetes.Interface, role, targetNamespace string,
roleTemplate *template.Template) error {
ctx := context.TODO()
var createRole bool
currRole, err := clientset.RbacV1().Roles(targetNamespace).Get(ctx, role, metav1.GetOptions{})
if err != nil {
if kerrors.IsNotFound(err) {
log.Debugf("Role %s in namespace %s does not exist and will be created",
role, targetNamespace)
createRole = true
} else {
return err
}
}
var buffer bytes.Buffer
if err := roleTemplate.Execute(&buffer,
PgoRole{TargetNamespace: targetNamespace}); err != nil {
return err
}
templatedRole := rbacv1.Role{}
if err := json.Unmarshal(buffer.Bytes(), &templatedRole); err != nil {
return err
}
if createRole {
_, err := clientset.RbacV1().Roles(targetNamespace).
Create(ctx, &templatedRole, metav1.CreateOptions{})
return err
}
if !reflect.DeepEqual(currRole.Rules, templatedRole.Rules) {
log.Debugf("Role %s in namespace %s is invalid and will now be reconciled",
currRole.Name, targetNamespace)
currRole.Rules = templatedRole.Rules
if _, err := clientset.RbacV1().Roles(targetNamespace).
Update(ctx, currRole, metav1.UpdateOptions{}); err != nil {
return err
}
}
return nil
}
// ReconcileRoleBinding reconciles a RoleBinding required by the operator in a target namespace
func ReconcileRoleBinding(clientset kubernetes.Interface, pgoNamespace,
roleBinding, targetNamespace string, roleBindingTemplate *template.Template) error {
ctx := context.TODO()
var createRoleBinding bool
currRoleBinding, err := clientset.RbacV1().RoleBindings(targetNamespace).
Get(ctx, roleBinding, metav1.GetOptions{})
if err != nil {
if kerrors.IsNotFound(err) {
log.Debugf("RoleBinding %s in namespace %s does not exist and will be created",
roleBinding, targetNamespace)
createRoleBinding = true
} else {
return err
}
}
var buffer bytes.Buffer
if err := roleBindingTemplate.Execute(&buffer,
PgoRoleBinding{
TargetNamespace: targetNamespace,
OperatorNamespace: pgoNamespace,
}); err != nil {
return err
}
templatedRoleBinding := rbacv1.RoleBinding{}
if err := json.Unmarshal(buffer.Bytes(), &templatedRoleBinding); err != nil {
return err
}
if createRoleBinding {
_, err := clientset.RbacV1().RoleBindings(targetNamespace).
Create(ctx, &templatedRoleBinding, metav1.CreateOptions{})
return err
}
if !reflect.DeepEqual(currRoleBinding.Subjects,
templatedRoleBinding.Subjects) ||
!reflect.DeepEqual(currRoleBinding.RoleRef,
templatedRoleBinding.RoleRef) {
log.Debugf("RoleBinding %s in namespace %s is invalid and will now be reconciled",
currRoleBinding.Name, targetNamespace)
currRoleBinding.Subjects = templatedRoleBinding.Subjects
currRoleBinding.RoleRef = templatedRoleBinding.RoleRef
if _, err := clientset.RbacV1().RoleBindings(targetNamespace).
Update(ctx, currRoleBinding, metav1.UpdateOptions{}); err != nil {
return err
}
}
return nil
}
// ReconcileServiceAccount reconciles a ServiceAccount required by the operator in a target
// namespace
func ReconcileServiceAccount(clientset kubernetes.Interface,
serviceAccount, targetNamespace string, serviceAccountTemplate *template.Template,
imagePullSecrets []v1.LocalObjectReference) (bool, error) {
ctx := context.TODO()
var createServiceAccount, createdOrUpdated bool
currServiceAccount, err := clientset.CoreV1().ServiceAccounts(targetNamespace).
Get(ctx, serviceAccount, metav1.GetOptions{})
if err != nil {
if kerrors.IsNotFound(err) {
log.Debugf("ServiceAccount %s in namespace %s does not exist and will be created",
serviceAccount, targetNamespace)
createServiceAccount = true
} else {
return createdOrUpdated, err
}
}
var buffer bytes.Buffer
if err := serviceAccountTemplate.Execute(&buffer,
PgoServiceAccount{TargetNamespace: targetNamespace}); err != nil {
return createdOrUpdated, err
}
templatedServiceAccount := v1.ServiceAccount{}
if err := json.Unmarshal(buffer.Bytes(), &templatedServiceAccount); err != nil {
return createdOrUpdated, err
}
if createServiceAccount {
templatedServiceAccount.ImagePullSecrets = imagePullSecrets
if _, err := clientset.CoreV1().ServiceAccounts(targetNamespace).
Create(ctx, &templatedServiceAccount, metav1.CreateOptions{}); err != nil {
return createdOrUpdated, err
}
createdOrUpdated = true
return createdOrUpdated, nil
}
if !reflect.DeepEqual(currServiceAccount.ImagePullSecrets, imagePullSecrets) {
log.Debugf("ServiceAccout %s in namespace %s is invalid and will now be reconciled",
currServiceAccount.Name, targetNamespace)
currServiceAccount.ImagePullSecrets = imagePullSecrets
if _, err := clientset.CoreV1().ServiceAccounts(targetNamespace).
Update(ctx, currServiceAccount, metav1.UpdateOptions{}); err != nil {
return createdOrUpdated, err
}
createdOrUpdated = true
}
return createdOrUpdated, nil
}
// UpdateNamespace updates an existing namespace so that it is owned by the Operator.
func UpdateNamespace(clientset kubernetes.Interface, installationName, pgoNamespace,
updatedBy, ns string) error {
ctx := context.TODO()
log.Debugf("UpdateNamespace %s %s %s %s", installationName, pgoNamespace, updatedBy, ns)
theNs, err := clientset.CoreV1().Namespaces().Get(ctx, ns, metav1.GetOptions{})
if err != nil {
return err
}
if theNs.ObjectMeta.Labels == nil {
theNs.ObjectMeta.Labels = make(map[string]string)
}
theNs.ObjectMeta.Labels[config.LABEL_VENDOR] = config.LABEL_QINGCLOUD
theNs.ObjectMeta.Labels[config.LABEL_PGO_INSTALLATION_NAME] = installationName
if _, err := clientset.CoreV1().Namespaces().Update(ctx, theNs, metav1.UpdateOptions{}); err != nil {
log.Error(err)
return err
}
// publish event
topics := make([]string, 1)
topics[0] = events.EventTopicPGO
f := events.EventPGOCreateNamespaceFormat{
EventHeader: events.EventHeader{
Namespace: pgoNamespace,
Username: updatedBy,
Topic: topics,
Timestamp: time.Now(),
EventType: events.EventPGOCreateNamespace,
},
CreatedNamespace: ns,
}
return events.Publish(f)
}
// ConfigureInstallNamespaces is responsible for properly configuring any namespaces provided for
// the installation of the Operator. This includes creating or updating those namespaces so they can
// be utilized by the Operator to deploy PG clusters.
func ConfigureInstallNamespaces(clientset kubernetes.Interface, installationName, pgoNamespace string,
namespaceNames []string, namespaceOperatingMode NamespaceOperatingMode) error {
ctx := context.TODO()
// now loop through all namespaces and either create or update them
for _, namespaceName := range namespaceNames {
nameSpaceExists := true
// if we can get namespaces, make sure this one isn't part of another install
if namespaceOperatingMode != NamespaceOperatingModeDisabled {
namespace, err := clientset.CoreV1().Namespaces().
Get(ctx, namespaceName, metav1.GetOptions{})
if err != nil {
if kerrors.IsNotFound(err) {
nameSpaceExists = false
} else {
return err
}
}
if nameSpaceExists {
                // skip the namespace if it is owned by another installation
labels := namespace.ObjectMeta.Labels
if labels != nil && labels[config.LABEL_VENDOR] == config.LABEL_QINGCLOUD &&
labels[config.LABEL_PGO_INSTALLATION_NAME] != installationName {
log.Errorf("Configure install namespaces: namespace %s owned by another "+
"installation, will not update it", namespaceName)
continue
}
}
}
// if using the "dynamic" namespace mode we can now update the namespace to ensure it is
// properly owned by this install
if namespaceOperatingMode == NamespaceOperatingModeDynamic {
if nameSpaceExists {
// if not part of this or another install, then update the namespace to be owned by this
// Operator install
log.Infof("Configure install namespaces: namespace %s will be updated to be owned by this "+
"installation", namespaceName)
if err := UpdateNamespace(clientset, installationName, pgoNamespace,
"operator-bootstrap", namespaceName); err != nil {
return err
}
} else {
log.Infof("Configure install namespaces: namespace %s will be created for this "+
"installation", namespaceName)
if err := CreateNamespace(clientset, installationName, pgoNamespace,
"operator-bootstrap", namespaceName); err != nil {
return err
}
}
}
}
return nil
}
// GetCurrentNamespaceList returns the current list of namespaces being managed by the current
// Operator installation. When the current namespace mode is "dynamic" or "readOnly", this
// involves querying the Kube cluster for any namespaces with the "vendor" and
// "pgo-installation-name" labels corresponding to the current Operator install. When the
// namespace mode is "disabled", a list of namespaces specified using the NAMESPACE env var during
// installation is returned (with the list defaulting to the Operator's own namespace in the event
// that NAMESPACE is empty).
func GetCurrentNamespaceList(clientset kubernetes.Interface,
installationName string, namespaceOperatingMode NamespaceOperatingMode) ([]string, error) {
ctx := context.TODO()
if namespaceOperatingMode == NamespaceOperatingModeDisabled {
return getNamespacesFromEnv(), nil
}
ns := make([]string, 0)
nsList, err := clientset.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
if err != nil {
log.Error(err.Error())
return nil, err
}
for _, v := range nsList.Items {
labels := v.ObjectMeta.Labels
if labels[config.LABEL_VENDOR] == config.LABEL_QINGCLOUD &&
labels[config.LABEL_PGO_INSTALLATION_NAME] == installationName {
ns = append(ns, v.Name)
}
}
return ns, nil
}
// ValidateNamespacesWatched validates whether or not the namespaces provided are being watched by
// the current Operator installation. When the current namespace mode is "dynamic" or "readOnly",
// this involves ensuring the namespace specified has the proper "vendor" and
// "pgo-installation-name" labels corresponding to the current Operator install. When the
// namespace mode is "disabled", this means ensuring the namespace is in the list of those
// specified using the NAMESPACE env var during installation (with the list defaulting to the
// Operator's own namespace in the event that NAMESPACE is empty). If any namespaces are found to
// be invalid, an ErrNamespaceNotWatched error is returned containing an error message listing
// the invalid namespaces.
func ValidateNamespacesWatched(clientset kubernetes.Interface,
namespaceOperatingMode NamespaceOperatingMode,
installationName string, namespaces ...string) error {
var err error
var currNSList []string
if namespaceOperatingMode != NamespaceOperatingModeDisabled {
currNSList, err = GetCurrentNamespaceList(clientset, installationName,
namespaceOperatingMode)
if err != nil {
return err
}
} else {
currNSList = getNamespacesFromEnv()
}
var invalidNamespaces []string
for _, ns := range namespaces {
var validNS bool
for _, currNS := range currNSList {
if ns == currNS {
validNS = true
break
}
}
if !validNS {
invalidNamespaces = append(invalidNamespaces, ns)
}
}
if len(invalidNamespaces) > 0 {
return fmt.Errorf("The following namespaces are invalid: %v. %w", invalidNamespaces,
ErrNamespaceNotWatched)
}
return nil
}
// getNamespacesFromEnv returns a slice containing the namespaces stored in the NAMESPACE env var in
// csv format. If NAMESPACE is empty, then the Operator namespace as specified in env var
// PGO_OPERATOR_NAMESPACE is returned.
func getNamespacesFromEnv() []string {
namespaceEnvVar := os.Getenv("NAMESPACE")
if namespaceEnvVar == "" {
defaultNs := os.Getenv("PGO_OPERATOR_NAMESPACE")
return []string{defaultNs}
}
return strings.Split(namespaceEnvVar, ",")
}
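// Illustrative behaviour of getNamespacesFromEnv (assumed env values, not part of the
// original file):
//
//	NAMESPACE="pgo-ns1,pgo-ns2"                -> []string{"pgo-ns1", "pgo-ns2"}
//	NAMESPACE="", PGO_OPERATOR_NAMESPACE="pgo" -> []string{"pgo"}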
// ValidateNamespaceNames validates one or more namespace names to ensure they are valid per Kubernetes
// naming requirements.
func ValidateNamespaceNames(namespace ...string) error {
var invalidNamespaces []string
for _, ns := range namespace {
if validation.IsDNS1123Label(ns) != nil {
invalidNamespaces = append(invalidNamespaces, ns)
}
}
if len(invalidNamespaces) > 0 {
return fmt.Errorf("The following namespaces are invalid %v. %w", invalidNamespaces,
ErrInvalidNamespaceName)
}
return nil
}
// GetNamespaceOperatingMode is responsible for returning the proper namespace operating mode for
// the current Operator install. This is done by submitting a SubjectAccessReview in the local
// Kubernetes cluster to determine whether or not certain cluster-level privileges have been
// assigned to the Operator Service Account. Based on the privileges identified, the proper
// NamespaceOperatingMode will be returned as applicable for those privileges
// (please see the various NamespaceOperatingMode types for a detailed explanation of each
// operating mode).
func GetNamespaceOperatingMode(clientset kubernetes.Interface) (NamespaceOperatingMode, error) {
// first check to see if dynamic namespace capabilities can be enabled
isDynamic, err := CheckAccessPrivs(clientset, namespacePrivsCoreDynamic, "", "")
if err != nil {
return "", err
}
// next check to see if readonly namespace capabilities can be enabled
isReadOnly, err := CheckAccessPrivs(clientset, namespacePrivsCoreReadOnly, "", "")
if err != nil {
return "", err
}
// return the proper namespace operating mode based on the access privs identified
switch {
case isDynamic && isReadOnly:
return NamespaceOperatingModeDynamic, nil
case !isDynamic && isReadOnly:
return NamespaceOperatingModeReadOnly, nil
default:
return NamespaceOperatingModeDisabled, nil
}
}
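// Illustrative bootstrap sketch (assumed wiring, not part of the original file): detect the
// operating mode, derive the initial namespace list, then configure those namespaces.
//
//	mode, err := GetNamespaceOperatingMode(clientset)
//	if err != nil { /* handle error */ }
//	nsList, err := GetInitialNamespaceList(clientset, mode, installationName, pgoNamespace)
//	if err != nil { /* handle error */ }
//	err = ConfigureInstallNamespaces(clientset, installationName, pgoNamespace, nsList, mode)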
// CheckAccessPrivs checks to see if the ServiceAccount currently running the operator has
// the permissions defined for various resources as specified in the provided permissions
// map. If an empty namespace is provided then it is assumed the resource is cluster-scoped.
// If the ServiceAccount has all of the permissions defined in the permissions map, then "true"
// is returned. Otherwise, if the Service Account is missing any of the permissions specified,
// or if an error is encountered while attempting to determine the permissions for the service
// account, then "false" is returned (along with the error in the event an error is encountered).
func CheckAccessPrivs(clientset kubernetes.Interface,
privs map[string][]string, apiGroup, namespace string) (bool, error) {
ctx := context.TODO()
for resource, verbs := range privs {
for _, verb := range verbs {
sar, err := clientset.
AuthorizationV1().SelfSubjectAccessReviews().
Create(ctx, &authv1.SelfSubjectAccessReview{
Spec: authv1.SelfSubjectAccessReviewSpec{
ResourceAttributes: &authv1.ResourceAttributes{
Namespace: namespace,
Group: apiGroup,
Resource: resource,
Verb: verb,
},
},
}, metav1.CreateOptions{})
if err != nil {
return false, err
}
if !sar.Status.Allowed {
return false, nil
}
}
}
return true, nil
}
// GetInitialNamespaceList returns an initial list of namespaces for the current Operator install.
// This includes first obtaining any namespaces from the NAMESPACE env var, and then if the
// namespace operating mode permits, also querying the Kube cluster in order to obtain any other
// namespaces that are part of the install, but not included in the env var. If no namespaces are
// identified via either of these methods, then the PGO namespace is returned as the default
// namespace.
func GetInitialNamespaceList(clientset kubernetes.Interface,
namespaceOperatingMode NamespaceOperatingMode,
installationName, pgoNamespace string) ([]string, error) {
// next grab the namespaces provided using the NAMESPACE env var
namespaceList := getNamespacesFromEnv()
// make sure the namespaces obtained from the NAMESPACE env var are valid
if err := ValidateNamespaceNames(namespaceList...); err != nil {
return nil, err
}
nsEnvMap := make(map[string]struct{})
for _, namespace := range namespaceList {
nsEnvMap[namespace] = struct{}{}
}
// If the Operator is in a dynamic or readOnly mode, then refresh the namespace list by
// querying the Kube cluster. This allows us to account for all namespaces owned by the
// Operator, including those not explicitly specified during the Operator install.
var namespaceListCluster []string
var err error
if namespaceOperatingMode == NamespaceOperatingModeDynamic ||
namespaceOperatingMode == NamespaceOperatingModeReadOnly {
namespaceListCluster, err = GetCurrentNamespaceList(clientset, installationName,
namespaceOperatingMode)
if err != nil {
return nil, err
}
}
for _, namespace := range namespaceListCluster {
if _, ok := nsEnvMap[namespace]; !ok {
namespaceList = append(namespaceList, namespace)
}
}
return namespaceList, nil
}
|
[
"\"NAMESPACE\"",
"\"PGO_OPERATOR_NAMESPACE\""
] |
[] |
[
"PGO_OPERATOR_NAMESPACE",
"NAMESPACE"
] |
[]
|
["PGO_OPERATOR_NAMESPACE", "NAMESPACE"]
|
go
| 2 | 0 | |
library/src/main/java/top/srsea/lever/network/DownloadTask.java
|
/*
* Copyright (C) 2019 sea
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package top.srsea.lever.network;
import android.util.Log;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.UnsupportedEncodingException;
import java.net.URI;
import java.net.URLDecoder;
import java.util.UUID;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import io.reactivex.Observable;
import io.reactivex.annotations.NonNull;
import io.reactivex.functions.Function;
import io.reactivex.subjects.PublishSubject;
import io.reactivex.subjects.Subject;
import okhttp3.Headers;
import okhttp3.Interceptor;
import okhttp3.OkHttpClient;
import okhttp3.Response;
import okhttp3.ResponseBody;
import top.srsea.torque.common.IOHelper;
import top.srsea.torque.common.Preconditions;
import top.srsea.torque.common.StringHelper;
/**
* A download task, with observable progress.
*
* @author sea
*/
public class DownloadTask {
/**
* A progress subject to publish and subscribe.
*/
private final Subject<Progress> progress;
/**
* Parent path to save file, default is {@code ${HOME}/Downloads}
*/
private final File savePath;
/**
     * Filename of the downloaded file; if empty, it will be obtained from the response.
*/
private String filename;
/**
* Remote file URL, required.
*/
private final String url;
/**
     * OkHttpClient for the download; a default instance is created if null.
*/
private final OkHttpClient client;
/**
* Constructs an instance with builder.
*
* @param builder the specific builder.
*/
private DownloadTask(Builder builder) {
progress = PublishSubject.create();
savePath = builder.savePath;
filename = builder.filename;
url = builder.url;
client = builder.client;
}
/**
* Creates a new {@code OkHttpClient} instance with progress observer.
*
* @return new {@code OkHttpClient} instance with progress observer
*/
private OkHttpClient newOkHttpClient() {
return (client == null ? new OkHttpClient.Builder() : client.newBuilder())
.addInterceptor(new Interceptor() {
@Nonnull
@Override
public Response intercept(@Nonnull Chain chain) throws IOException {
Response response = chain.proceed(chain.request());
if (StringHelper.isBlank(filename)) {
filename = obtainFilename(response);
}
return response.newBuilder()
.body(new ProgressResponseBody(response.body(), progress))
.build();
}
}).build();
}
/**
* Obtains a filename from the response.
*
* <p>Obtains a filename from headers preferentially, if failed, obtain a filename from url path.
*
* @param response response to obtain a filename.
* @return filename
*/
private String obtainFilename(Response response) {
String filename = obtainFilename(response.headers());
if (StringHelper.isBlank(filename)) {
filename = obtainFilename(response.request().url().uri());
}
if (StringHelper.isBlank(filename)) {
Log.w("DownloadTask", "Cannot obtain a filename, use UUID instead.");
filename = UUID.randomUUID().toString();
}
return filename;
}
/**
* Obtains a filename from the headers.
*
* <p>Obtains a filename from headers, if failed, returns null.
*
* @param headers headers to obtain a filename.
* @return filename
*/
private String obtainFilename(Headers headers) {
if (headers == null) {
return null;
}
String contentDisposition = headers.get("Content-Disposition");
final String mark = "filename=";
if (contentDisposition == null || !contentDisposition.contains(mark)) {
return null;
}
try {
String encodedFilename = contentDisposition.substring(contentDisposition.indexOf(mark) + mark.length());
String filename = URLDecoder.decode(encodedFilename, "UTF-8").trim();
if (filename.startsWith("\"") && filename.endsWith("\"")) {
filename = filename.substring(1, filename.length() - 1);
}
return filename.trim();
} catch (UnsupportedEncodingException e) {
return null;
}
}
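    // Illustrative behaviour (assumed header value, not part of the original file): given
    //   Content-Disposition: attachment; filename="report.pdf"
    // the text after "filename=" is URL-decoded and unquoted, yielding "report.pdf".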
/**
* Obtains a filename from the uri.
*
* <p>Obtains a filename from the uri, if failed, returns null.
*
* @param uri uri to obtain a filename.
* @return filename
*/
private String obtainFilename(URI uri) {
if (uri == null) {
return null;
}
String path = uri.getPath();
if (path == null) {
return null;
}
String filename = path.substring(path.lastIndexOf('/') + 1);
try {
filename = URLDecoder.decode(filename, "UTF-8");
} catch (UnsupportedEncodingException e) {
return filename;
}
return filename;
}
/**
* Observable progress of this download task.
*
* @return observable progress
*/
public Observable<Progress> progress() {
return progress;
}
/**
* Starts this download task.
*
* @return observable file
*/
public Observable<File> start() {
if (StringHelper.isBlank(url)) {
return Observable.error(new IllegalArgumentException("url cannot be blank."));
}
return RetrofitProvider.get(newOkHttpClient())
.create(DownloadService.class)
.download(url)
.map(new Function<ResponseBody, File>() {
@Override
public File apply(@NonNull ResponseBody responseBody) throws Exception {
InputStream stream = responseBody.byteStream();
Preconditions.require(savePath.exists() || savePath.mkdirs(), new IOException("cannot mkdirs."));
File target = new File(savePath, filename);
OutputStream out = new FileOutputStream(target);
IOHelper.transfer(stream, out);
IOHelper.close(stream, out);
return target;
}
});
}
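    // Illustrative usage (hypothetical URL and path, not part of the original file):
    //
    //   DownloadTask task = new DownloadTask.Builder()
    //           .url("https://example.com/archive.zip")
    //           .savePath(new File("/tmp/downloads"))
    //           .build();
    //   task.progress().subscribe(p -> { /* render progress */ });
    //   task.start().subscribe(file -> { /* use the downloaded file */ });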
/**
* Builder of {@link DownloadTask}
*
* @see DownloadTask
*/
public static class Builder {
/**
* Parent path to save file.
*/
File savePath;
/**
* Filename of download file.
*/
String filename;
/**
* Remote file URL.
*/
String url;
/**
* OkHttpClient for download.
*/
OkHttpClient client;
/**
* Sets the save path.
*
* @param savePath the specific save path
* @return current builder
*/
public Builder savePath(@Nullable File savePath) {
this.savePath = savePath;
return this;
}
/**
* Sets the filename.
*
* @param filename the specific filename
* @return current builder
*/
public Builder filename(@Nullable String filename) {
this.filename = filename;
return this;
}
/**
* Sets the url.
*
* @param url the specific url
* @return current builder
*/
public Builder url(@Nonnull String url) {
this.url = url;
return this;
}
/**
* Sets the client.
*
* @param client the specific client
* @return current builder
*/
public Builder client(@Nullable OkHttpClient client) {
this.client = client;
return this;
}
/**
* Builds a {@code DownloadTask} with this builder.
*
* @return {@code DownloadTask} instance
*/
public DownloadTask build() {
if (savePath == null) {
savePath = new File(System.getenv("HOME"), "Downloads");
}
return new DownloadTask(this);
}
}
}
|
[
"\"HOME\""
] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
java
| 1 | 0 | |
tools/generate_torch_version.py
|
import argparse
import os
import re
import subprocess
from pathlib import Path
from setuptools import distutils # type: ignore[import]
from typing import Optional, Union
UNKNOWN = "Unknown"
RELEASE_PATTERN = re.compile(r"/v[0-9]+(\.[0-9]+)*(-rc[0-9]+)?/")
def get_sha(pytorch_root: Union[str, Path]) -> str:
try:
return (
subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=pytorch_root)
.decode("ascii")
.strip()
)
except Exception:
return UNKNOWN
def get_tag(pytorch_root: Union[str, Path]) -> str:
try:
tag = (
subprocess.check_output(
["git", "describe", "--tags", "--exact"], cwd=pytorch_root
)
.decode("ascii")
.strip()
)
if RELEASE_PATTERN.match(tag):
return tag
else:
return UNKNOWN
except Exception:
return UNKNOWN
def get_torch_version(sha: Optional[str] = None) -> str:
pytorch_root = Path(__file__).parent.parent
version = open(pytorch_root / "version.txt", "r").read().strip()
if os.getenv("PYTORCH_BUILD_VERSION"):
assert os.getenv("PYTORCH_BUILD_NUMBER") is not None
build_number = int(os.getenv("PYTORCH_BUILD_NUMBER", ""))
version = os.getenv("PYTORCH_BUILD_VERSION", "")
if build_number > 1:
version += ".post" + str(build_number)
elif sha != UNKNOWN:
if sha is None:
sha = get_sha(pytorch_root)
version += "+git" + sha[:7]
return version
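# Illustrative outputs of get_torch_version (assumed inputs, not part of the original file):
#   version.txt "1.12.0a0", PYTORCH_BUILD_VERSION unset, sha "abcdef1234..." -> "1.12.0a0+gitabcdef1"
#   PYTORCH_BUILD_VERSION "1.12.0", PYTORCH_BUILD_NUMBER "2"                 -> "1.12.0.post2"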
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate torch/version.py from build and environment metadata."
)
parser.add_argument(
"--is_debug",
type=distutils.util.strtobool,
help="Whether this build is debug mode or not.",
)
parser.add_argument("--cuda_version", type=str)
parser.add_argument("--hip_version", type=str)
args = parser.parse_args()
assert args.is_debug is not None
args.cuda_version = None if args.cuda_version == "" else args.cuda_version
args.hip_version = None if args.hip_version == "" else args.hip_version
pytorch_root = Path(__file__).parent.parent
version_path = pytorch_root / "torch" / "version.py"
# Attempt to get tag first, fall back to sha if a tag was not found
tagged_version = get_tag(pytorch_root)
sha = get_sha(pytorch_root)
if tagged_version == UNKNOWN:
version = get_torch_version(sha)
else:
version = tagged_version
with open(version_path, "w") as f:
f.write("__version__ = '{}'\n".format(version))
# NB: This is not 100% accurate, because you could have built the
# library code with DEBUG, but csrc without DEBUG (in which case
# this would claim to be a release build when it's not.)
f.write("debug = {}\n".format(repr(bool(args.is_debug))))
f.write("cuda = {}\n".format(repr(args.cuda_version)))
f.write("git_version = {}\n".format(repr(sha)))
f.write("hip = {}\n".format(repr(args.hip_version)))
|
[] |
[] |
[
"PYTORCH_BUILD_VERSION",
"PYTORCH_BUILD_NUMBER"
] |
[]
|
["PYTORCH_BUILD_VERSION", "PYTORCH_BUILD_NUMBER"]
|
python
| 2 | 0 | |
bsp/stm32/stm32f103-fire-arbitrary/rtconfig.py
|
import os
# toolchains options
ARCH='arm'
CPU='cortex-m3'
CROSS_TOOL='gcc'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if os.getenv('RTT_ROOT'):
RTT_ROOT = os.getenv('RTT_ROOT')
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'C:\Users\XXYYZZ'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = r'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
PLATFORM = 'iar'
EXEC_PATH = r'C:/Program Files (x86)/IAR Systems/Embedded Workbench 8.0'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
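# Illustrative environment overrides (assumed shell values, not part of this file):
#   RTT_CC=keil               selects the armcc toolchain branch below
#   RTT_EXEC_PATH=C:/Keil_v5  replaces EXEC_PATH for the chosen toolchain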
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m3 -mthumb -ffunction-sections -fdata-sections'
CFLAGS = DEVICE + ' -std=c99 -Dgcc'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rt-thread.map,-cref,-u,Reset_Handler -T board/linker_scripts/link.lds'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2 -g'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --cpu Cortex-M3 '
CFLAGS = '-c ' + DEVICE + ' --apcs=interwork --c99'
AFLAGS = DEVICE + ' --apcs=interwork '
LFLAGS = DEVICE + ' --scatter "board\linker_scripts\link.sct" --info sizes --info totals --info unused --info veneers --list rt-thread.map --strict'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/ARMCC/include'
LFLAGS += ' --libpath=' + EXEC_PATH + '/ARM/ARMCC/lib'
CFLAGS += ' -D__MICROLIB '
AFLAGS += ' --pd "__MICROLIB SETA 1" '
LFLAGS += ' --library_type=microlib '
EXEC_PATH += '/ARM/ARMCC/bin/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
# toolchains
CC = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = '-Dewarm'
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M3'
CFLAGS += ' -e'
CFLAGS += ' --fpu=None'
CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' --silent'
AFLAGS = DEVICE
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M3'
AFLAGS += ' --fpu None'
AFLAGS += ' -S'
if BUILD == 'debug':
CFLAGS += ' --debug'
CFLAGS += ' -On'
else:
CFLAGS += ' -Oh'
LFLAGS = ' --config "board/linker_scripts/link.icf"'
LFLAGS += ' --entry __iar_program_start'
EXEC_PATH = EXEC_PATH + '/arm/bin/'
POST_ACTION = 'ielftool --bin $TARGET rtthread.bin'
|
[] |
[] |
[
"RTT_CC",
"RTT_ROOT",
"RTT_EXEC_PATH"
] |
[]
|
["RTT_CC", "RTT_ROOT", "RTT_EXEC_PATH"]
|
python
| 3 | 0 | |
python/pyspark/ml/recommendation.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pyspark import since
from pyspark.ml.util import keyword_only
from pyspark.ml.wrapper import JavaEstimator, JavaModel
from pyspark.ml.param.shared import *
from pyspark.mllib.common import inherit_doc
__all__ = ['ALS', 'ALSModel']
@inherit_doc
class ALS(JavaEstimator, HasCheckpointInterval, HasMaxIter, HasPredictionCol, HasRegParam, HasSeed):
"""
Alternating Least Squares (ALS) matrix factorization.
ALS attempts to estimate the ratings matrix `R` as the product of
two lower-rank matrices, `X` and `Y`, i.e. `X * Yt = R`. Typically
these approximations are called 'factor' matrices. The general
approach is iterative. During each iteration, one of the factor
matrices is held constant, while the other is solved for using least
squares. The newly-solved factor matrix is then held constant while
solving for the other factor matrix.
This is a blocked implementation of the ALS factorization algorithm
that groups the two sets of factors (referred to as "users" and
"products") into blocks and reduces communication by only sending
one copy of each user vector to each product block on each
iteration, and only for the product blocks that need that user's
feature vector. This is achieved by pre-computing some information
about the ratings matrix to determine the "out-links" of each user
(which blocks of products it will contribute to) and "in-link"
information for each product (which of the feature vectors it
receives from each user block it will depend on). This allows us to
send only an array of feature vectors between each user block and
product block, and have the product block find the users' ratings
and update the products based on these messages.
For implicit preference data, the algorithm used is based on
"Collaborative Filtering for Implicit Feedback Datasets", available
at `http://dx.doi.org/10.1109/ICDM.2008.22`, adapted for the blocked
approach used here.
Essentially instead of finding the low-rank approximations to the
rating matrix `R`, this finds the approximations for a preference
matrix `P` where the elements of `P` are 1 if r > 0 and 0 if r <= 0.
The ratings then act as 'confidence' values related to strength of
indicated user preferences rather than explicit ratings given to
items.
>>> df = sqlContext.createDataFrame(
... [(0, 0, 4.0), (0, 1, 2.0), (1, 1, 3.0), (1, 2, 4.0), (2, 1, 1.0), (2, 2, 5.0)],
... ["user", "item", "rating"])
>>> als = ALS(rank=10, maxIter=5)
>>> model = als.fit(df)
>>> model.rank
10
>>> model.userFactors.orderBy("id").collect()
[Row(id=0, features=[...]), Row(id=1, ...), Row(id=2, ...)]
>>> test = sqlContext.createDataFrame([(0, 2), (1, 0), (2, 0)], ["user", "item"])
>>> predictions = sorted(model.transform(test).collect(), key=lambda r: r[0])
>>> predictions[0]
Row(user=0, item=2, prediction=-0.13807615637779236)
>>> predictions[1]
Row(user=1, item=0, prediction=2.6258413791656494)
>>> predictions[2]
Row(user=2, item=0, prediction=-1.5018409490585327)
.. versionadded:: 1.4.0
"""
rank = Param(Params._dummy(), "rank", "rank of the factorization")
numUserBlocks = Param(Params._dummy(), "numUserBlocks", "number of user blocks")
numItemBlocks = Param(Params._dummy(), "numItemBlocks", "number of item blocks")
implicitPrefs = Param(Params._dummy(), "implicitPrefs", "whether to use implicit preference")
alpha = Param(Params._dummy(), "alpha", "alpha for implicit preference")
userCol = Param(Params._dummy(), "userCol", "column name for user ids")
itemCol = Param(Params._dummy(), "itemCol", "column name for item ids")
ratingCol = Param(Params._dummy(), "ratingCol", "column name for ratings")
nonnegative = Param(Params._dummy(), "nonnegative",
"whether to use nonnegative constraint for least squares")
@keyword_only
def __init__(self, rank=10, maxIter=10, regParam=0.1, numUserBlocks=10, numItemBlocks=10,
implicitPrefs=False, alpha=1.0, userCol="user", itemCol="item", seed=None,
ratingCol="rating", nonnegative=False, checkpointInterval=10):
"""
__init__(self, rank=10, maxIter=10, regParam=0.1, numUserBlocks=10, numItemBlocks=10, \
implicitPrefs=false, alpha=1.0, userCol="user", itemCol="item", seed=None, \
ratingCol="rating", nonnegative=false, checkpointInterval=10)
"""
super(ALS, self).__init__()
self._java_obj = self._new_java_obj("org.apache.spark.ml.recommendation.ALS", self.uid)
self._setDefault(rank=10, maxIter=10, regParam=0.1, numUserBlocks=10, numItemBlocks=10,
implicitPrefs=False, alpha=1.0, userCol="user", itemCol="item", seed=None,
ratingCol="rating", nonnegative=False, checkpointInterval=10)
kwargs = self.__init__._input_kwargs
self.setParams(**kwargs)
@keyword_only
@since("1.4.0")
def setParams(self, rank=10, maxIter=10, regParam=0.1, numUserBlocks=10, numItemBlocks=10,
implicitPrefs=False, alpha=1.0, userCol="user", itemCol="item", seed=None,
ratingCol="rating", nonnegative=False, checkpointInterval=10):
"""
setParams(self, rank=10, maxIter=10, regParam=0.1, numUserBlocks=10, numItemBlocks=10, \
implicitPrefs=False, alpha=1.0, userCol="user", itemCol="item", seed=None, \
ratingCol="rating", nonnegative=False, checkpointInterval=10)
Sets params for ALS.
"""
kwargs = self.setParams._input_kwargs
return self._set(**kwargs)
def _create_model(self, java_model):
return ALSModel(java_model)
@since("1.4.0")
def setRank(self, value):
"""
Sets the value of :py:attr:`rank`.
"""
self._paramMap[self.rank] = value
return self
@since("1.4.0")
def getRank(self):
"""
Gets the value of rank or its default value.
"""
return self.getOrDefault(self.rank)
@since("1.4.0")
def setNumUserBlocks(self, value):
"""
Sets the value of :py:attr:`numUserBlocks`.
"""
self._paramMap[self.numUserBlocks] = value
return self
@since("1.4.0")
def getNumUserBlocks(self):
"""
Gets the value of numUserBlocks or its default value.
"""
return self.getOrDefault(self.numUserBlocks)
@since("1.4.0")
def setNumItemBlocks(self, value):
"""
Sets the value of :py:attr:`numItemBlocks`.
"""
self._paramMap[self.numItemBlocks] = value
return self
@since("1.4.0")
def getNumItemBlocks(self):
"""
Gets the value of numItemBlocks or its default value.
"""
return self.getOrDefault(self.numItemBlocks)
@since("1.4.0")
def setNumBlocks(self, value):
"""
Sets both :py:attr:`numUserBlocks` and :py:attr:`numItemBlocks` to the specific value.
"""
self._paramMap[self.numUserBlocks] = value
        self._paramMap[self.numItemBlocks] = value
        return self
@since("1.4.0")
def setImplicitPrefs(self, value):
"""
Sets the value of :py:attr:`implicitPrefs`.
"""
self._paramMap[self.implicitPrefs] = value
return self
@since("1.4.0")
def getImplicitPrefs(self):
"""
Gets the value of implicitPrefs or its default value.
"""
return self.getOrDefault(self.implicitPrefs)
@since("1.4.0")
def setAlpha(self, value):
"""
Sets the value of :py:attr:`alpha`.
"""
self._paramMap[self.alpha] = value
return self
@since("1.4.0")
def getAlpha(self):
"""
Gets the value of alpha or its default value.
"""
return self.getOrDefault(self.alpha)
@since("1.4.0")
def setUserCol(self, value):
"""
Sets the value of :py:attr:`userCol`.
"""
self._paramMap[self.userCol] = value
return self
@since("1.4.0")
def getUserCol(self):
"""
Gets the value of userCol or its default value.
"""
return self.getOrDefault(self.userCol)
@since("1.4.0")
def setItemCol(self, value):
"""
Sets the value of :py:attr:`itemCol`.
"""
self._paramMap[self.itemCol] = value
return self
@since("1.4.0")
def getItemCol(self):
"""
Gets the value of itemCol or its default value.
"""
return self.getOrDefault(self.itemCol)
@since("1.4.0")
def setRatingCol(self, value):
"""
Sets the value of :py:attr:`ratingCol`.
"""
self._paramMap[self.ratingCol] = value
return self
@since("1.4.0")
def getRatingCol(self):
"""
Gets the value of ratingCol or its default value.
"""
return self.getOrDefault(self.ratingCol)
@since("1.4.0")
def setNonnegative(self, value):
"""
Sets the value of :py:attr:`nonnegative`.
"""
self._paramMap[self.nonnegative] = value
return self
@since("1.4.0")
def getNonnegative(self):
"""
Gets the value of nonnegative or its default value.
"""
return self.getOrDefault(self.nonnegative)
class ALSModel(JavaModel):
"""
Model fitted by ALS.
.. versionadded:: 1.4.0
"""
@property
@since("1.4.0")
def rank(self):
"""rank of the matrix factorization model"""
return self._call_java("rank")
@property
@since("1.4.0")
def userFactors(self):
"""
a DataFrame that stores user factors in two columns: `id` and
`features`
"""
return self._call_java("userFactors")
@property
@since("1.4.0")
def itemFactors(self):
"""
a DataFrame that stores item factors in two columns: `id` and
`features`
"""
return self._call_java("itemFactors")
if __name__ == "__main__":
import doctest
from pyspark.context import SparkContext
from pyspark.sql import SQLContext
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
sc = SparkContext("local[2]", "ml.recommendation tests")
sqlContext = SQLContext(sc)
globs['sc'] = sc
globs['sqlContext'] = sqlContext
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
sc.stop()
if failure_count:
exit(-1)
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
grow/common/patched_site.py
|
# Copy of stdlib's site.py. See explanation in extensions.py.
"""Append module search paths for third-party packages to sys.path.
****************************************************************
* This module is automatically imported during initialization. *
****************************************************************
In earlier versions of Python (up to 1.5a3), scripts or modules that
needed to use site-specific modules would place ``import site''
somewhere near the top of their code. Because of the automatic
import, this is no longer necessary (but code that does it still
works).
This will append site-specific paths to the module search path. On
Unix (including Mac OSX), it starts with sys.prefix and
sys.exec_prefix (if different) and appends
lib/python<version>/site-packages as well as lib/site-python.
On other platforms (such as Windows), it tries each of the
prefixes directly, as well as with lib/site-packages appended. The
resulting directories, if they exist, are appended to sys.path, and
also inspected for path configuration files.
A path configuration file is a file whose name has the form
<package>.pth; its contents are additional directories (one per line)
to be added to sys.path. Non-existing directories (or
non-directories) are never added to sys.path; no directory is added to
sys.path more than once. Blank lines and lines beginning with
'#' are skipped. Lines starting with 'import' are executed.
For example, suppose sys.prefix and sys.exec_prefix are set to
/usr/local and there is a directory /usr/local/lib/python2.5/site-packages
with three subdirectories, foo, bar and spam, and two path
configuration files, foo.pth and bar.pth. Assume foo.pth contains the
following:
# foo package configuration
foo
bar
bletch
and bar.pth contains:
# bar package configuration
bar
Then the following directories are added to sys.path, in this order:
/usr/local/lib/python2.5/site-packages/bar
/usr/local/lib/python2.5/site-packages/foo
Note that bletch is omitted because it doesn't exist; bar precedes foo
because bar.pth comes alphabetically before foo.pth; and spam is
omitted because it is not mentioned in either path configuration file.
After these path manipulations, an attempt is made to import a module
named sitecustomize, which can perform arbitrary additional
site-specific customizations. If this import fails with an
ImportError exception, it is silently ignored.
"""
import sys
import os
import __builtin__
import traceback
# Prefixes for site-packages; add additional prefixes like /usr/local here
PREFIXES = [sys.prefix, sys.exec_prefix]
# Enable per user site-packages directory
# set it to False to disable the feature or True to force the feature
ENABLE_USER_SITE = None
# for distutils.commands.install
# These values are initialized by the getuserbase() and getusersitepackages()
# functions, through the main() function when Python starts.
USER_SITE = None
USER_BASE = None
def makepath(*paths):
dir = os.path.join(*paths)
try:
dir = os.path.abspath(dir)
except OSError:
pass
return dir, os.path.normcase(dir)
def abs__file__():
"""Set each module's __file__ attribute to an absolute path"""
for m in sys.modules.values():
if hasattr(m, '__loader__'):
continue # don't mess with a PEP 302-supplied __file__
try:
m.__file__ = os.path.abspath(m.__file__)
except (AttributeError, OSError):
pass
def removeduppaths():
""" Remove duplicate entries from sys.path along with making them
absolute"""
# This ensures that the initial path provided by the interpreter contains
# only absolute pathnames, even if we're running from the build directory.
L = []
known_paths = set()
for dir in sys.path:
# Filter out duplicate paths (on case-insensitive file systems also
# if they only differ in case); turn relative paths into absolute
# paths.
dir, dircase = makepath(dir)
if not dircase in known_paths:
L.append(dir)
known_paths.add(dircase)
sys.path[:] = L
return known_paths
def _init_pathinfo():
"""Return a set containing all existing directory entries from sys.path"""
d = set()
for dir in sys.path:
try:
if os.path.isdir(dir):
dir, dircase = makepath(dir)
d.add(dircase)
except TypeError:
continue
return d
def addpackage(sitedir, name, known_paths):
"""Process a .pth file within the site-packages directory:
For each line in the file, either combine it with sitedir to a path
and add that to known_paths, or execute it if it starts with 'import '.
"""
if known_paths is None:
_init_pathinfo()
reset = 1
else:
reset = 0
fullname = os.path.join(sitedir, name)
try:
f = open(fullname, "rU")
except IOError:
return
with f:
for n, line in enumerate(f):
if line.startswith("#"):
continue
try:
if line.startswith(("import ", "import\t")):
exec line
continue
line = line.rstrip()
dir, dircase = makepath(sitedir, line)
if not dircase in known_paths and os.path.exists(dir):
sys.path.append(dir)
known_paths.add(dircase)
except Exception as err:
print >>sys.stderr, "Error processing line {:d} of {}:\n".format(
n+1, fullname)
for record in traceback.format_exception(*sys.exc_info()):
for line in record.splitlines():
print >>sys.stderr, ' '+line
print >>sys.stderr, "\nRemainder of file ignored"
break
if reset:
known_paths = None
return known_paths
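# Illustrative example of a path configuration file consumed by addpackage()
# (the package name is made up). Each non-comment line is either a directory
# appended to sys.path or, if it starts with 'import', a statement to execute:
#
#   # mypkg.pth
#   import sys; sys.ps1 = '>>> '
#   mypkg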
def addsitedir(sitedir, known_paths=None):
"""Add 'sitedir' argument to sys.path if missing and handle .pth files in
'sitedir'"""
if known_paths is None:
known_paths = _init_pathinfo()
reset = 1
else:
reset = 0
sitedir, sitedircase = makepath(sitedir)
if not sitedircase in known_paths:
sys.path.append(sitedir) # Add path component
try:
names = os.listdir(sitedir)
except os.error:
return
dotpth = os.extsep + "pth"
names = [name for name in names if name.endswith(dotpth)]
for name in sorted(names):
addpackage(sitedir, name, known_paths)
if reset:
known_paths = None
return known_paths
def check_enableusersite():
"""Check if user site directory is safe for inclusion
The function tests for the command line flag (including environment var),
process uid/gid equal to effective uid/gid.
None: Disabled for security reasons
False: Disabled by user (command line option)
True: Safe and enabled
"""
if sys.flags.no_user_site:
return False
if hasattr(os, "getuid") and hasattr(os, "geteuid"):
# check process uid == effective uid
if os.geteuid() != os.getuid():
return None
if hasattr(os, "getgid") and hasattr(os, "getegid"):
# check process gid == effective gid
if os.getegid() != os.getgid():
return None
return True
def getuserbase():
"""Returns the `user base` directory path.
The `user base` directory can be used to store data. If the global
variable ``USER_BASE`` is not initialized yet, this function will also set
it.
"""
global USER_BASE
if USER_BASE is not None:
return USER_BASE
from sysconfig import get_config_var
USER_BASE = get_config_var('userbase')
return USER_BASE
def getusersitepackages():
"""Returns the user-specific site-packages directory path.
If the global variable ``USER_SITE`` is not initialized yet, this
function will also set it.
"""
global USER_SITE
user_base = getuserbase() # this will also set USER_BASE
if USER_SITE is not None:
return USER_SITE
from sysconfig import get_path
import os
if sys.platform == 'darwin':
from sysconfig import get_config_var
if get_config_var('PYTHONFRAMEWORK'):
USER_SITE = get_path('purelib', 'osx_framework_user')
return USER_SITE
USER_SITE = get_path('purelib', '%s_user' % os.name)
return USER_SITE
def addusersitepackages(known_paths):
"""Add a per user site-package to sys.path
Each user has its own python directory with site-packages in the
home directory.
"""
# get the per user site-package path
# this call will also make sure USER_BASE and USER_SITE are set
user_site = getusersitepackages()
if ENABLE_USER_SITE and os.path.isdir(user_site):
addsitedir(user_site, known_paths)
return known_paths
def getsitepackages():
"""Returns a list containing all global site-packages directories
(and possibly site-python).
For each directory present in the global ``PREFIXES``, this function
will find its `site-packages` subdirectory depending on the system
environment, and will return a list of full paths.
"""
sitepackages = []
seen = set()
for prefix in PREFIXES:
if not prefix or prefix in seen:
continue
seen.add(prefix)
if sys.platform in ('os2emx', 'riscos'):
sitepackages.append(os.path.join(prefix, "Lib", "site-packages"))
elif os.sep == '/':
sitepackages.append(os.path.join(prefix, "lib",
"python" + sys.version[:3],
"site-packages"))
sitepackages.append(os.path.join(prefix, "lib", "site-python"))
else:
sitepackages.append(prefix)
sitepackages.append(os.path.join(prefix, "lib", "site-packages"))
if sys.platform == "darwin":
# for framework builds *only* we add the standard Apple
# locations.
from sysconfig import get_config_var
framework = get_config_var("PYTHONFRAMEWORK")
if framework:
sitepackages.append(
os.path.join("/Library", framework,
sys.version[:3], "site-packages"))
return sitepackages
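# Illustrative result of getsitepackages() for a Unix build with
# sys.prefix == '/usr/local' under Python 2.7:
#
#   ['/usr/local/lib/python2.7/site-packages', '/usr/local/lib/site-python']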
def addsitepackages(known_paths):
"""Add site-packages (and possibly site-python) to sys.path"""
for sitedir in getsitepackages():
if os.path.isdir(sitedir):
addsitedir(sitedir, known_paths)
return known_paths
def setBEGINLIBPATH():
"""The OS/2 EMX port has optional extension modules that do double duty
as DLLs (and must use the .DLL file extension) for other extensions.
The library search path needs to be amended so these will be found
during module import. Use BEGINLIBPATH so that these are at the start
of the library search path.
"""
dllpath = os.path.join(sys.prefix, "Lib", "lib-dynload")
libpath = os.environ['BEGINLIBPATH'].split(';')
if libpath[-1]:
libpath.append(dllpath)
else:
libpath[-1] = dllpath
os.environ['BEGINLIBPATH'] = ';'.join(libpath)
def setquit():
"""Define new builtins 'quit' and 'exit'.
These are objects which make the interpreter exit when called.
The repr of each object contains a hint at how it works.
"""
if os.sep == ':':
eof = 'Cmd-Q'
elif os.sep == '\\':
eof = 'Ctrl-Z plus Return'
else:
eof = 'Ctrl-D (i.e. EOF)'
class Quitter(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return 'Use %s() or %s to exit' % (self.name, eof)
def __call__(self, code=None):
# Shells like IDLE catch the SystemExit, but listen when their
# stdin wrapper is closed.
try:
sys.stdin.close()
except:
pass
raise SystemExit(code)
__builtin__.quit = Quitter('quit')
__builtin__.exit = Quitter('exit')
class _Printer(object):
"""interactive prompt objects for printing the license text, a list of
contributors and the copyright notice."""
MAXLINES = 23
def __init__(self, name, data, files=(), dirs=()):
self.__name = name
self.__data = data
self.__files = files
self.__dirs = dirs
self.__lines = None
def __setup(self):
if self.__lines:
return
data = None
for dir in self.__dirs:
for filename in self.__files:
filename = os.path.join(dir, filename)
try:
fp = file(filename, "rU")
data = fp.read()
fp.close()
break
except IOError:
pass
if data:
break
if not data:
data = self.__data
self.__lines = data.split('\n')
self.__linecnt = len(self.__lines)
def __repr__(self):
self.__setup()
if len(self.__lines) <= self.MAXLINES:
return "\n".join(self.__lines)
else:
return "Type %s() to see the full %s text" % ((self.__name,)*2)
def __call__(self):
self.__setup()
prompt = 'Hit Return for more, or q (and Return) to quit: '
lineno = 0
while 1:
try:
for i in range(lineno, lineno + self.MAXLINES):
print self.__lines[i]
except IndexError:
break
else:
lineno += self.MAXLINES
key = None
while key is None:
key = raw_input(prompt)
if key not in ('', 'q'):
key = None
if key == 'q':
break
def setcopyright():
"""Set 'copyright' and 'credits' in __builtin__"""
__builtin__.copyright = _Printer("copyright", sys.copyright)
if sys.platform[:4] == 'java':
__builtin__.credits = _Printer(
"credits",
"Jython is maintained by the Jython developers (www.jython.org).")
else:
__builtin__.credits = _Printer("credits", """\
Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
for supporting Python development. See www.python.org for more information.""")
here = os.path.dirname(os.__file__)
__builtin__.license = _Printer(
"license", "See https://www.python.org/psf/license/",
["LICENSE.txt", "LICENSE"],
[os.path.join(here, os.pardir), here, os.curdir])
class _Helper(object):
"""Define the builtin 'help'.
This is a wrapper around pydoc.help (with a twist).
"""
def __repr__(self):
return "Type help() for interactive help, " \
"or help(object) for help about object."
def __call__(self, *args, **kwds):
import pydoc
return pydoc.help(*args, **kwds)
def sethelper():
__builtin__.help = _Helper()
def aliasmbcs():
"""On Windows, some default encodings are not provided by Python,
while they are always available as "mbcs" in each locale. Make
them usable by aliasing to "mbcs" in such a case."""
if sys.platform == 'win32':
import locale, codecs
enc = locale.getdefaultlocale()[1]
if enc.startswith('cp'): # "cp***" ?
try:
codecs.lookup(enc)
except LookupError:
import encodings
encodings._cache[enc] = encodings._unknown
encodings.aliases.aliases[enc] = 'mbcs'
def setencoding():
"""Set the string encoding used by the Unicode implementation. The
default is 'ascii', but if you're willing to experiment, you can
change this."""
encoding = "ascii" # Default value set by _PyUnicode_Init()
if 0:
# Enable to support locale aware default string encodings.
import locale
loc = locale.getdefaultlocale()
if loc[1]:
encoding = loc[1]
if 0:
# Enable to switch off string to Unicode coercion and implicit
# Unicode to string conversion.
encoding = "undefined"
if encoding != "ascii":
# On Non-Unicode builds this will raise an AttributeError...
sys.setdefaultencoding(encoding) # Needs Python Unicode build !
def execsitecustomize():
"""Run custom site specific code, if available."""
try:
import sitecustomize
except ImportError:
pass
except Exception:
if sys.flags.verbose:
sys.excepthook(*sys.exc_info())
else:
print >>sys.stderr, \
"'import sitecustomize' failed; use -v for traceback"
def execusercustomize():
"""Run custom user specific code, if available."""
try:
import usercustomize
except ImportError:
pass
except Exception:
if sys.flags.verbose:
sys.excepthook(*sys.exc_info())
else:
print>>sys.stderr, \
"'import usercustomize' failed; use -v for traceback"
def main():
global ENABLE_USER_SITE
# NOTE: Removed for Grow. See explanation in extensions.py.
# abs__file__()
known_paths = removeduppaths()
if ENABLE_USER_SITE is None:
ENABLE_USER_SITE = check_enableusersite()
known_paths = addusersitepackages(known_paths)
known_paths = addsitepackages(known_paths)
if sys.platform == 'os2emx':
setBEGINLIBPATH()
setquit()
setcopyright()
sethelper()
aliasmbcs()
setencoding()
execsitecustomize()
if ENABLE_USER_SITE:
execusercustomize()
# Remove sys.setdefaultencoding() so that users cannot change the
# encoding after initialization. The test for presence is needed when
# this module is run as a script, because this code is executed twice.
if hasattr(sys, "setdefaultencoding"):
del sys.setdefaultencoding
main()
def _script():
help = """\
%s [--user-base] [--user-site]
Without arguments print some useful information
With arguments print the value of USER_BASE and/or USER_SITE separated
by '%s'.
Exit codes with --user-base or --user-site:
0 - user site directory is enabled
1 - user site directory is disabled by user
2 - user site directory is disabled by super user
or for security reasons
>2 - unknown error
"""
args = sys.argv[1:]
if not args:
print "sys.path = ["
for dir in sys.path:
print " %r," % (dir,)
print "]"
print "USER_BASE: %r (%s)" % (USER_BASE,
"exists" if os.path.isdir(USER_BASE) else "doesn't exist")
print "USER_SITE: %r (%s)" % (USER_SITE,
"exists" if os.path.isdir(USER_SITE) else "doesn't exist")
print "ENABLE_USER_SITE: %r" % ENABLE_USER_SITE
sys.exit(0)
buffer = []
if '--user-base' in args:
buffer.append(USER_BASE)
if '--user-site' in args:
buffer.append(USER_SITE)
if buffer:
print os.pathsep.join(buffer)
if ENABLE_USER_SITE:
sys.exit(0)
elif ENABLE_USER_SITE is False:
sys.exit(1)
elif ENABLE_USER_SITE is None:
sys.exit(2)
else:
sys.exit(3)
else:
import textwrap
print textwrap.dedent(help % (sys.argv[0], os.pathsep))
sys.exit(10)
if __name__ == '__main__':
_script()
|
[] |
[] |
[
"BEGINLIBPATH"
] |
[]
|
["BEGINLIBPATH"]
|
python
| 1 | 0 | |
mahotas/io/freeimage.py
|
import sys
import os
import ctypes
import ctypes.util
import numpy as np
_API = {
'FreeImage_AllocateT': (
ctypes.c_void_p,
[ctypes.c_int, # type
ctypes.c_int, # width
ctypes.c_int, # height
ctypes.c_int, # bpp
ctypes.c_uint, # red_mask
ctypes.c_uint, # green_mask
ctypes.c_uint]), # blue_mask
'FreeImage_Save': (
ctypes.c_int,
[ctypes.c_int, # type
ctypes.c_void_p, # bitmap
ctypes.c_char_p, # filename
ctypes.c_int]), # flags
'FreeImage_SetOutputMessage': (
None,
[ctypes.c_void_p]), # callback
'FreeImage_ConvertToGreyscale': (
ctypes.c_void_p, # FIBITMAP * new_bitmap
[ctypes.c_void_p]), # FIBITMAP* bitmap
'FreeImage_GetFIFFromFilename': (
ctypes.c_int, # FREE_IMAGE_FORMAT
[ctypes.c_char_p]), # const char* filename
'FreeImage_IsLittleEndian': (
ctypes.c_int, # BOOL
[]),
'FreeImage_FIFSupportsExportBPP': (
ctypes.c_int, # BOOL
[ctypes.c_int, # FREE_IMAGE_FORMAT format
ctypes.c_int]), # int bpp
'FreeImage_FIFSupportsExportType': (
ctypes.c_int, # BOOL
[ctypes.c_int, # FREE_IMAGE_FORMAT fif
ctypes.c_int]), # FREE_IMAGE_TYPE type
'FreeImage_Load': (
ctypes.c_void_p,
[ctypes.c_int, ctypes.c_char_p, ctypes.c_int]),
'FreeImage_Unload': (
None,
[ctypes.c_void_p]),
'FreeImage_GetWidth': (
ctypes.c_uint,
[ctypes.c_void_p]),
'FreeImage_GetHeight': (
ctypes.c_uint,
[ctypes.c_void_p]),
'FreeImage_GetImageType': (
ctypes.c_uint,
[ctypes.c_void_p]),
'FreeImage_GetFileTypeFromMemory': (
ctypes.c_int,
[ctypes.c_void_p, ctypes.c_int]),
'FreeImage_GetFileType': (
ctypes.c_int,
[ctypes.c_char_p, ctypes.c_int]),
'FreeImage_GetBPP': (
ctypes.c_uint,
[ctypes.c_void_p]),
'FreeImage_GetPitch': (
ctypes.c_uint,
[ctypes.c_void_p]),
'FreeImage_OpenMultiBitmap': (
ctypes.c_void_p, # FIMULTIBITMAP*
[ctypes.c_int, # FREE_IMAGE_FORMAT format
ctypes.c_char_p, # filename
ctypes.c_int, # BOOL create_new
ctypes.c_int, # BOOL read_only
ctypes.c_int, # BOOL keep_cache_in_memory
ctypes.c_int]), # int flags
'FreeImage_GetPageCount': (
ctypes.c_int,
[ctypes.c_void_p]),
'FreeImage_AppendPage': (
None,
[ctypes.c_void_p, # FIMULTIBITMAP*
ctypes.c_void_p]), # BITMAP
'FreeImage_LockPage': (
ctypes.c_void_p, # FIBITMAP*
[ctypes.c_void_p, # FIMULTIBITMAP
ctypes.c_int]), # int page
'FreeImage_UnlockPage': (
None,
[ctypes.c_void_p, # FIMULTIBITMAP*
ctypes.c_void_p, # FIBITMAP* data
ctypes.c_int]), # BOOL changed
'FreeImage_CloseMultiBitmap': (
ctypes.c_int, # BOOL
[ctypes.c_void_p, # FIMULTIBITMAP* bitmap
ctypes.c_int]), # int flags
'FreeImage_GetBits': (
ctypes.c_void_p,
[ctypes.c_void_p]),
'FreeImage_OpenMemory': (
ctypes.c_void_p,
[ctypes.c_void_p, ctypes.c_uint32]),
'FreeImage_AcquireMemory': (
ctypes.c_int,
[ctypes.c_void_p, ctypes.POINTER(ctypes.c_void_p),
ctypes.POINTER(ctypes.c_int)]),
'FreeImage_CloseMemory': (
None,
[ctypes.c_void_p]),
'FreeImage_LoadFromMemory': (
ctypes.c_void_p,
[ctypes.c_int, ctypes.c_void_p, ctypes.c_int]),
'FreeImage_SaveToMemory': (
ctypes.c_int,
[ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_int]),
}
class _ctypes_wrapper(object):
pass
# Albert's ctypes pattern
def _register_api(lib, api):
nlib = _ctypes_wrapper()
for f, (restype, argtypes) in api.items():
try:
func = getattr(lib, f)
func.restype = restype
func.argtypes = argtypes
setattr(nlib, f, func)
except Exception:
def error_raise(*args):
raise RuntimeError(
'mahotas.freeimage: Function `%s` not found in your'
' version of FreeImage. It might be an older version' % f)
setattr(nlib, f, error_raise)
return nlib
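# Illustrative sketch of the registration pattern above: each _API entry pins
# the ctypes signature of one FreeImage symbol, so a call such as
#
#   width = _FI.FreeImage_GetWidth(bitmap)
#
# goes through a function whose restype/argtypes were set from the table.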
if sys.platform == 'win32':
def _load_library(dllname, loadfunction, dllpaths=('', )):
"""Load a DLL via ctypes load function. Return None on failure.
Try loading the DLL from the current package directory first,
then from the Windows DLL search path.
"""
try:
dllpaths = (os.path.abspath(os.path.dirname(__file__)),
) + dllpaths
except NameError:
pass # no __file__ attribute on PyPy and some frozen distributions
for path in dllpaths:
if path:
# temporarily add the path to the PATH environment variable
# so Windows can find additional DLL dependencies.
try:
oldenv = os.environ['PATH']
os.environ['PATH'] = path + ';' + oldenv
except KeyError:
oldenv = None
try:
return loadfunction(os.path.join(path, dllname))
except (WindowsError, OSError):
pass
finally:
if path and oldenv is not None:
os.environ['PATH'] = oldenv
return None
_FI = _load_library('FreeImage.dll', ctypes.windll.LoadLibrary)
if not _FI:
raise OSError("mahotas.freeimage: could not find FreeImage.dll")
else:
libname = ctypes.util.find_library('freeimage')
if libname:
_FI = ctypes.CDLL(libname)
else:
_FI = None
_lib_dirs = os.environ.get('LD_LIBRARY_PATH', '').split(':')
_lib_dirs = [_f for _f in _lib_dirs if _f]
_lib_dirs.extend([
os.path.dirname(__file__),
'/lib',
'/usr/lib',
'/usr/local/lib',
'/opt/local/lib',
])
_possible_filenames = (
'libfreeimage',
'libFreeImage',
)
for d in _lib_dirs:
for libname in _possible_filenames:
try:
_FI = np.ctypeslib.load_library(libname, d)
except OSError:
pass
else:
break
if _FI is not None:
break
if not _FI:
raise OSError(
'mahotas.freeimage: could not find libFreeImage in any of the'
'following directories: \'%s\'' % '\', \''.join(_lib_dirs))
_FI = _register_api(_FI, _API)
if sys.platform == 'win32':
_functype = ctypes.WINFUNCTYPE
else:
_functype = ctypes.CFUNCTYPE
@_functype(None, ctypes.c_int, ctypes.c_char_p)
def _error_handler(fif, message):
raise RuntimeError('mahotas.freeimage: FreeImage error: %s' % message)
_FI.FreeImage_SetOutputMessage(_error_handler)
class FI_TYPES(object):
FIT_UNKNOWN = 0
FIT_BITMAP = 1
FIT_UINT16 = 2
FIT_INT16 = 3
FIT_UINT32 = 4
FIT_INT32 = 5
FIT_FLOAT = 6
FIT_DOUBLE = 7
FIT_COMPLEX = 8
FIT_RGB16 = 9
FIT_RGBA16 = 10
FIT_RGBF = 11
FIT_RGBAF = 12
dtypes = {
FIT_BITMAP: np.uint8,
FIT_UINT16: np.uint16,
FIT_INT16: np.int16,
FIT_UINT32: np.uint32,
FIT_INT32: np.int32,
FIT_FLOAT: np.float32,
FIT_DOUBLE: np.float64,
FIT_COMPLEX: np.complex128,
FIT_RGB16: np.uint16,
FIT_RGBA16: np.uint16,
FIT_RGBF: np.float32,
FIT_RGBAF: np.float32}
fi_types = {
(np.uint8, 1): FIT_BITMAP,
(np.uint8, 3): FIT_BITMAP,
(np.uint8, 4): FIT_BITMAP,
(np.uint16, 1): FIT_UINT16,
(np.int16, 1): FIT_INT16,
(np.uint32, 1): FIT_UINT32,
(np.int32, 1): FIT_INT32,
(np.float32, 1): FIT_FLOAT,
(np.float64, 1): FIT_DOUBLE,
(np.complex128, 1): FIT_COMPLEX,
(np.uint16, 3): FIT_RGB16,
(np.uint16, 4): FIT_RGBA16,
(np.float32, 3): FIT_RGBF,
(np.float32, 4): FIT_RGBAF}
extra_dims = {
FIT_UINT16: [],
FIT_INT16: [],
FIT_UINT32: [],
FIT_INT32: [],
FIT_FLOAT: [],
FIT_DOUBLE: [],
FIT_COMPLEX: [],
FIT_RGB16: [3],
FIT_RGBA16: [4],
FIT_RGBF: [3],
FIT_RGBAF: [4]}
@classmethod
def get_type_and_shape(cls, bitmap):
w = _FI.FreeImage_GetWidth(bitmap)
h = _FI.FreeImage_GetHeight(bitmap)
fi_type = _FI.FreeImage_GetImageType(bitmap)
if not fi_type:
raise ValueError('mahotas.freeimage: unknown image pixel type')
dtype = cls.dtypes[fi_type]
if fi_type == cls.FIT_BITMAP:
bpp = _FI.FreeImage_GetBPP(bitmap)
if bpp == 1:
# This is a special case
return 'bit', None
elif bpp == 8:
extra_dims = []
elif bpp == 16:
extra_dims = []
dtype = np.uint16
elif bpp == 24:
extra_dims = [3]
elif bpp == 32:
extra_dims = [4]
else:
raise ValueError(
'mahotas.freeimage: cannot convert %d BPP bitmap' % bpp)
else:
extra_dims = cls.extra_dims[fi_type]
return np.dtype(dtype), extra_dims + [w, h]
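# Illustrative mappings implied by the tables above:
#   a 1-channel float32 array -> FIT_FLOAT
#   a 3-channel uint8 array   -> FIT_BITMAP (stored at 24 bpp)
#   a 4-channel uint16 array  -> FIT_RGBA16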
class IO_FLAGS(object):
#Bmp
BMP_DEFAULT = 0
BMP_SAVE_RLE = 1
#Png
PNG_DEFAULT = 0
PNG_IGNOREGAMMA = 1
#Gif
GIF_DEFAULT = 0
GIF_LOAD256 = 1
GIF_PLAYBACK = 2
#Ico
ICO_DEFAULT = 0
ICO_MAKEALPHA = 1
#Tiff
TIFF_DEFAULT = 0
TIFF_CMYK = 0x0001
TIFF_NONE = 0x0800
TIFF_PACKBITS = 0x0100
TIFF_DEFLATE = 0x0200
TIFF_ADOBE_DEFLATE = 0x0400
TIFF_CCITTFAX3 = 0x1000
TIFF_CCITTFAX4 = 0x2000
TIFF_LZW = 0x4000
TIFF_JPEG = 0x8000
#Jpeg
JPEG_DEFAULT = 0
JPEG_FAST = 1
JPEG_ACCURATE = 2
JPEG_QUALITYSUPERB = 0x80
JPEG_QUALITYGOOD = 0x100
JPEG_QUALITYNORMAL = 0x200
JPEG_QUALITYAVERAGE = 0x400
JPEG_QUALITYBAD = 0x800
JPEG_CMYK = 0x1000
JPEG_PROGRESSIVE = 0x2000
#Others...
CUT_DEFAULT = 0
DDS_DEFAULT = 0
HDR_DEFAULT = 0
IFF_DEFAULT = 0
KOALA_DEFAULT = 0
LBM_DEFAULT = 0
MNG_DEFAULT = 0
PCD_DEFAULT = 0
PCD_BASE = 1
PCD_BASEDIV4 = 2
PCD_BASEDIV16 = 3
PCX_DEFAULT = 0
PNM_DEFAULT = 0
PNM_SAVE_RAW = 0
PNM_SAVE_ASCII = 1
PSD_DEFAULT = 0
RAS_DEFAULT = 0
TARGA_DEFAULT = 0
TARGA_LOAD_RGB888 = 1
WBMP_DEFAULT = 0
XBM_DEFAULT = 0
class METADATA_MODELS(object):
FIMD_NODATA = -1
FIMD_COMMENTS = 0
FIMD_EXIF_MAIN = 1
FIMD_EXIF_EXIF = 2
FIMD_EXIF_GPS = 3
FIMD_EXIF_MAKERNOTE = 4
FIMD_EXIF_INTEROP = 5
FIMD_IPTC = 6
FIMD_XMP = 7
FIMD_GEOTIFF = 8
FIMD_ANIMATION = 9
FIMD_CUSTOM = 10
class FI_FORMAT(object):
FIF_UNKNOWN = -1
FIF_BMP = 0
FIF_ICO = 1
FIF_JPEG = 2
FIF_JNG = 3
FIF_KOALA = 4
FIF_LBM = 5
FIF_IFF = FIF_LBM
FIF_MNG = 6
FIF_PBM = 7
FIF_PBMRAW = 8
FIF_PCD = 9
FIF_PCX = 10
FIF_PGM = 11
FIF_PGMRAW = 12
FIF_PNG = 13
FIF_PPM = 14
FIF_PPMRAW = 15
FIF_RAS = 16
FIF_TARGA = 17
FIF_TIFF = 18
FIF_WBMP = 19
FIF_PSD = 20
FIF_CUT = 21
FIF_XBM = 22
FIF_XPM = 23
FIF_DDS = 24
FIF_GIF = 25
FIF_HDR = 26
FIF_FAXG3 = 27
FIF_SGI = 28
FIF_EXR = 29
FIF_J2K = 30
FIF_JP2 = 31
FIF_PFM = 32
FIF_PICT = 33
FIF_RAW = 34
def read(filename, flags=0):
"""Read an image to a numpy array of shape (width, height) for
greyscale images, or shape (width, height, nchannels) for RGB or
RGBA images.
"""
bitmap = _read_bitmap(filename, flags)
try:
return _array_from_bitmap(bitmap)
finally:
_FI.FreeImage_Unload(bitmap)
def read_multipage(filename, flags=0):
"""Read a multipage image to a list of numpy arrays, where each
array is of shape (width, height) for greyscale images, or shape
(nchannels, width, height) for RGB or RGBA images.
"""
ftype = _FI.FreeImage_GetFileType(_bytestr(filename), 0)
if ftype == -1:
raise ValueError(
'mahotas.freeimage: cannot determine type of file %s'%filename)
create_new = False
read_only = True
keep_cache_in_memory = True
multibitmap = _FI.FreeImage_OpenMultiBitmap(
ftype, _bytestr(filename), create_new, read_only, keep_cache_in_memory,
flags)
if not multibitmap:
raise ValueError(
'mahotas.freeimage: could not open %s'
' as multi-page image.' % filename)
try:
pages = _FI.FreeImage_GetPageCount(multibitmap)
arrays = []
for i in range(pages):
bitmap = _FI.FreeImage_LockPage(multibitmap, i)
try:
arrays.append(_array_from_bitmap(bitmap))
finally:
_FI.FreeImage_UnlockPage(multibitmap, bitmap, False)
return arrays
finally:
_FI.FreeImage_CloseMultiBitmap(multibitmap, 0)
def _read_bitmap(filename, flags):
"""Load a file to a FreeImage bitmap pointer"""
ftype = _FI.FreeImage_GetFileType(_bytestr(filename), 0)
if ftype == -1:
raise ValueError(
'mahotas.freeimage: cannot determine type of file %s' % filename)
bitmap = _FI.FreeImage_Load(ftype, _bytestr(filename), flags)
if not bitmap:
raise ValueError(
'mahotas.freeimage: could not load file %s' % filename)
return bitmap
def _wrap_bitmap_bits_in_array(bitmap, shape, dtype):
"""Return an ndarray view on the data in a FreeImage bitmap. Only
valid for as long as the bitmap is loaded (if single page) / locked
in memory (if multipage).
"""
pitch = _FI.FreeImage_GetPitch(bitmap)
itemsize = dtype.itemsize
if len(shape) == 3:
strides = (itemsize, shape[0]*itemsize, pitch)
else:
strides = (itemsize, pitch)
bits = _FI.FreeImage_GetBits(bitmap)
class DummyArray(object):
__array_interface__ = {
'data': (bits, False),
'strides': strides,
'typestr': dtype.str,
'shape': tuple(shape),
'version': 3,
}
return np.array(DummyArray(), copy=False)
def _array_from_bitmap(bitmap):
"""Convert a FreeImage bitmap pointer to a numpy array
"""
dtype, shape = FI_TYPES.get_type_and_shape(bitmap)
if type(dtype) == str and dtype == 'bit':
bitmap8 = _FI.FreeImage_ConvertToGreyscale(bitmap)
try:
return _array_from_bitmap(bitmap8).astype(bool)
finally:
_FI.FreeImage_Unload(bitmap8)
array = _wrap_bitmap_bits_in_array(bitmap, shape, dtype)
# swizzle the color components and flip the scanlines to go from
# FreeImage's BGR[A] and upside-down internal memory format to something
# more normal
def n(arr):
return arr[..., ::-1].T
if (len(shape) == 3 and _FI.FreeImage_IsLittleEndian() and
dtype.type == np.uint8):
b = n(array[0])
g = n(array[1])
r = n(array[2])
if shape[0] == 3:
return np.dstack((r, g, b))
elif shape[0] == 4:
a = n(array[3])
return np.dstack((r, g, b, a))
else:
raise ValueError(
'mahotas.freeimage: cannot handle images of'
' this shape (%s)' % shape)
# We need to copy because array does *not* own its memory
# after bitmap is freed.
return n(array).copy()
def string_tag(bitmap, key, model=METADATA_MODELS.FIMD_EXIF_MAIN):
"""Retrieve the value of a metadata tag with the given string key as a
string."""
# Note: FreeImage_GetMetadata, FreeImage_GetTagLength and
# FreeImage_GetTagValue are not declared in the _API table above, so this
# helper only works if those symbols are registered as well.
tag = ctypes.c_int()
if not _FI.FreeImage_GetMetadata(model, bitmap, _bytestr(key),
ctypes.byref(tag)):
return
char_ptr = ctypes.c_char * _FI.FreeImage_GetTagLength(tag)
# `raw` is an attribute of a ctypes char array, not a method
return char_ptr.from_address(_FI.FreeImage_GetTagValue(tag)).raw
def write(array, filename, flags=0):
"""Write a (width, height) or (width, height, nchannels) array to
a greyscale, RGB, or RGBA image, with file type deduced from the
filename.
"""
filename = _bytestr(filename)
ftype = _FI.FreeImage_GetFIFFromFilename(filename)
if ftype == -1:
raise ValueError(
'mahotas.freeimage: cannot determine type for %s' % filename)
bitmap, fi_type = _array_to_bitmap(array)
try:
if fi_type == FI_TYPES.FIT_BITMAP:
can_write = _FI.FreeImage_FIFSupportsExportBPP(ftype,
_FI.FreeImage_GetBPP(bitmap))
else:
can_write = _FI.FreeImage_FIFSupportsExportType(ftype, fi_type)
if not can_write:
raise TypeError(
'mahotas.freeimage: cannot save image of this type (%s) '
'to this file type' % array.dtype)
res = _FI.FreeImage_Save(ftype, bitmap, filename, flags)
if not res:
raise RuntimeError(
'mahotas.freeimage: could not save image properly.')
finally:
_FI.FreeImage_Unload(bitmap)
def write_multipage(arrays, filename, flags=0, keep_cache_in_memory=True):
"""Write a list of (width, height) or (nchannels, width, height)
arrays to a multipage greyscale, RGB, or RGBA image, with file type
deduced from the filename.
"""
ftype = _FI.FreeImage_GetFIFFromFilename(_bytestr(filename))
if ftype == -1:
raise ValueError(
'mahotas.freeimage: cannot determine type of file %s' % filename)
create_new = True
read_only = False
multibitmap = _FI.FreeImage_OpenMultiBitmap(
ftype, _bytestr(filename), create_new, read_only,
keep_cache_in_memory, 0)
if not multibitmap:
raise ValueError(
'mahotas.freeimage: could not open %s '
'for writing multi-page image.' % filename)
try:
for array in arrays:
bitmap, _ = _array_to_bitmap(array)
_FI.FreeImage_AppendPage(multibitmap, bitmap)
_FI.FreeImage_Unload(bitmap)
finally:
_FI.FreeImage_CloseMultiBitmap(multibitmap, flags)
def _array_to_bitmap(array):
"""Allocate a FreeImage bitmap and copy a numpy array into it.
"""
shape = array.shape
dtype = array.dtype
r, c = shape[:2]
if len(shape) == 2:
n_channels = 1
w_shape = (c, r)
elif len(shape) == 3:
n_channels = shape[2]
w_shape = (n_channels, c, r)
else:
raise ValueError(
'mahotas.freeimage: can only handle 2- or 3-dimensional arrays')
try:
fi_type = FI_TYPES.fi_types[(dtype.type, n_channels)]
except KeyError:
raise ValueError(
'mahotas.freeimage: cannot write arrays of given type and shape.')
itemsize = array.dtype.itemsize
bpp = 8 * itemsize * n_channels
bitmap = _FI.FreeImage_AllocateT(fi_type, c, r, bpp, 0, 0, 0)
if not bitmap:
raise RuntimeError(
'mahotas.freeimage: could not allocate image for storage')
try:
def n(arr): # normalise to freeimage's in-memory format
return arr.T[:, ::-1]
wrapped_array = _wrap_bitmap_bits_in_array(bitmap, w_shape, dtype)
# swizzle the color components and flip the scanlines to go to
# FreeImage's BGR[A] and upside-down internal memory format
if (len(shape) == 3 and _FI.FreeImage_IsLittleEndian() and
dtype.type == np.uint8):
wrapped_array[0] = n(array[:, :, 2])
wrapped_array[1] = n(array[:, :, 1])
wrapped_array[2] = n(array[:, :, 0])
if shape[2] == 4:
wrapped_array[3] = n(array[:, :, 3])
else:
wrapped_array[:] = n(array)
return bitmap, fi_type
except:
_FI.FreeImage_Unload(bitmap)
raise
def imsavetoblob(img, filetype, flags=0):
"""
s = imsavetoblob(img, filetype, flags=0)
Save `img` to a `str` object
Parameters
----------
img : ndarray
input image
filetype : str or integer
A file name like string, used only to determine the file type.
Alternatively, an integer flag (from FI_FORMAT).
flags : integer, optional
Returns
-------
s : str
byte representation of `img` in format `filetype`
"""
if type(filetype) == str:
ftype = _FI.FreeImage_GetFIFFromFilename(_bytestr(filetype))
else:
ftype = filetype
# create the bitmap and memory stream before the try block so that `mem`
# is always bound when the finally clause runs
bitmap, fi_type = _array_to_bitmap(img)
mem = _FI.FreeImage_OpenMemory(0, 0)
try:
if not _FI.FreeImage_SaveToMemory(ftype, bitmap, mem, flags):
raise IOError(
'mahotas.freeimage.imsavetoblob: Cannot save to memory.')
data = ctypes.c_void_p()
size = ctypes.c_int()
_FI.FreeImage_AcquireMemory(
mem, ctypes.byref(data), ctypes.byref(size))
return ctypes.string_at(data, size)
finally:
_FI.FreeImage_CloseMemory(mem)
def imreadfromblob(blob, ftype=None, as_grey=False):
"""
arr = imreadfromblob(blob, ftype={auto}, as_grey=False)
Read an image from a blob (string)
Parameters
----------
blob : str
Input
ftype : integer, optional
input type. By default, inferred from the image data.
as_grey : boolean, optional
whether to convert colour images to grey scale
Returns
-------
arr : ndarray
"""
# open the memory stream before the try block so that `mem` is always
# bound when the finally clause runs
mem = _FI.FreeImage_OpenMemory(blob, len(blob))
try:
if ftype is None:
ftype = _FI.FreeImage_GetFileTypeFromMemory(mem, 0)
bitmap = _FI.FreeImage_LoadFromMemory(ftype, mem, 0)
img = _array_from_bitmap(bitmap)
if as_grey and len(img.shape) == 3:
# these are the values that wikipedia says are typical
transform = np.array([0.30, 0.59, 0.11])
return np.dot(img, transform)
return img
finally:
_FI.FreeImage_CloseMemory(mem)
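# Illustrative round trip through memory (assumes `img` is a uint8 ndarray):
#
#   blob = imsavetoblob(img, 'out.png')   # PNG-encoded bytes
#   img2 = imreadfromblob(blob)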
def imread(filename, as_grey=False):
"""
img = imread(filename, as_grey=False)
Reads an image from file `filename`
Implementation is based on FreeImage.
Parameters
----------
filename : file name
as_grey : Whether to convert to grey scale image (default: no)
Returns
-------
img : ndarray
"""
img = read(filename)
if as_grey and len(img.shape) == 3:
# these are the values that wikipedia says are typical
transform = np.array([0.30, 0.59, 0.11])
return np.dot(img, transform)
return img
def imsave(filename, img):
"""
imsave(filename, img)
Save image to disk
Image type is inferred from filename
Implementation is based on FreeImage.
Parameters
----------
filename : file name
img : image to be saved as nd array
"""
write(img, filename)
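# Illustrative usage sketch (assumes 'photo.png' exists on disk):
#
#   img = imread('photo.png')
#   grey = imread('photo.png', as_grey=True)
#   imsave('copy.png', img)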
if sys.version_info[0] > 2:
import locale
_, _encoding = locale.getdefaultlocale()
if _encoding is None:
_encoding = 'UTF-8'
_bytestr = lambda x: x.encode(_encoding)
else:
_bytestr = str
|
[] |
[] |
[
"LD_LIBRARY_PATH",
"PATH"
] |
[]
|
["LD_LIBRARY_PATH", "PATH"]
|
python
| 2 | 0 | |
manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'profielp.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
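# Illustrative usage from the project root (command names are standard Django
# management commands):
#   python manage.py migrate
#   python manage.py runserver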
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
proxy/proxy.go
|
// Package proxy is a cli proxy
package proxy
import (
"os"
"strings"
"time"
"github.com/go-acme/lego/v3/providers/dns/cloudflare"
"github.com/micro/cli/v2"
"github.com/micro/go-micro/v2"
"github.com/micro/go-micro/v2/api/server/acme"
"github.com/micro/go-micro/v2/api/server/acme/autocert"
"github.com/micro/go-micro/v2/api/server/acme/certmagic"
"github.com/micro/go-micro/v2/auth"
bmem "github.com/micro/go-micro/v2/broker/memory"
"github.com/micro/go-micro/v2/client"
mucli "github.com/micro/go-micro/v2/client"
"github.com/micro/go-micro/v2/config/cmd"
log "github.com/micro/go-micro/v2/logger"
"github.com/micro/go-micro/v2/proxy"
"github.com/micro/go-micro/v2/proxy/grpc"
"github.com/micro/go-micro/v2/proxy/http"
"github.com/micro/go-micro/v2/proxy/mucp"
"github.com/micro/go-micro/v2/registry"
rmem "github.com/micro/go-micro/v2/registry/memory"
"github.com/micro/go-micro/v2/router"
rs "github.com/micro/go-micro/v2/router/service"
"github.com/micro/go-micro/v2/server"
sgrpc "github.com/micro/go-micro/v2/server/grpc"
cfstore "github.com/micro/go-micro/v2/store/cloudflare"
"github.com/micro/go-micro/v2/sync/lock/memory"
"github.com/micro/go-micro/v2/util/mux"
"github.com/micro/go-micro/v2/util/wrapper"
"github.com/micro/micro/v2/internal/helper"
)
var (
// Name of the proxy
Name = "go.micro.proxy"
// The address of the proxy
Address = ":8081"
// the proxy protocol
Protocol = "grpc"
// The endpoint host to route to
Endpoint string
// ACME (Cert management)
ACMEProvider = "autocert"
ACMEChallengeProvider = "cloudflare"
ACMECA = acme.LetsEncryptProductionCA
)
func run(ctx *cli.Context, srvOpts ...micro.Option) {
log.Init(log.WithFields(map[string]interface{}{"service": "proxy"}))
// because MICRO_PROXY_ADDRESS is used internally by the go-micro/client
// we need to unset it so we don't end up calling ourselves infinitely
os.Unsetenv("MICRO_PROXY_ADDRESS")
if len(ctx.String("server_name")) > 0 {
Name = ctx.String("server_name")
}
if len(ctx.String("address")) > 0 {
Address = ctx.String("address")
}
if len(ctx.String("endpoint")) > 0 {
Endpoint = ctx.String("endpoint")
}
if len(ctx.String("protocol")) > 0 {
Protocol = ctx.String("protocol")
}
if len(ctx.String("acme_provider")) > 0 {
ACMEProvider = ctx.String("acme_provider")
}
// Init plugins
for _, p := range Plugins() {
p.Init(ctx)
}
// service opts
srvOpts = append(srvOpts, micro.Name(Name))
if i := time.Duration(ctx.Int("register_ttl")); i > 0 {
srvOpts = append(srvOpts, micro.RegisterTTL(i*time.Second))
}
if i := time.Duration(ctx.Int("register_interval")); i > 0 {
srvOpts = append(srvOpts, micro.RegisterInterval(i*time.Second))
}
// set the context
var popts []proxy.Option
// create new router
var r router.Router
routerName := ctx.String("router")
routerAddr := ctx.String("router_address")
ropts := []router.Option{
router.Id(server.DefaultId),
router.Client(client.DefaultClient),
router.Address(routerAddr),
router.Registry(registry.DefaultRegistry),
}
// check if we need to use the router service
switch {
case routerName == "go.micro.router":
r = rs.NewRouter(ropts...)
case routerName == "service":
r = rs.NewRouter(ropts...)
case len(routerAddr) > 0:
r = rs.NewRouter(ropts...)
default:
r = router.NewRouter(ropts...)
}
// start the router
if err := r.Start(); err != nil {
log.Errorf("Proxy error starting router: %s", err)
os.Exit(1)
}
popts = append(popts, proxy.WithRouter(r))
// new proxy
var p proxy.Proxy
var srv server.Server
// set endpoint
if len(Endpoint) > 0 {
switch {
case strings.HasPrefix(Endpoint, "grpc://"):
ep := strings.TrimPrefix(Endpoint, "grpc://")
popts = append(popts, proxy.WithEndpoint(ep))
p = grpc.NewProxy(popts...)
case strings.HasPrefix(Endpoint, "http://"):
// TODO: strip prefix?
popts = append(popts, proxy.WithEndpoint(Endpoint))
p = http.NewProxy(popts...)
default:
// TODO: strip prefix?
popts = append(popts, proxy.WithEndpoint(Endpoint))
p = mucp.NewProxy(popts...)
}
}
serverOpts := []server.Option{
server.Address(Address),
server.Registry(rmem.NewRegistry()),
server.Broker(bmem.NewBroker()),
}
// enable acme will create a net.Listener which
if ctx.Bool("enable_acme") {
var ap acme.Provider
switch ACMEProvider {
case "autocert":
ap = autocert.NewProvider()
case "certmagic":
if ACMEChallengeProvider != "cloudflare" {
log.Fatal("The only implemented DNS challenge provider is cloudflare")
}
apiToken, accountID := os.Getenv("CF_API_TOKEN"), os.Getenv("CF_ACCOUNT_ID")
kvID := os.Getenv("KV_NAMESPACE_ID")
if len(apiToken) == 0 || len(accountID) == 0 {
log.Fatal("env variables CF_API_TOKEN and CF_ACCOUNT_ID must be set")
}
if len(kvID) == 0 {
log.Fatal("env var KV_NAMESPACE_ID must be set to your cloudflare workers KV namespace ID")
}
cloudflareStore := cfstore.NewStore(
cfstore.Token(apiToken),
cfstore.Account(accountID),
cfstore.Namespace(kvID),
cfstore.CacheTTL(time.Minute),
)
storage := certmagic.NewStorage(
memory.NewLock(),
cloudflareStore,
)
config := cloudflare.NewDefaultConfig()
config.AuthToken = apiToken
config.ZoneToken = apiToken
challengeProvider, err := cloudflare.NewDNSProviderConfig(config)
if err != nil {
log.Fatal(err.Error())
}
// define the provider
ap = certmagic.NewProvider(
acme.AcceptToS(true),
acme.CA(ACMECA),
acme.Cache(storage),
acme.ChallengeProvider(challengeProvider),
acme.OnDemand(false),
)
default:
log.Fatalf("Unsupported acme provider: %s\n", ACMEProvider)
}
// generate the tls config
config, err := ap.TLSConfig(helper.ACMEHosts(ctx)...)
if err != nil {
log.Fatalf("Failed to generate acme tls config: %v", err)
}
// set the tls config
serverOpts = append(serverOpts, server.TLSConfig(config))
// enable tls will leverage tls certs and generate a tls.Config
} else if ctx.Bool("enable_tls") {
// get certificates from the context
config, err := helper.TLSConfig(ctx)
if err != nil {
log.Fatal(err)
return
}
serverOpts = append(serverOpts, server.TLSConfig(config))
}
// add auth wrapper to server
if ctx.IsSet("auth") {
a, ok := cmd.DefaultAuths[ctx.String("auth")]
if !ok {
log.Fatalf("%v is not a valid auth", ctx.String("auth"))
return
}
var authOpts []auth.Option
if ctx.IsSet("auth_exclude") {
authOpts = append(authOpts, auth.Exclude(ctx.StringSlice("auth_exclude")...))
}
if ctx.IsSet("auth_public_key") {
authOpts = append(authOpts, auth.PublicKey(ctx.String("auth_public_key")))
}
if ctx.IsSet("auth_private_key") {
authOpts = append(authOpts, auth.PrivateKey(ctx.String("auth_private_key")))
}
authFn := func() auth.Auth { return a(authOpts...) }
authOpt := server.WrapHandler(wrapper.AuthHandler(authFn))
serverOpts = append(serverOpts, authOpt)
}
// set proxy
if p == nil && len(Protocol) > 0 {
switch Protocol {
case "http":
p = http.NewProxy(popts...)
// TODO: http server
case "mucp":
popts = append(popts, proxy.WithClient(mucli.NewClient()))
p = mucp.NewProxy(popts...)
serverOpts = append(serverOpts, server.WithRouter(p))
srv = server.NewServer(serverOpts...)
default:
p = mucp.NewProxy(popts...)
serverOpts = append(serverOpts, server.WithRouter(p))
srv = sgrpc.NewServer(serverOpts...)
}
}
if len(Endpoint) > 0 {
log.Infof("Proxy [%s] serving endpoint: %s", p.String(), Endpoint)
} else {
log.Infof("Proxy [%s] serving protocol: %s", p.String(), Protocol)
}
// new service
service := micro.NewService(srvOpts...)
// create a new proxy muxer which includes the debug handler
muxer := mux.New(Name, p)
// set the router
service.Server().Init(
server.WithRouter(muxer),
)
// Start the proxy server
if err := srv.Start(); err != nil {
log.Fatal(err)
}
// Run internal service
if err := service.Run(); err != nil {
log.Fatal(err)
}
// Stop the server
if err := srv.Stop(); err != nil {
log.Fatal(err)
}
}
func Commands(options ...micro.Option) []*cli.Command {
command := &cli.Command{
Name: "proxy",
Usage: "Run the service proxy",
Flags: []cli.Flag{
&cli.StringFlag{
Name: "router",
Usage: "Set the router to use e.g default, go.micro.router",
EnvVars: []string{"MICRO_ROUTER"},
},
&cli.StringFlag{
Name: "router_address",
Usage: "Set the router address",
EnvVars: []string{"MICRO_ROUTER_ADDRESS"},
},
&cli.StringFlag{
Name: "address",
Usage: "Set the proxy http address e.g 0.0.0.0:8081",
EnvVars: []string{"MICRO_PROXY_ADDRESS"},
},
&cli.StringFlag{
Name: "protocol",
Usage: "Set the protocol used for proxying e.g mucp, grpc, http",
EnvVars: []string{"MICRO_PROXY_PROTOCOL"},
},
&cli.StringFlag{
Name: "endpoint",
Usage: "Set the endpoint to route to e.g greeter or localhost:9090",
EnvVars: []string{"MICRO_PROXY_ENDPOINT"},
},
&cli.StringFlag{
Name: "auth",
Usage: "Set the proxy auth e.g jwt",
EnvVars: []string{"MICRO_PROXY_AUTH"},
},
},
Action: func(ctx *cli.Context) error {
run(ctx, options...)
return nil
},
}
for _, p := range Plugins() {
if cmds := p.Commands(); len(cmds) > 0 {
command.Subcommands = append(command.Subcommands, cmds...)
}
if flags := p.Flags(); len(flags) > 0 {
command.Flags = append(command.Flags, flags...)
}
}
return []*cli.Command{command}
}
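// Illustrative invocation of the command defined above (flag values are
// examples only):
//
//   micro proxy --protocol=grpc --address=0.0.0.0:8081 --endpoint=grpc://localhost:9090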
|
[
"\"CF_API_TOKEN\"",
"\"CF_ACCOUNT_ID\"",
"\"KV_NAMESPACE_ID\""
] |
[] |
[
"KV_NAMESPACE_ID",
"CF_API_TOKEN",
"CF_ACCOUNT_ID"
] |
[]
|
["KV_NAMESPACE_ID", "CF_API_TOKEN", "CF_ACCOUNT_ID"]
|
go
| 3 | 0 | |
src/unity/python/turicreate/util/__init__.py
|
# -*- coding: utf-8 -*-
# Copyright © 2017 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
# 1st set up logging
import logging
import logging.config
import time as _time
import tempfile as _tempfile
import os as _os
import urllib as _urllib
import re as _re
from zipfile import ZipFile as _ZipFile
import bz2 as _bz2
import tarfile as _tarfile
import itertools as _itertools
import uuid as _uuid
import datetime as _datetime
import sys as _sys
import subprocess as _subprocess
from ..config import get_client_log_location as _get_client_log_location
from .sframe_generation import generate_random_sframe
from .sframe_generation import generate_random_regression_sframe
from .sframe_generation import generate_random_classification_sframe
from .type_checks import _raise_error_if_not_function
from .type_checks import _raise_error_if_not_of_type
from .type_checks import _is_non_string_iterable
from .type_checks import _is_string
try:
import configparser as _ConfigParser
except ImportError:
import ConfigParser as _ConfigParser
def _convert_slashes(path):
"""
Converts all windows-style slashes to unix-style slashes
"""
return path.replace('\\', '/')
def _get_s3_endpoint():
"""
Returns the current S3 endpoint.
"""
import turicreate
return turicreate.config.get_runtime_config()['TURI_S3_ENDPOINT']
def _get_aws_credentials():
"""
Returns the values stored in the AWS credential environment variables.
Returns the value stored in the AWS_ACCESS_KEY_ID environment variable and
the value stored in the AWS_SECRET_ACCESS_KEY environment variable.
Returns
-------
out : tuple [string]
The first string of the tuple is the value of the AWS_ACCESS_KEY_ID
environment variable. The second string of the tuple is the value of the
AWS_SECRET_ACCESS_KEY environment variable.
Examples
--------
>>> turicreate.aws.get_credentials()
('RBZH792CTQPP7T435BGQ', '7x2hMqplWsLpU/qQCN6xAPKcmWo46TlPJXYTvKcv')
"""
if (not 'AWS_ACCESS_KEY_ID' in _os.environ):
raise KeyError('No access key found. Please set the environment variable AWS_ACCESS_KEY_ID.')
if (not 'AWS_SECRET_ACCESS_KEY' in _os.environ):
raise KeyError('No secret key found. Please set the environment variable AWS_SECRET_ACCESS_KEY.')
return (_os.environ['AWS_ACCESS_KEY_ID'], _os.environ['AWS_SECRET_ACCESS_KEY'])
def _try_inject_s3_credentials(url):
"""
Inject aws credentials into s3 url as s3://[aws_id]:[aws_key]:[bucket/][objectkey]
If s3 url already contains secret key/id pairs, just return as is.
"""
assert url.startswith('s3://')
path = url[5:]
# Check if the path already contains credentials
tokens = path.split(':')
# If there are two ':', its possible that we have already injected credentials
if len(tokens) == 3:
# Edge case: there are exactly two ':'s in the object key which is a false alarm.
# We prevent this by checking that '/' is not in the assumed key and id.
if ('/' not in tokens[0]) and ('/' not in tokens[1]):
return url
# S3 url does not contain secret key/id pair, query the environment variables
(k, v) = _get_aws_credentials()
return 's3://' + k + ':' + v + ':' + path
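# Illustrative expansion performed by this helper (the key id and secret come
# from the AWS_* environment variables):
#   's3://mybucket/foo' -> 's3://<AWS_ACCESS_KEY_ID>:<AWS_SECRET_ACCESS_KEY>:mybucket/foo'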
def _make_internal_url(url):
"""
Process user input url string with proper normalization
For all urls:
Expands ~ to $HOME
For S3 urls:
Returns the s3 URL with credentials filled in using turicreate.aws.get_aws_credential().
For example: "s3://mybucket/foo" -> "s3://$AWS_ACCESS_KEY_ID:$AWS_SECRET_ACCESS_KEY:mybucket/foo".
For hdfs urls:
Error if hadoop classpath is not set
For local file urls:
convert slashes for Windows sanity
Parameters
----------
string
A URL (as described above).
Raises
------
ValueError
If a bad url is provided.
"""
if not url:
raise ValueError('Invalid url: %s' % url)
from .. import _sys_util
from . import file_util
# Convert Windows paths to Unix-style slashes
url = _convert_slashes(url)
# Try to split the url into (protocol, path).
protocol = file_util.get_protocol(url)
is_local = False
if protocol in ['http', 'https']:
pass
elif protocol == 'hdfs':
if not _sys_util.get_hadoop_class_path():
raise ValueError("HDFS URL is not supported because Hadoop not found. Please make hadoop available from PATH or set the environment variable HADOOP_HOME and try again.")
elif protocol == 's3':
return _try_inject_s3_credentials(url)
elif protocol == '':
is_local = True
elif (protocol == 'local' or protocol == 'remote'):
# local and remote are legacy protocol for separate server process
is_local = True
# This code assumes local and remote are same machine
url = _re.sub(protocol+'://','',url,count=1)
else:
raise ValueError('Invalid url protocol %s. Supported url protocols are: local, s3://, https:// and hdfs://' % protocol)
if is_local:
url = _os.path.abspath(_os.path.expanduser(url))
return url
def _download_dataset(url_str, extract=True, force=False, output_dir="."):
"""Download a remote dataset and extract the contents.
Parameters
----------
url_str : string
The URL to download from
extract : bool
If true, tries to extract compressed file (zip/gz/bz2)
force : bool
If true, forces to retry the download even if the downloaded file already exists.
output_dir : string
The directory to dump the file. Defaults to current directory.
"""
fname = output_dir + "/" + url_str.split("/")[-1]
#download the file from the web
if not _os.path.isfile(fname) or force:
print("Downloading file from:", url_str)
_urllib.urlretrieve(url_str, fname)
if extract and fname[-3:] == "zip":
print("Decompressing zip archive", fname)
_ZipFile(fname).extractall(output_dir)
elif extract and fname[-7:] == ".tar.gz":
print("Decompressing tar.gz archive", fname)
_tarfile.open(fname).extractall(output_dir)
elif extract and fname[-8:] == ".tar.bz2":
print("Decompressing tar.bz2 archive", fname)
_tarfile.open(fname).extractall(output_dir)
elif extract and fname[-3:] == "bz2":
print("Decompressing bz2 archive:", fname)
outfile = open(fname.split(".bz2")[0], "wb")
print("Output file:", outfile)
for line in _bz2.BZ2File(fname, "r"):
outfile.write(line)
outfile.close()
else:
print("File is already downloaded.")
def is_directory_archive(path):
"""
Utility function that returns True if the path provided is a directory that has an SFrame or SGraph in it.
SFrames are written to disk as a directory archive, this function identifies if a given directory is an archive
for an SFrame.
Parameters
----------
path : string
Directory to evaluate.
Returns
-------
True if path provided is an archive location, False otherwise
"""
if path is None:
return False
if not _os.path.isdir(path):
return False
ini_path = '/'.join([_convert_slashes(path), 'dir_archive.ini'])
if not _os.path.exists(ini_path):
return False
if _os.path.isfile(ini_path):
return True
return False
def get_archive_type(path):
"""
Returns the contents type for the provided archive path.
Parameters
----------
path : string
Directory to evaluate.
Returns
-------
Returns a string of: sframe, sgraph, raises TypeError for anything else
"""
if not is_directory_archive(path):
raise TypeError('Unable to determine the type of archive at path: %s' % path)
try:
ini_path = '/'.join([_convert_slashes(path), 'dir_archive.ini'])
parser = _ConfigParser.SafeConfigParser()
parser.read(ini_path)
contents = parser.get('metadata', 'contents')
return contents
except Exception as e:
raise TypeError('Unable to determine type of archive for path: %s' % path, e)
_GLOB_RE = _re.compile("""[*?]""")
def _split_path_elements(url):
parts = _os.path.split(url)
m = _GLOB_RE.search(parts[-1])
if m:
return (parts[0], parts[1])
else:
return (url, "")
def crossproduct(d):
"""
Create an SFrame containing the crossproduct of all provided options.
Parameters
----------
d : dict
Each key is the name of an option, and each value is a list
of the possible values for that option.
Returns
-------
out : SFrame
There will be a column for each key in the provided dictionary,
and a row for each unique combination of all values.
Example
-------
settings = {'argument_1':[0, 1],
'argument_2':['a', 'b', 'c']}
print crossproduct(settings)
+------------+------------+
| argument_2 | argument_1 |
+------------+------------+
| a | 0 |
| a | 1 |
| b | 0 |
| b | 1 |
| c | 0 |
| c | 1 |
+------------+------------+
[6 rows x 2 columns]
"""
from .. import SArray
d = [list(zip(list(d.keys()), x)) for x in _itertools.product(*list(d.values()))]
sa = [{k:v for (k,v) in x} for x in d]
return SArray(sa).unpack(column_name_prefix='')
def get_turicreate_object_type(url):
'''
Given url where a Turi Create object is persisted, return the Turi
Create object type: 'model', 'graph', 'sframe', or 'sarray'
'''
from ..connect import main as _glconnect
ret = _glconnect.get_unity().get_turicreate_object_type(_make_internal_url(url))
# to be consistent, we use sgraph instead of graph here
if ret == 'graph':
ret = 'sgraph'
return ret
def _assert_sframe_equal(sf1,
sf2,
check_column_names=True,
check_column_order=True,
check_row_order=True,
float_column_delta=None):
"""
Assert the two SFrames are equal.
The default behavior of this function uses the strictest possible
definition of equality, where all columns must be in the same order, with
the same names and have the same data in the same order. Each of these
stipulations can be relaxed individually and in concert with another, with
the exception of `check_column_order` and `check_column_names`, we must use
one of these to determine which columns to compare with one another.
Parameters
----------
sf1 : SFrame
sf2 : SFrame
check_column_names : bool
If true, assert if the data values in two columns are the same, but
they have different names. If False, column order is used to determine
which columns to compare.
check_column_order : bool
If true, assert if the data values in two columns are the same, but are
not in the same column position (one is the i-th column and the other
is the j-th column, i != j). If False, column names are used to
determine which columns to compare.
check_row_order : bool
If true, assert if all rows in the first SFrame exist in the second
SFrame, but they are not in the same order.
float_column_delta : float
The acceptable delta that two float values can be and still be
considered "equal". When this is None, only exact equality is accepted.
This is the default behavior since columns of all Nones are often of
float type. Applies to all float columns.
"""
from .. import SFrame as _SFrame
if (type(sf1) is not _SFrame) or (type(sf2) is not _SFrame):
raise TypeError("Cannot function on types other than SFrames.")
if not check_column_order and not check_column_names:
raise ValueError("Cannot ignore both column order and column names.")
sf1.__materialize__()
sf2.__materialize__()
if sf1.num_columns() != sf2.num_columns():
raise AssertionError("Number of columns mismatched: " +
str(sf1.num_columns()) + " != " + str(sf2.num_columns()))
s1_names = sf1.column_names()
s2_names = sf2.column_names()
sorted_s1_names = sorted(s1_names)
sorted_s2_names = sorted(s2_names)
if check_column_names:
if (check_column_order and (s1_names != s2_names)) or (sorted_s1_names != sorted_s2_names):
raise AssertionError("SFrame does not have same column names: " +
str(sf1.column_names()) + " != " + str(sf2.column_names()))
if sf1.num_rows() != sf2.num_rows():
raise AssertionError("Number of rows mismatched: " +
str(sf1.num_rows()) + " != " + str(sf2.num_rows()))
if not check_row_order and (sf1.num_rows() > 1):
sf1 = sf1.sort(s1_names)
sf2 = sf2.sort(s2_names)
names_to_check = None
if check_column_names:
names_to_check = list(zip(sorted_s1_names, sorted_s2_names))
else:
names_to_check = list(zip(s1_names, s2_names))
for i in names_to_check:
col1 = sf1[i[0]]
col2 = sf2[i[1]]
if col1.dtype != col2.dtype:
raise AssertionError("Columns " + str(i) + " types mismatched.")
compare_ary = None
if col1.dtype == float and float_column_delta is not None:
dt = float_column_delta
compare_ary = ((col1 > col2-dt) & (col1 < col2+dt))
else:
compare_ary = (sf1[i[0]] == sf2[i[1]])
if not compare_ary.all():
count = 0
for j in compare_ary:
if not j:
first_row = count
break
count += 1
raise AssertionError("Columns " + str(i) +
" are not equal! First differing element is at row " +
str(first_row) + ": " + str((col1[first_row],col2[first_row])))
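# Illustrative usage sketch for _assert_sframe_equal (not part of the original
# module; the data below is hypothetical). With check_column_order=False the
# two frames compare equal even though their columns are ordered differently:
#
#   import turicreate as tc
#   sf_a = tc.SFrame({'id': [1, 2], 'value': [1.0, 2.0]})
#   sf_b = tc.SFrame({'value': [1.0, 2.0], 'id': [1, 2]})
#   _assert_sframe_equal(sf_a, sf_b, check_column_order=False)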
def _get_temp_file_location():
'''
Returns user specified temporary file location.
The temporary location is specified through:
>>> turicreate.set_runtime_config('TURI_CACHE_FILE_LOCATIONS', ...)
'''
from ..connect import main as _glconnect
unity = _glconnect.get_unity()
cache_dir = _convert_slashes(unity.get_current_cache_file_location())
if not _os.path.exists(cache_dir):
_os.makedirs(cache_dir)
return cache_dir
def _make_temp_directory(prefix):
'''
Generate a temporary directory that would not live beyond the lifetime of
unity_server.
Caller is expected to clean up the temp directory as soon as it is no
longer needed, but the directory will also be cleaned up when unity_server restarts.
'''
temp_dir = _make_temp_filename(prefix=str(prefix))
_os.makedirs(temp_dir)
return temp_dir
def _make_temp_filename(prefix):
'''
Generate a temporary file that would not live beyond the lifetime of
unity_server.
Caller is expected to clean up the temp file as soon as the file is no
longer needed. But temp files created using this method will be cleaned up
when unity_server restarts
'''
temp_location = _get_temp_file_location()
temp_file_name = '/'.join([temp_location, str(prefix)+str(_uuid.uuid4())])
return temp_file_name
# datetime utilities
_ZERO = _datetime.timedelta(0)
class _UTC(_datetime.tzinfo):
"""
A UTC datetime.tzinfo class modeled after the pytz library. It includes a
__reduce__ method for pickling.
"""
def fromutc(self, dt):
if dt.tzinfo is None:
return self.localize(dt)
return super(_utc.__class__, self).fromutc(dt)
def utcoffset(self, dt):
return _ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return _ZERO
def __reduce__(self):
return _UTC, ()
def __repr__(self):
return "<UTC>"
def __str__(self):
return "UTC"
_utc = _UTC()
def _dt_to_utc_timestamp(t):
if t.tzname() == 'UTC':
return (t - _datetime.datetime(1970, 1, 1, tzinfo=_utc)).total_seconds()
elif not t.tzinfo:
return _time.mktime(t.timetuple())
else:
raise ValueError('Only local time and UTC time is supported')
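# Illustrative behavior sketch for _dt_to_utc_timestamp (values are examples,
# not from the original module). An aware UTC datetime is measured against the
# UNIX epoch, while a naive datetime is interpreted as local time via mktime:
#
#   _dt_to_utc_timestamp(_datetime.datetime(1970, 1, 1, 0, 1, tzinfo=_utc))  # 60.0
#   _dt_to_utc_timestamp(_datetime.datetime(2020, 1, 1))  # local-time epoch seconds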
def _pickle_to_temp_location_or_memory(obj):
'''
If obj can be serialized directly into memory (via cloudpickle) this
will return the serialized bytes.
Otherwise, gl_pickle is attempted: it generates a temporary directory,
serializes the object into it, and returns the directory name. This
directory will not have a lifespan greater than that of unity_server.
'''
from . import cloudpickle as cloudpickle
try:
# try cloudpickle first and see if that works
lambda_str = cloudpickle.dumps(obj)
return lambda_str
except:
pass
# nope. that does not work! lets try again with gl pickle
filename = _make_temp_filename('pickle')
from .. import _gl_pickle
pickler = _gl_pickle.GLPickler(filename)
pickler.dump(obj)
pickler.close()
return filename
def get_module_from_object(obj):
mod_str = obj.__class__.__module__.split('.')[0]
return _sys.modules[mod_str]
def infer_dbapi2_types(cursor, mod_info):
desc = cursor.description
result_set_types = [i[1] for i in desc]
dbapi2_to_python = [ # a type code can match more than one, so ordered by
# preference (loop short-circuits when it finds a match)
(mod_info['DATETIME'], _datetime.datetime),
(mod_info['ROWID'],int),
(mod_info['NUMBER'],float),
]
ret_types = []
# Ugly nested loop because the standard only guarantees that a type code
# will compare equal to the module-defined types
for i in result_set_types:
type_found = False
for j in dbapi2_to_python:
if i is None or j[0] is None:
break
elif i == j[0]:
ret_types.append(j[1])
type_found = True
break
if not type_found:
ret_types.append(str)
return ret_types
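# Illustrative sketch (hypothetical driver; not from the original module):
# infer_dbapi2_types expects mod_info to map 'DATETIME', 'ROWID' and 'NUMBER'
# to the DB-API module's type objects (or None if the driver lacks one).
#
#   cursor.execute('SELECT id, name, created_at FROM users')
#   mod_info = {'DATETIME': dbapi_mod.DATETIME, 'ROWID': dbapi_mod.ROWID,
#               'NUMBER': dbapi_mod.NUMBER}
#   col_types = infer_dbapi2_types(cursor, mod_info)  # e.g. [int, str, datetime.datetime]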
def pytype_to_printf(in_type):
if in_type == int:
return 'd'
elif in_type == float:
return 'f'
else:
return 's'
def subprocess_exe(exe, args, setup=None, teardown=None,
local_log_prefix=None,
out_log_prefix=None,
environment_variables=None):
"""
Wrapper function to execute an external program.
This function is exception safe, and always catches
the error.
Parameters
----------
exe : str
The command to run
args : list[str]
Arguments to pass to the command
setup : function
Setup function to run before executing the command
teardown : function
Teardown function to run after executing the command
local_log_prefix: str
The prefix of a local file path to the log file while the program is running:
<prefix>_commander.stdout
<prefix>_commander.stderr
<prefix>_worker0.stdout
<prefix>_worker0.stderr
If "out_log_prefix" is set, the files will be copied into out_log_prefix
when the process terminates.
out_log_prefix: str
The path prefix to the final saved log file.
If set, the logs will be saved to the following locations:
<prefix>.stdout
<prefix>.stderr
and the return value will contain paths to the log files.
The path can be local or hdfs or s3.
Returns
-------
out : dict
A dictionary containing the following keys:
success : bool
True if the command succeeded
return_code : int
The return code of the command
stderr : str
Path to the stderr log of the process
stdout : str
Path to the stdout log of the process
python_exception : Exception
Python exception
"""
import logging
import os
ret = {'success': True,
'return_code': None,
'stdout': None,
'stderr': None,
'python_exception': None,
'proc_object' : None}
blocking = True
# Creates local running log file
try:
if local_log_prefix in [_subprocess.PIPE,
_subprocess.STDOUT]:
local_log_stdout = local_log_prefix
local_log_stderr = local_log_prefix
blocking = False
if out_log_prefix is not None:
raise ValueError("Cannot pipe output and set an output log!")
elif local_log_prefix:
local_log_stdout = open(local_log_prefix + '.stdout', 'w')
local_log_stderr = open(local_log_prefix + '.stderr', 'w')
else:
local_log_stdout = _tempfile.NamedTemporaryFile(delete=False)
local_log_stderr = _tempfile.NamedTemporaryFile(delete=False)
except Exception as e:
ret['success'] = False
ret['python_exception'] = e
# Run setup
try:
if setup is not None:
setup()
except Exception as e:
ret['success'] = False
ret['python_exception'] = e
# Executes the command
if ret['success']:
try:
if environment_variables is not None:
# dict.update() returns None, so merge the overrides into a copy of os.environ
merged_env = os.environ.copy()
merged_env.update(environment_variables)
environment_variables = merged_env
proc = _subprocess.Popen([exe] + args,
stdout=local_log_stdout,
stderr=local_log_stderr,
env=environment_variables)
if blocking:
proc.communicate()
ret['success'] = proc.returncode == 0
ret['return_code'] = proc.returncode
else:
ret['success'] = None
ret['stdout'] = proc.stdout
ret['stderr'] = proc.stderr
ret['proc_object'] = proc
except Exception as e:
ret['success'] = False
ret['python_exception'] = e
finally:
if blocking:
try:
local_log_stdout.close()
local_log_stderr.close()
if out_log_prefix is not None:
# persistent logfiles. When local log closed,
# they will be copied to the corresponding hdfs or s3 path
file_log_stdout = out_log_prefix + '.stdout'
file_log_stderr = out_log_prefix + '.stderr'
# copy to target log path
file_util.copy_from_local(local_log_stdout.name, file_log_stdout)
file_util.copy_from_local(local_log_stderr.name, file_log_stderr)
ret['stdout'] = file_log_stdout
ret['stderr'] = file_log_stderr
else:
ret['stdout'] = open(local_log_stdout.name).read()
ret['stderr'] = open(local_log_stderr.name).read()
except Exception as e:
ret['_save_log_exception'] = e
logging.warning(str(e))
# Teardown
if teardown is not None:
try:
teardown()
except Exception as e:
ret['_tear_down_exception'] = e
logging.warning(str(e))
return ret
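# Illustrative usage sketch for subprocess_exe (command and paths are
# placeholders, not from the original module). Blocking mode with saved logs:
#
#   result = subprocess_exe('echo', ['hello'], out_log_prefix='/tmp/echo_log')
#   if result['success']:
#       print(result['stdout'])  # path to /tmp/echo_log.stdout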
# Automatic GPU detection
def _get_cuda_gpus():
"""
Returns a list of 0-based integer indices of available CUDA GPUs.
"""
import subprocess
try:
ret = subprocess.check_output(["nvidia-smi", "-L"], universal_newlines=True).split('\n')
return [i for i, s in enumerate(ret) if 'GPU' in s]
except OSError:
return []
_CUDA_GPU_IDS = _get_cuda_gpus()
def _num_available_gpus():
return len(_CUDA_GPU_IDS)
|
[] |
[] |
[
"AWS_ACCESS_KEY_ID",
"AWS_SECRET_ACCESS_KEY"
] |
[]
|
["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"]
|
python
| 2 | 0 | |
noxfile.py
|
"""Nox sessions."""
from pathlib import Path
from textwrap import dedent
import nox
from nox_poetry import session
locations = "tune", "noxfile.py"
nox.options.sessions = ("pre-commit", "tests")
python_versions = ["3.7", "3.8", "3.9"]
def activate_virtualenv_in_precommit_hooks(session):
"""Activate virtualenv in hooks installed by pre-commit.
This function patches git hooks installed by pre-commit to activate the
session's virtual environment. This allows pre-commit to locate hooks in
that environment when invoked from git.
Args:
session: The Session object.
"""
if session.bin is None:
return
virtualenv = session.env.get("VIRTUAL_ENV")
if virtualenv is None:
return
hookdir = Path(".git") / "hooks"
if not hookdir.is_dir():
return
for hook in hookdir.iterdir():
if hook.name.endswith(".sample") or not hook.is_file():
continue
text = hook.read_text()
bindir = repr(session.bin)[1:-1] # strip quotes
if not (
Path("A") == Path("a") and bindir.lower() in text.lower() or bindir in text
):
continue
lines = text.splitlines()
if not (lines[0].startswith("#!") and "python" in lines[0].lower()):
continue
header = dedent(
f"""\
import os
os.environ["VIRTUAL_ENV"] = {virtualenv!r}
os.environ["PATH"] = os.pathsep.join((
{session.bin!r},
os.environ.get("PATH", ""),
))
"""
)
lines.insert(1, header)
hook.write_text("\n".join(lines))
@session(python=python_versions)
def tests(session):
"""Run the test suite."""
session.install(".[data]")
session.install("pytest", "nox", "nox-poetry")
session.run("pytest", *session.posargs)
@session(python="3.8")
def black(session):
"""Run black code formatter."""
args = session.posargs or locations
session.install("black")
session.run("black", *args)
@session(name="pre-commit", python="3.8")
def precommit(session):
args = session.posargs or ["run", "--all-files", "--show-diff-on-failure"]
session.install(
"pre-commit",
"black",
"click",
"flake8",
"isort",
)
session.run("pre-commit", *args)
if args and args[0] == "install":
activate_virtualenv_in_precommit_hooks(session)
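# Illustrative invocations (assuming nox and nox-poetry are installed; commands
# are examples, not part of the original noxfile):
#   nox -s tests-3.9               # run the test suite on Python 3.9
#   nox -s pre-commit -- install   # install git hooks, then patch them for the venv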
|
[] |
[] |
[
"VIRTUAL_ENV",
"PATH"
] |
[]
|
["VIRTUAL_ENV", "PATH"]
|
python
| 2 | 0 | |
vehicle/adapt-sysroot/ros_install_isolated/lib/python2.7/dist-packages/roswtf/__init__.py
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2009, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Revision $Id$
"""
roswtf command-line tool.
"""
import os
import socket
import sys
import traceback
import rospkg
import rosgraph.names
def yaml_results(ctx):
cd = ctx.as_dictionary()
d = {}
d['warnings'] = {}
d['errors'] = {}
wd = d['warnings']
for warn in ctx.warnings:
wd[warn.format_msg%cd] = warn.return_val
ed = d['errors']
for err in ctx.errors:
ed[err.format_msg%cd] = err.return_val
import yaml
print yaml.dump(d)
def print_results(ctx):
if not ctx.warnings and not ctx.errors:
print "No errors or warnings"
else:
if ctx.warnings:
print "Found %s warning(s).\nWarnings are things that may be just fine, but are sometimes at fault\n"%len(ctx.warnings)
for warn in ctx.warnings:
print '\033[1mWARNING\033[0m', warn.msg
print ''
if ctx.errors:
print "Found %s error(s).\n"%len(ctx.errors)
for e in ctx.errors:
print '\033[31m\033[1mERROR\033[0m', e.msg
#print "ERROR:", e.msg
def roswtf_main():
try:
import std_msgs.msg
import rosgraph_msgs.msg
except ImportError:
print "ERROR: The core ROS message libraries (std_msgs and rosgraph_msgs) have not been built."
sys.exit(1)
from roswtf.context import WtfException
try:
_roswtf_main()
except WtfException, e:
print >> sys.stderr, e
def _roswtf_main():
launch_files = names = None
# performance optimization
rospack = rospkg.RosPack()
all_pkgs = rospack.list()
import optparse
parser = optparse.OptionParser(usage="usage: roswtf [launch file]", description="roswtf is a tool for verifying a ROS installation and running system. Checks provided launchfile if provided, else current stack or package.")
# #2268
parser.add_option("--all",
dest="all_packages", default=False,
action="store_true",
help="run roswtf against all packages")
# #2270
parser.add_option("--no-plugins",
dest="disable_plugins", default=False,
action="store_true",
help="disable roswtf plugins")
parser.add_option("--offline",
dest="offline", default=False,
action="store_true",
help="only run offline tests")
#TODO: --all-pkgs option
options, args = parser.parse_args()
if args:
launch_files = args
if 0:
# disable names for now as don't have any rules yet
launch_files = [a for a in args if os.path.isfile(a)]
names = [a for a in args if not a in launch_files]
names = [rosgraph.names.script_resolve_name('/roswtf', n) for n in names]
from roswtf.context import WtfContext
from roswtf.environment import wtf_check_environment, invalid_url, ros_root_check
from roswtf.graph import wtf_check_graph
import roswtf.rosdep_db
import roswtf.py_pip_deb_checks
import roswtf.network
import roswtf.packages
import roswtf.roslaunchwtf
import roswtf.stacks
import roswtf.plugins
if not options.disable_plugins:
static_plugins, online_plugins = roswtf.plugins.load_plugins()
else:
static_plugins, online_plugins = [], []
# - do a ros_root check first and abort if it fails as rest of tests are useless after that
error = ros_root_check(None, ros_root=os.environ['ROS_ROOT'])
if error:
print "ROS_ROOT is invalid: "+str(error)
sys.exit(1)
all_warnings = []
all_errors = []
if launch_files:
ctx = WtfContext.from_roslaunch(launch_files)
#TODO: allow specifying multiple roslaunch files
else:
curr_package = rospkg.get_package_name('.')
if curr_package:
print "Package:",curr_package
ctx = WtfContext.from_package(curr_package)
#TODO: load all .launch files in package
elif os.path.isfile('stack.xml'):
curr_stack = os.path.basename(os.path.abspath('.'))
print "Stack:",curr_stack
ctx = WtfContext.from_stack(curr_stack)
else:
print "No package or stack in context"
ctx = WtfContext.from_env()
if options.all_packages:
print "roswtf will run against all packages"
ctx.pkgs = all_pkgs
# static checks
wtf_check_environment(ctx)
roswtf.rosdep_db.wtf_check(ctx)
roswtf.py_pip_deb_checks.wtf_check(ctx)
roswtf.network.wtf_check(ctx)
roswtf.packages.wtf_check(ctx)
roswtf.stacks.wtf_check(ctx)
roswtf.roslaunchwtf.wtf_check_static(ctx)
for p in static_plugins:
p(ctx)
print "="*80
print "Static checks summary:\n"
print_results(ctx)
# Save static results and start afresh for online checks
all_warnings.extend(ctx.warnings)
all_errors.extend(ctx.errors)
del ctx.warnings[:]
del ctx.errors[:]
# test online
print "="*80
try:
if options.offline or not ctx.ros_master_uri or invalid_url(ctx.ros_master_uri) or not rosgraph.is_master_online():
online_checks = False
else:
online_checks = True
if online_checks:
online_checks = True
print "Beginning tests of your ROS graph. These may take awhile..."
# online checks
wtf_check_graph(ctx, names=names)
elif names:
# TODO: need to rework this logic
print "\nCannot communicate with master, unable to diagnose [%s]"%(', '.join(names))
return
else:
print "\nROS Master does not appear to be running.\nOnline graph checks will not be run.\nROS_MASTER_URI is [%s]"%(ctx.ros_master_uri)
return
# spin up a roswtf node so we can subscribe to messages
import rospy
rospy.init_node('roswtf', anonymous=True)
online_checks = True
roswtf.roslaunchwtf.wtf_check_online(ctx)
for p in online_plugins:
online_checks = True
p(ctx)
if online_checks:
# done
print "\nOnline checks summary:\n"
print_results(ctx)
except roswtf.context.WtfException, e:
print >> sys.stderr, str(e)
print "\nAborting checks, partial results summary:\n"
print_results(ctx)
except Exception, e:
traceback.print_exc()
print >> sys.stderr, str(e)
print "\nAborting checks, partial results summary:\n"
print_results(ctx)
#TODO: print results in YAML if run remotely
#yaml_results(ctx)
|
[] |
[] |
[
"ROS_ROOT"
] |
[]
|
["ROS_ROOT"]
|
python
| 1 | 0 | |
appengine/standard/requests/main_test.py
|
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from google.appengine.runtime import DeadlineExceededError
import main
import mock
import webtest
def test_timer(testbed):
app = webtest.TestApp(main.app)
with mock.patch('main.time.sleep') as sleep_mock:
sleep_mock.side_effect = DeadlineExceededError()
app.get('/timer', status=500)
assert sleep_mock.called
def test_environment(testbed):
app = webtest.TestApp(main.app)
response = app.get('/environment')
assert response.headers['Content-Type'] == 'text/plain'
assert response.body
def test_request_id(testbed):
app = webtest.TestApp(main.app)
os.environ['REQUEST_LOG_ID'] = '1234'
response = app.get('/requestid')
assert response.headers['Content-Type'] == 'text/plain'
assert '1234' in response.body
|
[] |
[] |
[
"REQUEST_LOG_ID"
] |
[]
|
["REQUEST_LOG_ID"]
|
python
| 1 | 0 | |
example/mobilenetv2_imagenet2012/eval.py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
eval.
"""
import os
import argparse
from dataset import create_dataset
from config import config
from mindspore import context
from mindspore.model_zoo.mobilenet import mobilenet_v2
from mindspore.train.model import Model
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
from mindspore.common import dtype as mstype
from mindspore import nn
parser = argparse.ArgumentParser(description='Image classification')
parser.add_argument('--checkpoint_path', type=str, default=None, help='Checkpoint file path')
parser.add_argument('--dataset_path', type=str, default=None, help='Dataset path')
args_opt = parser.parse_args()
device_id = int(os.getenv('DEVICE_ID'))
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=device_id, save_graphs=False)
if __name__ == '__main__':
loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True, reduction='mean')
net = mobilenet_v2(num_classes=config.num_classes)
net.to_float(mstype.float16)
for _, cell in net.cells_and_names():
if isinstance(cell, nn.Dense):
cell.add_flags_recursive(fp32=True)
dataset = create_dataset(dataset_path=args_opt.dataset_path, do_train=False, batch_size=config.batch_size)
step_size = dataset.get_dataset_size()
if args_opt.checkpoint_path:
param_dict = load_checkpoint(args_opt.checkpoint_path)
load_param_into_net(net, param_dict)
net.set_train(False)
model = Model(net, loss_fn=loss, metrics={'acc'})
res = model.eval(dataset)
print("result:", res, "ckpt=", args_opt.checkpoint_path)
|
[] |
[] |
[
"DEVICE_ID"
] |
[]
|
["DEVICE_ID"]
|
python
| 1 | 0 | |
{{cookiecutter.project_name}}/core/deployment/gunicorn_conf.py
|
import os
def to_bool(value):
return (
value is True or
(isinstance(value, str) and value.lower() in ['true', 'yes']) or
(isinstance(value, (int, float)) and value > 0)
)
bind = '0.0.0.0:{}'.format(os.getenv('GUNICORN_PORT', '8000'))
max_requests = int(os.getenv('GUNICORN_MAX_REQUESTS', '10000'))
max_requests_jitter = int(os.getenv('GUNICORN_MAX_REQUESTS_JITTER', '100'))
user = os.getenv('GUNICORN_USER', 'root')
keepalive = int(os.getenv('GUNICORN_KEEPALIVE', '70'))
reuse_port = to_bool(os.getenv('GUNICORN_REUSE_PORT', True))
accesslog = '-'
errorlog = '-'
print_config = True
workers = int(os.getenv('GUNICORN_WORKERS', '5'))
threads = int(os.getenv('GUNICORN_THREADS', '5'))
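# Illustrative usage (hypothetical module path, not from the original config):
# every setting above can be overridden through the environment, e.g.
#   GUNICORN_PORT=9000 GUNICORN_WORKERS=2 gunicorn -c gunicorn_conf.py myproject.wsgi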
|
[] |
[] |
[
"GUNICORN_KEEPALIVE",
"GUNICORN_WORKERS",
"GUNICORN_REUSE_PORT",
"GUNICORN_USER",
"GUNICORN_THREADS",
"GUNICORN_MAX_REQUESTS_JITTER",
"GUNICORN_PORT",
"GUNICORN_MAX_REQUESTS"
] |
[]
|
["GUNICORN_KEEPALIVE", "GUNICORN_WORKERS", "GUNICORN_REUSE_PORT", "GUNICORN_USER", "GUNICORN_THREADS", "GUNICORN_MAX_REQUESTS_JITTER", "GUNICORN_PORT", "GUNICORN_MAX_REQUESTS"]
|
python
| 8 | 0 | |
twitter_wrangler/utils/twitter_api.py
|
import tweepy
from tweepy import OAuthHandler
import os
import json
from timeit import default_timer as timer
class Twitter:
def __init__(self, cfg):
"""
:param cfg: Hydra config for Twitter API
"""
auth = OAuthHandler((cfg.get('consumer_key', os.environ.get('CONSUMER_KEY'))),
(cfg.get('consumer_secret', os.environ.get('CONSUMER_SECRET'))))
auth.set_access_token((cfg.get('access_token', os.environ.get('ACCESS_TOKEN'))),
(cfg.get('access_secret', os.environ.get('ACCESS_SECRET'))))
self.api = tweepy.API(auth, wait_on_rate_limit=True)
def verify_credentials(self) -> None:
"""
Verify
"""
try:
self.api.verify_credentials()
print("Authentication OK")
except Exception:
print("Error during authentication")
def get_twitter_data(self, df_twt_archive) -> None:
"""
Query Twitter's API for JSON data for each tweet ID in the Twitter archive
:param df_twt_archive:
"""
tweet_ids = df_twt_archive.tweet_id.values
count = 0
fails_dict = {}
start = timer()
# Save each tweet's returned JSON as a new line in a .txt file
with open('tweet_json.txt', 'w') as outfile:
# This loop will likely take 20-30 minutes to run because of Twitter's rate limit
for tweet_id in tweet_ids:
count += 1
print(str(count) + ": " + str(tweet_id))
try:
tweet = self.api.get_status(tweet_id, tweet_mode='extended')
print("Success")
json.dump(tweet._json, outfile)
outfile.write('\n')
except tweepy.TweepError as e:
print("Fail")
fails_dict[tweet_id] = e
pass
end = timer()
print(end - start)
print(fails_dict)
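# Illustrative usage sketch (hypothetical Hydra config and archive DataFrame,
# not part of the original module):
#   tw = Twitter(cfg.twitter)
#   tw.verify_credentials()
#   tw.get_twitter_data(df_twt_archive)  # writes one JSON object per line to tweet_json.txt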
|
[] |
[] |
[
"CONSUMER_KEY",
"ACCESS_SECRET",
"CONSUMER_SECRET",
"ACCESS_TOKEN"
] |
[]
|
["CONSUMER_KEY", "ACCESS_SECRET", "CONSUMER_SECRET", "ACCESS_TOKEN"]
|
python
| 4 | 0 | |
lib/spack/spack/build_environment.py
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""
This module contains all routines related to setting up the package
build environment. All of this is set up by package.py just before
install() is called.
There are two parts to the build environment:
1. Python build environment (i.e. install() method)
This is how things are set up when install() is called. Spack
takes advantage of each package being in its own module by adding a
bunch of command-like functions (like configure(), make(), etc.) in
the package's module scope. This allows package writers to call
them all directly in Package.install() without writing 'self.'
everywhere. No, this isn't Pythonic. Yes, it makes the code more
readable and more like the shell script from which someone is
likely porting.
2. Build execution environment
This is the set of environment variables, like PATH, CC, CXX,
etc. that control the build. There are also a number of
environment variables used to pass information (like RPATHs and
other information about dependencies) to Spack's compiler wrappers.
All of these env vars are also set up here.
Skimming this module is a nice way to get acquainted with the types of
calls you can make from within the install() function.
"""
import inspect
import re
import multiprocessing
import os
import shutil
import sys
import traceback
import types
from six import StringIO
import llnl.util.tty as tty
from llnl.util.tty.color import cescape, colorize
from llnl.util.filesystem import mkdirp, install, install_tree
from llnl.util.lang import dedupe
from llnl.util.tty.log import MultiProcessFd
import spack.build_systems.cmake
import spack.build_systems.meson
import spack.config
import spack.main
import spack.paths
import spack.package
import spack.repo
import spack.schema.environment
import spack.store
import spack.install_test
import spack.subprocess_context
import spack.architecture as arch
import spack.util.path
from spack.util.string import plural
from spack.util.environment import (
env_flag, filter_system_paths, get_path, is_system_path,
EnvironmentModifications, validate, preserve_environment)
from spack.util.environment import system_dirs
from spack.error import NoLibrariesError, NoHeadersError
from spack.util.executable import Executable
from spack.util.module_cmd import load_module, path_from_modules, module
from spack.util.log_parse import parse_log_events, make_log_context
#
# This can be set by the user to globally disable parallel builds.
#
SPACK_NO_PARALLEL_MAKE = 'SPACK_NO_PARALLEL_MAKE'
#
# These environment variables are set by
# set_build_environment_variables and used to pass parameters to
# Spack's compiler wrappers.
#
SPACK_ENV_PATH = 'SPACK_ENV_PATH'
SPACK_INCLUDE_DIRS = 'SPACK_INCLUDE_DIRS'
SPACK_LINK_DIRS = 'SPACK_LINK_DIRS'
SPACK_RPATH_DIRS = 'SPACK_RPATH_DIRS'
SPACK_RPATH_DEPS = 'SPACK_RPATH_DEPS'
SPACK_LINK_DEPS = 'SPACK_LINK_DEPS'
SPACK_PREFIX = 'SPACK_PREFIX'
SPACK_INSTALL = 'SPACK_INSTALL'
SPACK_DEBUG = 'SPACK_DEBUG'
SPACK_SHORT_SPEC = 'SPACK_SHORT_SPEC'
SPACK_DEBUG_LOG_ID = 'SPACK_DEBUG_LOG_ID'
SPACK_DEBUG_LOG_DIR = 'SPACK_DEBUG_LOG_DIR'
SPACK_CCACHE_BINARY = 'SPACK_CCACHE_BINARY'
SPACK_SYSTEM_DIRS = 'SPACK_SYSTEM_DIRS'
# Platform-specific library suffix.
dso_suffix = 'dylib' if sys.platform == 'darwin' else 'so'
class MakeExecutable(Executable):
"""Special callable executable object for make so the user can specify
parallelism options on a per-invocation basis. Specifying
'parallel' to the call will override whatever the package's
global setting is, so you can either default to true or false and
override particular calls. Specifying 'jobs_env' to a particular
call will name an environment variable which will be set to the
parallelism level (without affecting the normal invocation with
-j).
Note that if the SPACK_NO_PARALLEL_MAKE env var is set it overrides
everything.
"""
def __init__(self, name, jobs):
super(MakeExecutable, self).__init__(name)
self.jobs = jobs
def __call__(self, *args, **kwargs):
"""parallel, and jobs_env from kwargs are swallowed and used here;
remaining arguments are passed through to the superclass.
"""
disable = env_flag(SPACK_NO_PARALLEL_MAKE)
parallel = (not disable) and kwargs.pop('parallel', self.jobs > 1)
if parallel:
args = ('-j{0}'.format(self.jobs),) + args
jobs_env = kwargs.pop('jobs_env', None)
if jobs_env:
# Caller wants us to set an environment variable to
# control the parallelism.
kwargs['extra_env'] = {jobs_env: str(self.jobs)}
return super(MakeExecutable, self).__call__(*args, **kwargs)
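# Illustrative usage sketch for MakeExecutable from within a package's
# install() (calls are examples, not from the original module):
#   make('install', parallel=False)        # force a serial invocation
#   make('-C', 'src', jobs_env='NPROC')    # also export NPROC=<jobs> for this call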
def clean_environment():
# Stuff in here sanitizes the build environment to eliminate
# anything the user has set that may interfere. We apply it immediately
# unlike the other functions so it doesn't overwrite what the modules load.
env = EnvironmentModifications()
# Remove these vars from the environment during build because they
# can affect how some packages find libraries. We want to make
# sure that builds never pull in unintended external dependencies.
env.unset('LD_LIBRARY_PATH')
env.unset('LD_RUN_PATH')
env.unset('DYLD_LIBRARY_PATH')
env.unset('DYLD_FALLBACK_LIBRARY_PATH')
# These vars affect how the compiler finds libraries and include dirs.
env.unset('LIBRARY_PATH')
env.unset('CPATH')
env.unset('C_INCLUDE_PATH')
env.unset('CPLUS_INCLUDE_PATH')
env.unset('OBJC_INCLUDE_PATH')
# On Cray "cluster" systems, unset CRAY_LD_LIBRARY_PATH to avoid
# interference with Spack dependencies.
# CNL requires these variables to be set (or at least some of them,
# depending on the CNL version).
hostarch = arch.Arch(arch.platform(), 'default_os', 'default_target')
on_cray = str(hostarch.platform) == 'cray'
using_cnl = re.match(r'cnl\d+', str(hostarch.os))
if on_cray and not using_cnl:
env.unset('CRAY_LD_LIBRARY_PATH')
for varname in os.environ.keys():
if 'PKGCONF' in varname:
env.unset(varname)
# Unset the following variables because they can affect installation of
# Autotools and CMake packages.
build_system_vars = [
'CC', 'CFLAGS', 'CPP', 'CPPFLAGS', # C variables
'CXX', 'CCC', 'CXXFLAGS', 'CXXCPP', # C++ variables
'F77', 'FFLAGS', 'FLIBS', # Fortran77 variables
'FC', 'FCFLAGS', 'FCLIBS', # Fortran variables
'LDFLAGS', 'LIBS' # linker variables
]
for v in build_system_vars:
env.unset(v)
# Unset mpi environment vars. These flags should only be set by
# mpi providers for packages with mpi dependencies
mpi_vars = [
'MPICC', 'MPICXX', 'MPIFC', 'MPIF77', 'MPIF90'
]
for v in mpi_vars:
env.unset(v)
build_lang = spack.config.get('config:build_language')
if build_lang:
# Override language-related variables. This can be used to force
# English compiler messages etc., which allows parse_log_events to
# show useful matches.
env.set('LC_ALL', build_lang)
# Remove any macports installs from the PATH. The macports ld can
# cause conflicts with the built-in linker on el capitan. Solves
# assembler issues, e.g.:
# suffix or operands invalid for `movq'"
path = get_path('PATH')
for p in path:
if '/macports/' in p:
env.remove_path('PATH', p)
env.apply_modifications()
def set_compiler_environment_variables(pkg, env):
assert pkg.spec.concrete
compiler = pkg.compiler
spec = pkg.spec
# Make sure the executables for this compiler exist
compiler.verify_executables()
# Set compiler variables used by CMake and autotools
assert all(key in compiler.link_paths for key in (
'cc', 'cxx', 'f77', 'fc'))
# Populate an object with the list of environment modifications
# and return it
# TODO : add additional kwargs for better diagnostics, like requestor,
# ttyout, ttyerr, etc.
link_dir = spack.paths.build_env_path
# Set SPACK compiler variables so that our wrapper knows what to call
if compiler.cc:
env.set('SPACK_CC', compiler.cc)
env.set('CC', os.path.join(link_dir, compiler.link_paths['cc']))
if compiler.cxx:
env.set('SPACK_CXX', compiler.cxx)
env.set('CXX', os.path.join(link_dir, compiler.link_paths['cxx']))
if compiler.f77:
env.set('SPACK_F77', compiler.f77)
env.set('F77', os.path.join(link_dir, compiler.link_paths['f77']))
if compiler.fc:
env.set('SPACK_FC', compiler.fc)
env.set('FC', os.path.join(link_dir, compiler.link_paths['fc']))
# Set SPACK compiler rpath flags so that our wrapper knows what to use
env.set('SPACK_CC_RPATH_ARG', compiler.cc_rpath_arg)
env.set('SPACK_CXX_RPATH_ARG', compiler.cxx_rpath_arg)
env.set('SPACK_F77_RPATH_ARG', compiler.f77_rpath_arg)
env.set('SPACK_FC_RPATH_ARG', compiler.fc_rpath_arg)
env.set('SPACK_LINKER_ARG', compiler.linker_arg)
# Check whether we want to force RPATH or RUNPATH
if spack.config.get('config:shared_linking') == 'rpath':
env.set('SPACK_DTAGS_TO_STRIP', compiler.enable_new_dtags)
env.set('SPACK_DTAGS_TO_ADD', compiler.disable_new_dtags)
else:
env.set('SPACK_DTAGS_TO_STRIP', compiler.disable_new_dtags)
env.set('SPACK_DTAGS_TO_ADD', compiler.enable_new_dtags)
# Set the target parameters that the compiler will add
isa_arg = spec.architecture.target.optimization_flags(compiler)
env.set('SPACK_TARGET_ARGS', isa_arg)
# Trap spack-tracked compiler flags as appropriate.
# env_flags are easy to accidentally override.
inject_flags = {}
env_flags = {}
build_system_flags = {}
for flag in spack.spec.FlagMap.valid_compiler_flags():
# Always convert flag_handler to function type.
# This avoids discrepancies in calling conventions between functions
# and methods, or between bound and unbound methods in python 2.
# We cannot effectively convert everything to a bound method, which
# would be the simpler solution.
if isinstance(pkg.flag_handler, types.FunctionType):
handler = pkg.flag_handler
else:
if sys.version_info >= (3, 0):
handler = pkg.flag_handler.__func__
else:
handler = pkg.flag_handler.im_func
injf, envf, bsf = handler(pkg, flag, spec.compiler_flags[flag])
inject_flags[flag] = injf or []
env_flags[flag] = envf or []
build_system_flags[flag] = bsf or []
# Place compiler flags as specified by flag_handler
for flag in spack.spec.FlagMap.valid_compiler_flags():
# Concreteness guarantees key safety here
if inject_flags[flag]:
# variables SPACK_<FLAG> inject flags through wrapper
var_name = 'SPACK_{0}'.format(flag.upper())
env.set(var_name, ' '.join(f for f in inject_flags[flag]))
if env_flags[flag]:
# implicit variables
env.set(flag.upper(), ' '.join(f for f in env_flags[flag]))
pkg.flags_to_build_system_args(build_system_flags)
env.set('SPACK_COMPILER_SPEC', str(spec.compiler))
env.set('SPACK_SYSTEM_DIRS', ':'.join(system_dirs))
compiler.setup_custom_environment(pkg, env)
return env
def set_build_environment_variables(pkg, env, dirty):
"""Ensure a clean install environment when we build packages.
This involves unsetting pesky environment variables that may
affect the build. It also involves setting environment variables
used by Spack's compiler wrappers.
Args:
pkg: The package we are building
env: The build environment
dirty (bool): Skip unsetting the user's environment settings
"""
# Gather information about various types of dependencies
build_deps = set(pkg.spec.dependencies(deptype=('build', 'test')))
link_deps = set(pkg.spec.traverse(root=False, deptype=('link')))
build_link_deps = build_deps | link_deps
rpath_deps = get_rpath_deps(pkg)
link_dirs = []
include_dirs = []
rpath_dirs = []
# The top-level package is always RPATHed. It hasn't been installed yet
# so the RPATHs are added unconditionally (e.g. even though lib64/ may
# not be created for the install).
for libdir in ['lib', 'lib64']:
lib_path = os.path.join(pkg.prefix, libdir)
rpath_dirs.append(lib_path)
# Set up link, include, RPATH directories that are passed to the
# compiler wrapper
for dep in link_deps:
if is_system_path(dep.prefix):
continue
query = pkg.spec[dep.name]
dep_link_dirs = list()
try:
dep_link_dirs.extend(query.libs.directories)
except NoLibrariesError:
tty.debug("No libraries found for {0}".format(dep.name))
for default_lib_dir in ['lib', 'lib64']:
default_lib_prefix = os.path.join(dep.prefix, default_lib_dir)
if os.path.isdir(default_lib_prefix):
dep_link_dirs.append(default_lib_prefix)
link_dirs.extend(dep_link_dirs)
if dep in rpath_deps:
rpath_dirs.extend(dep_link_dirs)
try:
include_dirs.extend(query.headers.directories)
except NoHeadersError:
tty.debug("No headers found for {0}".format(dep.name))
link_dirs = list(dedupe(filter_system_paths(link_dirs)))
include_dirs = list(dedupe(filter_system_paths(include_dirs)))
rpath_dirs = list(dedupe(filter_system_paths(rpath_dirs)))
env.set(SPACK_LINK_DIRS, ':'.join(link_dirs))
env.set(SPACK_INCLUDE_DIRS, ':'.join(include_dirs))
env.set(SPACK_RPATH_DIRS, ':'.join(rpath_dirs))
build_prefixes = [dep.prefix for dep in build_deps]
build_link_prefixes = [dep.prefix for dep in build_link_deps]
# add run-time dependencies of direct build-time dependencies:
for build_dep in build_deps:
for run_dep in build_dep.traverse(deptype='run'):
build_prefixes.append(run_dep.prefix)
# Filter out system paths: ['/', '/usr', '/usr/local']
# These paths can be introduced into the build when an external package
# is added as a dependency. The problem with these paths is that they often
# contain hundreds of other packages installed in the same directory.
# If these paths come first, they can overshadow Spack installations.
build_prefixes = filter_system_paths(build_prefixes)
build_link_prefixes = filter_system_paths(build_link_prefixes)
# Add dependencies to CMAKE_PREFIX_PATH
env.set_path('CMAKE_PREFIX_PATH', build_link_prefixes)
# Set environment variables if specified for
# the given compiler
compiler = pkg.compiler
env.extend(spack.schema.environment.parse(compiler.environment))
if compiler.extra_rpaths:
extra_rpaths = ':'.join(compiler.extra_rpaths)
env.set('SPACK_COMPILER_EXTRA_RPATHS', extra_rpaths)
# Add bin directories from dependencies to the PATH for the build.
for prefix in build_prefixes:
for dirname in ['bin', 'bin64']:
bin_dir = os.path.join(prefix, dirname)
if os.path.isdir(bin_dir):
env.prepend_path('PATH', bin_dir)
# Add spack build environment path with compiler wrappers first in
# the path. We add the compiler wrapper path, which includes default
# wrappers (cc, c++, f77, f90), AND a subdirectory containing
# compiler-specific symlinks. The latter ensures that builds that
# are sensitive to the *name* of the compiler see the right name when
# we're building with the wrappers.
#
# Conflicts on case-insensitive systems (like "CC" and "cc") are
# handled by putting one in the <build_env_path>/case-insensitive
# directory. Add that to the path too.
env_paths = []
compiler_specific = os.path.join(
spack.paths.build_env_path, os.path.dirname(pkg.compiler.link_paths['cc']))
for item in [spack.paths.build_env_path, compiler_specific]:
env_paths.append(item)
ci = os.path.join(item, 'case-insensitive')
if os.path.isdir(ci):
env_paths.append(ci)
for item in env_paths:
env.prepend_path('PATH', item)
env.set_path(SPACK_ENV_PATH, env_paths)
# Working directory for the spack command itself, for debug logs.
if spack.config.get('config:debug'):
env.set(SPACK_DEBUG, 'TRUE')
env.set(SPACK_SHORT_SPEC, pkg.spec.short_spec)
env.set(SPACK_DEBUG_LOG_ID, pkg.spec.format('{name}-{hash:7}'))
env.set(SPACK_DEBUG_LOG_DIR, spack.main.spack_working_dir)
# Find ccache binary and hand it to build environment
if spack.config.get('config:ccache'):
ccache = Executable('ccache')
if not ccache:
raise RuntimeError("No ccache binary found in PATH")
env.set(SPACK_CCACHE_BINARY, ccache)
# Add any pkgconfig directories to PKG_CONFIG_PATH
for prefix in build_link_prefixes:
for directory in ('lib', 'lib64', 'share'):
pcdir = os.path.join(prefix, directory, 'pkgconfig')
if os.path.isdir(pcdir):
env.prepend_path('PKG_CONFIG_PATH', pcdir)
return env
def _set_variables_for_single_module(pkg, module):
"""Helper function to set module variables for single module."""
# Put a marker on this module so that it won't execute the body of this
# function again, since it is not needed
marker = '_set_run_already_called'
if getattr(module, marker, False):
return
jobs = spack.config.get('config:build_jobs', 16) if pkg.parallel else 1
jobs = min(jobs, multiprocessing.cpu_count())
m = module
m.make_jobs = jobs
# TODO: make these build deps that can be installed if not found.
m.make = MakeExecutable('make', jobs)
m.gmake = MakeExecutable('gmake', jobs)
m.scons = MakeExecutable('scons', jobs)
m.ninja = MakeExecutable('ninja', jobs)
# easy shortcut to os.environ
m.env = os.environ
# Find the configure script in the archive path
# Don't use which for this; we want to find it in the current dir.
m.configure = Executable('./configure')
m.meson = Executable('meson')
m.cmake = Executable('cmake')
m.ctest = MakeExecutable('ctest', jobs)
# Standard CMake arguments
m.std_cmake_args = spack.build_systems.cmake.CMakePackage._std_args(pkg)
m.std_meson_args = spack.build_systems.meson.MesonPackage._std_args(pkg)
# Put spack compiler paths in module scope.
link_dir = spack.paths.build_env_path
m.spack_cc = os.path.join(link_dir, pkg.compiler.link_paths['cc'])
m.spack_cxx = os.path.join(link_dir, pkg.compiler.link_paths['cxx'])
m.spack_f77 = os.path.join(link_dir, pkg.compiler.link_paths['f77'])
m.spack_fc = os.path.join(link_dir, pkg.compiler.link_paths['fc'])
# Emulate some shell commands for convenience
m.pwd = os.getcwd
m.cd = os.chdir
m.mkdir = os.mkdir
m.makedirs = os.makedirs
m.remove = os.remove
m.removedirs = os.removedirs
m.symlink = os.symlink
m.mkdirp = mkdirp
m.install = install
m.install_tree = install_tree
m.rmtree = shutil.rmtree
m.move = shutil.move
# Useful directories within the prefix are encapsulated in
# a Prefix object.
m.prefix = pkg.prefix
# Platform-specific library suffix.
m.dso_suffix = dso_suffix
def static_to_shared_library(static_lib, shared_lib=None, **kwargs):
compiler_path = kwargs.get('compiler', m.spack_cc)
compiler = Executable(compiler_path)
return _static_to_shared_library(pkg.spec.architecture, compiler,
static_lib, shared_lib, **kwargs)
m.static_to_shared_library = static_to_shared_library
# Put a marker on this module so that it won't execute the body of this
# function again, since it is not needed
setattr(m, marker, True)
def set_module_variables_for_package(pkg):
"""Populate the module scope of install() with some useful functions.
This makes things easier for package writers.
"""
# If a user makes their own package repo, e.g.
# spack.pkg.mystuff.libelf.Libelf, and they inherit from an existing class
# like spack.pkg.original.libelf.Libelf, then set the module variables
# for both classes so the parent class can still use them if it gets
# called. parent_class_modules includes pkg.module.
modules = parent_class_modules(pkg.__class__)
for mod in modules:
_set_variables_for_single_module(pkg, mod)
def _static_to_shared_library(arch, compiler, static_lib, shared_lib=None,
**kwargs):
"""
Converts a static library to a shared library. The static library has to
be built with PIC for the conversion to work.
Parameters:
static_lib (str): Path to the static library.
shared_lib (str): Path to the shared library. Default is to derive
from the static library's path.
Keyword arguments:
compiler (str): Path to the compiler. Default is spack_cc.
compiler_output: Where to print compiler output to.
arguments (str list): Additional arguments for the compiler.
version (str): Library version. Default is unspecified.
compat_version (str): Library compatibility version. Default is
version.
"""
compiler_output = kwargs.get('compiler_output', None)
arguments = kwargs.get('arguments', [])
version = kwargs.get('version', None)
compat_version = kwargs.get('compat_version', version)
if not shared_lib:
shared_lib = '{0}.{1}'.format(os.path.splitext(static_lib)[0],
dso_suffix)
compiler_args = []
# TODO: Compiler arguments should not be hardcoded but provided by
# the different compiler classes.
if 'linux' in arch or 'cray' in arch:
soname = os.path.basename(shared_lib)
if compat_version:
soname += '.{0}'.format(compat_version)
compiler_args = [
'-shared',
'-Wl,-soname,{0}'.format(soname),
'-Wl,--whole-archive',
static_lib,
'-Wl,--no-whole-archive'
]
elif 'darwin' in arch:
install_name = shared_lib
if compat_version:
install_name += '.{0}'.format(compat_version)
compiler_args = [
'-dynamiclib',
'-install_name', '{0}'.format(install_name),
'-Wl,-force_load,{0}'.format(static_lib)
]
if compat_version:
compiler_args.extend(['-compatibility_version', '{0}'.format(
compat_version)])
if version:
compiler_args.extend(['-current_version', '{0}'.format(version)])
if len(arguments) > 0:
compiler_args.extend(arguments)
shared_lib_base = shared_lib
if version:
shared_lib += '.{0}'.format(version)
elif compat_version:
shared_lib += '.{0}'.format(compat_version)
compiler_args.extend(['-o', shared_lib])
# Create symlinks for version and compat_version
shared_lib_link = os.path.basename(shared_lib)
if version or compat_version:
os.symlink(shared_lib_link, shared_lib_base)
if compat_version and compat_version != version:
os.symlink(shared_lib_link, '{0}.{1}'.format(shared_lib_base,
compat_version))
return compiler(*compiler_args, output=compiler_output)
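# Illustrative usage sketch (hypothetical paths, not from the original module).
# From a package's install(), the module-scope wrapper installed by
# _set_variables_for_single_module is the usual entry point:
#   static_to_shared_library('/path/to/libfoo.a', version='1.2', compat_version='1')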
def get_rpath_deps(pkg):
"""Return immediate or transitive RPATHs depending on the package."""
if pkg.transitive_rpaths:
return [d for d in pkg.spec.traverse(root=False, deptype=('link'))]
else:
return pkg.spec.dependencies(deptype='link')
def get_rpaths(pkg):
"""Get a list of all the rpaths for a package."""
rpaths = [pkg.prefix.lib, pkg.prefix.lib64]
deps = get_rpath_deps(pkg)
rpaths.extend(d.prefix.lib for d in deps
if os.path.isdir(d.prefix.lib))
rpaths.extend(d.prefix.lib64 for d in deps
if os.path.isdir(d.prefix.lib64))
# Second module is our compiler mod name. We use that to get rpaths from
# module show output.
if pkg.compiler.modules and len(pkg.compiler.modules) > 1:
rpaths.append(path_from_modules([pkg.compiler.modules[1]]))
return list(dedupe(filter_system_paths(rpaths)))
def get_std_cmake_args(pkg):
"""List of standard arguments used if a package is a CMakePackage.
Returns:
list of str: standard arguments that would be used if this
package were a CMakePackage instance.
Args:
pkg (PackageBase): package under consideration
Returns:
list of str: arguments for cmake
"""
return spack.build_systems.cmake.CMakePackage._std_args(pkg)
def get_std_meson_args(pkg):
"""List of standard arguments used if a package is a MesonPackage.
Returns:
list of str: standard arguments that would be used if this
package were a MesonPackage instance.
Args:
pkg (PackageBase): package under consideration
Returns:
list of str: arguments for meson
"""
return spack.build_systems.meson.MesonPackage._std_args(pkg)
def parent_class_modules(cls):
"""
Get list of superclass modules that descend from spack.package.PackageBase
Includes cls.__module__
"""
if (not issubclass(cls, spack.package.PackageBase) or
issubclass(spack.package.PackageBase, cls)):
return []
result = []
module = sys.modules.get(cls.__module__)
if module:
result = [module]
for c in cls.__bases__:
result.extend(parent_class_modules(c))
return result
def load_external_modules(pkg):
"""Traverse a package's spec DAG and load any external modules.
Traverse a package's dependencies and load any external modules
associated with them.
Args:
pkg (PackageBase): package to load deps for
"""
for dep in list(pkg.spec.traverse()):
external_modules = dep.external_modules or []
for external_module in external_modules:
load_module(external_module)
def setup_package(pkg, dirty, context='build'):
"""Execute all environment setup routines."""
env = EnvironmentModifications()
if not dirty:
clean_environment()
# setup compilers and build tools for build contexts
need_compiler = context == 'build' or (context == 'test' and
pkg.test_requires_compiler)
if need_compiler:
set_compiler_environment_variables(pkg, env)
set_build_environment_variables(pkg, env, dirty)
# architecture specific setup
pkg.architecture.platform.setup_platform_environment(pkg, env)
if context == 'build':
# recursive post-order dependency information
env.extend(
modifications_from_dependencies(pkg.spec, context=context)
)
if (not dirty) and (not env.is_unset('CPATH')):
tty.debug("A dependency has updated CPATH, this may lead pkg-"
"config to assume that the package is part of the system"
" includes and omit it when invoked with '--cflags'.")
# setup package itself
set_module_variables_for_package(pkg)
pkg.setup_build_environment(env)
elif context == 'test':
import spack.user_environment as uenv # avoid circular import
env.extend(uenv.environment_modifications_for_spec(pkg.spec))
env.extend(
modifications_from_dependencies(pkg.spec, context=context)
)
set_module_variables_for_package(pkg)
env.prepend_path('PATH', '.')
# Loading modules, in particular if they are meant to be used outside
# of Spack, can change environment variables that are relevant to the
# build of packages. To avoid a polluted environment, preserve the
# value of a few, selected, environment variables
# With the current ordering of environment modifications, this is strictly
# unnecessary. Modules affecting these variables will be overwritten anyway
with preserve_environment('CC', 'CXX', 'FC', 'F77'):
# All module loads that otherwise would belong in previous
# functions have to occur after the env object has its
# modifications applied. Otherwise the environment modifications
# could undo module changes, such as unsetting LD_LIBRARY_PATH
# after a module changes it.
if need_compiler:
for mod in pkg.compiler.modules:
# Fixes issue https://github.com/spack/spack/issues/3153
if os.environ.get("CRAY_CPU_TARGET") == "mic-knl":
load_module("cce")
load_module(mod)
# kludge to handle cray libsci being automatically loaded by PrgEnv
# modules on cray platform. Module unload does no damage when
# unnecessary
module('unload', 'cray-libsci')
if pkg.architecture.target.module_name:
load_module(pkg.architecture.target.module_name)
load_external_modules(pkg)
implicit_rpaths = pkg.compiler.implicit_rpaths()
if implicit_rpaths:
env.set('SPACK_COMPILER_IMPLICIT_RPATHS',
':'.join(implicit_rpaths))
# Make sure nothing's strange about the Spack environment.
validate(env, tty.warn)
env.apply_modifications()
def modifications_from_dependencies(spec, context):
"""Returns the environment modifications that are required by
the dependencies of a spec and also applies modifications
to this spec's package at module scope, if need be.
Args:
spec (Spec): spec for which we want the modifications
context (str): either 'build' for build-time modifications or 'run'
for run-time modifications
"""
env = EnvironmentModifications()
pkg = spec.package
# Maps the context to deptype and method to be called
deptype_and_method = {
'build': (('build', 'link', 'test'),
'setup_dependent_build_environment'),
'run': (('link', 'run'), 'setup_dependent_run_environment'),
'test': (('link', 'run', 'test'), 'setup_dependent_run_environment')
}
deptype, method = deptype_and_method[context]
root = context == 'test'
for dspec in spec.traverse(order='post', root=root, deptype=deptype):
dpkg = dspec.package
set_module_variables_for_package(dpkg)
# Allow dependencies to modify the module
dpkg.setup_dependent_package(pkg.module, spec)
getattr(dpkg, method)(env, spec)
return env
def _setup_pkg_and_run(serialized_pkg, function, kwargs, child_pipe,
input_multiprocess_fd):
context = kwargs.get('context', 'build')
try:
# We are in the child process. Python sets sys.stdin to
# open(os.devnull) to prevent our process and its parent from
# simultaneously reading from the original stdin. But, we assume
# that the parent process is not going to read from it till we
# are done with the child, so we undo Python's precaution.
if input_multiprocess_fd is not None:
sys.stdin = os.fdopen(input_multiprocess_fd.fd)
pkg = serialized_pkg.restore()
if not kwargs.get('fake', False):
kwargs['unmodified_env'] = os.environ.copy()
setup_package(pkg, dirty=kwargs.get('dirty', False),
context=context)
return_value = function(pkg, kwargs)
child_pipe.send(return_value)
except StopPhase as e:
# Do not create a full ChildError from this, it's not an error
# it's a control statement.
child_pipe.send(e)
except BaseException:
# catch ANYTHING that goes wrong in the child process
exc_type, exc, tb = sys.exc_info()
# Need to unwind the traceback in the child because traceback
# objects can't be sent to the parent.
tb_string = traceback.format_exc()
# build up some context from the offending package so we can
# show that, too.
package_context = get_package_context(tb)
logfile = None
if context == 'build':
try:
if hasattr(pkg, 'log_path'):
logfile = pkg.log_path
except NameError:
# 'pkg' is not defined yet
pass
elif context == 'test':
logfile = os.path.join(
pkg.test_suite.stage,
spack.install_test.TestSuite.test_log_name(pkg.spec))
# make a pickleable exception to send to parent.
msg = "%s: %s" % (exc_type.__name__, str(exc))
ce = ChildError(msg,
exc_type.__module__,
exc_type.__name__,
tb_string, logfile, context, package_context)
child_pipe.send(ce)
finally:
child_pipe.close()
if input_multiprocess_fd is not None:
input_multiprocess_fd.close()
def start_build_process(pkg, function, kwargs):
"""Create a child process to do part of a spack build.
Args:
pkg (PackageBase): package whose environment we should set up the
child process for.
function (callable): argless function to run in the child
process.
Usage::
def child_fun():
# do stuff
build_env.start_build_process(pkg, child_fun)
The child process is run with the build environment set up by
spack.build_environment. This allows package authors to have full
control over the environment, etc. without affecting other builds
that might be executed in the same spack call.
If something goes wrong, the child process catches the error and
passes it to the parent wrapped in a ChildError. The parent is
expected to handle (or re-raise) the ChildError.
This uses `multiprocessing.Process` to create the child process. The
mechanism used to create the process differs on different operating
systems and for different versions of Python. In some cases "fork"
is used (i.e. the "fork" system call) and some cases it starts an
entirely new Python interpreter process (in the docs this is referred
to as the "spawn" start method). Breaking it down by OS:
- Linux always uses fork.
- Mac OS uses fork before Python 3.8 and "spawn" for 3.8 and after.
- Windows always uses the "spawn" start method.
For more information on `multiprocessing` child process creation
mechanisms, see https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods
"""
parent_pipe, child_pipe = multiprocessing.Pipe()
input_multiprocess_fd = None
serialized_pkg = spack.subprocess_context.PackageInstallContext(pkg)
try:
# Forward sys.stdin when appropriate, to allow toggling verbosity
if sys.stdin.isatty() and hasattr(sys.stdin, 'fileno'):
input_fd = os.dup(sys.stdin.fileno())
input_multiprocess_fd = MultiProcessFd(input_fd)
p = multiprocessing.Process(
target=_setup_pkg_and_run,
args=(serialized_pkg, function, kwargs, child_pipe,
input_multiprocess_fd))
p.start()
except InstallError as e:
e.pkg = pkg
raise
finally:
# Close the input stream in the parent process
if input_multiprocess_fd is not None:
input_multiprocess_fd.close()
child_result = parent_pipe.recv()
p.join()
# If returns a StopPhase, raise it
if isinstance(child_result, StopPhase):
# do not print
raise child_result
# let the caller know which package went wrong.
if isinstance(child_result, InstallError):
child_result.pkg = pkg
if isinstance(child_result, ChildError):
# If the child process raised an error, print its output here rather
# than waiting until the call to SpackError.die() in main(). This
# allows exception handling output to be logged from within Spack.
# see spack.main.SpackCommand.
child_result.print_context()
raise child_result
return child_result
def get_package_context(traceback, context=3):
"""Return some context for an error message when the build fails.
Args:
traceback (traceback): A traceback from some exception raised during
install
context (int): Lines of context to show before and after the line
where the error happened
This function inspects the stack to find where we failed in the
package file, and it adds detailed context to the long_message
from there.
"""
def make_stack(tb, stack=None):
"""Tracebacks come out of the system in caller -> callee order. Return
an array in callee -> caller order so we can traverse it."""
if stack is None:
stack = []
if tb is not None:
make_stack(tb.tb_next, stack)
stack.append(tb)
return stack
stack = make_stack(traceback)
for tb in stack:
frame = tb.tb_frame
if 'self' in frame.f_locals:
# Find the first proper subclass of PackageBase.
obj = frame.f_locals['self']
if isinstance(obj, spack.package.PackageBase):
break
# We found obj, the Package implementation we care about.
# Point out the location in the install method where we failed.
lines = [
'{0}:{1:d}, in {2}:'.format(
inspect.getfile(frame.f_code),
frame.f_lineno - 1, # subtract 1 because f_lineno is 0-indexed
frame.f_code.co_name
)
]
# Build a message showing context in the install method.
sourcelines, start = inspect.getsourcelines(frame)
# Calculate lineno of the error relative to the start of the function.
# Subtract 1 because f_lineno is 0-indexed.
fun_lineno = frame.f_lineno - start - 1
start_ctx = max(0, fun_lineno - context)
sourcelines = sourcelines[start_ctx:fun_lineno + context + 1]
for i, line in enumerate(sourcelines):
is_error = start_ctx + i == fun_lineno
mark = '>> ' if is_error else ' '
# Add start to get lineno relative to start of file, not function.
marked = ' {0}{1:-6d}{2}'.format(
mark, start + start_ctx + i, line.rstrip())
if is_error:
marked = colorize('@R{%s}' % cescape(marked))
lines.append(marked)
return lines
class InstallError(spack.error.SpackError):
"""Raised by packages when a package fails to install.
Any subclass of InstallError will be annotated by Spack with a
``pkg`` attribute on failure, which the caller can use to get the
package for which the exception was raised.
"""
class ChildError(InstallError):
"""Special exception class for wrapping exceptions from child processes
in Spack's build environment.
The main features of a ChildError are:
1. They're serializable, so when a child build fails, we can send one
of these to the parent and let the parent report what happened.
2. They have a ``traceback`` field containing a traceback generated
on the child immediately after failure. Spack will print this on
failure in lieu of trying to run sys.excepthook on the parent
process, so users will see the correct stack trace from a child.
3. They also contain context, which shows context in the Package
implementation where the error happened. This helps people debug
Python code in their packages. To get it, Spack searches the
stack trace for the deepest frame where ``self`` is in scope and
is an instance of PackageBase. This will generally find a useful
spot in the ``package.py`` file.
The long_message of a ChildError displays one of two things:
1. If the original error was a ProcessError, indicating a command
died during the build, we'll show context from the build log.
2. If the original error was any other type of error, we'll show
context from the Python code.
SpackError handles displaying the special traceback if we're in debug
mode with spack -d.
"""
# List of errors considered "build errors", for which we'll show log
# context instead of Python context.
build_errors = [('spack.util.executable', 'ProcessError')]
def __init__(self, msg, module, classname, traceback_string, log_name,
log_type, context):
super(ChildError, self).__init__(msg)
self.module = module
self.name = classname
self.traceback = traceback_string
self.log_name = log_name
self.log_type = log_type
self.context = context
@property
def long_message(self):
out = StringIO()
out.write(self._long_message if self._long_message else '')
have_log = self.log_name and os.path.exists(self.log_name)
if (self.module, self.name) in ChildError.build_errors:
# The error happened in some external executed process. Show
# the log with errors or warnings highlighted.
if have_log:
write_log_summary(out, self.log_type, self.log_name)
else:
# The error happened in the Python code, so try to show
# some context from the Package itself.
if self.context:
out.write('\n')
out.write('\n'.join(self.context))
out.write('\n')
if out.getvalue():
out.write('\n')
if have_log:
out.write('See {0} log for details:\n'.format(self.log_type))
out.write(' {0}\n'.format(self.log_name))
return out.getvalue()
def __str__(self):
return self.message
def __reduce__(self):
"""__reduce__ is used to serialize (pickle) ChildErrors.
Return a function to reconstruct a ChildError, along with the
salient properties we'll need.
"""
return _make_child_error, (
self.message,
self.module,
self.name,
self.traceback,
self.log_name,
self.log_type,
self.context)
def _make_child_error(msg, module, name, traceback, log, log_type, context):
"""Used by __reduce__ in ChildError to reconstruct pickled errors."""
return ChildError(msg, module, name, traceback, log, log_type, context)
class StopPhase(spack.error.SpackError):
"""Pickle-able exception to control stopped builds."""
def __reduce__(self):
return _make_stop_phase, (self.message, self.long_message)
def _make_stop_phase(msg, long_msg):
return StopPhase(msg, long_msg)
def write_log_summary(out, log_type, log, last=None):
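"""Write a summary of the errors (or, if there are none, the warnings)
found in a build log to ``out``, showing at most ``last`` of the most
recent events when ``last`` is given."""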
errors, warnings = parse_log_events(log)
nerr = len(errors)
nwar = len(warnings)
if nerr > 0:
if last and nerr > last:
errors = errors[-last:]
nerr = last
# If errors are found, only display errors
out.write(
"\n%s found in %s log:\n" %
(plural(nerr, 'error'), log_type))
out.write(make_log_context(errors))
elif nwar > 0:
if last and nwar > last:
warnings = warnings[-last:]
nwar = last
# If no errors are found but warnings are, display warnings
out.write(
"\n%s found in %s log:\n" %
(plural(nwar, 'warning'), log_type))
out.write(make_log_context(warnings))
|
[] |
[] |
[
"CRAY_CPU_TARGET"
] |
[]
|
["CRAY_CPU_TARGET"]
|
python
| 1 | 0 | |
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
import setuptools
HAS_DIST_INFO_CMD = False
try:
import setuptools.command.dist_info
HAS_DIST_INFO_CMD = True
except ImportError:
"""Setuptools version is too old."""
ALL_STRING_TYPES = tuple(map(type, ("", b"", u"")))
MIN_NATIVE_SETUPTOOLS_VERSION = 34, 4, 0
"""Minimal setuptools having good read_configuration implementation."""
RUNTIME_SETUPTOOLS_VERSION = tuple(map(int, setuptools.__version__.split(".")))
"""Setuptools imported now."""
READ_CONFIG_SHIM_NEEDED = RUNTIME_SETUPTOOLS_VERSION < MIN_NATIVE_SETUPTOOLS_VERSION
def str_if_nested_or_str(s):
"""Turn input into a native string if possible."""
if isinstance(s, ALL_STRING_TYPES):
return str(s)
if isinstance(s, (list, tuple)):
return type(s)(map(str_if_nested_or_str, s))
if isinstance(s, (dict,)):
return stringify_dict_contents(s)
return s
def stringify_dict_contents(dct):
"""Turn dict keys and values into native strings."""
return {str_if_nested_or_str(k): str_if_nested_or_str(v) for k, v in dct.items()}
if not READ_CONFIG_SHIM_NEEDED:
import setuptools.config
from setuptools.config import ConfigOptionsHandler
from setuptools.config import read_configuration
import setuptools.dist
# Set default value for 'use_scm_version'
setattr(setuptools.dist.Distribution, "use_scm_version", False)
# Attach bool parser to 'use_scm_version' option
class ShimConfigOptionsHandler(ConfigOptionsHandler):
"""Extension class for ConfigOptionsHandler."""
@property
def parsers(self):
"""Return an option mapping with default data type parsers."""
_orig_parsers = super(ShimConfigOptionsHandler, self).parsers
return dict(use_scm_version=self._parse_bool, **_orig_parsers)
def parse_section_packages__find(self, section_options):
find_kwargs = super(
ShimConfigOptionsHandler, self
).parse_section_packages__find(section_options)
return stringify_dict_contents(find_kwargs)
setuptools.config.ConfigOptionsHandler = ShimConfigOptionsHandler
else:
"""This is a shim for setuptools<required."""
import functools
import io
import json
import sys
import warnings
try:
import setuptools.config
def filter_out_unknown_section(i):
def chi(self, *args, **kwargs):
i(self, *args, **kwargs)
self.sections = {
s: v for s, v in self.sections.items() if s != "packages.find"
}
return chi
setuptools.config.ConfigHandler.__init__ = filter_out_unknown_section(
setuptools.config.ConfigHandler.__init__
)
except ImportError:
pass
def ignore_unknown_options(s):
@functools.wraps(s)
def sw(**attrs):
try:
ignore_warning_regex = (
r"Unknown distribution option: "
r"'(license_file|project_urls|python_requires)'"
)
warnings.filterwarnings(
"ignore",
message=ignore_warning_regex,
category=UserWarning,
module="distutils.dist",
)
return s(**attrs)
finally:
warnings.resetwarnings()
return sw
def parse_predicates(python_requires):
import itertools
import operator
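# For example, ">=2.7, <4" yields (operator.ge, (2, 7)) and then (operator.lt, (4,)).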
sorted_operators_map = tuple(
sorted(
{
">": operator.gt,
"<": operator.lt,
">=": operator.ge,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
"": operator.eq,
}.items(),
key=lambda i: len(i[0]),
reverse=True,
)
)
def is_decimal(s):
return type(u"")(s).isdecimal()
conditions = map(str.strip, python_requires.split(","))
for c in conditions:
for op_sign, op_func in sorted_operators_map:
if not c.startswith(op_sign):
continue
raw_ver = itertools.takewhile(
is_decimal, c[len(op_sign) :].strip().split(".")
)
ver = tuple(map(int, raw_ver))
yield op_func, ver
break
def validate_required_python_or_fail(python_requires=None):
if python_requires is None:
return
python_version = sys.version_info
preds = parse_predicates(python_requires)
for op, v in preds:
py_ver_slug = python_version[: max(len(v), 3)]
condition_matches = op(py_ver_slug, v)
if not condition_matches:
raise RuntimeError(
"requires Python '{}' but the running Python is {}".format(
python_requires, ".".join(map(str, python_version[:3]))
)
)
def verify_required_python_runtime(s):
@functools.wraps(s)
def sw(**attrs):
try:
validate_required_python_or_fail(attrs.get("python_requires"))
except RuntimeError as re:
sys.exit("{} {!s}".format(attrs["name"], re))
return s(**attrs)
return sw
setuptools.setup = ignore_unknown_options(setuptools.setup)
setuptools.setup = verify_required_python_runtime(setuptools.setup)
try:
from configparser import ConfigParser
from configparser import NoSectionError
except ImportError:
from ConfigParser import ConfigParser
from ConfigParser import NoSectionError
ConfigParser.read_file = ConfigParser.readfp
def maybe_read_files(d):
"""Read files if the string starts with `file:` marker."""
FILE_FUNC_MARKER = "file:"
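# For example, "file: README.rst, HISTORY.rst" returns the concatenated contents of both files.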
d = d.strip()
if not d.startswith(FILE_FUNC_MARKER):
return d
descs = []
for fname in map(str.strip, str(d[len(FILE_FUNC_MARKER) :]).split(",")):
with io.open(fname, encoding="utf-8") as f:
descs.append(f.read())
return "".join(descs)
def cfg_val_to_list(v):
"""Turn config val to list and filter out empty lines."""
return list(filter(bool, map(str.strip, str(v).strip().splitlines())))
def cfg_val_to_dict(v):
"""Turn config val to dict and filter out empty lines."""
return dict(
map(
lambda l: list(map(str.strip, l.split("=", 1))),
filter(bool, map(str.strip, str(v).strip().splitlines())),
)
)
def cfg_val_to_primitive(v):
"""Parse primitive config val to appropriate data type."""
return json.loads(v.strip().lower())
def read_configuration(filepath):
"""Read metadata and options from setup.cfg located at filepath."""
cfg = ConfigParser()
with io.open(filepath, encoding="utf-8") as f:
cfg.read_file(f)
md = dict(cfg.items("metadata"))
for list_key in "classifiers", "keywords", "project_urls":
try:
md[list_key] = cfg_val_to_list(md[list_key])
except KeyError:
pass
try:
md["long_description"] = maybe_read_files(md["long_description"])
except KeyError:
pass
opt = dict(cfg.items("options"))
for list_key in "include_package_data", "use_scm_version", "zip_safe":
try:
opt[list_key] = cfg_val_to_primitive(opt[list_key])
except KeyError:
pass
for list_key in "scripts", "install_requires", "setup_requires":
try:
opt[list_key] = cfg_val_to_list(opt[list_key])
except KeyError:
pass
try:
opt["package_dir"] = cfg_val_to_dict(opt["package_dir"])
except KeyError:
pass
try:
opt_package_data = dict(cfg.items("options.package_data"))
if not opt_package_data.get("", "").strip():
opt_package_data[""] = opt_package_data["*"]
del opt_package_data["*"]
except (KeyError, NoSectionError):
opt_package_data = {}
try:
opt_extras_require = dict(cfg.items("options.extras_require"))
opt["extras_require"] = {}
for k, v in opt_extras_require.items():
opt["extras_require"][k] = cfg_val_to_list(v)
except NoSectionError:
pass
opt["package_data"] = {}
for k, v in opt_package_data.items():
opt["package_data"][k] = cfg_val_to_list(v)
try:
opt_exclude_package_data = dict(cfg.items("options.exclude_package_data"))
if (
not opt_exclude_package_data.get("", "").strip()
and "*" in opt_exclude_package_data
):
opt_exclude_package_data[""] = opt_exclude_package_data["*"]
del opt_exclude_package_data["*"]
except NoSectionError:
pass
else:
opt["exclude_package_data"] = {}
for k, v in opt_exclude_package_data.items():
opt["exclude_package_data"][k] = cfg_val_to_list(v)
cur_pkgs = opt.get("packages", "").strip()
if "\n" in cur_pkgs:
opt["packages"] = cfg_val_to_list(opt["packages"])
elif cur_pkgs.startswith("find:"):
opt_packages_find = stringify_dict_contents(
dict(cfg.items("options.packages.find"))
)
opt["packages"] = setuptools.find_packages(**opt_packages_find)
return {"metadata": md, "options": opt}
def cut_local_version_on_upload(version):
"""Generate a PEP440 local version if uploading to PyPI."""
import os
import setuptools_scm.version # only present during setup time
IS_PYPI_UPLOAD = os.getenv("PYPI_UPLOAD") == "true" # set in tox.ini
return (
""
if IS_PYPI_UPLOAD
else setuptools_scm.version.get_local_node_and_date(version)
)
if HAS_DIST_INFO_CMD:
class patched_dist_info(setuptools.command.dist_info.dist_info):
def run(self):
self.egg_base = str_if_nested_or_str(self.egg_base)
return setuptools.command.dist_info.dist_info.run(self)
declarative_setup_params = read_configuration("setup.cfg")
"""Declarative metadata and options as read by setuptools."""
setup_params = {}
"""Explicit metadata for passing into setuptools.setup() call."""
setup_params = dict(setup_params, **declarative_setup_params["metadata"])
setup_params = dict(setup_params, **declarative_setup_params["options"])
if HAS_DIST_INFO_CMD:
setup_params["cmdclass"] = {"dist_info": patched_dist_info}
setup_params["use_scm_version"] = {"local_scheme": cut_local_version_on_upload}
# Patch incorrectly decoded package_dir option
# ``egg_info`` demands native strings failing with unicode under Python 2
# Ref https://github.com/pypa/setuptools/issues/1136
setup_params = stringify_dict_contents(setup_params)
__name__ == "__main__" and setuptools.setup(**setup_params)
|
[] |
[] |
[
"PYPI_UPLOAD"
] |
[]
|
["PYPI_UPLOAD"]
|
python
| 1 | 0 | |
ml_service/util/attach_compute.py
|
import os
from dotenv import load_dotenv
from azureml.core import Workspace
from azureml.core.compute import AmlCompute
from azureml.core.compute import ComputeTarget
from azureml.exceptions import ComputeTargetException
def get_compute(
workspace: Workspace,
compute_name: str,
vm_size: str
):
# Load the environment variables from .env in case this script
# is called outside an existing process
load_dotenv()
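# The provisioning settings below fall back to the AML_CLUSTER_PRIORITY,
# AML_CLUSTER_MIN_NODES and AML_CLUSTER_MAX_NODES environment variables.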
# Check whether the compute target already exists; reuse it if it does
try:
if compute_name in workspace.compute_targets:
compute_target = workspace.compute_targets[compute_name]
if compute_target and type(compute_target) is AmlCompute:
print('Found existing compute target ' + compute_name
+ ' so using it.')
else:
compute_config = AmlCompute.provisioning_configuration(
vm_size=vm_size,
vm_priority=os.environ.get("AML_CLUSTER_PRIORITY",
'lowpriority'),
min_nodes=int(os.environ.get("AML_CLUSTER_MIN_NODES", 0)),
max_nodes=int(os.environ.get("AML_CLUSTER_MAX_NODES", 4)),
idle_seconds_before_scaledown="300"
# #Uncomment the below lines for VNet support
# vnet_resourcegroup_name=vnet_resourcegroup_name,
# vnet_name=vnet_name,
# subnet_name=subnet_name
)
compute_target = ComputeTarget.create(workspace, compute_name,
compute_config)
compute_target.wait_for_completion(
show_output=True,
min_node_count=None,
timeout_in_minutes=10)
return compute_target
except ComputeTargetException:
print('An error occurred trying to provision compute.')
exit()
|
[] |
[] |
[
"AML_CLUSTER_MIN_NODES",
"AML_CLUSTER_MAX_NODES",
"AML_CLUSTER_PRIORITY"
] |
[]
|
["AML_CLUSTER_MIN_NODES", "AML_CLUSTER_MAX_NODES", "AML_CLUSTER_PRIORITY"]
|
python
| 3 | 0 | |
api/runtask.go
|
package runtask
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"cloud.google.com/go/pubsub"
)
type target struct {
Workspace string `json:"workspace"`
ID string `json:"id"`
Name string `json:"name,omitempty"`
Params map[string]string `json:"params,omitempty"`
}
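// Example request body (illustrative values only):
// {"workspace":"dev","id":"42","params":{"env":"prod"}}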
// RunTask API handler
func RunTask(w http.ResponseWriter, r *http.Request) {
untrustedData, err := ioutil.ReadAll(r.Body)
if err != nil {
http.Error(w, "failed to read body", http.StatusInternalServerError)
return
}
r.Body.Close()
var target target
err = json.Unmarshal(untrustedData, &target)
if err != nil {
http.Error(w, fmt.Sprintf("failed to unmarshal body: %v", err), http.StatusBadRequest)
return
}
log.Printf("target: %v", target)
// Validate
// FIXME we need to check this user has permission to run this task
if target.Workspace == "" {
http.Error(w, "workspace must be specified", http.StatusBadRequest)
return
}
if target.ID == "" {
http.Error(w, "id must be specified", http.StatusBadRequest)
return
}
// We don't trust the data posted to us enough to repost the same bytes to Reusabolt
// Let's remarshal the target which contains exactly the right fields and has been verified
trustedData, err := json.Marshal(target)
if err != nil {
http.Error(w, fmt.Sprintf("failed to marshal message: %v", err), http.StatusInternalServerError)
return
}
// Send the pubsub message to Reusabolt
ctx := context.Background()
project := os.Getenv("GCP_PROJECT")
if len(project) == 0 {
http.Error(w, "GCP_PROJECT environment variable must be set", http.StatusInternalServerError)
return
}
client, err := pubsub.NewClient(ctx, project)
if err != nil {
http.Error(w, fmt.Sprintf("failed to create pubsub client: %v", err), http.StatusInternalServerError)
return
}
topic := client.TopicInProject("reusabolt", project)
result := topic.Publish(ctx, &pubsub.Message{Data: trustedData})
_, err = result.Get(ctx)
if err != nil {
http.Error(w, fmt.Sprintf("failed to publish a message to the 'reusabolt' topic: %v", err), http.StatusInternalServerError)
return
}
}
|
[
"\"GCP_PROJECT\""
] |
[] |
[
"GCP_PROJECT"
] |
[]
|
["GCP_PROJECT"]
|
go
| 1 | 0 | |
minemeld/ft/cif.py
|
# Copyright 2015-2016 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import logging
import os
import arrow
import ujson
import yaml
import cifsdk.client
import cifsdk.constants
from . import basepoller
LOG = logging.getLogger(__name__)
class Feed(basepoller.BasePollerFT):
def configure(self):
super(Feed, self).configure()
self.token = None
self.remote = self.config.get('remote', None)
self.verify_cert = self.config.get('verify_cert', True)
self.filters = self.config.get('filters', None)
self.initial_days = self.config.get('initial_days', 7)
self.prefix = self.config.get('prefix', 'cif')
self.fields = self.config.get('fields', cifsdk.constants.FIELDS)
self.side_config_path = self.config.get('side_config', None)
if self.side_config_path is None:
self.side_config_path = os.path.join(
os.environ['MM_CONFIG_DIR'],
'%s_side_config.yml' % self.name
)
self._load_side_config()
def _load_side_config(self):
try:
with open(self.side_config_path, 'r') as f:
sconfig = yaml.safe_load(f)
except Exception as e:
LOG.error('%s - Error loading side config: %s', self.name, str(e))
return
self.token = sconfig.get('token', None)
if self.token is not None:
LOG.info('%s - token set', self.name)
self.remote = sconfig.get('remote', self.remote)
self.verify_cert = sconfig.get('verify_cert', self.verify_cert)
filters = sconfig.get('filters', self.filters)
if filters is not None:
if self.filters is not None:
self.filters.update(filters)
else:
self.filters = filters
def _process_item(self, item):
indicator = item.get('observable', None)
if indicator is None:
LOG.error('%s - no observable in item', self.name)
return [[None, None]]
otype = item.get('otype', None)
if otype is None:
LOG.error('%s - no otype in item', self.name)
return [[None, None]]
if otype == 'ipv4':
type_ = 'IPv4'
elif otype == 'ipv6':
type_ = 'IPv6'
elif otype == 'fqdn':
type_ = 'domain'
elif otype == 'url':
type_ = 'URL'
else:
LOG.error('%s - unhandled otype %s', self.name, otype)
return [[None, None]]
attributes = {
'type': type_
}
for field in self.fields:
if field in ['observable', 'otype', 'confidence']:
continue
if field not in item:
continue
attributes['%s_%s' % (self.prefix, field)] = item[field]
if 'confidence' in item:
attributes['confidence'] = item['confidence']
LOG.debug('%s - %s: %s', self.name, indicator, attributes)
return [[indicator, attributes]]
def _build_iterator(self, now):
if self.token is None or self.remote is None or self.filters is None:
LOG.info(
'%s - token, remote or filters not set, poll not performed',
self.name
)
raise RuntimeError(
'%s - token, remote or filters not set, poll not performed' % self.name
)
filters = {}
filters.update(self.filters)
days = filters.pop('days', self.initial_days)
now = arrow.get(now/1000.0)
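# Query window: end at `now`; start at the last successful run, or
# `initial_days` back when this is the first poll.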
filters['reporttimeend'] = '{0}Z'.format(
now.format('YYYY-MM-DDTHH:mm:ss')
)
if self.last_successful_run is None:
filters['reporttime'] = '{0}Z'.format(
now.shift(days=-days).format('YYYY-MM-DDTHH:mm:ss')
)
else:
filters['reporttime'] = '{0}Z'.format(
arrow.get(self.last_successful_run/1000.0).format('YYYY-MM-DDTHH:mm:ss')
)
LOG.debug('%s - filters: %s', self.name, filters)
cifclient = cifsdk.client.Client(
token=self.token,
remote=self.remote,
verify_ssl=self.verify_cert,
timeout=900
)
try:
ret = cifclient.search(filters=filters, decode=False)
except SystemExit as e:
raise RuntimeError(str(e))
return ujson.loads(ret)
def hup(self, source=None):
LOG.info('%s - hup received, reload side config', self.name)
self._load_side_config()
super(Feed, self).hup(source=source)
@staticmethod
def gc(name, config=None):
basepoller.BasePollerFT.gc(name, config=config)
side_config_path = None
if config is not None:
side_config_path = config.get('side_config', None)
if side_config_path is None:
side_config_path = os.path.join(
os.environ['MM_CONFIG_DIR'],
'{}_side_config.yml'.format(name)
)
try:
os.remove(side_config_path)
except:
pass
|
[] |
[] |
[
"MM_CONFIG_DIR"
] |
[]
|
["MM_CONFIG_DIR"]
|
python
| 1 | 0 | |
qa/rpc-tests/test_framework/util.py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
import os
import sys
from binascii import hexlify, unhexlify
from base64 import b64encode
from decimal import Decimal, ROUND_DOWN
import json
import http.client
import random
import shutil
import subprocess
import time
import re
import errno
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
COVERAGE_DIR = None
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
BITCOIND_PROC_WAIT_TIMEOUT = 60
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
#Set Mocktime default to OFF.
#MOCKTIME is only needed for scripts that use the
#cached version of the blockchain. If the cached
#version of the blockchain is used without MOCKTIME
#then the mempools will not sync due to IBD.
MOCKTIME = 0
def enable_mocktime():
#For backward compatibility of the python scripts
#with previous versions of the cache, set MOCKTIME
#to Jul 12, 2014 15:53:56 + (201 * 12)
global MOCKTIME
MOCKTIME = 1405166036 + (201 * 12)
def disable_mocktime():
global MOCKTIME
MOCKTIME = 0
def get_mocktime():
return MOCKTIME
def enable_coverage(dirname):
"""Maintain a log of which RPC calls are made during testing."""
global COVERAGE_DIR
COVERAGE_DIR = dirname
def get_rpc_proxy(url, node_number, timeout=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
COVERAGE_DIR, node_number) if COVERAGE_DIR else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
assert(n <= MAX_NODES)
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
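# For example, with PortSeed.n == 1, p2p_port(0) == 11008 and rpc_port(0) == 16008.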
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def sync_blocks(rpc_connections, wait=1, timeout=60):
"""
Wait until everybody has the same tip
"""
while timeout > 0:
tips = [ x.getbestblockhash() for x in rpc_connections ]
if tips == [ tips[0] ]*len(tips):
return True
time.sleep(wait)
timeout -= wait
raise AssertionError("Block sync failed")
def sync_mempools(rpc_connections, wait=1, timeout=60):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while timeout > 0:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
return True
time.sleep(wait)
timeout -= wait
raise AssertionError("Mempool sync failed")
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
rpc_u, rpc_p = rpc_auth_pair(n)
with open(os.path.join(datadir, "viacoin.conf"), 'w', encoding='utf8') as f:
f.write("regtest=1\n")
f.write("rpcuser=" + rpc_u + "\n")
f.write("rpcpassword=" + rpc_p + "\n")
f.write("port="+str(p2p_port(n))+"\n")
f.write("rpcport="+str(rpc_port(n))+"\n")
f.write("listenonion=0\n")
return datadir
def rpc_auth_pair(n):
return 'rpcuser💻' + str(n), 'rpcpass🔑' + str(n)
def rpc_url(i, rpchost=None):
rpc_u, rpc_p = rpc_auth_pair(i)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
def wait_for_bitcoind_start(process, url, i):
'''
Wait for bitcoind to start. This means that RPC is accessible and fully initialized.
Raise an exception if bitcoind exits during initialization.
'''
while True:
if process.poll() is not None:
raise Exception('viacoind exited with status %i during initialization' % process.returncode)
try:
rpc = get_rpc_proxy(url, i)
blocks = rpc.getblockcount()
break # break out of loop on success
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
if e.error['code'] != -28: # RPC in warmup?
raise  # unknown JSON-RPC exception
time.sleep(0.25)
def initialize_chain(test_dir, num_nodes):
"""
Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache
"""
assert num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(os.path.join('cache', 'node'+str(i))):
create_cache = True
break
if create_cache:
#find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(os.path.join("cache","node"+str(i))):
shutil.rmtree(os.path.join("cache","node"+str(i)))
# Create cache directories, run bitcoinds:
for i in range(MAX_NODES):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("VIACOIND", "viacoind"), "-server", "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print("initialize_chain: viacoind started, waiting for RPC to come up")
wait_for_bitcoind_start(bitcoind_processes[i], rpc_url(i), i)
if os.getenv("PYTHON_DEBUG", ""):
print("initialize_chain: RPC succesfully started")
rpcs = []
for i in range(MAX_NODES):
try:
rpcs.append(get_rpc_proxy(rpc_url(i), i))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
# blocks are created with timestamps 12 seconds apart
# starting from 201*12 in the past
enable_mocktime()
block_time = get_mocktime() - (201 * 12)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].generate(1)
block_time += 12
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
disable_mocktime()
for i in range(MAX_NODES):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(num_nodes):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in bitcoin.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-F:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
"""
Start a bitcoind and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
if binary is None:
binary = os.getenv("VIACOIND", "viacoind")
args = [ binary, "-datadir="+datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-debug=1", "-mocktime="+str(get_mocktime()) ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print("start_node: viacoind started, waiting for RPC to come up")
url = rpc_url(i, rpchost)
wait_for_bitcoind_start(bitcoind_processes[i], url, i)
if os.getenv("PYTHON_DEBUG", ""):
print("start_node: RPC succesfully started")
proxy = get_rpc_proxy(url, i, timeout=timewait)
if COVERAGE_DIR:
coverage.write_all_rpc_commands(COVERAGE_DIR, proxy)
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
"""
Start multiple bitcoinds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for _ in range(num_nodes) ]
if binary is None: binary = [ None for _ in range(num_nodes) ]
rpcs = []
try:
for i in range(num_nodes):
rpcs.append(start_node(i, dirname, extra_args[i], rpchost, timewait=timewait, binary=binary[i]))
except: # If one node failed to start, stop the others
stop_nodes(rpcs)
raise
return rpcs
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
try:
node.stop()
except http.client.CannotSendRequest as e:
print("WARN: Unable to stop node: " + repr(e))
bitcoind_processes[i].wait(timeout=BITCOIND_PROC_WAIT_TIMEOUT)
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
try:
node.stop()
except http.client.CannotSendRequest as e:
print("WARN: Unable to stop node: " + repr(e))
del nodes[:] # Emptying array closes connections as a side effect
wait_bitcoinds()
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes.values():
bitcoind.wait(timeout=BITCOIND_PROC_WAIT_TIMEOUT)
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_fee_amount(fee, tx_size, fee_per_kB):
"""Assert the fee was in range"""
target_fee = tx_size * fee_per_kB / 1000
if fee < target_fee:
raise AssertionError("Fee of %s VIA too low! (Should be %s VIA)"%(str(fee), str(target_fee)))
# allow the wallet's estimation to be at most 2 bytes off
if fee > (tx_size + 2) * fee_per_kB / 1000:
raise AssertionError("Fee of %s VIA too high! (Should be %s VIA)"%(str(fee), str(target_fee)))
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError("Expected substring not found:"+e.error['message'])
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find = False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find == True:
assert_equal(expected, { })
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find == True:
num_matched = num_matched+1
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0 and should_not_find != True:
raise AssertionError("No objects matched %s"%(str(to_match)))
if num_matched > 0 and should_not_find == True:
raise AssertionError("Objects were found %s"%(str(to_match)))
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
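# Mine enough blocks so there are mature coinbase outputs to split
# (coinbase outputs need 100 confirmations before they can be spent).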
node.generate(int(0.5*count)+101)
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value/2)
outputs[addr2] = satoshi_round(send_value/2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransaction(raw_tx)["hex"]
txid = node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts(txouts_cnt = 7):
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" #OP_RETURN OP_PUSH2 512 bytes
for i in range (512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = "%02x" % (txouts_cnt + 1)
for k in range(txouts_cnt):
# add txout value
txouts = txouts + "0000000000000000"
# add length of script_pubkey
txouts = txouts + "fd0402"
# add script_pubkey
txouts = txouts + script_pubkey
return txouts
def create_tx(node, coinbase, to_address, amount):
inputs = [{ "txid" : coinbase, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, fee):
addr = node.getnewaddress()
txids = []
for i in range(len(utxos)):
t = utxos.pop()
inputs = []
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr] = satoshi_round(send_value)
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransaction(newtx, None, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def get_bip9_status(node, key):
info = node.getblockchaininfo()
return info['bip9_softforks'][key]
|
[] |
[] |
[
"VIACOIND",
"PYTHON_DEBUG"
] |
[]
|
["VIACOIND", "PYTHON_DEBUG"]
|
python
| 2 | 0 | |
pandas/util/testing.py
|
from __future__ import division
# pylint: disable-msg=W0402
import re
import string
import sys
import tempfile
import warnings
import inspect
import os
import subprocess
import locale
import traceback
from datetime import datetime
from functools import wraps
from contextlib import contextmanager
from numpy.random import randn, rand
import numpy as np
import pandas as pd
from pandas.core.arrays import ExtensionArray
from pandas.core.dtypes.missing import array_equivalent
from pandas.core.dtypes.common import (
is_datetimelike_v_numeric,
is_datetimelike_v_object,
is_number, is_bool,
needs_i8_conversion,
is_categorical_dtype,
is_interval_dtype,
is_sequence,
is_list_like)
from pandas.io.formats.printing import pprint_thing
from pandas.core.algorithms import take_1d
import pandas.core.common as com
import pandas.compat as compat
from pandas.compat import (
filter, map, zip, range, unichr, lrange, lmap, lzip, u, callable, Counter,
raise_with_traceback, httplib, StringIO, PY3)
from pandas import (bdate_range, CategoricalIndex, Categorical, IntervalIndex,
DatetimeIndex, TimedeltaIndex, PeriodIndex, RangeIndex,
Index, MultiIndex,
Series, DataFrame, Panel)
from pandas._libs import testing as _testing
from pandas.io.common import urlopen
N = 30
K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, compat.ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('always', _testing_mode_warnings)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('ignore', _testing_mode_warnings)
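# Install the testing-mode warning filters as soon as this module is imported.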
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option('^display.', silent=True)
def round_trip_pickle(obj, path=None):
"""
Pickle an object and then read it again.
Parameters
----------
obj : pandas object
The object to pickle and then re-read.
path : str, default None
The path where the pickled object is written and then read.
Returns
-------
round_trip_pickled_object : pandas object
The original object that was pickled and then re-read.
"""
if path is None:
path = u('__{random_bytes}__.pickle'.format(random_bytes=rands(10)))
with ensure_clean(path) as path:
pd.to_pickle(obj, path)
return pd.read_pickle(path)
def round_trip_pathlib(writer, reader, path=None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip('pathlib').Path
if path is None:
path = '___pathlib___'
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path=None):
"""
Write an object to file specified by a py.path LocalPath and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip('py.path').local
if path is None:
path = '___localpath___'
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
@contextmanager
def decompress_file(path, compression):
"""
Open a compressed file and return a file object
Parameters
----------
path : str
The path where the file is read from
compression : {'gzip', 'bz2', 'zip', 'xz', None}
Name of the decompression to use
Returns
-------
f : file object
"""
if compression is None:
f = open(path, 'rb')
elif compression == 'gzip':
import gzip
f = gzip.open(path, 'rb')
elif compression == 'bz2':
import bz2
f = bz2.BZ2File(path, 'rb')
elif compression == 'xz':
lzma = compat.import_lzma()
f = lzma.LZMAFile(path, 'rb')
elif compression == 'zip':
import zipfile
zip_file = zipfile.ZipFile(path)
zip_names = zip_file.namelist()
if len(zip_names) == 1:
f = zip_file.open(zip_names.pop())
else:
raise ValueError('ZIP file {} error. Only one file per ZIP.'
.format(path))
else:
msg = 'Unrecognized compression type: {}'.format(compression)
raise ValueError(msg)
yield f
f.close()
def assert_almost_equal(left, right, check_exact=False,
check_dtype='equiv', check_less_precise=False,
**kwargs):
"""
Check that the left and right objects are approximately equal.
Parameters
----------
left : object
right : object
check_exact : bool, default False
Whether to compare number exactly.
check_dtype : bool or {'equiv'}, default 'equiv'
check dtype if both a and b are the same type
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
"""
if isinstance(left, pd.Index):
return assert_index_equal(left, right, check_exact=check_exact,
exact=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.Series):
return assert_series_equal(left, right, check_exact=check_exact,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.DataFrame):
return assert_frame_equal(left, right, check_exact=check_exact,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
else:
# other sequences
if check_dtype:
if is_number(left) and is_number(right):
# do not compare numeric classes, like np.float64 and float
pass
elif is_bool(left) and is_bool(right):
# do not compare bool classes, like np.bool_ and bool
pass
else:
if (isinstance(left, np.ndarray) or
isinstance(right, np.ndarray)):
obj = 'numpy array'
else:
obj = 'Input'
assert_class_equal(left, right, obj=obj)
return _testing.assert_almost_equal(
left, right,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
err_msg = "{name} Expected type {exp_type}, found {act_type} instead"
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(err_msg.format(name=cls_name, exp_type=cls,
act_type=type(left)))
if not isinstance(right, cls):
raise AssertionError(err_msg.format(name=cls_name, exp_type=cls,
act_type=type(right)))
def assert_dict_equal(left, right, compare_keys=True):
_check_isinstance(left, right, dict)
return _testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p=0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits),
dtype=(np.str_, 1))
RANDU_CHARS = np.array(list(u("").join(map(unichr, lrange(1488, 1488 + 26))) +
string.digits), dtype=(np.unicode_, 1))
def rands_array(nchars, size, dtype='O'):
"""Generate an array of byte strings."""
retval = (np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def randu_array(nchars, size, dtype='O'):
"""Generate an array of unicode strings."""
retval = (np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return ''.join(np.random.choice(RANDS_CHARS, nchars))
def randu(nchars):
"""
Generate one random unicode string.
See `randu_array` if you want to create an array of random unicode strings.
"""
return ''.join(np.random.choice(RANDU_CHARS, nchars))
def close(fignum=None):
from matplotlib.pyplot import get_fignums, close as _close
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
# -----------------------------------------------------------------------------
# locale utilities
def check_output(*popenargs, **kwargs):
# shamelessly taken from Python 2.7 source
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.PIPE,
*popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
def _default_locale_getter():
try:
raw_locales = check_output(['locale -a'], shell=True)
except subprocess.CalledProcessError as e:
raise type(e)("{exception}, the 'locale -a' command cannot be found "
"on your system".format(exception=e))
return raw_locales
def get_locales(prefix=None, normalize=True,
locale_getter=_default_locale_getter):
"""Get all the locales that are available on the system.
Parameters
----------
prefix : str
If not ``None`` then return only those locales with the prefix
provided. For example to get all English language locales (those that
start with ``"en"``), pass ``prefix="en"``.
normalize : bool
Call ``locale.normalize`` on the resulting list of available locales.
If ``True``, only locales that can be set without throwing an
``Exception`` are returned.
locale_getter : callable
The function to use to retrieve the current locales. This should return
a string with each locale separated by a newline character.
Returns
-------
locales : list of strings
A list of locale strings that can be set with ``locale.setlocale()``.
For example::
locale.setlocale(locale.LC_ALL, locale_string)
On error will return None (no locale available, e.g. Windows)
"""
try:
raw_locales = locale_getter()
except Exception:
return None
try:
# raw_locales is "\n" separated list of locales
# it may contain non-decodable parts, so split
# extract what we can and then rejoin.
raw_locales = raw_locales.split(b'\n')
out_locales = []
for x in raw_locales:
if PY3:
out_locales.append(str(
x, encoding=pd.options.display.encoding))
else:
out_locales.append(str(x))
except TypeError:
pass
if prefix is None:
return _valid_locales(out_locales, normalize)
found = re.compile('{prefix}.*'.format(prefix=prefix)) \
.findall('\n'.join(out_locales))
return _valid_locales(found, normalize)
@contextmanager
def set_locale(new_locale, lc_var=locale.LC_ALL):
"""Context manager for temporarily setting a locale.
Parameters
----------
new_locale : str or tuple
A string of the form <language_country>.<encoding>. For example to set
the current locale to US English with a UTF8 encoding, you would pass
"en_US.UTF-8".
Notes
-----
This is useful when you want to run a particular block of code under a
particular locale, without globally setting the locale. This probably isn't
thread-safe.
"""
current_locale = locale.getlocale()
try:
locale.setlocale(lc_var, new_locale)
try:
normalized_locale = locale.getlocale()
except ValueError:
yield new_locale
else:
if com._all_not_none(*normalized_locale):
yield '.'.join(normalized_locale)
else:
yield new_locale
finally:
locale.setlocale(lc_var, current_locale)
def _can_set_locale(lc):
"""Check to see if we can set a locale without throwing an exception.
Parameters
----------
lc : str
The locale to attempt to set.
Returns
-------
isvalid : bool
Whether the passed locale can be set
"""
try:
with set_locale(lc):
pass
except locale.Error:  # horrible name for an Exception subclass
return False
else:
return True
def _valid_locales(locales, normalize):
"""Return a list of normalized locales that do not throw an ``Exception``
when set.
Parameters
----------
locales : str
A string where each locale is separated by a newline.
normalize : bool
Whether to call ``locale.normalize`` on each locale.
Returns
-------
valid_locales : list
A list of valid locales.
"""
if normalize:
normalizer = lambda x: locale.normalize(x.strip())
else:
normalizer = lambda x: x.strip()
return list(filter(_can_set_locale, map(normalizer, locales)))
# -----------------------------------------------------------------------------
# Stdout / stderr decorators
def capture_stdout(f):
"""
Decorator to capture stdout in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stdout.
Returns
-------
f : callable
The decorated test ``f``, which captures stdout.
Examples
--------
>>> from pandas.util.testing import capture_stdout
>>>
>>> import sys
>>>
>>> @capture_stdout
... def test_print_pass():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "foo\n"
>>>
>>> @capture_stdout
... def test_print_fail():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "bar\n"
...
AssertionError: assert 'foo\n' == 'bar\n'
"""
@wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stdout = StringIO()
f(*args, **kwargs)
finally:
sys.stdout = sys.__stdout__
return wrapper
def capture_stderr(f):
"""
Decorator to capture stderr in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stderr.
Returns
-------
f : callable
The decorated test ``f``, which captures stderr.
Examples
--------
>>> from pandas.util.testing import capture_stderr
>>>
>>> import sys
>>>
>>> @capture_stderr
... def test_stderr_pass():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
... assert out == "foo\n"
>>>
>>> @capture_stderr
... def test_stderr_fail():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
... assert out == "bar\n"
...
AssertionError: assert 'foo\n' == 'bar\n'
"""
@wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stderr = StringIO()
f(*args, **kwargs)
finally:
sys.stderr = sys.__stderr__
return wrapper
# -----------------------------------------------------------------------------
# Console debugging tools
def debug(f, *args, **kwargs):
from pdb import Pdb as OldPdb
try:
from IPython.core.debugger import Pdb
kw = dict(color_scheme='Linux')
except ImportError:
Pdb = OldPdb
kw = {}
pdb = Pdb(**kw)
return pdb.runcall(f, *args, **kwargs)
def pudebug(f, *args, **kwargs):
import pudb
return pudb.runcall(f, *args, **kwargs)
def set_trace():
from IPython.core.debugger import Pdb
try:
Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
except Exception:
from pdb import Pdb as OldPdb
OldPdb().set_trace(sys._getframe().f_back)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False):
"""Gets a temporary path and agrees to remove on close.
Parameters
----------
filename : str (optional)
if None, creates a temporary file which is then removed when out of
scope. if passed, creates temporary file with filename as ending.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
"""
filename = filename or ''
fd = None
if return_filelike:
f = tempfile.TemporaryFile(suffix=filename)
try:
yield f
finally:
f.close()
else:
# don't generate tempfile if using a path with directory specified
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
fd, filename = tempfile.mkstemp(suffix=filename)
except UnicodeEncodeError:
import pytest
pytest.skip('no unicode file names on this system')
try:
yield filename
finally:
try:
os.close(fd)
except Exception as e:
print("Couldn't close file descriptor: {fdesc} (file: {fname})"
.format(fdesc=fd, fname=filename))
try:
if os.path.exists(filename):
os.remove(filename)
except Exception as e:
print("Exception on removing file: {error}".format(error=e))
def get_data_path(f=''):
"""Return the path of a data file, these are relative to the current test
directory.
"""
# get our callers file
_, filename, _, _, _, _ = inspect.getouterframes(inspect.currentframe())[1]
base_dir = os.path.abspath(os.path.dirname(filename))
return os.path.join(base_dir, 'data', f)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2):
"""Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def assert_index_equal(left, right, exact='equiv', check_names=True,
check_less_precise=False, check_exact=True,
check_categorical=True, obj='Index'):
"""Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
    exact : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_exact : bool, default True
Whether to compare number exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message
"""
def _check_types(l, r, obj='Index'):
if exact:
assert_class_equal(left, right, exact=exact, obj=obj)
assert_attr_equal('dtype', l, r, obj=obj)
# allow string-like to have different inferred_types
if l.inferred_type in ('string', 'unicode'):
assert r.inferred_type in ('string', 'unicode')
else:
assert_attr_equal('inferred_type', l, r, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
labels = index.labels[level]
filled = take_1d(unique.values, labels, fill_value=unique._na_value)
values = unique._shallow_copy(filled, name=index.names[level])
return values
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = '{obj} levels are different'.format(obj=obj)
msg2 = '{nlevels}, {left}'.format(nlevels=left.nlevels, left=left)
msg3 = '{nlevels}, {right}'.format(nlevels=right.nlevels, right=right)
raise_assert_detail(obj, msg1, msg2, msg3)
# length comparison
if len(left) != len(right):
msg1 = '{obj} length are different'.format(obj=obj)
msg2 = '{length}, {left}'.format(length=len(left), left=left)
msg3 = '{length}, {right}'.format(length=len(right), right=right)
raise_assert_detail(obj, msg1, msg2, msg3)
    # MultiIndex special comparison for more informative error messages
if left.nlevels > 1:
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = 'MultiIndex level [{level}]'.format(level=level)
assert_index_equal(llevel, rlevel,
exact=exact, check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact, obj=lobj)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
if check_exact:
if not left.equals(right):
diff = np.sum((left.values != right.values)
.astype(int)) * 100.0 / len(left)
msg = '{obj} values are different ({pct} %)'.format(
obj=obj, pct=np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(left.values, right.values,
check_less_precise=check_less_precise,
check_dtype=exact,
obj=obj, lobj=left, robj=right)
# metadata comparison
if check_names:
assert_attr_equal('names', left, right, obj=obj)
if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
assert_attr_equal('freq', left, right, obj=obj)
if (isinstance(left, pd.IntervalIndex) or
isinstance(right, pd.IntervalIndex)):
assert_attr_equal('closed', left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values,
obj='{obj} category'.format(obj=obj))
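# Illustrative usage sketch for ``assert_index_equal`` above, with assumed
# example indexes.  Under the default ``exact='equiv'`` a RangeIndex and an
# Int64Index holding the same values compare equal.
def _example_assert_index_equal_usage():
    assert_index_equal(Index([1, 2, 3]), Index([1, 2, 3]))
    assert_index_equal(RangeIndex(0, 3, 1), Index([0, 1, 2]))
    try:
        assert_index_equal(Index([1, 2, 3]), Index([1, 2, 4]))
    except AssertionError:
        pass  # differing values raise AssertionError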
def assert_class_equal(left, right, exact=True, obj='Input'):
"""checks classes are equal."""
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
try:
return x.__class__.__name__
except AttributeError:
return repr(type(x))
if exact == 'equiv':
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = set([type(left).__name__, type(right).__name__])
if len(types - set(['Int64Index', 'RangeIndex'])):
msg = '{obj} classes are not equivalent'.format(obj=obj)
raise_assert_detail(obj, msg, repr_class(left),
repr_class(right))
elif exact:
if type(left) != type(right):
msg = '{obj} classes are different'.format(obj=obj)
raise_assert_detail(obj, msg, repr_class(left),
repr_class(right))
def assert_attr_equal(attr, left, right, obj='Attributes'):
"""checks attributes are equal. Both objects must have attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (is_number(left_attr) and np.isnan(left_attr) and
is_number(right_attr) and np.isnan(right_attr)):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
msg = 'Attribute "{attr}" are different'.format(attr=attr)
raise_assert_detail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (pd.Series, np.ndarray)):
for el in objs.ravel():
msg = ('one of \'objs\' is not a matplotlib Axes instance, type '
'encountered {name!r}').format(name=el.__class__.__name__)
assert isinstance(el, (plt.Axes, dict)), msg
else:
assert isinstance(objs, (plt.Artist, tuple, dict)), \
('objs is neither an ndarray of Artist instances nor a '
'single Artist instance, tuple, or dict, "objs" is a {name!r}'
).format(name=objs.__class__.__name__)
def isiterable(obj):
return hasattr(obj, '__iter__')
def is_sorted(seq):
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
return assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(left, right, check_dtype=True,
obj='Categorical', check_category_order=True):
"""Test that Categoricals are equivalent.
Parameters
----------
left, right : Categorical
Categoricals to compare
check_dtype : bool, default True
Check that integer dtype of the codes are the same
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories,
obj='{obj}.categories'.format(obj=obj))
assert_numpy_array_equal(left.codes, right.codes,
check_dtype=check_dtype,
obj='{obj}.codes'.format(obj=obj))
else:
assert_index_equal(left.categories.sort_values(),
right.categories.sort_values(),
obj='{obj}.categories'.format(obj=obj))
assert_index_equal(left.categories.take(left.codes),
right.categories.take(right.codes),
obj='{obj}.values'.format(obj=obj))
assert_attr_equal('ordered', left, right, obj=obj)
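# Illustrative usage sketch for ``assert_categorical_equal`` above, using
# assumed example categoricals that differ only in category order.
def _example_assert_categorical_equal_usage():
    left = Categorical(['a', 'b', 'a'], categories=['a', 'b'])
    right = Categorical(['a', 'b', 'a'], categories=['b', 'a'])
    try:
        assert_categorical_equal(left, right)
    except AssertionError:
        pass  # by default the category order must match
    # Relaxing the order check compares only the resulting values.
    assert_categorical_equal(left, right, check_category_order=False)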
def raise_assert_detail(obj, message, left, right, diff=None):
if isinstance(left, np.ndarray):
left = pprint_thing(left)
elif is_categorical_dtype(left):
left = repr(left)
if isinstance(right, np.ndarray):
right = pprint_thing(right)
elif is_categorical_dtype(right):
right = repr(right)
msg = """{obj} are different
{message}
[left]: {left}
[right]: {right}""".format(obj=obj, message=message, left=left, right=right)
if diff is not None:
msg += "\n[diff]: {diff}".format(diff=diff)
raise AssertionError(msg)
def assert_numpy_array_equal(left, right, strict_nan=False,
check_dtype=True, err_msg=None,
obj='numpy array', check_same=None):
""" Checks that 'np.ndarray' is equivalent
Parameters
----------
left : np.ndarray or iterable
right : np.ndarray or iterable
strict_nan : bool, default False
If True, consider NaN and None to be different.
check_dtype: bool, default True
        check dtype if both left and right are np.ndarray
err_msg : str, default None
If provided, used as assertion message
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area
"""
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
# both classes must be an np.ndarray
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, 'base', None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == 'same':
if left_base is not right_base:
msg = "{left!r} is not {right!r}".format(
left=left_base, right=right_base)
raise AssertionError(msg)
elif check_same == 'copy':
if left_base is right_base:
msg = "{left!r} is {right!r}".format(
left=left_base, right=right_base)
raise AssertionError(msg)
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(obj, '{obj} shapes are different'
.format(obj=obj), left.shape, right.shape)
diff = 0
for l, r in zip(left, right):
# count up differences
if not array_equivalent(l, r, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = '{obj} values are different ({pct} %)'.format(
obj=obj, pct=np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal('dtype', left, right, obj=obj)
return True
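# Illustrative usage sketch for ``assert_numpy_array_equal`` above, with
# assumed example arrays, including the ``check_same`` memory checks.
def _example_assert_numpy_array_equal_usage():
    arr = np.array([1, 2, 3])
    assert_numpy_array_equal(arr, np.array([1, 2, 3]))
    # A slice is a view of ``arr``, so 'same' passes ...
    assert_numpy_array_equal(arr, arr[:], check_same='same')
    # ... while an explicit copy satisfies 'copy'.
    assert_numpy_array_equal(arr, arr.copy(), check_same='copy')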
def assert_extension_array_equal(left, right):
"""Check that left and right ExtensionArrays are equal.
Parameters
----------
left, right : ExtensionArray
The two arrays to compare
Notes
-----
Missing values are checked separately from valid values.
A mask of missing values is computed for each and checked to match.
The remaining all-valid values are cast to object dtype and checked.
"""
assert isinstance(left, ExtensionArray)
assert left.dtype == right.dtype
left_na = left.isna()
right_na = right.isna()
assert_numpy_array_equal(left_na, right_na)
left_valid = left[~left_na].astype(object)
right_valid = right[~right_na].astype(object)
assert_numpy_array_equal(left_valid, right_valid)
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(left, right, check_dtype=True,
check_index_type='equiv',
check_series_type=True,
check_less_precise=False,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
obj='Series'):
"""Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_exact : bool, default False
Whether to compare number exactly.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message
"""
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
# ToDo: There are some tests using rhs is sparse
# lhs is dense. Should use assert_class_equal in future
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
msg1 = '{len}, {left}'.format(len=len(left), left=left.index)
msg2 = '{len}, {right}'.format(len=len(right), right=right.index)
raise_assert_detail(obj, 'Series length are different', msg1, msg2)
# index comparison
assert_index_equal(left.index, right.index, exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{obj}.index'.format(obj=obj))
if check_dtype:
# We want to skip exact dtype checking when `check_categorical`
# is False. We'll still raise if only one is a `Categorical`,
# regardless of `check_categorical`
if (is_categorical_dtype(left) and is_categorical_dtype(right) and
not check_categorical):
pass
else:
assert_attr_equal('dtype', left, right)
if check_exact:
assert_numpy_array_equal(left.get_values(), right.get_values(),
check_dtype=check_dtype,
obj='{obj}'.format(obj=obj),)
elif check_datetimelike_compat:
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check
# the values in that case
if (is_datetimelike_v_numeric(left, right) or
is_datetimelike_v_object(left, right) or
needs_i8_conversion(left) or
needs_i8_conversion(right)):
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
if not Index(left.values).equals(Index(right.values)):
msg = ('[datetimelike_compat=True] {left} is not equal to '
'{right}.').format(left=left.values, right=right.values)
raise AssertionError(msg)
else:
assert_numpy_array_equal(left.get_values(), right.get_values(),
check_dtype=check_dtype)
elif is_interval_dtype(left) or is_interval_dtype(right):
# TODO: big hack here
left = pd.IntervalIndex(left)
right = pd.IntervalIndex(right)
assert_index_equal(left, right, obj='{obj}.index'.format(obj=obj))
else:
_testing.assert_almost_equal(left.get_values(), right.get_values(),
check_less_precise=check_less_precise,
check_dtype=check_dtype,
obj='{obj}'.format(obj=obj))
# metadata comparison
if check_names:
assert_attr_equal('name', left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values,
obj='{obj} category'.format(obj=obj))
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(left, right, check_dtype=True,
check_index_type='equiv',
check_column_type='equiv',
check_frame_type=True,
check_less_precise=False,
check_names=True,
by_blocks=False,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_like=False,
obj='DataFrame'):
"""Check that left and right DataFrame are equal.
Parameters
----------
left : DataFrame
right : DataFrame
check_dtype : bool, default True
Whether to check the DataFrame dtype is identical.
    check_index_type : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
    check_column_type : bool / string {'equiv'}, default 'equiv'
Whether to check the columns class, dtype and inferred_type
are identical.
    check_frame_type : bool, default True
Whether to check the DataFrame class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_names : bool, default True
Whether to check the Index names attribute.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_like : bool, default False
If true, ignore the order of rows & columns
obj : str, default 'DataFrame'
Specify object name being compared, internally used to show appropriate
assertion message
"""
# instance validation
_check_isinstance(left, right, DataFrame)
if check_frame_type:
# ToDo: There are some tests using rhs is SparseDataFrame
# lhs is DataFrame. Should use assert_class_equal in future
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# shape comparison
if left.shape != right.shape:
raise_assert_detail(obj,
'DataFrame shape mismatch',
'{shape!r}'.format(shape=left.shape),
'{shape!r}'.format(shape=right.shape))
if check_like:
left, right = left.reindex_like(right), right
# index comparison
assert_index_equal(left.index, right.index, exact=check_index_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{obj}.index'.format(obj=obj))
# column comparison
assert_index_equal(left.columns, right.columns, exact=check_column_type,
check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact,
check_categorical=check_categorical,
obj='{obj}.columns'.format(obj=obj))
# compare by blocks
if by_blocks:
rblocks = right._to_dict_of_blocks()
lblocks = left._to_dict_of_blocks()
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
assert_frame_equal(lblocks[dtype], rblocks[dtype],
check_dtype=check_dtype, obj='DataFrame.blocks')
# compare by columns
else:
for i, col in enumerate(left.columns):
assert col in right
lcol = left.iloc[:, i]
rcol = right.iloc[:, i]
assert_series_equal(
lcol, rcol, check_dtype=check_dtype,
check_index_type=check_index_type,
check_less_precise=check_less_precise,
check_exact=check_exact, check_names=check_names,
check_datetimelike_compat=check_datetimelike_compat,
check_categorical=check_categorical,
obj='DataFrame.iloc[:, {idx}]'.format(idx=i))
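# Illustrative usage sketch for ``assert_frame_equal`` above, with assumed
# example frames showing how ``check_dtype`` affects the comparison.
def _example_assert_frame_equal_usage():
    left = DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
    right = DataFrame({'a': [1.0, 2.0], 'b': [3.0, 4.0]})
    try:
        assert_frame_equal(left, right)
    except AssertionError:
        pass  # int64 vs float64 in column 'a' fails the strict dtype check
    # Relaxing the dtype check makes the frames compare equal.
    assert_frame_equal(left, right, check_dtype=False)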
def assert_panel_equal(left, right,
check_dtype=True,
check_panel_type=False,
check_less_precise=False,
check_names=False,
by_blocks=False,
obj='Panel'):
"""Check that left and right Panels are equal.
Parameters
----------
left : Panel (or nd)
right : Panel (or nd)
check_dtype : bool, default True
Whether to check the Panel dtype is identical.
check_panel_type : bool, default False
Whether to check the Panel class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
    check_names : bool, default False
Whether to check the Index names attribute.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
obj : str, default 'Panel'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
if check_panel_type:
assert_class_equal(left, right, obj=obj)
for axis in left._AXIS_ORDERS:
left_ind = getattr(left, axis)
right_ind = getattr(right, axis)
assert_index_equal(left_ind, right_ind, check_names=check_names)
if by_blocks:
rblocks = right._to_dict_of_blocks()
lblocks = left._to_dict_of_blocks()
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
array_equivalent(lblocks[dtype].values, rblocks[dtype].values)
else:
# can potentially be slow
for i, item in enumerate(left._get_axis(0)):
msg = "non-matching item (right) '{item}'".format(item=item)
assert item in right, msg
litem = left.iloc[i]
ritem = right.iloc[i]
assert_frame_equal(litem, ritem,
check_less_precise=check_less_precise,
check_names=check_names)
for i, item in enumerate(right._get_axis(0)):
msg = "non-matching item (left) '{item}'".format(item=item)
assert item in left, msg
# -----------------------------------------------------------------------------
# Sparse
def assert_sp_array_equal(left, right, check_dtype=True):
"""Check that the left and right SparseArray are equal.
Parameters
----------
left : SparseArray
right : SparseArray
check_dtype : bool, default True
Whether to check the data dtype is identical.
"""
_check_isinstance(left, right, pd.SparseArray)
assert_numpy_array_equal(left.sp_values, right.sp_values,
check_dtype=check_dtype)
# SparseIndex comparison
assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
if not left.sp_index.equals(right.sp_index):
raise_assert_detail('SparseArray.index', 'index are not equal',
left.sp_index, right.sp_index)
assert_attr_equal('fill_value', left, right)
if check_dtype:
assert_attr_equal('dtype', left, right)
assert_numpy_array_equal(left.values, right.values,
check_dtype=check_dtype)
def assert_sp_series_equal(left, right, check_dtype=True, exact_indices=True,
check_series_type=True, check_names=True,
obj='SparseSeries'):
"""Check that the left and right SparseSeries are equal.
Parameters
----------
left : SparseSeries
right : SparseSeries
check_dtype : bool, default True
Whether to check the Series dtype is identical.
exact_indices : bool, default True
check_series_type : bool, default True
Whether to check the SparseSeries class is identical.
check_names : bool, default True
Whether to check the SparseSeries name attribute.
obj : str, default 'SparseSeries'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
_check_isinstance(left, right, pd.SparseSeries)
if check_series_type:
assert_class_equal(left, right, obj=obj)
assert_index_equal(left.index, right.index,
obj='{obj}.index'.format(obj=obj))
assert_sp_array_equal(left.block.values, right.block.values)
if check_names:
assert_attr_equal('name', left, right)
if check_dtype:
assert_attr_equal('dtype', left, right)
assert_numpy_array_equal(left.values, right.values)
def assert_sp_frame_equal(left, right, check_dtype=True, exact_indices=True,
check_frame_type=True, obj='SparseDataFrame'):
"""Check that the left and right SparseDataFrame are equal.
Parameters
----------
left : SparseDataFrame
right : SparseDataFrame
check_dtype : bool, default True
Whether to check the Series dtype is identical.
exact_indices : bool, default True
SparseSeries SparseIndex objects must be exactly the same,
otherwise just compare dense representations.
check_frame_type : bool, default True
Whether to check the SparseDataFrame class is identical.
obj : str, default 'SparseDataFrame'
Specify the object name being compared, internally used to show
the appropriate assertion message.
"""
_check_isinstance(left, right, pd.SparseDataFrame)
if check_frame_type:
assert_class_equal(left, right, obj=obj)
assert_index_equal(left.index, right.index,
obj='{obj}.index'.format(obj=obj))
assert_index_equal(left.columns, right.columns,
obj='{obj}.columns'.format(obj=obj))
for col, series in compat.iteritems(left):
assert (col in right)
# trade-off?
if exact_indices:
assert_sp_series_equal(series, right[col],
check_dtype=check_dtype)
else:
assert_series_equal(series.to_dense(), right[col].to_dense(),
check_dtype=check_dtype)
assert_attr_equal('default_fill_value', left, right, obj=obj)
# do I care?
# assert(left.default_kind == right.default_kind)
for col in right:
assert (col in left)
# -----------------------------------------------------------------------------
# Others
def assert_contains_all(iterable, dic):
for k in iterable:
assert k in dic, "Did not contain item: '{key!r}'".format(key=k)
def assert_copy(iter1, iter2, **eql_kwargs):
"""
iter1, iter2: iterables that produce elements
comparable with assert_almost_equal
Checks that the elements are equal, but not
the same object. (Does not check that items
in sequences are also not the same object)
"""
for elem1, elem2 in zip(iter1, iter2):
assert_almost_equal(elem1, elem2, **eql_kwargs)
msg = ("Expected object {obj1!r} and object {obj2!r} to be "
"different objects, but they were the same object."
).format(obj1=type(elem1), obj2=type(elem2))
assert elem1 is not elem2, msg
def getCols(k):
return string.ascii_uppercase[:k]
def getArangeMat():
return np.arange(N * K).reshape((N, K))
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(np.random.choice(x, k), name=name, **kwargs)
def makeIntervalIndex(k=10, name=None, **kwargs):
""" make a length k IntervalIndex """
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
return Index(lrange(k), name=name)
def makeUIntIndex(k=10, name=None):
return Index([2**63 + i for i in lrange(k)], name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k=10, freq='B', name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name, **kwargs)
def makeTimedeltaIndex(k=10, freq='D', name=None, **kwargs):
return TimedeltaIndex(start='1 day', periods=k, freq=freq,
name=name, **kwargs)
def makePeriodIndex(k=10, name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = PeriodIndex(start=dt, periods=k, freq='B', name=name, **kwargs)
return dr
def makeMultiIndex(k=10, names=None, **kwargs):
return MultiIndex.from_product(
(('foo', 'bar'), (1, 2)), names=names, **kwargs)
def all_index_generator(k=10):
"""Generator which can be iterated over to get instances of all the various
index classes.
Parameters
----------
k: length of each of the index instances
"""
all_make_index_funcs = [makeIntIndex, makeFloatIndex, makeStringIndex,
makeUnicodeIndex, makeDateIndex, makePeriodIndex,
makeTimedeltaIndex, makeBoolIndex, makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex]
for make_index_func in all_make_index_funcs:
yield make_index_func(k=k)
def index_subclass_makers_generator():
make_index_funcs = [
makeDateIndex, makePeriodIndex,
makeTimedeltaIndex, makeRangeIndex,
makeIntervalIndex, makeCategoricalIndex,
makeMultiIndex
]
for make_index_func in make_index_funcs:
yield make_index_func
def all_timeseries_index_generator(k=10):
"""Generator which can be iterated over to get instances of all the classes
    which represent time-series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]
for make_index_func in make_index_funcs:
yield make_index_func(k=k)
# make series
def makeFloatSeries(name=None):
index = makeStringIndex(N)
return Series(randn(N), index=index, name=name)
def makeStringSeries(name=None):
index = makeStringIndex(N)
return Series(randn(N), index=index, name=name)
def makeObjectSeries(name=None):
dateIndex = makeDateIndex(N)
dateIndex = Index(dateIndex, dtype=object)
index = makeStringIndex(N)
return Series(dateIndex, index=index, name=name)
def getSeriesData():
index = makeStringIndex(N)
return {c: Series(randn(N), index=index) for c in getCols(K)}
def makeTimeSeries(nper=None, freq='B', name=None):
if nper is None:
nper = N
return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = N
return Series(randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq='B'):
return {c: makeTimeSeries(nper, freq) for c in getCols(K)}
def getPeriodData(nper=None):
return {c: makePeriodSeries(nper) for c in getCols(K)}
# make frame
def makeTimeDataFrame(nper=None, freq='B'):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame():
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(['a', 'b', 'c', 'd', 'e'])
data = {
'A': [0., 1., 2., 3., 4.],
'B': [0., 1., 0., 1., 0.],
'C': ['foo1', 'foo2', 'foo3', 'foo4', 'foo5'],
'D': bdate_range('1/1/2009', periods=5)
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
def makePanel(nper=None):
with warnings.catch_warnings(record=True):
cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]]
data = {c: makeTimeDataFrame(nper) for c in cols}
return Panel.fromDict(data)
def makePeriodPanel(nper=None):
with warnings.catch_warnings(record=True):
cols = ['Item' + c for c in string.ascii_uppercase[:K - 1]]
data = {c: makePeriodFrame(nper) for c in cols}
return Panel.fromDict(data)
def makeCustomIndex(nentries, nlevels, prefix='#', names=False, ndupe_l=None,
idx_type=None):
"""Create an index/multindex with given dimensions, levels, names, etc'
nentries - number of entries in index
    nlevels - number of levels (> 1 produces multiindex)
prefix - a string prefix for labels
names - (Optional), bool or list of strings. if True will use default
names, if false will use no names, if a list is given, the name of
each level in the index will be taken from the list.
ndupe_l - (Optional), list of ints, the number of rows for which the
        label will be repeated at the corresponding level, you can specify just
the first few, the rest will use the default ndupe_l of 1.
len(ndupe_l) <= nlevels.
idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a datetime index.
if unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert (is_sequence(ndupe_l) and len(ndupe_l) <= nlevels)
assert (names is None or names is False or
names is True or len(names) is nlevels)
assert idx_type is None or \
(idx_type in ('i', 'f', 's', 'u', 'dt', 'p', 'td') and nlevels == 1)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
    # make singleton case uniform
if isinstance(names, compat.string_types) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func = dict(i=makeIntIndex, f=makeFloatIndex,
s=makeStringIndex, u=makeUnicodeIndex,
dt=makeDateIndex, td=makeTimedeltaIndex,
p=makePeriodIndex).get(idx_type)
if idx_func:
idx = idx_func(nentries)
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError('"{idx_type}" is not a legal value for `idx_type`, '
'use "i"/"f"/"s"/"u"/"dt/"p"/"td".'
.format(idx_type=idx_type))
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all(x > 0 for x in ndupe_l)
tuples = []
for i in range(nlevels):
def keyfunc(x):
import re
numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
return lmap(int, numeric_tuple)
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
cnt = Counter()
for j in range(div_factor):
label = '{prefix}_l{i}_g{j}'.format(prefix=prefix, i=i, j=j)
cnt[label] = ndupe_l[i]
# cute Counter trick
result = list(sorted(cnt.elements(), key=keyfunc))[:nentries]
tuples.append(result)
tuples = lzip(*tuples)
# convert tuples to index
if nentries == 1:
# we have a single level of tuples, i.e. a regular Index
index = Index(tuples[0], name=names[0])
elif nlevels == 1:
name = None if names is None else names[0]
index = Index((x[0] for x in tuples), name=name)
else:
index = MultiIndex.from_tuples(tuples, names=names)
return index
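# Illustrative usage sketch for ``makeCustomIndex`` above, with assumed
# example dimensions.
def _example_makeCustomIndex_usage():
    # Three generated string labels on a single level.
    idx = makeCustomIndex(nentries=3, nlevels=1)
    assert len(idx) == 3
    # A two-level MultiIndex whose first level repeats each label twice.
    midx = makeCustomIndex(nentries=4, nlevels=2, ndupe_l=[2])
    assert midx.nlevels == 2 and len(midx) == 4
    # A single-level DatetimeIndex requested via ``idx_type``.
    dti = makeCustomIndex(nentries=5, nlevels=1, idx_type='dt')
    assert len(dti) == 5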
def makeCustomDataframe(nrows, ncols, c_idx_names=True, r_idx_names=True,
c_idx_nlevels=1, r_idx_nlevels=1, data_gen_f=None,
c_ndupe_l=None, r_ndupe_l=None, dtype=None,
c_idx_type=None, r_idx_type=None):
"""
nrows, ncols - number of data rows/cols
    c_idx_names, r_idx_names - False/True/list of strings, yields no names,
default names or uses the provided names for the levels of the
corresponding index. You can provide a single string when
c_idx_nlevels ==1.
c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
data_gen_f - a function f(row,col) which return the data value
at that position, the default generator used yields values of the form
"RxCy" based on position.
c_ndupe_l, r_ndupe_l - list of integers, determines the number
of duplicates for each label at a given level of the corresponding
index. The default `None` value produces a multiplicity of 1 across
all levels, i.e. a unique index. Will accept a partial list of length
N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
nrows/ncol, the last label might have lower multiplicity.
dtype - passed to the DataFrame constructor as is, in case you wish to
        have more control in conjunction with a custom `data_gen_f`
r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a timedelta index.
if unspecified, string labels will be generated.
Examples:
# 5 row, 3 columns, default names on both, single index on both axis
>> makeCustomDataframe(5,3)
# make the data a random int between 1 and 100
>> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
# 2-level multiindex on rows with each label duplicated
# twice on first level, default names on both axis, single
# index on both axis
>> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
# DatetimeIndex on row, index with unicode labels on columns
# no names on either axis
>> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
r_idx_type="dt",c_idx_type="u")
    # 4-level multiindex on rows with names provided, 2-level multiindex
# on columns with default labels and default names.
>> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
r_idx_names=["FEE","FI","FO","FAM"],
c_idx_nlevels=2)
>> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
"""
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or \
(r_idx_type in ('i', 'f', 's',
'u', 'dt', 'p', 'td') and r_idx_nlevels == 1)
assert c_idx_type is None or \
(c_idx_type in ('i', 'f', 's',
'u', 'dt', 'p', 'td') and c_idx_nlevels == 1)
columns = makeCustomIndex(ncols, nlevels=c_idx_nlevels, prefix='C',
names=c_idx_names, ndupe_l=c_ndupe_l,
idx_type=c_idx_type)
index = makeCustomIndex(nrows, nlevels=r_idx_nlevels, prefix='R',
names=r_idx_names, ndupe_l=r_ndupe_l,
idx_type=r_idx_type)
# by default, generate data based on location
if data_gen_f is None:
data_gen_f = lambda r, c: "R{rows}C{cols}".format(rows=r, cols=c)
data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = int(np.round((1 - density) * nrows * ncols))
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1. / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingCustomDataframe(nrows, ncols, density=.9, random_state=None,
c_idx_names=True, r_idx_names=True,
c_idx_nlevels=1, r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None, r_ndupe_l=None, dtype=None,
c_idx_type=None, r_idx_type=None):
"""
Parameters
----------
    density : float, optional
Float in (0, 1) that gives the percentage of non-missing numbers in
the DataFrame.
random_state : {np.random.RandomState, int}, optional
Random number generator or random seed.
See makeCustomDataframe for descriptions of the rest of the parameters.
"""
df = makeCustomDataframe(nrows, ncols, c_idx_names=c_idx_names,
r_idx_names=r_idx_names,
c_idx_nlevels=c_idx_nlevels,
r_idx_nlevels=r_idx_nlevels,
data_gen_f=data_gen_f,
c_ndupe_l=c_ndupe_l, r_ndupe_l=r_ndupe_l,
dtype=dtype, c_idx_type=c_idx_type,
r_idx_type=r_idx_type)
i, j = _create_missing_idx(nrows, ncols, density, random_state)
df.values[i, j] = np.nan
return df
def makeMissingDataframe(density=.9, random_state=None):
df = makeDataFrame()
i, j = _create_missing_idx(*df.shape, density=density,
random_state=random_state)
df.values[i, j] = np.nan
return df
def add_nans(panel):
I, J, N = panel.shape
for i, item in enumerate(panel.items):
dm = panel[item]
for j, col in enumerate(dm.columns):
dm[col][:i + j] = np.NaN
return panel
def add_nans_panel4d(panel4d):
for l, label in enumerate(panel4d.labels):
panel = panel4d[label]
add_nans(panel)
return panel4d
class TestSubDict(dict):
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
def optional_args(decorator):
"""allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, *args, **kwargs)"""
@wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and callable(args[0])
if is_decorating:
f = args[0]
args = []
return dec(f)
else:
return dec
return wrapper
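# Illustrative usage sketch for ``optional_args`` above: an assumed toy
# decorator used both bare and with keyword arguments.
def _example_optional_args_usage():
    @optional_args
    def tag(f, label='default'):
        @wraps(f)
        def inner(*args, **kwargs):
            return (label, f(*args, **kwargs))
        return inner
    @tag                      # bare decorator: label stays 'default'
    def plain():
        return 1
    @tag(label='custom')      # decorator called with keyword arguments
    def labelled():
        return 2
    assert plain() == ('default', 1)
    assert labelled() == ('custom', 2)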
# skip tests on exceptions with this message
_network_error_messages = (
# 'urlopen error timed out',
# 'timeout: timed out',
# 'socket.timeout: timed out',
'timed out',
'Server Hangup',
'HTTP Error 503: Service Unavailable',
'502: Proxy Error',
'HTTP Error 502: internal error',
'HTTP Error 502',
'HTTP Error 503',
'HTTP Error 403',
'HTTP Error 400',
'Temporary failure in name resolution',
'Name or service not known',
'Connection refused',
'certificate verify',
)
# or this e.errno/e.reason.errno
_network_errno_vals = (
101, # Network is unreachable
111, # Connection refused
110, # Connection timed out
104, # Connection reset Error
54, # Connection reset by peer
60, # urllib.error.URLError: [Errno 60] Connection timed out
)
# Both of the above shouldn't mask real issues such as 404's
# or refused connections (changed DNS).
# But some tests (test_data yahoo) contact incredibly flakey
# servers.
# and conditionally raise on these exception types
_network_error_classes = (IOError, httplib.HTTPException)
if sys.version_info >= (3, 3):
_network_error_classes += (TimeoutError,) # noqa
def can_connect(url, error_classes=_network_error_classes):
"""Try to connect to the given url. True if succeeds, False if IOError
raised
Parameters
----------
url : basestring
The URL to try to connect to
Returns
-------
connectable : bool
Return True if no IOError (unable to connect) or URLError (bad url) was
raised
"""
try:
with urlopen(url):
pass
except error_classes:
return False
else:
return True
@optional_args
def network(t, url="http://www.google.com",
raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
check_before_test=False,
error_classes=_network_error_classes,
skip_errnos=_network_errno_vals,
_skip_on_messages=_network_error_messages,
):
"""
Label a test as requiring network connection and, if an error is
encountered, only raise if it does not find a network connection.
In comparison to ``network``, this assumes an added contract to your test:
you must assert that, under normal conditions, your test will ONLY fail if
it does not have network connectivity.
You can call this in 3 ways: as a standard decorator, with keyword
arguments, or with a positional argument that is the url to check.
Parameters
----------
t : callable
The test requiring network connectivity.
url : path
The url to test via ``pandas.io.common.urlopen`` to check
for connectivity. Defaults to 'http://www.google.com'.
raise_on_error : bool
If True, never catches errors.
check_before_test : bool
If True, checks connectivity before running the test case.
error_classes : tuple or Exception
error classes to ignore. If not in ``error_classes``, raises the error.
defaults to IOError. Be careful about changing the error classes here.
skip_errnos : iterable of int
        Any exception that has .errno or .reason.errno set to one
of these values will be skipped with an appropriate
message.
_skip_on_messages: iterable of string
any exception e for which one of the strings is
a substring of str(e) will be skipped with an appropriate
message. Intended to suppress errors where an errno isn't available.
Notes
-----
    * ``raise_on_error`` supersedes ``check_before_test``
Returns
-------
t : callable
The decorated test ``t``, with checks for connectivity errors.
Example
-------
Tests decorated with @network will fail if it's possible to make a network
connection to another URL (defaults to google.com)::
>>> from pandas.util.testing import network
>>> from pandas.io.common import urlopen
>>> @network
... def test_network():
... with urlopen("rabbit://bonanza.com"):
... pass
Traceback
...
      URLError: <urlopen error unknown url type: rabbit>
You can specify alternative URLs::
>>> @network("http://www.yahoo.com")
... def test_something_with_yahoo():
... raise IOError("Failure Message")
>>> test_something_with_yahoo()
Traceback (most recent call last):
...
IOError: Failure Message
If you set check_before_test, it will check the url first and not run the
test on failure::
>>> @network("failing://url.blaher", check_before_test=True)
... def test_something():
... print("I ran!")
... raise ValueError("Failure")
>>> test_something()
Traceback (most recent call last):
...
Errors not related to networking will always be raised.
"""
from pytest import skip
t.network = True
@compat.wraps(t)
def wrapper(*args, **kwargs):
if check_before_test and not raise_on_error:
if not can_connect(url, error_classes):
skip()
try:
return t(*args, **kwargs)
except Exception as e:
errno = getattr(e, 'errno', None)
            if not errno and hasattr(e, "reason"):
errno = getattr(e.reason, 'errno', None)
if errno in skip_errnos:
skip("Skipping test due to known errno"
" and error {error}".format(error=e))
try:
e_str = traceback.format_exc(e)
except Exception:
e_str = str(e)
if any(m.lower() in e_str.lower() for m in _skip_on_messages):
skip("Skipping test because exception "
"message is known and error {error}".format(error=e))
if not isinstance(e, error_classes):
raise
if raise_on_error or can_connect(url, error_classes):
raise
else:
skip("Skipping test due to lack of connectivity"
" and error {error}".format(e))
return wrapper
with_connectivity_check = network
class SimpleMock(object):
"""
Poor man's mocking object
Note: only works for new-style classes, assumes __getattribute__ exists.
>>> a = type("Duck",(),{})
>>> a.attr1,a.attr2 ="fizz","buzz"
>>> b = SimpleMock(a,"attr1","bar")
>>> b.attr1 == "bar" and b.attr2 == "buzz"
True
>>> a.attr1 == "fizz" and a.attr2 == "buzz"
True
"""
def __init__(self, obj, *args, **kwds):
assert(len(args) % 2 == 0)
attrs = kwds.get("attrs", {})
for k, v in zip(args[::2], args[1::2]):
# dict comprehensions break 2.6
attrs[k] = v
self.attrs = attrs
self.obj = obj
def __getattribute__(self, name):
attrs = object.__getattribute__(self, "attrs")
obj = object.__getattribute__(self, "obj")
return attrs.get(name, type(obj).__getattribute__(obj, name))
@contextmanager
def stdin_encoding(encoding=None):
"""
Context manager for running bits of code while emulating an arbitrary
stdin encoding.
>>> import sys
>>> _encoding = sys.stdin.encoding
>>> with stdin_encoding('AES'): sys.stdin.encoding
'AES'
>>> sys.stdin.encoding==_encoding
True
"""
import sys
_stdin = sys.stdin
sys.stdin = SimpleMock(sys.stdin, "encoding", encoding)
yield
sys.stdin = _stdin
def assert_raises_regex(_exception, _regexp, _callable=None,
*args, **kwargs):
r"""
Check that the specified Exception is raised and that the error message
matches a given regular expression pattern. This may be a regular
expression object or a string containing a regular expression suitable
for use by `re.search()`. This is a port of the `assertRaisesRegexp`
function from unittest in Python 2.7.
Examples
--------
>>> assert_raises_regex(ValueError, 'invalid literal for.*XYZ', int, 'XYZ')
>>> import re
>>> assert_raises_regex(ValueError, re.compile('literal'), int, 'XYZ')
If an exception of a different type is raised, it bubbles up.
>>> assert_raises_regex(TypeError, 'literal', int, 'XYZ')
Traceback (most recent call last):
...
ValueError: invalid literal for int() with base 10: 'XYZ'
>>> dct = dict()
>>> assert_raises_regex(KeyError, 'pear', dct.__getitem__, 'apple')
Traceback (most recent call last):
...
AssertionError: "pear" does not match "'apple'"
You can also use this in a with statement.
>>> with assert_raises_regex(TypeError, 'unsupported operand type\(s\)'):
... 1 + {}
>>> with assert_raises_regex(TypeError, 'banana'):
... 'apple'[0] = 'b'
Traceback (most recent call last):
...
AssertionError: "banana" does not match "'str' object does not support \
item assignment"
"""
manager = _AssertRaisesContextmanager(exception=_exception, regexp=_regexp)
if _callable is not None:
with manager:
_callable(*args, **kwargs)
else:
return manager
class _AssertRaisesContextmanager(object):
"""
Context manager behind `assert_raises_regex`.
"""
def __init__(self, exception, regexp=None):
"""
Initialize an _AssertRaisesContextManager instance.
Parameters
----------
exception : class
The expected Exception class.
regexp : str, default None
The regex to compare against the Exception message.
"""
self.exception = exception
if regexp is not None and not hasattr(regexp, "search"):
regexp = re.compile(regexp, re.DOTALL)
self.regexp = regexp
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, trace_back):
expected = self.exception
if not exc_type:
exp_name = getattr(expected, "__name__", str(expected))
raise AssertionError("{name} not raised.".format(name=exp_name))
return self.exception_matches(exc_type, exc_value, trace_back)
def exception_matches(self, exc_type, exc_value, trace_back):
"""
Check that the Exception raised matches the expected Exception
and expected error message regular expression.
Parameters
----------
exc_type : class
The type of Exception raised.
exc_value : Exception
The instance of `exc_type` raised.
trace_back : stack trace object
The traceback object associated with `exc_value`.
Returns
-------
is_matched : bool
Whether or not the Exception raised matches the expected
Exception class and expected error message regular expression.
Raises
------
AssertionError : The error message provided does not match
the expected error message regular expression.
"""
if issubclass(exc_type, self.exception):
if self.regexp is not None:
val = str(exc_value)
if not self.regexp.search(val):
msg = '"{pat}" does not match "{val}"'.format(
pat=self.regexp.pattern, val=val)
e = AssertionError(msg)
raise_with_traceback(e, trace_back)
return True
else:
# Failed, so allow Exception to bubble up.
return False
@contextmanager
def assert_produces_warning(expected_warning=Warning, filter_level="always",
clear=None, check_stacklevel=True):
"""
Context manager for running code expected to either raise a specific
warning, or not raise any warnings. Verifies that the code raises the
expected warning, and that it does not raise any other unexpected
warnings. It is basically a wrapper around ``warnings.catch_warnings``.
Parameters
----------
expected_warning : {Warning, False, None}, default Warning
        The type of warning expected. ``exception.Warning`` is the base
class for all warnings. To check that no warning is returned,
specify ``False`` or ``None``.
filter_level : str, default "always"
Specifies whether warnings are ignored, displayed, or turned
into errors.
Valid values are:
* "error" - turns matching warnings into exceptions
* "ignore" - discard the warning
* "always" - always emit a warning
* "default" - print the warning the first time it is generated
from each location
* "module" - print the warning the first time it is generated
from each module
* "once" - print the warning the first time it is generated
clear : str, default None
If not ``None`` then remove any previously raised warnings from
the ``__warningsregistry__`` to ensure that no warning messages are
suppressed by this context manager. If ``None`` is specified,
the ``__warningsregistry__`` keeps track of which warnings have been
shown, and does not show them again.
check_stacklevel : bool, default True
If True, displays the line that called the function containing
        the warning to show where the function is called. Otherwise, the
line that implements the function is displayed.
Examples
--------
>>> import warnings
>>> with assert_produces_warning():
... warnings.warn(UserWarning())
...
>>> with assert_produces_warning(False):
... warnings.warn(RuntimeWarning())
...
Traceback (most recent call last):
...
AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
>>> with assert_produces_warning(UserWarning):
... warnings.warn(RuntimeWarning())
Traceback (most recent call last):
...
AssertionError: Did not see expected warning of class 'UserWarning'.
    .. warning:: This is *not* thread-safe.
"""
with warnings.catch_warnings(record=True) as w:
if clear is not None:
            # make sure that we are clearing these warnings
# if they have happened before
# to guarantee that we will catch them
if not is_list_like(clear):
clear = [clear]
for m in clear:
try:
m.__warningregistry__.clear()
except Exception:
pass
saw_warning = False
warnings.simplefilter(filter_level)
yield w
extra_warnings = []
for actual_warning in w:
if (expected_warning and issubclass(actual_warning.category,
expected_warning)):
saw_warning = True
if check_stacklevel and issubclass(actual_warning.category,
(FutureWarning,
DeprecationWarning)):
from inspect import getframeinfo, stack
caller = getframeinfo(stack()[2][0])
msg = ("Warning not set with correct stacklevel. "
"File where warning is raised: {actual} != "
"{caller}. Warning message: {message}"
).format(actual=actual_warning.filename,
caller=caller.filename,
message=actual_warning.message)
assert actual_warning.filename == caller.filename, msg
else:
extra_warnings.append(actual_warning.category.__name__)
if expected_warning:
msg = "Did not see expected warning of class {name!r}.".format(
name=expected_warning.__name__)
assert saw_warning, msg
assert not extra_warnings, ("Caused unexpected warning(s): {extra!r}."
).format(extra=extra_warnings)
class RNGContext(object):
"""
    Context manager to set the numpy random number generator seed. Returns
to the original value upon exiting the context manager.
Parameters
----------
seed : int
Seed for numpy.random.seed
Examples
--------
with RNGContext(42):
np.random.randn()
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
self.start_state = np.random.get_state()
np.random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
np.random.set_state(self.start_state)
@contextmanager
def use_numexpr(use, min_elements=None):
from pandas.core.computation import expressions as expr
if min_elements is None:
min_elements = expr._MIN_ELEMENTS
olduse = expr._USE_NUMEXPR
oldmin = expr._MIN_ELEMENTS
expr.set_use_numexpr(use)
expr._MIN_ELEMENTS = min_elements
yield
expr._MIN_ELEMENTS = oldmin
expr.set_use_numexpr(olduse)
def test_parallel(num_threads=2, kwargs_list=None):
"""Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
kwargs_list : list of dicts, optional
The list of kwargs to update original
function kwargs on different threads.
Notes
-----
This decorator does not pass the return value of the decorated function.
Original from scikit-image:
https://github.com/scikit-image/scikit-image/pull/1519
"""
assert num_threads > 0
has_kwargs_list = kwargs_list is not None
if has_kwargs_list:
assert len(kwargs_list) == num_threads
import threading
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
if has_kwargs_list:
update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
else:
update_kwargs = lambda i: kwargs
threads = []
for i in range(num_threads):
updated_kwargs = update_kwargs(i)
thread = threading.Thread(target=func, args=args,
kwargs=updated_kwargs)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return inner
return wrapper
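# Illustrative usage sketch (not part of the original module; the function and
# keyword names below are hypothetical): run a small check concurrently on two
# threads with different keyword arguments.
#
#     @test_parallel(num_threads=2, kwargs_list=[{"offset": 0}, {"offset": 10}])
#     def check_offsets(offset=0):
#         assert (np.arange(5) + offset).sum() == 10 + 5 * offset
#
#     check_offsets()  # starts both threads and joins them before returning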
class SubclassedSeries(Series):
_metadata = ['testattr', 'name']
@property
def _constructor(self):
return SubclassedSeries
@property
def _constructor_expanddim(self):
return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
_metadata = ['testattr']
@property
def _constructor(self):
return SubclassedDataFrame
@property
def _constructor_sliced(self):
return SubclassedSeries
class SubclassedSparseSeries(pd.SparseSeries):
_metadata = ['testattr']
@property
def _constructor(self):
return SubclassedSparseSeries
@property
def _constructor_expanddim(self):
return SubclassedSparseDataFrame
class SubclassedSparseDataFrame(pd.SparseDataFrame):
_metadata = ['testattr']
@property
def _constructor(self):
return SubclassedSparseDataFrame
@property
def _constructor_sliced(self):
return SubclassedSparseSeries
class SubclassedCategorical(Categorical):
@property
def _constructor(self):
return SubclassedCategorical
@contextmanager
def patch(ob, attr, value):
"""Temporarily patch an attribute of an object.
Parameters
----------
ob : any
The object to patch. This must support attribute assignment for `attr`.
attr : str
The name of the attribute to patch.
value : any
The temporary attribute to assign.
Examples
--------
>>> class C(object):
... attribute = 'original'
...
>>> C.attribute
'original'
>>> with patch(C, 'attribute', 'patched'):
... in_context = C.attribute
...
>>> in_context
'patched'
>>> C.attribute # the value is reset when the context manager exits
'original'
Correctly replaces attribute when the manager exits with an exception.
>>> with patch(C, 'attribute', 'patched'):
... in_context = C.attribute
... raise ValueError()
Traceback (most recent call last):
...
ValueError
>>> in_context
'patched'
>>> C.attribute
'original'
"""
noattr = object() # mark that the attribute never existed
old = getattr(ob, attr, noattr)
setattr(ob, attr, value)
try:
yield
finally:
if old is noattr:
delattr(ob, attr)
else:
setattr(ob, attr, old)
@contextmanager
def set_timezone(tz):
"""Context manager for temporarily setting a timezone.
Parameters
----------
tz : str
A string representing a valid timezone.
Examples
--------
>>> from datetime import datetime
>>> from dateutil.tz import tzlocal
>>> tzlocal().tzname(datetime.now())
'IST'
>>> with set_timezone('US/Eastern'):
... tzlocal().tzname(datetime.now())
...
'EDT'
"""
import os
import time
def setTZ(tz):
if tz is None:
try:
del os.environ['TZ']
except KeyError:
pass
else:
os.environ['TZ'] = tz
time.tzset()
orig_tz = os.environ.get('TZ')
setTZ(tz)
try:
yield
finally:
setTZ(orig_tz)
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
skipna_wrapper : function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
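# Illustrative usage sketch (not part of the original module): wrap np.sum so
# that NaNs are dropped before the reduction is applied to a Series.
#
#     skipna_sum = _make_skipna_wrapper(np.sum)
#     assert skipna_sum(Series([1.0, np.nan, 2.0])) == 3.0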
| [] | [] | ["PANDAS_TESTING_MODE", "TZ"] | [] | ["PANDAS_TESTING_MODE", "TZ"] | python | 2 | 0 | |
tests/framework/utils/k8s_helper.go
|
/*
Copyright 2016 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"bytes"
"encoding/json"
"fmt"
"html/template"
"os"
"path"
"strconv"
"strings"
"testing"
"time"
"github.com/coreos/pkg/capnslog"
rookclient "github.com/rook/rook/pkg/client/clientset/versioned"
"github.com/rook/rook/pkg/clusterd"
"github.com/rook/rook/pkg/util/exec"
"github.com/stretchr/testify/require"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/version"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
storagev1util "k8s.io/kubernetes/pkg/apis/storage/v1/util"
)
// K8sHelper is a helper for common kubectl commands
type K8sHelper struct {
executor *exec.CommandExecutor
Clientset *kubernetes.Clientset
RookClientset *rookclient.Clientset
RunningInCluster bool
T func() *testing.T
}
const (
// RetryInterval param for test - wait time while in RetryLoop
RetryInterval = 5
// TestMountPath is the path inside a test pod where storage is mounted
TestMountPath = "/tmp/testrook"
//hostnameTestPrefix is a prefix added to the node hostname
hostnameTestPrefix = "test-prefix-this-is-a-very-long-hostname-"
)
// getCmd returns kubectl or oc if env var rook_test_openshift is
// set to true
func getCmd() string {
cmd := "kubectl"
if IsPlatformOpenShift() {
cmd = "oc"
}
return cmd
}
// CreateK8sHelper creates an instance of K8sHelper
func CreateK8sHelper(t func() *testing.T) (*K8sHelper, error) {
executor := &exec.CommandExecutor{}
config, err := getKubeConfig(executor)
if err != nil {
return nil, fmt.Errorf("failed to get kube client. %+v", err)
}
clientset, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("failed to get clientset. %+v", err)
}
rookClientset, err := rookclient.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("failed to get rook clientset. %+v", err)
}
h := &K8sHelper{executor: executor, Clientset: clientset, RookClientset: rookClientset, T: t}
if strings.Contains(config.Host, "//10.") {
h.RunningInCluster = true
}
return h, err
}
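// Illustrative usage sketch (not part of the helper; TestExample is a
// hypothetical test name): an integration test typically builds the helper
// once and reuses it for kubectl and clientset operations.
//
//	func TestExample(t *testing.T) {
//		k8sh, err := CreateK8sHelper(func() *testing.T { return t })
//		require.NoError(t, err)
//		logger.Infof("k8s server version: %s", k8sh.GetK8sServerVersion())
//	}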
var (
k8slogger = capnslog.NewPackageLogger("github.com/rook/rook", "utils")
cmd = getCmd()
// RetryLoop params for tests.
RetryLoop = TestRetryNumber()
)
// GetK8sServerVersion returns k8s server version under test
func (k8sh *K8sHelper) GetK8sServerVersion() string {
versionInfo, err := k8sh.Clientset.ServerVersion()
require.Nil(k8sh.T(), err)
return versionInfo.GitVersion
}
func (k8sh *K8sHelper) VersionAtLeast(minVersion string) bool {
v := version.MustParseSemantic(k8sh.GetK8sServerVersion())
return v.AtLeast(version.MustParseSemantic(minVersion))
}
func (k8sh *K8sHelper) VersionMinorMatches(minVersion string) (string, bool) {
kubeVersion := k8sh.GetK8sServerVersion()
v := version.MustParseSemantic(kubeVersion)
requestedVersion := version.MustParseSemantic(minVersion)
return kubeVersion, v.Major() == requestedVersion.Major() && v.Minor() == requestedVersion.Minor()
}
func (k8sh *K8sHelper) MakeContext() *clusterd.Context {
return &clusterd.Context{Clientset: k8sh.Clientset, RookClientset: k8sh.RookClientset, Executor: k8sh.executor}
}
func (k8sh *K8sHelper) GetDockerImage(image string) error {
dockercmd := os.Getenv("DOCKERCMD")
if dockercmd == "" {
dockercmd = "docker"
}
return k8sh.executor.ExecuteCommand(dockercmd, "pull", image)
}
// SetDeploymentVersion sets the container version on the deployment. It is assumed to be the rook/ceph image.
func (k8sh *K8sHelper) SetDeploymentVersion(namespace, deploymentName, containerName, version string) error {
_, err := k8sh.Kubectl("-n", namespace, "set", "image", "deploy/"+deploymentName, containerName+"=rook/ceph:"+version)
return err
}
// Kubectl is wrapper for executing kubectl commands
func (k8sh *K8sHelper) Kubectl(args ...string) (string, error) {
result, err := k8sh.executor.ExecuteCommandWithTimeout(15*time.Second, "kubectl", args...)
if err != nil {
k8slogger.Errorf("Failed to execute: %s %+v : %+v. %s", cmd, args, err, result)
if args[0] == "delete" {
// allow the tests to continue if we were deleting a resource that timed out
return result, nil
}
return result, fmt.Errorf("Failed to run: %s %v : %v", cmd, args, err)
}
return result, nil
}
// KubectlWithStdin is wrapper for executing kubectl commands in stdin
func (k8sh *K8sHelper) KubectlWithStdin(stdin string, args ...string) (string, error) {
cmdStruct := CommandArgs{Command: cmd, PipeToStdIn: stdin, CmdArgs: args}
cmdOut := ExecuteCommand(cmdStruct)
if cmdOut.ExitCode != 0 {
k8slogger.Errorf("Failed to execute stdin: %s %v : %v", cmd, args, cmdOut.Err.Error())
if strings.Contains(cmdOut.Err.Error(), "(NotFound)") || strings.Contains(cmdOut.StdErr, "(NotFound)") {
return cmdOut.StdErr, errors.NewNotFound(schema.GroupResource{}, "")
}
return cmdOut.StdErr, fmt.Errorf("Failed to run stdin: %s %v : %v", cmd, args, cmdOut.StdErr)
}
if cmdOut.StdOut == "" {
return cmdOut.StdErr, nil
}
return cmdOut.StdOut, nil
}
func getKubeConfig(executor exec.Executor) (*rest.Config, error) {
context, err := executor.ExecuteCommandWithOutput("kubectl", "config", "view", "-o", "json")
if err != nil {
k8slogger.Errorf("failed to execute kubectl command. %v", err)
}
// Parse the kubectl context to get the settings for client connections
var kc kubectlContext
if err := json.Unmarshal([]byte(context), &kc); err != nil {
return nil, fmt.Errorf("failed to unmarshal %s config: %+v", cmd, err)
}
// find the current context
var currentContext kContext
found := false
for _, c := range kc.Contexts {
if kc.Current == c.Name {
currentContext = c
found = true
}
}
if !found {
return nil, fmt.Errorf("failed to find current context %s in %+v", kc.Current, kc.Contexts)
}
// find the current cluster
var currentCluster kclusterContext
found = false
for _, c := range kc.Clusters {
if currentContext.Cluster.Cluster == c.Name {
currentCluster = c
found = true
}
}
if !found {
return nil, fmt.Errorf("failed to find cluster %s in %+v", kc.Current, kc.Clusters)
}
config := &rest.Config{Host: currentCluster.Cluster.Server}
if currentContext.Cluster.User == "" {
config.Insecure = true
} else {
config.Insecure = false
// find the current user
var currentUser kuserContext
found = false
for _, u := range kc.Users {
if currentContext.Cluster.User == u.Name {
currentUser = u
found = true
}
}
if !found {
return nil, fmt.Errorf("failed to find kube user %s in %+v", kc.Current, kc.Users)
}
config.TLSClientConfig = rest.TLSClientConfig{
CAFile: currentCluster.Cluster.CertAuthority,
KeyFile: currentUser.Cluster.ClientKey,
CertFile: currentUser.Cluster.ClientCert,
}
// Set Insecure to true if cert information is missing
if currentUser.Cluster.ClientCert == "" {
config.Insecure = true
}
}
logger.Infof("Loaded kubectl context %s at %s. secure=%t",
currentCluster.Name, config.Host, !config.Insecure)
return config, nil
}
type kubectlContext struct {
Contexts []kContext `json:"contexts"`
Users []kuserContext `json:"users"`
Clusters []kclusterContext `json:"clusters"`
Current string `json:"current-context"`
}
type kContext struct {
Name string `json:"name"`
Cluster struct {
Cluster string `json:"cluster"`
User string `json:"user"`
} `json:"context"`
}
type kclusterContext struct {
Name string `json:"name"`
Cluster struct {
Server string `json:"server"`
Insecure bool `json:"insecure-skip-tls-verify"`
CertAuthority string `json:"certificate-authority"`
} `json:"cluster"`
}
type kuserContext struct {
Name string `json:"name"`
Cluster struct {
ClientCert string `json:"client-certificate"`
ClientKey string `json:"client-key"`
} `json:"user"`
}
func (k8sh *K8sHelper) Exec(namespace, podName, command string, commandArgs []string) (string, error) {
return k8sh.ExecWithRetry(1, namespace, podName, command, commandArgs)
}
// ExecWithRetry will attempt to run a command "retries" times, waiting 3s between each call. Upon success, returns the output.
func (k8sh *K8sHelper) ExecWithRetry(retries int, namespace, podName, command string, commandArgs []string) (string, error) {
var err error
for i := 0; i < retries; i++ {
args := []string{"exec", "-n", namespace, podName, "--", command}
args = append(args, commandArgs...)
var result string
result, err = k8sh.Kubectl(args...)
if err == nil {
return result, nil
}
if i < retries-1 {
time.Sleep(3 * time.Second)
}
}
return "", fmt.Errorf("%s exec command %s failed on pod %s in namespace %s. %+v", cmd, command, podName, namespace, err)
}
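// Illustrative usage sketch (namespace, pod, and command are hypothetical):
// run "ceph status" inside a toolbox pod, retrying up to 5 times with the 3s
// pause between attempts described above.
//
//	out, err := k8sh.ExecWithRetry(5, "rook-ceph", "rook-ceph-tools", "ceph", []string{"status"})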
// ResourceOperationFromTemplate performs a kubectl action from a template file after replacing its context
func (k8sh *K8sHelper) ResourceOperationFromTemplate(action string, podDefinition string, config map[string]string) (string, error) {
t := template.New("testTemplate")
t, err := t.Parse(podDefinition)
if err != nil {
return err.Error(), err
}
var tpl bytes.Buffer
if err := t.Execute(&tpl, config); err != nil {
return err.Error(), err
}
podDef := tpl.String()
args := []string{action, "-f", "-"}
result, err := k8sh.KubectlWithStdin(podDef, args...)
if err == nil {
return result, nil
}
logger.Errorf("Failed to execute kubectl %v %v -- %v", args, podDef, err)
return "", fmt.Errorf("Could not %s resource in args : %v %v-- %v", action, args, podDef, err)
}
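// Illustrative usage sketch (the template and values are hypothetical): the
// config map keys are substituted into the Go template before the manifest is
// piped to kubectl via stdin.
//
//	podTemplate := `apiVersion: v1
//	kind: Pod
//	metadata:
//	  name: {{.PodName}}
//	  namespace: {{.Namespace}}`
//	_, err := k8sh.ResourceOperationFromTemplate("create", podTemplate,
//		map[string]string{"PodName": "demo", "Namespace": "default"})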
// ResourceOperation performs a kubectl action on a pod definition
func (k8sh *K8sHelper) ResourceOperation(action string, manifest string) error {
args := []string{action, "-f", "-"}
logger.Infof("kubectl %s manifest:\n%s", action, manifest)
_, err := k8sh.KubectlWithStdin(manifest, args...)
if err == nil {
return nil
}
logger.Errorf("Failed to execute kubectl %v -- %v", args, err)
return fmt.Errorf("Could Not create resource in args : %v -- %v", args, err)
}
// DeletePod performs a kubectl delete pod on the given pod
func (k8sh *K8sHelper) DeletePod(namespace, name string) error {
args := append([]string{"--grace-period=0", "pod"}, name)
if namespace != "" {
args = append(args, []string{"-n", namespace}...)
}
return k8sh.DeleteResourceAndWait(true, args...)
}
// DeletePods performs a kubectl delete pod on the given pods
func (k8sh *K8sHelper) DeletePods(pods ...string) (msg string, err error) {
for _, pod := range pods {
if perr := k8sh.DeletePod("", pod); perr != nil {
err = perr
}
}
return
}
// DeleteResource performs a kubectl delete on the given args
func (k8sh *K8sHelper) DeleteResource(args ...string) error {
return k8sh.DeleteResourceAndWait(true, args...)
}
// WaitForCustomResourceDeletion waits for the CRD deletion
func (k8sh *K8sHelper) WaitForCustomResourceDeletion(namespace string, checkerFunc func() error) error {
// wait for the operator to finalize and delete the CRD
for i := 0; i < 30; i++ {
err := checkerFunc()
if err == nil {
logger.Infof("custom resource %s still exists", namespace)
time.Sleep(2 * time.Second)
continue
}
if errors.IsNotFound(err) {
logger.Infof("custom resource %s deleted", namespace)
return nil
}
return err
}
logger.Errorf("gave up deleting custom resource %s", namespace)
return nil
}
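// Illustrative usage sketch ("my-volume" is a hypothetical resource name): the
// checker should return nil while the resource still exists and a NotFound
// error once it is gone.
//
//	err := k8sh.WaitForCustomResourceDeletion(namespace, func() error {
//		_, err := k8sh.RookClientset.RookV1alpha2().Volumes(namespace).Get("my-volume", metav1.GetOptions{})
//		return err
//	})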
// DeleteResourceAndWait performs a kubectl delete on the given args.
// If wait is false, a flag will be passed to indicate the delete should return immediately
func (k8sh *K8sHelper) DeleteResourceAndWait(wait bool, args ...string) error {
if !wait {
args = append(args, "--wait=false")
}
args = append([]string{"delete"}, args...)
_, err := k8sh.Kubectl(args...)
if err == nil {
return nil
}
return fmt.Errorf("Could Not delete resource in k8s -- %v", err)
}
// GetResource performs a kubectl get on the given args
func (k8sh *K8sHelper) GetResource(args ...string) (string, error) {
args = append([]string{"get"}, args...)
result, err := k8sh.Kubectl(args...)
if err == nil {
return result, nil
}
return "", fmt.Errorf("Could Not get resource in k8s -- %v", err)
}
func (k8sh *K8sHelper) CreateNamespace(namespace string) error {
ns := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
_, err := k8sh.Clientset.CoreV1().Namespaces().Create(ns)
if err != nil && !errors.IsAlreadyExists(err) {
return fmt.Errorf("failed to create namespace %s. %+v", namespace, err)
}
return nil
}
func (k8sh *K8sHelper) CountPodsWithLabel(label string, namespace string) (int, error) {
options := metav1.ListOptions{LabelSelector: label}
pods, err := k8sh.Clientset.CoreV1().Pods(namespace).List(options)
if err != nil {
if errors.IsNotFound(err) {
return 0, nil
}
return 0, err
}
return len(pods.Items), nil
}
// WaitForPodCount waits until the desired number of pods with the label are started
func (k8sh *K8sHelper) WaitForPodCount(label, namespace string, count int) error {
options := metav1.ListOptions{LabelSelector: label}
for i := 0; i < RetryLoop; i++ {
pods, err := k8sh.Clientset.CoreV1().Pods(namespace).List(options)
if err != nil {
return fmt.Errorf("failed to find pod with label %s. %+v", label, err)
}
if len(pods.Items) >= count {
logger.Infof("found %d pods with label %s", count, label)
return nil
}
logger.Infof("waiting for %d pods (found %d) with label %s in namespace %s", count, len(pods.Items), label, namespace)
time.Sleep(RetryInterval * time.Second)
}
return fmt.Errorf("Giving up waiting for pods with label %s in namespace %s", label, namespace)
}
// IsPodWithLabelPresent returns true if at least one Pod with the given label is present.
func (k8sh *K8sHelper) IsPodWithLabelPresent(label string, namespace string) bool {
count, err := k8sh.CountPodsWithLabel(label, namespace)
if err != nil {
return false
}
return count > 0
}
// WaitForLabeledPodsToRun calls WaitForLabeledPodsToRunWithRetries with the default number of retries
func (k8sh *K8sHelper) WaitForLabeledPodsToRun(label, namespace string) error {
return k8sh.WaitForLabeledPodsToRunWithRetries(label, namespace, RetryLoop)
}
// WaitForLabeledPodsToRunWithRetries waits until all pods with the label are running, retrying the given number of times, and returns an error on timeout
func (k8sh *K8sHelper) WaitForLabeledPodsToRunWithRetries(label string, namespace string, retries int) error {
options := metav1.ListOptions{LabelSelector: label}
var lastPod v1.Pod
for i := 0; i < retries; i++ {
pods, err := k8sh.Clientset.CoreV1().Pods(namespace).List(options)
lastStatus := ""
running := 0
if err == nil && len(pods.Items) > 0 {
for _, pod := range pods.Items {
if pod.Status.Phase == "Running" {
running++
}
lastPod = pod
lastStatus = string(pod.Status.Phase)
}
if running == len(pods.Items) {
logger.Infof("All %d pod(s) with label %s are running", len(pods.Items), label)
return nil
}
}
logger.Infof("waiting for pod(s) with label %s in namespace %s to be running. status=%s, running=%d/%d, err=%+v",
label, namespace, lastStatus, running, len(pods.Items), err)
time.Sleep(RetryInterval * time.Second)
}
if len(lastPod.Name) == 0 {
logger.Infof("no pod was found with label %s", label)
} else {
k8sh.PrintPodDescribe(namespace, lastPod.Name)
}
return fmt.Errorf("Giving up waiting for pod with label %s in namespace %s to be running", label, namespace)
}
// WaitUntilPodWithLabelDeleted returns true if a Pod is deleted within 90s else returns false
func (k8sh *K8sHelper) WaitUntilPodWithLabelDeleted(label string, namespace string) bool {
options := metav1.ListOptions{LabelSelector: label}
for i := 0; i < RetryLoop; i++ {
pods, err := k8sh.Clientset.CoreV1().Pods(namespace).List(options)
if errors.IsNotFound(err) {
logger.Infof("error Found err %v", err)
return true
}
if len(pods.Items) == 0 {
logger.Infof("no (more) pods with label %s in namespace %s to be deleted", label, namespace)
return true
}
time.Sleep(RetryInterval * time.Second)
logger.Infof("waiting for pod with label %s in namespace %s to be deleted", label, namespace)
}
logger.Infof("Giving up waiting for pod with label %s in namespace %s to be deleted", label, namespace)
return false
}
// PrintPodStatus logs the status phase of each pod in the namespace
func (k8sh *K8sHelper) PrintPodStatus(namespace string) {
pods, err := k8sh.Clientset.CoreV1().Pods(namespace).List(metav1.ListOptions{})
if err != nil {
logger.Errorf("failed to get pod status in namespace %s. %+v", namespace, err)
return
}
for _, pod := range pods.Items {
logger.Infof("%s (%s) pod status: %+v", pod.Name, namespace, pod.Status)
}
}
func (k8sh *K8sHelper) GetPodDescribeFromNamespace(namespace, testName, platformName string) {
logger.Infof("Gathering pod describe for all pods in namespace %s", namespace)
pods, err := k8sh.Clientset.CoreV1().Pods(namespace).List(metav1.ListOptions{})
if err != nil {
logger.Errorf("failed to list pods in namespace %s. %+v", namespace, err)
return
}
file, err := k8sh.createTestLogFile(platformName, "podDescribe", namespace, testName, "")
if err != nil {
return
}
defer file.Close()
for _, p := range pods.Items {
k8sh.appendPodDescribe(file, namespace, p.Name)
}
}
func (k8sh *K8sHelper) GetEventsFromNamespace(namespace, testName, platformName string) {
logger.Infof("Gathering events in namespace %q", namespace)
file, err := k8sh.createTestLogFile(platformName, "events", namespace, testName, "")
if err != nil {
logger.Errorf("failed to create event file. %v", err)
return
}
defer file.Close()
args := []string{"get", "events", "-n", namespace}
events, err := k8sh.Kubectl(args...)
if err != nil {
logger.Errorf("failed to get events. %v. %v", args, err)
}
if events == "" {
return
}
file.WriteString(events) //nolint, ok to ignore this test logging
}
func (k8sh *K8sHelper) appendPodDescribe(file *os.File, namespace, name string) {
description := k8sh.getPodDescribe(namespace, name)
if description == "" {
return
}
writeHeader(file, fmt.Sprintf("Pod: %s\n", name)) //nolint, ok to ignore this test logging
file.WriteString(description) //nolint, ok to ignore this test logging
file.WriteString("\n") //nolint, ok to ignore this test logging
}
func (k8sh *K8sHelper) PrintPodDescribe(namespace string, args ...string) {
description := k8sh.getPodDescribe(namespace, args...)
if description == "" {
return
}
logger.Infof("POD Description:\n%s", description)
}
func (k8sh *K8sHelper) getPodDescribe(namespace string, args ...string) string {
args = append([]string{"describe", "pod", "-n", namespace}, args...)
description, err := k8sh.Kubectl(args...)
if err != nil {
logger.Errorf("failed to describe pod. %v %+v", args, err)
return ""
}
return description
}
func (k8sh *K8sHelper) PrintEventsForNamespace(namespace string) {
events, err := k8sh.Clientset.CoreV1().Events(namespace).List(metav1.ListOptions{})
if err != nil {
logger.Warningf("failed to get events in namespace %s. %+v", namespace, err)
return
}
logger.Infof("DUMPING events in namespace %s", namespace)
for _, event := range events.Items {
logger.Infof("%+v", event)
}
logger.Infof("DONE DUMPING events in namespace %s", namespace)
}
// IsPodRunning returns true if a Pod is in Running status or reaches Running status within 90s, else returns false
func (k8sh *K8sHelper) IsPodRunning(name string, namespace string) bool {
getOpts := metav1.GetOptions{}
for i := 0; i < 30; i++ {
pod, err := k8sh.Clientset.CoreV1().Pods(namespace).Get(name, getOpts)
if err == nil {
if pod.Status.Phase == "Running" {
return true
}
}
time.Sleep(RetryInterval * time.Second)
logger.Infof("waiting for pod %s in namespace %s to be running", name, namespace)
}
pod, _ := k8sh.Clientset.CoreV1().Pods(namespace).Get(name, getOpts)
k8sh.PrintPodDescribe(namespace, pod.Name)
logger.Infof("Giving up waiting for pod %s in namespace %s to be running", name, namespace)
return false
}
// IsPodTerminated wrapper around IsPodTerminatedWithOpts()
func (k8sh *K8sHelper) IsPodTerminated(name string, namespace string) bool {
return k8sh.IsPodTerminatedWithOpts(name, namespace, metav1.GetOptions{})
}
// IsPodTerminatedWithOpts returns true if a Pod is in Terminated status or reaches Terminated status
// within 90s, else returns false
func (k8sh *K8sHelper) IsPodTerminatedWithOpts(name string, namespace string, getOpts metav1.GetOptions) bool {
for i := 0; i < RetryLoop; i++ {
pod, err := k8sh.Clientset.CoreV1().Pods(namespace).Get(name, getOpts)
if err != nil {
k8slogger.Infof("Pod %s in namespace %s terminated ", name, namespace)
return true
}
k8slogger.Infof("waiting for Pod %s in namespace %s to terminate, status : %+v", name, namespace, pod.Status)
time.Sleep(RetryInterval * time.Second)
}
k8slogger.Infof("Pod %s in namespace %s did not terminate", name, namespace)
return false
}
// IsServiceUp returns true if a service is up or comes up within 150s, else returns false
func (k8sh *K8sHelper) IsServiceUp(name string, namespace string) bool {
getOpts := metav1.GetOptions{}
for i := 0; i < RetryLoop; i++ {
_, err := k8sh.Clientset.CoreV1().Services(namespace).Get(name, getOpts)
if err == nil {
k8slogger.Infof("Service: %s in namespace: %s is up", name, namespace)
return true
}
k8slogger.Infof("waiting for Service %s in namespace %s ", name, namespace)
time.Sleep(RetryInterval * time.Second)
}
k8slogger.Infof("Giving up waiting for service: %s in namespace %s ", name, namespace)
return false
}
// GetService returns the Service object with the given name in the given namespace
func (k8sh *K8sHelper) GetService(servicename string, namespace string) (*v1.Service, error) {
getOpts := metav1.GetOptions{}
result, err := k8sh.Clientset.CoreV1().Services(namespace).Get(servicename, getOpts)
if err != nil {
return nil, fmt.Errorf("Cannot find service %s in namespace %s, err-- %v", servicename, namespace, err)
}
return result, nil
}
// IsCRDPresent returns true if custom resource definition is present
func (k8sh *K8sHelper) IsCRDPresent(crdName string) bool {
cmdArgs := []string{"get", "crd", crdName}
for i := 0; i < RetryLoop; i++ {
_, err := k8sh.Kubectl(cmdArgs...)
if err == nil {
k8slogger.Infof("Found the CRD resource: " + crdName)
return true
}
time.Sleep(RetryInterval * time.Second)
}
return false
}
// WriteToPod writes a file in a Pod
func (k8sh *K8sHelper) WriteToPod(namespace, podName, filename, message string) error {
return k8sh.WriteToPodRetry(namespace, podName, filename, message, 1)
}
// WriteToPodRetry calls WriteToPod in a retry loop
func (k8sh *K8sHelper) WriteToPodRetry(namespace, podName, filename, message string, retries int) error {
logger.Infof("Writing file %s to pod %s", filename, podName)
var err error
for i := 0; i < retries; i++ {
if i > 0 {
logger.Infof("retrying write in 5s...")
time.Sleep(5 * time.Second)
}
err = k8sh.writeToPod(namespace, podName, filename, message)
if err == nil {
logger.Infof("write file %s in pod %s was successful", filename, podName)
return nil
}
}
return fmt.Errorf("failed to write file %s to pod %s. %+v", filename, podName, err)
}
func (k8sh *K8sHelper) ReadFromPod(namespace, podName, filename, expectedMessage string) error {
return k8sh.ReadFromPodRetry(namespace, podName, filename, expectedMessage, 1)
}
func (k8sh *K8sHelper) ReadFromPodRetry(namespace, podName, filename, expectedMessage string, retries int) error {
logger.Infof("Reading file %s from pod %s", filename, podName)
var err error
for i := 0; i < retries; i++ {
if i > 0 {
logger.Infof("retrying read in 5s...")
time.Sleep(5 * time.Second)
}
var data string
data, err = k8sh.readFromPod(namespace, podName, filename)
if err == nil {
logger.Infof("read file %s from pod %s was successful after %d attempt(s)", filename, podName, (i + 1))
if !strings.Contains(data, expectedMessage) {
return fmt.Errorf(`file %s in pod %s returned message "%s" instead of "%s"`, filename, podName, data, expectedMessage)
}
return nil
}
}
return fmt.Errorf("failed to read file %s from pod %s. %+v", filename, podName, err)
}
func (k8sh *K8sHelper) writeToPod(namespace, name, filename, message string) error {
wt := "echo \"" + message + "\">" + path.Join(TestMountPath, filename)
args := []string{"exec", name}
if namespace != "" {
args = append(args, "-n", namespace)
}
args = append(args, "--", "sh", "-c", wt)
_, err := k8sh.Kubectl(args...)
if err != nil {
return fmt.Errorf("failed to write file %s to pod %s. %+v", filename, name, err)
}
return nil
}
// RunCommandInPod runs the provided command inside the pod
func (k8sh *K8sHelper) RunCommandInPod(namespace, name, cmd string) (string, error) {
args := []string{"exec", name}
if namespace != "" {
args = append(args, "-n", namespace)
}
args = append(args, "--", "sh", "-c", cmd)
resp, err := k8sh.Kubectl(args...)
if err != nil {
return "", fmt.Errorf("failed to execute command %q in pod %s. %+v", cmd, name, err)
}
return resp, err
}
func (k8sh *K8sHelper) readFromPod(namespace, name, filename string) (string, error) {
rd := path.Join(TestMountPath, filename)
args := []string{"exec", name}
if namespace != "" {
args = append(args, "-n", namespace)
}
args = append(args, "--", "cat", rd)
result, err := k8sh.Kubectl(args...)
if err != nil {
return "", fmt.Errorf("failed to read file %s from pod %s. %+v", filename, name, err)
}
return result, nil
}
// GetVolumeResourceName gets the Volume object name from the PVC
func (k8sh *K8sHelper) GetVolumeResourceName(namespace, pvcName string) (string, error) {
getOpts := metav1.GetOptions{}
pvc, err := k8sh.Clientset.CoreV1().PersistentVolumeClaims(namespace).Get(pvcName, getOpts)
if err != nil {
return "", err
}
return pvc.Spec.VolumeName, nil
}
// IsVolumeResourcePresent returns true if Volume resource is present
func (k8sh *K8sHelper) IsVolumeResourcePresent(namespace, volumeName string) bool {
err := k8sh.waitForVolume(namespace, volumeName, true)
if err != nil {
k8slogger.Error(err.Error())
return false
}
return true
}
// IsVolumeResourceAbsent returns true if the Volume resource is deleted/absent within 90s else returns false
func (k8sh *K8sHelper) IsVolumeResourceAbsent(namespace, volumeName string) bool {
err := k8sh.waitForVolume(namespace, volumeName, false)
if err != nil {
k8slogger.Error(err.Error())
return false
}
return true
}
func (k8sh *K8sHelper) waitForVolume(namespace, volumeName string, exist bool) error {
action := "exist"
if !exist {
action = "not " + action
}
for i := 0; i < 10; i++ {
isExist, err := k8sh.isVolumeExist(namespace, volumeName)
if err != nil {
return fmt.Errorf("Errors encountered while getting Volume %s/%s: %v", namespace, volumeName, err)
}
if isExist == exist {
return nil
}
k8slogger.Infof("waiting for Volume %s in namespace %s to %s", volumeName, namespace, action)
time.Sleep(RetryInterval * time.Second)
}
k8sh.printVolumes(namespace, volumeName)
k8sh.PrintPVs(false /*detailed*/)
k8sh.PrintPVCs(namespace, false /*detailed*/)
return fmt.Errorf("timeout for Volume %s in namespace %s wait to %s", volumeName, namespace, action)
}
func (k8sh *K8sHelper) PrintPVs(detailed bool) {
pvs, err := k8sh.Clientset.CoreV1().PersistentVolumes().List(metav1.ListOptions{})
if err != nil {
logger.Errorf("failed to list pvs. %+v", err)
return
}
if detailed {
logger.Infof("Found %d PVs", len(pvs.Items))
for _, pv := range pvs.Items {
logger.Infof("PV %s: %+v", pv.Name, pv)
}
} else {
var names []string
for _, pv := range pvs.Items {
names = append(names, pv.Name)
}
logger.Infof("Found PVs: %v", names)
}
}
func (k8sh *K8sHelper) PrintPVCs(namespace string, detailed bool) {
pvcs, err := k8sh.Clientset.CoreV1().PersistentVolumeClaims(namespace).List(metav1.ListOptions{})
if err != nil {
logger.Errorf("failed to list pvcs. %+v", err)
return
}
if detailed {
logger.Infof("Found %d PVCs", len(pvcs.Items))
for _, pvc := range pvcs.Items {
logger.Infof("PVC %s: %+v", pvc.Name, pvc)
}
} else {
var names []string
for _, pvc := range pvcs.Items {
names = append(names, pvc.Name)
}
logger.Infof("Found PVCs: %v", names)
}
}
func (k8sh *K8sHelper) PrintStorageClasses(detailed bool) {
scs, err := k8sh.Clientset.StorageV1().StorageClasses().List(metav1.ListOptions{})
if err != nil {
logger.Errorf("failed to list StorageClasses: %+v", err)
return
}
if detailed {
logger.Infof("Found %d StorageClasses", len(scs.Items))
for _, sc := range scs.Items {
logger.Infof("StorageClass %s: %+v", sc.Name, sc)
}
} else {
var names []string
for _, sc := range scs.Items {
names = append(names, sc.Name)
}
logger.Infof("Found StorageClasses: %v", names)
}
}
func (k8sh *K8sHelper) printVolumes(namespace, desiredVolume string) {
volumes, err := k8sh.RookClientset.RookV1alpha2().Volumes(namespace).List(metav1.ListOptions{})
if err != nil {
logger.Infof("failed to list volumes in ns %s. %+v", namespace, err)
}
var names []string
for _, volume := range volumes.Items {
names = append(names, volume.Name)
}
logger.Infof("looking for volume %s in namespace %s. Found volumes: %v", desiredVolume, namespace, names)
}
func (k8sh *K8sHelper) isVolumeExist(namespace, name string) (bool, error) {
_, err := k8sh.RookClientset.RookV1alpha2().Volumes(namespace).Get(name, metav1.GetOptions{})
if err != nil {
if errors.IsNotFound(err) {
return false, nil
}
return false, err
}
return true, nil
}
func (k8sh *K8sHelper) GetPodNamesForApp(appName, namespace string) ([]string, error) {
args := []string{"get", "pod", "-n", namespace, "-l", fmt.Sprintf("app=%s", appName),
"-o", "jsonpath={.items[*].metadata.name}"}
result, err := k8sh.Kubectl(args...)
if err != nil {
return nil, fmt.Errorf("failed to get pod names for app %s: %+v. output: %s", appName, err, result)
}
podNames := strings.Split(result, " ")
return podNames, nil
}
// GetPodDetails returns details about a pod
func (k8sh *K8sHelper) GetPodDetails(podNamePattern string, namespace string) (string, error) {
args := []string{"get", "pods", "-l", "app=" + podNamePattern, "-o", "wide", "--no-headers=true", "-o", "name"}
if namespace != "" {
args = append(args, []string{"-n", namespace}...)
}
result, err := k8sh.Kubectl(args...)
if err != nil || strings.Contains(result, "No resources found") {
return "", fmt.Errorf("Cannot find pod in with name like %s in namespace : %s -- %v", podNamePattern, namespace, err)
}
return strings.TrimSpace(result), nil
}
// GetPodEvents returns events about a pod
func (k8sh *K8sHelper) GetPodEvents(podNamePattern string, namespace string) (*v1.EventList, error) {
uri := fmt.Sprintf("api/v1/namespaces/%s/events?fieldSelector=involvedObject.name=%s,involvedObject.namespace=%s", namespace, podNamePattern, namespace)
result, err := k8sh.Clientset.CoreV1().RESTClient().Get().RequestURI(uri).DoRaw()
if err != nil {
logger.Errorf("Cannot get events for pod %v in namespace %v, err: %v", podNamePattern, namespace, err)
return nil, fmt.Errorf("Cannot get events for pod %s in namespace %s, err: %v", podNamePattern, namespace, err)
}
events := v1.EventList{}
err = json.Unmarshal(result, &events)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal eventlist response: %v", err)
}
return &events, nil
}
// IsPodInError returns true if a Pod is in error status with the given reason and contains the given message
func (k8sh *K8sHelper) IsPodInError(podNamePattern, namespace, reason, containingMessage string) bool {
for i := 0; i < RetryLoop; i++ {
events, err := k8sh.GetPodEvents(podNamePattern, namespace)
if err != nil {
k8slogger.Errorf("Cannot get Pod events for %s in namespace %s: %+v ", podNamePattern, namespace, err)
return false
}
for _, e := range events.Items {
if e.Reason == reason && strings.Contains(e.Message, containingMessage) {
return true
}
}
k8slogger.Infof("waiting for Pod %s in namespace %s to error with reason %s and containing the message: %s", podNamePattern, namespace, reason, containingMessage)
time.Sleep(RetryInterval * time.Second)
}
k8slogger.Infof("Pod %s in namespace %s did not error with reason %s", podNamePattern, namespace, reason)
return false
}
// GetPodHostID returns HostIP address of a pod
func (k8sh *K8sHelper) GetPodHostID(podNamePattern string, namespace string) (string, error) {
listOpts := metav1.ListOptions{LabelSelector: "app=" + podNamePattern}
podList, err := k8sh.Clientset.CoreV1().Pods(namespace).List(listOpts)
if err != nil {
logger.Errorf("Cannot get hostIp for app : %v in namespace %v, err: %v", podNamePattern, namespace, err)
return "", fmt.Errorf("Cannot get hostIp for app : %v in namespace %v, err: %v", podNamePattern, namespace, err)
}
if len(podList.Items) < 1 {
logger.Errorf("Cannot get hostIp for app : %v in namespace %v, err: %v", podNamePattern, namespace, err)
return "", fmt.Errorf("Cannot get hostIp for app : %v in namespace %v, err: %v", podNamePattern, namespace, err)
}
return podList.Items[0].Status.HostIP, nil
}
// GetServiceNodePort returns the NodePort of a service
func (k8sh *K8sHelper) GetServiceNodePort(serviceName string, namespace string) (string, error) {
getOpts := metav1.GetOptions{}
svc, err := k8sh.Clientset.CoreV1().Services(namespace).Get(serviceName, getOpts)
if err != nil {
logger.Errorf("Cannot get service : %v in namespace %v, err: %v", serviceName, namespace, err)
return "", fmt.Errorf("Cannot get service : %v in namespace %v, err: %v", serviceName, namespace, err)
}
np := svc.Spec.Ports[0].NodePort
return strconv.FormatInt(int64(np), 10), nil
}
// IsStorageClassPresent returns true if the storageClass is present, otherwise false
func (k8sh *K8sHelper) IsStorageClassPresent(name string) (bool, error) {
args := []string{"get", "storageclass", "-o", "jsonpath='{.items[*].metadata.name}'"}
result, err := k8sh.Kubectl(args...)
if strings.Contains(result, name) {
return true, nil
}
return false, fmt.Errorf("Storageclass %s not found, err ->%v", name, err)
}
func (k8sh *K8sHelper) IsDefaultStorageClassPresent() (bool, error) {
scs, err := k8sh.Clientset.StorageV1().StorageClasses().List(metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list StorageClasses: %+v", err)
}
for _, sc := range scs.Items {
if storagev1util.IsDefaultAnnotation(sc.ObjectMeta) {
return true, nil
}
}
return false, nil
}
// CheckPvcCountAndStatus returns true if the expected number of PVCs for an app are found and reach the expected status
func (k8sh *K8sHelper) CheckPvcCountAndStatus(podName string, namespace string, expectedPvcCount int, expectedStatus string) bool {
logger.Infof("wait until %d pvc for app=%s are present", expectedPvcCount, podName)
listOpts := metav1.ListOptions{LabelSelector: "app=" + podName}
pvcCountCheck := false
actualPvcCount := 0
for i := 0; i < RetryLoop; i++ {
pvcList, err := k8sh.Clientset.CoreV1().PersistentVolumeClaims(namespace).List(listOpts)
if err != nil {
logger.Errorf("Cannot get pvc for app : %v in namespace %v, err: %v", podName, namespace, err)
return false
}
actualPvcCount = len(pvcList.Items)
if actualPvcCount == expectedPvcCount {
pvcCountCheck = true
break
}
time.Sleep(RetryInterval * time.Second)
}
if !pvcCountCheck {
logger.Errorf("Expecting %d number of PVCs for %s app, found %d ", expectedPvcCount, podName, actualPvcCount)
return false
}
for i := 0; i < RetryLoop; i++ {
checkAllPVCsStatus := true
pl, _ := k8sh.Clientset.CoreV1().PersistentVolumeClaims(namespace).List(listOpts)
for _, pvc := range pl.Items {
if !(pvc.Status.Phase == v1.PersistentVolumeClaimPhase(expectedStatus)) {
checkAllPVCsStatus = false
logger.Infof("waiting for pvc %v to be in %s Phase, currently in %v Phase", pvc.Name, expectedStatus, pvc.Status.Phase)
}
}
if checkAllPVCsStatus {
return true
}
time.Sleep(RetryInterval * time.Second)
}
logger.Errorf("Giving up waiting for %d PVCs for %s app to be in %s phase", expectedPvcCount, podName, expectedStatus)
return false
}
// GetPVCStatus returns status of PVC
func (k8sh *K8sHelper) GetPVCStatus(namespace string, name string) (v1.PersistentVolumeClaimPhase, error) {
getOpts := metav1.GetOptions{}
pvc, err := k8sh.Clientset.CoreV1().PersistentVolumeClaims(namespace).Get(name, getOpts)
if err != nil {
return v1.ClaimLost, fmt.Errorf("PVC %s not found,err->%v", name, err)
}
return pvc.Status.Phase, nil
}
// GetPVCVolumeName returns volume name of PVC
func (k8sh *K8sHelper) GetPVCVolumeName(namespace string, name string) (string, error) {
getOpts := metav1.GetOptions{}
pvc, err := k8sh.Clientset.CoreV1().PersistentVolumeClaims(namespace).Get(name, getOpts)
if err != nil {
return "", fmt.Errorf("PVC %s not found,err->%v", name, err)
}
return pvc.Spec.VolumeName, nil
}
// GetPVCAccessModes returns AccessModes on PVC
func (k8sh *K8sHelper) GetPVCAccessModes(namespace string, name string) ([]v1.PersistentVolumeAccessMode, error) {
getOpts := metav1.GetOptions{}
pvc, err := k8sh.Clientset.CoreV1().PersistentVolumeClaims(namespace).Get(name, getOpts)
if err != nil {
return []v1.PersistentVolumeAccessMode{}, fmt.Errorf("PVC %s not found,err->%v", name, err)
}
return pvc.Status.AccessModes, nil
}
// GetPV returns PV by name
func (k8sh *K8sHelper) GetPV(name string) (*v1.PersistentVolume, error) {
getOpts := metav1.GetOptions{}
pv, err := k8sh.Clientset.CoreV1().PersistentVolumes().Get(name, getOpts)
if err != nil {
return nil, fmt.Errorf("PV %s not found,err->%v", name, err)
}
return pv, nil
}
// IsPodInExpectedState waits for 90s for a pod to be in an expected state
// If the pod is in expected state within 90s true is returned, if not false
func (k8sh *K8sHelper) IsPodInExpectedState(podNamePattern string, namespace string, state string) bool {
listOpts := metav1.ListOptions{LabelSelector: "app=" + podNamePattern}
for i := 0; i < RetryLoop; i++ {
podList, err := k8sh.Clientset.CoreV1().Pods(namespace).List(listOpts)
if err == nil {
if len(podList.Items) >= 1 {
for _, pod := range podList.Items {
if pod.Status.Phase == v1.PodPhase(state) {
return true
}
}
}
}
time.Sleep(RetryInterval * time.Second)
}
return false
}
// CheckPodCountAndState returns true if expected number of pods with matching name are found and are in expected state
func (k8sh *K8sHelper) CheckPodCountAndState(podName string, namespace string, minExpected int, expectedPhase string) bool {
listOpts := metav1.ListOptions{LabelSelector: "app=" + podName}
podCountCheck := false
actualPodCount := 0
for i := 0; i < RetryLoop; i++ {
podList, err := k8sh.Clientset.CoreV1().Pods(namespace).List(listOpts)
if err != nil {
logger.Errorf("Cannot list pods for app=%s in namespace %s, err: %+v", podName, namespace, err)
return false
}
actualPodCount = len(podList.Items)
if actualPodCount >= minExpected {
logger.Infof("%d of %d pods with label app=%s were found", actualPodCount, minExpected, podName)
podCountCheck = true
break
}
logger.Infof("waiting for %d pods with label app=%s, found %d", minExpected, podName, actualPodCount)
time.Sleep(RetryInterval * time.Second)
}
if !podCountCheck {
logger.Errorf("Expecting %d number of pods for %s app, found %d ", minExpected, podName, actualPodCount)
return false
}
for i := 0; i < RetryLoop; i++ {
checkAllPodsStatus := true
pl, _ := k8sh.Clientset.CoreV1().Pods(namespace).List(listOpts)
for _, pod := range pl.Items {
if !(pod.Status.Phase == v1.PodPhase(expectedPhase)) {
checkAllPodsStatus = false
logger.Infof("waiting for pod %v to be in %s Phase, currently in %v Phase", pod.Name, expectedPhase, pod.Status.Phase)
}
}
if checkAllPodsStatus {
return true
}
time.Sleep(RetryInterval * time.Second)
}
logger.Errorf("All pods with app Name %v not in %v phase ", podName, expectedPhase)
k8sh.PrintPodDescribe(namespace, "-l", listOpts.LabelSelector)
return false
}
// WaitUntilPodInNamespaceIsDeleted waits for 90s for a pod in a namespace to be terminated
// If the pod disappears within 90s true is returned, if not false
func (k8sh *K8sHelper) WaitUntilPodInNamespaceIsDeleted(podNamePattern string, namespace string) bool {
for i := 0; i < RetryLoop; i++ {
out, _ := k8sh.GetResource("-n", namespace, "pods", "-l", "app="+podNamePattern)
if !strings.Contains(out, podNamePattern) {
return true
}
time.Sleep(RetryInterval * time.Second)
}
logger.Infof("Pod %s in namespace %s not deleted", podNamePattern, namespace)
return false
}
// WaitUntilPodIsDeleted waits for 90s for a pod to be terminated
// If the pod disappears within 90s true is returned, if not false
func (k8sh *K8sHelper) WaitUntilPodIsDeleted(name, namespace string) bool {
for i := 0; i < RetryLoop; i++ {
_, err := k8sh.Clientset.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return true
}
logger.Infof("pod %s in namespace %s is not deleted yet", name, namespace)
time.Sleep(RetryInterval * time.Second)
}
return false
}
// WaitUntilPVCIsBound waits for a PVC to be in bound state for 90 seconds
// if PVC goes to Bound state within 90s True is returned, if not false
func (k8sh *K8sHelper) WaitUntilPVCIsBound(namespace string, pvcname string) bool {
for i := 0; i < RetryLoop; i++ {
out, err := k8sh.GetPVCStatus(namespace, pvcname)
if err == nil {
if out == v1.PersistentVolumeClaimPhase(v1.ClaimBound) {
logger.Infof("PVC %s is bound", pvcname)
return true
}
}
logger.Infof("waiting for PVC %s to be bound. current=%s. err=%+v", pvcname, out, err)
time.Sleep(RetryInterval * time.Second)
}
return false
}
// WaitUntilPVCIsExpanded waits for a PVC to be resized for specified value
func (k8sh *K8sHelper) WaitUntilPVCIsExpanded(namespace, pvcname, size string) bool {
getOpts := metav1.GetOptions{}
for i := 0; i < RetryLoop; i++ {
// PVC specs changes immediately, but status will change only if resize process is successfully completed.
pvc, err := k8sh.Clientset.CoreV1().PersistentVolumeClaims(namespace).Get(pvcname, getOpts)
if err == nil {
currentSize := pvc.Status.Capacity[v1.ResourceStorage]
if currentSize.String() == size {
logger.Infof("PVC %s is resized", pvcname)
return true
}
logger.Infof("waiting for PVC %s to be resized, current: %s, expected: %s", pvcname, currentSize.String(), size)
} else {
logger.Infof("error while getting PVC specs: %+v", err)
}
time.Sleep(RetryInterval * time.Second)
}
return false
}
func (k8sh *K8sHelper) WaitUntilPVCIsDeleted(namespace string, pvcname string) bool {
getOpts := metav1.GetOptions{}
for i := 0; i < RetryLoop; i++ {
_, err := k8sh.Clientset.CoreV1().PersistentVolumeClaims(namespace).Get(pvcname, getOpts)
if err != nil {
return true
}
logger.Infof("waiting for PVC %s to be deleted.", pvcname)
time.Sleep(RetryInterval * time.Second)
}
return false
}
func (k8sh *K8sHelper) DeletePvcWithLabel(namespace string, podName string) bool {
delOpts := metav1.DeleteOptions{}
listOpts := metav1.ListOptions{LabelSelector: "app=" + podName}
err := k8sh.Clientset.CoreV1().PersistentVolumeClaims(namespace).DeleteCollection(&delOpts, listOpts)
if err != nil {
logger.Errorf("cannot deleted PVCs for pods with label app=%s", podName)
return false
}
for i := 0; i < RetryLoop; i++ {
pvcs, err := k8sh.Clientset.CoreV1().PersistentVolumeClaims(namespace).List(listOpts)
if err == nil {
if len(pvcs.Items) == 0 {
return true
}
}
logger.Infof("waiting for PVCs for pods with label=%s to be deleted.", podName)
time.Sleep(RetryInterval * time.Second)
}
return false
}
// WaitUntilNameSpaceIsDeleted waits for namespace to be deleted for 180s.
// If namespace is deleted True is returned, if not false.
func (k8sh *K8sHelper) WaitUntilNameSpaceIsDeleted(namespace string) bool {
getOpts := metav1.GetOptions{}
for i := 0; i < RetryLoop; i++ {
ns, err := k8sh.Clientset.CoreV1().Namespaces().Get(namespace, getOpts)
if err != nil {
return true
}
logger.Infof("Namespace %s %v", namespace, ns.Status.Phase)
time.Sleep(RetryInterval * time.Second)
}
return false
}
// CreateExternalRGWService creates a service for rgw access external to the cluster on a node port
func (k8sh *K8sHelper) CreateExternalRGWService(namespace, storeName string) error {
svcName := "rgw-external-" + storeName
externalSvc := `apiVersion: v1
kind: Service
metadata:
name: ` + svcName + `
namespace: ` + namespace + `
labels:
app: rook-ceph-rgw
rook_cluster: ` + namespace + `
spec:
ports:
- name: rook-ceph-rgw
port: 53390
protocol: TCP
selector:
app: rook-ceph-rgw
rook_cluster: ` + namespace + `
sessionAffinity: None
type: NodePort
`
_, err := k8sh.KubectlWithStdin(externalSvc, []string{"apply", "-f", "-"}...)
if err != nil && !errors.IsAlreadyExists(err) {
return fmt.Errorf("failed to create external service. %+v", err)
}
return nil
}
func (k8sh *K8sHelper) GetRGWServiceURL(storeName string, namespace string) (string, error) {
if k8sh.RunningInCluster {
return k8sh.GetInternalRGWServiceURL(storeName, namespace)
}
return k8sh.GetExternalRGWServiceURL(storeName, namespace)
}
// GetInternalRGWServiceURL returns the URL of the ceph RGW service inside the cluster
func (k8sh *K8sHelper) GetInternalRGWServiceURL(storeName string, namespace string) (string, error) {
name := "rook-ceph-rgw-" + storeName
svc, err := k8sh.GetService(name, namespace)
if err != nil {
return "", fmt.Errorf("RGW service not found/object. %+v", err)
}
endpoint := fmt.Sprintf("%s:%d", svc.Spec.ClusterIP, svc.Spec.Ports[0].Port)
logger.Infof("internal rgw endpoint: %s", endpoint)
return endpoint, nil
}
// GetExternalRGWServiceURL returns the URL of the ceph RGW service as reachable from outside the cluster
func (k8sh *K8sHelper) GetExternalRGWServiceURL(storeName string, namespace string) (string, error) {
hostip, err := k8sh.GetPodHostID("rook-ceph-rgw", namespace)
if err != nil {
return "", fmt.Errorf("RGW pods not found. %+v", err)
}
serviceName := "rgw-external-" + storeName
nodePort, err := k8sh.GetServiceNodePort(serviceName, namespace)
if err != nil {
return "", fmt.Errorf("RGW service not found. %+v", err)
}
endpoint := hostip + ":" + nodePort
logger.Infof("external rgw endpoint: %s", endpoint)
return endpoint, err
}
// ChangeHostnames modifies the node hostname label to run tests in an environment where the node name is different from the hostname label
func (k8sh *K8sHelper) ChangeHostnames() error {
nodes, err := k8sh.Clientset.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
return err
}
for _, node := range nodes.Items {
hostname := node.Labels[v1.LabelHostname]
if !strings.HasPrefix(hostname, hostnameTestPrefix) {
node.Labels[v1.LabelHostname] = hostnameTestPrefix + hostname
logger.Infof("changed hostname of node %s to %s", node.Name, node.Labels[v1.LabelHostname])
_, err := k8sh.Clientset.CoreV1().Nodes().Update(&node) //nolint:gosec // We safely suppress gosec in tests file
if err != nil {
return err
}
}
}
return nil
}
// RestoreHostnames removes the test suffix from the node hostname labels
func (k8sh *K8sHelper) RestoreHostnames() ([]string, error) {
nodes, err := k8sh.Clientset.CoreV1().Nodes().List(metav1.ListOptions{})
if err != nil {
return nil, err
}
for _, node := range nodes.Items {
hostname := node.Labels[v1.LabelHostname]
if strings.HasPrefix(hostname, hostnameTestPrefix) {
node.Labels[v1.LabelHostname] = hostname[len(hostnameTestPrefix):]
logger.Infof("restoring hostname of node %s to %s", node.Name, node.Labels[v1.LabelHostname])
_, err := k8sh.Clientset.CoreV1().Nodes().Update(&node) //nolint:gosec // We safely suppress gosec in tests file
if err != nil {
return nil, err
}
}
}
return nil, nil
}
// IsRookInstalled returns true if the rook-ceph-mgr service is running (indicating rook is installed)
func (k8sh *K8sHelper) IsRookInstalled(namespace string) bool {
opts := metav1.GetOptions{}
_, err := k8sh.Clientset.CoreV1().Services(namespace).Get("rook-ceph-mgr", opts)
return err == nil
}
// CollectPodLogsFromLabel collects logs for pods with the given label
func (k8sh *K8sHelper) CollectPodLogsFromLabel(podLabel, namespace, testName, platformName string) {
pods, err := k8sh.Clientset.CoreV1().Pods(namespace).List(metav1.ListOptions{LabelSelector: podLabel})
if err != nil {
logger.Errorf("failed to list pods in namespace %s. %+v", namespace, err)
return
}
k8sh.getPodsLogs(pods, namespace, testName, platformName)
}
// GetLogsFromNamespace collects logs for all containers in all pods in the namespace
func (k8sh *K8sHelper) GetLogsFromNamespace(namespace, testName, platformName string) {
logger.Infof("Gathering logs for all pods in namespace %s", namespace)
pods, err := k8sh.Clientset.CoreV1().Pods(namespace).List(metav1.ListOptions{})
if err != nil {
logger.Errorf("failed to list pods in namespace %s. %+v", namespace, err)
return
}
k8sh.getPodsLogs(pods, namespace, testName, platformName)
}
func (k8sh *K8sHelper) getPodsLogs(pods *v1.PodList, namespace, testName, platformName string) {
for _, p := range pods.Items {
k8sh.getPodLogs(p, platformName, namespace, testName, false)
if strings.Contains(p.Name, "operator") {
// get the previous logs for the operator
k8sh.getPodLogs(p, platformName, namespace, testName, true)
}
}
}
func (k8sh *K8sHelper) createTestLogFile(platformName, name, namespace, testName, suffix string) (*os.File, error) {
dir, _ := os.Getwd()
logDir := path.Join(dir, "_output/tests/")
if _, err := os.Stat(logDir); os.IsNotExist(err) {
err := os.MkdirAll(logDir, 0777)
if err != nil {
logger.Errorf("Cannot get logs files dir for app : %v in namespace %v, err: %v", name, namespace, err)
return nil, err
}
}
fileName := fmt.Sprintf("%s_%s_%s_%s%s_%d.log", testName, platformName, namespace, name, suffix, time.Now().Unix())
filePath := path.Join(logDir, fileName)
file, err := os.Create(filePath)
if err != nil {
logger.Errorf("Cannot create file %s. %v", filePath, err)
return nil, err
}
logger.Debugf("created log file: %s", filePath)
return file, nil
}
func (k8sh *K8sHelper) getPodLogs(pod v1.Pod, platformName, namespace, testName string, previousLog bool) {
suffix := ""
if previousLog {
suffix = "_previous"
}
file, err := k8sh.createTestLogFile(platformName, pod.Name, namespace, testName, suffix)
if err != nil {
return
}
defer file.Close()
for _, container := range pod.Spec.InitContainers {
k8sh.appendContainerLogs(file, pod, container.Name, previousLog, true)
}
for _, container := range pod.Spec.Containers {
k8sh.appendContainerLogs(file, pod, container.Name, previousLog, false)
}
}
func writeHeader(file *os.File, message string) error {
file.WriteString("\n-----------------------------------------\n") //nolint, ok to ignore this test logging
file.WriteString(message) //nolint, ok to ignore this test logging
file.WriteString("\n-----------------------------------------\n") //nolint, ok to ignore this test logging
return nil
}
func (k8sh *K8sHelper) appendContainerLogs(file *os.File, pod v1.Pod, containerName string, previousLog, initContainer bool) {
message := fmt.Sprintf("CONTAINER: %s", containerName)
if initContainer {
message = "INIT " + message
}
writeHeader(file, message) //nolint, ok to ignore this test logging
logOpts := &v1.PodLogOptions{Previous: previousLog}
if containerName != "" {
logOpts.Container = containerName
}
res := k8sh.Clientset.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, logOpts).Do()
rawData, err := res.Raw()
if err != nil {
// Sometimes we fail to get logs for pods using this method, notably the operator pod. It is
// unknown why this happens. Pod logs are VERY important, so try again using kubectl.
l, err := k8sh.Kubectl("-n", pod.Namespace, "logs", pod.Name, "-c", containerName)
if err != nil {
logger.Errorf("Cannot get logs for pod %s and container %s. %v", pod.Name, containerName, err)
return
}
rawData = []byte(l)
}
if _, err := file.Write(rawData); err != nil {
logger.Errorf("Errors while writing logs for pod %s and container %s. %v", pod.Name, containerName, err)
}
}
// CreateAnonSystemClusterBinding Creates anon-user-access clusterrolebinding for cluster-admin role - used by kubeadm env.
func (k8sh *K8sHelper) CreateAnonSystemClusterBinding() {
_, err := k8sh.Clientset.RbacV1beta1().ClusterRoleBindings().Get("anon-user-access", metav1.GetOptions{})
if err != nil {
logger.Warningf("anon-user-access clusterrolebinding not found. %v", err)
args := []string{"create", "clusterrolebinding", "anon-user-access", "--clusterrole", "cluster-admin", "--user", "system:anonymous"}
_, err := k8sh.Kubectl(args...)
if err != nil {
logger.Errorf("failed to create anon-user-access. %v", err)
return
}
logger.Info("anon-user-access creation completed, waiting for it to exist in API")
}
for i := 0; i < RetryLoop; i++ {
var err error
if _, err = k8sh.Clientset.RbacV1().ClusterRoleBindings().Get("anon-user-access", metav1.GetOptions{}); err == nil {
break
}
logger.Warningf("failed to get anon-user-access clusterrolebinding, will try again: %+v", err)
time.Sleep(RetryInterval * time.Second)
}
}
func (k8sh *K8sHelper) DeleteRoleAndBindings(name, namespace string) error {
err := k8sh.DeleteResource("role", name, "-n", namespace)
if err != nil {
return err
}
err = k8sh.DeleteResource("rolebinding", name, "-n", namespace)
if err != nil {
return err
}
return nil
}
func (k8sh *K8sHelper) DeleteRoleBinding(name, namespace string) error {
err := k8sh.DeleteResource("rolebinding", name, "-n", namespace)
return err
}
func (k8sh *K8sHelper) ScaleStatefulSet(statefulSetName, namespace string, replicationSize int) error {
args := []string{"-n", namespace, "scale", "statefulsets", statefulSetName, fmt.Sprintf("--replicas=%d", replicationSize)}
_, err := k8sh.Kubectl(args...)
return err
}
func IsKubectlErrorNotFound(output string, err error) bool {
return err != nil && strings.Contains(output, "Error from server (NotFound)")
}
// WaitForDeploymentCount waits until the desired number of deployments with the label exist. The
// deployments are not guaranteed to be running, only existing.
func (k8sh *K8sHelper) WaitForDeploymentCount(label, namespace string, count int) error {
return k8sh.WaitForDeploymentCountWithRetries(label, namespace, count, RetryLoop)
}
// WaitForDeploymentCountWithRetries waits until the desired number of deployments with the label
// exist, retrying the specified number of times. The deployments are not guaranteed to be running,
// only existing.
func (k8sh *K8sHelper) WaitForDeploymentCountWithRetries(label, namespace string, count, retries int) error {
options := metav1.ListOptions{LabelSelector: label}
for i := 0; i < retries; i++ {
deps, err := k8sh.Clientset.AppsV1().Deployments(namespace).List(options)
numDeps := 0
if err == nil {
numDeps = len(deps.Items)
}
if numDeps >= count {
logger.Infof("found %d of %d deployments with label %s in namespace %s", numDeps, count, label, namespace)
return nil
}
logger.Infof("waiting for %d deployments (found %d) with label %s in namespace %s", count, numDeps, label, namespace)
time.Sleep(RetryInterval * time.Second)
}
return fmt.Errorf("giving up waiting for %d deployments with label %s in namespace %s", count, label, namespace)
}
// WaitForLabeledDeploymentsToBeReady waits for all deployments matching the given label selector to
// be fully ready with a default timeout.
func (k8sh *K8sHelper) WaitForLabeledDeploymentsToBeReady(label, namespace string) error {
return k8sh.WaitForLabeledDeploymentsToBeReadyWithRetries(label, namespace, RetryLoop)
}
// WaitForLabeledDeploymentsToBeReadyWithRetries waits for all deployments matching the given label
// selector to be fully ready. Retry the number of times given.
func (k8sh *K8sHelper) WaitForLabeledDeploymentsToBeReadyWithRetries(label, namespace string, retries int) error {
listOpts := metav1.ListOptions{LabelSelector: label}
var lastDep apps.Deployment
for i := 0; i < retries; i++ {
deps, err := k8sh.Clientset.AppsV1().Deployments(namespace).List(listOpts)
ready := 0
if err == nil && len(deps.Items) > 0 {
for _, dep := range deps.Items {
if dep.Status.Replicas == dep.Status.ReadyReplicas {
ready++
} else {
lastDep = dep // make it the last non-ready dep
}
if ready == len(deps.Items) {
logger.Infof("all %d deployments with label %s are running", len(deps.Items), label)
return nil
}
}
}
logger.Infof("waiting for deployment(s) with label %s in namespace %s to be running. ready=%d/%d, err=%+v",
label, namespace, ready, len(deps.Items), err)
time.Sleep(RetryInterval * time.Second)
}
if len(lastDep.Name) == 0 {
logger.Infof("no deployment was found with label %s", label)
} else {
r, err := k8sh.Kubectl("-n", namespace, "get", "-o", "yaml", "deployments", "--selector", label)
if err != nil {
logger.Infof("deployments with label %s:\n%s", label, r)
}
}
return fmt.Errorf("giving up waiting for deployment(s) with label %s in namespace %s to be ready", label, namespace)
}
| ["\"DOCKERCMD\""] | [] | ["DOCKERCMD"] | [] | ["DOCKERCMD"] | go | 1 | 0 |
tests/integration/test_install_twists.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import os
import shutil
import sys
import pytest
from flaky import flaky
from pipenv._compat import Path
from pipenv.utils import mkdir_p, temp_environ
from pipenv.vendor import delegator
@pytest.mark.extras
@pytest.mark.install
@pytest.mark.local
def test_local_extras_install(PipenvInstance):
"""Ensure -e .[extras] installs.
"""
with PipenvInstance(chdir=True) as p:
setup_py = os.path.join(p.path, "setup.py")
with open(setup_py, "w") as fh:
contents = """
from setuptools import setup, find_packages
setup(
name='testpipenv',
version='0.1',
description='Pipenv Test Package',
author='Pipenv Test',
author_email='[email protected]',
license='MIT',
packages=find_packages(),
install_requires=[],
extras_require={'dev': ['six']},
zip_safe=False
)
""".strip()
fh.write(contents)
line = "-e .[dev]"
with open(os.path.join(p.path, 'Pipfile'), 'w') as fh:
fh.write("""
[packages]
testpipenv = {path = ".", editable = true, extras = ["dev"]}
[dev-packages]
""".strip())
# project.write_toml({"packages": pipfile, "dev-packages": {}})
c = p.pipenv("install")
assert c.return_code == 0
assert "testpipenv" in p.lockfile["default"]
assert p.lockfile["default"]["testpipenv"]["extras"] == ["dev"]
assert "six" in p.lockfile["default"]
c = p.pipenv("uninstall --all")
assert c.return_code == 0
print("Current directory: {0}".format(os.getcwd()), file=sys.stderr)
c = p.pipenv("install {0}".format(line))
assert c.return_code == 0
assert "testpipenv" in p.pipfile["packages"]
assert p.pipfile["packages"]["testpipenv"]["path"] == "."
assert p.pipfile["packages"]["testpipenv"]["extras"] == ["dev"]
assert "six" in p.lockfile["default"]
@pytest.mark.local
@pytest.mark.install
@pytest.mark.needs_internet
@flaky
class TestDirectDependencies(object):
"""Ensure dependency_links are parsed and installed.
This is needed for private repo dependencies.
"""
@staticmethod
def helper_dependency_links_install_make_setup(pipenv_instance, deplink):
setup_py = os.path.join(pipenv_instance.path, "setup.py")
with open(setup_py, "w") as fh:
contents = """
from setuptools import setup
setup(
name='testdeplinks',
version='0.1',
packages=[],
install_requires=[
'{0}'
],
)
""".strip().format(deplink)
fh.write(contents)
@staticmethod
def helper_dependency_links_install_test(pipenv_instance, deplink):
TestDirectDependencies.helper_dependency_links_install_make_setup(pipenv_instance, deplink)
c = pipenv_instance.pipenv("install -v -e .")
assert c.return_code == 0
assert "test-private-dependency" in pipenv_instance.lockfile["default"]
def test_https_dependency_links_install(self, PipenvInstance):
"""Ensure dependency_links are parsed and installed (needed for private repo dependencies).
"""
with temp_environ(), PipenvInstance(chdir=True) as p:
os.environ["PIP_NO_BUILD_ISOLATION"] = '1'
TestDirectDependencies.helper_dependency_links_install_test(
p,
'test-private-dependency@ git+https://github.com/atzannes/[email protected]'
)
@pytest.mark.needs_github_ssh
def test_ssh_dependency_links_install(self, PipenvInstance):
with temp_environ(), PipenvInstance(chdir=True) as p:
os.environ['PIP_PROCESS_DEPENDENCY_LINKS'] = '1'
os.environ["PIP_NO_BUILD_ISOLATION"] = '1'
TestDirectDependencies.helper_dependency_links_install_test(
p,
'test-private-dependency@ git+ssh://[email protected]/atzannes/[email protected]'
)
@pytest.mark.e
@pytest.mark.local
@pytest.mark.install
@pytest.mark.skip(reason="this doesn't work on windows")
def test_e_dot(PipenvInstance, pip_src_dir):
with PipenvInstance() as p:
path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
c = p.pipenv("install -e '{0}' --dev".format(path))
assert c.return_code == 0
key = [k for k in p.pipfile["dev-packages"].keys()][0]
assert "path" in p.pipfile["dev-packages"][key]
assert "requests" in p.lockfile["develop"]
@pytest.mark.install
@pytest.mark.multiprocessing
@flaky
def test_multiprocess_bug_and_install(PipenvInstance):
with temp_environ():
os.environ["PIPENV_MAX_SUBPROCESS"] = "2"
with PipenvInstance(chdir=True) as p:
with open(p.pipfile_path, "w") as f:
contents = """
[packages]
pytz = "*"
six = "*"
urllib3 = "*"
""".strip()
f.write(contents)
c = p.pipenv("install")
assert c.return_code == 0
assert "pytz" in p.lockfile["default"]
assert "six" in p.lockfile["default"]
assert "urllib3" in p.lockfile["default"]
c = p.pipenv('run python -c "import six; import pytz; import urllib3;"')
assert c.return_code == 0
@pytest.mark.install
@pytest.mark.sequential
@flaky
def test_sequential_mode(PipenvInstance):
with PipenvInstance(chdir=True) as p:
with open(p.pipfile_path, "w") as f:
contents = """
[packages]
six = "*"
urllib3 = "*"
pytz = "*"
""".strip()
f.write(contents)
c = p.pipenv("install --sequential")
assert c.return_code == 0
assert "six" in p.lockfile["default"]
assert "pytz" in p.lockfile["default"]
assert "urllib3" in p.lockfile["default"]
c = p.pipenv('run python -c "import six; import urllib3; import pytz;"')
assert c.return_code == 0
@pytest.mark.run
@pytest.mark.install
def test_normalize_name_install(PipenvInstance):
with PipenvInstance() as p:
with open(p.pipfile_path, "w") as f:
contents = """
# Pre comment
[packages]
Requests = "==2.14.0" # Inline comment
"""
f.write(contents)
c = p.pipenv("install")
assert c.return_code == 0
c = p.pipenv("install requests")
assert c.return_code == 0
assert "requests" not in p.pipfile["packages"]
assert p.pipfile["packages"]["Requests"] == "==2.14.0"
c = p.pipenv("install requests==2.18.4")
assert c.return_code == 0
assert p.pipfile["packages"]["Requests"] == "==2.18.4"
c = p.pipenv("install python_DateUtil")
assert c.return_code == 0
assert "python-dateutil" in p.pipfile["packages"]
with open(p.pipfile_path) as f:
contents = f.read()
assert "# Pre comment" in contents
assert "# Inline comment" in contents
@flaky
@pytest.mark.eggs
@pytest.mark.files
@pytest.mark.local
@pytest.mark.resolver
def test_local_package(PipenvInstance, pip_src_dir, testsroot):
"""This test ensures that local packages (directories with a setup.py)
installed in editable mode have their dependencies resolved as well."""
file_name = "requests-2.19.1.tar.gz"
package = "requests-2.19.1"
# Not sure where travis/appveyor run tests from
source_path = os.path.abspath(os.path.join(testsroot, "test_artifacts", file_name))
with PipenvInstance(chdir=True) as p:
# This tests for a bug when installing a zipfile in the current dir
copy_to = os.path.join(p.path, file_name)
shutil.copy(source_path, copy_to)
import tarfile
with tarfile.open(copy_to, "r:gz") as tgz:
tgz.extractall(path=p.path)
c = p.pipenv("install -e {0}".format(package))
assert c.return_code == 0
assert all(
pkg in p.lockfile["default"]
for pkg in ["urllib3", "idna", "certifi", "chardet"]
)
@pytest.mark.files
@pytest.mark.local
@flaky
def test_local_zipfiles(PipenvInstance, testsroot):
file_name = "requests-2.19.1.tar.gz"
# Not sure where travis/appveyor run tests from
source_path = os.path.abspath(os.path.join(testsroot, "test_artifacts", file_name))
with PipenvInstance(chdir=True) as p:
# This tests for a bug when installing a zipfile in the current dir
shutil.copy(source_path, os.path.join(p.path, file_name))
c = p.pipenv("install {}".format(file_name))
assert c.return_code == 0
key = [k for k in p.pipfile["packages"].keys()][0]
dep = p.pipfile["packages"][key]
assert "file" in dep or "path" in dep
assert c.return_code == 0
# This now gets resolved to its name correctly
dep = p.lockfile["default"]["requests"]
assert "file" in dep or "path" in dep
@pytest.mark.local
@pytest.mark.files
@flaky
def test_relative_paths(PipenvInstance, testsroot):
file_name = "requests-2.19.1.tar.gz"
source_path = os.path.abspath(os.path.join(testsroot, "test_artifacts", file_name))
with PipenvInstance() as p:
artifact_dir = "artifacts"
artifact_path = os.path.join(p.path, artifact_dir)
mkdir_p(artifact_path)
shutil.copy(source_path, os.path.join(artifact_path, file_name))
# Test installing a relative path in a subdirectory
c = p.pipenv("install {}/{}".format(artifact_dir, file_name))
assert c.return_code == 0
key = next(k for k in p.pipfile["packages"].keys())
dep = p.pipfile["packages"][key]
assert "path" in dep
assert Path(".", artifact_dir, file_name) == Path(dep["path"])
assert c.return_code == 0
@pytest.mark.install
@pytest.mark.local
@pytest.mark.local_file
@flaky
def test_install_local_file_collision(PipenvInstance):
with PipenvInstance() as p:
target_package = "alembic"
fake_file = os.path.join(p.path, target_package)
with open(fake_file, "w") as f:
f.write("")
c = p.pipenv("install {}".format(target_package))
assert c.return_code == 0
assert target_package in p.pipfile["packages"]
assert p.pipfile["packages"][target_package] == "*"
assert target_package in p.lockfile["default"]
@pytest.mark.urls
@pytest.mark.install
def test_install_local_uri_special_character(PipenvInstance, testsroot):
file_name = "six-1.11.0+mkl-py2.py3-none-any.whl"
source_path = os.path.abspath(os.path.join(testsroot, "test_artifacts", file_name))
with PipenvInstance() as p:
artifact_dir = "artifacts"
artifact_path = os.path.join(p.path, artifact_dir)
mkdir_p(artifact_path)
shutil.copy(source_path, os.path.join(artifact_path, file_name))
with open(p.pipfile_path, "w") as f:
contents = """
# Pre comment
[packages]
six = {{path = "./artifacts/{}"}}
""".format(
file_name
)
f.write(contents.strip())
c = p.pipenv("install")
assert c.return_code == 0
assert "six" in p.lockfile["default"]
@pytest.mark.run
@pytest.mark.files
@pytest.mark.install
def test_multiple_editable_packages_should_not_race(PipenvInstance, testsroot):
"""Test for a race condition that can occur when installing multiple 'editable' packages at
once, and which causes some of them to not be importable.
This issue had been fixed for VCS packages already, but not local 'editable' packages.
So this test locally installs packages from tarballs that have already been committed in
the local `pypi` dir to avoid using VCS packages.
"""
pkgs = ["requests", "flask", "six", "jinja2"]
pipfile_string = """
[dev-packages]
[packages]
"""
with PipenvInstance(chdir=True) as p:
for pkg_name in pkgs:
source_path = p._pipfile.get_fixture_path("git/{0}/".format(pkg_name)).as_posix()
shutil.copytree(source_path, pkg_name)
pipfile_string += '"{0}" = {{path = "./{0}", editable = true}}\n'.format(pkg_name)
with open(p.pipfile_path, 'w') as f:
f.write(pipfile_string.strip())
c = p.pipenv('install')
assert c.return_code == 0
c = p.pipenv('run python -c "import requests, flask, six, jinja2"')
assert c.return_code == 0, c.err
@pytest.mark.outdated
@pytest.mark.py3_only
def test_outdated_should_compare_postreleases_without_failing(PipenvInstance):
with PipenvInstance(chdir=True) as p:
c = p.pipenv("install ibm-db-sa-py3==0.3.0")
assert c.return_code == 0
c = p.pipenv("update --outdated")
assert c.return_code == 0
assert "Skipped Update" in c.err
p._pipfile.update("ibm-db-sa-py3", "*")
c = p.pipenv("update --outdated")
assert c.return_code != 0
assert "out-of-date" in c.out
| [] | [] | ["PIP_PROCESS_DEPENDENCY_LINKS", "PIP_NO_BUILD_ISOLATION", "PIPENV_MAX_SUBPROCESS"] | [] | ["PIP_PROCESS_DEPENDENCY_LINKS", "PIP_NO_BUILD_ISOLATION", "PIPENV_MAX_SUBPROCESS"] | python | 3 | 0 |
build/android/pylib/android_commands.py
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides an interface to communicate with the device via the adb command.
Assumes adb binary is currently on system path.
Note that this module is deprecated.
"""
# TODO(jbudorick): Delete this file once no clients use it.
# pylint: skip-file
import collections
import datetime
import inspect
import logging
import os
import random
import re
import shlex
import signal
import subprocess
import sys
import tempfile
import time
import cmd_helper
import constants
import system_properties
from utils import host_utils
try:
from pylib import pexpect
except ImportError:
pexpect = None
sys.path.append(os.path.join(
constants.DIR_SOURCE_ROOT, 'third_party', 'android_testrunner'))
import adb_interface
import am_instrument_parser
import errors
from pylib.device import device_blacklist
from pylib.device import device_errors
# Pattern to search for the next whole line of pexpect output and capture it
# into a match group. We can't use ^ and $ for line start end with pexpect,
# see http://www.noah.org/python/pexpect/#doc for explanation why.
PEXPECT_LINE_RE = re.compile('\n([^\r]*)\r')
# Set the adb shell prompt to be a unique marker that will [hopefully] not
# appear at the start of any line of a command's output.
SHELL_PROMPT = '~+~PQ\x17RS~+~'
# Java properties file
LOCAL_PROPERTIES_PATH = constants.DEVICE_LOCAL_PROPERTIES_PATH
# Property in /data/local.prop that controls Java assertions.
JAVA_ASSERT_PROPERTY = 'dalvik.vm.enableassertions'
# Keycode "enum" suitable for passing to AndroidCommands.SendKey().
KEYCODE_HOME = 3
KEYCODE_BACK = 4
KEYCODE_DPAD_UP = 19
KEYCODE_DPAD_DOWN = 20
KEYCODE_DPAD_RIGHT = 22
KEYCODE_ENTER = 66
KEYCODE_MENU = 82
MD5SUM_DEVICE_FOLDER = constants.TEST_EXECUTABLE_DIR + '/md5sum/'
MD5SUM_DEVICE_PATH = MD5SUM_DEVICE_FOLDER + 'md5sum_bin'
PIE_WRAPPER_PATH = constants.TEST_EXECUTABLE_DIR + '/run_pie'
CONTROL_USB_CHARGING_COMMANDS = [
{
# Nexus 4
'witness_file': '/sys/module/pm8921_charger/parameters/disabled',
'enable_command': 'echo 0 > /sys/module/pm8921_charger/parameters/disabled',
'disable_command':
'echo 1 > /sys/module/pm8921_charger/parameters/disabled',
},
{
# Nexus 5
# Setting the HIZ bit of the bq24192 causes the charger to actually ignore
# energy coming from USB. Setting the power_supply offline just updates the
# Android system to reflect that.
'witness_file': '/sys/kernel/debug/bq24192/INPUT_SRC_CONT',
'enable_command': (
'echo 0x4A > /sys/kernel/debug/bq24192/INPUT_SRC_CONT && '
'echo 1 > /sys/class/power_supply/usb/online'),
'disable_command': (
'echo 0xCA > /sys/kernel/debug/bq24192/INPUT_SRC_CONT && '
'chmod 644 /sys/class/power_supply/usb/online && '
'echo 0 > /sys/class/power_supply/usb/online'),
},
]
class DeviceTempFile(object):
def __init__(self, android_commands, prefix='temp_file', suffix=''):
"""Find an unused temporary file path in the devices external directory.
When this object is closed, the file will be deleted on the device.
"""
self.android_commands = android_commands
while True:
# TODO(cjhopman): This could actually return the same file in multiple
# calls if the caller doesn't write to the files immediately. This is
# expected to never happen.
i = random.randint(0, 1000000)
self.name = '%s/%s-%d-%010d%s' % (
android_commands.GetExternalStorage(),
prefix, int(time.time()), i, suffix)
if not android_commands.FileExistsOnDevice(self.name):
break
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def close(self):
self.android_commands.RunShellCommand('rm ' + self.name)
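# Illustrative usage (not part of the original file): callers such as
# RestartAdbdOnDevice() below use this helper roughly as
#   with DeviceTempFile(android_commands, suffix='.sh') as script:
#       android_commands.Adb().Push(host_path, script.name)
#       android_commands.RunShellCommand('. ' + script.name)
# so the temporary file is removed from the device when the block exits.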
def GetAVDs():
"""Returns a list of AVDs."""
re_avd = re.compile('^[ ]+Name: ([a-zA-Z0-9_:.-]+)', re.MULTILINE)
avds = re_avd.findall(cmd_helper.GetCmdOutput(['android', 'list', 'avd']))
return avds
def ResetBadDevices():
"""Removes the blacklist that keeps track of bad devices for a current
build.
"""
device_blacklist.ResetBlacklist()
def ExtendBadDevices(devices):
"""Adds devices to the blacklist that keeps track of bad devices for a
current build.
The devices listed in the bad devices file will not be returned by
GetAttachedDevices.
Args:
devices: list of bad devices to be added to the bad devices file.
"""
device_blacklist.ExtendBlacklist(devices)
def GetAttachedDevices(hardware=True, emulator=True, offline=False):
"""Returns a list of attached, android devices and emulators.
If a preferred device has been set with ANDROID_SERIAL, it will be first in
the returned list. The arguments specify what devices to include in the list.
Example output:
* daemon not running. starting it now on port 5037 *
* daemon started successfully *
List of devices attached
027c10494100b4d7 device
emulator-5554 offline
Args:
hardware: Include attached actual devices that are online.
emulator: Include emulators (i.e. AVD's) currently on host.
offline: Include devices and emulators that are offline.
Returns: List of devices.
"""
adb_devices_output = cmd_helper.GetCmdOutput([constants.GetAdbPath(),
'devices'])
re_device = re.compile('^([a-zA-Z0-9_:.-]+)\tdevice$', re.MULTILINE)
online_devices = re_device.findall(adb_devices_output)
re_device = re.compile('^(emulator-[0-9]+)\tdevice', re.MULTILINE)
emulator_devices = re_device.findall(adb_devices_output)
re_device = re.compile('^([a-zA-Z0-9_:.-]+)\t(?:offline|unauthorized)$',
re.MULTILINE)
offline_devices = re_device.findall(adb_devices_output)
devices = []
# First determine list of online devices (e.g. hardware and/or emulator).
if hardware and emulator:
devices = online_devices
elif hardware:
devices = [device for device in online_devices
if device not in emulator_devices]
elif emulator:
devices = emulator_devices
# Now add offline devices if offline is true
if offline:
devices = devices + offline_devices
# Remove any devices in the blacklist.
blacklist = device_blacklist.ReadBlacklist()
if len(blacklist):
logging.info('Avoiding bad devices %s', ' '.join(blacklist))
devices = [device for device in devices if device not in blacklist]
preferred_device = os.environ.get('ANDROID_SERIAL')
if preferred_device in devices:
devices.remove(preferred_device)
devices.insert(0, preferred_device)
return devices
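# Illustrative example, based on the docstring output above: with that adb
# output, GetAttachedDevices() would return ['027c10494100b4d7'] because
# emulator-5554 is offline, while GetAttachedDevices(offline=True) would also
# include 'emulator-5554'; if ANDROID_SERIAL names an attached device, it is
# moved to the front of the returned list.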
def IsDeviceAttached(device):
"""Return true if the device is attached and online."""
return device in GetAttachedDevices()
def _GetFilesFromRecursiveLsOutput(path, ls_output, re_file, utc_offset=None):
"""Gets a list of files from `ls` command output.
Python's os.walk isn't used because it doesn't work over adb shell.
Args:
path: The path to list.
ls_output: A list of lines returned by an `ls -lR` command.
re_file: A compiled regular expression which parses a line into named groups
consisting of at minimum "filename", "date", "time", "size" and
optionally "timezone".
utc_offset: A 5-character string of the form +HHMM or -HHMM, where HH is a
2-digit string giving the number of UTC offset hours, and MM is a
2-digit string giving the number of UTC offset minutes. If the input
utc_offset is None, will try to look for the value of "timezone" if it
is specified in re_file.
Returns:
A dict of {"name": (size, lastmod), ...} where:
name: The file name relative to |path|'s directory.
size: The file size in bytes (0 for directories).
lastmod: The file last modification date in UTC.
"""
re_directory = re.compile('^%s/(?P<dir>[^:]+):$' % re.escape(path))
path_dir = os.path.dirname(path)
current_dir = ''
files = {}
for line in ls_output:
directory_match = re_directory.match(line)
if directory_match:
current_dir = directory_match.group('dir')
continue
file_match = re_file.match(line)
if file_match:
filename = os.path.join(current_dir, file_match.group('filename'))
if filename.startswith(path_dir):
filename = filename[len(path_dir) + 1:]
lastmod = datetime.datetime.strptime(
file_match.group('date') + ' ' + file_match.group('time')[:5],
'%Y-%m-%d %H:%M')
if not utc_offset and 'timezone' in re_file.groupindex:
utc_offset = file_match.group('timezone')
if isinstance(utc_offset, str) and len(utc_offset) == 5:
utc_delta = datetime.timedelta(hours=int(utc_offset[1:3]),
minutes=int(utc_offset[3:5]))
if utc_offset[0:1] == '-':
utc_delta = -utc_delta
lastmod -= utc_delta
files[filename] = (int(file_match.group('size')), lastmod)
return files
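# Illustrative example (hypothetical values): for path='/foo', a directory
# header line '/foo/bar:' followed by a file line whose named groups give
# filename='baz.txt', size='102', date='2011-05-12' and time='12:29:54' would
# produce {'bar/baz.txt': (102, lastmod)}, where lastmod is 2011-05-12 12:29
# shifted back by a '+0100' utc_offset to 11:29 UTC.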
def _ParseMd5SumOutput(md5sum_output):
"""Returns a list of tuples from the provided md5sum output.
Args:
md5sum_output: output directly from md5sum binary.
Returns:
List of namedtuples with attributes |hash| and |path|, where |path| is the
absolute path to the file with an Md5Sum of |hash|.
"""
HashAndPath = collections.namedtuple('HashAndPath', ['hash', 'path'])
split_lines = [line.split(' ') for line in md5sum_output]
return [HashAndPath._make(s) for s in split_lines if len(s) == 2]
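# Illustrative example (hypothetical hash): a line of the form
# 'd41d8cd98f00b204e9800998ecf8427e /data/local/tmp/foo' is parsed into
# HashAndPath(hash='d41d8cd98f00b204e9800998ecf8427e', path='/data/local/tmp/foo');
# lines that do not split into exactly two space-separated fields are dropped.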
def _HasAdbPushSucceeded(command_output):
"""Returns whether adb push has succeeded from the provided output."""
# TODO(frankf): We should look at the return code instead of the command
# output for many of the commands in this file.
if not command_output:
return True
# Success looks like this: "3035 KB/s (12512056 bytes in 4.025s)"
# Errors look like this: "failed to copy ... "
if not re.search('^[0-9]', command_output.splitlines()[-1]):
logging.critical('PUSH FAILED: ' + command_output)
return False
return True
def GetLogTimestamp(log_line, year):
"""Returns the timestamp of the given |log_line| in the given year."""
try:
return datetime.datetime.strptime('%s-%s' % (year, log_line[:18]),
'%Y-%m-%d %H:%M:%S.%f')
except (ValueError, IndexError):
logging.critical('Error reading timestamp from ' + log_line)
return None
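# Illustrative example (hypothetical log line): GetLogTimestamp(
# '05-12 12:29:54.131 I/ActivityManager ...', 2011) parses the first 18
# characters and returns datetime.datetime(2011, 5, 12, 12, 29, 54, 131000);
# lines that do not match the expected format return None.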
class AndroidCommands(object):
"""Helper class for communicating with Android device via adb."""
def __init__(self, device=None):
"""Constructor.
Args:
device: If given, adb commands are only sent to the device with this ID.
Otherwise commands are sent to all attached devices.
"""
self._adb = adb_interface.AdbInterface(constants.GetAdbPath())
if device:
self._adb.SetTargetSerial(device)
self._device = device
self._logcat = None
self.logcat_process = None
self._logcat_tmpoutfile = None
self._pushed_files = []
self._device_utc_offset = None
self._potential_push_size = 0
self._actual_push_size = 0
self._external_storage = ''
self._util_wrapper = ''
self._system_properties = system_properties.SystemProperties(self.Adb())
self._push_if_needed_cache = {}
self._control_usb_charging_command = {
'command': None,
'cached': False,
}
self._protected_file_access_method_initialized = None
self._privileged_command_runner = None
self._pie_wrapper = None
@property
def system_properties(self):
return self._system_properties
def _LogShell(self, cmd):
"""Logs the adb shell command."""
if self._device:
device_repr = self._device[-4:]
else:
device_repr = '????'
logging.info('[%s]> %s', device_repr, cmd)
def Adb(self):
"""Returns our AdbInterface to avoid us wrapping all its methods."""
# TODO(tonyg): Goal should be to get rid of this method by making this API
# complete and alleviating the need.
return self._adb
def GetDevice(self):
"""Returns the device serial."""
return self._device
def IsOnline(self):
"""Checks whether the device is online.
Returns:
True if device is in 'device' mode, False otherwise.
"""
# TODO(aurimas): revert to using adb get-state when android L adb is fixed.
#out = self._adb.SendCommand('get-state')
#return out.strip() == 'device'
out = self._adb.SendCommand('devices')
for line in out.split('\n'):
if self._device in line and 'device' in line:
return True
return False
def IsRootEnabled(self):
"""Checks if root is enabled on the device."""
root_test_output = self.RunShellCommand('ls /root') or ['']
return not 'Permission denied' in root_test_output[0]
def EnableAdbRoot(self):
"""Enables adb root on the device.
Returns:
True: if output from executing adb root was as expected.
False: otherwise.
"""
if self.GetBuildType() == 'user':
logging.warning("Can't enable root in production builds with type user")
return False
else:
return_value = self._adb.EnableAdbRoot()
# EnableAdbRoot inserts a call for wait-for-device only when adb logcat
# output matches what is expected. Just to be safe add a call to
# wait-for-device.
self._adb.SendCommand('wait-for-device')
return return_value
def GetDeviceYear(self):
"""Returns the year information of the date on device."""
return self.RunShellCommand('date +%Y')[0]
def GetExternalStorage(self):
if not self._external_storage:
self._external_storage = self.RunShellCommand('echo $EXTERNAL_STORAGE')[0]
if not self._external_storage:
raise device_errors.CommandFailedError(
['shell', "'echo $EXTERNAL_STORAGE'"],
'Unable to find $EXTERNAL_STORAGE')
return self._external_storage
def WaitForDevicePm(self, timeout=120):
"""Blocks until the device's package manager is available.
To workaround http://b/5201039, we restart the shell and retry if the
package manager isn't back after 120 seconds.
Raises:
errors.WaitForResponseTimedOutError after max retries reached.
"""
last_err = None
retries = 3
while retries:
try:
self._adb.WaitForDevicePm(wait_time=timeout)
return # Success
except errors.WaitForResponseTimedOutError as e:
last_err = e
logging.warning('Restarting and retrying after timeout: %s', e)
retries -= 1
self.RestartShell()
raise last_err # Only reached after max retries, re-raise the last error.
def RestartShell(self):
"""Restarts the shell on the device. Does not block for it to return."""
self.RunShellCommand('stop')
self.RunShellCommand('start')
def Reboot(self, full_reboot=True):
"""Reboots the device and waits for the package manager to return.
Args:
full_reboot: Whether to fully reboot the device or just restart the shell.
"""
# TODO(torne): hive can't reboot the device either way without breaking the
# connection; work out if we can handle this better
if os.environ.get('USING_HIVE'):
logging.warning('Ignoring reboot request as we are on hive')
return
if full_reboot or not self.IsRootEnabled():
self._adb.SendCommand('reboot')
self._system_properties = system_properties.SystemProperties(self.Adb())
timeout = 300
retries = 1
# Wait for the device to disappear.
while retries < 10 and self.IsOnline():
time.sleep(1)
retries += 1
else:
self.RestartShell()
timeout = 120
# To run tests we need at least the package manager and the sd card (or
# other external storage) to be ready.
self.WaitForDevicePm(timeout)
self.WaitForSdCardReady(timeout)
def Shutdown(self):
"""Shuts down the device."""
self._adb.SendCommand('reboot -p')
self._system_properties = system_properties.SystemProperties(self.Adb())
def Uninstall(self, package):
"""Uninstalls the specified package from the device.
Args:
package: Name of the package to remove.
Returns:
A status string returned by adb uninstall
"""
uninstall_command = 'uninstall %s' % package
self._LogShell(uninstall_command)
return self._adb.SendCommand(uninstall_command, timeout_time=60)
def Install(self, package_file_path, reinstall=False):
"""Installs the specified package to the device.
Args:
package_file_path: Path to .apk file to install.
reinstall: Reinstall an existing apk, keeping the data.
Returns:
A status string returned by adb install
"""
assert os.path.isfile(package_file_path), ('<%s> is not file' %
package_file_path)
install_cmd = ['install']
if reinstall:
install_cmd.append('-r')
install_cmd.append(package_file_path)
install_cmd = ' '.join(install_cmd)
self._LogShell(install_cmd)
return self._adb.SendCommand(install_cmd,
timeout_time=2 * 60,
retry_count=0)
def ManagedInstall(self, apk_path, keep_data=False, package_name=None,
reboots_on_timeout=2):
"""Installs specified package and reboots device on timeouts.
If package_name is supplied, checks if the package is already installed and
doesn't reinstall if the apk md5sums match.
Args:
apk_path: Path to .apk file to install.
keep_data: Reinstalls instead of uninstalling first, preserving the
application data.
package_name: Package name (only needed if keep_data=False).
reboots_on_timeout: number of times to reboot if the package manager is frozen.
"""
# Check if package is already installed and up to date.
if package_name:
installed_apk_path = self.GetApplicationPath(package_name)
if (installed_apk_path and
not self.GetFilesChanged(apk_path, installed_apk_path,
ignore_filenames=True)):
logging.info('Skipped install: identical %s APK already installed' %
package_name)
return
# Install.
reboots_left = reboots_on_timeout
while True:
try:
if not keep_data:
assert package_name
self.Uninstall(package_name)
install_status = self.Install(apk_path, reinstall=keep_data)
if 'Success' in install_status:
return
else:
raise Exception('Install failure: %s' % install_status)
except errors.WaitForResponseTimedOutError:
print '@@@STEP_WARNINGS@@@'
logging.info('Timeout on installing %s on device %s', apk_path,
self._device)
if reboots_left <= 0:
raise Exception('Install timed out')
# Force a hard reboot on last attempt
self.Reboot(full_reboot=(reboots_left == 1))
reboots_left -= 1
def MakeSystemFolderWritable(self):
"""Remounts the /system folder rw."""
out = self._adb.SendCommand('remount')
if out.strip() != 'remount succeeded':
raise errors.MsgException('Remount failed: %s' % out)
def RestartAdbdOnDevice(self):
logging.info('Restarting adbd on the device...')
with DeviceTempFile(self, suffix=".sh") as temp_script_file:
host_script_path = os.path.join(constants.DIR_SOURCE_ROOT,
'build',
'android',
'pylib',
'restart_adbd.sh')
self._adb.Push(host_script_path, temp_script_file.name)
self.RunShellCommand('. %s' % temp_script_file.name)
self._adb.SendCommand('wait-for-device')
def RestartAdbServer(self):
"""Restart the adb server."""
ret = self.KillAdbServer()
if ret != 0:
raise errors.MsgException('KillAdbServer: %d' % ret)
ret = self.StartAdbServer()
if ret != 0:
raise errors.MsgException('StartAdbServer: %d' % ret)
@staticmethod
def KillAdbServer():
"""Kill adb server."""
adb_cmd = [constants.GetAdbPath(), 'kill-server']
ret = cmd_helper.RunCmd(adb_cmd)
retry = 0
while retry < 3:
ret, _ = cmd_helper.GetCmdStatusAndOutput(['pgrep', 'adb'])
if ret != 0:
# pgrep didn't find adb, kill-server succeeded.
return 0
retry += 1
time.sleep(retry)
return ret
def StartAdbServer(self):
"""Start adb server."""
adb_cmd = ['taskset', '-c', '0', constants.GetAdbPath(), 'start-server']
ret, _ = cmd_helper.GetCmdStatusAndOutput(adb_cmd)
retry = 0
while retry < 3:
ret, _ = cmd_helper.GetCmdStatusAndOutput(['pgrep', 'adb'])
if ret == 0:
# pgrep found adb, start-server succeeded.
# Waiting for device to reconnect before returning success.
self._adb.SendCommand('wait-for-device')
return 0
retry += 1
time.sleep(retry)
return ret
def WaitForSystemBootCompleted(self, wait_time):
"""Waits for targeted system's boot_completed flag to be set.
Args:
wait_time: time in seconds to wait
Raises:
WaitForResponseTimedOutError if wait_time elapses and flag still not
set.
"""
logging.info('Waiting for system boot completed...')
self._adb.SendCommand('wait-for-device')
# Now the device is there, but the system has not finished booting.
# Query the sys.boot_completed flag with a basic command
boot_completed = False
attempts = 0
wait_period = 5
while not boot_completed and (attempts * wait_period) < wait_time:
output = self.system_properties['sys.boot_completed']
output = output.strip()
if output == '1':
boot_completed = True
else:
# If 'error: xxx' returned when querying the flag, it means
# adb server lost the connection to the emulator, so restart the adb
# server.
if 'error:' in output:
self.RestartAdbServer()
time.sleep(wait_period)
attempts += 1
if not boot_completed:
raise errors.WaitForResponseTimedOutError(
'sys.boot_completed flag was not set after %s seconds' % wait_time)
def WaitForSdCardReady(self, timeout_time):
"""Wait for the SD card ready before pushing data into it."""
logging.info('Waiting for SD card ready...')
sdcard_ready = False
attempts = 0
wait_period = 5
external_storage = self.GetExternalStorage()
while not sdcard_ready and attempts * wait_period < timeout_time:
output = self.RunShellCommand('ls ' + external_storage)
if output:
sdcard_ready = True
else:
time.sleep(wait_period)
attempts += 1
if not sdcard_ready:
raise errors.WaitForResponseTimedOutError(
'SD card not ready after %s seconds' % timeout_time)
def GetAndroidToolStatusAndOutput(self, command, lib_path=None, *args, **kw):
"""Runs a native Android binary, wrapping the command as necessary.
This is a specialization of GetShellCommandStatusAndOutput, which is meant
for running tools/android/ binaries and handles properly: (1) setting the
lib path (for component=shared_library), (2) using the PIE wrapper on ICS.
See crbug.com/373219 for more context.
Args:
command: String containing the command to send.
lib_path: (optional) path to the folder containing the dependent libs.
Same other arguments of GetCmdStatusAndOutput.
"""
# The first time this command is run the device is inspected to check
# whether a wrapper for running a PIE executable is needed (only on Android
# ICS) or not. The result is cached, so the wrapper is pushed only once.
if self._pie_wrapper is None:
# None: did not check; '': did check and not needed; '/path': use /path.
self._pie_wrapper = ''
if self.GetBuildId().startswith('I'): # Ixxxx = Android ICS.
run_pie_dist_path = os.path.join(constants.GetOutDirectory(), 'run_pie')
assert os.path.exists(run_pie_dist_path), 'Please build run_pie'
# The PIE loader must be pushed manually (i.e. no PushIfNeeded) because
# PushIfNeeded requires md5sum and md5sum requires the wrapper as well.
adb_command = 'push %s %s' % (run_pie_dist_path, PIE_WRAPPER_PATH)
assert _HasAdbPushSucceeded(self._adb.SendCommand(adb_command))
self._pie_wrapper = PIE_WRAPPER_PATH
if self._pie_wrapper:
command = '%s %s' % (self._pie_wrapper, command)
if lib_path:
command = 'LD_LIBRARY_PATH=%s %s' % (lib_path, command)
return self.GetShellCommandStatusAndOutput(command, *args, **kw)
# It is tempting to turn this function into a generator, however this is not
# possible without using a private (local) adb_shell instance (to ensure no
# other command interleaves usage of it), which would defeat the main aim of
# being able to reuse the adb shell instance across commands.
def RunShellCommand(self, command, timeout_time=20, log_result=False):
"""Send a command to the adb shell and return the result.
Args:
command: String containing the shell command to send.
timeout_time: Number of seconds to wait for command to respond before
retrying, used by AdbInterface.SendShellCommand.
log_result: Boolean to indicate whether we should log the result of the
shell command.
Returns:
list containing the lines of output received from running the command
"""
self._LogShell(command)
if "'" in command:
command = command.replace('\'', '\'\\\'\'')
result = self._adb.SendShellCommand(
"'%s'" % command, timeout_time).splitlines()
# TODO(b.kelemen): we should really be able to drop the stderr of the
# command or raise an exception based on what the caller wants.
result = [ l for l in result if not l.startswith('WARNING') ]
if ['error: device not found'] == result:
raise errors.DeviceUnresponsiveError('device not found')
if log_result:
self._LogShell('\n'.join(result))
return result
def GetShellCommandStatusAndOutput(self, command, timeout_time=20,
log_result=False):
"""See RunShellCommand() above.
Returns:
The tuple (exit code, list of output lines).
"""
lines = self.RunShellCommand(
command + '; echo %$?', timeout_time, log_result)
last_line = lines[-1]
status_pos = last_line.rfind('%')
assert status_pos >= 0
status = int(last_line[status_pos + 1:])
if status_pos == 0:
lines = lines[:-1]
else:
lines = lines[:-1] + [last_line[:status_pos]]
return (status, lines)
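# Illustrative example (hypothetical command): GetShellCommandStatusAndOutput(
# 'ls /missing') runs "ls /missing; echo %$?" on the device. If the shell
# prints 'ls: /missing: No such file or directory' followed by '%1' from the
# echo, the trailing '%1' sentinel line is stripped and the method returns
# (1, ['ls: /missing: No such file or directory']).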
def KillAll(self, process, signum=9, with_su=False):
"""Android version of killall, connected via adb.
Args:
process: name of the process to kill off.
signum: signal to use, 9 (SIGKILL) by default.
with_su: whether or not to use su to kill the processes.
Returns:
the number of processes killed
"""
pids = self.ExtractPid(process)
if pids:
cmd = 'kill -%d %s' % (signum, ' '.join(pids))
if with_su:
self.RunShellCommandWithSU(cmd)
else:
self.RunShellCommand(cmd)
return len(pids)
def KillAllBlocking(self, process, timeout_sec, signum=9, with_su=False):
"""Blocking version of killall, connected via adb.
This waits until no process matching the corresponding name appears in ps'
output anymore.
Args:
process: name of the process to kill off
timeout_sec: the timeout in seconds
signum: same as |KillAll|
with_su: same as |KillAll|
Returns:
the number of processes killed
"""
processes_killed = self.KillAll(process, signum=signum, with_su=with_su)
if processes_killed:
elapsed = 0
wait_period = 0.1
# Note that this doesn't take into account the time spent in ExtractPid().
while self.ExtractPid(process) and elapsed < timeout_sec:
time.sleep(wait_period)
elapsed += wait_period
if elapsed >= timeout_sec:
return processes_killed - len(self.ExtractPid(process))
return processes_killed
@staticmethod
def _GetActivityCommand(package, activity, wait_for_completion, action,
category, data, extras, trace_file_name, force_stop,
flags):
"""Creates command to start |package|'s activity on the device.
Args - as for StartActivity
Returns:
the command to run on the target to start the activity
"""
cmd = 'am start -a %s' % action
if force_stop:
cmd += ' -S'
if wait_for_completion:
cmd += ' -W'
if category:
cmd += ' -c %s' % category
if package and activity:
cmd += ' -n %s/%s' % (package, activity)
if data:
cmd += ' -d "%s"' % data
if extras:
for key in extras:
value = extras[key]
if isinstance(value, str):
cmd += ' --es'
elif isinstance(value, bool):
cmd += ' --ez'
elif isinstance(value, int):
cmd += ' --ei'
else:
raise NotImplementedError(
'Need to teach StartActivity how to pass %s extras' % type(value))
cmd += ' %s %s' % (key, value)
if trace_file_name:
cmd += ' --start-profiler ' + trace_file_name
if flags:
cmd += ' -f %s' % flags
return cmd
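# Illustrative example (hypothetical package): with the default VIEW action,
# package='com.example.app', activity='.Main', data='http://example.com/' and
# force_stop=True, the command built above would be:
#   am start -a android.intent.action.VIEW -S -n com.example.app/.Main -d "http://example.com/"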
def StartActivity(self, package, activity, wait_for_completion=False,
action='android.intent.action.VIEW',
category=None, data=None,
extras=None, trace_file_name=None,
force_stop=False, flags=None):
"""Starts |package|'s activity on the device.
Args:
package: Name of package to start (e.g. 'com.google.android.apps.chrome').
activity: Name of activity (e.g. '.Main' or
'com.google.android.apps.chrome.Main').
wait_for_completion: wait for the activity to finish launching (-W flag).
action: string (e.g. "android.intent.action.MAIN"). Default is VIEW.
category: string (e.g. "android.intent.category.HOME")
data: Data string to pass to activity (e.g. 'http://www.example.com/').
extras: Dict of extras to pass to activity. Values are significant.
trace_file_name: If used, turns on and saves the trace to this file name.
force_stop: force stop the target app before starting the activity (-S
flag).
Returns:
The output of the underlying command as a list of lines.
"""
cmd = self._GetActivityCommand(package, activity, wait_for_completion,
action, category, data, extras,
trace_file_name, force_stop, flags)
return self.RunShellCommand(cmd)
def StartActivityTimed(self, package, activity, wait_for_completion=False,
action='android.intent.action.VIEW',
category=None, data=None,
extras=None, trace_file_name=None,
force_stop=False, flags=None):
"""Starts |package|'s activity on the device, returning the start time
Args - as for StartActivity
Returns:
A tuple containing:
- the output of the underlying command as a list of lines, and
- a timestamp string for the time at which the activity started
"""
cmd = self._GetActivityCommand(package, activity, wait_for_completion,
action, category, data, extras,
trace_file_name, force_stop, flags)
self.StartMonitoringLogcat()
out = self.RunShellCommand('log starting activity; ' + cmd)
activity_started_re = re.compile('.*starting activity.*')
m = self.WaitForLogMatch(activity_started_re, None)
assert m
start_line = m.group(0)
return (out, GetLogTimestamp(start_line, self.GetDeviceYear()))
def StartCrashUploadService(self, package):
# TODO(frankf): We really need a python wrapper around Intent
# to be shared with StartActivity/BroadcastIntent.
cmd = (
'am startservice -a %s.crash.ACTION_FIND_ALL -n '
'%s/%s.crash.MinidumpUploadService' %
(constants.PACKAGE_INFO['chrome'].package,
package,
constants.PACKAGE_INFO['chrome'].package))
am_output = self.RunShellCommandWithSU(cmd)
assert am_output and 'Starting' in am_output[-1], (
'Service failed to start: %s' % am_output)
time.sleep(15)
def BroadcastIntent(self, package, intent, *args):
"""Send a broadcast intent.
Args:
package: Name of package containing the intent.
intent: Name of the intent.
args: Optional extra arguments for the intent.
"""
cmd = 'am broadcast -a %s.%s %s' % (package, intent, ' '.join(args))
self.RunShellCommand(cmd)
def GoHome(self):
"""Tell the device to return to the home screen. Blocks until completion."""
self.RunShellCommand('am start -W '
'-a android.intent.action.MAIN -c android.intent.category.HOME')
def CloseApplication(self, package):
"""Attempt to close down the application, using increasing violence.
Args:
package: Name of the process to kill off, e.g.
com.google.android.apps.chrome
"""
self.RunShellCommand('am force-stop ' + package)
def GetApplicationPath(self, package):
"""Get the installed apk path on the device for the given package.
Args:
package: Name of the package.
Returns:
Path to the apk on the device if it exists, None otherwise.
"""
pm_path_output = self.RunShellCommand('pm path ' + package)
# The path output contains anything if and only if the package
# exists.
if pm_path_output:
# pm_path_output is of the form: "package:/path/to/foo.apk"
return pm_path_output[0].split(':')[1]
else:
return None
def ClearApplicationState(self, package):
"""Closes and clears all state for the given |package|."""
# Check that the package exists before clearing it. Necessary because
# calling pm clear on a package that doesn't exist may never return.
pm_path_output = self.RunShellCommand('pm path ' + package)
# The path output contains anything if and only if the package exists.
if pm_path_output:
self.RunShellCommand('pm clear ' + package)
def SendKeyEvent(self, keycode):
"""Sends keycode to the device.
Args:
keycode: Numeric keycode to send (see "enum" at top of file).
"""
self.RunShellCommand('input keyevent %d' % keycode)
def _RunMd5Sum(self, host_path, device_path):
"""Gets the md5sum of a host path and device path.
Args:
host_path: Path (file or directory) on the host.
device_path: Path on the device.
Returns:
A tuple containing lists of the host and device md5sum results as
created by _ParseMd5SumOutput().
"""
md5sum_dist_path = os.path.join(constants.GetOutDirectory(),
'md5sum_dist')
assert os.path.exists(md5sum_dist_path), 'Please build md5sum.'
md5sum_dist_mtime = os.stat(md5sum_dist_path).st_mtime
if (md5sum_dist_path not in self._push_if_needed_cache or
self._push_if_needed_cache[md5sum_dist_path] != md5sum_dist_mtime):
command = 'push %s %s' % (md5sum_dist_path, MD5SUM_DEVICE_FOLDER)
assert _HasAdbPushSucceeded(self._adb.SendCommand(command))
self._push_if_needed_cache[md5sum_dist_path] = md5sum_dist_mtime
(_, md5_device_output) = self.GetAndroidToolStatusAndOutput(
self._util_wrapper + ' ' + MD5SUM_DEVICE_PATH + ' ' + device_path,
lib_path=MD5SUM_DEVICE_FOLDER,
timeout_time=2 * 60)
device_hash_tuples = _ParseMd5SumOutput(md5_device_output)
assert os.path.exists(host_path), 'Local path not found %s' % host_path
md5sum_output = cmd_helper.GetCmdOutput(
[os.path.join(constants.GetOutDirectory(), 'md5sum_bin_host'),
host_path])
host_hash_tuples = _ParseMd5SumOutput(md5sum_output.splitlines())
return (host_hash_tuples, device_hash_tuples)
def GetFilesChanged(self, host_path, device_path, ignore_filenames=False):
"""Compares the md5sum of a host path against a device path.
Note: Ignores extra files on the device.
Args:
host_path: Path (file or directory) on the host.
device_path: Path on the device.
ignore_filenames: If True only the file contents are considered when
checking whether a file has changed, otherwise the relative path
must also match.
Returns:
A list of tuples of the form (host_path, device_path) for files whose
md5sums do not match.
"""
# Md5Sum resolves symbolic links in path names so the calculation of
# relative path names from its output will need the real path names of the
# base directories. Having calculated these they are used throughout the
# function since this makes us less subject to any future changes to Md5Sum.
real_host_path = os.path.realpath(host_path)
real_device_path = self.RunShellCommand('realpath "%s"' % device_path)[0]
host_hash_tuples, device_hash_tuples = self._RunMd5Sum(
real_host_path, real_device_path)
if len(host_hash_tuples) > len(device_hash_tuples):
logging.info('%s files do not exist on the device' %
(len(host_hash_tuples) - len(device_hash_tuples)))
host_rel = [(os.path.relpath(os.path.normpath(t.path), real_host_path),
t.hash)
for t in host_hash_tuples]
if os.path.isdir(real_host_path):
def RelToRealPaths(rel_path):
return (os.path.join(real_host_path, rel_path),
os.path.join(real_device_path, rel_path))
else:
assert len(host_rel) == 1
def RelToRealPaths(_):
return (real_host_path, real_device_path)
if ignore_filenames:
# If we are ignoring file names, then we want to push any file for which
# a file with an equivalent MD5 sum does not exist on the device.
device_hashes = set([h.hash for h in device_hash_tuples])
ShouldPush = lambda p, h: h not in device_hashes
else:
# Otherwise, we want to push any file on the host for which a file with
# an equivalent MD5 sum does not exist at the same relative path on the
# device.
device_rel = dict([(os.path.relpath(os.path.normpath(t.path),
real_device_path),
t.hash)
for t in device_hash_tuples])
ShouldPush = lambda p, h: p not in device_rel or h != device_rel[p]
return [RelToRealPaths(path) for path, host_hash in host_rel
if ShouldPush(path, host_hash)]
def PushIfNeeded(self, host_path, device_path):
"""Pushes |host_path| to |device_path|.
Works for files and directories. This method skips copying any files under
|host_path| that already exist on the device with the same hash.
All pushed files can be removed by calling RemovePushedFiles().
"""
MAX_INDIVIDUAL_PUSHES = 50
if not os.path.exists(host_path):
raise device_errors.CommandFailedError(
'Local path not found %s' % host_path, device=str(self))
# See if the file on the host changed since the last push (if any) and
# return early if it didn't. Note that this shortcut assumes that the tests
# on the device don't modify the files.
if not os.path.isdir(host_path):
if host_path in self._push_if_needed_cache:
host_path_mtime = self._push_if_needed_cache[host_path]
if host_path_mtime == os.stat(host_path).st_mtime:
return
size = host_utils.GetRecursiveDiskUsage(host_path)
self._pushed_files.append(device_path)
self._potential_push_size += size
if os.path.isdir(host_path):
self.RunShellCommand('mkdir -p "%s"' % device_path)
changed_files = self.GetFilesChanged(host_path, device_path)
logging.info('Found %d files that need to be pushed to %s',
len(changed_files), device_path)
if not changed_files:
return
def Push(host, device):
# NOTE: We can't use adb_interface.Push() because it hardcodes a timeout
# of 60 seconds which isn't sufficient for a lot of users of this method.
push_command = 'push %s %s' % (host, device)
self._LogShell(push_command)
# Retry push with increasing backoff if the device is busy.
retry = 0
while True:
output = self._adb.SendCommand(push_command, timeout_time=30 * 60)
if _HasAdbPushSucceeded(output):
if not os.path.isdir(host_path):
self._push_if_needed_cache[host] = os.stat(host).st_mtime
return
if retry < 3:
retry += 1
wait_time = 5 * retry
logging.error('Push failed, retrying in %d seconds: %s' %
(wait_time, output))
time.sleep(wait_time)
else:
raise Exception('Push failed: %s' % output)
diff_size = 0
if len(changed_files) <= MAX_INDIVIDUAL_PUSHES:
diff_size = sum(host_utils.GetRecursiveDiskUsage(f[0])
for f in changed_files)
# TODO(craigdh): Replace this educated guess with a heuristic that
# approximates the push time for each method.
if len(changed_files) > MAX_INDIVIDUAL_PUSHES or diff_size > 0.5 * size:
self._actual_push_size += size
Push(host_path, device_path)
else:
for f in changed_files:
Push(f[0], f[1])
self._actual_push_size += diff_size
def GetPushSizeInfo(self):
"""Get total size of pushes to the device done via PushIfNeeded()
Returns:
A tuple:
1. Total size of push requests to PushIfNeeded (MB)
2. Total size that was actually pushed (MB)
"""
return (self._potential_push_size, self._actual_push_size)
def GetFileContents(self, filename, log_result=False):
"""Gets contents from the file specified by |filename|."""
return self.RunShellCommand('cat "%s" 2>/dev/null' % filename,
log_result=log_result)
def SetFileContents(self, filename, contents):
"""Writes |contents| to the file specified by |filename|."""
with tempfile.NamedTemporaryFile() as f:
f.write(contents)
f.flush()
self._adb.Push(f.name, filename)
def RunShellCommandWithSU(self, command, timeout_time=20, log_result=False):
return self.RunShellCommand('su -c %s' % command, timeout_time, log_result)
def CanAccessProtectedFileContents(self):
"""Returns True if Get/SetProtectedFileContents would work via "su" or adb
shell running as root.
Devices running user builds don't have adb root, but may provide "su" which
can be used for accessing protected files.
"""
return (self._GetProtectedFileCommandRunner() != None)
def _GetProtectedFileCommandRunner(self):
"""Finds the best method to access protected files on the device.
Returns:
1. None when privileged files cannot be accessed on the device.
2. Otherwise: A function taking a single parameter: a string with command
line arguments. Running that function executes the command with
the appropriate method.
"""
if self._protected_file_access_method_initialized:
return self._privileged_command_runner
self._privileged_command_runner = None
self._protected_file_access_method_initialized = True
for cmd in [self.RunShellCommand, self.RunShellCommandWithSU]:
# Get contents of the auxv vector for the init(8) process from a small
# binary file that always exists on linux and is always read-protected.
contents = cmd('cat /proc/1/auxv')
# The leading 4 or 8-bytes of auxv vector is a_type. There are not many
# reserved a_type values, hence byte 2 must always be '\0' for a realistic
# auxv. See /usr/include/elf.h.
if len(contents) > 0 and (contents[0][2] == '\0'):
self._privileged_command_runner = cmd
break
return self._privileged_command_runner
def GetProtectedFileContents(self, filename):
"""Gets contents from the protected file specified by |filename|.
This is potentially less efficient than GetFileContents.
"""
command = 'cat "%s" 2> /dev/null' % filename
command_runner = self._GetProtectedFileCommandRunner()
if command_runner:
return command_runner(command)
else:
logging.warning('Could not access protected file: %s' % filename)
return []
def SetProtectedFileContents(self, filename, contents):
"""Writes |contents| to the protected file specified by |filename|.
This is less efficient than SetFileContents.
"""
with DeviceTempFile(self) as temp_file:
with DeviceTempFile(self, suffix=".sh") as temp_script:
# Put the contents in a temporary file
self.SetFileContents(temp_file.name, contents)
# Create a script to copy the file contents to its final destination
self.SetFileContents(temp_script.name,
'cat %s > %s' % (temp_file.name, filename))
command = 'sh %s' % temp_script.name
command_runner = self._GetProtectedFileCommandRunner()
if command_runner:
return command_runner(command)
else:
logging.warning(
'Could not set contents of protected file: %s' % filename)
def RemovePushedFiles(self):
"""Removes all files pushed with PushIfNeeded() from the device."""
for p in self._pushed_files:
self.RunShellCommand('rm -r %s' % p, timeout_time=2 * 60)
def ListPathContents(self, path):
"""Lists files in all subdirectories of |path|.
Args:
path: The path to list.
Returns:
A dict of {"name": (size, lastmod), ...}.
"""
# Example output:
# /foo/bar:
# -rw-r----- user group 102 2011-05-12 12:29:54.131623387 +0100 baz.txt
re_file = re.compile('^-(?P<perms>[^\s]+)\s+'
'(?P<user>[^\s]+)\s+'
'(?P<group>[^\s]+)\s+'
'(?P<size>[^\s]+)\s+'
'(?P<date>[^\s]+)\s+'
'(?P<time>[^\s]+)\s+'
'(?P<filename>[^\s]+)$')
return _GetFilesFromRecursiveLsOutput(
path, self.RunShellCommand('ls -lR %s' % path), re_file,
self.GetUtcOffset())
def GetUtcOffset(self):
if not self._device_utc_offset:
self._device_utc_offset = self.RunShellCommand('date +%z')[0]
return self._device_utc_offset
def SetJavaAssertsEnabled(self, enable):
"""Sets or removes the device java assertions property.
Args:
enable: If True the property will be set.
Returns:
True if the file was modified (reboot is required for it to take effect).
"""
# First ensure the desired property is persisted.
temp_props_file = tempfile.NamedTemporaryFile()
properties = ''
if self._adb.Pull(LOCAL_PROPERTIES_PATH, temp_props_file.name):
with open(temp_props_file.name) as f:
properties = f.read()
re_search = re.compile(r'^\s*' + re.escape(JAVA_ASSERT_PROPERTY) +
r'\s*=\s*all\s*$', re.MULTILINE)
if enable != bool(re.search(re_search, properties)):
re_replace = re.compile(r'^\s*' + re.escape(JAVA_ASSERT_PROPERTY) +
r'\s*=\s*\w+\s*$', re.MULTILINE)
properties = re.sub(re_replace, '', properties)
if enable:
properties += '\n%s=all\n' % JAVA_ASSERT_PROPERTY
file(temp_props_file.name, 'w').write(properties)
self._adb.Push(temp_props_file.name, LOCAL_PROPERTIES_PATH)
# Next, check the current runtime value is what we need, and
# if not, set it and report that a reboot is required.
was_set = 'all' in self.system_properties[JAVA_ASSERT_PROPERTY]
if was_set == enable:
return False
self.system_properties[JAVA_ASSERT_PROPERTY] = enable and 'all' or ''
return True
def GetBuildId(self):
"""Returns the build ID of the system (e.g. JRM79C)."""
build_id = self.system_properties['ro.build.id']
assert build_id
return build_id
def GetBuildType(self):
"""Returns the build type of the system (e.g. eng)."""
build_type = self.system_properties['ro.build.type']
assert build_type
return build_type
def GetBuildProduct(self):
"""Returns the build product of the device (e.g. maguro)."""
build_product = self.system_properties['ro.build.product']
assert build_product
return build_product
def GetProductName(self):
"""Returns the product name of the device (e.g. takju)."""
name = self.system_properties['ro.product.name']
assert name
return name
def GetBuildFingerprint(self):
"""Returns the build fingerprint of the device."""
build_fingerprint = self.system_properties['ro.build.fingerprint']
assert build_fingerprint
return build_fingerprint
def GetDescription(self):
"""Returns the description of the system.
For example, "yakju-userdebug 4.1 JRN54F 364167 dev-keys".
"""
description = self.system_properties['ro.build.description']
assert description
return description
def GetProductModel(self):
"""Returns the name of the product model (e.g. "Galaxy Nexus") """
model = self.system_properties['ro.product.model']
assert model
return model
def GetWifiIP(self):
"""Returns the wifi IP on the device."""
wifi_ip = self.system_properties['dhcp.wlan0.ipaddress']
# Do not assert here. Devices (e.g. emulators) may not have a WifiIP.
return wifi_ip
def GetSubscriberInfo(self):
"""Returns the device subscriber info (e.g. GSM and device ID) as string."""
iphone_sub = self.RunShellCommand('dumpsys iphonesubinfo')
# Do not assert here. Devices (e.g. Nakasi on K) may not have iphonesubinfo.
return '\n'.join(iphone_sub)
def GetBatteryInfo(self):
"""Returns a {str: str} dict of battery info (e.g. status, level, etc)."""
battery = self.RunShellCommand('dumpsys battery')
assert battery
battery_info = {}
for line in battery[1:]:
k, _, v = line.partition(': ')
battery_info[k.strip()] = v.strip()
return battery_info
def GetSetupWizardStatus(self):
"""Returns the status of the device setup wizard (e.g. DISABLED)."""
status = self.system_properties['ro.setupwizard.mode']
# On some devices, the status is empty if not otherwise set. In such cases
# the caller should expect an empty string to be returned.
return status
def StartMonitoringLogcat(self, clear=True, logfile=None, filters=None):
"""Starts monitoring the output of logcat, for use with WaitForLogMatch.
Args:
clear: If True the existing logcat output will be cleared, to avoid
matching historical output lurking in the log.
filters: A list of logcat filters to be used.
"""
if clear:
self.RunShellCommand('logcat -c')
args = []
if self._adb._target_arg:
args += shlex.split(self._adb._target_arg)
args += ['logcat', '-v', 'threadtime']
if filters:
args.extend(filters)
else:
args.append('*:v')
if logfile:
logfile = NewLineNormalizer(logfile)
# Spawn logcat and synchronize with it.
for _ in range(4):
self._logcat = pexpect.spawn(constants.GetAdbPath(), args, timeout=10,
logfile=logfile)
if not clear or self.SyncLogCat():
break
self._logcat.close(force=True)
else:
logging.critical('Error reading from logcat: ' + str(self._logcat.match))
sys.exit(1)
def SyncLogCat(self):
"""Synchronize with logcat.
Synchronize with the monitored logcat so that WaitForLogMatch will only
consider new message that are received after this point in time.
Returns:
True if the synchronization succeeded.
"""
assert self._logcat
tag = 'logcat_sync_%s' % time.time()
self.RunShellCommand('log ' + tag)
return self._logcat.expect([tag, pexpect.EOF, pexpect.TIMEOUT]) == 0
def GetMonitoredLogCat(self):
"""Returns an "adb logcat" command as created by pexpected.spawn."""
if not self._logcat:
self.StartMonitoringLogcat(clear=False)
return self._logcat
def WaitForLogMatch(self, success_re, error_re, clear=False, timeout=10):
"""Blocks until a matching line is logged or a timeout occurs.
Args:
success_re: A compiled re to search each line for.
error_re: A compiled re which, if found, terminates the search for
|success_re|. If None is given, no error condition will be detected.
clear: If True the existing logcat output will be cleared, defaults to
false.
timeout: Timeout in seconds to wait for a log match.
Raises:
pexpect.TIMEOUT after |timeout| seconds without a match for |success_re|
or |error_re|.
Returns:
The re match object if |success_re| is matched first or None if |error_re|
is matched first.
"""
logging.info('<<< Waiting for logcat:' + str(success_re.pattern))
t0 = time.time()
while True:
if not self._logcat:
self.StartMonitoringLogcat(clear)
try:
while True:
# Note this will block for up to the timeout _per log line_, so we need
# to calculate the overall timeout remaining since t0.
time_remaining = t0 + timeout - time.time()
if time_remaining < 0:
raise pexpect.TIMEOUT(self._logcat)
self._logcat.expect(PEXPECT_LINE_RE, timeout=time_remaining)
line = self._logcat.match.group(1)
if error_re:
error_match = error_re.search(line)
if error_match:
return None
success_match = success_re.search(line)
if success_match:
return success_match
logging.info('<<< Skipped Logcat Line:' + str(line))
except pexpect.TIMEOUT:
raise pexpect.TIMEOUT(
'Timeout (%ds) exceeded waiting for pattern "%s" (tip: use -vv '
'to debug)' %
(timeout, success_re.pattern))
except pexpect.EOF:
# It seems that sometimes logcat can end unexpectedly. This seems
# to happen during Chrome startup after a reboot followed by a cache
# clean. I don't understand why this happens, but this code deals with
# getting EOF in logcat.
logging.critical('Found EOF in adb logcat. Restarting...')
# Rerun spawn with original arguments. Note that self._logcat.args[0] is
# the path of adb, so we don't want it in the arguments.
self._logcat = pexpect.spawn(constants.GetAdbPath(),
self._logcat.args[1:],
timeout=self._logcat.timeout,
logfile=self._logcat.logfile)
def StartRecordingLogcat(self, clear=True, filters=None):
"""Starts recording logcat output to eventually be saved as a string.
This call should come before some series of tests are run, with either
StopRecordingLogcat or SearchLogcatRecord following the tests.
Args:
clear: True if existing log output should be cleared.
filters: A list of logcat filters to be used.
"""
if not filters:
filters = ['*:v']
if clear:
self._adb.SendCommand('logcat -c')
logcat_command = 'adb %s logcat -v threadtime %s' % (self._adb._target_arg,
' '.join(filters))
self._logcat_tmpoutfile = tempfile.NamedTemporaryFile(bufsize=0)
self.logcat_process = subprocess.Popen(logcat_command, shell=True,
stdout=self._logcat_tmpoutfile)
def GetCurrentRecordedLogcat(self):
"""Return the current content of the logcat being recorded.
Call this after StartRecordingLogcat() and before StopRecordingLogcat().
This can be useful to perform timed polling/parsing.
Returns:
Current logcat output as a single string, or None if
StopRecordingLogcat() was already called.
"""
if not self._logcat_tmpoutfile:
return None
with open(self._logcat_tmpoutfile.name) as f:
return f.read()
def StopRecordingLogcat(self):
"""Stops an existing logcat recording subprocess and returns output.
Returns:
The logcat output as a string or an empty string if logcat was not
being recorded at the time.
"""
if not self.logcat_process:
return ''
# Cannot evaluate poll() for truthiness directly, as 0 is a possible value.
# Better to read self.logcat_process.stdout before killing the process;
# otherwise communicate may return incomplete output due to a broken pipe.
if self.logcat_process.poll() is None:
self.logcat_process.kill()
self.logcat_process.wait()
self.logcat_process = None
self._logcat_tmpoutfile.seek(0)
output = self._logcat_tmpoutfile.read()
self._logcat_tmpoutfile.close()
self._logcat_tmpoutfile = None
return output
@staticmethod
def SearchLogcatRecord(record, message, thread_id=None, proc_id=None,
log_level=None, component=None):
"""Searches the specified logcat output and returns results.
This method searches through the logcat output specified by record for a
certain message, narrowing results by matching them against any other
specified criteria. It returns all matching lines as described below.
Args:
record: A string generated by Start/StopRecordingLogcat to search.
message: An output string to search for.
thread_id: The thread id that is the origin of the message.
proc_id: The process that is the origin of the message.
log_level: The log level of the message.
component: The name of the component that would create the message.
Returns:
A list of dictionaries representing matching entries, each containing keys
thread_id, proc_id, log_level, component, and message.
"""
if thread_id:
thread_id = str(thread_id)
if proc_id:
proc_id = str(proc_id)
results = []
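# Capture thread id, process id, log level, component and message from each
# recorded logcat line.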
reg = re.compile('(\d+)\s+(\d+)\s+([A-Z])\s+([A-Za-z]+)\s*:(.*)$',
re.MULTILINE)
log_list = reg.findall(record)
for (tid, pid, log_lev, comp, msg) in log_list:
if ((not thread_id or thread_id == tid) and
(not proc_id or proc_id == pid) and
(not log_level or log_level == log_lev) and
(not component or component == comp) and msg.find(message) > -1):
match = dict({'thread_id': tid, 'proc_id': pid,
'log_level': log_lev, 'component': comp,
'message': msg})
results.append(match)
return results
def ExtractPid(self, process_name):
"""Extracts Process Ids for a given process name from Android Shell.
Args:
process_name: name of the process on the device.
Returns:
List of all the process ids (as strings) that match the given name.
If the name of a process exactly matches the given name, the pid of
that process will be inserted to the front of the pid list.
"""
pids = []
for line in self.RunShellCommand('ps', log_result=False):
data = line.split()
try:
if process_name in data[-1]: # name is in the last column
if process_name == data[-1]:
pids.insert(0, data[1]) # PID is in the second column
else:
pids.append(data[1])
except IndexError:
pass
return pids
def GetIoStats(self):
"""Gets cumulative disk IO stats since boot (for all processes).
Returns:
Dict of {num_reads, num_writes, read_ms, write_ms} or None if there
was an error.
"""
IoStats = collections.namedtuple(
'IoStats',
['device',
'num_reads_issued',
'num_reads_merged',
'num_sectors_read',
'ms_spent_reading',
'num_writes_completed',
'num_writes_merged',
'num_sectors_written',
'ms_spent_writing',
'num_ios_in_progress',
'ms_spent_doing_io',
'ms_spent_doing_io_weighted',
])
for line in self.GetFileContents('/proc/diskstats', log_result=False):
fields = line.split()
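# fields[2] is the device name; the columns after it are numeric I/O counters.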
stats = IoStats._make([fields[2]] + [int(f) for f in fields[3:]])
if stats.device == 'mmcblk0':
return {
'num_reads': stats.num_reads_issued,
'num_writes': stats.num_writes_completed,
'read_ms': stats.ms_spent_reading,
'write_ms': stats.ms_spent_writing,
}
logging.warning('Could not find disk IO stats.')
return None
def GetMemoryUsageForPid(self, pid):
"""Returns the memory usage for given pid.
Args:
pid: The pid number of the specific process running on device.
Returns:
Dict of {metric:usage_kb}, for the process which has specified pid.
The metric keys which may be included are: Size, Rss, Pss, Shared_Clean,
Shared_Dirty, Private_Clean, Private_Dirty, VmHWM.
"""
showmap = self.RunShellCommand('showmap %d' % pid)
if not showmap or not showmap[-1].endswith('TOTAL'):
logging.warning('Invalid output for showmap %s', str(showmap))
return {}
items = showmap[-1].split()
if len(items) != 9:
logging.warning('Invalid TOTAL for showmap %s', str(items))
return {}
usage_dict = collections.defaultdict(int)
usage_dict.update({
'Size': int(items[0].strip()),
'Rss': int(items[1].strip()),
'Pss': int(items[2].strip()),
'Shared_Clean': int(items[3].strip()),
'Shared_Dirty': int(items[4].strip()),
'Private_Clean': int(items[5].strip()),
'Private_Dirty': int(items[6].strip()),
})
peak_value_kb = 0
for line in self.GetProtectedFileContents('/proc/%s/status' % pid):
if not line.startswith('VmHWM:'): # Format: 'VmHWM: +[0-9]+ kB'
continue
peak_value_kb = int(line.split(':')[1].strip().split(' ')[0])
break
usage_dict['VmHWM'] = peak_value_kb
if not peak_value_kb:
logging.warning('Could not find memory peak value for pid ' + str(pid))
return usage_dict
def ProcessesUsingDevicePort(self, device_port):
"""Lists processes using the specified device port on loopback interface.
Args:
device_port: Port on device we want to check.
Returns:
A list of (pid, process_name) tuples using the specified port.
"""
tcp_results = self.RunShellCommand('cat /proc/net/tcp', log_result=False)
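# /proc/net/tcp reports local addresses as little-endian hex IP:port, so
# 127.0.0.1 appears as 0100007F.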
tcp_address = '0100007F:%04X' % device_port
pids = []
for single_connect in tcp_results:
connect_results = single_connect.split()
# Column 1 is the TCP port, and Column 9 is the inode of the socket
if connect_results[1] == tcp_address:
socket_inode = connect_results[9]
socket_name = 'socket:[%s]' % socket_inode
lsof_results = self.RunShellCommand('lsof', log_result=False)
for single_process in lsof_results:
process_results = single_process.split()
# Ignore the line if it has less than nine columns in it, which may
# be the case when a process stops while lsof is executing.
if len(process_results) <= 8:
continue
# Column 0 is the executable name
# Column 1 is the pid
# Column 8 is the Inode in use
if process_results[8] == socket_name:
pids.append((int(process_results[1]), process_results[0]))
break
logging.info('PidsUsingDevicePort: %s', pids)
return pids
def FileExistsOnDevice(self, file_name):
"""Checks whether the given file exists on the device.
Args:
file_name: Full path of file to check.
Returns:
True if the file exists, False otherwise.
"""
assert '"' not in file_name, 'file_name cannot contain double quotes'
try:
status = self._adb.SendShellCommand(
'\'test -e "%s"; echo $?\'' % (file_name))
if 'test: not found' not in status:
return int(status) == 0
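# Fall back to 'ls' on older images where the 'test' binary is not available.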
status = self._adb.SendShellCommand(
'\'ls "%s" >/dev/null 2>&1; echo $?\'' % (file_name))
return int(status) == 0
except ValueError:
if IsDeviceAttached(self._device):
raise errors.DeviceUnresponsiveError('Device may be offline.')
return False
def IsFileWritableOnDevice(self, file_name):
"""Checks whether the given file (or directory) is writable on the device.
Args:
file_name: Full path of file/directory to check.
Returns:
True if writable, False otherwise.
"""
assert '"' not in file_name, 'file_name cannot contain double quotes'
try:
status = self._adb.SendShellCommand(
'\'test -w "%s"; echo $?\'' % (file_name))
if 'test: not found' not in status:
return int(status) == 0
raise errors.AbortError('"test" binary not found. OS too old.')
except ValueError:
if IsDeviceAttached(self._device):
raise errors.DeviceUnresponsiveError('Device may be offline.')
return False
@staticmethod
def GetTimestamp():
return time.strftime('%Y-%m-%d-%H%M%S', time.localtime())
@staticmethod
def EnsureHostDirectory(host_file):
host_dir = os.path.dirname(os.path.abspath(host_file))
if not os.path.exists(host_dir):
os.makedirs(host_dir)
def TakeScreenshot(self, host_file=None):
"""Saves a screenshot image to |host_file| on the host.
Args:
host_file: Absolute path to the image file to store on the host or None to
use an autogenerated file name.
Returns:
Resulting host file name of the screenshot.
"""
host_file = os.path.abspath(host_file or
'screenshot-%s.png' % self.GetTimestamp())
self.EnsureHostDirectory(host_file)
device_file = '%s/screenshot.png' % self.GetExternalStorage()
self.RunShellCommand(
'/system/bin/screencap -p %s' % device_file)
self.PullFileFromDevice(device_file, host_file)
self.RunShellCommand('rm -f "%s"' % device_file)
return host_file
def PullFileFromDevice(self, device_file, host_file):
"""Download |device_file| on the device from to |host_file| on the host.
Args:
device_file: Absolute path to the file to retrieve from the device.
host_file: Absolute path to the file to store on the host.
"""
if not self._adb.Pull(device_file, host_file):
raise device_errors.AdbCommandFailedError(
['pull', device_file, host_file], 'Failed to pull file from device.')
assert os.path.exists(host_file)
def SetUtilWrapper(self, util_wrapper):
"""Sets a wrapper prefix to be used when running a locally-built
binary on the device (ex.: md5sum_bin).
"""
self._util_wrapper = util_wrapper
def RunUIAutomatorTest(self, test, test_package, timeout):
"""Runs a single uiautomator test.
Args:
test: Test class/method.
test_package: Name of the test jar.
timeout: Timeout time in seconds.
Returns:
An instance of am_instrument_parser.TestResult object.
"""
cmd = 'uiautomator runtest %s -e class %s' % (test_package, test)
self._LogShell(cmd)
output = self._adb.SendShellCommand(cmd, timeout_time=timeout)
# uiautomator doesn't fully conform to the instrumentation test runner
# convention and doesn't terminate with INSTRUMENTATION_CODE.
# Just assume the first result is valid.
(test_results, _) = am_instrument_parser.ParseAmInstrumentOutput(output)
if not test_results:
raise errors.InstrumentationError(
'no test results... device setup correctly?')
return test_results[0]
def DismissCrashDialogIfNeeded(self):
"""Dismiss the error/ANR dialog if present.
Returns: Name of the crashed package if a dialog is focused,
None otherwise.
"""
re_focus = re.compile(
r'\s*mCurrentFocus.*Application (Error|Not Responding): (\S+)}')
def _FindFocusedWindow():
match = None
for line in self.RunShellCommand('dumpsys window windows'):
match = re.match(re_focus, line)
if match:
break
return match
match = _FindFocusedWindow()
if not match:
return
package = match.group(2)
logging.warning('Trying to dismiss %s dialog for %s' % match.groups())
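# Move focus across the dialog buttons and press ENTER to dismiss it.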
self.SendKeyEvent(KEYCODE_DPAD_RIGHT)
self.SendKeyEvent(KEYCODE_DPAD_RIGHT)
self.SendKeyEvent(KEYCODE_ENTER)
match = _FindFocusedWindow()
if match:
logging.error('Still showing a %s dialog for %s' % match.groups())
return package
def EfficientDeviceDirectoryCopy(self, source, dest):
""" Copy a directory efficiently on the device
Uses a shell script running on the target to copy new and changed files the
source directory to the destination directory and remove added files. This
is in some cases much faster than cp -r.
Args:
source: absolute path of source directory
dest: absolute path of destination directory
"""
logging.info('In EfficientDeviceDirectoryCopy %s %s', source, dest)
with DeviceTempFile(self, suffix=".sh") as temp_script_file:
host_script_path = os.path.join(constants.DIR_SOURCE_ROOT,
'build',
'android',
'pylib',
'efficient_android_directory_copy.sh')
self._adb.Push(host_script_path, temp_script_file.name)
out = self.RunShellCommand(
'sh %s %s %s' % (temp_script_file.name, source, dest),
timeout_time=120)
if self._device:
device_repr = self._device[-4:]
else:
device_repr = '????'
for line in out:
logging.info('[%s]> %s', device_repr, line)
def _GetControlUsbChargingCommand(self):
if self._control_usb_charging_command['cached']:
return self._control_usb_charging_command['command']
self._control_usb_charging_command['cached'] = True
if not self.IsRootEnabled():
return None
for command in CONTROL_USB_CHARGING_COMMANDS:
# Assert command is valid.
assert 'disable_command' in command
assert 'enable_command' in command
assert 'witness_file' in command
witness_file = command['witness_file']
if self.FileExistsOnDevice(witness_file):
self._control_usb_charging_command['command'] = command
return command
return None
def CanControlUsbCharging(self):
return self._GetControlUsbChargingCommand() is not None
def DisableUsbCharging(self, timeout=10):
command = self._GetControlUsbChargingCommand()
if not command:
raise Exception('Unable to act on usb charging.')
disable_command = command['disable_command']
t0 = time.time()
# Do not loop directly on self.IsDeviceCharging to cut the number of calls
# to the device.
while True:
if t0 + timeout - time.time() < 0:
raise pexpect.TIMEOUT('Unable to disable USB charging in time: %s' % (
self.GetBatteryInfo()))
self.RunShellCommand(disable_command)
if not self.IsDeviceCharging():
break
def EnableUsbCharging(self, timeout=10):
command = self._GetControlUsbChargingCommand()
if not command:
raise Exception('Unable to act on usb charging.')
enable_command = command['enable_command']
t0 = time.time()
# Do not loop directly on self.IsDeviceCharging to cut the number of calls
# to the device.
while True:
if t0 + timeout - time.time() < 0:
raise pexpect.TIMEOUT('Unable to enable USB charging in time.')
self.RunShellCommand(enable_command)
if self.IsDeviceCharging():
break
def IsDeviceCharging(self):
for line in self.RunShellCommand('dumpsys battery'):
if 'powered: ' in line:
if line.split('powered: ')[1] == 'true':
return True
class NewLineNormalizer(object):
"""A file-like object to normalize EOLs to '\n'.
Pexpect runs adb within a pseudo-tty device (see
http://www.noah.org/wiki/pexpect), so any '\n' printed by adb is written
as '\r\n' to the logfile. Since adb already uses '\r\n' to terminate
lines, the log ends up having '\r\r\n' at the end of each line. This
filter replaces the above with a single '\n' in the data stream.
"""
def __init__(self, output):
self._output = output
def write(self, data):
data = data.replace('\r\r\n', '\n')
self._output.write(data)
def flush(self):
self._output.flush()
|
[] |
[] |
[
"USING_HIVE",
"ANDROID_SERIAL"
] |
[]
|
["USING_HIVE", "ANDROID_SERIAL"]
|
python
| 2 | 0 | |
test/bedrock_Azure_simple_test.go
|
package test
import (
"fmt"
"os"
"strings"
"testing"
"github.com/gruntwork-io/terratest/modules/k8s"
"github.com/gruntwork-io/terratest/modules/random"
"github.com/gruntwork-io/terratest/modules/terraform"
)
func TestIT_Bedrock_AzureSimple_Test(t *testing.T) {
t.Parallel()
// Generate a random cluster name to prevent a naming conflict
uniqueID := random.UniqueId()
k8sName := fmt.Sprintf("gTestk8s-%s", uniqueID)
addressSpace := "10.39.0.0/16"
clientid := os.Getenv("ARM_CLIENT_ID")
clientsecret := os.Getenv("ARM_CLIENT_SECRET")
dnsprefix := k8sName + "-dns"
k8sRG := k8sName + "-rg"
location := os.Getenv("DATACENTER_LOCATION")
publickey := os.Getenv("public_key")
sshkey := os.Getenv("ssh_key")
subnetName := k8sName + "-subnet"
subscriptionid := os.Getenv("ARM_SUBSCRIPTION_ID")
tenantid := os.Getenv("ARM_TENANT_ID")
vnetName := k8sName + "-vnet"
// Specify the test case folder and "-var" options
tfOptions := &terraform.Options{
TerraformDir: "../cluster/environments/azure-simple",
Upgrade: true,
Vars: map[string]interface{}{
"address_space": addressSpace,
"cluster_name": k8sName,
"dns_prefix": dnsprefix,
"gitops_ssh_url": "[email protected]:timfpark/fabrikate-cloud-native-manifests.git",
"gitops_ssh_key": sshkey,
"resource_group_name": k8sRG,
"resource_group_location": location,
"service_principal_id": clientid,
"service_principal_secret": clientsecret,
"ssh_public_key": publickey,
"subnet_name": subnetName,
"subnet_prefix": addressSpace,
"subscription_id": subscriptionid,
"tenant_id": tenantid,
"vnet_name": vnetName,
},
}
// Terraform init, apply, output, and destroy
defer terraform.Destroy(t, tfOptions)
terraform.InitAndApply(t, tfOptions)
// Obtain Kube_config file from module output
os.Setenv("KUBECONFIG", "../cluster/environments/azure-simple/output/bedrock_kube_config")
kubeConfig := os.Getenv("KUBECONFIG")
options := k8s.NewKubectlOptions("", kubeConfig)
// Test Case 1: Verify Flux namespace
fmt.Println("Test case 1: Verifying flux namespace")
_flux, fluxErr := k8s.RunKubectlAndGetOutputE(t, options, "get", "po", "--namespace=flux")
if fluxErr != nil || !strings.Contains(_flux, "flux") {
t.Fatal(fluxErr)
} else {
fmt.Println("Flux verification complete")
}
}
|
[
"\"ARM_CLIENT_ID\"",
"\"ARM_CLIENT_SECRET\"",
"\"DATACENTER_LOCATION\"",
"\"public_key\"",
"\"ssh_key\"",
"\"ARM_SUBSCRIPTION_ID\"",
"\"ARM_TENANT_ID\"",
"\"KUBECONFIG\""
] |
[] |
[
"DATACENTER_LOCATION",
"ARM_CLIENT_SECRET",
"public_key",
"ARM_TENANT_ID",
"ARM_SUBSCRIPTION_ID",
"ssh_key",
"KUBECONFIG",
"ARM_CLIENT_ID"
] |
[]
|
["DATACENTER_LOCATION", "ARM_CLIENT_SECRET", "public_key", "ARM_TENANT_ID", "ARM_SUBSCRIPTION_ID", "ssh_key", "KUBECONFIG", "ARM_CLIENT_ID"]
|
go
| 8 | 0 | |
src/command_modules/azure-cli-acs/azure/cli/command_modules/acs/custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import random
import re
import ssl
import stat
import string
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
import yaml
import dateutil.parser
from dateutil.relativedelta import relativedelta
from knack.log import get_logger
from knack.util import CLIError
from msrestazure.azure_exceptions import CloudError
import requests
from azure.cli.command_modules.acs import acs_client, proxy
from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod
from azure.cli.core.api import get_config_dir
from azure.cli.core._profile import Profile
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.graphrbac.models import (ApplicationCreateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters)
from azure.mgmt.containerservice.models import ContainerServiceLinuxProfile
from azure.mgmt.containerservice.models import ContainerServiceNetworkProfile
from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes
from azure.mgmt.containerservice.models import ContainerServiceServicePrincipalProfile
from azure.mgmt.containerservice.models import ContainerServiceSshConfiguration
from azure.mgmt.containerservice.models import ContainerServiceSshPublicKey
from azure.mgmt.containerservice.models import ContainerServiceStorageProfileTypes
from azure.mgmt.containerservice.models import ManagedCluster
from azure.mgmt.containerservice.models import ManagedClusterAADProfile
from azure.mgmt.containerservice.models import ManagedClusterAddonProfile
from azure.mgmt.containerservice.models import ManagedClusterAgentPoolProfile
from ._client_factory import cf_container_services
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import cf_resources
logger = get_logger(__name__)
# pylint:disable=too-many-lines,unused-argument
def which(binary):
path_var = os.getenv('PATH')
if platform.system() == 'Windows':
binary = binary + '.exe'
parts = path_var.split(';')
else:
parts = path_var.split(':')
for part in parts:
bin_path = os.path.join(part, binary)
if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
return bin_path
return None
def wait_then_open(url):
"""
Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL.
"""
for _ in range(1, 10):
try:
urlopen(url, context=_ssl_context())
break
except URLError:
time.sleep(1)
webbrowser.open_new_tab(url)
def wait_then_open_async(url):
"""
Spawns a thread that waits for a bit then opens a URL.
"""
t = threading.Thread(target=wait_then_open, args=(url,))
t.daemon = True
t.start()
def acs_browse(cmd, client, resource_group, name, disable_browser=False, ssh_key_file=None):
"""
Opens a browser to the web interface for the cluster orchestrator
:param name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
:param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: If set a path to an SSH key to use, only applies to DCOS
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group)
_acs_browse_internal(cmd, client, acs_info, resource_group, name, disable_browser, ssh_key_file)
def _acs_browse_internal(cmd, client, acs_info, resource_group, name, disable_browser, ssh_key_file):
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
if str(orchestrator_type).lower() == 'kubernetes' or \
orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes or \
(acs_info.custom_profile and acs_info.custom_profile.orchestrator == 'kubernetes'): # pylint: disable=no-member
return k8s_browse(cmd, client, name, resource_group, disable_browser, ssh_key_file=ssh_key_file)
elif str(orchestrator_type).lower() == 'dcos' or orchestrator_type == ContainerServiceOrchestratorTypes.dcos:
return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
else:
raise CLIError('Unsupported orchestrator type {} for browse'.format(orchestrator_type))
def k8s_browse(cmd, client, name, resource_group, disable_browser=False, ssh_key_file=None):
"""
Launch a proxy and browse the Kubernetes web UI.
:param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group)
_k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file)
def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file):
if not which('kubectl'):
raise CLIError('Cannot find kubectl executable in PATH')
browse_path = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml')
if os.path.exists(browse_path):
os.remove(browse_path)
_k8s_get_credentials_internal(name, acs_info, browse_path, ssh_key_file, False)
logger.warning('Proxy running on 127.0.0.1:8001/ui')
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1:8001/ui')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy"])
def dcos_browse(cmd, client, name, resource_group, disable_browser=False, ssh_key_file=None):
"""
Creates an SSH tunnel to the Azure container service, and opens the Mesosphere DC/OS dashboard in the browser.
:param name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
:param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: Path to the SSH key to use
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group)
_dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file):
if not os.path.isfile(ssh_key_file):
raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
acs = acs_client.ACSClient()
if not acs.connect(_get_host_name(acs_info), _get_username(acs_info),
key_filename=ssh_key_file):
raise CLIError('Error connecting to ACS: {}'.format(_get_host_name(acs_info)))
octarine_bin = '/opt/mesosphere/bin/octarine'
if not acs.file_exists(octarine_bin):
raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(octarine_bin))
proxy_id = _rand_str(16)
proxy_cmd = '{} {}'.format(octarine_bin, proxy_id)
acs.run(proxy_cmd, background=True)
# Parse the output to get the remote PORT
proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id)
stdout, _ = acs.run(proxy_client_cmd)
remote_port = int(stdout.read().decode().strip())
local_port = acs.get_available_local_port()
# Set the proxy
proxy.set_http_proxy('127.0.0.1', local_port)
logger.warning('Proxy running on 127.0.0.1:%s', local_port)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1')
try:
acs.create_tunnel(
remote_host='127.0.0.1',
remote_port=remote_port,
local_port=local_port)
finally:
proxy.disable_http_proxy()
return
def acs_install_cli(cmd, client, resource_group, name, install_location=None, client_version=None):
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group)
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
kwargs = {'install_location': install_location}
if client_version:
kwargs['client_version'] = client_version
if orchestrator_type == 'kubernetes':
return k8s_install_cli(**kwargs)
elif orchestrator_type == 'dcos':
return dcos_install_cli(**kwargs)
else:
raise CLIError('Unsupported orchestrator type {} for install-cli'.format(orchestrator_type))
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _urlretrieve(url, filename):
req = urlopen(url, context=_ssl_context())
with open(filename, "wb") as f:
f.write(req.read())
def dcos_install_cli(cmd, install_location=None, client_version='1.8'):
"""
Downloads the dcos command line from Mesosphere
"""
system = platform.system()
if not install_location:
raise CLIError(
"No install location specified and it could not be determined from the current platform '{}'".format(
system))
base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}'
if system == 'Windows':
file_url = base_url.format('windows', client_version, 'dcos.exe')
elif system == 'Linux':
# TODO Support ARM CPU here
file_url = base_url.format('linux', client_version, 'dcos')
elif system == 'Darwin':
file_url = base_url.format('darwin', client_version, 'dcos')
else:
raise CLIError('Unsupported system "{}".'.format(system))
logger.warning('Downloading client to %s', install_location)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as err:
raise CLIError('Connection error while attempting to download client ({})'.format(err))
def k8s_install_cli(cmd, client_version='latest', install_location=None):
"""Install kubectl, a command-line interface for Kubernetes clusters."""
if client_version == 'latest':
context = _ssl_context()
version = urlopen('https://storage.googleapis.com/kubernetes-release/release/stable.txt',
context=context).read()
client_version = version.decode('UTF-8').strip()
else:
client_version = "v%s" % client_version
file_url = ''
system = platform.system()
base_url = 'https://storage.googleapis.com/kubernetes-release/release/{}/bin/{}/amd64/{}'
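# The URL placeholders are filled below with the release version, the OS name
# and the binary name.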
# ensure installation directory exists
install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location)
if not os.path.exists(install_dir):
os.makedirs(install_dir)
if system == 'Windows':
file_url = base_url.format(client_version, 'windows', 'kubectl.exe')
elif system == 'Linux':
# TODO: Support ARM CPU here
file_url = base_url.format(client_version, 'linux', 'kubectl')
elif system == 'Darwin':
file_url = base_url.format(client_version, 'darwin', 'kubectl')
else:
raise CLIError('Unsupported system "{}".'.format(system))
logger.warning('Downloading client to "%s" from "%s"', install_location, file_url)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as ex:
raise CLIError('Connection error while attempting to download client ({})'.format(ex))
if system == 'Windows': # be verbose, as the install_location likely not in Windows's search PATHs
env_paths = os.environ['PATH'].split(';')
found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None)
if not found:
# pylint: disable=logging-format-interpolation
logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
'This is good for the current command session.\n'
' 2. Update system PATH environment variable by following '
'"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
'You only need to do it once'.format(install_dir, cli))
else:
logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
install_dir, cli)
def k8s_install_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
location=None, service_principal=None, client_secret=None,
chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None):
_k8s_install_or_upgrade_connector("install", cmd, client, name, resource_group_name, connector_name,
location, service_principal, client_secret, chart_url, os_type,
image_tag, aci_resource_group)
def k8s_upgrade_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
location=None, service_principal=None, client_secret=None,
chart_url=None, os_type='Linux', image_tag=None, aci_resource_group=None):
_k8s_install_or_upgrade_connector("upgrade", cmd, client, name, resource_group_name, connector_name,
location, service_principal, client_secret, chart_url, os_type,
image_tag, aci_resource_group)
def _k8s_install_or_upgrade_connector(helm_cmd, cmd, client, name, resource_group_name, connector_name,
location, service_principal, client_secret, chart_url, os_type,
image_tag, aci_resource_group):
from subprocess import PIPE, Popen
instance = client.get(resource_group_name, name)
helm_not_installed = 'Helm not detected, please verify if it is installed.'
url_chart = chart_url
if image_tag is None:
image_tag = 'latest'
# Check if Helm is installed locally
try:
Popen(["helm"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(helm_not_installed)
# If SPN is specified, the secret should also be specified
if service_principal is not None and client_secret is None:
raise CLIError('--client-secret must be specified when --service-principal is specified')
# Validate if the RG exists
rg_location = _get_rg_location(cmd.cli_ctx, aci_resource_group or resource_group_name)
# Auto assign the location
if location is None:
location = rg_location
norm_location = location.replace(' ', '').lower()
# Validate the location against the available ACI regions
_validate_aci_location(norm_location)
# Get the credentials from an AKS instance
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
subscription_id = _get_subscription_id(cmd.cli_ctx)
# Get the TenantID
profile = Profile(cli_ctx=cmd.cli_ctx)
_, _, tenant_id = profile.get_login_credentials()
# Check if we want the linux connector
if os_type.lower() in ['linux', 'both']:
_helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
client_secret, subscription_id, tenant_id, aci_resource_group,
norm_location, 'Linux', instance.enable_rbac, instance.fqdn)
# Check if we want the windows connector
if os_type.lower() in ['windows', 'both']:
_helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
client_secret, subscription_id, tenant_id, aci_resource_group,
norm_location, 'Windows', instance.enable_rbac, instance.fqdn)
def _helm_install_or_upgrade_aci_connector(helm_cmd, image_tag, url_chart, connector_name, service_principal,
client_secret, subscription_id, tenant_id, aci_resource_group,
norm_location, os_type, use_rbac, masterFqdn):
rbac_install = "true" if use_rbac else "false"
node_taint = 'azure.com/aci'
helm_release_name = connector_name.lower() + '-' + os_type.lower() + '-' + norm_location
node_name = 'virtual-kubelet-' + helm_release_name
k8s_master = 'https://{}'.format(masterFqdn)
logger.warning("Deploying the ACI connector for '%s' using Helm", os_type)
try:
values = 'env.nodeName={},env.nodeTaint={},env.nodeOsType={},image.tag={},rbac.install={}'.format(
node_name, node_taint, os_type, image_tag, rbac_install)
if service_principal:
values += ",env.azureClientId=" + service_principal
if client_secret:
values += ",env.azureClientKey=" + client_secret
if subscription_id:
values += ",env.azureSubscriptionId=" + subscription_id
if tenant_id:
values += ",env.azureTenantId=" + tenant_id
if aci_resource_group:
values += ",env.aciResourceGroup=" + aci_resource_group
if norm_location:
values += ",env.aciRegion=" + norm_location
# Currently, we need to set the master FQDN.
# This is temporary and we should remove it when possible
values += ",env.masterUri=" + k8s_master
if helm_cmd == "install":
subprocess.call(["helm", "install", url_chart, "--name", helm_release_name, "--set", values])
elif helm_cmd == "upgrade":
subprocess.call(["helm", "upgrade", helm_release_name, url_chart, "--set", values])
except subprocess.CalledProcessError as err:
raise CLIError('Could not deploy the ACI connector Chart: {}'.format(err))
def k8s_uninstall_connector(cmd, client, name, resource_group_name, connector_name='aci-connector',
location=None, graceful=False, os_type='Linux'):
from subprocess import PIPE, Popen
helm_not_installed = "Error : Helm not detected, please verify if it is installed."
# Check if Helm is installed locally
try:
Popen(["helm"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(helm_not_installed)
# Get the credentials from an AKS instance
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
# Validate if the RG exists
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
# Auto assign the location
if location is None:
location = rg_location
norm_location = location.replace(' ', '').lower()
if os_type.lower() in ['linux', 'both']:
helm_release_name = connector_name.lower() + '-linux-' + norm_location
node_name = 'virtual-kubelet-' + helm_release_name
_undeploy_connector(graceful, node_name, helm_release_name)
if os_type.lower() in ['windows', 'both']:
helm_release_name = connector_name.lower() + '-windows-' + norm_location
node_name = 'virtual-kubelet-' + helm_release_name
_undeploy_connector(graceful, node_name, helm_release_name)
def _undeploy_connector(graceful, node_name, helm_release_name):
if graceful:
logger.warning('Graceful option selected, will try to drain the node first')
from subprocess import PIPE, Popen
kubectl_not_installed = 'Kubectl not detected, please verify if it is installed.'
try:
Popen(["kubectl"], stdout=PIPE, stderr=PIPE)
except OSError:
raise CLIError(kubectl_not_installed)
try:
drain_node = subprocess.check_output(
['kubectl', 'drain', node_name, '--force', '--delete-local-data'],
universal_newlines=True)
if not drain_node:
raise CLIError('Could not find the node, make sure you' +
' are using the correct --os-type')
except subprocess.CalledProcessError as err:
raise CLIError('Could not find the node, make sure you are using the correct' +
' --connector-name, --location and --os-type options: {}'.format(err))
logger.warning("Undeploying the '%s' using Helm", helm_release_name)
try:
subprocess.call(['helm', 'del', helm_release_name, '--purge'])
except subprocess.CalledProcessError as err:
raise CLIError('Could not undeploy the ACI connector Chart: {}'.format(err))
try:
subprocess.check_output(
['kubectl', 'delete', 'node', node_name],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not delete the node, make sure you are using the correct' +
' --connector-name, --location and --os-type options: {}'.format(err))
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
# use get_progress_controller
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Creating service principal', value=0, total_val=1.0)
logger.info('Creating service principal')
# always create application with 5 years expiration
start_date = datetime.datetime.utcnow()
end_date = start_date + relativedelta(years=5)
result = create_application(rbac_client.applications, name, url, [url], password=client_secret,
start_date=start_date, end_date=end_date)
service_principal = result.app_id # pylint: disable=no-member
for x in range(0, 10):
hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0)
try:
create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client)
break
# TODO figure out what exception AAD throws here sometimes.
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
time.sleep(2 + 2 * x)
else:
return False
hook.add(message='Finished service principal creation', value=1.0, total_val=1.0)
logger.info('Finished service principal creation')
return service_principal
def _add_role_assignment(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0)
logger.info('Waiting for AAD role to propagate')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to propagate', value=0.1 * x, total_val=1.0)
try:
# TODO: break this out into a shared utility library
create_role_assignment(cli_ctx, role, service_principal, scope=scope)
break
except CloudError as ex:
if ex.message == 'The role assignment already exists.':
break
logger.info(ex.message)
except: # pylint: disable=bare-except
pass
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
logger.info('AAD role propagation done')
return True
def _get_subscription_id(cli_ctx):
_, sub_id, _ = Profile(cli_ctx=cli_ctx).get_login_credentials(subscription_id=None)
return sub_id
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
def list_acs_locations(cmd, client):
return {
"productionRegions": regions_in_prod,
"previewRegions": regions_in_preview
}
def _generate_windows_profile(windows, admin_username, admin_password):
if windows:
if not admin_password:
raise CLIError('--admin-password is required.')
if len(admin_password) < 6:
raise CLIError('--admin-password must be at least 6 characters')
windows_profile = {
"adminUsername": admin_username,
"adminPassword": admin_password,
}
return windows_profile
return None
def _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile):
master_pool_profile = {}
default_master_pool_profile = {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
}
if api_version == "2017-07-01":
default_master_pool_profile = _update_dict(default_master_pool_profile, {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
"vmSize": master_vm_size,
"osDiskSizeGB": int(master_osdisk_size),
"vnetSubnetID": master_vnet_subnet_id,
"firstConsecutiveStaticIP": master_first_consecutive_static_ip,
"storageProfile": master_storage_profile,
})
if not master_profile:
master_pool_profile = default_master_pool_profile
else:
master_pool_profile = _update_dict(default_master_pool_profile, master_profile)
return master_pool_profile
def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile):
agent_pool_profiles = []
default_agent_pool_profile = {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
}
if api_version == "2017-07-01":
default_agent_pool_profile = _update_dict(default_agent_pool_profile, {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osDiskSizeGB": int(agent_osdisk_size),
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
"vnetSubnetID": agent_vnet_subnet_id,
"ports": agent_ports,
"storageProfile": agent_storage_profile,
})
if agent_profiles is None:
agent_pool_profiles.append(_update_dict(default_agent_pool_profile, {"name": "agentpool0"}))
else:
# override agentPoolProfiles by using the passed in agent_profiles
for idx, ap in enumerate(agent_profiles):
# if the user specified dnsPrefix, we honor that
# otherwise, we use the idx to avoid duplicate dns name
a = _update_dict({"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, ap)
agent_pool_profiles.append(_update_dict(default_agent_pool_profile, a))
return agent_pool_profiles
def _generate_outputs(name, orchestrator_type, admin_username):
# define outputs
outputs = {
"masterFQDN": {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name) # pylint: disable=line-too-long
},
"sshMaster0": {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name) # pylint: disable=line-too-long
},
}
if orchestrator_type.lower() != "kubernetes":
outputs["agentFQDN"] = {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name) # pylint: disable=line-too-long
}
# override sshMaster0 for non-kubernetes scenarios
outputs["sshMaster0"] = {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name) # pylint: disable=line-too-long
}
return outputs
def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile):
properties = {
"orchestratorProfile": {
"orchestratorType": orchestrator_type,
},
"masterProfile": master_pool_profile,
"agentPoolProfiles": agent_pool_profiles,
"linuxProfile": {
"ssh": {
"publicKeys": [
{
"keyData": ssh_key_value
}
]
},
"adminUsername": admin_username
},
}
if api_version == "2017-07-01":
properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version
if windows_profile is not None:
properties["windowsProfile"] = windows_profile
return properties
# pylint: disable=too-many-locals
def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None,
location=None, admin_username="azureuser", api_version=None, master_profile=None,
master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="",
master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="",
agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0,
agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="",
orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None, tags=None,
windows=False, admin_password="", generate_ssh_keys=False, # pylint: disable=unused-argument
validate=False, no_wait=False):
"""Create a new Acs.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param dns_name_prefix: Sets the Domain name prefix for the cluster.
The concatenation of the domain name and the regionalized DNS zone
make up the fully qualified domain name associated with the public
IP address.
:type dns_name_prefix: str
:param name: Resource name for the container service.
:type name: str
:param ssh_key_value: Configure all linux machines with the SSH RSA
public key string. Your key should include three parts, for example
'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm'.
:type ssh_key_value: str
:param content_version: If included it must match the ContentVersion
in the template.
:type content_version: str
:param admin_username: User name for the Linux Virtual Machines.
:type admin_username: str
:param api_version: ACS API version to use
:type api_version: str
:param master_profile: MasterProfile used to describe master pool
:type master_profile: dict
:param master_vm_size: The size of master pool Virtual Machine
:type master_vm_size: str
:param master_osdisk_size: The osDisk size in GB of master pool Virtual Machine
:type master_osdisk_size: int
:param master_count: The number of masters for the cluster.
:type master_count: int
:param master_vnet_subnet_id: The vnet subnet id for master pool
:type master_vnet_subnet_id: str
:param master_storage_profile: The storage profile used for master pool.
Possible value could be StorageAccount, ManagedDisk.
:type master_storage_profile: str
:param agent_profiles: AgentPoolProfiles used to describe agent pools
:type agent_profiles: dict
:param agent_vm_size: The size of the Virtual Machine.
:type agent_vm_size: str
:param agent_osdisk_size: The osDisk size in GB of agent pool Virtual Machine
:type agent_osdisk_size: int
:param agent_vnet_subnet_id: The vnet subnet id for master pool
:type agent_vnet_subnet_id: str
:param agent_ports: the ports exposed on the agent pool
:type agent_ports: list
:param agent_storage_profile: The storage profile used for agent pool.
Possible value could be StorageAccount, ManagedDisk.
:type agent_storage_profile: str
:param location: Location for VM resources.
:type location: str
:param orchestrator_type: The type of orchestrator used to manage the
applications on the cluster.
:type orchestrator_type: str or :class:`orchestratorType
<Default.models.orchestratorType>`
:param tags: Tags object.
:type tags: object
:param windows: If true, the cluster will be built for running Windows container.
:type windows: bool
:param admin_password: The administration password for Windows nodes. Only available if --windows=true
:type admin_password: str
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`DeploymentExtended
<Default.models.DeploymentExtended>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value):
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value))
subscription_id = _get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
# if api-version is not specified, or specified in a version not supported
# override based on location
if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]:
if location in regions_in_preview:
api_version = "2017-07-01" # 2017-07-01 supported in the preview locations
else:
api_version = "2017-01-31" # 2017-01-31 applied to other locations
if orchestrator_type.lower() == 'kubernetes':
principal_obj = _ensure_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id,
dns_name_prefix, location, name)
client_secret = principal_obj.get("client_secret")
service_principal = principal_obj.get("service_principal")
elif windows:
raise CLIError('--windows is only supported for Kubernetes clusters')
# set location if void
if not location:
location = '[resourceGroup().location]'
# set os_type
os_type = 'Linux'
if windows:
os_type = 'Windows'
# set agent_ports if void
if not agent_ports:
agent_ports = []
# get windows_profile
windows_profile = _generate_windows_profile(windows, admin_username, admin_password)
# The resources.properties fields should match with ContainerServices' api model
master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile)
agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile)
outputs = _generate_outputs(name, orchestrator_type, admin_username)
properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile)
resource = {
"apiVersion": api_version,
"location": location,
"type": "Microsoft.ContainerService/containerServices",
"name": name,
"tags": tags,
"properties": properties,
}
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"resources": [
resource,
],
"outputs": outputs,
}
params = {}
if service_principal is not None and client_secret is not None:
properties["servicePrincipalProfile"] = {
"clientId": service_principal,
"secret": "[parameters('clientSecret')]",
}
template["parameters"] = {
"clientSecret": {
"type": "secureString",
"metadata": {
"description": "The client secret for the service principal"
}
}
}
params = {
"clientSecret": {
"value": client_secret
}
}
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
return _invoke_deployment(cmd.cli_ctx, resource_group_name, deployment_name,
template, params, validate, no_wait)
except CloudError as ex:
retry_exception = ex
if 'is not valid according to the validation procedure' in ex.message or \
'The credentials in ServicePrincipalProfile were invalid' in ex.message or \
'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
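# Persist the service principal credentials for the given subscription under the
# CLI config directory (written with mode 0600) so that later 'az acs/aks create'
# calls can reuse them instead of provisioning a new SPN.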
def store_acs_service_principal(subscription_id, client_secret, service_principal,
file_name='acsServicePrincipal.json'):
obj = {}
if client_secret:
obj['client_secret'] = client_secret
if service_principal:
obj['service_principal'] = service_principal
config_path = os.path.join(get_config_dir(), file_name)
full_config = load_service_principals(config_path=config_path)
if not full_config:
full_config = {}
full_config[subscription_id] = obj
with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
'w+') as spFile:
json.dump(full_config, spFile)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
config_path = os.path.join(get_config_dir(), file_name)
config = load_service_principals(config_path)
if not config:
return None
return config.get(subscription_id)
def load_service_principals(config_path):
if not os.path.exists(config_path):
return None
fd = os.open(config_path, os.O_RDONLY)
try:
with os.fdopen(fd) as f:
return shell_safe_json_parse(f.read())
except: # pylint: disable=bare-except
return None
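# Thin wrapper around the ARM deployments API: builds incremental-mode
# DeploymentProperties from the given template/parameters and either validates
# the deployment or submits create_or_update (optionally without waiting).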
def _invoke_deployment(cli_ctx, resource_group_name, deployment_name, template, parameters, validate, no_wait,
subscription_id=None):
from azure.mgmt.resource.resources import ResourceManagementClient
from azure.mgmt.resource.resources.models import DeploymentProperties
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
smc = get_mgmt_service_client(cli_ctx, ResourceManagementClient, subscription_id=subscription_id).deployments
if validate:
logger.info('==== BEGIN TEMPLATE ====')
logger.info(json.dumps(template, indent=2))
logger.info('==== END TEMPLATE ====')
return smc.validate(resource_group_name, deployment_name, properties)
return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, properties)
def k8s_get_credentials(cmd, client, name, resource_group_name,
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
ssh_key_file=None,
overwrite_existing=False):
"""Download and install kubectl credentials from the cluster master
:param name: The name of the cluster.
:type name: str
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param path: Where to install the kubectl config file
:type path: str
:param ssh_key_file: Path to an SSH key file to use
:type ssh_key_file: str
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing)
def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing):
if ssh_key_file is not None and not os.path.isfile(ssh_key_file):
raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
dns_prefix = acs_info.master_profile.dns_prefix # pylint: disable=no-member
location = acs_info.location # pylint: disable=no-member
user = acs_info.linux_profile.admin_username # pylint: disable=no-member
_mkdir_p(os.path.dirname(path))
path_candidate = path
ix = 0
while os.path.exists(path_candidate):
ix += 1
path_candidate = '{}-{}-{}'.format(path, name, ix)
# TODO: this only works for public cloud, need other casing for national clouds
acs_client.secure_copy(user, '{}.{}.cloudapp.azure.com'.format(dns_prefix, location),
'.kube/config', path_candidate, key_filename=ssh_key_file)
# merge things
if path_candidate != path:
try:
merge_kubernetes_configurations(path, path_candidate, overwrite_existing)
except yaml.YAMLError as exc:
logger.warning('Failed to merge credentials to kube config file: %s', exc)
logger.warning('The credentials have been saved to %s', path_candidate)
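# Merge one kubeconfig section ('clusters', 'users' or 'contexts') from 'addition'
# into 'existing'. An entry with a clashing name is replaced when 'replace' is set
# (or when the entries are identical); otherwise a CLIError is raised.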
def _handle_merge(existing, addition, key, replace):
if addition[key]:
if existing[key] is None:
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
raise CLIError('A different object named {} already exists in {}'.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
try:
with open(filename) as stream:
return yaml.safe_load(stream)
except (IOError, OSError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
raise CLIError('{} does not exist'.format(filename))
else:
raise
except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace):
existing = load_kubernetes_configuration(existing_file)
addition = load_kubernetes_configuration(addition_file)
    if addition is None:
        raise CLIError('failed to load additional configuration from {}'.format(addition_file))
    # rename the admin context so it doesn't overwrite the user context
    for ctx in addition.get('contexts', []):
        try:
            if ctx['context']['user'].startswith('clusterAdmin'):
                admin_name = ctx['name'] + '-admin'
                addition['current-context'] = ctx['name'] = admin_name
                break
        except (KeyError, TypeError):
            continue
if existing is None:
existing = addition
else:
_handle_merge(existing, addition, 'clusters', replace)
_handle_merge(existing, addition, 'users', replace)
_handle_merge(existing, addition, 'contexts', replace)
existing['current-context'] = addition['current-context']
# check that ~/.kube/config is only read- and writable by its owner
if platform.system() != 'Windows':
existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode))
if not existing_file_perms.endswith('600'):
logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
existing_file, existing_file_perms)
with open(existing_file, 'w+') as stream:
yaml.dump(existing, stream, default_flow_style=False)
current_context = addition.get('current-context', 'UNKNOWN')
msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file)
print(msg)
def _get_host_name(acs_info):
"""
Gets the FQDN from the acs_info object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info is None:
raise CLIError('Missing acs_info')
if acs_info.master_profile is None:
raise CLIError('Missing master_profile')
if acs_info.master_profile.fqdn is None:
raise CLIError('Missing fqdn')
return acs_info.master_profile.fqdn
def _get_username(acs_info):
"""
Gets the admin user name from the Linux profile of the ContainerService object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info.linux_profile is not None:
return acs_info.linux_profile.admin_username
return None
def _get_acs_info(cli_ctx, name, resource_group_name):
"""
Gets the ContainerService object from Azure REST API.
:param name: ACS resource name
:type name: String
:param resource_group_name: Resource group name
:type resource_group_name: String
"""
container_services = cf_container_services(cli_ctx, None)
return container_services.get(resource_group_name, name)
def _rand_str(n):
"""
Gets a random string
"""
choices = string.ascii_lowercase + string.digits
return ''.join(random.SystemRandom().choice(choices) for _ in range(n))
def _mkdir_p(path):
# http://stackoverflow.com/a/600612
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count):
instance = client.get(resource_group_name, container_service_name)
instance.agent_pool_profiles[0].count = new_agent_count # pylint: disable=no-member
# null out the service principal because otherwise validation complains
if instance.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes:
instance.service_principal_profile = None
# null out the windows profile so that validation doesn't complain about not having the admin password
instance.windows_profile = None
return client.create_or_update(resource_group_name, container_service_name, instance)
def list_container_services(cmd, client, resource_group_name=None):
''' List Container Services. '''
svc_list = client.list_by_resource_group(resource_group_name=resource_group_name) \
if resource_group_name else client.list()
return list(svc_list)
def show_service_principal(client, identifier):
object_id = _resolve_service_principal(client, identifier)
return client.get(object_id)
def _resolve_service_principal(client, identifier):
# todo: confirm with graph team that a service principal name must be unique
result = list(client.list(filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier)))
if result:
return result[0].object_id
try:
uuid.UUID(identifier)
return identifier # assume an object id
except ValueError:
raise CLIError("service principal '{}' doesn't exist".format(identifier))
def create_application(client, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
display_name=display_name,
identifier_uris=identifier_uris,
homepage=homepage,
reply_urls=reply_urls,
key_credentials=key_creds,
password_credentials=password_creds)
try:
return client.create(app_create_param)
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError('specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = dateutil.parser.parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = dateutil.parser.parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
if rbac_client is None:
rbac_client = get_graph_rbac_management_client(cli_ctx)
if resolve_app:
try:
uuid.UUID(identifier)
result = list(rbac_client.applications.list(filter="appId eq '{}'".format(identifier)))
except ValueError:
result = list(rbac_client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(identifier)))
if not result: # assume we get an object id
result = [rbac_client.applications.get(identifier)]
app_id = result[0].app_id
else:
app_id = identifier
return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None):
return _create_role_assignment(cli_ctx, role, assignee, resource_group_name, scope)
def _create_role_assignment(cli_ctx, role, assignee, resource_group_name=None, scope=None, resolve_assignee=True):
from azure.cli.core.profiles import ResourceType, get_sdk
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id)
role_id = _resolve_role_id(role, scope, definitions_client)
object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee
RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
'RoleAssignmentCreateParameters', mod='models',
operation_group='role_assignments')
parameters = RoleAssignmentCreateParameters(role_definition_id=role_id, principal_id=object_id)
assignment_name = uuid.uuid4()
custom_headers = None
return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
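# Resolve the ARM scope for a role assignment. For illustration (hypothetical IDs):
#   scope supplied                 -> used as-is (a resource group is then redundant)
#   resource_group_name='my-rg'    -> '/subscriptions/<sub-id>/resourceGroups/my-rg'
#   neither supplied               -> '/subscriptions/<sub-id>'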
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
elif len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
client = get_graph_rbac_management_client(cli_ctx)
result = None
if assignee.find('@') >= 0: # looks like a user principal name
result = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee)))
if not result:
result = list(client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
if not result: # assume an object id, let us verify it
result = _get_object_stubs(client, [assignee])
# 2+ matches should never happen, so we only check 'no match' here
if not result:
raise CLIError("No matches in graph database for '{}'".format(assignee))
return result[0].object_id
def _get_object_stubs(graph_client, assignees):
params = GetObjectsParameters(include_directory_object_references=True,
object_ids=assignees)
return list(graph_client.objects.get_objects_by_object_ids(params))
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
def subnet_role_assignment_exists(cli_ctx, scope):
network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id):
return True
return False
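# aks_browse fetches user credentials for the cluster into a temporary kubeconfig,
# locates the kubernetes-dashboard pod in the kube-system namespace and then runs
# 'kubectl port-forward' so the dashboard is reachable at http://127.0.0.1:<listen_port>/.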
def aks_browse(cmd, client, resource_group_name, name, disable_browser=False, listen_port='8001',
enable_cloud_console_aks_browse=False):
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
if in_cloud_console() and not enable_cloud_console_aks_browse:
raise CLIError('Browse is disabled in cloud shell by default.')
proxy_url = 'http://127.0.0.1:{0}/'.format(listen_port)
_, browse_path = tempfile.mkstemp()
# TODO: need to add an --admin option?
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
# find the dashboard pod's name
try:
dashboard_pod = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system", "--output", "name",
"--selector", "k8s-app=kubernetes-dashboard"],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard pod: {}'.format(err))
if dashboard_pod:
# remove any "pods/" or "pod/" prefix from the name
dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
else:
raise CLIError("Couldn't find the Kubernetes dashboard pod.")
# launch kubectl port-forward locally to access the remote dashboard
if in_cloud_console() and enable_cloud_console_aks_browse:
# TODO: better error handling here.
response = requests.post('http://localhost:8888/openport/8001')
result = json.loads(response.text)
term_id = os.environ.get('ACC_TERM_ID')
if term_id:
response = requests.post('http://localhost:8888/openLink/{}'.format(term_id),
json={"url": result['url']})
logger.warning('To view the console, please open %s in a new tab', result['url'])
else:
logger.warning('Proxy running on %s', proxy_url)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async(proxy_url)
try:
subprocess.call(["kubectl", "--kubeconfig", browse_path, "--namespace", "kube-system",
"port-forward", dashboard_pod, "{0}:9090".format(listen_port)])
except KeyboardInterrupt:
# Let command processing finish gracefully after the user presses [Ctrl+C]
pass
finally:
# TODO: Better error handling here.
requests.post('http://localhost:8888/closeport/8001')
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
def _validate_ssh_key(no_ssh_key, ssh_key_value):
if not no_ssh_key:
try:
if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
raise ValueError()
except (TypeError, ValueError):
shortened_key = truncate_text(ssh_key_value)
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
def aks_create(cmd, client, resource_group_name, name, ssh_key_value, # pylint: disable=too-many-locals
dns_name_prefix=None,
location=None,
admin_username="azureuser",
kubernetes_version='',
node_vm_size="Standard_DS2_v2",
node_osdisk_size=0,
node_count=3,
nodepool_name="nodepool1",
service_principal=None, client_secret=None,
no_ssh_key=False,
disable_rbac=None,
enable_rbac=None,
skip_subnet_role_assignment=False,
network_plugin=None,
pod_cidr=None,
service_cidr=None,
dns_service_ip=None,
docker_bridge_address=None,
enable_addons=None,
workspace_resource_id=None,
vnet_subnet_id=None,
max_pods=0,
aad_client_app_id=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_tenant_id=None,
tags=None,
generate_ssh_keys=False, # pylint: disable=unused-argument
no_wait=False):
_validate_ssh_key(no_ssh_key, ssh_key_value)
subscription_id = _get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
agent_pool_profile = ManagedClusterAgentPoolProfile(
name=_trim_nodepoolname(nodepool_name), # Must be 12 chars or less before ACS RP adds to it
count=int(node_count),
vm_size=node_vm_size,
os_type="Linux",
storage_profile=ContainerServiceStorageProfileTypes.managed_disks,
vnet_subnet_id=vnet_subnet_id,
max_pods=int(max_pods) if max_pods else None
)
if node_osdisk_size:
agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
linux_profile = None
# LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
if not no_ssh_key:
ssh_config = ContainerServiceSshConfiguration(
public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
linux_profile = ContainerServiceLinuxProfile(admin_username=admin_username, ssh=ssh_config)
principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
service_principal=service_principal, client_secret=client_secret,
subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
location=location, name=name)
service_principal_profile = ContainerServiceServicePrincipalProfile(
client_id=principal_obj.get("service_principal"),
secret=principal_obj.get("client_secret"),
key_vault_secret_ref=None)
if (vnet_subnet_id and not skip_subnet_role_assignment and
not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
scope = vnet_subnet_id
if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
service_principal_profile.client_id, scope=scope):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
network_profile = None
if any([network_plugin, pod_cidr, service_cidr, dns_service_ip, docker_bridge_address]):
network_profile = ContainerServiceNetworkProfile(
network_plugin=network_plugin,
pod_cidr=pod_cidr,
service_cidr=service_cidr,
dns_service_ip=dns_service_ip,
docker_bridge_cidr=docker_bridge_address
)
addon_profiles = _handle_addons_args(
cmd,
enable_addons,
subscription_id,
resource_group_name,
{},
workspace_resource_id
)
if 'omsagent' in addon_profiles:
_ensure_container_insights_for_monitoring(cmd, addon_profiles['omsagent'])
aad_profile = None
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):
if aad_tenant_id is None:
profile = Profile(cli_ctx=cmd.cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
aad_profile = ManagedClusterAADProfile(
client_app_id=aad_client_app_id,
server_app_id=aad_server_app_id,
server_app_secret=aad_server_app_secret,
tenant_id=aad_tenant_id
)
# Check that both --disable-rbac and --enable-rbac weren't provided
if all([disable_rbac, enable_rbac]):
raise CLIError('specify either "--disable-rbac" or "--enable-rbac", not both.')
mc = ManagedCluster(
location=location, tags=tags,
dns_prefix=dns_name_prefix,
kubernetes_version=kubernetes_version,
enable_rbac=False if disable_rbac else True,
agent_pool_profiles=[agent_pool_profile],
linux_profile=linux_profile,
service_principal_profile=service_principal_profile,
network_profile=network_profile,
addon_profiles=addon_profiles,
aad_profile=aad_profile)
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
return sdk_no_wait(no_wait, client.create_or_update,
resource_group_name=resource_group_name, resource_name=name, parameters=mc)
except CloudError as ex:
retry_exception = ex
if 'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = _get_subscription_id(cmd.cli_ctx)
instance = _update_addons(
cmd,
instance,
subscription_id,
resource_group_name,
addons,
enable=False,
no_wait=no_wait
)
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = _get_subscription_id(cmd.cli_ctx)
instance = _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable=True,
workspace_resource_id=workspace_resource_id, no_wait=no_wait)
if 'omsagent' in instance.addon_profiles:
_ensure_container_insights_for_monitoring(cmd, instance.addon_profiles['omsagent'])
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_get_versions(cmd, client, location):
return client.list_orchestrators(location, resource_type='managedClusters')
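# Download the user (or, with --admin, the cluster-admin) kubeconfig for the managed
# cluster and merge it into the file at 'path'; a path of "-" prints it to stdout instead.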
def aks_get_credentials(cmd, client, resource_group_name, name, admin=False,
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
overwrite_existing=False):
credentialResults = None
if admin:
credentialResults = client.list_cluster_admin_credentials(resource_group_name, name)
else:
credentialResults = client.list_cluster_user_credentials(resource_group_name, name)
if not credentialResults:
raise CLIError("No Kubernetes credentials found.")
else:
try:
kubeconfig = credentialResults.kubeconfigs[0].value.decode(encoding='UTF-8')
_print_or_merge_credentials(path, kubeconfig, overwrite_existing)
except (IndexError, ValueError):
raise CLIError("Fail to find kubeconfig file.")
ADDONS = {
'http_application_routing': 'httpApplicationRouting',
'monitoring': 'omsagent'
}
def aks_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_nulls(list(managed_clusters))
def aks_show(cmd, client, resource_group_name, name):
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="nodepool1", no_wait=False):
instance = client.get(resource_group_name, name)
# TODO: change this approach when we support multiple agent pools.
for agent_profile in instance.agent_pool_profiles:
if agent_profile.name == nodepool_name:
agent_profile.count = int(node_count) # pylint: disable=no-member
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_upgrade(cmd, client, resource_group_name, name, kubernetes_version, no_wait=False, **kwargs): # pylint: disable=unused-argument
instance = client.get(resource_group_name, name)
instance.kubernetes_version = kubernetes_version
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
DEV_SPACES_EXTENSION_NAME = 'dev-spaces-preview'
DEV_SPACES_EXTENSION_MODULE = 'azext_dev_spaces_preview.custom'
def aks_use_dev_spaces(cmd, client, name, resource_group_name, update=False, space_name=None, prompt=False):
"""
Use Azure Dev Spaces with a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
:param resource_group_name: Name of resource group. You can configure the default group. \
Using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param update: Update to the latest Azure Dev Spaces client components.
:type update: bool
:param space_name: Name of the new or existing dev space to select. Defaults to an interactive selection experience.
:type space_name: String
:param prompt: Do not prompt for confirmation. Requires --space.
:type prompt: bool
"""
if _get_or_add_extension(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE, update):
azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_use_dev_spaces(name, resource_group_name, update, space_name, prompt)
except TypeError:
raise CLIError("Use '--update' option to get the latest Azure Dev Spaces client components.")
except AttributeError as ae:
raise CLIError(ae)
def aks_remove_dev_spaces(cmd, client, name, resource_group_name, prompt=False):
"""
Remove Azure Dev Spaces from a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
:param resource_group_name: Name of resource group. You can configure the default group. \
Using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param prompt: Do not prompt for confirmation.
:type prompt: bool
"""
if _get_or_add_extension(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE):
azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_remove_dev_spaces(name, resource_group_name, prompt)
except AttributeError as ae:
raise CLIError(ae)
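# Toggle the requested addon profiles on the retrieved ManagedCluster object. For the
# monitoring addon ('omsagent') this also resolves, or creates, a default Log Analytics
# workspace when no --workspace-resource-id is supplied.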
def _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable, workspace_resource_id=None,
no_wait=False):
# parse the comma-separated addons argument
addon_args = addons.split(',')
addon_profiles = instance.addon_profiles or {}
# for each addons argument
for addon_arg in addon_args:
addon = ADDONS[addon_arg]
if enable:
# add new addons or update existing ones and enable them
addon_profile = addon_profiles.get(addon, ManagedClusterAddonProfile(enabled=False))
# special config handling for certain addons
if addon == 'omsagent':
if addon_profile.enabled:
raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring" '
'before enabling it again.')
if not workspace_resource_id:
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd,
subscription_id,
resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profile.config = {'logAnalyticsWorkspaceResourceID': workspace_resource_id}
addon_profiles[addon] = addon_profile
else:
if addon not in addon_profiles:
raise CLIError("The addon {} is not installed.".format(addon))
addon_profiles[addon].config = None
addon_profiles[addon].enabled = enable
instance.addon_profiles = addon_profiles
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return instance
def _get_azext_module(extension_name, module_name):
try:
# Adding the installed extension in the path
from azure.cli.core.extensions import get_extension_path
ext_dir = get_extension_path(extension_name)
sys.path.append(ext_dir)
# Import the extension module
from importlib import import_module
azext_custom = import_module(module_name)
return azext_custom
except ImportError as ie:
raise CLIError(ie)
def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None,
workspace_resource_id=None):
if not addon_profiles:
addon_profiles = {}
addons = addons_str.split(',') if addons_str else []
if 'http_application_routing' in addons:
addon_profiles['httpApplicationRouting'] = ManagedClusterAddonProfile(enabled=True)
addons.remove('http_application_routing')
# TODO: can we help the user find a workspace resource ID?
if 'monitoring' in addons:
if not workspace_resource_id:
# use default workspace if exists else create default workspace
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd, subscription_id, resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profiles['omsagent'] = ManagedClusterAddonProfile(
enabled=True, config={'logAnalyticsWorkspaceResourceID': workspace_resource_id})
addons.remove('monitoring')
# error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
elif workspace_resource_id:
raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".')
# error out if any (unrecognized) addons remain
if addons:
raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
",".join(addons), "are" if len(addons) > 1 else "is"))
return addon_profiles
def _install_dev_spaces_extension(extension_name):
try:
from azure.cli.core.extensions import operations
operations.add_extension(extension_name=extension_name)
    except Exception:  # noqa pylint: disable=broad-except
return False
return True
def _update_dev_spaces_extension(extension_name, extension_module):
from azure.cli.core.extensions import ExtensionNotInstalledException
try:
from azure.cli.core.extensions import operations
operations.update_extension(extension_name=extension_name)
# reloading the imported module to update
try:
from importlib import reload
except ImportError:
pass # for python 2
reload(sys.modules[extension_module])
except CLIError as err:
logger.info(err)
except ExtensionNotInstalledException as err:
logger.debug(err)
return False
except ModuleNotFoundError as err:
logger.debug(err)
logger.error("Error occurred attempting to load the extension module. Use --debug for more information.")
return False
return True
def _get_or_add_extension(extension_name, extension_module, update=False):
from azure.cli.core.extensions import (ExtensionNotInstalledException, get_extension)
try:
get_extension(extension_name)
if update:
return _update_dev_spaces_extension(extension_name, extension_module)
except ExtensionNotInstalledException:
return _install_dev_spaces_extension(extension_name)
return True
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
# log analytics workspaces cannot be created in WCUS region due to capacity limits
# so mapped to EUS per discussion with log analytics team
AzureLocationToOmsRegionCodeMap = {
"eastus": "EUS",
"westeurope": "WEU",
"southeastasia": "SEA",
"australiasoutheast": "ASE",
"usgovvirginia": "USGV",
"westcentralus": "EUS",
"japaneast": "EJP",
"uksouth": "SUK",
"canadacentral": "CCA",
"centralindia": "CIN",
"eastus2euap": "EAP"
}
AzureRegionToOmsRegionMap = {
"australiaeast": "australiasoutheast",
"australiasoutheast": "australiasoutheast",
"brazilsouth": "eastus",
"canadacentral": "canadacentral",
"canadaeast": "canadacentral",
"centralus": "eastus",
"eastasia": "southeastasia",
"eastus": "eastus",
"eastus2": "eastus",
"japaneast": "japaneast",
"japanwest": "japaneast",
"northcentralus": "eastus",
"northeurope": "westeurope",
"southcentralus": "eastus",
"southeastasia": "southeastasia",
"uksouth": "uksouth",
"ukwest": "uksouth",
"westcentralus": "eastus",
"westeurope": "westeurope",
"westus": "eastus",
"westus2": "eastus",
"centralindia": "centralindia",
"southindia": "centralindia",
"westindia": "centralindia",
"koreacentral": "southeastasia",
"koreasouth": "southeastasia",
"francecentral": "westeurope",
"francesouth": "westeurope"
}
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
default_region_name = "eastus"
default_region_code = "EUS"
    workspace_region = AzureRegionToOmsRegionMap.get(rg_location, default_region_name)
    workspace_region_code = AzureLocationToOmsRegionCodeMap.get(workspace_region, default_region_code)
default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(subscription_id, workspace_region_code)
default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
'/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name)
resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
resources = cf_resources(cmd.cli_ctx, subscription_id)
# check if default RG exists
if resource_groups.check_existence(default_workspace_resource_group):
try:
resource = resources.get_by_id(default_workspace_resource_id, '2015-11-01-preview')
return resource.id
except CloudError as ex:
if ex.status_code != 404:
raise ex
else:
resource_groups.create_or_update(default_workspace_resource_group, {'location': workspace_region})
default_workspace_params = {
'location': workspace_region,
'properties': {
'sku': {
'name': 'standalone'
}
}
}
async_poller = resources.create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
default_workspace_params)
ws_resource_id = ''
while True:
result = async_poller.result(15)
if async_poller.done():
ws_resource_id = result.id
break
return ws_resource_id
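# Deploy the ContainerInsights solution into the resource group of the Log Analytics
# workspace referenced by the addon config, using an inline ARM template.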
def _ensure_container_insights_for_monitoring(cmd, addon):
workspace_resource_id = addon.config['logAnalyticsWorkspaceResourceID']
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
# extract subscription ID and resource group from workspace_resource_id URL
try:
subscription_id = workspace_resource_id.split('/')[2]
resource_group = workspace_resource_id.split('/')[4]
except IndexError:
raise CLIError('Could not locate resource group in workspace-resource-id URL.')
# region of workspace can be different from region of RG so find the location of the workspace_resource_id
resources = cf_resources(cmd.cli_ctx, subscription_id)
try:
resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview')
location = resource.location
except CloudError as ex:
raise ex
unix_time_in_millis = int(
(datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
solution_deployment_name = 'ContainerInsights-{}'.format(unix_time_in_millis)
# pylint: disable=line-too-long
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"workspaceResourceId": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics Resource ID"
}
},
"workspaceRegion": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics workspace region"
}
},
"solutionDeploymentName": {
"type": "string",
"metadata": {
"description": "Name of the solution deployment"
}
}
},
"resources": [
{
"type": "Microsoft.Resources/deployments",
"name": "[parameters('solutionDeploymentName')]",
"apiVersion": "2017-05-10",
"subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
"resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
"properties": {
"mode": "Incremental",
"template": {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {},
"variables": {},
"resources": [
{
"apiVersion": "2015-11-01-preview",
"type": "Microsoft.OperationsManagement/solutions",
"location": "[parameters('workspaceRegion')]",
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"properties": {
"workspaceResourceId": "[parameters('workspaceResourceId')]"
},
"plan": {
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"product": "[Concat('OMSGallery/', 'ContainerInsights')]",
"promotionCode": "",
"publisher": "Microsoft"
}
}
]
},
"parameters": {}
}
}
]
}
params = {
"workspaceResourceId": {
"value": workspace_resource_id
},
"workspaceRegion": {
"value": location
},
"solutionDeploymentName": {
"value": solution_deployment_name
}
}
deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
# publish the Container Insights solution to the Log Analytics workspace
return _invoke_deployment(cmd.cli_ctx, resource_group, deployment_name, template, params,
validate=False, no_wait=False, subscription_id=subscription_id)
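# Resolve the service principal used by the AKS cluster: when --service-principal is not
# given, reuse credentials cached in aksServicePrincipal.json or create a new SPN; when it
# is given, --client-secret must be supplied too. The result is cached back to disk.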
def _ensure_aks_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
file_name_aks = 'aksServicePrincipal.json'
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, try to load it from local disk
principal_obj = load_acs_service_principal(subscription_id, file_name=file_name_aks)
if principal_obj:
service_principal = principal_obj.get('service_principal')
client_secret = principal_obj.get('client_secret')
else:
# Nothing to load, make one.
if not client_secret:
client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8')
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# We don't need to add role assignment for this created SPN
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
store_acs_service_principal(subscription_id, client_secret, service_principal, file_name=file_name_aks)
return load_acs_service_principal(subscription_id, file_name=file_name_aks)
def _ensure_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, try to load it from local disk
principal_obj = load_acs_service_principal(subscription_id)
if principal_obj:
service_principal = principal_obj.get('service_principal')
client_secret = principal_obj.get('client_secret')
else:
# Nothing to load, make one.
if not client_secret:
client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8')
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# add role first before save it
if not _add_role_assignment(cli_ctx, 'Contributor', service_principal):
logger.warning('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
store_acs_service_principal(subscription_id, client_secret, service_principal)
return load_acs_service_principal(subscription_id)
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
groups = cf_resource_groups(ctx, subscription_id=subscription_id)
# Just do the get, we don't need the result, it will error out if the group doesn't exist.
rg = groups.get(resource_group_name)
return rg.location
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing):
"""Merge an unencrypted kubeconfig into the file at the specified path, or print it to
stdout if the path is "-".
"""
# Special case for printing to stdout
if path == "-":
print(kubeconfig)
return
# ensure that at least an empty ~/.kube/config exists
directory = os.path.dirname(path)
if directory and not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if not os.path.exists(path):
with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
pass
# merge the new kubeconfig into the existing one
fd, temp_path = tempfile.mkstemp()
additional_file = os.fdopen(fd, 'w+t')
try:
additional_file.write(kubeconfig)
additional_file.flush()
merge_kubernetes_configurations(path, temp_path, overwrite_existing)
except yaml.YAMLError as ex:
logger.warning('Failed to merge credentials to kube config file: %s', ex)
finally:
additional_file.close()
os.remove(temp_path)
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
    This works around a quirk in the behavior of the Python SDK: these fields are not sent
    by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def _validate_aci_location(norm_location):
"""
Validate the Azure Container Instance location
"""
aci_locations = [
"centralus",
"eastus",
"eastus2",
"westus",
"westus2",
"northeurope",
"westeurope",
"southeastasia",
"australiaeast"
]
if norm_location not in aci_locations:
raise CLIError('Azure Container Instance is not available at location "{}".'.format(norm_location) +
' The available locations are "{}"'.format(','.join(aci_locations)))
|
[] |
[] |
[
"ACC_TERM_ID",
"PATH"
] |
[]
|
["ACC_TERM_ID", "PATH"]
|
python
| 2 | 0 | |
var/spack/repos/builtin/packages/mfem/package.py
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import shutil
import sys
from spack import *
class Mfem(Package, CudaPackage, ROCmPackage):
"""Free, lightweight, scalable C++ library for finite element methods."""
tags = ['fem', 'finite-elements', 'high-order', 'amr', 'hpc', 'radiuss', 'e4s']
homepage = 'http://www.mfem.org'
git = 'https://github.com/mfem/mfem.git'
maintainers = ['v-dobrev', 'tzanio', 'acfisher',
'goxberry', 'markcmiller86']
test_requires_compiler = True
# Recommended mfem builds to test when updating this file: see the shell
# script 'test_builds.sh' in the same directory as this file.
# mfem is downloaded from a URL shortener at request of upstream
# author Tzanio Kolev <[email protected]>. See here:
# https://github.com/mfem/mfem/issues/53
#
# The following procedure should be used to verify security when a
# new version is added:
#
# 1. Verify that no checksums on old versions have changed.
#
# 2. Verify that the shortened URL for the new version is listed at:
# https://mfem.org/download/
#
# 3. Use http://getlinkinfo.com or similar to verify that the
    #       underlying download link for the latest version has the
    #       prefix: http://mfem.github.io/releases
#
# If this quick verification procedure fails, additional discussion
# will be required to verify the new version.
# 'develop' is a special version that is always larger (or newer) than any
# other version.
version('develop', branch='master')
version('4.3.0',
sha256='3a495602121b986049286ea0b23512279cdbdfb43c15c42a1511b521051fbe38',
url='https://bit.ly/mfem-4-3', extension='tar.gz')
version('4.2.0',
'4352a225b55948d2e73a5ee88cece0e88bdbe7ba6726a23d68b2736d3221a86d',
url='https://bit.ly/mfem-4-2', extension='tar.gz')
version('4.1.0',
'4c83fdcf083f8e2f5b37200a755db843cdb858811e25a8486ad36b2cbec0e11d',
url='https://bit.ly/mfem-4-1', extension='tar.gz')
# Tagged development version used by xSDK
version('4.0.1-xsdk', commit='c55c80d17b82d80de04b849dd526e17044f8c99a')
version('4.0.0',
'df5bdac798ea84a263979f6fbf79de9013e1c55562f95f98644c3edcacfbc727',
url='https://bit.ly/mfem-4-0', extension='tar.gz')
# Tagged development version used by the laghos package:
version('3.4.1-laghos-v2.0', tag='laghos-v2.0')
version('3.4.0',
sha256='4e73e4fe0482636de3c5dc983cd395839a83cb16f6f509bd88b053e8b3858e05',
url='https://bit.ly/mfem-3-4', extension='tar.gz')
version('3.3.2',
sha256='b70fa3c5080b9ec514fc05f4a04ff74322b99ac4ecd6d99c229f0ed5188fc0ce',
url='https://goo.gl/Kd7Jk8', extension='tar.gz')
# Tagged development version used by the laghos package:
version('3.3.1-laghos-v1.0', tag='laghos-v1.0')
version('3.3',
sha256='b17bd452593aada93dc0fee748fcfbbf4f04ce3e7d77fdd0341cc9103bcacd0b',
url='http://goo.gl/Vrpsns', extension='tar.gz')
version('3.2',
sha256='2938c3deed4ec4f7fd5b5f5cfe656845282e86e2dcd477d292390058b7b94340',
url='http://goo.gl/Y9T75B', extension='tar.gz')
version('3.1',
sha256='841ea5cf58de6fae4de0f553b0e01ebaab9cd9c67fa821e8a715666ecf18fc57',
url='http://goo.gl/xrScXn', extension='tar.gz')
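    # A typical spec, for illustration only (the exact variant mix depends on the
    # application; see the variants and conflicts below):
    #   spack install mfem@4.3.0 +mpi+metis+superlu-dist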
variant('static', default=True,
description='Build static library')
variant('shared', default=False,
description='Build shared library')
variant('mpi', default=True,
description='Enable MPI parallelism')
    # Can we make the default value for 'metis' depend on the 'mpi' value?
variant('metis', default=True,
description='Enable METIS support')
variant('openmp', default=False,
description='Enable OpenMP parallelism')
# Note: '+cuda' and 'cuda_arch' variants are added by the CudaPackage
# Note: '+rocm' and 'amdgpu_target' variants are added by the ROCmPackage
variant('occa', default=False, description='Enable OCCA backend')
variant('raja', default=False, description='Enable RAJA backend')
variant('libceed', default=False, description='Enable libCEED backend')
variant('umpire', default=False, description='Enable Umpire support')
variant('amgx', default=False, description='Enable NVIDIA AmgX solver support')
variant('threadsafe', default=False,
description=('Enable thread safe features.'
' Required for OpenMP.'
' May cause minor performance issues.'))
variant('superlu-dist', default=False,
description='Enable MPI parallel, sparse direct solvers')
variant('strumpack', default=False,
description='Enable support for STRUMPACK')
variant('suite-sparse', default=False,
description='Enable serial, sparse direct solvers')
variant('petsc', default=False,
description='Enable PETSc solvers, preconditioners, etc.')
variant('slepc', default=False,
description='Enable SLEPc integration')
variant('sundials', default=False,
description='Enable Sundials time integrators')
variant('pumi', default=False,
description='Enable functionality based on PUMI')
variant('gslib', default=False,
description='Enable functionality based on GSLIB')
variant('mpfr', default=False,
description='Enable precise, 1D quadrature rules')
variant('lapack', default=False,
description='Use external blas/lapack routines')
variant('debug', default=False,
description='Build debug instead of optimized version')
variant('netcdf', default=False,
description='Enable Cubit/Genesis reader')
variant('conduit', default=False,
description='Enable binary data I/O using Conduit')
variant('zlib', default=True,
description='Support zip\'d streams for I/O')
variant('gnutls', default=False,
description='Enable secure sockets using GnuTLS')
variant('libunwind', default=False,
description='Enable backtrace on error support using Libunwind')
# TODO: SIMD, Ginkgo, ADIOS2, HiOp, MKL CPardiso, Axom/Sidre
variant('timer', default='auto',
values=('auto', 'std', 'posix', 'mac', 'mpi'),
description='Timing functions to use in mfem::StopWatch')
variant('examples', default=False,
description='Build and install examples')
variant('miniapps', default=False,
description='Build and install miniapps')
conflicts('+shared', when='@:3.3.2')
conflicts('~static~shared')
conflicts('~threadsafe', when='@:3+openmp')
conflicts('+cuda', when='@:3')
conflicts('+rocm', when='@:4.1')
conflicts('+cuda+rocm')
conflicts('+netcdf', when='@:3.1')
conflicts('+superlu-dist', when='@:3.1')
    # STRUMPACK support was added in mfem v3.3.2; however, here we allow only
    # strumpack v3+, support for which is available starting with mfem v4.0:
conflicts('+strumpack', when='@:3')
conflicts('+gnutls', when='@:3.1')
conflicts('+zlib', when='@:3.2')
conflicts('+mpfr', when='@:3.2')
conflicts('+petsc', when='@:3.2')
conflicts('+slepc', when='@:4.1')
conflicts('+sundials', when='@:3.2')
conflicts('+pumi', when='@:3.3.2')
conflicts('+gslib', when='@:4.0')
conflicts('timer=mac', when='@:3.3.0')
conflicts('timer=mpi', when='@:3.3.0')
conflicts('~metis+mpi', when='@:3.3.0')
conflicts('+metis~mpi', when='@:3.3.0')
conflicts('+conduit', when='@:3.3.2')
conflicts('+occa', when='mfem@:3')
conflicts('+raja', when='mfem@:3')
conflicts('+libceed', when='mfem@:4.0')
conflicts('+umpire', when='mfem@:4.0')
conflicts('+amgx', when='mfem@:4.1')
conflicts('+amgx', when='~cuda')
conflicts('+superlu-dist', when='~mpi')
conflicts('+strumpack', when='~mpi')
conflicts('+petsc', when='~mpi')
conflicts('+slepc', when='~petsc')
conflicts('+pumi', when='~mpi')
conflicts('timer=mpi', when='~mpi')
depends_on('mpi', when='+mpi')
depends_on('[email protected]:2.13', when='@:3.3+mpi')
depends_on('hypre@:2.20.0', when='@3.4:4.2+mpi')
depends_on('hypre@:2.23.0', when='@4.3.0+mpi')
depends_on('hypre', when='+mpi')
depends_on('metis', when='+metis')
depends_on('blas', when='+lapack')
depends_on('[email protected]:', when='+lapack')
depends_on('[email protected]', when='@:3.3.0+sundials~mpi')
depends_on('[email protected]+mpi+hypre', when='@:3.3.0+sundials+mpi')
depends_on('[email protected]:', when='@3.3.2:+sundials~mpi')
depends_on('[email protected]:+mpi+hypre', when='@3.3.2:+sundials+mpi')
depends_on('[email protected]:', when='@4.0.1-xsdk:+sundials~mpi')
depends_on('[email protected]:+mpi+hypre', when='@4.0.1-xsdk:+sundials+mpi')
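    # Forward each requested CUDA architecture to SUNDIALS so that device kernels are
    # built for the same targets as MFEM itself.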
for sm_ in CudaPackage.cuda_arch_values:
depends_on('[email protected]:+cuda cuda_arch={0}'.format(sm_),
when='@4.2.0:+sundials+cuda cuda_arch={0}'.format(sm_))
depends_on('[email protected]:', when='@4.2.0:+pumi')
depends_on('pumi', when='+pumi~shared')
depends_on('pumi+shared', when='+pumi+shared')
depends_on('[email protected]:+mpi', when='+gslib+mpi')
depends_on('[email protected]:~mpi~mpiio', when='+gslib~mpi')
depends_on('suite-sparse', when='+suite-sparse')
depends_on('superlu-dist', when='+superlu-dist')
depends_on('[email protected]:', when='+strumpack~shared')
depends_on('[email protected]:+shared', when='+strumpack+shared')
for sm_ in CudaPackage.cuda_arch_values:
depends_on('strumpack+cuda cuda_arch={0}'.format(sm_),
when='+strumpack+cuda cuda_arch={0}'.format(sm_))
# The PETSc tests in MFEM will fail if PETSc is not configured with
# SuiteSparse and MUMPS. On the other hand, if we require the variants
# '+suite-sparse+mumps' of PETSc, the xsdk package concretization fails.
depends_on('[email protected]:+mpi+double+hypre', when='+petsc')
depends_on('[email protected]:', when='+slepc')
# Recommended when building outside of xsdk:
# depends_on('[email protected]:+mpi+double+hypre+suite-sparse+mumps',
# when='+petsc')
depends_on('mpfr', when='+mpfr')
depends_on('[email protected]:', when='+netcdf')
depends_on('unwind', when='+libunwind')
depends_on('zlib', when='+zlib')
depends_on('gnutls', when='+gnutls')
depends_on('[email protected]:,master:', when='+conduit')
depends_on('conduit+mpi', when='+conduit+mpi')
# The MFEM 4.0.0 SuperLU interface fails when using [email protected] and
# [email protected]. See https://github.com/mfem/mfem/issues/983.
# This issue was resolved in v4.1.
conflicts('+superlu-dist',
when='mfem@:4.0 ^[email protected]: ^superlu-dist@6:')
# The STRUMPACK v3 interface in MFEM seems to be broken as of MFEM v4.1
# when using hypre version >= 2.16.0.
# This issue is resolved in v4.2.
conflicts('+strumpack', when='[email protected]:4.1 ^[email protected]:')
conflicts('+strumpack ^strumpack+cuda', when='~cuda')
depends_on('[email protected]:', when='@:4.1+occa')
depends_on('[email protected]:', when='@4.2.0:+occa')
depends_on('occa+cuda', when='+occa+cuda')
# TODO: propagate '+rocm' variant to occa when it is supported
depends_on('[email protected]:', when='@4.0.1:+raja')
depends_on('[email protected]:0.9.0', when='@4.0.0+raja')
for sm_ in CudaPackage.cuda_arch_values:
depends_on('raja+cuda cuda_arch={0}'.format(sm_),
when='+raja+cuda cuda_arch={0}'.format(sm_))
for gfx in ROCmPackage.amdgpu_targets:
depends_on('raja+rocm amdgpu_target={0}'.format(gfx),
when='+raja+rocm amdgpu_target={0}'.format(gfx))
depends_on('[email protected]:', when='@:4.1+libceed')
depends_on('[email protected]:', when='@4.2.0:+libceed')
for sm_ in CudaPackage.cuda_arch_values:
depends_on('libceed+cuda cuda_arch={0}'.format(sm_),
when='+libceed+cuda cuda_arch={0}'.format(sm_))
for gfx in ROCmPackage.amdgpu_targets:
depends_on('libceed+rocm amdgpu_target={0}'.format(gfx),
when='+libceed+rocm amdgpu_target={0}'.format(gfx))
depends_on('[email protected]:', when='+umpire')
for sm_ in CudaPackage.cuda_arch_values:
depends_on('umpire+cuda cuda_arch={0}'.format(sm_),
when='+umpire+cuda cuda_arch={0}'.format(sm_))
for gfx in ROCmPackage.amdgpu_targets:
depends_on('umpire+rocm amdgpu_target={0}'.format(gfx),
when='+umpire+rocm amdgpu_target={0}'.format(gfx))
# AmgX: propagate the cuda_arch and mpi settings:
for sm_ in CudaPackage.cuda_arch_values:
depends_on('amgx+mpi cuda_arch={0}'.format(sm_),
when='+amgx+mpi cuda_arch={0}'.format(sm_))
depends_on('amgx~mpi cuda_arch={0}'.format(sm_),
when='+amgx~mpi cuda_arch={0}'.format(sm_))
patch('mfem_ppc_build.patch', when='@3.2:3.3.0 arch=ppc64le')
patch('mfem-3.4.patch', when='@3.4.0')
patch('mfem-3.3-3.4-petsc-3.9.patch',
when='@3.3.0:3.4.0 +petsc ^[email protected]:')
patch('mfem-4.2-umpire.patch', when='@4.2.0+umpire')
patch('mfem-4.2-slepc.patch', when='@4.2.0+slepc')
patch('mfem-4.2-petsc-3.15.0.patch', when='@4.2.0+petsc ^[email protected]:')
patch('mfem-4.3-hypre-2.23.0.patch', when='@4.3.0')
# Patch to fix MFEM makefile syntax error. See
# https://github.com/mfem/mfem/issues/1042 for the bug report and
# https://github.com/mfem/mfem/pull/1043 for the bugfix contributed
# upstream.
patch('mfem-4.0.0-makefile-syntax-fix.patch', when='@4.0.0')
phases = ['configure', 'build', 'install']
def setup_build_environment(self, env):
env.unset('MFEM_DIR')
env.unset('MFEM_BUILD_DIR')
#
# Note: Although MFEM does support CMake configuration, the MFEM
# development team indicates that vanilla GNU Make is the preferred
# mode of configuring MFEM and the mode most likely to be up to date
# in supporting *all* of MFEM's configuration options. So, don't use
# CMake.
#
def configure(self, spec, prefix):
def yes_no(varstr):
return 'YES' if varstr in self.spec else 'NO'
# See also find_system_libraries in lib/spack/llnl/util/filesystem.py
# where the same list of paths is used.
sys_lib_paths = [
'/lib64',
'/lib',
'/usr/lib64',
'/usr/lib',
'/usr/local/lib64',
'/usr/local/lib']
def is_sys_lib_path(dir):
return dir in sys_lib_paths
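# When compiling with nvcc (+cuda), host compiler and linker flags must
# be forwarded with '-Xcompiler=' / '-Xlinker='.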
xcompiler = ''
xlinker = '-Wl,'
if '+cuda' in spec:
xcompiler = '-Xcompiler='
xlinker = '-Xlinker='
cuda_arch = spec.variants['cuda_arch'].value
# We need to add rpaths explicitly to allow proper export of link flags
# from within MFEM.
# Similar to spec[pkg].libs.ld_flags but prepends rpath flags too.
# Also does not add system library paths as defined by 'sys_lib_paths'
# above -- this is done to avoid issues like this:
# https://github.com/mfem/mfem/issues/1088.
def ld_flags_from_library_list(libs_list):
flags = ['%s-rpath,%s' % (xlinker, dir)
for dir in libs_list.directories
if not is_sys_lib_path(dir)]
flags += ['-L%s' % dir for dir in libs_list.directories
if not is_sys_lib_path(dir)]
flags += [libs_list.link_flags]
return ' '.join(flags)
def ld_flags_from_dirs(pkg_dirs_list, pkg_libs_list):
flags = ['%s-rpath,%s' % (xlinker, dir) for dir in pkg_dirs_list
if not is_sys_lib_path(dir)]
flags += ['-L%s' % dir for dir in pkg_dirs_list
if not is_sys_lib_path(dir)]
flags += ['-l%s' % lib for lib in pkg_libs_list]
return ' '.join(flags)
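# Search prefix/lib64 and prefix/lib for the named library, preferring a
# shared version over a static one; return an empty list if none is found.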
def find_optional_library(name, prefix):
for shared in [True, False]:
for path in ['lib64', 'lib']:
lib = find_libraries(name, join_path(prefix, path),
shared=shared, recursive=False)
if lib:
return lib
return LibraryList([])
# Determine how to run MPI tests, e.g. when using '--test=root', when
# Spack is run inside a batch system job.
mfem_mpiexec = 'mpirun'
mfem_mpiexec_np = '-np'
if 'SLURM_JOBID' in os.environ:
mfem_mpiexec = 'srun'
mfem_mpiexec_np = '-n'
elif 'LSB_JOBID' in os.environ:
if 'LLNL_COMPUTE_NODES' in os.environ:
mfem_mpiexec = 'lrun'
mfem_mpiexec_np = '-n'
else:
mfem_mpiexec = 'jsrun'
mfem_mpiexec_np = '-p'
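# MFEM_USE_METIS_5 must reflect whether the METIS dependency is v5, since
# the METIS 4 and METIS 5 APIs differ.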
metis5_str = 'NO'
if ('+metis' in spec) and spec['metis'].satisfies('@5:'):
metis5_str = 'YES'
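# The zlib support option was renamed from MFEM_USE_GZSTREAM to
# MFEM_USE_ZLIB in MFEM 4.1.0.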
zlib_var = 'MFEM_USE_ZLIB' if (spec.satisfies('@4.1.0:')) else \
'MFEM_USE_GZSTREAM'
options = [
'PREFIX=%s' % prefix,
'MFEM_USE_MEMALLOC=YES',
'MFEM_DEBUG=%s' % yes_no('+debug'),
# NOTE: env['CXX'] is the spack c++ compiler wrapper. The real
# compiler is defined by env['SPACK_CXX'].
'CXX=%s' % env['CXX'],
'MFEM_USE_LIBUNWIND=%s' % yes_no('+libunwind'),
'%s=%s' % (zlib_var, yes_no('+zlib')),
'MFEM_USE_METIS=%s' % yes_no('+metis'),
'MFEM_USE_METIS_5=%s' % metis5_str,
'MFEM_THREAD_SAFE=%s' % yes_no('+threadsafe'),
'MFEM_USE_MPI=%s' % yes_no('+mpi'),
'MFEM_USE_LAPACK=%s' % yes_no('+lapack'),
'MFEM_USE_SUPERLU=%s' % yes_no('+superlu-dist'),
'MFEM_USE_STRUMPACK=%s' % yes_no('+strumpack'),
'MFEM_USE_SUITESPARSE=%s' % yes_no('+suite-sparse'),
'MFEM_USE_SUNDIALS=%s' % yes_no('+sundials'),
'MFEM_USE_PETSC=%s' % yes_no('+petsc'),
'MFEM_USE_SLEPC=%s' % yes_no('+slepc'),
'MFEM_USE_PUMI=%s' % yes_no('+pumi'),
'MFEM_USE_GSLIB=%s' % yes_no('+gslib'),
'MFEM_USE_NETCDF=%s' % yes_no('+netcdf'),
'MFEM_USE_MPFR=%s' % yes_no('+mpfr'),
'MFEM_USE_GNUTLS=%s' % yes_no('+gnutls'),
'MFEM_USE_OPENMP=%s' % yes_no('+openmp'),
'MFEM_USE_CONDUIT=%s' % yes_no('+conduit'),
'MFEM_USE_CUDA=%s' % yes_no('+cuda'),
'MFEM_USE_HIP=%s' % yes_no('+rocm'),
'MFEM_USE_OCCA=%s' % yes_no('+occa'),
'MFEM_USE_RAJA=%s' % yes_no('+raja'),
'MFEM_USE_AMGX=%s' % yes_no('+amgx'),
'MFEM_USE_CEED=%s' % yes_no('+libceed'),
'MFEM_USE_UMPIRE=%s' % yes_no('+umpire'),
'MFEM_MPIEXEC=%s' % mfem_mpiexec,
'MFEM_MPIEXEC_NP=%s' % mfem_mpiexec_np]
cxxflags = spec.compiler_flags['cxxflags']
if cxxflags:
# Add opt/debug flags if they are not present in global cxx flags
opt_flag_found = any(f in self.compiler.opt_flags
for f in cxxflags)
debug_flag_found = any(f in self.compiler.debug_flags
for f in cxxflags)
if '+debug' in spec:
if not debug_flag_found:
cxxflags.append('-g')
if not opt_flag_found:
cxxflags.append('-O0')
else:
if not opt_flag_found:
cxxflags.append('-O2')
cxxflags = [(xcompiler + flag) for flag in cxxflags]
if '+cuda' in spec:
cxxflags += [
'-x=cu --expt-extended-lambda -arch=sm_%s' % cuda_arch,
'-ccbin %s' % (spec['mpi'].mpicxx if '+mpi' in spec
else env['CXX'])]
if self.spec.satisfies('@4.0.0:'):
cxxflags.append(self.compiler.cxx11_flag)
# The cxxflags are set by the spack c++ compiler wrapper. We also
# set CXXFLAGS explicitly, for clarity, and to properly export the
# cxxflags in the variable MFEM_CXXFLAGS in config.mk.
options += ['CXXFLAGS=%s' % ' '.join(cxxflags)]
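# Translate the Spack 'static'/'shared' variants into MFEM's STATIC/SHARED
# make options; shared builds also need a position-independent-code flag.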
if '~static' in spec:
options += ['STATIC=NO']
if '+shared' in spec:
options += [
'SHARED=YES',
'PICFLAG=%s' % (xcompiler + self.compiler.cxx_pic_flag)]
if '+mpi' in spec:
options += ['MPICXX=%s' % spec['mpi'].mpicxx]
hypre = spec['hypre']
# The hypre package always links with 'blas' and 'lapack'.
all_hypre_libs = hypre.libs + hypre['lapack'].libs + \
hypre['blas'].libs
options += [
'HYPRE_OPT=-I%s' % hypre.prefix.include,
'HYPRE_LIB=%s' % ld_flags_from_library_list(all_hypre_libs)]
if '+metis' in spec:
options += [
'METIS_OPT=-I%s' % spec['metis'].prefix.include,
'METIS_LIB=%s' %
ld_flags_from_library_list(spec['metis'].libs)]
if '+lapack' in spec:
lapack_blas = spec['lapack'].libs + spec['blas'].libs
options += [
# LAPACK_OPT is not used
'LAPACK_LIB=%s' % ld_flags_from_library_list(lapack_blas)]
if '+superlu-dist' in spec:
lapack_blas = spec['lapack'].libs + spec['blas'].libs
options += [
'SUPERLU_OPT=-I%s -I%s' %
(spec['superlu-dist'].prefix.include,
spec['parmetis'].prefix.include),
'SUPERLU_LIB=%s %s' %
(ld_flags_from_dirs([spec['superlu-dist'].prefix.lib,
spec['parmetis'].prefix.lib],
['superlu_dist', 'parmetis']),
ld_flags_from_library_list(lapack_blas))]
if '+strumpack' in spec:
strumpack = spec['strumpack']
sp_opt = ['-I%s' % strumpack.prefix.include]
sp_lib = [ld_flags_from_library_list(strumpack.libs)]
# Parts of STRUMPACK use fortran, so we need to link with the
# fortran library and also the MPI fortran library:
if '~shared' in strumpack:
if os.path.basename(env['FC']) == 'gfortran':
gfortran = Executable(env['FC'])
libext = 'dylib' if sys.platform == 'darwin' else 'so'
libfile = os.path.abspath(gfortran(
'-print-file-name=libgfortran.%s' % libext,
output=str).strip())
gfortran_lib = LibraryList(libfile)
sp_lib += [ld_flags_from_library_list(gfortran_lib)]
if ('^mpich' in strumpack) or ('^mvapich2' in strumpack):
sp_lib += ['-lmpifort']
elif '^openmpi' in strumpack:
sp_lib += ['-lmpi_mpifh']
elif '^spectrum-mpi' in strumpack:
sp_lib += ['-lmpi_ibm_mpifh']
if '+openmp' in strumpack:
# The '+openmp' in the spec means strumpack will TRY to find
# OpenMP; if not found, we should not add any flags -- how do
# we figure out if strumpack found OpenMP?
if not self.spec.satisfies('%apple-clang'):
sp_opt += [xcompiler + self.compiler.openmp_flag]
if '^parmetis' in strumpack:
parmetis = strumpack['parmetis']
sp_opt += [parmetis.headers.cpp_flags]
sp_lib += [ld_flags_from_library_list(parmetis.libs)]
if '^netlib-scalapack' in strumpack:
scalapack = strumpack['scalapack']
sp_opt += ['-I%s' % scalapack.prefix.include]
sp_lib += [ld_flags_from_dirs([scalapack.prefix.lib],
['scalapack'])]
elif '^scalapack' in strumpack:
scalapack = strumpack['scalapack']
sp_opt += [scalapack.headers.cpp_flags]
sp_lib += [ld_flags_from_library_list(scalapack.libs)]
if '+butterflypack' in strumpack:
bp = strumpack['butterflypack']
sp_opt += ['-I%s' % bp.prefix.include]
sp_lib += [ld_flags_from_dirs([bp.prefix.lib],
['dbutterflypack',
'zbutterflypack'])]
if '+zfp' in strumpack:
zfp = strumpack['zfp']
sp_opt += ['-I%s' % zfp.prefix.include]
sp_lib += [ld_flags_from_dirs([zfp.prefix.lib], ['zfp'])]
if '+cuda' in strumpack:
# assuming also ('+cuda' in spec)
sp_lib += ['-lcusolver', '-lcublas']
options += [
'STRUMPACK_OPT=%s' % ' '.join(sp_opt),
'STRUMPACK_LIB=%s' % ' '.join(sp_lib)]
if '+suite-sparse' in spec:
ss_spec = 'suite-sparse:' + self.suitesparse_components
options += [
'SUITESPARSE_OPT=-I%s' % spec[ss_spec].prefix.include,
'SUITESPARSE_LIB=%s' %
ld_flags_from_library_list(spec[ss_spec].libs)]
if '+sundials' in spec:
sun_spec = 'sundials:' + self.sundials_components
options += [
'SUNDIALS_OPT=%s' % spec[sun_spec].headers.cpp_flags,
'SUNDIALS_LIB=%s' %
ld_flags_from_library_list(spec[sun_spec].libs)]
if '+petsc' in spec:
petsc = spec['petsc']
if '+shared' in petsc:
options += [
'PETSC_OPT=%s' % petsc.headers.cpp_flags,
'PETSC_LIB=%s' % ld_flags_from_library_list(petsc.libs)]
else:
options += ['PETSC_DIR=%s' % petsc.prefix]
if '+slepc' in spec:
slepc = spec['slepc']
options += [
'SLEPC_OPT=%s' % slepc.headers.cpp_flags,
'SLEPC_LIB=%s' % ld_flags_from_library_list(slepc.libs)]
if '+pumi' in spec:
pumi_libs = ['pumi', 'crv', 'ma', 'mds', 'apf', 'pcu', 'gmi',
'parma', 'lion', 'mth', 'apf_zoltan', 'spr']
options += [
'PUMI_OPT=-I%s' % spec['pumi'].prefix.include,
'PUMI_LIB=%s' %
ld_flags_from_dirs([spec['pumi'].prefix.lib], pumi_libs)]
if '+gslib' in spec:
options += [
'GSLIB_OPT=-I%s' % spec['gslib'].prefix.include,
'GSLIB_LIB=%s' %
ld_flags_from_dirs([spec['gslib'].prefix.lib], ['gs'])]
if '+netcdf' in spec:
lib_flags = ld_flags_from_dirs([spec['netcdf-c'].prefix.lib],
['netcdf'])
hdf5 = spec['hdf5:hl']
if hdf5.satisfies('~shared'):
hdf5_libs = hdf5.libs
hdf5_libs += LibraryList(find_system_libraries('libdl'))
lib_flags += " " + ld_flags_from_library_list(hdf5_libs)
options += [
'NETCDF_OPT=-I%s' % spec['netcdf-c'].prefix.include,
'NETCDF_LIB=%s' % lib_flags]
if '+zlib' in spec:
if "@:3.3.2" in spec:
options += ['ZLIB_DIR=%s' % spec['zlib'].prefix]
else:
options += [
'ZLIB_OPT=-I%s' % spec['zlib'].prefix.include,
'ZLIB_LIB=%s' %
ld_flags_from_library_list(spec['zlib'].libs)]
if '+mpfr' in spec:
options += [
'MPFR_OPT=-I%s' % spec['mpfr'].prefix.include,
'MPFR_LIB=%s' %
ld_flags_from_dirs([spec['mpfr'].prefix.lib], ['mpfr'])]
if '+gnutls' in spec:
options += [
'GNUTLS_OPT=-I%s' % spec['gnutls'].prefix.include,
'GNUTLS_LIB=%s' %
ld_flags_from_dirs([spec['gnutls'].prefix.lib], ['gnutls'])]
if '+libunwind' in spec:
libunwind = spec['unwind']
headers = find_headers('libunwind', libunwind.prefix.include)
headers.add_macro('-g')
libs = find_optional_library('libunwind', libunwind.prefix)
# When mfem uses libunwind, it also needs 'libdl'.
libs += LibraryList(find_system_libraries('libdl'))
options += [
'LIBUNWIND_OPT=%s' % headers.cpp_flags,
'LIBUNWIND_LIB=%s' % ld_flags_from_library_list(libs)]
if '+openmp' in spec:
options += [
'OPENMP_OPT=%s' % (xcompiler + self.compiler.openmp_flag)]
if '+cuda' in spec:
options += [
'CUDA_CXX=%s' % join_path(spec['cuda'].prefix, 'bin', 'nvcc'),
'CUDA_ARCH=sm_%s' % cuda_arch]
if '+rocm' in spec:
amdgpu_target = ','.join(spec.variants['amdgpu_target'].value)
options += [
'HIP_CXX=%s' % spec['hip'].hipcc,
'HIP_ARCH=%s' % amdgpu_target]
if '+occa' in spec:
options += ['OCCA_OPT=-I%s' % spec['occa'].prefix.include,
'OCCA_LIB=%s' %
ld_flags_from_dirs([spec['occa'].prefix.lib],
['occa'])]
if '+raja' in spec:
options += ['RAJA_OPT=-I%s' % spec['raja'].prefix.include,
'RAJA_LIB=%s' %
ld_flags_from_dirs([spec['raja'].prefix.lib],
['RAJA'])]
if '+amgx' in spec:
amgx = spec['amgx']
if '+shared' in amgx:
options += ['AMGX_OPT=-I%s' % amgx.prefix.include,
'AMGX_LIB=%s' %
ld_flags_from_library_list(amgx.libs)]
else:
options += ['AMGX_DIR=%s' % amgx.prefix]
if '+libceed' in spec:
options += ['CEED_OPT=-I%s' % spec['libceed'].prefix.include,
'CEED_LIB=%s' %
ld_flags_from_dirs([spec['libceed'].prefix.lib],
['ceed'])]
if '+umpire' in spec:
options += ['UMPIRE_OPT=-I%s' % spec['umpire'].prefix.include,
'UMPIRE_LIB=%s' %
ld_flags_from_library_list(spec['umpire'].libs)]
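# Map the 'timer' variant to MFEM's numeric MFEM_TIMER_TYPE codes; 'auto'
# leaves the choice to MFEM's own configure logic.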
timer_ids = {'std': '0', 'posix': '2', 'mac': '4', 'mpi': '6'}
timer = spec.variants['timer'].value
if timer != 'auto':
options += ['MFEM_TIMER_TYPE=%s' % timer_ids[timer]]
if '+conduit' in spec:
conduit = spec['conduit']
headers = HeaderList(find(conduit.prefix.include, 'conduit.hpp',
recursive=True))
conduit_libs = ['libconduit', 'libconduit_relay',
'libconduit_blueprint']
libs = find_libraries(conduit_libs, conduit.prefix.lib,
shared=('+shared' in conduit))
libs += LibraryList(find_system_libraries('libdl'))
if '+hdf5' in conduit:
hdf5 = conduit['hdf5']
headers += find_headers('hdf5', hdf5.prefix.include)
libs += hdf5.libs
##################
# cyrush note:
##################
# spack's HeaderList is applying too much magic, undermining us:
#
# It applies a regex to strip back to the last "include" dir
# in the path. In our case we need to pass the following
# as part of the CONDUIT_OPT flags:
#
# -I<install_path>/include/conduit
#
# I tried several ways to present this path to the HeaderList,
# but the regex always kills the trailing conduit dir
# breaking build.
#
# To resolve the issue, we simply join our own string with
# the headers results (which are important b/c they handle
# hdf5 paths when enabled).
##################
# construct proper include path
conduit_include_path = conduit.prefix.include.conduit
# add this path to the found flags
conduit_opt_flags = "-I{0} {1}".format(conduit_include_path,
headers.cpp_flags)
options += [
'CONDUIT_OPT=%s' % conduit_opt_flags,
'CONDUIT_LIB=%s' % ld_flags_from_library_list(libs)]
make('config', *options, parallel=False)
make('info', parallel=False)
def build(self, spec, prefix):
make('lib')
@run_after('build')
def check_or_test(self):
# Running 'make check' or 'make test' may fail if MFEM_MPIEXEC or
# MFEM_MPIEXEC_NP are not set appropriately.
if not self.run_tests:
# check we can build ex1 (~mpi) or ex1p (+mpi).
make('-C', 'examples', 'ex1p' if ('+mpi' in self.spec) else 'ex1',
parallel=False)
# make('check', parallel=False)
else:
make('all')
make('test', parallel=False)
def install(self, spec, prefix):
make('install', parallel=False)
# TODO: The way the examples and miniapps are being installed is not
# perfect. For example, the makefiles do not work.
install_em = ('+examples' in spec) or ('+miniapps' in spec)
if install_em and ('+shared' in spec):
make('examples/clean', 'miniapps/clean')
# This is a hack to get the examples and miniapps to link with the
# installed shared mfem library:
with working_dir('config'):
os.rename('config.mk', 'config.mk.orig')
copy(str(self.config_mk), 'config.mk')
shutil.copystat('config.mk.orig', 'config.mk')
prefix_share = join_path(prefix, 'share', 'mfem')
if '+examples' in spec:
make('examples')
install_tree('examples', join_path(prefix_share, 'examples'))
if '+miniapps' in spec:
make('miniapps')
install_tree('miniapps', join_path(prefix_share, 'miniapps'))
if install_em:
install_tree('data', join_path(prefix_share, 'data'))
examples_src_dir = 'examples'
examples_data_dir = 'data'
@run_after('install')
def cache_test_sources(self):
"""Copy the example source files after the package is installed to an
install test subdirectory for use during `spack test run`."""
self.cache_extra_test_sources([self.examples_src_dir,
self.examples_data_dir])
def test(self):
test_dir = join_path(
self.test_suite.current_test_cache_dir,
self.examples_src_dir
)
# MFEM has many examples to serve as a suitable smoke check. ex10
# was chosen arbitrarily among the examples that work both with
# MPI and without it.
test_exe = 'ex10p' if ('+mpi' in self.spec) else 'ex10'
self.run_test(
'make',
[
'CONFIG_MK={0}/share/mfem/config.mk'.format(self.prefix),
test_exe,
'parallel=False'
],
purpose='test: building {0}'.format(test_exe),
skip_missing=False,
work_dir=test_dir,
)
self.run_test(
'./{0}'.format(test_exe),
[
'--mesh',
'../{0}/beam-quad.mesh'.format(self.examples_data_dir)
],
[],
installed=False,
purpose='test: running {0}'.format(test_exe),
skip_missing=False,
work_dir=test_dir,
)
# this patch is only needed for mfem 4.1, where a few
# released files include byte order marks
@when('@4.1.0')
def patch(self):
# Remove the byte order mark since it messes with some compilers
files_with_bom = [
'fem/gslib.hpp', 'fem/gslib.cpp', 'linalg/hiop.hpp',
'miniapps/gslib/field-diff.cpp', 'miniapps/gslib/findpts.cpp',
'miniapps/gslib/pfindpts.cpp']
bom = '\xef\xbb\xbf' if sys.version_info < (3,) else u'\ufeff'
for f in files_with_bom:
filter_file(bom, '', f)
@property
def suitesparse_components(self):
"""Return the SuiteSparse components needed by MFEM."""
ss_comps = 'umfpack,cholmod,colamd,amd,camd,ccolamd,suitesparseconfig'
if self.spec.satisfies('@3.2:'):
ss_comps = 'klu,btf,' + ss_comps
return ss_comps
@property
def sundials_components(self):
"""Return the SUNDIALS components needed by MFEM."""
spec = self.spec
sun_comps = 'arkode,cvodes,nvecserial,kinsol'
if '+mpi' in spec:
if spec.satisfies('@4.2:'):
sun_comps += ',nvecparallel,nvecmpiplusx'
else:
sun_comps += ',nvecparhyp,nvecparallel'
if '+cuda' in spec and '+cuda' in spec['sundials']:
sun_comps += ',nveccuda'
return sun_comps
@property
def headers(self):
"""Export the main mfem header, mfem.hpp.
"""
hdrs = HeaderList(find(self.prefix.include, 'mfem.hpp',
recursive=False))
return hdrs or None
@property
def libs(self):
"""Export the mfem library file.
"""
libs = find_libraries('libmfem', root=self.prefix.lib,
shared=('+shared' in self.spec), recursive=False)
return libs or None
@property
def config_mk(self):
"""Export the location of the config.mk file.
This property can be accessed using spec['mfem'].package.config_mk
"""
dirs = [self.prefix, self.prefix.share.mfem]
for d in dirs:
f = join_path(d, 'config.mk')
if os.access(f, os.R_OK):
return FileList(f)
return FileList(find(self.prefix, 'config.mk', recursive=True))
@property
def test_mk(self):
"""Export the location of the test.mk file.
This property can be accessed using spec['mfem'].package.test_mk.
In version 3.3.2 and newer, the location of test.mk is also defined
inside config.mk, variable MFEM_TEST_MK.
"""
dirs = [self.prefix, self.prefix.share.mfem]
for d in dirs:
f = join_path(d, 'test.mk')
if os.access(f, os.R_OK):
return FileList(f)
return FileList(find(self.prefix, 'test.mk', recursive=True))
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
questions/59103840/synonymous_project/synonymous_project/wsgi.py
|
"""
WSGI config for synonymous_project project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'synonymous_project.settings')
application = get_wsgi_application()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
python/scorecard/SurgeonScorecard.py
|
import os
import pandas as pd
from pyspark import SparkContext, SparkConf
from pyspark.sql import SQLContext
from pyspark.sql.types import *
import Config
import Utils
import Cohort
import Model
import Readmission
#
# http://www.nodalpoint.com/spark-dataframes-from-csv-files/
#
def main():
# read properties files
cfg = Config.Config()
config = cfg.read_config(['local.properties', 'environ.properties'])
# validate the properties
cfg.validateProperties(config)
# get the current branch (from local.properties)
env = config.get('branch','env')
datadir = config.get(env+'.data','datadir')
resultdir = config.get(env+'.readmission','resultdir')
driver_memory = config.get(env+'.spark','driver_memory')
shuffle_partitions = config.get(env+'.spark','shuffle_paritions')
# Spark Configurations
pyspark_submit_args = ' --driver-memory ' + driver_memory + ' pyspark-shell'
os.environ["PYSPARK_SUBMIT_ARGS"] = pyspark_submit_args
conf = SparkConf()
conf.set("spark.master", "local[*]")
conf = conf.setAppName('Surgeon Scorecard')
sc = SparkContext(conf=conf)
sqlContext = SQLContext(sc)
sqlString = "set spark.sql.shuffle.partitions=" + shuffle_partitions
sqlContext.sql(sqlString)
# load utils
util = Utils.Utils(sqlContext)
# read in the data
data = util.loadRawData(sc, datadir)
# calculate some statistics related to primary diagnosis code
# write a file with counts of all condition_occurrence CONDITION_TYPE_CONCEPT_ID
df = util.conditionTypeConceptCount(sqlContext)
util.saveDataframeAsSingleFile(df,resultdir,"condition_type_concept_count.csv")
# write a file with counts of all procedure_occurrence PROCEDURE_TYPE_CONCEPT_ID
df = util.procedureTypeConceptCount(sqlContext)
util.saveDataframeAsSingleFile(df,resultdir,"procedure_type_concept_count.csv")
# create a cohort of users and their associated data
# The cohort and event data is filtered based on properties
cohort = Cohort.Cohort(data, config, sqlContext)
print("Number of patients in cohort: " + str(data['person'].count()))
# Find readmission events for this cohort for procedures of interest
readmit = Readmission.Readmission(data, config, sc, sqlContext)
# write the results to csv files
for key, value in readmit.providerProcedureInfoDfs.items():
print("Writing provider data for: " + key)
filename = key + ".csv"
util.saveDataframeAsSingleFile(value, resultdir, filename)
# calculate statistics related to codes
# write a file with counts of all diagnostic icd codes
readmit.writeCodesAndCount(sqlContext, readmit.diagnostic_codes, resultdir, 'procedure_count_all.txt', False)
# write a file with counts of all diagnostic icd codes where the code is primary
readmit.writeCodesAndCount(sqlContext, readmit.diagnostic_codes, resultdir, 'procedure_count_primary.txt', True)
# write a file with counts of all readmission icd codes
readmit.writeCodesAndCount(sqlContext, readmit.readmission_codes, resultdir, 'readmission_count_all.txt', False)
# write a file with counts of all readmission icd codes where the code is primary
readmit.writeCodesAndCount(sqlContext, readmit.readmission_codes, resultdir, 'readmission_count_primary.txt', True)
# write a file with counts of readmission events by code
readmit.writeReadmissionCodesAndCount(sqlContext,
readmit.readmission_codes,
readmit.readmissionDfs,
resultdir,
'readmission_event_code_count.txt')
if __name__ == "__main__":
main()
|
[] |
[] |
[
"PYSPARK_SUBMIT_ARGS"
] |
[]
|
["PYSPARK_SUBMIT_ARGS"]
|
python
| 1 | 0 | |
pkg/commands/oscommands/os.go
|
package oscommands
import (
"bufio"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"sync"
"github.com/go-errors/errors"
"github.com/atotto/clipboard"
"github.com/jesseduffield/lazygit/pkg/config"
"github.com/jesseduffield/lazygit/pkg/secureexec"
"github.com/jesseduffield/lazygit/pkg/utils"
"github.com/mgutz/str"
"github.com/sirupsen/logrus"
)
// Platform stores the os state
type Platform struct {
OS string
CatCmd []string
Shell string
ShellArg string
EscapedQuote string
OpenCommand string
OpenLinkCommand string
}
// OSCommand holds all the os commands
type OSCommand struct {
Log *logrus.Entry
Platform *Platform
Config config.AppConfigurer
Command func(string, ...string) *exec.Cmd
BeforeExecuteCmd func(*exec.Cmd)
Getenv func(string) string
}
// NewOSCommand os command runner
func NewOSCommand(log *logrus.Entry, config config.AppConfigurer) *OSCommand {
return &OSCommand{
Log: log,
Platform: getPlatform(),
Config: config,
Command: secureexec.Command,
BeforeExecuteCmd: func(*exec.Cmd) {},
Getenv: os.Getenv,
}
}
// SetCommand sets the command function used by the struct.
// To be used for testing only
func (c *OSCommand) SetCommand(cmd func(string, ...string) *exec.Cmd) {
c.Command = cmd
}
func (c *OSCommand) SetBeforeExecuteCmd(cmd func(*exec.Cmd)) {
c.BeforeExecuteCmd = cmd
}
type RunCommandOptions struct {
EnvVars []string
}
func (c *OSCommand) RunCommandWithOutputWithOptions(command string, options RunCommandOptions) (string, error) {
c.Log.WithField("command", command).Info("RunCommand")
cmd := c.ExecutableFromString(command)
cmd.Env = append(cmd.Env, "GIT_TERMINAL_PROMPT=0") // prevents git from prompting us for input which would freeze the program
cmd.Env = append(cmd.Env, options.EnvVars...)
return sanitisedCommandOutput(cmd.CombinedOutput())
}
func (c *OSCommand) RunCommandWithOptions(command string, options RunCommandOptions) error {
_, err := c.RunCommandWithOutputWithOptions(command, options)
return err
}
// RunCommandWithOutput is a wrapper around commands, returning their output and error.
// NOTE: If you don't pass any formatArgs we'll just use the command directly;
// however, there's a bizarre compiler error/warning when you pass in a format
// string containing a percent sign without arguments, because it is assumed to
// be a format string when in that case it's not. To get around that error,
// define the string in a variable and pass the variable into RunCommandWithOutput.
func (c *OSCommand) RunCommandWithOutput(formatString string, formatArgs ...interface{}) (string, error) {
command := formatString
if formatArgs != nil {
command = fmt.Sprintf(formatString, formatArgs...)
}
c.Log.WithField("command", command).Info("RunCommand")
cmd := c.ExecutableFromString(command)
output, err := sanitisedCommandOutput(cmd.CombinedOutput())
if err != nil {
c.Log.WithField("command", command).Error(output)
}
return output, err
}
func (c *OSCommand) CatFile(filename string) (string, error) {
arr := append(c.Platform.CatCmd, filename)
cmdStr := strings.Join(arr, " ")
c.Log.WithField("command", cmdStr).Info("Cat")
cmd := c.Command(arr[0], arr[1:]...)
output, err := sanitisedCommandOutput(cmd.CombinedOutput())
if err != nil {
c.Log.WithField("command", cmdStr).Error(output)
}
return output, err
}
// RunExecutableWithOutput runs an executable file and returns its output
func (c *OSCommand) RunExecutableWithOutput(cmd *exec.Cmd) (string, error) {
c.BeforeExecuteCmd(cmd)
return sanitisedCommandOutput(cmd.CombinedOutput())
}
// RunExecutable runs an executable file and returns an error if there was one
func (c *OSCommand) RunExecutable(cmd *exec.Cmd) error {
_, err := c.RunExecutableWithOutput(cmd)
return err
}
// ExecutableFromString takes a string like `git status` and returns an executable command for it
func (c *OSCommand) ExecutableFromString(commandStr string) *exec.Cmd {
splitCmd := str.ToArgv(commandStr)
cmd := c.Command(splitCmd[0], splitCmd[1:]...)
cmd.Env = append(os.Environ(), "GIT_OPTIONAL_LOCKS=0")
return cmd
}
// ShellCommandFromString takes a string like `git commit` and returns an executable shell command for it
func (c *OSCommand) ShellCommandFromString(commandStr string) *exec.Cmd {
quotedCommand := ""
// Windows does not seem to like quotes around the command
if c.Platform.OS == "windows" {
quotedCommand = commandStr
} else {
quotedCommand = c.Quote(commandStr)
}
shellCommand := fmt.Sprintf("%s %s %s", c.Platform.Shell, c.Platform.ShellArg, quotedCommand)
return c.ExecutableFromString(shellCommand)
}
// RunCommandWithOutputLive runs RunCommandWithOutputLiveWrapper
func (c *OSCommand) RunCommandWithOutputLive(command string, output func(string) string) error {
return RunCommandWithOutputLiveWrapper(c, command, output)
}
// DetectUnamePass detects a username / password / passphrase question in a command.
// promptUserForCredential is a function that gets executed when this function detects you need to fill in a password or passphrase.
// The promptUserForCredential argument will be "username", "password" or "passphrase" and expects the user's username/password/passphrase back.
func (c *OSCommand) DetectUnamePass(command string, promptUserForCredential func(string) string) error {
ttyText := ""
errMessage := c.RunCommandWithOutputLive(command, func(word string) string {
ttyText = ttyText + " " + word
prompts := map[string]string{
`.+'s password:`: "password",
`Password\s*for\s*'.+':`: "password",
`Username\s*for\s*'.+':`: "username",
`Enter\s*passphrase\s*for\s*key\s*'.+':`: "passphrase",
}
for pattern, askFor := range prompts {
if match, _ := regexp.MatchString(pattern, ttyText); match {
ttyText = ""
return promptUserForCredential(askFor)
}
}
return ""
})
return errMessage
}
// RunCommand runs a command and just returns the error
func (c *OSCommand) RunCommand(formatString string, formatArgs ...interface{}) error {
_, err := c.RunCommandWithOutput(formatString, formatArgs...)
return err
}
// RunShellCommand runs shell commands i.e. 'sh -c <command>'. Good for when you
// need access to the shell
func (c *OSCommand) RunShellCommand(command string) error {
c.Log.WithField("command", command).Info("RunShellCommand")
cmd := c.Command(c.Platform.Shell, c.Platform.ShellArg, command)
_, err := sanitisedCommandOutput(cmd.CombinedOutput())
return err
}
// FileType tells us if the file is a file, directory or other
func (c *OSCommand) FileType(path string) string {
fileInfo, err := os.Stat(path)
if err != nil {
return "other"
}
if fileInfo.IsDir() {
return "directory"
}
return "file"
}
func sanitisedCommandOutput(output []byte, err error) (string, error) {
outputString := string(output)
if err != nil {
// errors like 'exit status 1' are not very useful so we'll create an error
// from the combined output
if outputString == "" {
return "", utils.WrapError(err)
}
return outputString, errors.New(outputString)
}
return outputString, nil
}
// OpenFile opens a file with the user-configured open command
func (c *OSCommand) OpenFile(filename string) error {
commandTemplate := c.Config.GetUserConfig().OS.OpenCommand
templateValues := map[string]string{
"filename": c.Quote(filename),
}
command := utils.ResolvePlaceholderString(commandTemplate, templateValues)
err := c.RunCommand(command)
return err
}
// OpenLink opens a link with the user-configured open link command
func (c *OSCommand) OpenLink(link string) error {
commandTemplate := c.Config.GetUserConfig().OS.OpenLinkCommand
templateValues := map[string]string{
"link": c.Quote(link),
}
command := utils.ResolvePlaceholderString(commandTemplate, templateValues)
err := c.RunCommand(command)
return err
}
// PrepareSubProcess initialises a subprocess; the caller then tells the Gui to switch to it
// TODO: see if this needs to exist, given that ExecutableFromString does the same things
func (c *OSCommand) PrepareSubProcess(cmdName string, commandArgs ...string) *exec.Cmd {
cmd := c.Command(cmdName, commandArgs...)
if cmd != nil {
cmd.Env = append(os.Environ(), "GIT_OPTIONAL_LOCKS=0")
}
return cmd
}
// PrepareShellSubProcess returns the pointer to a custom command
func (c *OSCommand) PrepareShellSubProcess(command string) *exec.Cmd {
return c.PrepareSubProcess(c.Platform.Shell, c.Platform.ShellArg, command)
}
// Quote wraps a message in platform-specific quotation marks
func (c *OSCommand) Quote(message string) string {
if c.Platform.OS == "windows" {
message = strings.Replace(message, `"`, `"'"'"`, -1)
message = strings.Replace(message, `\"`, `\\"`, -1)
} else {
message = strings.Replace(message, `\`, `\\`, -1)
message = strings.Replace(message, `"`, `\"`, -1)
message = strings.Replace(message, "`", "\\`", -1)
message = strings.Replace(message, "$", "\\$", -1)
}
escapedQuote := c.Platform.EscapedQuote
return escapedQuote + message + escapedQuote
}
// AppendLineToFile adds a new line in file
func (c *OSCommand) AppendLineToFile(filename, line string) error {
f, err := os.OpenFile(filename, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)
if err != nil {
return utils.WrapError(err)
}
defer f.Close()
_, err = f.WriteString("\n" + line)
if err != nil {
return utils.WrapError(err)
}
return nil
}
// CreateTempFile writes a string to a new temp file and returns the file's name
func (c *OSCommand) CreateTempFile(filename, content string) (string, error) {
tmpfile, err := ioutil.TempFile("", filename)
if err != nil {
c.Log.Error(err)
return "", utils.WrapError(err)
}
if _, err := tmpfile.WriteString(content); err != nil {
c.Log.Error(err)
return "", utils.WrapError(err)
}
if err := tmpfile.Close(); err != nil {
c.Log.Error(err)
return "", utils.WrapError(err)
}
return tmpfile.Name(), nil
}
// CreateFileWithContent creates a file with the given content
func (c *OSCommand) CreateFileWithContent(path string, content string) error {
if err := os.MkdirAll(filepath.Dir(path), os.ModePerm); err != nil {
c.Log.Error(err)
return err
}
if err := ioutil.WriteFile(path, []byte(content), 0644); err != nil {
c.Log.Error(err)
return utils.WrapError(err)
}
return nil
}
// Remove removes a file or directory at the specified path
func (c *OSCommand) Remove(filename string) error {
err := os.RemoveAll(filename)
return utils.WrapError(err)
}
// FileExists checks whether a file exists at the specified path
func (c *OSCommand) FileExists(path string) (bool, error) {
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
return true, nil
}
// RunPreparedCommand takes a pointer to an exec.Cmd and runs it
// this is useful if you need to give your command some environment variables
// before running it
func (c *OSCommand) RunPreparedCommand(cmd *exec.Cmd) error {
c.BeforeExecuteCmd(cmd)
out, err := cmd.CombinedOutput()
outString := string(out)
c.Log.Info(outString)
if err != nil {
if len(outString) == 0 {
return err
}
return errors.New(outString)
}
return nil
}
// GetLazygitPath returns the path of the currently executed file
func (c *OSCommand) GetLazygitPath() string {
ex, err := os.Executable() // get the executable path for git to use
if err != nil {
ex = os.Args[0] // fallback to the first call argument if needed
}
return `"` + filepath.ToSlash(ex) + `"`
}
// PipeCommands runs a heap of commands and pipes their inputs/outputs together like A | B | C
func (c *OSCommand) PipeCommands(commandStrings ...string) error {
cmds := make([]*exec.Cmd, len(commandStrings))
for i, str := range commandStrings {
cmds[i] = c.ExecutableFromString(str)
}
for i := 0; i < len(cmds)-1; i++ {
stdout, err := cmds[i].StdoutPipe()
if err != nil {
return err
}
cmds[i+1].Stdin = stdout
}
// keeping this here in case I adapt this code for some other purpose in the future
// cmds[len(cmds)-1].Stdout = os.Stdout
finalErrors := []string{}
wg := sync.WaitGroup{}
wg.Add(len(cmds))
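// Start each command in its own goroutine, collecting anything it writes to stderr.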
for _, cmd := range cmds {
currentCmd := cmd
go utils.Safe(func() {
stderr, err := currentCmd.StderrPipe()
if err != nil {
c.Log.Error(err)
}
if err := currentCmd.Start(); err != nil {
c.Log.Error(err)
}
if b, err := ioutil.ReadAll(stderr); err == nil {
if len(b) > 0 {
finalErrors = append(finalErrors, string(b))
}
}
if err := currentCmd.Wait(); err != nil {
c.Log.Error(err)
}
wg.Done()
})
}
wg.Wait()
if len(finalErrors) > 0 {
return errors.New(strings.Join(finalErrors, "\n"))
}
return nil
}
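// Kill terminates the process associated with the given command, if it has been started.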
func Kill(cmd *exec.Cmd) error {
if cmd.Process == nil {
// somebody got to it before we were able to, poor bastard
return nil
}
return cmd.Process.Kill()
}
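// RunLineOutputCmd runs the given command and streams its stdout to onLine one line
// at a time; if onLine returns stop=true, the process is killed and reading stops.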
func RunLineOutputCmd(cmd *exec.Cmd, onLine func(line string) (bool, error)) error {
stdoutPipe, err := cmd.StdoutPipe()
if err != nil {
return err
}
scanner := bufio.NewScanner(stdoutPipe)
scanner.Split(bufio.ScanLines)
if err := cmd.Start(); err != nil {
return err
}
for scanner.Scan() {
line := scanner.Text()
stop, err := onLine(line)
if err != nil {
return err
}
if stop {
_ = cmd.Process.Kill()
break
}
}
_ = cmd.Wait()
return nil
}
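// CopyToClipboard copies the given string to the system clipboard.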
func (c *OSCommand) CopyToClipboard(str string) error {
return clipboard.WriteAll(str)
}
|
[] |
[] |
[] |
[]
|
[]
|
go
| 0 | 0 | |
djmodels/db/backends/oracle/base.py
|
"""
Oracle database backend for Django.
Requires cx_Oracle: http://cx-oracle.sourceforge.net/
"""
import datetime
import decimal
import os
import platform
from djmodels.conf import settings
from djmodels.core.exceptions import ImproperlyConfigured
from djmodels.db import utils
from djmodels.db.backends.base.base import BaseDatabaseWrapper
from djmodels.utils.encoding import force_bytes, force_text
from djmodels.utils.functional import cached_property
def _setup_environment(environ):
# Cygwin requires some special voodoo to set the environment variables
# properly so that Oracle will see them.
if platform.system().upper().startswith('CYGWIN'):
try:
import ctypes
except ImportError as e:
raise ImproperlyConfigured("Error loading ctypes: %s; "
"the Oracle backend requires ctypes to "
"operate correctly under Cygwin." % e)
kernel32 = ctypes.CDLL('kernel32')
for name, value in environ:
kernel32.SetEnvironmentVariableA(name, value)
else:
os.environ.update(environ)
_setup_environment([
# Oracle takes client-side character set encoding from the environment.
('NLS_LANG', '.AL32UTF8'),
# This prevents unicode from getting mangled by getting encoded into the
# potentially non-unicode database character set.
('ORA_NCHAR_LITERAL_REPLACE', 'TRUE'),
])
try:
import cx_Oracle as Database
except ImportError as e:
raise ImproperlyConfigured("Error loading cx_Oracle module: %s" % e)
# Some of these import cx_Oracle, so import them after checking if it's installed.
from .client import DatabaseClient # NOQA isort:skip
from .creation import DatabaseCreation # NOQA isort:skip
from .features import DatabaseFeatures # NOQA isort:skip
from .introspection import DatabaseIntrospection # NOQA isort:skip
from .operations import DatabaseOperations # NOQA isort:skip
from .schema import DatabaseSchemaEditor # NOQA isort:skip
from .utils import Oracle_datetime # NOQA isort:skip
from .validation import DatabaseValidation # NOQA isort:skip
class _UninitializedOperatorsDescriptor:
def __get__(self, instance, cls=None):
# If connection.operators is looked up before a connection has been
# created, transparently initialize connection.operators to avert an
# AttributeError.
if instance is None:
raise AttributeError("operators not available as class attribute")
# Creating a cursor will initialize the operators.
instance.cursor().close()
return instance.__dict__['operators']
class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'oracle'
display_name = 'Oracle'
# This dictionary maps Field objects to their associated Oracle column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
#
# Any format strings starting with "qn_" are quoted before being used in the
# output (the "qn_" prefix is stripped before the lookup is performed.
data_types = {
'AutoField': 'NUMBER(11) GENERATED BY DEFAULT ON NULL AS IDENTITY',
'BigAutoField': 'NUMBER(19) GENERATED BY DEFAULT ON NULL AS IDENTITY',
'BinaryField': 'BLOB',
'BooleanField': 'NUMBER(1)',
'CharField': 'NVARCHAR2(%(max_length)s)',
'DateField': 'DATE',
'DateTimeField': 'TIMESTAMP',
'DecimalField': 'NUMBER(%(max_digits)s, %(decimal_places)s)',
'DurationField': 'INTERVAL DAY(9) TO SECOND(6)',
'FileField': 'NVARCHAR2(%(max_length)s)',
'FilePathField': 'NVARCHAR2(%(max_length)s)',
'FloatField': 'DOUBLE PRECISION',
'IntegerField': 'NUMBER(11)',
'BigIntegerField': 'NUMBER(19)',
'IPAddressField': 'VARCHAR2(15)',
'GenericIPAddressField': 'VARCHAR2(39)',
'NullBooleanField': 'NUMBER(1)',
'OneToOneField': 'NUMBER(11)',
'PositiveIntegerField': 'NUMBER(11)',
'PositiveSmallIntegerField': 'NUMBER(11)',
'SlugField': 'NVARCHAR2(%(max_length)s)',
'SmallIntegerField': 'NUMBER(11)',
'TextField': 'NCLOB',
'TimeField': 'TIMESTAMP',
'URLField': 'VARCHAR2(%(max_length)s)',
'UUIDField': 'VARCHAR2(32)',
}
data_type_check_constraints = {
'BooleanField': '%(qn_column)s IN (0,1)',
'NullBooleanField': '%(qn_column)s IN (0,1)',
'PositiveIntegerField': '%(qn_column)s >= 0',
'PositiveSmallIntegerField': '%(qn_column)s >= 0',
}
# Oracle doesn't support a database index on these columns.
_limited_data_types = ('clob', 'nclob', 'blob')
operators = _UninitializedOperatorsDescriptor()
_standard_operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'icontains': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'endswith': "LIKE TRANSLATE(%s USING NCHAR_CS) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'istartswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
'iendswith': "LIKE UPPER(TRANSLATE(%s USING NCHAR_CS)) ESCAPE TRANSLATE('\\' USING NCHAR_CS)",
}
_likec_operators = {
**_standard_operators,
'contains': "LIKEC %s ESCAPE '\\'",
'icontains': "LIKEC UPPER(%s) ESCAPE '\\'",
'startswith': "LIKEC %s ESCAPE '\\'",
'endswith': "LIKEC %s ESCAPE '\\'",
'istartswith': "LIKEC UPPER(%s) ESCAPE '\\'",
'iendswith': "LIKEC UPPER(%s) ESCAPE '\\'",
}
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an expression
# or the result of a bilateral transformation).
# In those cases, special characters for LIKE operators (e.g. \, *, _) should be
# escaped on database side.
#
# Note: we use str.format() here for readability as '%' is used as a wildcard for
# the LIKE operator.
pattern_esc = r"REPLACE(REPLACE(REPLACE({}, '\', '\\'), '%%', '\%%'), '_', '\_')"
_pattern_ops = {
'contains': "'%%' || {} || '%%'",
'icontains': "'%%' || UPPER({}) || '%%'",
'startswith': "{} || '%%'",
'istartswith': "UPPER({}) || '%%'",
'endswith': "'%%' || {}",
'iendswith': "'%%' || UPPER({})",
}
_standard_pattern_ops = {k: "LIKE TRANSLATE( " + v + " USING NCHAR_CS)"
" ESCAPE TRANSLATE('\\' USING NCHAR_CS)"
for k, v in _pattern_ops.items()}
_likec_pattern_ops = {k: "LIKEC " + v + " ESCAPE '\\'"
for k, v in _pattern_ops.items()}
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
# Classes instantiated in __init__().
client_class = DatabaseClient
creation_class = DatabaseCreation
features_class = DatabaseFeatures
introspection_class = DatabaseIntrospection
ops_class = DatabaseOperations
validation_class = DatabaseValidation
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
use_returning_into = self.settings_dict["OPTIONS"].get('use_returning_into', True)
self.features.can_return_id_from_insert = use_returning_into
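# Build the cx_Oracle DSN from HOST/PORT/NAME, defaulting HOST to localhost.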
def _dsn(self):
settings_dict = self.settings_dict
if not settings_dict['HOST'].strip():
settings_dict['HOST'] = 'localhost'
if settings_dict['PORT']:
return Database.makedsn(settings_dict['HOST'], int(settings_dict['PORT']), settings_dict['NAME'])
return settings_dict['NAME']
def _connect_string(self):
return '%s/\\"%s\\"@%s' % (self.settings_dict['USER'], self.settings_dict['PASSWORD'], self._dsn())
def get_connection_params(self):
conn_params = self.settings_dict['OPTIONS'].copy()
if 'use_returning_into' in conn_params:
del conn_params['use_returning_into']
return conn_params
def get_new_connection(self, conn_params):
return Database.connect(
user=self.settings_dict['USER'],
password=self.settings_dict['PASSWORD'],
dsn=self._dsn(),
**conn_params,
)
def init_connection_state(self):
cursor = self.create_cursor()
# Set the territory first. The territory overrides NLS_DATE_FORMAT
# and NLS_TIMESTAMP_FORMAT to the territory default. When all of
# these are set in single statement it isn't clear what is supposed
# to happen.
cursor.execute("ALTER SESSION SET NLS_TERRITORY = 'AMERICA'")
# Set Oracle date to ANSI date format. This only needs to execute
# once when we create a new connection. We also set the Territory
# to 'AMERICA' which forces Sunday to evaluate to a '1' in
# TO_CHAR().
cursor.execute(
"ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS'"
" NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF'" +
(" TIME_ZONE = 'UTC'" if settings.USE_TZ else '')
)
cursor.close()
if 'operators' not in self.__dict__:
# Ticket #14149: Check whether our LIKE implementation will
# work for this connection or we need to fall back on LIKEC.
# This check is performed only once per DatabaseWrapper
# instance per thread, since subsequent connections will use
# the same settings.
cursor = self.create_cursor()
try:
cursor.execute("SELECT 1 FROM DUAL WHERE DUMMY %s"
% self._standard_operators['contains'],
['X'])
except Database.DatabaseError:
self.operators = self._likec_operators
self.pattern_ops = self._likec_pattern_ops
else:
self.operators = self._standard_operators
self.pattern_ops = self._standard_pattern_ops
cursor.close()
self.connection.stmtcachesize = 20
# Ensure all changes are preserved even when AUTOCOMMIT is False.
if not self.get_autocommit():
self.commit()
def create_cursor(self, name=None):
return FormatStylePlaceholderCursor(self.connection)
def _commit(self):
if self.connection is not None:
try:
return self.connection.commit()
except Database.DatabaseError as e:
# cx_Oracle raises a cx_Oracle.DatabaseError exception
# with the following attributes and values:
# code = 2091
# message = 'ORA-02091: transaction rolled back
# 'ORA-02291: integrity constraint (TEST_DJANGOTEST.SYS
# _C00102056) violated - parent key not found'
# We convert that particular case to our IntegrityError exception
x = e.args[0]
if hasattr(x, 'code') and hasattr(x, 'message') \
and x.code == 2091 and 'ORA-02291' in x.message:
raise utils.IntegrityError(*tuple(e.args))
raise
# Oracle doesn't support releasing savepoints. But we fake them when query
# logging is enabled to keep query counts consistent with other backends.
def _savepoint_commit(self, sid):
if self.queries_logged:
self.queries_log.append({
'sql': '-- RELEASE SAVEPOINT %s (faked)' % self.ops.quote_name(sid),
'time': '0.000',
})
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
self.connection.autocommit = autocommit
def check_constraints(self, table_names=None):
"""
Check constraints by setting them to immediate. Return them to deferred
afterward.
"""
self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')
def is_usable(self):
try:
self.connection.ping()
except Database.Error:
return False
else:
return True
@cached_property
def oracle_full_version(self):
with self.temporary_connection():
return self.connection.version
@cached_property
def oracle_version(self):
try:
return int(self.oracle_full_version.split('.')[0])
except ValueError:
return None
class OracleParam:
"""
Wrapper object for formatting parameters for Oracle. If the string
representation of the value is large enough (greater than 4000 characters)
the input size needs to be set as CLOB. Alternatively, if the parameter
has an `input_size` attribute, then the value of the `input_size` attribute
will be used instead. Otherwise, no input size will be set for the
parameter when executing the query.
"""
def __init__(self, param, cursor, strings_only=False):
# With raw SQL queries, datetimes can reach this function
# without being converted by DateTimeField.get_db_prep_value.
if settings.USE_TZ and (isinstance(param, datetime.datetime) and
not isinstance(param, Oracle_datetime)):
param = Oracle_datetime.from_datetime(param)
string_size = 0
# Oracle doesn't recognize True and False correctly.
if param is True:
param = 1
elif param is False:
param = 0
if hasattr(param, 'bind_parameter'):
self.force_bytes = param.bind_parameter(cursor)
elif isinstance(param, (Database.Binary, datetime.timedelta)):
self.force_bytes = param
else:
# To transmit to the database, we need Unicode if supported
# To get size right, we must consider bytes.
self.force_bytes = force_text(param, cursor.charset, strings_only)
if isinstance(self.force_bytes, str):
# We could optimize by only converting up to 4000 bytes here
string_size = len(force_bytes(param, cursor.charset, strings_only))
if hasattr(param, 'input_size'):
# If parameter has `input_size` attribute, use that.
self.input_size = param.input_size
elif string_size > 4000:
# Mark any string param greater than 4000 characters as a CLOB.
self.input_size = Database.CLOB
elif isinstance(param, datetime.datetime):
self.input_size = Database.TIMESTAMP
else:
self.input_size = None
class VariableWrapper:
"""
An adapter class for cursor variables that prevents the wrapped object
from being converted into a string when used to instantiate an OracleParam.
This can be used generally for any other object that should be passed into
Cursor.execute as-is.
"""
def __init__(self, var):
self.var = var
def bind_parameter(self, cursor):
return self.var
def __getattr__(self, key):
return getattr(self.var, key)
def __setattr__(self, key, value):
if key == 'var':
self.__dict__[key] = value
else:
setattr(self.var, key, value)
class FormatStylePlaceholderCursor:
"""
Django uses "format" (e.g. '%s') style placeholders, but Oracle uses ":var"
style. This fixes it -- but note that if you want to use a literal "%s" in
a query, you'll need to use "%%s".
We also do automatic conversion between Unicode on the Python side and
UTF-8 -- for talking to Oracle -- in here.
"""
charset = 'utf-8'
def __init__(self, connection):
self.cursor = connection.cursor()
self.cursor.outputtypehandler = self._output_type_handler
# The default for cx_Oracle < 5.3 is 50.
self.cursor.arraysize = 100
@staticmethod
def _output_number_converter(value):
return decimal.Decimal(value) if '.' in value else int(value)
@staticmethod
def _get_decimal_converter(precision, scale):
if scale == 0:
return int
context = decimal.Context(prec=precision)
quantize_value = decimal.Decimal(1).scaleb(-scale)
return lambda v: decimal.Decimal(v).quantize(quantize_value, context=context)
@staticmethod
def _output_type_handler(cursor, name, defaultType, length, precision, scale):
"""
Called for each db column fetched from cursors. Return numbers as the
appropriate Python type.
"""
if defaultType == Database.NUMBER:
if scale == -127:
if precision == 0:
# NUMBER column: decimal-precision floating point.
# This will normally be an integer from a sequence,
# but it could be a decimal value.
outconverter = FormatStylePlaceholderCursor._output_number_converter
else:
# FLOAT column: binary-precision floating point.
# This comes from FloatField columns.
outconverter = float
elif precision > 0:
# NUMBER(p,s) column: decimal-precision fixed point.
# This comes from IntegerField and DecimalField columns.
outconverter = FormatStylePlaceholderCursor._get_decimal_converter(precision, scale)
else:
# No type information. This normally comes from a
# mathematical expression in the SELECT list. Guess int
# or Decimal based on whether it has a decimal point.
outconverter = FormatStylePlaceholderCursor._output_number_converter
return cursor.var(
Database.STRING,
size=255,
arraysize=cursor.arraysize,
outconverter=outconverter,
)
def _format_params(self, params):
try:
return {k: OracleParam(v, self, True) for k, v in params.items()}
except AttributeError:
return tuple(OracleParam(p, self, True) for p in params)
def _guess_input_sizes(self, params_list):
# Try dict handling; if that fails, treat as sequence
if hasattr(params_list[0], 'keys'):
sizes = {}
for params in params_list:
for k, value in params.items():
if value.input_size:
sizes[k] = value.input_size
if sizes:
self.setinputsizes(**sizes)
else:
# It's not a list of dicts; it's a list of sequences
sizes = [None] * len(params_list[0])
for params in params_list:
for i, value in enumerate(params):
if value.input_size:
sizes[i] = value.input_size
if sizes:
self.setinputsizes(*sizes)
def _param_generator(self, params):
# Try dict handling; if that fails, treat as sequence
if hasattr(params, 'items'):
return {k: v.force_bytes for k, v in params.items()}
else:
return [p.force_bytes for p in params]
def _fix_for_params(self, query, params, unify_by_values=False):
# cx_Oracle wants no trailing ';' for SQL statements. For PL/SQL, it
# does want a trailing ';' but not a trailing '/'. However, these
# characters must be included in the original query in case the query
# is being passed to SQL*Plus.
if query.endswith(';') or query.endswith('/'):
query = query[:-1]
if params is None:
params = []
elif hasattr(params, 'keys'):
# Handle params as dict
args = {k: ":%s" % k for k in params}
query = query % args
elif unify_by_values and params:
# Handle params as a dict with unified query parameters by their
# values. It can be used only in single query execute() because
# executemany() shares the formatted query with each of the params
# list. e.g. for input params = [0.75, 2, 0.75, 'sth', 0.75]
# params_dict = {0.75: ':arg0', 2: ':arg1', 'sth': ':arg2'}
# args = [':arg0', ':arg1', ':arg0', ':arg2', ':arg0']
# params = {':arg0': 0.75, ':arg1': 2, ':arg2': 'sth'}
params_dict = {param: ':arg%d' % i for i, param in enumerate(set(params))}
args = [params_dict[param] for param in params]
params = {value: key for key, value in params_dict.items()}
query = query % tuple(args)
else:
# Handle params as sequence
args = [(':arg%d' % i) for i in range(len(params))]
query = query % tuple(args)
return query, self._format_params(params)
def execute(self, query, params=None):
query, params = self._fix_for_params(query, params, unify_by_values=True)
self._guess_input_sizes([params])
return self.cursor.execute(query, self._param_generator(params))
def executemany(self, query, params=None):
if not params:
# No params given, nothing to do
return None
# uniform treatment for sequences and iterables
params_iter = iter(params)
query, firstparams = self._fix_for_params(query, next(params_iter))
# we build a list of formatted params; as we're going to traverse it
# more than once, we can't make it lazy by using a generator
formatted = [firstparams] + [self._format_params(p) for p in params_iter]
self._guess_input_sizes(formatted)
return self.cursor.executemany(query, [self._param_generator(p) for p in formatted])
def close(self):
try:
self.cursor.close()
except Database.InterfaceError:
# already closed
pass
def var(self, *args):
return VariableWrapper(self.cursor.var(*args))
def arrayvar(self, *args):
return VariableWrapper(self.cursor.arrayvar(*args))
def __getattr__(self, attr):
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
Godeps/_workspace/src/github.com/vmware/govmomi/session/keep_alive_test.go
|
/*
Copyright (c) 2014 VMware, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package session
import (
"fmt"
"net/url"
"os"
"runtime"
"testing"
"time"
"github.com/vmware/govmomi/test"
"github.com/vmware/govmomi/vim25"
"github.com/vmware/govmomi/vim25/soap"
"golang.org/x/net/context"
)
type testKeepAlive int
func (t *testKeepAlive) Func(soap.RoundTripper) {
*t++
}
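// newManager creates a session Manager for the configured test URL, skipping the
// test if no URL is available.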
func newManager(t *testing.T) (*Manager, *url.URL) {
u := test.URL()
if u == nil {
t.SkipNow()
}
soapClient := soap.NewClient(u, true)
vimClient, err := vim25.NewClient(context.Background(), soapClient)
if err != nil {
t.Fatal(err)
}
return NewManager(vimClient), u
}
func TestKeepAlive(t *testing.T) {
var i testKeepAlive
var j int
m, u := newManager(t)
k := KeepAlive(m.client.RoundTripper, time.Millisecond)
k.(*keepAlive).keepAlive = i.Func
m.client.RoundTripper = k
// Expect keep alive to not have triggered yet
if i != 0 {
t.Errorf("Expected i == 0, got i: %d", i)
}
// Logging in starts keep alive
err := m.Login(context.Background(), u.User)
if err != nil {
t.Error(err)
}
time.Sleep(2 * time.Millisecond)
// Expect keep alive to have triggered at least once
if i == 0 {
t.Errorf("Expected i != 0, got i: %d", i)
}
j = int(i)
time.Sleep(2 * time.Millisecond)
// Expect keep alive to have triggered at least once more
if int(i) <= j {
t.Errorf("Expected i > j, got i: %d, j: %d", i, j)
}
// Logging out stops keep alive
err = m.Logout(context.Background())
if err != nil {
t.Error(err)
}
j = int(i)
time.Sleep(2 * time.Millisecond)
// Expect keep alive to have stopped
if int(i) != j {
t.Errorf("Expected i == j, got i: %d, j: %d", i, j)
}
}
func testSessionOK(t *testing.T, m *Manager, ok bool) {
s, err := m.UserSession(context.Background())
if err != nil {
t.Fatal(err)
}
_, file, line, _ := runtime.Caller(1)
prefix := fmt.Sprintf("%s:%d", file, line)
if ok && s == nil {
t.Fatalf("%s: Expected session to be OK, but is invalid", prefix)
}
if !ok && s != nil {
t.Fatalf("%s: Expected session to be invalid, but is OK", prefix)
}
}
// Run with:
//
// env GOVMOMI_KEEPALIVE_TEST=1 go test -timeout=60m -run TestRealKeepAlive
//
func TestRealKeepAlive(t *testing.T) {
if os.Getenv("GOVMOMI_KEEPALIVE_TEST") != "1" {
t.SkipNow()
}
m1, u1 := newManager(t)
m2, u2 := newManager(t)
// Enable keepalive on m2
k := KeepAlive(m2.client.RoundTripper, 10*time.Minute)
m2.client.RoundTripper = k
// Expect both sessions to be invalid
testSessionOK(t, m1, false)
testSessionOK(t, m2, false)
// Logging in starts keep alive
if err := m1.Login(context.Background(), u1.User); err != nil {
t.Error(err)
}
if err := m2.Login(context.Background(), u2.User); err != nil {
t.Error(err)
}
// Expect both sessions to be valid
testSessionOK(t, m1, true)
testSessionOK(t, m2, true)
// Wait for m1 to time out
delay := 31 * time.Minute
fmt.Printf("%s: Waiting %d minutes for session to time out...\n", time.Now(), int(delay.Minutes()))
time.Sleep(delay)
// Expect m1's session to be invalid, m2's session to be valid
testSessionOK(t, m1, false)
testSessionOK(t, m2, true)
}
|
[
"\"GOVMOMI_KEEPALIVE_TEST\""
] |
[] |
[
"GOVMOMI_KEEPALIVE_TEST"
] |
[]
|
["GOVMOMI_KEEPALIVE_TEST"]
|
go
| 1 | 0 | |
dep/dep.go
|
package dep
import (
"go/build"
"os"
"strings"
)
var stdPackages = map[string]bool{
"C": true,
"builtin": true,
"archive/tar": true,
"archive/zip": true,
"bufio": true,
"bytes": true,
"compress/bzip2": true,
"compress/flate": true,
"compress/gzip": true,
"compress/lzw": true,
"compress/zlib": true,
"container/heap": true,
"container/list": true,
"container/ring": true,
"context": true,
"crypto": true,
"crypto/aes": true,
"crypto/cipher": true,
"crypto/des": true,
"crypto/dsa": true,
"crypto/ecdsa": true,
"crypto/elliptic": true,
"crypto/hmac": true,
"crypto/md5": true,
"crypto/rand": true,
"crypto/rc4": true,
"crypto/rsa": true,
"crypto/sha1": true,
"crypto/sha256": true,
"crypto/sha512": true,
"crypto/subtle": true,
"crypto/tls": true,
"crypto/x509": true,
"crypto/x509/pkix": true,
"database/sql": true,
"database/sql/driver": true,
"debug/dwarf": true,
"debug/elf": true,
"debug/gosym": true,
"debug/macho": true,
"debug/pe": true,
"debug/plan9obj": true,
"encoding": true,
"encoding/ascii85": true,
"encoding/asn1": true,
"encoding/base32": true,
"encoding/base64": true,
"encoding/binary": true,
"encoding/csv": true,
"encoding/gob": true,
"encoding/hex": true,
"encoding/json": true,
"encoding/pem": true,
"encoding/xml": true,
"errors": true,
"expvar": true,
"flag": true,
"fmt": true,
"go/ast": true,
"go/build": true,
"go/constant": true,
"go/doc": true,
"go/format": true,
"go/importer": true,
"go/internal/gccgoimporter": true,
"go/internal/gcimporter": true,
"go/parser": true,
"go/printer": true,
"go/scanner": true,
"go/token": true,
"go/types": true,
"hash": true,
"hash/adler32": true,
"hash/crc32": true,
"hash/crc64": true,
"hash/fnv": true,
"html": true,
"html/template": true,
"image": true,
"image/color": true,
"image/color/palette": true,
"image/draw": true,
"image/gif": true,
"image/internal/imageutil": true,
"image/jpeg": true,
"image/png": true,
"index/suffixarray": true,
"internal/race": true,
"internal/singleflight": true,
"internal/testenv": true,
"internal/trace": true,
"io": true,
"io/ioutil": true,
"log": true,
"log/syslog": true,
"math": true,
"math/big": true,
"math/cmplx": true,
"math/rand": true,
"mime": true,
"mime/multipart": true,
"mime/quotedprintable": true,
"net": true,
"net/http": true,
"net/http/cgi": true,
"net/http/cookiejar": true,
"net/http/fcgi": true,
"net/http/httptest": true,
"net/http/httptrace": true,
"net/http/httputil": true,
"net/http/internal": true,
"net/http/pprof": true,
"net/internal/socktest": true,
"net/mail": true,
"net/rpc": true,
"net/rpc/jsonrpc": true,
"net/smtp": true,
"net/textproto": true,
"net/url": true,
"os": true,
"os/exec": true,
"os/signal": true,
"os/user": true,
"path": true,
"path/filepath": true,
"reflect": true,
"regexp": true,
"regexp/syntax": true,
"runtime": true,
"runtime/cgo": true,
"runtime/debug": true,
"runtime/internal/atomic": true,
"runtime/internal/sys": true,
"runtime/pprof": true,
"runtime/race": true,
"runtime/trace": true,
"sort": true,
"strconv": true,
"strings": true,
"sync": true,
"sync/atomic": true,
"syscall": true,
"testing": true,
"testing/iotest": true,
"testing/quick": true,
"text/scanner": true,
"text/tabwriter": true,
"text/template": true,
"text/template/parse": true,
"time": true,
"unicode": true,
"unicode/utf16": true,
"unicode/utf8": true,
"unsafe": true,
}
func listImports(ctx build.Context, dones map[string]bool, path, root, src string, test bool) (imports []string, err error) {
_, ok := dones[path]
if ok {
return
}
dones[path] = true
pkg, err := ctx.Import(path, src, build.AllowBinary)
if err != nil {
if _, ok := err.(*build.NoGoError); ok {
err = nil
}
return
}
paths := pkg.Imports
if test {
paths = append(paths, pkg.TestImports...)
}
for _, path := range paths {
_, ok := stdPackages[path]
if ok {
continue
}
if strings.HasPrefix(path, root) {
var subImports []string
subImports, err = listImports(ctx, dones, path, root, src, test)
if err != nil {
return
}
imports = append(imports, subImports...)
} else {
imports = append(imports, path)
}
}
return
}
func Imports(path, root, src string, test bool) (imports []string, err error) {
ctx := build.Default
gopath := os.Getenv("GOPATH")
ctx.GOPATH = src + string(os.PathListSeparator) + gopath
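// src is listed first, so go/build resolves packages vendored under src before those in the user's GOPATH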
dones := make(map[string]bool)
imports, err = listImports(ctx, dones, path, root, src, test)
return
}
|
[
"\"GOPATH\""
] |
[] |
[
"GOPATH"
] |
[]
|
["GOPATH"]
|
go
| 1 | 0 | |
requirements-check/pkg/main.go
|
package main
import (
"bytes"
"encoding/json"
"fmt"
"github.com/sirupsen/logrus"
"os"
"os/exec"
"path/filepath"
sono "github.com/vmware-tanzu/sonobuoy/pkg/client/results"
pluginhelper "github.com/vmware-tanzu/sonobuoy-plugins/plugin-helper"
)
const (
defaultInputFile = "input.json"
pluginInputDir = "/tmp/sonobuoy/config"
)
func main() {
// Debug
logrus.SetLevel(logrus.TraceLevel)
inputFile := defaultInputFile
if os.Getenv("SONOBUOY_K8S_VERSION") != "" {
inputFile = filepath.Join(pluginInputDir, inputFile)
}
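// SONOBUOY_K8S_VERSION being set presumably means the plugin is running inside Sonobuoy, where the input file is mounted under the plugin config directory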
inputB, err := os.ReadFile(inputFile)
if err != nil {
panic(err)
}
var checks CheckList
if err := json.Unmarshal(inputB, &checks); err != nil {
panic(err)
}
w := pluginhelper.NewDefaultSonobuoyResultsWriter()
// Refactor this with an interface for those methods so we can just save a map between
// type and method which will return `result, error` and avoid this whole switch here.
for _, check := range checks {
f, ok := typeFuncLookup[check.Meta.Type]
if !ok {
fmt.Fprintf(os.Stderr, "Unknown check type: %v", check.Meta.Type)
continue
}
res, err := f(check)
logrus.Tracef("Completed test %q, result: %v\n", check.Meta.Name, failToStatus(res.Fail))
w.AddTest(check.Meta.Name, failToStatus(res.Fail), err, "")
}
if err := w.Done(); err != nil {
fmt.Fprintln(os.Stderr, err.Error())
os.Exit(1)
}
}
func failToStatus(failed bool) string {
if failed {
return sono.StatusFailed
}
return sono.StatusPassed
}
func runCmd(cmdText string) ([]byte, error) {
c := exec.Command("/bin/bash", "-c", cmdText)
logrus.Traceln(c.String(), c.Args)
out, err := c.CombinedOutput()
logrus.Traceln("Command:", c.String())
logrus.Traceln("Output:", string(out))
if err != nil {
logrus.Traceln("Error returned:", err.Error())
}
return bytes.TrimSpace(out), err
}
func runFilterCmd(inputFile, filter string) ([]byte, error) {
o, err := runCmd(fmt.Sprintf(`%v %v`, filter, inputFile))
return o, err
}
|
[
"\"SONOBUOY_K8S_VERSION\""
] |
[] |
[
"SONOBUOY_K8S_VERSION"
] |
[]
|
["SONOBUOY_K8S_VERSION"]
|
go
| 1 | 0 | |
examples/documentation_examples/examples_test.go
|
// Copyright (C) MongoDB, Inc. 2017-present.
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
// NOTE: Any time this file is modified, a WEBSITE ticket should be opened to sync the changes with
// the "What is MongoDB" webpage, which the example was originally added to as part of WEBSITE-5148.
package documentation_examples_test
import (
"context"
"os"
"testing"
"time"
"github.com/stretchr/testify/require"
"go.mongodb.org/mongo-driver/examples/documentation_examples"
"go.mongodb.org/mongo-driver/internal/testutil"
"go.mongodb.org/mongo-driver/mongo"
"go.mongodb.org/mongo-driver/mongo/description"
"go.mongodb.org/mongo-driver/mongo/options"
"go.mongodb.org/mongo-driver/x/bsonx"
"go.mongodb.org/mongo-driver/x/mongo/driver/connstring"
"go.mongodb.org/mongo-driver/x/mongo/driver/topology"
)
func TestDocumentationExamples(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
cs := testutil.ConnString(t)
client, err := mongo.Connect(context.Background(), options.Client().ApplyURI(cs.String()))
require.NoError(t, err)
defer client.Disconnect(ctx)
db := client.Database("documentation_examples")
documentation_examples.InsertExamples(t, db)
documentation_examples.QueryToplevelFieldsExamples(t, db)
documentation_examples.QueryEmbeddedDocumentsExamples(t, db)
documentation_examples.QueryArraysExamples(t, db)
documentation_examples.QueryArrayEmbeddedDocumentsExamples(t, db)
documentation_examples.QueryNullMissingFieldsExamples(t, db)
documentation_examples.ProjectionExamples(t, db)
documentation_examples.UpdateExamples(t, db)
documentation_examples.DeleteExamples(t, db)
documentation_examples.RunCommandExamples(t, db)
documentation_examples.IndexExamples(t, db)
documentation_examples.StableAPIExamples()
// Because it uses RunCommand with an apiVersion, the strict count example can only be
// run on 5.0+ without auth. It also cannot be run on 6.0+ since the count command was
// added to API version 1 and no longer results in an error when strict is enabled.
ver, err := getServerVersion(ctx, client)
require.NoError(t, err, "getServerVersion error: %v", err)
auth := os.Getenv("AUTH") == "auth"
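// AUTH=auth presumably indicates the test cluster requires authentication, which rules out the strict count example per the note above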
if testutil.CompareVersions(t, ver, "5.0") >= 0 && testutil.CompareVersions(t, ver, "6.0") < 0 && !auth {
documentation_examples.StableAPIStrictCountExample(t)
} else {
t.Log("skipping stable API strict count example")
}
}
func TestAggregationExamples(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
cs := testutil.ConnString(t)
client, err := mongo.Connect(context.Background(), options.Client().ApplyURI(cs.String()))
require.NoError(t, err)
defer client.Disconnect(ctx)
db := client.Database("documentation_examples")
ver, err := getServerVersion(ctx, client)
if err != nil || testutil.CompareVersions(t, ver, "3.6") < 0 {
t.Skip("server does not support let in $lookup in aggregations")
}
documentation_examples.AggregationExamples(t, db)
}
func TestTransactionExamples(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
topo := createTopology(t)
client, err := mongo.Connect(context.Background(), &options.ClientOptions{Deployment: topo})
require.NoError(t, err)
defer client.Disconnect(ctx)
ver, err := getServerVersion(ctx, client)
if err != nil || testutil.CompareVersions(t, ver, "4.0") < 0 || topo.Kind() != description.ReplicaSet {
t.Skip("server does not support transactions")
}
err = documentation_examples.TransactionsExamples(ctx, client)
require.NoError(t, err)
}
func TestChangeStreamExamples(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
topo := createTopology(t)
client, err := mongo.Connect(context.Background(), &options.ClientOptions{Deployment: topo})
require.NoError(t, err)
defer client.Disconnect(ctx)
db := client.Database("changestream_examples")
ver, err := getServerVersion(ctx, client)
if err != nil || testutil.CompareVersions(t, ver, "3.6") < 0 || topo.Kind() != description.ReplicaSet {
t.Skip("server does not support changestreams")
}
documentation_examples.ChangeStreamExamples(t, db)
}
func TestCausalConsistencyExamples(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
cs := testutil.ConnString(t)
client, err := mongo.Connect(context.Background(), options.Client().ApplyURI(cs.String()))
require.NoError(t, err)
defer client.Disconnect(ctx)
// TODO(GODRIVER-2238): Remove skip once failures on MongoDB v4.0 sharded clusters are fixed.
ver, err := getServerVersion(ctx, client)
if err != nil || testutil.CompareVersions(t, ver, "4.0") == 0 {
t.Skip("TODO(GODRIVER-2238): Skip until failures on MongoDB v4.0 sharded clusters are fixed")
}
err = documentation_examples.CausalConsistencyExamples(client)
require.NoError(t, err)
}
func getServerVersion(ctx context.Context, client *mongo.Client) (string, error) {
serverStatus, err := client.Database("admin").RunCommand(
ctx,
bsonx.Doc{{"serverStatus", bsonx.Int32(1)}},
).DecodeBytes()
if err != nil {
return "", err
}
version, err := serverStatus.LookupErr("version")
if err != nil {
return "", err
}
return version.StringValue(), nil
}
func createTopology(t *testing.T) *topology.Topology {
topo, err := topology.New(topology.WithConnString(func(connstring.ConnString) connstring.ConnString {
return testutil.ConnString(t)
}))
if err != nil {
t.Fatalf("topology.New error: %v", err)
}
return topo
}
|
[
"\"AUTH\""
] |
[] |
[
"AUTH"
] |
[]
|
["AUTH"]
|
go
| 1 | 0 | |
backendnotes/manage.py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "backendnotes.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
gateway/midtrans/snap/builder.go
|
package snap
import (
"fmt"
"math"
"os"
"time"
"github.com/midtrans/midtrans-go"
"github.com/midtrans/midtrans-go/snap"
"github.com/imrenagi/go-payment/invoice"
)
func newBuilder(inv *invoice.Invoice) *builder {
var callback *snap.Callbacks
defaultRedirectUrl := os.Getenv("INVOICE_SUCCESS_REDIRECT_URL")
if defaultRedirectUrl != "" {
callback = &snap.Callbacks{Finish: defaultRedirectUrl}
}
if inv.SuccessRedirectURL != "" {
callback = &snap.Callbacks{Finish: inv.SuccessRedirectURL}
}
srb := &builder{
req: &snap.Request{
Items: &[]midtrans.ItemDetails{},
Callbacks: callback,
},
}
return srb.
setTransactionDetails(inv).
setCustomerDetail(inv).
setExpiration(inv).
setItemDetails(inv)
}
type builder struct {
req *snap.Request
}
func (b *builder) setItemDetails(inv *invoice.Invoice) *builder {
var out []midtrans.ItemDetails
for _, item := range inv.LineItems {
name := item.Name
if runes := []rune(name); len(runes) > 50 {
// measure and slice by runes so a multi-byte name cannot cause an out-of-range slice
name = string(runes[:50])
}
out = append(out, midtrans.ItemDetails{
ID: fmt.Sprintf("%d", item.ID),
Name: name,
Price: int64(item.UnitPrice),
Qty: int32(item.Qty),
Category: item.Category,
MerchantName: item.MerchantName,
})
}
if inv.ServiceFee > 0 {
out = append(out, midtrans.ItemDetails{
ID: "adminfee",
Name: "Biaya Admin",
Price: int64(inv.ServiceFee),
Qty: 1,
Category: "FEE",
})
}
if inv.InstallmentFee > 0 {
out = append(out, midtrans.ItemDetails{
ID: "installmentfee",
Name: "Installment Fee",
Price: int64(inv.InstallmentFee),
Qty: 1,
Category: "FEE",
})
}
if inv.Discount > 0 {
out = append(out, midtrans.ItemDetails{
ID: "discount",
Name: "Discount",
Price: int64(-1 * inv.Discount),
Qty: 1,
Category: "DISCOUNT",
})
}
if inv.Tax > 0 {
out = append(out, midtrans.ItemDetails{
ID: "tax",
Name: "Tax",
Price: int64(inv.Tax),
Qty: 1,
Category: "TAX",
})
}
b.req.Items = &out
return b
}
func (b *builder) setCustomerDetail(inv *invoice.Invoice) *builder {
b.req.CustomerDetail = &midtrans.CustomerDetails{
FName: inv.BillingAddress.FullName,
Email: inv.BillingAddress.Email,
Phone: inv.BillingAddress.PhoneNumber,
BillAddr: &midtrans.CustomerAddress{
FName: inv.BillingAddress.FullName,
Phone: inv.BillingAddress.PhoneNumber,
},
}
return b
}
func (b *builder) setExpiration(inv *invoice.Invoice) *builder {
loc, _ := time.LoadLocation("Asia/Jakarta")
invDate := inv.InvoiceDate.In(loc)
duration := inv.DueDate.Sub(inv.InvoiceDate)
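// The expiry window is the gap between the invoice date and the due date, rounded to whole minutes for the request below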
b.req.Expiry = &snap.ExpiryDetails{
StartTime: invDate.Format("2006-01-02 15:04:05 -0700"),
Unit: "minute",
Duration: int64(math.Round(duration.Minutes())),
}
return b
}
func (b *builder) setTransactionDetails(inv *invoice.Invoice) *builder {
b.req.TransactionDetails = midtrans.TransactionDetails{
OrderID: inv.Number,
GrossAmt: int64(inv.GetTotal()),
}
return b
}
func (b *builder) AddPaymentMethods(m snap.SnapPaymentType) *builder {
b.req.EnabledPayments = append(b.req.EnabledPayments, m)
return b
}
func (b *builder) SetCreditCardDetail(d *snap.CreditCardDetails) *builder {
b.req.CreditCard = d
return b
}
func (b *builder) Build() (*snap.Request, error) {
return b.req, nil
}
|
[
"\"INVOICE_SUCCESS_REDIRECT_URL\""
] |
[] |
[
"INVOICE_SUCCESS_REDIRECT_URL"
] |
[]
|
["INVOICE_SUCCESS_REDIRECT_URL"]
|
go
| 1 | 0 | |
tools/csv_importer.go
|
//go:build ignore
// +build ignore
package main
import (
"database/sql"
"encoding/csv"
"fmt"
"log"
"os"
_ "github.com/go-sql-driver/mysql"
"github.com/subosito/gotenv"
)
func init() {
gotenv.Load()
}
func main() {
records, err := readCsvFile("authors.csv")
if err != nil {
log.Fatal(err)
}
var (
host = "127.0.0.1"
port = os.Getenv("DB_PORT")
user = os.Getenv("DB_USERNAME")
password = os.Getenv("DB_PASSWORD")
dbname = os.Getenv("DB_NAME")
)
db, err := sql.Open("mysql", user+":"+password+"@tcp("+host+":"+port+")/"+dbname+"?charset=utf8&parseTime=True&loc=Local")
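// DSN follows the go-sql-driver/mysql format: user:password@tcp(host:port)/dbname?options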
if err != nil {
log.Fatal(err)
}
defer db.Close()
for index, record := range records {
if index == 0 {
continue
}
sqlStatement := "INSERT INTO authors(name) VALUES (?)"
res, err := db.Exec(sqlStatement, record[0])
if err != nil {
fmt.Println("err", err)
continue
}
fmt.Println(res)
fmt.Println(record[0], "inserted!")
}
}
func readCsvFile(fileName string) (records [][]string, err error) {
file, err := os.Open(fileName)
if err != nil {
return
}
defer file.Close()
csvReader := csv.NewReader(file)
records, err = csvReader.ReadAll()
if err != nil {
return
}
return
}
|
[
"\"DB_PORT\"",
"\"DB_USERNAME\"",
"\"DB_PASSWORD\"",
"\"DB_NAME\""
] |
[] |
[
"DB_PORT",
"DB_USERNAME",
"DB_PASSWORD",
"DB_NAME"
] |
[]
|
["DB_PORT", "DB_USERNAME", "DB_PASSWORD", "DB_NAME"]
|
go
| 4 | 0 | |
main.go
|
package main
import (
"bytes"
"flag"
"fmt"
"github.com/yuya-takeyama/posixexec"
"io"
"os"
"os/exec"
"os/user"
)
var channel string
var name string
var icon string
var printVersion bool
var hostname string
var osUser *user.User
var cwd string
var client *Slack
const (
ExitFatal = 111
)
func init() {
var err error
flag.StringVar(&channel, "channel", "#general", "channel to post message")
flag.StringVar(&name, "name", "slackexec", "username of the bot")
flag.StringVar(&icon, "icon", ":computer:", "icon of the bot")
flag.BoolVar(&printVersion, "version", false, "print version")
flag.Parse()
if printVersion {
fmt.Fprintf(os.Stderr, "%s version %s, build %s\n", Name, Version, GitCommit)
os.Exit(0)
}
client = NewSlack(name, icon, channel, os.Getenv("SLACK_API_TOKEN"))
hostname, err = os.Hostname()
if err != nil {
fmt.Fprintf(os.Stderr, "%s: failed to get hostname: %s\n", Name, err)
os.Exit(ExitFatal)
}
osUser, err = user.Current()
if err != nil {
fmt.Fprintf(os.Stderr, "%s: failed to get username: %s\n", Name, err)
os.Exit(ExitFatal)
}
cwd, err = os.Getwd()
if err != nil {
fmt.Fprintf(os.Stderr, "%s: failed to get working directory: %s\n", Name, err)
os.Exit(ExitFatal)
}
}
func main() {
args := flag.Args()
if len(args) != 1 {
fmt.Fprintf(os.Stderr, "usage: %s -channel=CHANNELNAME COMMAND\n", Name)
os.Exit(ExitFatal)
}
command := args[0]
client.Post(fmt.Sprintf("Running on `%s@%s`:`%s`\n```\n$ %s\n```", osUser.Username, hostname, cwd, command))
cmd, buf := execCommand(args[0])
exitStatus, err := posixexec.Run(cmd)
if err != nil {
fmt.Fprintf(os.Stderr, "%s: failed to exec command: %s\n", Name, err)
os.Exit(ExitFatal)
}
client.Post("Output:\n```\n" + buf.String() + "```")
os.Exit(exitStatus)
}
func execCommand(command string) (*exec.Cmd, *bytes.Buffer) {
cmd := exec.Command(os.Getenv("SHELL"), "-c", command)
buf := new(bytes.Buffer)
writer := io.MultiWriter(buf, os.Stdout)
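// Command output is written twice: buffered for the Slack post and streamed live to stdout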
cmd.Stdout = writer
cmd.Stderr = writer
return cmd, buf
}
|
[
"\"SLACK_API_TOKEN\"",
"\"SHELL\""
] |
[] |
[
"SHELL",
"SLACK_API_TOKEN"
] |
[]
|
["SHELL", "SLACK_API_TOKEN"]
|
go
| 2 | 0 | |
pkg/machine_controller/machine_actuator.go
|
package machine_controller
import (
"github.com/golang/glog"
tkeconfigv1 "sigs.k8s.io/cluster-api-provider-tencent/pkg/apis/tkeproviderconfig/v1alpha1"
clusterv1 "sigs.k8s.io/cluster-api/pkg/apis/cluster/v1alpha1"
"sigs.k8s.io/controller-runtime/pkg/client"
"fmt"
"errors"
"github.com/dbdd4us/qcloudapi-sdk-go/ccs"
"github.com/dbdd4us/qcloudapi-sdk-go/common"
"github.com/ghodss/yaml"
"golang.org/x/net/context"
"log"
"sigs.k8s.io/controller-runtime/pkg/manager"
"os"
"time"
)
type TKEProviderDeployer struct {
Name string
}
const (
KubeletVersionAnnotationKey = "kubelet-version"
)
func (a *TKEProviderDeployer) GetIP(cluster *clusterv1.Cluster, machine *clusterv1.Machine) (string, error) {
return "", nil
}
func (a *TKEProviderDeployer) GetKubeConfig(cluster *clusterv1.Cluster, master *clusterv1.Machine) (string, error) {
return "", nil
}
func NewMachineActuator(m manager.Manager) (*TKEClient, error) {
return &TKEClient{
machineClient: m.GetClient(),
}, nil
}
type TKEClient struct {
machineClient client.Client
}
func (tke *TKEClient) Create(cluster *clusterv1.Cluster, machine *clusterv1.Machine) error {
machineConfig, err := machineProviderFromProviderConfig(machine.Spec.ProviderConfig)
if err != nil {
var err error = errors.New("Cannot unmarshal machine's providerConfig field when creating machine")
fmt.Println(err.Error())
return err
}
clusterConfig, err := tkeconfigv1.ClusterProviderFromProviderConfig(cluster.Spec.ProviderConfig)
if err != nil {
var err error = errors.New("Cannot unmarshal machine's providerConfig field")
fmt.Println(err.Error())
return err
}
credential := common.Credential{
SecretId: os.Getenv("SecretId"),
SecretKey: os.Getenv("SecretKey"),
}
opts := common.Opts{
Region: clusterConfig.Region,
}
client, err := ccs.NewClient(credential, opts)
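// Wait for the cluster to be marked created before adding instances; the "status" annotation is presumably set by the cluster actuator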
for {
if cluster.ObjectMeta.Annotations["status"] == "created" {
break
}
time.Sleep(2 * time.Second)
}
log.Println(cluster.ObjectMeta.Annotations["cluster-id"])
if err != nil {
log.Fatal(err)
}
args := ccs.AddClusterInstancesArgs{
cluster.ObjectMeta.Annotations["cluster-id"],
machineConfig.ZoneId,
machineConfig.Cpu,
machineConfig.Mem,
machineConfig.BandwidthType,
machineConfig.Bandwidth,
machineConfig.SubnetId,
machineConfig.StorageSize,
machineConfig.RootSize,
1,
machineConfig.Password,
machineConfig.IsVpcGateway,
machineConfig.WanIp,
machineConfig.OsName,
}
AddClusterInstancesResponse, err := client.AddClusterInstances(&args)
if err != nil {
log.Fatal(err)
}
if machine.ObjectMeta.Annotations == nil {
machine.ObjectMeta.Annotations = make(map[string]string)
}
log.Println(AddClusterInstancesResponse)
machine.ObjectMeta.Annotations["instanceIds"] = AddClusterInstancesResponse.Data.InstanceIds[0]
machine.ObjectMeta.Annotations[KubeletVersionAnnotationKey] = machine.Spec.Versions.Kubelet
machine.ObjectMeta.Annotations["created"] = "yes"
tke.machineClient.Update(context.Background(), machine)
time.Sleep(2 * time.Second)
return nil
}
func (tke *TKEClient) Delete(cluster *clusterv1.Cluster, machine *clusterv1.Machine) error {
clusterConfig, err := tkeconfigv1.ClusterProviderFromProviderConfig(cluster.Spec.ProviderConfig)
if err != nil {
var err error = errors.New("Cannot unmarshal machine's providerConfig field")
fmt.Println(err.Error())
return err
}
credential := common.Credential{
SecretId: os.Getenv("SecretId"),
SecretKey: os.Getenv("SecretKey"),
}
opts := common.Opts{
Region: clusterConfig.Region,
}
client, err := ccs.NewClient(credential, opts)
if err != nil {
log.Fatal(err)
}
log.Println("cluster id to be delete")
log.Println(cluster.ObjectMeta.Annotations["cluster-id"])
args := ccs.DeleteClusterInstancesArgs{
cluster.ObjectMeta.Annotations["cluster-id"],
[]string{machine.ObjectMeta.Annotations["instanceIds"]},
}
DeleteClusterInstancesResponse, err := client.DeleteClusterInstances(&args)
log.Println(DeleteClusterInstancesResponse)
return nil
}
func (tke *TKEClient) Update(cluster *clusterv1.Cluster, machine *clusterv1.Machine) error {
glog.Info("hello,this is tencent tkeclient Update")
return nil
}
func (tke *TKEClient) Exists(cluster *clusterv1.Cluster, machine *clusterv1.Machine) (bool, error) {
if machine.ObjectMeta.Annotations["created"] == "" {
glog.Error("machine not exists")
return false, nil
}
glog.Info("machine exists")
return true, nil
}
func machineProviderFromProviderConfig(providerConfig clusterv1.ProviderConfig) (*tkeconfigv1.TKEMachineProviderConfig, error) {
var config tkeconfigv1.TKEMachineProviderConfig
if err := yaml.Unmarshal(providerConfig.Value.Raw, &config); err != nil {
return nil, err
}
return &config, nil
}
|
[
"\"SecretId\"",
"\"SecretKey\"",
"\"SecretId\"",
"\"SecretKey\""
] |
[] |
[
"SecretKey",
"SecretId"
] |
[]
|
["SecretKey", "SecretId"]
|
go
| 2 | 0 | |
wrappers/Python/setup.py
|
from __future__ import print_function
import platform
import subprocess, shutil, os, sys, glob
def copy_files():
def copytree(old,new):
print(old,'-->',new)
shutil.copytree(old, new)
def copy2(old, new):
print(old,'-->',new)
shutil.copy2(old, new)
import shutil
shutil.rmtree(os.path.join('CoolProp','include'), ignore_errors = True)
copytree(os.path.join(CProot, 'include'), os.path.join('CoolProp','include'))
for jsonfile in glob.glob(os.path.join('CoolProp','include','*_JSON.h')):
print('removing', jsonfile)
os.remove(jsonfile)
copytree(os.path.join(CProot, 'externals/cppformat/fmt'), os.path.join('CoolProp','include','fmt'))
copy2(os.path.join(CProot, 'CoolPropBibTeXLibrary.bib'), os.path.join('CoolProp', 'CoolPropBibTeXLibrary.bib'))
print('files copied.')
def remove_files():
import shutil
shutil.rmtree(os.path.join('CoolProp','include'), ignore_errors = True)
os.remove(os.path.join('CoolProp', 'CoolPropBibTeXLibrary.bib'))
print('files removed.')
def touch(fname):
open(fname, 'a').close()
os.utime(fname, None)
def recursive_collect_includes():
thefiles = []
include_path = os.path.join('CoolProp','include')
for root, dirs, files in os.walk(include_path):
thefiles += [os.path.relpath(os.path.join(root,_f), 'CoolProp') for _f in files]
return thefiles
if __name__=='__main__':
# Trying to change the standard library for C++
import platform
try:
macVersion = platform.mac_ver()[0].split('.')
if int(macVersion[0]) >= 10 and int(macVersion[1]) > 8:
os.environ["CC"] = "gcc"
os.environ["CXX"] = "g++"
print('switching compiler to g++ for OSX')
except:
pass
# ******************************
# CMAKE OPTIONS
# ******************************
# Example using CMake to build static library:
# python setup.py install --cmake-compiler vc9 --cmake-bitness 64
#
# or (because pip needs help)
#
# python setup.py install cmake=default,64
cmake_args = [_ for _ in sys.argv if _.startswith('cmake=')]
if cmake_args:
i = sys.argv.index(cmake_args[0])
sys.argv.pop(i)
cmake_compiler, cmake_bitness = cmake_args[0].split('cmake=')[1].split(',')
else:
if '--cmake-compiler' in sys.argv:
i = sys.argv.index('--cmake-compiler')
sys.argv.pop(i)
cmake_compiler = sys.argv.pop(i)
else:
cmake_compiler = ''
if '--cmake-bitness' in sys.argv:
i = sys.argv.index('--cmake-bitness')
sys.argv.pop(i)
cmake_bitness = sys.argv.pop(i)
else:
cmake_bitness = ''
USING_CMAKE = cmake_compiler or cmake_bitness
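# Any cmake option triggers a static-library build of the C++ core below; the extension module then links against that library instead of compiling all the C++ sources itself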
cmake_config_args = []
STATIC_LIBRARY_BUILT = False
if USING_CMAKE:
# Always force build since any changes in the C++ files will not force a rebuild
touch('CoolProp/CoolProp.pyx')
if 'clean' in sys.argv:
if os.path.exists('cmake_build'):
print('removing cmake_build folder...')
shutil.rmtree('cmake_build')
print('removed.')
cmake_config_args, cmake_build_args = [], []
if cmake_compiler == 'vc9':
cmake_build_args = ['--config','"Release"']
if cmake_bitness == '32':
cmake_config_args += ['-G','"Visual Studio 9 2008"']
elif cmake_bitness == '64':
cmake_config_args += ['-G','"Visual Studio 9 2008 Win64"']
else:
raise ValueError('cmake_bitness must be either 32 or 64; got ' + cmake_bitness)
elif cmake_compiler == 'vc10':
cmake_build_args = ['--config','"Release"']
if cmake_bitness == '32':
cmake_config_args += ['-G','"Visual Studio 10 2010"']
elif cmake_bitness == '64':
cmake_config_args += ['-G','"Visual Studio 10 2010 Win64"']
else:
raise ValueError('cmake_bitness must be either 32 or 64; got ' + cmake_bitness)
elif cmake_compiler == 'vc14':
cmake_build_args = ['--config','"Release"']
if cmake_bitness == '32':
cmake_config_args += ['-G','"Visual Studio 14 2015"']
elif cmake_bitness == '64':
cmake_config_args += ['-G','"Visual Studio 14 2015 Win64"']
else:
raise ValueError('cmake_bitness must be either 32 or 64; got ' + cmake_bitness)
elif cmake_compiler == 'mingw':
cmake_config_args = ['-G','"MinGW Makefiles"']
if cmake_bitness == '32':
cmake_config_args += ['-DFORCE_BITNESS_32=ON']
elif cmake_bitness == '64':
cmake_config_args += ['-DFORCE_BITNESS_64=ON']
else:
raise ValueError('cmake_bitness must be either 32 or 64; got ' + cmake_bitness)
elif cmake_compiler == 'default':
cmake_config_args = []
if cmake_bitness == '32':
cmake_config_args += ['-DFORCE_BITNESS_32=ON']
elif cmake_bitness == '64':
cmake_config_args += ['-DFORCE_BITNESS_64=ON']
else:
raise ValueError('cmake_bitness must be either 32 or 64; got ' + cmake_bitness)
else:
raise ValueError('cmake_compiler [' + cmake_compiler + '] is invalid')
if 'darwin' in sys.platform:
cmake_config_args += ['-DCOOLPROP_OSX_105_COMPATIBILITY=ON']
if 'linux' in sys.platform:
cmake_config_args += ['-DCOOLPROP_FPIC=ON']
#if sys.platform.startswith('win'):
# cmake_config_args += ['-DCOOLPROP_MSVC_STATIC=OFF']
cmake_build_dir = os.path.join('cmake_build', '{compiler}-{bitness}bit'.format(compiler=cmake_compiler, bitness=cmake_bitness))
if not os.path.exists(cmake_build_dir):
os.makedirs(cmake_build_dir)
cmake_call_string = ' '.join(['cmake','../../../..','-DCOOLPROP_STATIC_LIBRARY=ON','-DCMAKE_VERBOSE_MAKEFILE=ON','-DCMAKE_BUILD_TYPE=Release'] + cmake_config_args)
print('calling: ' + cmake_call_string)
subprocess.check_call(cmake_call_string, shell = True, stdout = sys.stdout, stderr = sys.stderr, cwd = cmake_build_dir)
cmake_build_string = ' '.join(['cmake','--build', '.'] + cmake_build_args)
print('calling: ' + cmake_build_string)
subprocess.check_call(cmake_build_string, shell = True, stdout = sys.stdout, stderr = sys.stderr, cwd = cmake_build_dir)
# Now find the static library that we just built
static_libs = []
for search_suffix in ['Release/*.lib','Release/*.a', 'Debug/*.lib', 'Debug/*.a','*.a']:
static_libs += glob.glob(os.path.join(cmake_build_dir,search_suffix))
if len(static_libs) != 1:
raise ValueError("Found more than one static library using CMake build. Found: "+str(static_libs))
else:
STATIC_LIBRARY_BUILT = True
static_library_path = os.path.dirname(static_libs[0])
# Check if a sdist build for pypi
pypi = os.path.exists('.use_this_directory_as_root')
"""
Modes of operation:
1) Building the source distro (generate_headers.py must have been run before making the repo)
2) Installing from source (generate_headers.py must have been run before making the repo)
3) Installing from git repo (need to make sure to run generate_headers.py)
4)
"""
# Determine whether or not to use Cython - default is to use cython unless the file .build_without_cython is found in the current working directory
USE_CYTHON = not os.path.exists('.build_without_cython')
cy_ext = 'pyx' if USE_CYTHON else 'cpp'
if USE_CYTHON:
# Check for cython >= 0.21 due to the use of cpdef enum
try:
import Cython
except ImportError:
raise ImportError("Cython not found, please install it. You can do a pip install Cython")
# Print out the version
print('Cython version: ', Cython.__version__)
from pkg_resources import parse_version
if parse_version(Cython.__version__) < parse_version('0.20'):
raise ImportError('Your version of Cython (%s) must be >= 0.20 . Please update your version of cython' % (Cython.__version__,))
if parse_version(Cython.__version__) >= parse_version('0.20'):
_profiling_enabled = True
else:
_profiling_enabled = False
# use different compiler directives for Cython 0.26 or above
if parse_version(Cython.__version__) >= parse_version('0.26'):
cython_directives = dict(profile = _profiling_enabled,
embedsignature = True)
else:
cython_directives = dict(profile = _profiling_enabled,
embedsignature = True)
else:
cython_directives = {}
# Determine the path to the root of the repository, the folder that contains the CMakeLists.txt file
# for normal builds, or the main directory for sdist builds
if pypi:
CProot = '.'
else:
if os.path.exists(os.path.join('..','..','CMakeLists.txt')):
# Good working directory
CProot = os.path.join('..','..')
else:
raise ValueError('Could not run script from this folder(' + os.path.abspath(os.path.curdir) + '). Run from wrappers/Python folder')
sys.path.append(os.path.join(CProot, 'dev'))
if not USING_CMAKE:
import generate_headers
# Generate the headers - does nothing if up to date - but only if not pypi
generate_headers.generate()
del generate_headers
import generate_constants_module
generate_constants_module.generate()
del generate_constants_module
# Read the version from a bare string stored in file in root directory
version = open(os.path.join(CProot,'.version'),'r').read().strip()
setup_kwargs = {}
from setuptools import setup, Extension, find_packages
if USE_CYTHON:
import Cython.Compiler
from Cython.Distutils.extension import Extension
from Cython.Build import cythonize
from Cython.Distutils import build_ext
# This will always generate HTML to show where there are still pythonic bits hiding out
Cython.Compiler.Options.annotate = True
setup_kwargs['cmdclass'] = dict(build_ext = build_ext)
print('Cython will be used; cy_ext is ' + cy_ext)
else:
print('Cython will not be used; cy_ext is ' + cy_ext)
def find_cpp_sources(root = os.path.join('..','..','src'), extensions = ['.cpp'], skip_files = None):
file_listing = []
for path, dirs, files in os.walk(root):
for file in files:
n,ext = os.path.splitext(file)
fname = os.path.relpath(os.path.join(path, file))
if skip_files is not None and fname in skip_files: continue
if ext in extensions:
file_listing.append(fname)
return file_listing
# Set variables for C++ sources and include directories
sources = find_cpp_sources(os.path.join(CProot,'src'), '*.cpp')
include_dirs = [
os.path.join(CProot),
os.path.join(CProot, 'include'),
os.path.join(CProot, 'src'),
os.path.join(CProot, 'externals', 'Eigen'),
os.path.join(CProot, 'externals', 'cppformat'),
os.path.join(CProot, 'externals', 'msgpack-c', 'include')]
## If the file is run directly without any parameters, clean, build and install
if len(sys.argv)==1:
sys.argv += ['clean', 'install']
common_args = dict(include_dirs = include_dirs,
language='c++')
if USE_CYTHON:
common_args.update(dict(cython_c_in_temp = True,
cython_directives = cython_directives
)
)
if STATIC_LIBRARY_BUILT == True:
CoolProp_module = Extension('CoolProp.CoolProp',
[os.path.join('CoolProp','CoolProp.' + cy_ext)],
libraries = ['CoolProp'],
library_dirs = [static_library_path],
**common_args)
else:
CoolProp_module = Extension('CoolProp.CoolProp',
[os.path.join('CoolProp','CoolProp.' + cy_ext)] + sources,
**common_args)
constants_module = Extension('CoolProp._constants',
[os.path.join('CoolProp','_constants.' + cy_ext)],
**common_args)
if not pypi:
copy_files()
ext_modules = [CoolProp_module, constants_module]
if USE_CYTHON:
ext_modules = cythonize(ext_modules, compiler_directives = cython_directives)
try:
setup (name = 'CoolProp',
version = version, # look above for the definition of version variable - don't modify it here
author = "Ian Bell",
author_email='[email protected]',
url='http://www.coolprop.org',
description = """Open-source thermodynamic and transport properties database""",
packages = find_packages(),
ext_modules = ext_modules,
package_dir = {'CoolProp':'CoolProp',},
package_data = {'CoolProp':['*.pxd',
'CoolPropBibTeXLibrary.bib',
'Plots/psyrc'] + recursive_collect_includes()},
classifiers = [
"Programming Language :: Python",
"Development Status :: 4 - Beta",
"Environment :: Other Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries :: Python Modules"
],
**setup_kwargs
)
except BaseException as E:
if not pypi:
remove_files()
raise
else:
if not pypi:
remove_files()
|
[] |
[] |
[
"CXX",
"CC"
] |
[]
|
["CXX", "CC"]
|
python
| 2 | 0 | |
pkg/configurator/methods.go
|
package configurator
import (
"encoding/json"
"fmt"
"os"
"time"
corev1 "k8s.io/api/core/v1"
configv1alpha2 "github.com/openservicemesh/osm/pkg/apis/config/v1alpha2"
"github.com/openservicemesh/osm/pkg/auth"
"github.com/openservicemesh/osm/pkg/constants"
"github.com/openservicemesh/osm/pkg/errcode"
)
const (
// defaultServiceCertValidityDuration is the default validity duration for service certificates
defaultServiceCertValidityDuration = 24 * time.Hour
// defaultCertKeyBitSize is the default certificate key bit size
defaultCertKeyBitSize = 2048
// minCertKeyBitSize is the minimum certificate key bit size
minCertKeyBitSize = 2048
// maxCertKeyBitSize is the maximum certificate key bit size
maxCertKeyBitSize = 4096
)
// The functions in this file implement the configurator.Configurator interface
// GetMeshConfig returns the MeshConfig resource corresponding to the control plane
func (c *client) GetMeshConfig() configv1alpha2.MeshConfig {
return c.getMeshConfig()
}
// GetOSMNamespace returns the namespace in which the OSM controller pod resides.
func (c *client) GetOSMNamespace() string {
return c.osmNamespace
}
func marshalConfigToJSON(config configv1alpha2.MeshConfigSpec) (string, error) {
bytes, err := json.MarshalIndent(&config, "", " ")
if err != nil {
return "", err
}
return string(bytes), nil
}
// GetMeshConfigJSON returns the MeshConfig in pretty JSON.
func (c *client) GetMeshConfigJSON() (string, error) {
cm, err := marshalConfigToJSON(c.getMeshConfig().Spec)
if err != nil {
log.Error().Err(err).Str(errcode.Kind, errcode.GetErrCodeWithMetric(errcode.ErrMeshConfigMarshaling)).Msgf("Error marshaling MeshConfig %s: %+v", c.getMeshConfigCacheKey(), c.getMeshConfig())
return "", err
}
return cm, nil
}
// IsPermissiveTrafficPolicyMode tells us whether the OSM Control Plane is in permissive mode,
// where all existing traffic is allowed to flow as it is,
// or it is in SMI Spec mode, in which only traffic between source/destinations
// referenced in SMI policies is allowed.
func (c *client) IsPermissiveTrafficPolicyMode() bool {
return c.getMeshConfig().Spec.Traffic.EnablePermissiveTrafficPolicyMode
}
// IsEgressEnabled determines whether egress is globally enabled in the mesh or not.
func (c *client) IsEgressEnabled() bool {
return c.getMeshConfig().Spec.Traffic.EnableEgress
}
// IsDebugServerEnabled determines whether osm debug HTTP server is enabled
func (c *client) IsDebugServerEnabled() bool {
return c.getMeshConfig().Spec.Observability.EnableDebugServer
}
// IsTracingEnabled returns whether tracing is enabled
func (c *client) IsTracingEnabled() bool {
return c.getMeshConfig().Spec.Observability.Tracing.Enable
}
// GetTracingHost is the host to which we send tracing spans
func (c *client) GetTracingHost() string {
tracingAddress := c.getMeshConfig().Spec.Observability.Tracing.Address
if tracingAddress != "" {
return tracingAddress
}
return fmt.Sprintf("%s.%s.svc.cluster.local", constants.DefaultTracingHost, c.GetOSMNamespace())
}
// GetTracingPort returns the tracing listener port
func (c *client) GetTracingPort() uint32 {
tracingPort := c.getMeshConfig().Spec.Observability.Tracing.Port
if tracingPort != 0 {
return uint32(tracingPort)
}
return constants.DefaultTracingPort
}
// GetTracingEndpoint returns the listener's collector endpoint
func (c *client) GetTracingEndpoint() string {
tracingEndpoint := c.getMeshConfig().Spec.Observability.Tracing.Endpoint
if tracingEndpoint != "" {
return tracingEndpoint
}
return constants.DefaultTracingEndpoint
}
// GetMaxDataPlaneConnections returns the max data plane connections allowed, 0 if disabled
func (c *client) GetMaxDataPlaneConnections() int {
return c.getMeshConfig().Spec.Sidecar.MaxDataPlaneConnections
}
// GetEnvoyLogLevel returns the envoy log level
func (c *client) GetEnvoyLogLevel() string {
logLevel := c.getMeshConfig().Spec.Sidecar.LogLevel
if logLevel != "" {
return logLevel
}
return constants.DefaultEnvoyLogLevel
}
// GetEnvoyImage returns the envoy image
func (c *client) GetEnvoyImage() string {
image := c.getMeshConfig().Spec.Sidecar.EnvoyImage
if image == "" {
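// Fall back to the default image supplied through the controller's environment when the MeshConfig leaves it empty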
image = os.Getenv("OSM_DEFAULT_ENVOY_IMAGE")
}
return image
}
// GetEnvoyWindowsImage returns the envoy windows image
func (c *client) GetEnvoyWindowsImage() string {
image := c.getMeshConfig().Spec.Sidecar.EnvoyWindowsImage
if image == "" {
image = os.Getenv("OSM_DEFAULT_ENVOY_WINDOWS_IMAGE")
}
return image
}
// GetInitContainerImage returns the init container image
func (c *client) GetInitContainerImage() string {
image := c.getMeshConfig().Spec.Sidecar.InitContainerImage
if image == "" {
image = os.Getenv("OSM_DEFAULT_INIT_CONTAINER_IMAGE")
}
return image
}
// GetServiceCertValidityPeriod returns the validity duration for service certificates, and a default in case of invalid duration
func (c *client) GetServiceCertValidityPeriod() time.Duration {
durationStr := c.getMeshConfig().Spec.Certificate.ServiceCertValidityDuration
validityDuration, err := time.ParseDuration(durationStr)
if err != nil {
log.Error().Err(err).Msgf("Error parsing service certificate validity duration %s", durationStr)
return defaultServiceCertValidityDuration
}
return validityDuration
}
// GetCertKeyBitSize returns the certificate key bit size to be used
func (c *client) GetCertKeyBitSize() int {
bitSize := c.getMeshConfig().Spec.Certificate.CertKeyBitSize
if bitSize < minCertKeyBitSize || bitSize > maxCertKeyBitSize {
log.Error().Msgf("Invalid key bit size: %d", bitSize)
return defaultCertKeyBitSize
}
return bitSize
}
// IsPrivilegedInitContainer returns whether init containers should be privileged
func (c *client) IsPrivilegedInitContainer() bool {
return c.getMeshConfig().Spec.Sidecar.EnablePrivilegedInitContainer
}
// GetConfigResyncInterval returns the duration for resync interval.
// If error or non-parsable value, returns 0 duration
func (c *client) GetConfigResyncInterval() time.Duration {
resyncDuration := c.getMeshConfig().Spec.Sidecar.ConfigResyncInterval
duration, err := time.ParseDuration(resyncDuration)
if err != nil {
log.Debug().Err(err).Msgf("Error parsing config resync interval: %s", duration)
return time.Duration(0)
}
return duration
}
// GetProxyResources returns the `Resources` configured for proxies, if any
func (c *client) GetProxyResources() corev1.ResourceRequirements {
return c.getMeshConfig().Spec.Sidecar.Resources
}
// GetInboundExternalAuthConfig returns the External Authentication configuration for incoming traffic, if any
func (c *client) GetInboundExternalAuthConfig() auth.ExtAuthConfig {
extAuthConfig := auth.ExtAuthConfig{}
inboundExtAuthzMeshConfig := c.getMeshConfig().Spec.Traffic.InboundExternalAuthorization
extAuthConfig.Enable = inboundExtAuthzMeshConfig.Enable
extAuthConfig.Address = inboundExtAuthzMeshConfig.Address
extAuthConfig.Port = uint16(inboundExtAuthzMeshConfig.Port)
extAuthConfig.StatPrefix = inboundExtAuthzMeshConfig.StatPrefix
extAuthConfig.FailureModeAllow = inboundExtAuthzMeshConfig.FailureModeAllow
duration, err := time.ParseDuration(inboundExtAuthzMeshConfig.Timeout)
if err != nil {
log.Debug().Err(err).Msgf("ExternAuthzTimeout: Not a valid duration %s. defaulting to 1s.", duration)
duration = 1 * time.Second
}
extAuthConfig.AuthzTimeout = duration
return extAuthConfig
}
// GetFeatureFlags returns OSM's feature flags
func (c *client) GetFeatureFlags() configv1alpha2.FeatureFlags {
return c.getMeshConfig().Spec.FeatureFlags
}
// GetOSMLogLevel returns the configured OSM log level
func (c *client) GetOSMLogLevel() string {
return c.getMeshConfig().Spec.Observability.OSMLogLevel
}
|
[
"\"OSM_DEFAULT_ENVOY_IMAGE\"",
"\"OSM_DEFAULT_ENVOY_WINDOWS_IMAGE\"",
"\"OSM_DEFAULT_INIT_CONTAINER_IMAGE\""
] |
[] |
[
"OSM_DEFAULT_INIT_CONTAINER_IMAGE",
"OSM_DEFAULT_ENVOY_WINDOWS_IMAGE",
"OSM_DEFAULT_ENVOY_IMAGE"
] |
[]
|
["OSM_DEFAULT_INIT_CONTAINER_IMAGE", "OSM_DEFAULT_ENVOY_WINDOWS_IMAGE", "OSM_DEFAULT_ENVOY_IMAGE"]
|
go
| 3 | 0 | |
HCmd/logger.py
|
# -*- coding: utf-8 -*-
from logging.handlers import TimedRotatingFileHandler
import logging
import os
import sys
from datetime import datetime
FORMATTER_FILE = logging.Formatter(
"%(asctime)s — %(name)s — %(levelname)s — %(message)s")
FORMATTER_CONSOLE = FORMATTER_FILE
LOG_FILE = os.path.join(os.environ['HOME'],"catkin_ws/src/asdp4_hornet","HCmd.log")
def get_console_handler():
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(FORMATTER_CONSOLE)
return console_handler
def get_file_handler():
file_handler = TimedRotatingFileHandler(LOG_FILE, when='midnight', backupCount=10)
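# Rotate the log at midnight and keep the ten most recent files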
file_handler.setFormatter(FORMATTER_FILE)
return file_handler
class cust_logger(logging.Logger):
def __init__(self, name, level = logging.NOTSET):
return super(cust_logger, self).__init__(name, level)
def error(self, msg, *args, **kwargs):
return super(cust_logger, self).warning(msg, *args, **kwargs)
def get_logger(logger_name):
logging.setLoggerClass(cust_logger)
logging.basicConfig()
logger = logging.getLogger(logger_name)
# better to have too much log than not enough
logger.setLevel(logging.DEBUG)
logger.addHandler(get_console_handler())
logger.addHandler(get_file_handler())
# with this pattern, it's rarely necessary to propagate the error up to parent
logger.propagate = False
return logger
|
[] |
[] |
[
"HOME"
] |
[]
|
["HOME"]
|
python
| 1 | 0 | |
daemon/daemon_unix.go
|
// +build linux freebsd
package daemon // import "github.com/docker/docker/daemon"
import (
"bufio"
"context"
"fmt"
"io/ioutil"
"net"
"os"
"path/filepath"
"runtime"
"runtime/debug"
"strconv"
"strings"
"time"
containerd_cgroups "github.com/containerd/cgroups"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/blkiodev"
pblkiodev "github.com/docker/docker/api/types/blkiodev"
containertypes "github.com/docker/docker/api/types/container"
"github.com/docker/docker/container"
"github.com/docker/docker/daemon/config"
"github.com/docker/docker/daemon/initlayer"
"github.com/docker/docker/opts"
"github.com/docker/docker/pkg/containerfs"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/docker/pkg/mount"
"github.com/docker/docker/pkg/parsers"
"github.com/docker/docker/pkg/parsers/kernel"
"github.com/docker/docker/pkg/sysinfo"
"github.com/docker/docker/runconfig"
volumemounts "github.com/docker/docker/volume/mounts"
"github.com/docker/libnetwork"
nwconfig "github.com/docker/libnetwork/config"
"github.com/docker/libnetwork/drivers/bridge"
"github.com/docker/libnetwork/netlabel"
"github.com/docker/libnetwork/netutils"
"github.com/docker/libnetwork/options"
lntypes "github.com/docker/libnetwork/types"
"github.com/opencontainers/runc/libcontainer/cgroups"
rsystem "github.com/opencontainers/runc/libcontainer/system"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/vishvananda/netlink"
"golang.org/x/sys/unix"
)
const (
// DefaultShimBinary is the default shim to be used by containerd if none
// is specified
DefaultShimBinary = "docker-containerd-shim"
// DefaultRuntimeBinary is the default runtime to be used by
// containerd if none is specified
DefaultRuntimeBinary = "docker-runc"
// See https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/tree/kernel/sched/sched.h?id=8cd9234c64c584432f6992fe944ca9e46ca8ea76#n269
linuxMinCPUShares = 2
linuxMaxCPUShares = 262144
platformSupported = true
// It's not a kernel limit; we want this 4M limit to allow for a reasonably functional container
linuxMinMemory = 4194304
// constants for remapped root settings
defaultIDSpecifier string = "default"
defaultRemappedID string = "dockremap"
// constant for cgroup drivers
cgroupFsDriver = "cgroupfs"
cgroupSystemdDriver = "systemd"
// DefaultRuntimeName is the default runtime to be used by
// containerd if none is specified
DefaultRuntimeName = "docker-runc"
)
type containerGetter interface {
GetContainer(string) (*container.Container, error)
}
func getMemoryResources(config containertypes.Resources) *specs.LinuxMemory {
memory := specs.LinuxMemory{}
if config.Memory > 0 {
memory.Limit = &config.Memory
}
if config.MemoryReservation > 0 {
memory.Reservation = &config.MemoryReservation
}
if config.MemorySwap > 0 {
memory.Swap = &config.MemorySwap
}
if config.MemorySwappiness != nil {
swappiness := uint64(*config.MemorySwappiness)
memory.Swappiness = &swappiness
}
if config.OomKillDisable != nil {
memory.DisableOOMKiller = config.OomKillDisable
}
if config.KernelMemory != 0 {
memory.Kernel = &config.KernelMemory
}
return &memory
}
func getCPUResources(config containertypes.Resources) (*specs.LinuxCPU, error) {
cpu := specs.LinuxCPU{}
if config.CPUShares < 0 {
return nil, fmt.Errorf("shares: invalid argument")
}
if config.CPUShares >= 0 {
shares := uint64(config.CPUShares)
cpu.Shares = &shares
}
if config.CpusetCpus != "" {
cpu.Cpus = config.CpusetCpus
}
if config.CpusetMems != "" {
cpu.Mems = config.CpusetMems
}
if config.NanoCPUs > 0 {
// https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt
period := uint64(100 * time.Millisecond / time.Microsecond)
quota := config.NanoCPUs * int64(period) / 1e9
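// e.g. NanoCPUs of 2.5e9 (2.5 CPUs) with the 100ms period above yields a 250000µs quota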
cpu.Period = &period
cpu.Quota = &quota
}
if config.CPUPeriod != 0 {
period := uint64(config.CPUPeriod)
cpu.Period = &period
}
if config.CPUQuota != 0 {
q := config.CPUQuota
cpu.Quota = &q
}
if config.CPURealtimePeriod != 0 {
period := uint64(config.CPURealtimePeriod)
cpu.RealtimePeriod = &period
}
if config.CPURealtimeRuntime != 0 {
c := config.CPURealtimeRuntime
cpu.RealtimeRuntime = &c
}
return &cpu, nil
}
func getBlkioWeightDevices(config containertypes.Resources) ([]specs.LinuxWeightDevice, error) {
var stat unix.Stat_t
var blkioWeightDevices []specs.LinuxWeightDevice
for _, weightDevice := range config.BlkioWeightDevice {
if err := unix.Stat(weightDevice.Path, &stat); err != nil {
return nil, err
}
weight := weightDevice.Weight
d := specs.LinuxWeightDevice{Weight: &weight}
d.Major = int64(stat.Rdev / 256)
d.Minor = int64(stat.Rdev % 256)
blkioWeightDevices = append(blkioWeightDevices, d)
}
return blkioWeightDevices, nil
}
func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error {
container.NoNewPrivileges = daemon.configStore.NoNewPrivileges
return parseSecurityOpt(container, hostConfig)
}
func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error {
var (
labelOpts []string
err error
)
for _, opt := range config.SecurityOpt {
if opt == "no-new-privileges" {
container.NoNewPrivileges = true
continue
}
if opt == "disable" {
labelOpts = append(labelOpts, "disable")
continue
}
var con []string
if strings.Contains(opt, "=") {
con = strings.SplitN(opt, "=", 2)
} else if strings.Contains(opt, ":") {
con = strings.SplitN(opt, ":", 2)
logrus.Warn("Security options with `:` as a separator are deprecated and will be completely unsupported in 17.04, use `=` instead.")
}
if len(con) != 2 {
return fmt.Errorf("invalid --security-opt 1: %q", opt)
}
switch con[0] {
case "label":
labelOpts = append(labelOpts, con[1])
case "apparmor":
container.AppArmorProfile = con[1]
case "seccomp":
container.SeccompProfile = con[1]
case "no-new-privileges":
noNewPrivileges, err := strconv.ParseBool(con[1])
if err != nil {
return fmt.Errorf("invalid --security-opt 2: %q", opt)
}
container.NoNewPrivileges = noNewPrivileges
default:
return fmt.Errorf("invalid --security-opt 2: %q", opt)
}
}
container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts)
return err
}
func getBlkioThrottleDevices(devs []*blkiodev.ThrottleDevice) ([]specs.LinuxThrottleDevice, error) {
var throttleDevices []specs.LinuxThrottleDevice
var stat unix.Stat_t
for _, d := range devs {
if err := unix.Stat(d.Path, &stat); err != nil {
return nil, err
}
d := specs.LinuxThrottleDevice{Rate: d.Rate}
d.Major = int64(stat.Rdev / 256)
d.Minor = int64(stat.Rdev % 256)
throttleDevices = append(throttleDevices, d)
}
return throttleDevices, nil
}
func checkKernel() error {
// Check for unsupported kernel versions
// FIXME: it would be cleaner to not test for specific versions, but rather
// test for specific functionalities.
// Unfortunately we can't test for the feature "does not cause a kernel panic"
// without actually causing a kernel panic, so we need this workaround until
// the circumstances of pre-3.10 crashes are clearer.
// For details see https://github.com/docker/docker/issues/407
// Docker 1.11 and above doesn't actually run on kernels older than 3.4,
// due to containerd-shim usage of PR_SET_CHILD_SUBREAPER (introduced in 3.4).
if !kernel.CheckKernelVersion(3, 10, 0) {
v, _ := kernel.GetKernelVersion()
if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" {
logrus.Fatalf("Your Linux kernel version %s is not supported for running docker. Please upgrade your kernel to 3.10.0 or newer.", v.String())
}
}
return nil
}
// adaptContainerSettings is called during container creation to modify any
// settings necessary in the HostConfig structure.
func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error {
if adjustCPUShares && hostConfig.CPUShares > 0 {
// Handle unsupported CPUShares
if hostConfig.CPUShares < linuxMinCPUShares {
logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares)
hostConfig.CPUShares = linuxMinCPUShares
} else if hostConfig.CPUShares > linuxMaxCPUShares {
logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares)
hostConfig.CPUShares = linuxMaxCPUShares
}
}
if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 {
// By default, MemorySwap is set to twice the size of Memory.
hostConfig.MemorySwap = hostConfig.Memory * 2
}
if hostConfig.ShmSize == 0 {
hostConfig.ShmSize = config.DefaultShmSize
if daemon.configStore != nil {
hostConfig.ShmSize = int64(daemon.configStore.ShmSize)
}
}
// Set default IPC mode, if unset for container
if hostConfig.IpcMode.IsEmpty() {
m := config.DefaultIpcMode
if daemon.configStore != nil {
m = daemon.configStore.IpcMode
}
hostConfig.IpcMode = containertypes.IpcMode(m)
}
adaptSharedNamespaceContainer(daemon, hostConfig)
var err error
opts, err := daemon.generateSecurityOpt(hostConfig)
if err != nil {
return err
}
hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, opts...)
if hostConfig.OomKillDisable == nil {
defaultOomKillDisable := false
hostConfig.OomKillDisable = &defaultOomKillDisable
}
return nil
}
// adaptSharedNamespaceContainer replaces a container name with its ID in hostConfig.
// More precisely, it rewrites `container:name` to `container:ID` in PidMode, IpcMode
// and NetworkMode.
//
// When a container shares its namespace with another container, using the ID keeps the
// namespace-sharing connection between the two containers even if the other container is renamed.
func adaptSharedNamespaceContainer(daemon containerGetter, hostConfig *containertypes.HostConfig) {
containerPrefix := "container:"
if hostConfig.PidMode.IsContainer() {
pidContainer := hostConfig.PidMode.Container()
// if there is any error returned here, we just ignore it and leave it to be
// handled in the following logic
if c, err := daemon.GetContainer(pidContainer); err == nil {
hostConfig.PidMode = containertypes.PidMode(containerPrefix + c.ID)
}
}
if hostConfig.IpcMode.IsContainer() {
ipcContainer := hostConfig.IpcMode.Container()
if c, err := daemon.GetContainer(ipcContainer); err == nil {
hostConfig.IpcMode = containertypes.IpcMode(containerPrefix + c.ID)
}
}
if hostConfig.NetworkMode.IsContainer() {
netContainer := hostConfig.NetworkMode.ConnectedContainer()
if c, err := daemon.GetContainer(netContainer); err == nil {
hostConfig.NetworkMode = containertypes.NetworkMode(containerPrefix + c.ID)
}
}
}
func verifyContainerResources(resources *containertypes.Resources, sysInfo *sysinfo.SysInfo, update bool) ([]string, error) {
warnings := []string{}
fixMemorySwappiness(resources)
// memory subsystem checks and adjustments
if resources.Memory != 0 && resources.Memory < linuxMinMemory {
return warnings, fmt.Errorf("Minimum memory limit allowed is 4MB")
}
if resources.Memory > 0 && !sysInfo.MemoryLimit {
warnings = append(warnings, "Your kernel does not support memory limit capabilities or the cgroup is not mounted. Limitation discarded.")
logrus.Warn("Your kernel does not support memory limit capabilities or the cgroup is not mounted. Limitation discarded.")
resources.Memory = 0
resources.MemorySwap = -1
}
if resources.Memory > 0 && resources.MemorySwap != -1 && !sysInfo.SwapLimit {
warnings = append(warnings, "Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.")
logrus.Warn("Your kernel does not support swap limit capabilities,or the cgroup is not mounted. Memory limited without swap.")
resources.MemorySwap = -1
}
if resources.Memory > 0 && resources.MemorySwap > 0 && resources.MemorySwap < resources.Memory {
return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage")
}
if resources.Memory == 0 && resources.MemorySwap > 0 && !update {
return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage")
}
if resources.MemorySwappiness != nil && !sysInfo.MemorySwappiness {
warnings = append(warnings, "Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. Memory swappiness discarded.")
logrus.Warn("Your kernel does not support memory swappiness capabilities, or the cgroup is not mounted. Memory swappiness discarded.")
resources.MemorySwappiness = nil
}
if resources.MemorySwappiness != nil {
swappiness := *resources.MemorySwappiness
if swappiness < 0 || swappiness > 100 {
return warnings, fmt.Errorf("Invalid value: %v, valid memory swappiness range is 0-100", swappiness)
}
}
if resources.MemoryReservation > 0 && !sysInfo.MemoryReservation {
warnings = append(warnings, "Your kernel does not support memory soft limit capabilities or the cgroup is not mounted. Limitation discarded.")
logrus.Warn("Your kernel does not support memory soft limit capabilities or the cgroup is not mounted. Limitation discarded.")
resources.MemoryReservation = 0
}
if resources.MemoryReservation > 0 && resources.MemoryReservation < linuxMinMemory {
return warnings, fmt.Errorf("Minimum memory reservation allowed is 4MB")
}
if resources.Memory > 0 && resources.MemoryReservation > 0 && resources.Memory < resources.MemoryReservation {
return warnings, fmt.Errorf("Minimum memory limit can not be less than memory reservation limit, see usage")
}
if resources.KernelMemory > 0 && !sysInfo.KernelMemory {
warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities or the cgroup is not mounted. Limitation discarded.")
logrus.Warn("Your kernel does not support kernel memory limit capabilities or the cgroup is not mounted. Limitation discarded.")
resources.KernelMemory = 0
}
if resources.KernelMemory > 0 && resources.KernelMemory < linuxMinMemory {
return warnings, fmt.Errorf("Minimum kernel memory limit allowed is 4MB")
}
if resources.KernelMemory > 0 && !kernel.CheckKernelVersion(4, 0, 0) {
warnings = append(warnings, "You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.")
logrus.Warn("You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.")
}
if resources.OomKillDisable != nil && !sysInfo.OomKillDisable {
// only produce warnings if the setting wasn't to *disable* the OOM Kill; no point
// warning the caller if they already wanted the feature to be off
if *resources.OomKillDisable {
warnings = append(warnings, "Your kernel does not support OomKillDisable. OomKillDisable discarded.")
logrus.Warn("Your kernel does not support OomKillDisable. OomKillDisable discarded.")
}
resources.OomKillDisable = nil
}
if resources.PidsLimit != 0 && !sysInfo.PidsLimit {
warnings = append(warnings, "Your kernel does not support pids limit capabilities or the cgroup is not mounted. PIDs limit discarded.")
logrus.Warn("Your kernel does not support pids limit capabilities or the cgroup is not mounted. PIDs limit discarded.")
resources.PidsLimit = 0
}
// cpu subsystem checks and adjustments
if resources.NanoCPUs > 0 && resources.CPUPeriod > 0 {
return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Period cannot both be set")
}
if resources.NanoCPUs > 0 && resources.CPUQuota > 0 {
return warnings, fmt.Errorf("Conflicting options: Nano CPUs and CPU Quota cannot both be set")
}
if resources.NanoCPUs > 0 && (!sysInfo.CPUCfsPeriod || !sysInfo.CPUCfsQuota) {
return warnings, fmt.Errorf("NanoCPUs can not be set, as your kernel does not support CPU cfs period/quota or the cgroup is not mounted")
}
// The highest precision we can get on Linux is 0.001 CPUs, by setting
// cpu.cfs_period_us=1000000 (1000ms)
// cpu.cfs_quota_us=1000 (1ms)
// See the following link for details:
// https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt
// Here we don't enforce the lower limit and leave it to the underlying platform (e.g., Linux) to return an error.
// The error message mentions 0.01 so that it is consistent with Windows
if resources.NanoCPUs < 0 || resources.NanoCPUs > int64(sysinfo.NumCPU())*1e9 {
return warnings, fmt.Errorf("Range of CPUs is from 0.01 to %d.00, as there are only %d CPUs available", sysinfo.NumCPU(), sysinfo.NumCPU())
}
if resources.CPUShares > 0 && !sysInfo.CPUShares {
warnings = append(warnings, "Your kernel does not support CPU shares or the cgroup is not mounted. Shares discarded.")
logrus.Warn("Your kernel does not support CPU shares or the cgroup is not mounted. Shares discarded.")
resources.CPUShares = 0
}
if resources.CPUPeriod > 0 && !sysInfo.CPUCfsPeriod {
warnings = append(warnings, "Your kernel does not support CPU cfs period or the cgroup is not mounted. Period discarded.")
logrus.Warn("Your kernel does not support CPU cfs period or the cgroup is not mounted. Period discarded.")
resources.CPUPeriod = 0
}
if resources.CPUPeriod != 0 && (resources.CPUPeriod < 1000 || resources.CPUPeriod > 1000000) {
return warnings, fmt.Errorf("CPU cfs period can not be less than 1ms (i.e. 1000) or larger than 1s (i.e. 1000000)")
}
if resources.CPUQuota > 0 && !sysInfo.CPUCfsQuota {
warnings = append(warnings, "Your kernel does not support CPU cfs quota or the cgroup is not mounted. Quota discarded.")
logrus.Warn("Your kernel does not support CPU cfs quota or the cgroup is not mounted. Quota discarded.")
resources.CPUQuota = 0
}
if resources.CPUQuota > 0 && resources.CPUQuota < 1000 {
return warnings, fmt.Errorf("CPU cfs quota can not be less than 1ms (i.e. 1000)")
}
if resources.CPUPercent > 0 {
warnings = append(warnings, fmt.Sprintf("%s does not support CPU percent. Percent discarded.", runtime.GOOS))
logrus.Warnf("%s does not support CPU percent. Percent discarded.", runtime.GOOS)
resources.CPUPercent = 0
}
// cpuset subsystem checks and adjustments
if (resources.CpusetCpus != "" || resources.CpusetMems != "") && !sysInfo.Cpuset {
warnings = append(warnings, "Your kernel does not support cpuset or the cgroup is not mounted. Cpuset discarded.")
logrus.Warn("Your kernel does not support cpuset or the cgroup is not mounted. Cpuset discarded.")
resources.CpusetCpus = ""
resources.CpusetMems = ""
}
cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(resources.CpusetCpus)
if err != nil {
return warnings, fmt.Errorf("Invalid value %s for cpuset cpus", resources.CpusetCpus)
}
if !cpusAvailable {
return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s", resources.CpusetCpus, sysInfo.Cpus)
}
memsAvailable, err := sysInfo.IsCpusetMemsAvailable(resources.CpusetMems)
if err != nil {
return warnings, fmt.Errorf("Invalid value %s for cpuset mems", resources.CpusetMems)
}
if !memsAvailable {
return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s", resources.CpusetMems, sysInfo.Mems)
}
// blkio subsystem checks and adjustments
if resources.BlkioWeight > 0 && !sysInfo.BlkioWeight {
warnings = append(warnings, "Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.")
logrus.Warn("Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.")
resources.BlkioWeight = 0
}
if resources.BlkioWeight > 0 && (resources.BlkioWeight < 10 || resources.BlkioWeight > 1000) {
return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000")
}
if resources.IOMaximumBandwidth != 0 || resources.IOMaximumIOps != 0 {
return warnings, fmt.Errorf("Invalid QoS settings: %s does not support Maximum IO Bandwidth or Maximum IO IOps", runtime.GOOS)
}
if len(resources.BlkioWeightDevice) > 0 && !sysInfo.BlkioWeightDevice {
warnings = append(warnings, "Your kernel does not support Block I/O weight_device or the cgroup is not mounted. Weight-device discarded.")
logrus.Warn("Your kernel does not support Block I/O weight_device or the cgroup is not mounted. Weight-device discarded.")
resources.BlkioWeightDevice = []*pblkiodev.WeightDevice{}
}
if len(resources.BlkioDeviceReadBps) > 0 && !sysInfo.BlkioReadBpsDevice {
warnings = append(warnings, "Your kernel does not support BPS Block I/O read limit or the cgroup is not mounted. Block I/O BPS read limit discarded.")
logrus.Warn("Your kernel does not support BPS Block I/O read limit or the cgroup is not mounted. Block I/O BPS read limit discarded")
resources.BlkioDeviceReadBps = []*pblkiodev.ThrottleDevice{}
}
if len(resources.BlkioDeviceWriteBps) > 0 && !sysInfo.BlkioWriteBpsDevice {
warnings = append(warnings, "Your kernel does not support BPS Block I/O write limit or the cgroup is not mounted. Block I/O BPS write limit discarded.")
logrus.Warn("Your kernel does not support BPS Block I/O write limit or the cgroup is not mounted. Block I/O BPS write limit discarded.")
resources.BlkioDeviceWriteBps = []*pblkiodev.ThrottleDevice{}
}
if len(resources.BlkioDeviceReadIOps) > 0 && !sysInfo.BlkioReadIOpsDevice {
warnings = append(warnings, "Your kernel does not support IOPS Block read limit or the cgroup is not mounted. Block I/O IOPS read limit discarded.")
logrus.Warn("Your kernel does not support IOPS Block I/O read limit in IO or the cgroup is not mounted. Block I/O IOPS read limit discarded.")
resources.BlkioDeviceReadIOps = []*pblkiodev.ThrottleDevice{}
}
if len(resources.BlkioDeviceWriteIOps) > 0 && !sysInfo.BlkioWriteIOpsDevice {
warnings = append(warnings, "Your kernel does not support IOPS Block write limit or the cgroup is not mounted. Block I/O IOPS write limit discarded.")
logrus.Warn("Your kernel does not support IOPS Block I/O write limit or the cgroup is not mounted. Block I/O IOPS write limit discarded.")
resources.BlkioDeviceWriteIOps = []*pblkiodev.ThrottleDevice{}
}
return warnings, nil
}
func (daemon *Daemon) getCgroupDriver() string {
cgroupDriver := cgroupFsDriver
if UsingSystemd(daemon.configStore) {
cgroupDriver = cgroupSystemdDriver
}
return cgroupDriver
}
// getCD gets the raw value of the native.cgroupdriver option, if set.
func getCD(config *config.Config) string {
for _, option := range config.ExecOptions {
key, val, err := parsers.ParseKeyValueOpt(option)
if err != nil || !strings.EqualFold(key, "native.cgroupdriver") {
continue
}
return val
}
return ""
}
// VerifyCgroupDriver validates native.cgroupdriver
func VerifyCgroupDriver(config *config.Config) error {
cd := getCD(config)
if cd == "" || cd == cgroupFsDriver || cd == cgroupSystemdDriver {
return nil
}
return fmt.Errorf("native.cgroupdriver option %s not supported", cd)
}
// UsingSystemd returns true if cli option includes native.cgroupdriver=systemd
func UsingSystemd(config *config.Config) bool {
return getCD(config) == cgroupSystemdDriver
}
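// For example, a daemon started with `dockerd --exec-opt native.cgroupdriver=systemd`
// reports the systemd driver here; without that exec option the default cgroupfs
// driver is used.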
// verifyPlatformContainerSettings performs platform-specific validation of the
// hostconfig and config structures.
func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) {
var warnings []string
sysInfo := sysinfo.New(true)
w, err := verifyContainerResources(&hostConfig.Resources, sysInfo, update)
// regardless of whether err is nil, w may already contain warnings.
warnings = append(warnings, w...)
if err != nil {
return warnings, err
}
if hostConfig.ShmSize < 0 {
return warnings, fmt.Errorf("SHM size can not be less than 0")
}
if hostConfig.OomScoreAdj < -1000 || hostConfig.OomScoreAdj > 1000 {
return warnings, fmt.Errorf("Invalid value %d, range for oom score adj is [-1000, 1000]", hostConfig.OomScoreAdj)
}
// ip-forwarding does not affect container with '--net=host' (or '--net=none')
if sysInfo.IPv4ForwardingDisabled && !(hostConfig.NetworkMode.IsHost() || hostConfig.NetworkMode.IsNone()) {
warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.")
logrus.Warn("IPv4 forwarding is disabled. Networking will not work")
}
// check for various conflicting options with user namespaces
if daemon.configStore.RemappedRoot != "" && hostConfig.UsernsMode.IsPrivate() {
if hostConfig.Privileged {
return warnings, fmt.Errorf("privileged mode is incompatible with user namespaces. You must run the container in the host namespace when running privileged mode")
}
if hostConfig.NetworkMode.IsHost() && !hostConfig.UsernsMode.IsHost() {
return warnings, fmt.Errorf("cannot share the host's network namespace when user namespaces are enabled")
}
if hostConfig.PidMode.IsHost() && !hostConfig.UsernsMode.IsHost() {
return warnings, fmt.Errorf("cannot share the host PID namespace when user namespaces are enabled")
}
}
if hostConfig.CgroupParent != "" && UsingSystemd(daemon.configStore) {
// CgroupParent for systemd cgroup should be named as "xxx.slice"
if len(hostConfig.CgroupParent) <= 6 || !strings.HasSuffix(hostConfig.CgroupParent, ".slice") {
return warnings, fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"")
}
}
if hostConfig.Runtime == "" {
hostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName()
}
if rt := daemon.configStore.GetRuntime(hostConfig.Runtime); rt == nil {
return warnings, fmt.Errorf("Unknown runtime specified %s", hostConfig.Runtime)
}
parser := volumemounts.NewParser(runtime.GOOS)
for dest := range hostConfig.Tmpfs {
if err := parser.ValidateTmpfsMountDestination(dest); err != nil {
return warnings, err
}
}
return warnings, nil
}
func (daemon *Daemon) loadRuntimes() error {
return daemon.initRuntimes(daemon.configStore.Runtimes)
}
func (daemon *Daemon) initRuntimes(runtimes map[string]types.Runtime) (err error) {
runtimeDir := filepath.Join(daemon.configStore.Root, "runtimes")
// Remove old temp directory if any
os.RemoveAll(runtimeDir + "-old")
tmpDir, err := ioutils.TempDir(daemon.configStore.Root, "gen-runtimes")
if err != nil {
return errors.Wrapf(err, "failed to get temp dir to generate runtime scripts")
}
defer func() {
if err != nil {
if err1 := os.RemoveAll(tmpDir); err1 != nil {
logrus.WithError(err1).WithField("dir", tmpDir).
Warnf("failed to remove tmp dir")
}
return
}
if err = os.Rename(runtimeDir, runtimeDir+"-old"); err != nil {
return
}
if err = os.Rename(tmpDir, runtimeDir); err != nil {
err = errors.Wrapf(err, "failed to setup runtimes dir, new containers may not start")
return
}
if err = os.RemoveAll(runtimeDir + "-old"); err != nil {
logrus.WithError(err).WithField("dir", tmpDir).
Warnf("failed to remove old runtimes dir")
}
}()
for name, rt := range runtimes {
if len(rt.Args) == 0 {
continue
}
script := filepath.Join(tmpDir, name)
content := fmt.Sprintf("#!/bin/sh\n%s %s $@\n", rt.Path, strings.Join(rt.Args, " "))
if err := ioutil.WriteFile(script, []byte(content), 0700); err != nil {
return err
}
}
return nil
}
// verifyDaemonSettings performs validation of daemon config struct
func verifyDaemonSettings(conf *config.Config) error {
// Check for mutually incompatible config options
if conf.BridgeConfig.Iface != "" && conf.BridgeConfig.IP != "" {
return fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one")
}
if !conf.BridgeConfig.EnableIPTables && !conf.BridgeConfig.InterContainerCommunication {
return fmt.Errorf("You specified --iptables=false with --icc=false. ICC=false uses iptables to function. Please set --icc or --iptables to true")
}
if !conf.BridgeConfig.EnableIPTables && conf.BridgeConfig.EnableIPMasq {
conf.BridgeConfig.EnableIPMasq = false
}
if err := VerifyCgroupDriver(conf); err != nil {
return err
}
if conf.CgroupParent != "" && UsingSystemd(conf) {
if len(conf.CgroupParent) <= 6 || !strings.HasSuffix(conf.CgroupParent, ".slice") {
return fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"")
}
}
if conf.DefaultRuntime == "" {
conf.DefaultRuntime = config.StockRuntimeName
}
if conf.Runtimes == nil {
conf.Runtimes = make(map[string]types.Runtime)
}
conf.Runtimes[config.StockRuntimeName] = types.Runtime{Path: DefaultRuntimeName}
return nil
}
// checkSystem validates platform-specific requirements
func checkSystem() error {
if os.Geteuid() != 0 {
return fmt.Errorf("The Docker daemon needs to be run as root")
}
return checkKernel()
}
// configureMaxThreads sets the Go runtime max threads threshold
// which is 90% of the kernel setting from /proc/sys/kernel/threads-max
func configureMaxThreads(config *config.Config) error {
mt, err := ioutil.ReadFile("/proc/sys/kernel/threads-max")
if err != nil {
return err
}
mtint, err := strconv.Atoi(strings.TrimSpace(string(mt)))
if err != nil {
return err
}
maxThreads := (mtint / 100) * 90
debug.SetMaxThreads(maxThreads)
logrus.Debugf("Golang's threads limit set to %d", maxThreads)
return nil
}
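// For example, with /proc/sys/kernel/threads-max at 100000 the limit becomes
// (100000 / 100) * 90 = 90000 threads.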
func overlaySupportsSelinux() (bool, error) {
f, err := os.Open("/proc/kallsyms")
if err != nil {
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
defer f.Close()
var symAddr, symType, symName, text string
s := bufio.NewScanner(f)
for s.Scan() {
if err := s.Err(); err != nil {
return false, err
}
text = s.Text()
if _, err := fmt.Sscanf(text, "%s %s %s", &symAddr, &symType, &symName); err != nil {
return false, fmt.Errorf("Scanning '%s' failed: %s", text, err)
}
// Check for presence of symbol security_inode_copy_up.
if symName == "security_inode_copy_up" {
return true, nil
}
}
return false, nil
}
// configureKernelSecuritySupport configures and validates security support for the kernel
func configureKernelSecuritySupport(config *config.Config, driverName string) error {
if config.EnableSelinuxSupport {
if !selinuxEnabled() {
logrus.Warn("Docker could not enable SELinux on the host system")
return nil
}
if driverName == "overlay" || driverName == "overlay2" {
// If driver is overlay or overlay2, make sure kernel
// supports selinux with overlay.
supported, err := overlaySupportsSelinux()
if err != nil {
return err
}
if !supported {
logrus.Warnf("SELinux is not supported with the %v graph driver on this kernel", driverName)
}
}
} else {
selinuxSetDisabled()
}
return nil
}
func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) {
netOptions, err := daemon.networkOptions(config, daemon.PluginStore, activeSandboxes)
if err != nil {
return nil, err
}
controller, err := libnetwork.New(netOptions...)
if err != nil {
return nil, fmt.Errorf("error obtaining controller instance: %v", err)
}
if len(activeSandboxes) > 0 {
logrus.Info("There are old running containers, the network config will not take affect")
return controller, nil
}
// Initialize default network on "null"
if n, _ := controller.NetworkByName("none"); n == nil {
if _, err := controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(true)); err != nil {
return nil, fmt.Errorf("Error creating default \"null\" network: %v", err)
}
}
// Initialize default network on "host"
if n, _ := controller.NetworkByName("host"); n == nil {
if _, err := controller.NewNetwork("host", "host", "", libnetwork.NetworkOptionPersist(true)); err != nil {
return nil, fmt.Errorf("Error creating default \"host\" network: %v", err)
}
}
// Clear stale bridge network
if n, err := controller.NetworkByName("bridge"); err == nil {
if err = n.Delete(); err != nil {
return nil, fmt.Errorf("could not delete the default bridge network: %v", err)
}
}
if !config.DisableBridge {
// Initialize default driver "bridge"
if err := initBridgeDriver(controller, config); err != nil {
return nil, err
}
} else {
removeDefaultBridgeInterface()
}
return controller, nil
}
func driverOptions(config *config.Config) []nwconfig.Option {
bridgeConfig := options.Generic{
"EnableIPForwarding": config.BridgeConfig.EnableIPForward,
"EnableIPTables": config.BridgeConfig.EnableIPTables,
"EnableUserlandProxy": config.BridgeConfig.EnableUserlandProxy,
"UserlandProxyPath": config.BridgeConfig.UserlandProxyPath}
bridgeOption := options.Generic{netlabel.GenericData: bridgeConfig}
dOptions := []nwconfig.Option{}
dOptions = append(dOptions, nwconfig.OptionDriverConfig("bridge", bridgeOption))
return dOptions
}
func initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error {
bridgeName := bridge.DefaultBridgeName
if config.BridgeConfig.Iface != "" {
bridgeName = config.BridgeConfig.Iface
}
netOption := map[string]string{
bridge.BridgeName: bridgeName,
bridge.DefaultBridge: strconv.FormatBool(true),
netlabel.DriverMTU: strconv.Itoa(config.Mtu),
bridge.EnableIPMasquerade: strconv.FormatBool(config.BridgeConfig.EnableIPMasq),
bridge.EnableICC: strconv.FormatBool(config.BridgeConfig.InterContainerCommunication),
}
// --ip processing
if config.BridgeConfig.DefaultIP != nil {
netOption[bridge.DefaultBindingIP] = config.BridgeConfig.DefaultIP.String()
}
var (
ipamV4Conf *libnetwork.IpamConf
ipamV6Conf *libnetwork.IpamConf
)
ipamV4Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)}
nwList, nw6List, err := netutils.ElectInterfaceAddresses(bridgeName)
if err != nil {
return errors.Wrap(err, "list bridge addresses failed")
}
nw := nwList[0]
if len(nwList) > 1 && config.BridgeConfig.FixedCIDR != "" {
_, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR)
if err != nil {
return errors.Wrap(err, "parse CIDR failed")
}
// Iterate through in case there are multiple addresses for the bridge
for _, entry := range nwList {
if fCIDR.Contains(entry.IP) {
nw = entry
break
}
}
}
ipamV4Conf.PreferredPool = lntypes.GetIPNetCanonical(nw).String()
hip, _ := lntypes.GetHostPartIP(nw.IP, nw.Mask)
if hip.IsGlobalUnicast() {
ipamV4Conf.Gateway = nw.IP.String()
}
if config.BridgeConfig.IP != "" {
ipamV4Conf.PreferredPool = config.BridgeConfig.IP
ip, _, err := net.ParseCIDR(config.BridgeConfig.IP)
if err != nil {
return err
}
ipamV4Conf.Gateway = ip.String()
} else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" {
logrus.Infof("Default bridge (%s) is assigned with an IP address %s. Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool)
}
if config.BridgeConfig.FixedCIDR != "" {
_, fCIDR, err := net.ParseCIDR(config.BridgeConfig.FixedCIDR)
if err != nil {
return err
}
ipamV4Conf.SubPool = fCIDR.String()
}
if config.BridgeConfig.DefaultGatewayIPv4 != nil {
ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.BridgeConfig.DefaultGatewayIPv4.String()
}
var deferIPv6Alloc bool
if config.BridgeConfig.FixedCIDRv6 != "" {
_, fCIDRv6, err := net.ParseCIDR(config.BridgeConfig.FixedCIDRv6)
if err != nil {
return err
}
// In case user has specified the daemon flag --fixed-cidr-v6 and the passed network has
// at least 48 host bits, we need to guarantee the current behavior where the containers'
// IPv6 addresses will be constructed based on the containers' interface MAC address.
// We do so by telling libnetwork to defer the IPv6 address allocation for the endpoints
// on this network until after the driver has created the endpoint and returned the
// constructed address. Libnetwork will then reserve this address with the ipam driver.
ones, _ := fCIDRv6.Mask.Size()
deferIPv6Alloc = ones <= 80
if ipamV6Conf == nil {
ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)}
}
ipamV6Conf.PreferredPool = fCIDRv6.String()
// In case the --fixed-cidr-v6 is specified and the current docker0 bridge IPv6
// address belongs to the same network, we need to inform libnetwork about it, so
// that it can be reserved with IPAM and it will not be given away to somebody else
for _, nw6 := range nw6List {
if fCIDRv6.Contains(nw6.IP) {
ipamV6Conf.Gateway = nw6.IP.String()
break
}
}
}
if config.BridgeConfig.DefaultGatewayIPv6 != nil {
if ipamV6Conf == nil {
ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)}
}
ipamV6Conf.AuxAddresses["DefaultGatewayIPv6"] = config.BridgeConfig.DefaultGatewayIPv6.String()
}
v4Conf := []*libnetwork.IpamConf{ipamV4Conf}
v6Conf := []*libnetwork.IpamConf{}
if ipamV6Conf != nil {
v6Conf = append(v6Conf, ipamV6Conf)
}
// Initialize default network on "bridge" with the same name
_, err = controller.NewNetwork("bridge", "bridge", "",
libnetwork.NetworkOptionEnableIPv6(config.BridgeConfig.EnableIPv6),
libnetwork.NetworkOptionDriverOpts(netOption),
libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil),
libnetwork.NetworkOptionDeferIPv6Alloc(deferIPv6Alloc))
if err != nil {
return fmt.Errorf("Error creating default \"bridge\" network: %v", err)
}
return nil
}
// Remove default bridge interface if present (--bridge=none use case)
func removeDefaultBridgeInterface() {
if lnk, err := netlink.LinkByName(bridge.DefaultBridgeName); err == nil {
if err := netlink.LinkDel(lnk); err != nil {
logrus.Warnf("Failed to remove bridge interface (%s): %v", bridge.DefaultBridgeName, err)
}
}
}
func setupInitLayer(idMappings *idtools.IDMappings) func(containerfs.ContainerFS) error {
return func(initPath containerfs.ContainerFS) error {
return initlayer.Setup(initPath, idMappings.RootPair())
}
}
// Parse the remapped root (user namespace) option, which can be one of:
// username - valid username from /etc/passwd
// username:groupname - valid username; valid groupname from /etc/group
// uid - 32-bit unsigned int valid Linux UID value
// uid:gid - uid value; 32-bit unsigned int Linux GID value
//
// If no groupname is specified, and a username is specified, an attempt
// will be made to lookup a gid for that username as a groupname
//
// If names are used, they are verified to exist in passwd/group
func parseRemappedRoot(usergrp string) (string, string, error) {
var (
userID, groupID int
username, groupname string
)
idparts := strings.Split(usergrp, ":")
if len(idparts) > 2 {
return "", "", fmt.Errorf("Invalid user/group specification in --userns-remap: %q", usergrp)
}
if uid, err := strconv.ParseInt(idparts[0], 10, 32); err == nil {
// must be a uid; take it as valid
userID = int(uid)
luser, err := idtools.LookupUID(userID)
if err != nil {
return "", "", fmt.Errorf("Uid %d has no entry in /etc/passwd: %v", userID, err)
}
username = luser.Name
if len(idparts) == 1 {
// if the uid was numeric and no gid was specified, take the uid as the gid
groupID = userID
lgrp, err := idtools.LookupGID(groupID)
if err != nil {
return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err)
}
groupname = lgrp.Name
}
} else {
lookupName := idparts[0]
// special case: if the user specified "default", they want Docker to create or
// use (after creation) the "dockremap" user/group for root remapping
if lookupName == defaultIDSpecifier {
lookupName = defaultRemappedID
}
luser, err := idtools.LookupUser(lookupName)
if err != nil && idparts[0] != defaultIDSpecifier {
// error if the name requested isn't the special "dockremap" ID
return "", "", fmt.Errorf("Error during uid lookup for %q: %v", lookupName, err)
} else if err != nil {
// special case-- if the username == "default", then we have been asked
// to create a new entry pair in /etc/{passwd,group} for which the /etc/sub{uid,gid}
// ranges will be used for the user and group mappings in user namespaced containers
_, _, err := idtools.AddNamespaceRangesUser(defaultRemappedID)
if err == nil {
return defaultRemappedID, defaultRemappedID, nil
}
return "", "", fmt.Errorf("Error during %q user creation: %v", defaultRemappedID, err)
}
username = luser.Name
if len(idparts) == 1 {
// we only have a string username, and no group specified; look up gid from username as group
group, err := idtools.LookupGroup(lookupName)
if err != nil {
return "", "", fmt.Errorf("Error during gid lookup for %q: %v", lookupName, err)
}
groupname = group.Name
}
}
if len(idparts) == 2 {
// groupname or gid is separately specified and must be resolved
// to an unsigned 32-bit gid
if gid, err := strconv.ParseInt(idparts[1], 10, 32); err == nil {
// must be a gid, take it as valid
groupID = int(gid)
lgrp, err := idtools.LookupGID(groupID)
if err != nil {
return "", "", fmt.Errorf("Gid %d has no entry in /etc/passwd: %v", groupID, err)
}
groupname = lgrp.Name
} else {
// not a number; attempt a lookup
if _, err := idtools.LookupGroup(idparts[1]); err != nil {
return "", "", fmt.Errorf("Error during groupname lookup for %q: %v", idparts[1], err)
}
groupname = idparts[1]
}
}
return username, groupname, nil
}
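// Typical --userns-remap values handled above include "default" (create or reuse the
// "dockremap" user/group), "1000:1000", or "testuser:testgroup"; these examples are
// illustrative only.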
func setupRemappedRoot(config *config.Config) (*idtools.IDMappings, error) {
if runtime.GOOS != "linux" && config.RemappedRoot != "" {
return nil, fmt.Errorf("User namespaces are only supported on Linux")
}
// if the daemon was started with remapped root option, parse
// the config option to the int uid,gid values
if config.RemappedRoot != "" {
username, groupname, err := parseRemappedRoot(config.RemappedRoot)
if err != nil {
return nil, err
}
if username == "root" {
// Cannot setup user namespaces with a 1-to-1 mapping; "--root=0:0" is a no-op
// effectively
logrus.Warn("User namespaces: root cannot be remapped with itself; user namespaces are OFF")
return &idtools.IDMappings{}, nil
}
logrus.Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s:%s", username, groupname)
// update remapped root setting now that we have resolved them to actual names
config.RemappedRoot = fmt.Sprintf("%s:%s", username, groupname)
mappings, err := idtools.NewIDMappings(username, groupname)
if err != nil {
return nil, errors.Wrap(err, "Can't create ID mappings")
}
return mappings, nil
}
return &idtools.IDMappings{}, nil
}
func setupDaemonRoot(config *config.Config, rootDir string, rootIDs idtools.IDPair) error {
config.Root = rootDir
// the docker root metadata directory needs to have execute permissions for all users (g+x,o+x)
// so that syscalls executing as non-root, operating on subdirectories of the graph root
// (e.g. mounted layers of a container) can traverse this path.
// The user namespace support will create subdirectories for the remapped root host uid:gid
// pair owned by that same uid:gid pair for proper write access to those needed metadata and
// layer content subtrees.
if _, err := os.Stat(rootDir); err == nil {
// root currently exists; verify the access bits are correct by setting them
if err = os.Chmod(rootDir, 0711); err != nil {
return err
}
} else if os.IsNotExist(err) {
// no root exists yet, create it 0711 with root:root ownership
if err := os.MkdirAll(rootDir, 0711); err != nil {
return err
}
}
// if user namespaces are enabled we will create a subtree underneath the specified root
// with any/all specified remapped root uid/gid options on the daemon creating
// a new subdirectory with ownership set to the remapped uid/gid (so as to allow
// `chdir()` to work for containers namespaced to that uid/gid)
if config.RemappedRoot != "" {
config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", rootIDs.UID, rootIDs.GID))
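// e.g. a root of /var/lib/docker with a remapped uid/gid pair of 100000:100000
// becomes /var/lib/docker/100000.100000 (illustrative values).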
logrus.Debugf("Creating user namespaced daemon root: %s", config.Root)
// Create the root directory if it doesn't exist
if err := idtools.MkdirAllAndChown(config.Root, 0700, rootIDs); err != nil {
return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err)
}
// we also need to verify that any pre-existing directories in the path to
// the graphroot won't block access to remapped root--if any pre-existing directory
// has strict permissions that don't allow "x", container start will fail, so
// better to warn and fail now
dirPath := config.Root
for {
dirPath = filepath.Dir(dirPath)
if dirPath == "/" {
break
}
if !idtools.CanAccess(dirPath, rootIDs) {
return fmt.Errorf("a subdirectory in your graphroot path (%s) restricts access to the remapped root uid/gid; please fix by allowing 'o+x' permissions on existing directories", config.Root)
}
}
}
if err := setupDaemonRootPropagation(config); err != nil {
logrus.WithError(err).WithField("dir", config.Root).Warn("Error while setting daemon root propagation, this is not generally critical but may cause some functionality to not work or fallback to less desirable behavior")
}
return nil
}
func setupDaemonRootPropagation(cfg *config.Config) error {
rootParentMount, options, err := getSourceMount(cfg.Root)
if err != nil {
return errors.Wrap(err, "error getting daemon root's parent mount")
}
var cleanupOldFile bool
cleanupFile := getUnmountOnShutdownPath(cfg)
defer func() {
if !cleanupOldFile {
return
}
if err := os.Remove(cleanupFile); err != nil && !os.IsNotExist(err) {
logrus.WithError(err).WithField("file", cleanupFile).Warn("could not clean up old root propagation unmount file")
}
}()
if hasMountinfoOption(options, sharedPropagationOption, slavePropagationOption) {
cleanupOldFile = true
return nil
}
if err := mount.MakeShared(cfg.Root); err != nil {
return errors.Wrap(err, "could not setup daemon root propagation to shared")
}
// check the case where this may have already been a mount to itself.
// If so then the daemon only performed a remount and should not try to unmount this later.
if rootParentMount == cfg.Root {
cleanupOldFile = true
return nil
}
if err := ioutil.WriteFile(cleanupFile, nil, 0600); err != nil {
return errors.Wrap(err, "error writing file to signal mount cleanup on shutdown")
}
return nil
}
// getUnmountOnShutdownPath generates the path to the file used to signal to the daemon
// that the daemon root should be unmounted on shutdown.
func getUnmountOnShutdownPath(config *config.Config) string {
return filepath.Join(config.ExecRoot, "unmount-on-shutdown")
}
// registerLinks writes the links to a file.
func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error {
if hostConfig == nil || hostConfig.NetworkMode.IsUserDefined() {
return nil
}
for _, l := range hostConfig.Links {
name, alias, err := opts.ParseLink(l)
if err != nil {
return err
}
child, err := daemon.GetContainer(name)
if err != nil {
return errors.Wrapf(err, "could not get container for %s", name)
}
for child.HostConfig.NetworkMode.IsContainer() {
parts := strings.SplitN(string(child.HostConfig.NetworkMode), ":", 2)
child, err = daemon.GetContainer(parts[1])
if err != nil {
return errors.Wrapf(err, "Could not get container for %s", parts[1])
}
}
if child.HostConfig.NetworkMode.IsHost() {
return runconfig.ErrConflictHostNetworkAndLinks
}
if err := daemon.registerLink(container, child, alias); err != nil {
return err
}
}
// After we load all the links into the daemon
// set them to nil on the hostconfig
_, err := container.WriteHostConfig()
return err
}
// conditionalMountOnStart is a platform specific helper function during the
// container start to call mount.
func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error {
return daemon.Mount(container)
}
// conditionalUnmountOnCleanup is a platform specific helper function called
// during the cleanup of a container to unmount.
func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error {
return daemon.Unmount(container)
}
func copyBlkioEntry(entries []*containerd_cgroups.BlkIOEntry) []types.BlkioStatEntry {
out := make([]types.BlkioStatEntry, len(entries))
for i, re := range entries {
out[i] = types.BlkioStatEntry{
Major: re.Major,
Minor: re.Minor,
Op: re.Op,
Value: re.Value,
}
}
return out
}
func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) {
if !c.IsRunning() {
return nil, errNotRunning(c.ID)
}
cs, err := daemon.containerd.Stats(context.Background(), c.ID)
if err != nil {
if strings.Contains(err.Error(), "container not found") {
return nil, containerNotFound(c.ID)
}
return nil, err
}
s := &types.StatsJSON{}
s.Read = cs.Read
stats := cs.Metrics
if stats.Blkio != nil {
s.BlkioStats = types.BlkioStats{
IoServiceBytesRecursive: copyBlkioEntry(stats.Blkio.IoServiceBytesRecursive),
IoServicedRecursive: copyBlkioEntry(stats.Blkio.IoServicedRecursive),
IoQueuedRecursive: copyBlkioEntry(stats.Blkio.IoQueuedRecursive),
IoServiceTimeRecursive: copyBlkioEntry(stats.Blkio.IoServiceTimeRecursive),
IoWaitTimeRecursive: copyBlkioEntry(stats.Blkio.IoWaitTimeRecursive),
IoMergedRecursive: copyBlkioEntry(stats.Blkio.IoMergedRecursive),
IoTimeRecursive: copyBlkioEntry(stats.Blkio.IoTimeRecursive),
SectorsRecursive: copyBlkioEntry(stats.Blkio.SectorsRecursive),
}
}
if stats.CPU != nil {
s.CPUStats = types.CPUStats{
CPUUsage: types.CPUUsage{
TotalUsage: stats.CPU.Usage.Total,
PercpuUsage: stats.CPU.Usage.PerCPU,
UsageInKernelmode: stats.CPU.Usage.Kernel,
UsageInUsermode: stats.CPU.Usage.User,
},
ThrottlingData: types.ThrottlingData{
Periods: stats.CPU.Throttling.Periods,
ThrottledPeriods: stats.CPU.Throttling.ThrottledPeriods,
ThrottledTime: stats.CPU.Throttling.ThrottledTime,
},
}
}
if stats.Memory != nil {
raw := make(map[string]uint64)
raw["cache"] = stats.Memory.Cache
raw["rss"] = stats.Memory.RSS
raw["rss_huge"] = stats.Memory.RSSHuge
raw["mapped_file"] = stats.Memory.MappedFile
raw["dirty"] = stats.Memory.Dirty
raw["writeback"] = stats.Memory.Writeback
raw["pgpgin"] = stats.Memory.PgPgIn
raw["pgpgout"] = stats.Memory.PgPgOut
raw["pgfault"] = stats.Memory.PgFault
raw["pgmajfault"] = stats.Memory.PgMajFault
raw["inactive_anon"] = stats.Memory.InactiveAnon
raw["active_anon"] = stats.Memory.ActiveAnon
raw["inactive_file"] = stats.Memory.InactiveFile
raw["active_file"] = stats.Memory.ActiveFile
raw["unevictable"] = stats.Memory.Unevictable
raw["hierarchical_memory_limit"] = stats.Memory.HierarchicalMemoryLimit
raw["hierarchical_memsw_limit"] = stats.Memory.HierarchicalSwapLimit
raw["total_cache"] = stats.Memory.TotalCache
raw["total_rss"] = stats.Memory.TotalRSS
raw["total_rss_huge"] = stats.Memory.TotalRSSHuge
raw["total_mapped_file"] = stats.Memory.TotalMappedFile
raw["total_dirty"] = stats.Memory.TotalDirty
raw["total_writeback"] = stats.Memory.TotalWriteback
raw["total_pgpgin"] = stats.Memory.TotalPgPgIn
raw["total_pgpgout"] = stats.Memory.TotalPgPgOut
raw["total_pgfault"] = stats.Memory.TotalPgFault
raw["total_pgmajfault"] = stats.Memory.TotalPgMajFault
raw["total_inactive_anon"] = stats.Memory.TotalInactiveAnon
raw["total_active_anon"] = stats.Memory.TotalActiveAnon
raw["total_inactive_file"] = stats.Memory.TotalInactiveFile
raw["total_active_file"] = stats.Memory.TotalActiveFile
raw["total_unevictable"] = stats.Memory.TotalUnevictable
if stats.Memory.Usage != nil {
s.MemoryStats = types.MemoryStats{
Stats: raw,
Usage: stats.Memory.Usage.Usage,
MaxUsage: stats.Memory.Usage.Max,
Limit: stats.Memory.Usage.Limit,
Failcnt: stats.Memory.Usage.Failcnt,
}
} else {
s.MemoryStats = types.MemoryStats{
Stats: raw,
}
}
// if the container does not set memory limit, use the machineMemory
if s.MemoryStats.Limit > daemon.machineMemory && daemon.machineMemory > 0 {
s.MemoryStats.Limit = daemon.machineMemory
}
}
if stats.Pids != nil {
s.PidsStats = types.PidsStats{
Current: stats.Pids.Current,
Limit: stats.Pids.Limit,
}
}
return s, nil
}
// setDefaultIsolation determines the default isolation mode for the
// daemon to run in. This is only applicable on Windows
func (daemon *Daemon) setDefaultIsolation() error {
return nil
}
// setupDaemonProcess sets various settings for the daemon's process
func setupDaemonProcess(config *config.Config) error {
// setup the daemons oom_score_adj
if err := setupOOMScoreAdj(config.OOMScoreAdjust); err != nil {
return err
}
if err := setMayDetachMounts(); err != nil {
logrus.WithError(err).Warn("Could not set may_detach_mounts kernel parameter")
}
return nil
}
// This is used to allow removal of mountpoints that may be mounted in other
// namespaces on RHEL based kernels starting from RHEL 7.4.
// Without this setting, removals on these RHEL based kernels may fail with
// "device or resource busy".
// This setting is not available in upstream kernels as it is not configurable,
// but has been in the upstream kernels since 3.15.
func setMayDetachMounts() error {
f, err := os.OpenFile("/proc/sys/fs/may_detach_mounts", os.O_WRONLY, 0)
if err != nil {
if os.IsNotExist(err) {
return nil
}
return errors.Wrap(err, "error opening may_detach_mounts kernel config file")
}
defer f.Close()
_, err = f.WriteString("1")
if os.IsPermission(err) {
// Setting may_detach_mounts does not work in an
// unprivileged container. Ignore the error, but log
// it if we appear not to be in that situation.
if !rsystem.RunningInUserNS() {
logrus.Debugf("Permission denied writing %q to /proc/sys/fs/may_detach_mounts", "1")
}
return nil
}
return err
}
func setupOOMScoreAdj(score int) error {
f, err := os.OpenFile("/proc/self/oom_score_adj", os.O_WRONLY, 0)
if err != nil {
return err
}
defer f.Close()
stringScore := strconv.Itoa(score)
_, err = f.WriteString(stringScore)
if os.IsPermission(err) {
// Setting oom_score_adj does not work in an
// unprivileged container. Ignore the error, but log
// it if we appear not to be in that situation.
if !rsystem.RunningInUserNS() {
logrus.Debugf("Permission denied writing %q to /proc/self/oom_score_adj", stringScore)
}
return nil
}
return err
}
func (daemon *Daemon) initCgroupsPath(path string) error {
if path == "/" || path == "." {
return nil
}
if daemon.configStore.CPURealtimePeriod == 0 && daemon.configStore.CPURealtimeRuntime == 0 {
return nil
}
// Recursively create cgroup to ensure that the system and all parent cgroups have values set
// for the period and runtime as this limits what the children can be set to.
daemon.initCgroupsPath(filepath.Dir(path))
mnt, root, err := cgroups.FindCgroupMountpointAndRoot("cpu")
if err != nil {
return err
}
// When docker is run inside docker, the root is based on the host cgroup.
// Should this be handled in runc/libcontainer/cgroups ?
if strings.HasPrefix(root, "/docker/") {
root = "/"
}
path = filepath.Join(mnt, root, path)
sysinfo := sysinfo.New(true)
if err := maybeCreateCPURealTimeFile(sysinfo.CPURealtimePeriod, daemon.configStore.CPURealtimePeriod, "cpu.rt_period_us", path); err != nil {
return err
}
return maybeCreateCPURealTimeFile(sysinfo.CPURealtimeRuntime, daemon.configStore.CPURealtimeRuntime, "cpu.rt_runtime_us", path)
}
func maybeCreateCPURealTimeFile(sysinfoPresent bool, configValue int64, file string, path string) error {
if sysinfoPresent && configValue != 0 {
if err := os.MkdirAll(path, 0755); err != nil {
return err
}
if err := ioutil.WriteFile(filepath.Join(path, file), []byte(strconv.FormatInt(configValue, 10)), 0700); err != nil {
return err
}
}
return nil
}
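// For example, a daemon started with --cpu-rt-runtime=950000 writes "950000" into
// cpu.rt_runtime_us for the container's cgroup path and each of its parent cgroups
// (illustrative value).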
func (daemon *Daemon) setupSeccompProfile() error {
if daemon.configStore.SeccompProfile != "" {
daemon.seccompProfilePath = daemon.configStore.SeccompProfile
b, err := ioutil.ReadFile(daemon.configStore.SeccompProfile)
if err != nil {
return fmt.Errorf("opening seccomp profile (%s) failed: %v", daemon.configStore.SeccompProfile, err)
}
daemon.seccompProfile = b
}
return nil
}
|
[
"\"DOCKER_NOWARN_KERNEL_VERSION\""
] |
[] |
[
"DOCKER_NOWARN_KERNEL_VERSION"
] |
[]
|
["DOCKER_NOWARN_KERNEL_VERSION"]
|
go
| 1 | 0 | |
quex/engine/state_machine/algebra/TESTS/additional_laws/TEST/idempotency.py
|
import os
import sys
sys.path.insert(0, os.environ["QUEX_PATH"])
from quex.engine.state_machine.core import DFA
from quex.engine.state_machine.algebra.TESTS.helper import test1, \
union, \
intersection, \
complement, \
identity, \
add_more_DFAs
import sys
if "--hwut-info" in sys.argv:
print "Idempotency;"
print "HAPPY: [0-9]+;"
sys.exit()
count = 0
def idempotency(A):
global count
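# Idempotency laws checked below: union(A, A) == A and intersection(A, A) == A
# for every DFA A supplied by the test helper.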
assert identity(A, union([A, A]))
assert identity(A, intersection([A, A]))
count += 1
add_more_DFAs()
test1(idempotency)
print "<terminated: %i>" % count
|
[] |
[] |
[
"QUEX_PATH"
] |
[]
|
["QUEX_PATH"]
|
python
| 1 | 0 | |
sequential-update-with-route/main.go
|
package main
import (
"context"
"fmt"
"log"
"math/rand"
"net/http"
"os"
"gopkg.in/olivere/elastic.v5"
)
var (
elasticClient *elastic.Client
ctx = context.Background()
)
func init() {
os.Setenv("ELASTIC_URL", "http://0.0.0.0:32769")
var err error
elasticClient, err = elastic.NewClient(elastic.SetSniff(false), elastic.SetURL(os.Getenv("ELASTIC_URL")))
if err != nil {
panic(err)
}
Populate()
}
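// Note: Populate, size, SequentialUpdate and GenerateRandomData are assumed to be
// defined in other files of this package; they are not shown here.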
func main() {
http.HandleFunc("/update", func(w http.ResponseWriter, r *http.Request) {
id := rand.Intn(size)
if err := SequentialUpdate(id, GenerateRandomData()); err != nil {
w.WriteHeader(http.StatusInternalServerError)
}
})
fmt.Println("Running...")
log.Fatal(http.ListenAndServe(":8080", nil))
}
|
[
"\"ELASTIC_URL\""
] |
[] |
[
"ELASTIC_URL"
] |
[]
|
["ELASTIC_URL"]
|
go
| 1 | 0 | |
manage.py
|
#!/usr/bin/env python
# Copyright 2020 Mikhail Dolbnin [email protected]
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'AntiFraudProjsect.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
xyzspaces/__init__.py
|
# Copyright (C) 2019-2020 HERE Europe B.V.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
# License-Filename: LICENSE
"""The XYZ Spaces for Python - manage your XYZ Hub server or HERE Data Hub.
XYZ Spaces for Python allows you to manage XYZ spaces, projects and tokens with the Hub
API, Project API, and Token API, respectively. The Hub API provides the most
features to let you read and write GeoJSON data (features) from and to an
XYZ space, and perform some higher-level operations like return features
inside or clipped by some bounding box or map tile. The Project API and
Token API let you manage your XYZ projects and tokens.
See also:
- XYZ Hub server: https://github.com/heremaps/xyz-hub
- HERE Data Hub: https://developer.here.com/products/data-hub
"""
import os
from typing import Optional
from xyzspaces.__version__ import __version__ # noqa: F401
from xyzspaces.logconf import setup_logging # noqa: F401
from xyzspaces.spaces import Space
from .apis import HubApi
class XYZ:
"""A single interface to interact with your XYZ Hub server or HERE Data Hub."""
def __init__(self, credentials: Optional[str] = None):
"""Instantiate an XYZ object, optionally with access credentials."""
if credentials:
os.environ["XYZ_TOKEN"] = str(credentials)
self.hub_api = HubApi(credentials=credentials)
else:
self.hub_api = HubApi()
self.spaces = Space(api=self.hub_api)
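# Illustrative usage (the token below is a placeholder, not a real credential):
#   xyz = XYZ(credentials="<your-XYZ-token>")
#   # xyz.hub_api and xyz.spaces are then ready to talk to the Hub API.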
|
[] |
[] |
[
"XYZ_TOKEN"
] |
[]
|
["XYZ_TOKEN"]
|
python
| 1 | 0 | |
vendor/github.com/openshift/library-go/pkg/operator/v1helpers/helpers.go
|
package v1helpers
import (
"errors"
"fmt"
"os"
"sort"
"strings"
"time"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/client-go/util/retry"
"github.com/ghodss/yaml"
configv1 "github.com/openshift/api/config/v1"
operatorv1 "github.com/openshift/api/operator/v1"
)
// SetOperandVersion sets the new version and returns the previous value.
func SetOperandVersion(versions *[]configv1.OperandVersion, operandVersion configv1.OperandVersion) string {
if versions == nil {
versions = &[]configv1.OperandVersion{}
}
existingVersion := FindOperandVersion(*versions, operandVersion.Name)
if existingVersion == nil {
*versions = append(*versions, operandVersion)
return ""
}
previous := existingVersion.Version
existingVersion.Version = operandVersion.Version
return previous
}
func FindOperandVersion(versions []configv1.OperandVersion, name string) *configv1.OperandVersion {
if versions == nil {
return nil
}
for i := range versions {
if versions[i].Name == name {
return &versions[i]
}
}
return nil
}
func SetOperatorCondition(conditions *[]operatorv1.OperatorCondition, newCondition operatorv1.OperatorCondition) {
if conditions == nil {
conditions = &[]operatorv1.OperatorCondition{}
}
existingCondition := FindOperatorCondition(*conditions, newCondition.Type)
if existingCondition == nil {
newCondition.LastTransitionTime = metav1.NewTime(time.Now())
*conditions = append(*conditions, newCondition)
return
}
if existingCondition.Status != newCondition.Status {
existingCondition.Status = newCondition.Status
existingCondition.LastTransitionTime = metav1.NewTime(time.Now())
}
existingCondition.Reason = newCondition.Reason
existingCondition.Message = newCondition.Message
}
func RemoveOperatorCondition(conditions *[]operatorv1.OperatorCondition, conditionType string) {
if conditions == nil {
conditions = &[]operatorv1.OperatorCondition{}
}
newConditions := []operatorv1.OperatorCondition{}
for _, condition := range *conditions {
if condition.Type != conditionType {
newConditions = append(newConditions, condition)
}
}
*conditions = newConditions
}
func FindOperatorCondition(conditions []operatorv1.OperatorCondition, conditionType string) *operatorv1.OperatorCondition {
for i := range conditions {
if conditions[i].Type == conditionType {
return &conditions[i]
}
}
return nil
}
func IsOperatorConditionTrue(conditions []operatorv1.OperatorCondition, conditionType string) bool {
return IsOperatorConditionPresentAndEqual(conditions, conditionType, operatorv1.ConditionTrue)
}
func IsOperatorConditionFalse(conditions []operatorv1.OperatorCondition, conditionType string) bool {
return IsOperatorConditionPresentAndEqual(conditions, conditionType, operatorv1.ConditionFalse)
}
func IsOperatorConditionPresentAndEqual(conditions []operatorv1.OperatorCondition, conditionType string, status operatorv1.ConditionStatus) bool {
for _, condition := range conditions {
if condition.Type == conditionType {
return condition.Status == status
}
}
return false
}
// UpdateOperatorSpecFunc is a func that mutates an operator spec.
type UpdateOperatorSpecFunc func(spec *operatorv1.OperatorSpec) error
// UpdateSpec applies the update funcs to the oldStatus and tries to update via the client.
func UpdateSpec(client OperatorClient, updateFuncs ...UpdateOperatorSpecFunc) (*operatorv1.OperatorSpec, bool, error) {
updated := false
var operatorSpec *operatorv1.OperatorSpec
err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
oldSpec, _, resourceVersion, err := client.GetOperatorState()
if err != nil {
return err
}
newSpec := oldSpec.DeepCopy()
for _, update := range updateFuncs {
if err := update(newSpec); err != nil {
return err
}
}
if equality.Semantic.DeepEqual(oldSpec, newSpec) {
return nil
}
operatorSpec, _, err = client.UpdateOperatorSpec(resourceVersion, newSpec)
updated = err == nil
return err
})
return operatorSpec, updated, err
}
// UpdateObservedConfigFn returns a func to update the observed config.
func UpdateObservedConfigFn(config map[string]interface{}) UpdateOperatorSpecFunc {
return func(oldSpec *operatorv1.OperatorSpec) error {
oldSpec.ObservedConfig = runtime.RawExtension{Object: &unstructured.Unstructured{Object: config}}
return nil
}
}
// UpdateStatusFunc is a func that mutates an operator status.
type UpdateStatusFunc func(status *operatorv1.OperatorStatus) error
// UpdateStatus applies the update funcs to the oldStatus and tries to update via the client.
func UpdateStatus(client OperatorClient, updateFuncs ...UpdateStatusFunc) (*operatorv1.OperatorStatus, bool, error) {
updated := false
var updatedOperatorStatus *operatorv1.OperatorStatus
err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
_, oldStatus, resourceVersion, err := client.GetOperatorState()
if err != nil {
return err
}
newStatus := oldStatus.DeepCopy()
for _, update := range updateFuncs {
if err := update(newStatus); err != nil {
return err
}
}
if equality.Semantic.DeepEqual(oldStatus, newStatus) {
// We return the newStatus which is a deep copy of oldStatus but with all update funcs applied.
updatedOperatorStatus = newStatus
return nil
}
updatedOperatorStatus, err = client.UpdateOperatorStatus(resourceVersion, newStatus)
updated = err == nil
return err
})
return updatedOperatorStatus, updated, err
}
// UpdateConditionFn returns a func to update a condition.
func UpdateConditionFn(cond operatorv1.OperatorCondition) UpdateStatusFunc {
return func(oldStatus *operatorv1.OperatorStatus) error {
SetOperatorCondition(&oldStatus.Conditions, cond)
return nil
}
}
// UpdateStaticPodStatusFunc is a func that mutates a static pod operator status.
type UpdateStaticPodStatusFunc func(status *operatorv1.StaticPodOperatorStatus) error
// UpdateStaticPodStatus applies the update funcs to the oldStatus and tries to update via the client.
func UpdateStaticPodStatus(client StaticPodOperatorClient, updateFuncs ...UpdateStaticPodStatusFunc) (*operatorv1.StaticPodOperatorStatus, bool, error) {
updated := false
var updatedOperatorStatus *operatorv1.StaticPodOperatorStatus
err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
_, oldStatus, resourceVersion, err := client.GetStaticPodOperatorState()
if err != nil {
return err
}
newStatus := oldStatus.DeepCopy()
for _, update := range updateFuncs {
if err := update(newStatus); err != nil {
return err
}
}
if equality.Semantic.DeepEqual(oldStatus, newStatus) {
// We return the newStatus which is a deep copy of oldStatus but with all update funcs applied.
updatedOperatorStatus = newStatus
return nil
}
updatedOperatorStatus, err = client.UpdateStaticPodOperatorStatus(resourceVersion, newStatus)
updated = err == nil
return err
})
return updatedOperatorStatus, updated, err
}
// UpdateStaticPodConditionFn returns a func to update a condition.
func UpdateStaticPodConditionFn(cond operatorv1.OperatorCondition) UpdateStaticPodStatusFunc {
return func(oldStatus *operatorv1.StaticPodOperatorStatus) error {
SetOperatorCondition(&oldStatus.Conditions, cond)
return nil
}
}
// EnsureFinalizer adds a new finalizer to the operator CR, if it does not exist. No-op otherwise.
// The finalizer name is computed from the controller name and operator name ($OPERATOR_NAME or os.Args[0])
// It re-tries on conflicts.
func EnsureFinalizer(client OperatorClientWithFinalizers, controllerName string) error {
finalizer := getFinalizerName(controllerName)
err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
return client.EnsureFinalizer(finalizer)
})
return err
}
// RemoveFinalizer removes a finalizer from the operator CR, if it is there. No-op otherwise.
// The finalizer name is computed from the controller name and operator name ($OPERATOR_NAME or os.Args[0])
// It re-tries on conflicts.
func RemoveFinalizer(client OperatorClientWithFinalizers, controllerName string) error {
finalizer := getFinalizerName(controllerName)
err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
return client.RemoveFinalizer(finalizer)
})
return err
}
// getFinalizerName computes a nice finalizer name from controllerName and the operator name ($OPERATOR_NAME or os.Args[0]).
func getFinalizerName(controllerName string) string {
return fmt.Sprintf("%s.operator.openshift.io/%s", getOperatorName(), controllerName)
}
func getOperatorName() string {
if name := os.Getenv("OPERATOR_NAME"); name != "" {
return name
}
return os.Args[0]
}
type aggregate []error
var _ utilerrors.Aggregate = aggregate{}
// NewMultiLineAggregate returns an aggregate error with multi-line output
func NewMultiLineAggregate(errList []error) error {
var errs []error
for _, e := range errList {
if e != nil {
errs = append(errs, e)
}
}
if len(errs) == 0 {
return nil
}
return aggregate(errs)
}
// Error is part of the error interface.
func (agg aggregate) Error() string {
msgs := make([]string, len(agg))
for i := range agg {
msgs[i] = agg[i].Error()
}
return strings.Join(msgs, "\n")
}
// Errors is part of the Aggregate interface.
func (agg aggregate) Errors() []error {
return []error(agg)
}
// Is is part of the Aggregate interface.
func (agg aggregate) Is(target error) bool {
return agg.visit(func(err error) bool {
return errors.Is(err, target)
})
}
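// visit walks the aggregate, recursing into nested aggregates and flattening utilerrors.Aggregate values; it returns true as soon as f matches an error.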
func (agg aggregate) visit(f func(err error) bool) bool {
for _, err := range agg {
switch err := err.(type) {
case aggregate:
if match := err.visit(f); match {
return match
}
case utilerrors.Aggregate:
for _, nestedErr := range err.Errors() {
if match := f(nestedErr); match {
return match
}
}
default:
if match := f(err); match {
return match
}
}
}
return false
}
// MapToEnvVars converts a string-string map to a slice of corev1.EnvVar-s
func MapToEnvVars(mapEnvVars map[string]string) []corev1.EnvVar {
if mapEnvVars == nil {
return nil
}
envVars := make([]corev1.EnvVar, len(mapEnvVars))
i := 0
for k, v := range mapEnvVars {
envVars[i] = corev1.EnvVar{Name: k, Value: v}
i++
}
// need to sort the slice so that kube-controller-manager-pod configmap does not change all the time
sort.Slice(envVars, func(i, j int) bool { return envVars[i].Name < envVars[j].Name })
return envVars
}
// InjectObservedProxyIntoContainers injects proxy environment variables in containers specified in containerNames.
func InjectObservedProxyIntoContainers(podSpec *corev1.PodSpec, containerNames []string, observedConfig []byte, fields ...string) error {
var config map[string]interface{}
if err := yaml.Unmarshal(observedConfig, &config); err != nil {
return fmt.Errorf("failed to unmarshal the observedConfig: %w", err)
}
proxyConfig, found, err := unstructured.NestedStringMap(config, fields...)
if err != nil {
return fmt.Errorf("couldn't get the proxy config from observedConfig: %w", err)
}
proxyEnvVars := MapToEnvVars(proxyConfig)
if !found || len(proxyEnvVars) < 1 {
// There's no observed proxy config, we should tolerate that
return nil
}
for _, containerName := range containerNames {
for i := range podSpec.InitContainers {
if podSpec.InitContainers[i].Name == containerName {
podSpec.InitContainers[i].Env = append(podSpec.InitContainers[i].Env, proxyEnvVars...)
}
}
for i := range podSpec.Containers {
if podSpec.Containers[i].Name == containerName {
podSpec.Containers[i].Env = append(podSpec.Containers[i].Env, proxyEnvVars...)
}
}
}
return nil
}
|
[
"\"OPERATOR_NAME\""
] |
[] |
[
"OPERATOR_NAME"
] |
[]
|
["OPERATOR_NAME"]
|
go
| 1 | 0 | |
01_prep_package.py
|
#!/usr/bin/env python3
import glob
import os
import os.path
import argparse
import re
import subprocess
import shutil
import sys
from datetime import datetime
# get the location of this script
ampfuzz_bin = os.path.dirname(os.path.realpath(__file__))
cc_bin = os.path.join(ampfuzz_bin, 'pre_clang')
cxx_bin = os.path.join(ampfuzz_bin, 'pre_clang++')
compiler_export_regex = re.compile(r'^export\s+(CC|CXX)\s*=.*')
changelog_version_regex = re.compile(r'^(?P<pkgname>.*) \((.*:)?(?P<upstream_version>.*)\) (?P<remainder>.*)$')
dpkg_parsechangelog = re.compile(r'(dpkg-parsechangelog)(\s+\|)')
ampfuzz_var = '/var/ampfuzz'
# get the environment ready
my_env = os.environ.copy()
my_env['DEBIAN_FRONTEND'] = 'noninteractive'
# parallel=n ? for DEB_BUILD_OPTIONS
my_env['DEB_BUILD_OPTIONS'] = 'nocheck'
my_env['CFLAGS'] = ' -I/usr/include/tirpc'
my_env['CC'] = cc_bin
my_env['CXX'] = cxx_bin
my_env['LLVM_COMPILER'] = 'clang'
my_env['LLVM_CC_NAME'] = 'clang'
my_env['LLVM_CXX_NAME'] = 'clang++'
my_env['LLVM_CPP_NAME'] = 'clang-cpp'
my_env['LLVM_COMPILER_PATH'] = '/usr/lib/llvm-11/bin'
my_env['PATH'] = '/usr/lib/llvm-11/bin:/usr/bin/zsh:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin'
def dparser():
par = argparse.ArgumentParser(
description='fuzz_package expects a debian package name binary name together with path using debian as root, examples are in ranked_packages.json')
par.add_argument('-v', '--version', type=str, help='Package version')
par.add_argument('-f', '--force', action='store_true', help='Force rebuild')
par.add_argument('name', type=str, help='Package name')
return par
def symlink(link, target, backup=True):
if backup and os.path.exists(link):
exec_command('cp', [link, link + ".bak"])
exec_command('ln', ['-sf', target, link], None)
if os.path.realpath(link) == target:
print("Success link for: " + link + "->" + target)
else:
print("Failure link for: " + link + "->" + target)
def unsymlink(link):
if os.path.exists(link):
exec_command('rm', [link])
if os.path.exists(link + '.bak'):
exec_command('mv', [link + ".bak", link])
def exec_command(name, options, input=None, asuser=None, allow_fail=False):
command = []
cmd_env = my_env
if asuser is not None:
command.append('sudo')
command.append('-E')
command.append('-u')
command.append(asuser)
command.append('--')
cmd_env = cmd_env.copy()
cmd_env['HOME'] = os.path.expanduser(f'~{asuser}')
command.append(name)
command.extend(options)
if input is not None:
command.append(input)
print(command)
try:
subprocess.check_call(command,
stdin=sys.stdin.fileno(),
stdout=sys.stdout.fileno(),
stderr=sys.stderr.fileno(),
env=cmd_env)
except subprocess.CalledProcessError as e:
if not allow_fail:
raise e
def prepare(pkg_build_dir, package, version=None):
exec_command('mkdir', ['-p', pkg_build_dir])
exec_command('chown', ['-R', 'user', pkg_build_dir])
# change the working directory to the package build directory
os.chdir(pkg_build_dir)
# update package cache
exec_command('apt-get', ['update', '-y'])
# remove the package
exec_command('apt-get', ['remove', '-y'], package)
# get build dependencies
exec_command('apt-get', ['build-dep', '-y'], package)
# get package's sources
if version is not None:
query = f"{package}={version}"
else:
query = f"{package}"
exec_command('apt-get', ['source'], query, asuser='user')
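# mod_rules rewrites debian/rules: it drops any override_dh_missing target and CC/CXX exports, strips the injected 999: epoch from dpkg-parsechangelog output, and appends a disabled override_dh_missing target.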
def mod_rules():
rules_file = 'rules'
rules_file_bak = f'{rules_file}.bak'
if not os.path.exists(rules_file_bak):
shutil.copy2(rules_file, rules_file_bak)
skip_rule = False
with open(rules_file_bak) as in_rules, open(rules_file, 'w') as out_rules:
for line in in_rules:
if line.startswith('override_dh_missing:'):
skip_rule = True
elif compiler_export_regex.match(line):
skip_rule = True
elif not line.startswith('\t'):
skip_rule = False
# prevent dumb packages from parsing our modified version number
line = dpkg_parsechangelog.sub('\\1 | sed \'s/999://\'\\2', line)
if not skip_rule:
out_rules.write(line)
out_rules.write('\noverride_dh_missing:\n')
out_rules.write('\t# DISABLED BY AMPFUZZ\n')
def mod_changelog():
changelog_file = 'changelog'
changelog_file_bak = f'{changelog_file}.bak'
if not os.path.exists(changelog_file_bak):
shutil.copy2(changelog_file, changelog_file_bak)
with open(changelog_file) as f:
changelog = f.read()
for line in changelog.splitlines():
match = changelog_version_regex.match(line)
if match:
break
with open(changelog_file, 'w') as f:
f.write(f"""{match['pkgname']} (999:{match['upstream_version']}) {match['remainder']}
* AmpFuzz Instrumentation
-- Amp Fuzz <ampfuzz@ampfuzz> {datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S +0000')}
""")
f.write(changelog)
def fix_symlinks():
symlink('/usr/bin/clang', cc_bin)
symlink('/usr/bin/gcc-10', cc_bin)
symlink('/usr/bin/gcc', cc_bin)
symlink('/usr/bin/clang++', cxx_bin)
symlink('/usr/bin/g++', cxx_bin)
symlink('/usr/bin/g++-10', cxx_bin)
symlink('/usr/bin/cpp-10', '/usr/bin/clang-cpp-11')
symlink('/usr/bin/cpp', '/usr/bin/clang-cpp-11')
def rollback_symlinks():
unsymlink('/usr/bin/clang')
unsymlink('/usr/bin/gcc')
unsymlink('/usr/bin/clang++')
unsymlink('/usr/bin/g++')
unsymlink('/usr/bin/gcc-10')
unsymlink('/usr/bin/g++-10')
unsymlink('/usr/bin/cpp-10')
unsymlink('/usr/bin/cpp')
def install_local_repository(pkg_build_dir, package):
repo_list = f"/etc/apt/sources.list.d/{package}.list"
with open(repo_list, 'w') as f:
f.write(f"deb [trusted=yes] file://{pkg_build_dir} /")
# update from new local repository only
exec_command("apt-get", ["update", "-o", f"Dir::Etc::sourcelist={repo_list}"])
def install(pkg_build_dir, package):
install_local_repository(pkg_build_dir, package)
# install package from our repository
exec_command("apt-get", ["install", "-f", "-y"], package)
# also install "updates" from our repository
exec_command("apt-get", ["upgrade", "-f", "-y"])
def find_source_dir(pkg_build_dir):
'''
find source directory by looking for debian/changelog
the same way as debuild would
:param pkg_build_dir:
:return:
'''
path = ''
depth = 0
for root, dirs, files in os.walk(pkg_build_dir):
curr_depth = root[len(pkg_build_dir):].count(os.sep)
if os.path.basename(root) == 'debian' and 'changelog' in files:
if not depth == 0:
if curr_depth < depth:
depth = curr_depth
path = root
else:
depth = curr_depth
path = root
if path:
return path
raise ValueError("could not find source dir")
def build(pkg_build_dir, package):
source_dir = find_source_dir(pkg_build_dir)
os.chdir(source_dir)
# modify rules
mod_rules()
# modify changelog
mod_changelog()
# build the package with modified rules
exec_command('debuild', ['--preserve-env', '-ePATH', '--no-lintian', '-rsudo', '-b', '-uc', '-us'], asuser='user')
# create Packages file to local repository
os.chdir(pkg_build_dir)
subprocess.check_call('apt-ftparchive packages . > Packages && gzip < Packages > Packages.gz', shell=True)
def main():
# from IPython import embed; embed()
# get the arg
parser = dparser()
args = parser.parse_args()
package = args.name
version = args.version or None
# remove the package, get its sources and build dependencies
pkg_build_dir = os.path.join(ampfuzz_var, 'pkg_build', package)
packages_file = os.path.join(pkg_build_dir, 'Packages')
if (not os.path.exists(pkg_build_dir) or
args.force or
not os.path.exists(packages_file) or
os.path.getsize(packages_file) == 0):
print(f'(Re-)building package {package}')
prepare(pkg_build_dir, package, version)
fix_symlinks()
build(pkg_build_dir, package)
rollback_symlinks()
install(pkg_build_dir, package)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
contrib/spendfrom/spendfrom.py
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 5332 if testnet else 12332
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
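# Return True if the wallet is already unlocked (or not encrypted); otherwise prompt for the passphrase and unlock it for a few seconds.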
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
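# Build a map of address -> {total, outputs, account} from listunspent; only ordinary pubkeyhash/scripthash outputs are counted.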
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
if kb > 1 and (total_in-total_out) < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and (total_in-total_out) < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"APPDATA"
] |
[]
|
["APPDATA"]
|
python
| 1 | 0 | |
week5-machine_learning/scripts/run_tune_example.py
|
"""
python ./scripts/run_tune_example.py
# To see results
from ray.tune import Analysis
analysis = Analysis(PATH_TO_EXP_DIR)
df = analysis.trial_dataframes
"""
import sys
import os
import numpy as np
from random import shuffle
from collections import deque
from dataclasses import dataclass, asdict
import torch
import torch.nn as nn
from torch import optim
from ray import tune
sys.path.append(".")
from src.utils import load_and_process_digits
from src.models import LogisticRegressionTorch
def simple_loader(inputs, targets, batch_size=128, shuffle_per_iteration=20):
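# Infinite batch generator: yields (x, y) batches of size batch_size, cycling over the data and reshuffling it every shuffle_per_iteration batches.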
index = 0
while True:
indexes_get = np.arange(index * batch_size, (index + 1) * batch_size) % len(inputs)
x_ = np.take(inputs, indexes_get, axis=0)
y_ = np.take(targets, indexes_get, axis=0)
index += 1
if index % shuffle_per_iteration == 0:
full_index = np.arange(len(x_))
shuffle(full_index)
inputs = np.take(inputs, full_index, axis=0)
targets = np.take(targets, full_index, axis=0)
yield x_, y_
def train_digits(config: dict):
x_train, y_train, x_valid, y_valid, x_test, y_test = load_and_process_digits()
train_loader = simple_loader(x_train, y_train, batch_size=config["batch_size"])
model = LogisticRegressionTorch(input_dim=x_train.shape[-1], output_dim=10)
optimizer = optim.SGD(model.parameters(), lr=config["learning_rate"])
loss_fn = nn.CrossEntropyLoss()
train_losses, valid_losses = [], []
bst_loss = 1e+4
patient_counter = 0
for i_epoch in range(config["num_epochs"]):
loss_record = deque(maxlen=100)
for _ in range(len(x_train) // config["batch_size"]):
x, y = next(train_loader)
logits = model(torch.from_numpy(x))
loss_train = loss_fn(logits, torch.from_numpy(y))
### Do regularization
if config["l1_alpha"] > 0:
l1_term = torch.tensor(0.)
for model_params in model.parameters():
reg = torch.abs(model_params).sum()
l1_term += reg
loss_train = loss_train + config["l1_alpha"] * l1_term
if config["l2_alpha"] > 0:
l2_term = torch.tensor(0.)
for model_params in model.parameters():
reg = torch.norm(model_params)
l2_term += reg
loss_train = loss_train + config["l2_alpha"] * l2_term
optimizer.zero_grad()
loss_train.backward()
optimizer.step()
loss_record.append(loss_train.detach().cpu().numpy())
with torch.no_grad():
yp_logits = model(torch.from_numpy(x_valid))
loss_valid = loss_fn(yp_logits, torch.from_numpy(y_valid))
loss_valid = loss_valid.detach().cpu().numpy()
print("Epoch: {}/{}, Training Loss: {:.3f}, Validation Loss: {:.3f}".format(
str(i_epoch + 1).zfill(4),
config["num_epochs"],
np.mean(loss_record),
loss_valid
), flush=True, end="\r")
train_losses.append(np.mean(loss_record))
valid_losses.append(loss_valid)
tune.report(validation_loss=loss_valid) # validation_loss can be any keyword you want
### Do earlystopping
if patient_counter >= config["n_earlystopping_rounds"]:
return model, train_losses, valid_losses
if loss_valid < bst_loss:
bst_loss = loss_valid
patient_counter = 0
else:
patient_counter += 1
return model, train_losses, valid_losses
@dataclass
class TrainConfig:
batch_size: int
learning_rate: float
num_epochs: int = 500
l1_alpha: float = 0.
l2_alpha: float = 0.
n_earlystopping_rounds: int = 1e+8
def to_dict(self):
return asdict(self)
if __name__ == "__main__":
# Force use CPU
os.environ["CUDA_VISIBLE_DEVICES"] = ""
train_config = TrainConfig(
batch_size=tune.choice([64, 128]),
learning_rate=tune.grid_search([0.5, 1, 1.5]),
num_epochs=1000,
l1_alpha=tune.grid_search([0, 0.001, 0.01]),
l2_alpha=tune.grid_search([0, 0.001, 0.01]),
# n_earlystopping_rounds
)
analysis = tune.run(
train_digits,
config=train_config.to_dict(),
num_samples=3,
progress_reporter=tune.CLIReporter(max_error_rows=20)
) # Total num_trials = num_samples * (number of grid_search combinations)
|
[] |
[] |
[
"CUDA_VISIBLE_DEVICES"
] |
[]
|
["CUDA_VISIBLE_DEVICES"]
|
python
| 1 | 0 | |
logger.py
|
# logger.py
import logging
import os
logging.basicConfig(level=os.getenv("LOGLEVEL", "INFO"))
info = logging.info
debug = logging.debug
|
[] |
[] |
[
"LOGLEVEL"
] |
[]
|
["LOGLEVEL"]
|
python
| 1 | 0 | |
release/release_build.py
|
#!/usr/bin/env python
# encoding: utf-8
"""
release_build.py
Created by Jonathan Burke on 2013-08-01.
Copyright (c) 2015 University of Washington. All rights reserved.
"""
# See README-maintainers.html for more information
from release_vars import *
from release_utils import *
# Turned on by the --debug command-line option.
debug = False
ant_debug = ""
def print_usage():
"""Print usage information."""
print "Usage: python release_build.py [projects] [options]"
print_projects(True, 1, 4)
print "\n --auto accepts or chooses the default for all prompts"
print "\n --debug turns on debugging mode which produces verbose output"
print "\n --review-manual review the documentation changes only; don't perform a full build"
def clone_or_update_repos(auto):
"""If the relevant repos do not exist, clone them, otherwise, update them."""
message = """Before building the release, we update the release repositories (or clone them if they are not present).
However, if you have had to run the script multiple times and no files have changed, you may skip this step.
WARNING: IF THIS IS YOUR FIRST RUN OF THE RELEASE ON RELEASE DAY, DO NOT SKIP THIS STEP.
The following repositories will be updated or cloned from their origins:
"""
for live_to_interm in LIVE_TO_INTERM_REPOS:
message += live_to_interm[1] + "\n"
for interm_to_build in INTERM_TO_BUILD_REPOS:
message += interm_to_build[1] + "\n"
message += PLUME_LIB + "\n"
message += PLUME_BIB + "\n\n"
message += "Clone/update repositories?"
if not auto:
if not prompt_yes_no(message, True):
print "WARNING: Continuing without refreshing repositories.\n"
return
for live_to_interm in LIVE_TO_INTERM_REPOS:
clone_or_update(live_to_interm[0], live_to_interm[1], True)
for interm_to_build in INTERM_TO_BUILD_REPOS:
clone_or_update(interm_to_build[0], interm_to_build[1], False)
clone_or_update(LIVE_PLUME_LIB, PLUME_LIB, False)
clone_or_update(LIVE_PLUME_BIB, PLUME_BIB, False)
def copy_cf_logo(cf_release_dir):
dev_releases_png = os.path.join(cf_release_dir, "CFLogo.png")
cmd = "rsync --times %s %s" % (LIVE_CF_LOGO, dev_releases_png)
execute(cmd)
def get_afu_date(building_afu):
if building_afu:
return get_current_date()
else:
afu_site = os.path.join(HTTP_PATH_TO_LIVE_SITE, "annotation-file-utilities")
return extract_from_site(afu_site, "<!-- afu-date -->", "<!-- /afu-date -->")
def get_new_version(project_name, curr_version, auto):
"Queries the user for the new version number; returns old and new version numbers."
print "Current " + project_name + " version: " + curr_version
suggested_version = increment_version(curr_version)
if auto:
new_version = suggested_version
else:
new_version = prompt_w_suggestion("Enter new version", suggested_version, "^\\d+\\.\\d+(?:\\.\\d+){0,2}$")
print "New version: " + new_version
if curr_version == new_version:
curr_version = prompt_w_suggestion("Enter current version", suggested_version, "^\\d+\\.\\d+(?:\\.\\d+){0,2}$")
print "Current version: " + curr_version
return (curr_version, new_version)
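# Deletes and recreates the dev site's <project_name>/releases/<version> directory and returns its path.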
def create_dev_website_release_version_dir(project_name, version):
interm_dir = os.path.join(FILE_PATH_TO_DEV_SITE, project_name, "releases", version)
delete_path_if_exists(interm_dir)
execute("mkdir -p %s" % interm_dir, True, False)
return interm_dir
def create_dirs_for_dev_website_release_versions(jsr308_version, afu_version):
# these directories correspond to the /cse/www2/types/dev/<project_name>/releases/<version> dirs
jsr308_interm_dir = create_dev_website_release_version_dir("jsr308", jsr308_version)
afu_interm_dir = create_dev_website_release_version_dir("annotation-file-utilities", afu_version)
checker_framework_interm_dir = create_dev_website_release_version_dir("checker-framework", jsr308_version)
return (jsr308_interm_dir, afu_interm_dir, checker_framework_interm_dir)
def update_project_dev_website_symlink(project_name, release_version):
project_dev_site = os.path.join(FILE_PATH_TO_DEV_SITE, project_name)
link_path = os.path.join(project_dev_site, "current")
dev_website_relative_dir = os.path.join(RELEASES_SUBDIR, release_version)
print "Writing symlink: " + link_path + "\nto point to relative directory: " + dev_website_relative_dir
force_symlink(dev_website_relative_dir, link_path)
def build_jsr308_langtools_release(version, afu_version, afu_release_date, jsr308_interm_dir):
afu_build_properties = os.path.join(ANNO_FILE_UTILITIES, "build.properties")
# update jsr308_langtools versions
ant_props = "-Dlangtools=%s -Drelease.ver=%s -Dafu.version=%s -Dafu.properties=%s -Dafu.release.date=\"%s\"" % (JSR308_LANGTOOLS, version, afu_version, afu_build_properties, afu_release_date)
# IMPORTANT: The release.xml in the directory where the Checker Framework is being built is used. Not the release.xml in the directory you ran release_build.py from.
ant_cmd = "ant %s -f release.xml %s update-langtools-versions " % (ant_debug, ant_props)
execute(ant_cmd, True, False, CHECKER_FRAMEWORK_RELEASE)
# TODO: perhaps make a "dist" target rather than listing out the relevant targets
# build jsr308 binaries and documents but not website, fail if the tests don't pass
ant_cmd = "ant %s -Dhalt.on.test.failure=true -Dlauncher.java=java clean-and-build-all-tools build-javadoc build-doclets" % (ant_debug)
execute(ant_cmd, True, False, JSR308_MAKE)
jsr308ZipName = "jsr308-langtools-%s.zip" % version
# zip up jsr308-langtools project and place it in jsr308_interm_dir
ant_props = "-Dlangtools=%s -Dcheckerframework=%s -Ddest.dir=%s -Dfile.name=%s -Dversion=%s" % (JSR308_LANGTOOLS, CHECKER_FRAMEWORK, jsr308_interm_dir, jsr308ZipName, version)
# IMPORTANT: The release.xml in the directory where the Checker Framework is being built is used. Not the release.xml in the directory you ran release_build.py from.
ant_cmd = "ant %s -f release.xml %s zip-langtools " % (ant_debug, ant_props)
execute(ant_cmd, True, False, CHECKER_FRAMEWORK_RELEASE)
# build jsr308 website
make_cmd = "make jsr308_www=%s jsr308_www_online=%s web-no-checks" % (jsr308_interm_dir, HTTP_PATH_TO_DEV_SITE)
execute(make_cmd, True, False, JSR308_LT_DOC)
# copy remaining website files to jsr308_interm_dir
ant_props = "-Dlangtools=%s -Ddest.dir=%s" % (JSR308_LANGTOOLS, jsr308_interm_dir)
# IMPORTANT: The release.xml in the directory where the Checker Framework is being built is used. Not the release.xml in the directory you ran release_build.py from.
ant_cmd = "ant %s -f release.xml %s langtools-website-docs " % (ant_debug, ant_props)
execute(ant_cmd, True, False, CHECKER_FRAMEWORK_RELEASE)
update_project_dev_website_symlink("jsr308", version)
return
def get_current_date():
return CURRENT_DATE.strftime("%d %b %Y")
def build_annotation_tools_release(version, afu_interm_dir):
execute('java -version', True)
date = get_current_date()
build = os.path.join(ANNO_FILE_UTILITIES, "build.xml")
ant_cmd = "ant %s -buildfile %s -e update-versions -Drelease.ver=\"%s\" -Drelease.date=\"%s\"" % (ant_debug, build, version, date)
execute(ant_cmd)
# Deploy to intermediate site
ant_cmd = "ant %s -buildfile %s -e web-no-checks -Dafu.version=%s -Ddeploy-dir=%s" % (ant_debug, build, version, afu_interm_dir)
execute(ant_cmd)
update_project_dev_website_symlink("annotation-file-utilities", version)
def build_and_locally_deploy_maven(version):
protocol_length = len("file://")
maven_dev_repo_without_protocol = MAVEN_DEV_REPO[protocol_length:]
execute("mkdir -p " + maven_dev_repo_without_protocol)
# Build and then deploy the maven plugin
mvn_install(MAVEN_PLUGIN_DIR)
mvn_deploy_mvn_plugin(MAVEN_PLUGIN_DIR, MAVEN_PLUGIN_POM, version, MAVEN_DEV_REPO)
# Deploy jsr308 and checker-qual jars to maven repo
mvn_deploy(CHECKER_BINARY, CHECKER_BINARY_POM, MAVEN_DEV_REPO)
mvn_deploy(CHECKER_QUAL, CHECKER_QUAL_POM, MAVEN_DEV_REPO)
mvn_deploy(JAVAC_BINARY, JAVAC_BINARY_POM, MAVEN_DEV_REPO)
mvn_deploy(JDK7_BINARY, JDK7_BINARY_POM, MAVEN_DEV_REPO)
mvn_deploy(JDK8_BINARY, JDK8_BINARY_POM, MAVEN_DEV_REPO)
return
def build_checker_framework_release(version, afu_version, afu_release_date, checker_framework_interm_dir, manual_only=False):
checker_dir = os.path.join(CHECKER_FRAMEWORK, "checker")
afu_build_properties = os.path.join(ANNO_FILE_UTILITIES, "build.properties")
# update jsr308_langtools versions
ant_props = "-Dchecker=%s -Drelease.ver=%s -Dafu.version=%s -Dafu.properties=%s -Dafu.release.date=\"%s\"" % (checker_dir, version, afu_version, afu_build_properties, afu_release_date)
# IMPORTANT: The release.xml in the directory where the Checker Framework is being built is used. Not the release.xml in the directory you ran release_build.py from.
ant_cmd = "ant %s -f release.xml %s update-checker-framework-versions " % (ant_debug, ant_props)
execute(ant_cmd, True, False, CHECKER_FRAMEWORK_RELEASE)
if not manual_only:
# ensure all PluginUtil.java files are identical
execute("sh checkPluginUtil.sh", True, False, CHECKER_FRAMEWORK_RELEASE)
# build the checker framework binaries and documents, run checker framework tests
ant_cmd = "ant %s -Dhalt.on.test.failure=true dist-release" % (ant_debug)
execute(ant_cmd, True, False, CHECKER_FRAMEWORK)
# make the Checker Framework Manual
checker_manual_dir = os.path.join(checker_dir, "manual")
execute("make manual.pdf manual.html", True, False, checker_manual_dir)
if not manual_only:
# make the dataflow manual
dataflow_manual_dir = os.path.join(CHECKER_FRAMEWORK, "dataflow", "manual")
execute("make", True, False, dataflow_manual_dir)
# make the checker framework tutorial
checker_tutorial_dir = os.path.join(CHECKER_FRAMEWORK, "tutorial")
execute("make", True, False, checker_tutorial_dir)
cfZipName = "checker-framework-%s.zip" % version
# Create checker-framework-X.Y.Z.zip and put it in checker_framework_interm_dir
ant_props = "-Dchecker=%s -Ddest.dir=%s -Dfile.name=%s -Dversion=%s" % (checker_dir, checker_framework_interm_dir, cfZipName, version)
# IMPORTANT: The release.xml in the directory where the Checker Framework is being built is used. Not the release.xml in the directory you ran release_build.py from.
ant_cmd = "ant %s -f release.xml %s zip-checker-framework " % (ant_debug, ant_props)
execute(ant_cmd, True, False, CHECKER_FRAMEWORK_RELEASE)
ant_props = "-Dchecker=%s -Ddest.dir=%s -Dfile.name=%s -Dversion=%s" % (checker_dir, checker_framework_interm_dir, "mvn-examples.zip", version)
# IMPORTANT: The release.xml in the directory where the Checker Framework is being built is used. Not the release.xml in the directory you ran release_build.py from.
ant_cmd = "ant %s -f release.xml %s zip-maven-examples " % (ant_debug, ant_props)
execute(ant_cmd, True, False, CHECKER_FRAMEWORK_RELEASE)
# copy the remaining checker-framework website files to checker_framework_interm_dir
ant_props = "-Dchecker=%s -Ddest.dir=%s -Dmanual.name=%s -Ddataflow.manual.name=%s -Dchecker.webpage=%s" % (
checker_dir, checker_framework_interm_dir, "checker-framework-manual",
"checker-framework-dataflow-manual", "checker-framework-webpage.html"
)
# IMPORTANT: The release.xml in the directory where the Checker Framework is being built is used. Not the release.xml in the directory you ran release_build.py from.
ant_cmd = "ant %s -f release.xml %s checker-framework-website-docs " % (ant_debug, ant_props)
execute(ant_cmd, True, False, CHECKER_FRAMEWORK_RELEASE)
# clean no longer necessary files left over from building the checker framework tutorial
checker_tutorial_dir = os.path.join(CHECKER_FRAMEWORK, "tutorial")
execute("make clean", True, False, checker_tutorial_dir)
build_and_locally_deploy_maven(version)
update_project_dev_website_symlink("checker-framework", version)
return
def commit_to_interm_projects(jsr308_version, afu_version, projects_to_release):
# Use the project definitions instead; see find_project_locations
if projects_to_release[LT_OPT]:
commit_tag_and_push(jsr308_version, JSR308_LANGTOOLS, "jsr308-")
if projects_to_release[AFU_OPT]:
commit_tag_and_push(afu_version, ANNO_TOOLS, "")
if projects_to_release[CF_OPT]:
commit_tag_and_push(jsr308_version, CHECKER_FRAMEWORK, "checker-framework-")
def main(argv):
# MANUAL Indicates a manual step
# SEMIAUTO Indicates a mostly automated step with possible prompts. Most of these steps become fully automated when --auto is used.
# AUTO Indicates the step is fully automated.
delete_if_exists(RELEASE_BUILD_COMPLETED_FLAG_FILE)
set_umask()
projects_to_release = read_projects(argv, print_usage)
# Check for a --auto
# If --auto then no prompt and just build a full release
# Otherwise provide all prompts
auto = read_command_line_option(argv, "--auto")
global debug
global ant_debug
debug = read_command_line_option(argv, "--debug")
if debug:
ant_debug = "-debug"
# Indicates whether to review documentation changes only and not perform a build.
review_documentation = read_command_line_option(argv, "--review-manual")
add_project_dependencies(projects_to_release)
afu_date = get_afu_date(projects_to_release[AFU_OPT])
# For each project, build what is necessary but don't push
if not review_documentation:
print "Building a new release of Langtools, Annotation Tools, and the Checker Framework!"
else:
print "Reviewing the documentation for Langtools, Annotation Tools, and the Checker Framework."
print "\nPATH:\n" + os.environ['PATH'] + "\n"
print_step("Build Step 1: Clone the build and intermediate repositories.") # SEMIAUTO
# Recall that there are 3 relevant sets of repositories for the release:
# * build repository - repository where the project is built for release
# * intermediate repository - repository to which release related changes are pushed after the project is built
# * release repository - GitHub/Bitbucket repositories, the central repository.
# Every time we run release_build, changes are committed to the intermediate repository from build but NOT to
# the release repositories. If we are running the build script multiple times without actually committing the
# release then these changes need to be cleaned before we run the release_build script again.
# The "Clone/update repositories" step updates the repositories with respect to the live repositories on
# GitHub/Bitbucket, but it is the "Verify repositories" step that ensures that they are clean,
# i.e. indistinguishable from a freshly cloned repository.
# check we are cloning LIVE -> INTERM, INTERM -> RELEASE
print_step("\n1a: Clone/update repositories.") # SEMIAUTO
clone_or_update_repos(auto)
# This step ensures the previous step worked. It checks to see if we have any modified files, untracked files,
# or outgoing changesets. If so, it fails.
print_step("1b: Verify repositories.") # SEMIAUTO
check_repos(INTERM_REPOS, True, True)
check_repos(BUILD_REPOS, True, False)
# The release script requires a number of common tools (Ant, Maven, make, etc...). This step checks
# to make sure all tools are available on the command line in order to avoid wasting time in the
# event a tool is missing late in execution.
print_step("Build Step 2: Check tools.") # AUTO
check_tools(TOOLS)
# Usually we increment the release by 0.0.1 per release unless there is a major change.
# The release script will read the current version of the Checker Framework/Annotation File Utilities
# from the release website and then suggest the next release version 0.0.1 higher than the current
# version. You can also manually specify a version higher than the current version. Lower or equivalent
# versions are not possible and will be rejected when you try to push the release.
# The jsr308-langtools version ALWAYS matches the Checker Framework version.
# NOTE: If you pass --auto on the command line then the next logical version will be chosen automatically
print_step("Build Step 3: Determine release versions.") # SEMIAUTO
old_jsr308_version = current_distribution(CHECKER_FRAMEWORK)
(old_jsr308_version, jsr308_version) = get_new_version("JSR308/Checker Framework", old_jsr308_version, auto)
if old_jsr308_version == jsr308_version:
print("It is *strongly discouraged* to not update the release version numbers for the Checker Framework " +
"and jsr308-langtools even if no changes were made to these in a month. This would break so much " +
"in the release scripts that they would become unusable.\n")
prompt_until_yes()
old_afu_version = get_afu_version_from_html(AFU_MANUAL)
(old_afu_version, afu_version) = get_new_version("Annotation File Utilities", old_afu_version, auto)
if old_afu_version == afu_version:
print("The AFU version has not changed. It is recommended to include a small bug fix or doc update in every " +
"AFU release so the version number can be updated, but when that is not possible, before and after running " +
"release_build, you must:\n" +
"-Ensure that you are subscribed to the AFU push notifications mailing list.\n" +
"-Verify that the AFU changelog has not been changed.\n" +
"-Grep all the AFU pages on the dev web site for the release date with patterns such as \"29.*Aug\" " +
"and \"Aug.*29\" and fix them to match the previous release date.\n" +
"Keep in mind that in this case, the release scripts will fail in certain places and you must manually " +
"follow a few remaining release steps.\n")
prompt_until_yes()
if review_documentation:
print_step("Build Step 4: Review changelogs.") # SEMIAUTO
print "Verify that all changelog messages follow the guidelines found in README-maintainers.html#changelog_guide\n"
print "Ensure that the changelogs end with a line like"
print "Resolved issues: 200, 300, 332, 336, 357, 359, 373, 374\n"
print("To ensure the jsr308-langtools, AFU and Checker Framework changelogs are correct and complete, " +
"please follow the Content Guidelines found in README-maintainers.html#content_guidelines\n")
prompt_until_yes()
# This step will write out all of the changes that happened to the individual projects' documentation
# to temporary files. Please review these changes for errors.
print_step("Build Step 5: Review documentation changes.") # SEMIAUTO
print "Please review the documentation changes since the last release to ensure that"
print " * All new features mentioned in the manuals appear in the changelogs, and"
print " * All new features mentioned in the changelogs are documented in the manuals."
print ""
if projects_to_release[LT_OPT]:
propose_change_review("the JSR308 documentation updates", old_jsr308_version, JSR308_LANGTOOLS,
JSR308_TAG_PREFIXES, JSR308_LT_DOC, TMP_DIR + "/jsr308.manual")
if projects_to_release[AFU_OPT]:
propose_change_review("the Annotation File Utilities documentation updates", old_afu_version, ANNO_TOOLS,
AFU_TAG_PREFIXES, AFU_MANUAL, TMP_DIR + "/afu.manual")
if projects_to_release[CF_OPT]:
build_checker_framework_release(jsr308_version, afu_version, afu_date, "", manual_only=True)
print ""
print "The built Checker Framework manual (HTML and PDF) can be found at " + CHECKER_MANUAL
print ""
print "Verify that the manual PDF has no lines that are longer than the page width"
print "(it is acceptable for some lines to extend into the right margin)."
print ""
print "If any checkers have been added or removed, then verify that the lists"
print "of checkers in these manual sections are up to date:"
print " * Introduction"
print " * Run-time tests and type refinement"
print "and make sure that the checkers supported in the Eclipse plug-in are up to date"
print "by following the instructions at eclipse/README-developers.html#update_checkers"
print ""
propose_change_review("the Checker Framework documentation updates", old_jsr308_version, CHECKER_FRAMEWORK,
CHECKER_TAG_PREFIXES, CHECKER_MANUAL, TMP_DIR + "/checker-framework.manual")
return
print_step("Build Step 4: Copy entire live site to dev site (~22 minutes).") # SEMIAUTO
if auto or prompt_yes_no("Proceed with copy of live site to dev site?", True):
# ************************************************************************************************
# WARNING: BE EXTREMELY CAREFUL WHEN MODIFYING THIS COMMAND. The --delete option is destructive
# and its work cannot be undone. If, for example, this command were modified to accidentally make
# /cse/www2/types/ the target directory, the entire types directory could be wiped out.
execute("rsync --omit-dir-times --recursive --links --delete --quiet --exclude=dev --exclude=sparta/release/versions /cse/www2/types/ /cse/www2/types/dev")
# ************************************************************************************************
print_step("Build Step 5: Create directories for the current release on the dev site.") # AUTO
version_dirs = create_dirs_for_dev_website_release_versions(jsr308_version, afu_version)
jsr308_interm_dir = version_dirs[0]
afu_interm_dir = version_dirs[1]
checker_framework_interm_dir = version_dirs[2]
# The projects are built in the following order: JSR308-Langtools, Annotation File Utilities,
# and Checker Framework. Furthermore, their manuals and websites are also built and placed in
# their relevant locations at http://types.cs.washington.edu/dev/. This is the most time-consuming
# piece of the release. There are no prompts from this step forward; you might want to get a cup
# of coffee and do something else until it is done.
print_step("Build Step 6: Build projects and websites.") # AUTO
print projects_to_release
if projects_to_release[LT_OPT]:
print_step("6a: Build Type Annotations Compiler.")
build_jsr308_langtools_release(jsr308_version, afu_version, afu_date, jsr308_interm_dir)
if projects_to_release[AFU_OPT]:
print_step("6b: Build Annotation File Utilities.")
build_annotation_tools_release(afu_version, afu_interm_dir)
if projects_to_release[CF_OPT]:
print_step("6c: Build Checker Framework.")
build_checker_framework_release(jsr308_version, afu_version, afu_date, checker_framework_interm_dir)
print_step("Build Step 7: Overwrite .htaccess.") # AUTO
# Not "cp -p" because that does not work across filesystems whereas rsync does
execute("rsync --times %s %s" % (RELEASE_HTACCESS, DEV_HTACCESS))
copy_cf_logo(checker_framework_interm_dir)
# Each project has a set of files that are updated for release. Usually these updates include new
# release date and version information. All changed files are committed and pushed to the intermediate
# repositories. Keep this in mind if you have any changed files from steps 1d, 4, or 5. Edits to the
# scripts in the jsr308-release/scripts directory will never be checked in.
print_step("Build Step 8: Commit projects to intermediate repos.") # AUTO
commit_to_interm_projects(jsr308_version, afu_version, projects_to_release)
# Adds read/write/execute group permissions to all of the new dev website directories
# under http://types.cs.washington.edu/dev/ These directories need group read/execute
# permissions in order for them to be served.
print_step("\n\nBuild Step 9: Add group permissions to repos.")
for build in BUILD_REPOS:
ensure_group_access(build)
for interm in INTERM_REPOS:
ensure_group_access(interm)
# At the moment, this will lead to output error messages because some metadata in some of the
# dirs I think is owned by Mike or Werner. We should identify these and have them fix it.
# But as long as the processes return a zero exit status, we should be ok.
print_step("\n\nBuild Step 10: Add group permissions to websites.") # AUTO
ensure_group_access(FILE_PATH_TO_DEV_SITE)
create_empty_file(RELEASE_BUILD_COMPLETED_FLAG_FILE)
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
[] |
[] |
[
"PATH"
] |
[]
|
["PATH"]
|
python
| 1 | 0 | |
tools/gen_projects_json.py
|
# for now, run like:
# GH_USER=user GH_TOKEN=$(cat /path/to/token/file) python tools/gen_entry_json.py
# get a token from here: https://github.com/settings/tokens
import os
import re
import sys
import json
import time
import base64
import urllib2
import datetime
from pprint import pprint
import yaml
from boltons.urlutils import URL
from boltons.fileutils import atomic_save
from boltons.timeutils import isoparse
TOOLS_PATH = os.path.dirname(os.path.abspath(__file__))
PROJ_PATH = os.path.dirname(TOOLS_PATH)
PREFIXES = ['v', # common
'rel-', # theano
'orc-', # orc
'tor-', # tor
'clamav-', # ...
'streamex-', # ...
]
VTAG_RE = re.compile(r'^(?P<major>\d+)\.[0-9a-zA-Z_.]+')
def strip_prefix(tag_name, prefixes):
# TODO: could combine these all into the re
for prefix in prefixes:
if tag_name.startswith(prefix):
tag_name = tag_name[len(prefix):]
break
return tag_name
def match_vtag(tag_name, prefixes):
tag_name = strip_prefix(tag_name, prefixes)
return VTAG_RE.match(tag_name)
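# version_key converts a (possibly prefixed) version tag into a tuple of ints so tags sort numerically.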
def version_key(version, prefixes=PREFIXES):
return tuple([int(x) for x in
re.split('\D', match_vtag(version, prefixes).group(0))
if x and x.isdigit()])
def _get_gh_json(url):
"""
Get paginated results from GitHub, possibly authorized based on
GH_USER/GH_TOKEN env vars.
"""
gh_user = os.getenv('GH_USER', '')
gh_token = os.getenv('GH_TOKEN', '')
req = urllib2.Request(url)
if gh_user and gh_token:
auth_header_val = 'Basic %s' % base64.b64encode('%s:%s' % (gh_user, gh_token))
req.add_header('Authorization', auth_header_val)
resp = urllib2.urlopen(req)
body = resp.read()
res = json.loads(body)
rate_rem = int(resp.info().dict.get('x-ratelimit-remaining', '-1'))
if not isinstance(res, list) or not res:
print(' (( %s requests remaining' % rate_rem)
return res
page = 2
ret = res
while res:
paged_url = url + '?page=%s' % page
req = urllib2.Request(paged_url)
if gh_user and gh_token:
req.add_header('Authorization', auth_header_val)
resp = urllib2.urlopen(req)
body = resp.read()
res = json.loads(body)
ret.extend(res)
page += 1
rate_rem = int(resp.info().dict.get('x-ratelimit-remaining', '-1'))
print(' (( %s requests remaining' % rate_rem)
return ret
def _get_gh_rel_data(rel_info, prefixes):
ret = {}
ret['tag'] = rel_info['name']
ret['version'] = None
if match_vtag(ret['tag'], prefixes):
ret['version'] = strip_prefix(ret['tag'], prefixes)
ret['api_commit_url'] = rel_info['commit']['url']
rel_data = _get_gh_json(ret['api_commit_url'])
ret['date'] = rel_data['commit']['author']['date']
ret['link'] = rel_data['html_url']
return ret
def get_gh_project_info(info):
ret = {}
url = info.get('gh_url')
if url is None:
return ret
org, repo = URL(url.rstrip('/')).path_parts[1:]
gh_url = URL('https://api.github.com/repos')
gh_url.path_parts += (org, repo)
project_url = gh_url.to_text()
project_data = _get_gh_json(project_url)
ret['star_count'] = project_data['stargazers_count']
gh_url.path_parts += ('tags',)
tags_url = gh_url.to_text()
tags_data = _get_gh_json(tags_url)
vtags_data = [td for td in tags_data if match_vtag(td['name'], PREFIXES)]
ret['release_count'] = len(vtags_data)
latest_release = vtags_data[0]
latest_release_data = _get_gh_rel_data(latest_release, PREFIXES)
for k, v in latest_release_data.items():
ret['latest_release_%s' % k] = v
vtags_data.sort(key=lambda x: version_key(x['name'], PREFIXES), reverse=True)
first_release_version = info.get('first_release_version')
if first_release_version is None:
first_release = [v for v in vtags_data
if version_key(v['name']) < version_key(latest_release['name'])][-1]
else:
first_release = [v for v in vtags_data if v['name'] == first_release_version][0]
first_release_data = _get_gh_rel_data(first_release, PREFIXES)
for k, v in first_release_data.items():
ret['first_release_%s' % k] = v
zv_releases = [rel for rel in vtags_data
if match_vtag(rel['name'], PREFIXES).group('major') == '0']
ret['release_count_zv'] = len(zv_releases)
print(' .. %s releases, %s 0ver' % (ret['release_count'], ret['release_count_zv']))
is_zerover = latest_release in zv_releases
ret['is_zerover'] = is_zerover
if is_zerover:
return ret
last_zv_release = zv_releases[0]
first_nonzv_release = vtags_data[vtags_data.index(last_zv_release) - 1]
first_nonzv_release_data = _get_gh_rel_data(first_nonzv_release, PREFIXES)
ret['last_zv_release_version'] = last_zv_release['name']
for k, v in first_nonzv_release_data.items():
ret['first_nonzv_release_%s' % k] = v
#import pdb;pdb.set_trace()
return ret
def _json_default(obj):
# yaml likes to parse some dates
if isinstance(obj, (datetime.datetime, datetime.date)):
return obj.isoformat()
raise TypeError ("%r is not serializable" % obj)
def fetch_entries(projects):
entries = []
for p in projects:
print('processing', p['name'])
info = dict(p)
if info.get('skip'):
continue
info['url'] = info.get('url', info.get('gh_url'))
if info.get('gh_url'):
gh_info = get_gh_project_info(info)
info.update(gh_info)
info['is_zerover'] = info.get('is_zerover', not info.get('emeritus', False))
entries.append(info)
return entries
def _main():
start_time = time.time()
with open(PROJ_PATH + '/projects.yaml') as f:
projects = yaml.load(f)['projects']
#projects = [p for p in projects if p['name'] == 'scikit-learn']
#if not projects:
# return
try:
with open(PROJ_PATH + '/projects.json') as f:
cur_data = json.load(f)
cur_projects = cur_data['projects']
cur_gen_date = isoparse(cur_data['gen_date'])
except (IOError, KeyError):
cur_projects = []
cur_gen_date = None
if cur_gen_date:
fetch_outdated = (datetime.datetime.utcnow() - cur_gen_date) > datetime.timedelta(seconds=3600)
else:
fetch_outdated = True
cur_names = sorted([c['name'] for c in cur_projects])
new_names = sorted([n['name'] for n in projects])
tpr = os.getenv('TRAVIS_PULL_REQUEST')
if tpr and tpr != 'false':
print('Pull request detected. Skipping data update until merged.')
return
if fetch_outdated or cur_names != new_names or os.getenv('ZV_DISABLE_CACHING'):
entries = fetch_entries(projects)
else:
print('Current data already up to date, exiting.')
return
pprint(entries)
res = {'projects': entries,
'gen_date': datetime.datetime.utcnow().isoformat(),
'gen_duration': time.time() - start_time}
with atomic_save(PROJ_PATH + '/projects.json') as f:
f.write(json.dumps(res, indent=2, sort_keys=True, default=_json_default))
return
if __name__ == '__main__':
try:
sys.exit(_main() or 0)
except Exception as e:
if os.getenv('CI'):
raise
print(' !! debugging unexpected %r' % e)
import pdb;pdb.post_mortem()
raise
|
[] |
[] |
[
"TRAVIS_PULL_REQUEST",
"GH_TOKEN",
"CI",
"ZV_DISABLE_CACHING",
"GH_USER"
] |
[]
|
["TRAVIS_PULL_REQUEST", "GH_TOKEN", "CI", "ZV_DISABLE_CACHING", "GH_USER"]
|
python
| 5 | 0 | |
magefile.go
|
//go:build mage
// +build mage
package main
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/aserto-dev/mage-loot/buf"
"github.com/aserto-dev/mage-loot/deps"
"github.com/aserto-dev/mage-loot/fsutil"
"github.com/aserto-dev/mage-loot/mage"
)
func All() error {
Deps()
err := Clean()
if err != nil {
return err
}
return Generate()
}
// install required dependencies.
func Deps() {
deps.GetAllDeps()
}
// Generate go code
func Generate() error {
bufImage := "buf.build/aserto-dev/aserto"
tag, err := buf.GetLatestTag(bufImage)
if err != nil {
fmt.Println("Could not retrieve tags, using latest")
} else {
bufImage = fmt.Sprintf("%s:%s", bufImage, tag.Name)
}
return gen(bufImage, bufImage)
}
// Generates from a dev build.
func GenerateDev() error {
err := BuildDev()
if err != nil {
return err
}
bufImage := filepath.Join(getProtoRepo(), "bin", "aserto.bin#format=bin")
fileSources := filepath.Join(getProtoRepo(), "public#format=dir")
return gen(bufImage, fileSources)
}
// Builds the aserto proto image
func BuildDev() error {
return mage.RunDir(getProtoRepo(), mage.AddArg("build"))
}
func getProtoRepo() string {
protoRepo := os.Getenv("PROTO_REPO")
if protoRepo == "" {
protoRepo = "../proto"
}
return protoRepo
}
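// gen runs "buf generate" against the given image, extending PATH so the protoc-gen-go, protoc-gen-go-grpc and protoc-gen-grpc-gateway plugins resolve.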
func gen(bufImage, fileSources string) error {
files, err := getClientFiles(fileSources)
if err != nil {
return err
}
oldPath := os.Getenv("PATH")
pathSeparator := string(os.PathListSeparator)
path := oldPath +
pathSeparator +
filepath.Dir(deps.GoBinPath("protoc-gen-go-grpc")) +
pathSeparator +
filepath.Dir(deps.GoBinPath("protoc-gen-grpc-gateway")) +
pathSeparator +
filepath.Dir(deps.GoBinPath("protoc-gen-go"))
return buf.RunWithEnv(map[string]string{
"PATH": path,
},
buf.AddArg("generate"),
buf.AddArg("--template"),
buf.AddArg(filepath.Join("buf", "buf.gen.yaml")),
buf.AddArg(bufImage),
buf.AddPaths(files),
)
}
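// getClientFiles exports the proto sources to a temporary directory and returns the aserto/authorizer .proto paths relative to it.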
func getClientFiles(fileSources string) ([]string, error) {
var clientFiles []string
bufExportDir, err := ioutil.TempDir("", "bufimage")
if err != nil {
return clientFiles, err
}
bufExportDir = filepath.Join(bufExportDir, "")
defer os.RemoveAll(bufExportDir)
err = buf.Run(
buf.AddArg("export"),
buf.AddArg(fileSources),
buf.AddArg("--exclude-imports"),
buf.AddArg("-o"),
buf.AddArg(bufExportDir),
)
if err != nil {
return clientFiles, err
}
authorizerFiles, err := fsutil.Glob(filepath.Join(bufExportDir, "aserto", "authorizer", "authorizer", "**", "*.proto"), "")
if err != nil {
return clientFiles, err
}
fmt.Printf("found: %v files \n", len(authorizerFiles))
for _, f := range authorizerFiles {
clientFiles = append(clientFiles, strings.TrimPrefix(f, bufExportDir+string(filepath.Separator)))
}
return clientFiles, nil
}
// Removes generated files
func Clean() error {
return os.RemoveAll("aserto")
}
|
[
"\"PROTO_REPO\"",
"\"PATH\""
] |
[] |
[
"PROTO_REPO",
"PATH"
] |
[]
|
["PROTO_REPO", "PATH"]
|
go
| 2 | 0 | |
kubetest/anywhere.go
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"bytes"
"context"
"errors"
"flag"
"fmt"
"io/ioutil"
"log"
"net"
"os"
"os/exec"
"regexp"
"strconv"
"strings"
"text/template"
"time"
"golang.org/x/crypto/ssh"
)
const defaultKubeadmCNI = "weave"
var (
// kubernetes-anywhere specific flags.
kubernetesAnywherePath = flag.String("kubernetes-anywhere-path", "",
"(kubernetes-anywhere only) Path to the kubernetes-anywhere directory. Must be set for kubernetes-anywhere.")
kubernetesAnywherePhase2Provider = flag.String("kubernetes-anywhere-phase2-provider", "ignition",
"(kubernetes-anywhere only) Provider for phase2 bootstrapping. (Defaults to ignition).")
kubernetesAnywhereKubeadmVersion = flag.String("kubernetes-anywhere-kubeadm-version", "stable",
"(kubernetes-anywhere only) Version of kubeadm to use, if phase2-provider is kubeadm. May be \"stable\" or a gs:// link to a custom build.")
kubernetesAnywhereKubernetesVersion = flag.String("kubernetes-anywhere-kubernetes-version", "",
"(kubernetes-anywhere only) Version of Kubernetes to use (e.g. latest, stable, latest-1.6, 1.6.3, etc).")
kubernetesAnywhereKubeletVersion = flag.String("kubernetes-anywhere-kubelet-version", "stable",
"(kubernetes-anywhere only) Version of Kubelet to use, if phase2-provider is kubeadm. May be \"stable\" or a gs:// link to a custom build.")
kubernetesAnywhereKubeletCIVersion = flag.String("kubernetes-anywhere-kubelet-ci-version", "",
"(kubernetes-anywhere only) If specified, the ci version for the kubelet to use. Overrides kubernetes-anywhere-kubelet-version.")
kubernetesAnywhereCluster = flag.String("kubernetes-anywhere-cluster", "",
"(kubernetes-anywhere only) Cluster name. Must be set for kubernetes-anywhere.")
kubernetesAnywhereProxyMode = flag.String("kubernetes-anywhere-proxy-mode", "",
"(kubernetes-anywhere only) Chose kube-proxy mode.")
kubernetesAnywhereUpTimeout = flag.Duration("kubernetes-anywhere-up-timeout", 20*time.Minute,
"(kubernetes-anywhere only) Time limit between starting a cluster and making a successful call to the Kubernetes API.")
kubernetesAnywhereNumNodes = flag.Int("kubernetes-anywhere-num-nodes", 4,
"(kubernetes-anywhere only) Number of nodes to be deployed in the cluster.")
kubernetesAnywhereUpgradeMethod = flag.String("kubernetes-anywhere-upgrade-method", "upgrade",
"(kubernetes-anywhere only) Indicates whether to do the control plane upgrade with kubeadm method \"init\" or \"upgrade\"")
kubernetesAnywhereCNI = flag.String("kubernetes-anywhere-cni", "",
"(kubernetes-anywhere only) The name of the CNI plugin used for the cluster's SDN.")
kubernetesAnywhereDumpClusterLogs = flag.Bool("kubernetes-anywhere-dump-cluster-logs", true,
"(kubernetes-anywhere only) Whether to dump cluster logs.")
kubernetesAnywhereOSImage = flag.String("kubernetes-anywhere-os-image", "ubuntu-1604-xenial-v20171212",
"(kubernetes-anywhere only) The name of the os_image to use for nodes")
kubernetesAnywhereKubeadmFeatureGates = flag.String("kubernetes-anywhere-kubeadm-feature-gates", "",
"(kubernetes-anywhere only) A set of key=value pairs that describes feature gates for kubeadm features. If specified, this flag will pass on to kubeadm.")
)
const kubernetesAnywhereConfigTemplate = `
.phase1.num_nodes={{.NumNodes}}
.phase1.cluster_name="{{.Cluster}}"
.phase1.ssh_user=""
.phase1.cloud_provider="gce"
.phase1.gce.os_image="{{.OSImage}}"
.phase1.gce.instance_type="n1-standard-1"
.phase1.gce.project="{{.Project}}"
.phase1.gce.region="{{.Region}}"
.phase1.gce.zone="{{.Zone}}"
.phase1.gce.network="default"
.phase2.installer_container="docker.io/colemickens/k8s-ignition:latest"
.phase2.docker_registry="k8s.gcr.io"
.phase2.kubernetes_version="{{.KubernetesVersion}}"
.phase2.provider="{{.Phase2Provider}}"
.phase2.kubelet_version="{{.KubeletVersion}}"
.phase2.kubeadm.version="{{.KubeadmVersion}}"
.phase2.kube_context_name="{{.KubeContext}}"
.phase2.proxy_mode="{{.KubeproxyMode}}"
.phase2.kubeadm.master_upgrade.method="{{.UpgradeMethod}}"
.phase2.kubeadm.feature_gates="{{.KubeadmFeatureGates}}"
.phase3.run_addons=y
.phase3.kube_proxy=n
.phase3.dashboard=n
.phase3.heapster=n
.phase3.kube_dns=n
.phase3.cni="{{.CNI}}"
`
const kubernetesAnywhereMultiClusterConfigTemplate = kubernetesAnywhereConfigTemplate + `
.phase2.enable_cloud_provider=y
.phase3.gce_storage_class=y
`
type kubernetesAnywhere struct {
path string
// These are exported only because their use in the config template requires it.
Phase2Provider string
KubeadmVersion string
KubeletVersion string
UpgradeMethod string
KubernetesVersion string
NumNodes int
Project string
Cluster string
Zone string
Region string
KubeContext string
CNI string
KubeproxyMode string
OSImage string
KubeadmFeatureGates string
}
func initializeKubernetesAnywhere(project, zone string) (*kubernetesAnywhere, error) {
if *kubernetesAnywherePath == "" {
return nil, fmt.Errorf("--kubernetes-anywhere-path is required")
}
if *kubernetesAnywhereCluster == "" {
return nil, fmt.Errorf("--kubernetes-anywhere-cluster is required")
}
if project == "" {
return nil, fmt.Errorf("--provider=kubernetes-anywhere requires --gcp-project")
}
if zone == "" {
zone = "us-central1-c"
}
kubeletVersion := *kubernetesAnywhereKubeletVersion
if *kubernetesAnywhereKubeletCIVersion != "" {
// resolvedVersion is e.g. v1.11.0-alpha.0.1031+d37460147ec956-bazel
resolvedVersion, err := resolveCIVersion(*kubernetesAnywhereKubeletCIVersion)
if err != nil {
return nil, err
}
kubeletVersion = fmt.Sprintf("gs://kubernetes-release-dev/ci/%v/bin/linux/amd64/", resolvedVersion)
}
// preserve backwards compatibility for e2e tests which never provided cni name
if *kubernetesAnywhereCNI == "" && *kubernetesAnywherePhase2Provider == "kubeadm" {
*kubernetesAnywhereCNI = defaultKubeadmCNI
}
k := &kubernetesAnywhere{
path: *kubernetesAnywherePath,
Phase2Provider: *kubernetesAnywherePhase2Provider,
KubeadmVersion: *kubernetesAnywhereKubeadmVersion,
KubeletVersion: kubeletVersion,
UpgradeMethod: *kubernetesAnywhereUpgradeMethod,
KubernetesVersion: *kubernetesAnywhereKubernetesVersion,
NumNodes: *kubernetesAnywhereNumNodes,
Project: project,
Cluster: *kubernetesAnywhereCluster,
Zone: zone,
Region: regexp.MustCompile(`-[^-]+$`).ReplaceAllString(zone, ""),
CNI: *kubernetesAnywhereCNI,
KubeproxyMode: *kubernetesAnywhereProxyMode,
OSImage: *kubernetesAnywhereOSImage,
KubeadmFeatureGates: *kubernetesAnywhereKubeadmFeatureGates,
}
return k, nil
}
func newKubernetesAnywhere(project, zone string) (deployer, error) {
k, err := initializeKubernetesAnywhere(project, zone)
if err != nil {
return nil, err
}
// Set KUBERNETES_CONFORMANCE_TEST so the auth info is picked up
// from kubectl instead of bash inference.
if err := os.Setenv("KUBERNETES_CONFORMANCE_TEST", "yes"); err != nil {
return nil, err
}
// Set NUM_NODES based on the kubernetes-anywhere-num-nodes flag.
// This env variable is then read by hack/ginkgo-e2e.sh.
if err := os.Setenv("NUM_NODES", strconv.Itoa(k.NumNodes)); err != nil {
return nil, err
}
if err := k.writeConfig(kubernetesAnywhereConfigTemplate); err != nil {
return nil, err
}
return k, nil
}
func resolveCIVersion(version string) (string, error) {
file := fmt.Sprintf("gs://kubernetes-release-dev/ci/%v.txt", version)
return readGSFile(file)
}
// Implemented as a function var for testing.
var readGSFile = readGSFileImpl
func readGSFileImpl(filepath string) (string, error) {
contents, err := control.Output(exec.Command("gsutil", "cat", filepath))
if err != nil {
return "", err
}
return strings.TrimSpace(string(contents)), nil
}
func (k *kubernetesAnywhere) getConfig(configTemplate string) ([]byte, error) {
// As needed, plumb through more CLI options to replace these defaults
tmpl, err := template.New("kubernetes-anywhere-config").Parse(configTemplate)
if err != nil {
return nil, fmt.Errorf("Error creating template for KubernetesAnywhere config: %v", err)
}
var buf bytes.Buffer
if err = tmpl.Execute(&buf, k); err != nil {
return nil, fmt.Errorf("Error executing template for KubernetesAnywhere config: %v", err)
}
return buf.Bytes(), nil
}
func (k *kubernetesAnywhere) writeConfig(configTemplate string) error {
config, err := k.getConfig(configTemplate)
if err != nil {
return fmt.Errorf("Could not generate config: %v", err)
}
return ioutil.WriteFile(k.path+"/.config", config, 0644)
}
func (k *kubernetesAnywhere) Up() error {
cmd := exec.Command("make", "-C", k.path, "setup")
if err := control.FinishRunning(cmd); err != nil {
return err
}
cmd = exec.Command("make", "-C", k.path, "WAIT_FOR_KUBECONFIG=y", "deploy")
if err := control.FinishRunning(cmd); err != nil {
return err
}
if err := k.TestSetup(); err != nil {
return err
}
return waitForReadyNodes(k.NumNodes+1, *kubernetesAnywhereUpTimeout, 1)
}
func (k *kubernetesAnywhere) IsUp() error {
return isUp(k)
}
func (k *kubernetesAnywhere) DumpClusterLogs(localPath, gcsPath string) error {
if !*kubernetesAnywhereDumpClusterLogs {
log.Printf("Cluster log dumping disabled for Kubernetes Anywhere.")
return nil
}
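// Build an SSH client config from JENKINS_GCE_SSH_PRIVATE_KEY_FILE and the current USER to reach the nodes.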
privateKeyPath := os.Getenv("JENKINS_GCE_SSH_PRIVATE_KEY_FILE")
if privateKeyPath == "" {
return fmt.Errorf("JENKINS_GCE_SSH_PRIVATE_KEY_FILE is empty")
}
key, err := ioutil.ReadFile(privateKeyPath)
if err != nil {
return fmt.Errorf("error reading private key %q: %v", privateKeyPath, err)
}
signer, err := ssh.ParsePrivateKey(key)
if err != nil {
return fmt.Errorf("error parsing private key %q: %v", privateKeyPath, err)
}
sshConfig := &ssh.ClientConfig{
User: os.Getenv("USER"),
Auth: []ssh.AuthMethod{
ssh.PublicKeys(signer),
},
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
}
sshClientFactory := &sshClientFactoryImplementation{
sshConfig: sshConfig,
}
logDumper, err := newLogDumper(sshClientFactory, localPath)
if err != nil {
return err
}
ctx, cancel := context.WithCancel(context.TODO())
defer cancel()
finished := make(chan error)
go func() {
finished <- k.dumpAllNodes(ctx, logDumper)
}()
for {
select {
case <-interrupt.C:
cancel()
case err := <-finished:
return err
}
}
}
// dumpAllNodes connects to every node and dumps the logs
func (k *kubernetesAnywhere) dumpAllNodes(ctx context.Context, d *logDumper) error {
// Make sure kubeconfig is set, in particular before calling DumpAllNodes, which calls kubectlGetNodes
if err := k.TestSetup(); err != nil {
return fmt.Errorf("error setting up kubeconfig: %v", err)
}
// try to grab the address of the master from $KUBECONFIG (yikes)
cmd := exec.Command("sh", "-c", "cat ${KUBECONFIG} | grep server")
oBytes, err := control.Output(cmd)
if err != nil {
return fmt.Errorf("failed calling 'cat $KUBECONFIG | grep server': %v", err)
}
o := strings.TrimSpace(string(oBytes))
o = strings.Replace(o, "server: https://", "", -1)
host, _, err := net.SplitHostPort(o)
if err != nil {
return fmt.Errorf("could not extract host from kubeconfig: %v", err)
}
// the rest of the nodes are doable but tricky
additionalIPs := []string{host}
if err := d.DumpAllNodes(ctx, additionalIPs); err != nil {
return err
}
return nil
}
func (k *kubernetesAnywhere) TestSetup() error {
o, err := control.Output(exec.Command("make", "--silent", "-C", k.path, "kubeconfig-path"))
if err != nil {
return fmt.Errorf("Could not get kubeconfig-path: %v", err)
}
kubecfg := strings.TrimSuffix(string(o), "\n")
if err = os.Setenv("KUBECONFIG", kubecfg); err != nil {
return err
}
return nil
}
func (k *kubernetesAnywhere) Down() error {
err := control.FinishRunning(exec.Command("make", "-C", k.path, "kubeconfig-path"))
if err != nil {
// This is expected if the cluster doesn't exist.
return nil
}
return control.FinishRunning(exec.Command("make", "-C", k.path, "FORCE_DESTROY=y", "destroy"))
}
func (k *kubernetesAnywhere) GetClusterCreated(gcpProject string) (time.Time, error) {
return time.Time{}, errors.New("not implemented")
}
func (*kubernetesAnywhere) KubectlCommand() (*exec.Cmd, error) { return nil, nil }
|
[
"\"JENKINS_GCE_SSH_PRIVATE_KEY_FILE\"",
"\"USER\""
] |
[] |
[
"USER",
"JENKINS_GCE_SSH_PRIVATE_KEY_FILE"
] |
[]
|
["USER", "JENKINS_GCE_SSH_PRIVATE_KEY_FILE"]
|
go
| 2 | 0 | |
vendor/github.com/mackerelio/golib/pluginutil/tempfile_test.go
|
package pluginutil
import (
"os"
"testing"
)
func TestGenerateTempfilePathWithBase(t *testing.T) {
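// With MACKEREL_PLUGIN_WORKDIR unset, PluginWorkDir should fall back to os.TempDir().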
origDir := os.Getenv("MACKEREL_PLUGIN_WORKDIR")
os.Setenv("MACKEREL_PLUGIN_WORKDIR", "")
defer os.Setenv("MACKEREL_PLUGIN_WORKDIR", origDir)
expect1 := os.TempDir()
defaultPath := PluginWorkDir()
if defaultPath != expect1 {
t.Errorf("PluginWorkDir() should be %s, but: %s", expect1, defaultPath)
}
os.Setenv("MACKEREL_PLUGIN_WORKDIR", "/SOME-SPECIAL-PATH")
expect2 := "/SOME-SPECIAL-PATH"
pathFromEnv := PluginWorkDir()
if pathFromEnv != expect2 {
t.Errorf("PluginWorkDir() should be %s, but: %s", expect2, pathFromEnv)
}
}
|
[
"\"MACKEREL_PLUGIN_WORKDIR\""
] |
[] |
[
"MACKEREL_PLUGIN_WORKDIR"
] |
[]
|
["MACKEREL_PLUGIN_WORKDIR"]
|
go
| 1 | 0 | |
manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'voting.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
locations/locations_service_test.go
|
package locations
import (
"os"
"testing"
"github.com/Financial-Times/neo-utils-go/neoutils"
"github.com/stretchr/testify/assert"
)
const (
locationUUID = "12345"
newLocationUUID = "123456"
tmeID = "TME_ID"
newTmeID = "NEW_TME_ID"
prefLabel = "Test"
specialCharPrefLabel = "Test 'special chars"
)
var defaultTypes = []string{"Thing", "Concept", "Location"}
func TestConnectivityCheck(t *testing.T) {
locationsDriver := getLocationsCypherDriver(t)
err := locationsDriver.Check()
assert.NoError(t, err, "Unexpected error on connectivity check")
}
func TestPrefLabelIsCorrectlyWritten(t *testing.T) {
locationsDriver := getLocationsCypherDriver(t)
alternativeIdentifiers := alternativeIdentifiers{UUIDS: []string{locationUUID}}
locationToWrite := Location{UUID: locationUUID, PrefLabel: prefLabel, AlternativeIdentifiers: alternativeIdentifiers}
err := locationsDriver.Write(locationToWrite)
assert.NoError(t, err, "ERROR happened during write time")
storedLocation, found, err := locationsDriver.Read(locationUUID)
assert.NoError(t, err, "ERROR happened during read time")
assert.Equal(t, true, found)
assert.NotEmpty(t, storedLocation)
assert.Equal(t, prefLabel, storedLocation.(Location).PrefLabel, "PrefLabel should be "+prefLabel)
cleanUp(t, locationUUID, locationsDriver)
}
func TestPrefLabelSpecialCharactersAreHandledByCreate(t *testing.T) {
locationsDriver := getLocationsCypherDriver(t)
alternativeIdentifiers := alternativeIdentifiers{TME: []string{}, UUIDS: []string{locationUUID}}
locationToWrite := Location{UUID: locationUUID, PrefLabel: specialCharPrefLabel, AlternativeIdentifiers: alternativeIdentifiers}
assert.NoError(t, locationsDriver.Write(locationToWrite), "Failed to write location")
//add default types that will be automatically added by the writer
locationToWrite.Types = defaultTypes
//check if locationToWrite is the same with the one inside the DB
readLocationForUUIDAndCheckFieldsMatch(t, locationsDriver, locationUUID, locationToWrite)
cleanUp(t, locationUUID, locationsDriver)
}
func TestCreateCompleteLocationWithPropsAndIdentifiers(t *testing.T) {
locationsDriver := getLocationsCypherDriver(t)
alternativeIdentifiers := alternativeIdentifiers{TME: []string{tmeID}, UUIDS: []string{locationUUID}}
locationToWrite := Location{UUID: locationUUID, PrefLabel: prefLabel, AlternativeIdentifiers: alternativeIdentifiers}
assert.NoError(t, locationsDriver.Write(locationToWrite), "Failed to write location")
//add default types that will be automatically added by the writer
locationToWrite.Types = defaultTypes
//check if locationToWrite is the same with the one inside the DB
readLocationForUUIDAndCheckFieldsMatch(t, locationsDriver, locationUUID, locationToWrite)
cleanUp(t, locationUUID, locationsDriver)
}
func TestUpdateWillRemovePropertiesAndIdentifiersNoLongerPresent(t *testing.T) {
locationsDriver := getLocationsCypherDriver(t)
allAlternativeIdentifiers := alternativeIdentifiers{TME: []string{}, UUIDS: []string{locationUUID}}
locationToWrite := Location{UUID: locationUUID, PrefLabel: prefLabel, AlternativeIdentifiers: allAlternativeIdentifiers}
assert.NoError(t, locationsDriver.Write(locationToWrite), "Failed to write location")
//add default types that will be automatically added by the writer
locationToWrite.Types = defaultTypes
readLocationForUUIDAndCheckFieldsMatch(t, locationsDriver, locationUUID, locationToWrite)
tmeAlternativeIdentifiers := alternativeIdentifiers{TME: []string{tmeID}, UUIDS: []string{locationUUID}}
updatedLocation := Location{UUID: locationUUID, PrefLabel: specialCharPrefLabel, AlternativeIdentifiers: tmeAlternativeIdentifiers}
assert.NoError(t, locationsDriver.Write(updatedLocation), "Failed to write updated location")
//add default types that will be automatically added by the writer
updatedLocation.Types = defaultTypes
readLocationForUUIDAndCheckFieldsMatch(t, locationsDriver, locationUUID, updatedLocation)
cleanUp(t, locationUUID, locationsDriver)
}
func TestDelete(t *testing.T) {
locationsDriver := getLocationsCypherDriver(t)
alternativeIdentifiers := alternativeIdentifiers{TME: []string{tmeID}, UUIDS: []string{locationUUID}}
locationToDelete := Location{UUID: locationUUID, PrefLabel: prefLabel, AlternativeIdentifiers: alternativeIdentifiers}
assert.NoError(t, locationsDriver.Write(locationToDelete), "Failed to write location")
found, err := locationsDriver.Delete(locationUUID)
assert.True(t, found, "Didn't manage to delete location for uuid %", locationUUID)
assert.NoError(t, err, "Error deleting location for uuid %s", locationUUID)
p, found, err := locationsDriver.Read(locationUUID)
assert.Equal(t, Location{}, p, "Found location %s which should have been deleted", p)
assert.False(t, found, "Found location for uuid %s which should have been deleted", locationUUID)
assert.NoError(t, err, "Error trying to find location for uuid %s", locationUUID)
}
func TestCount(t *testing.T) {
locationsDriver := getLocationsCypherDriver(t)
alternativeIds := alternativeIdentifiers{TME: []string{tmeID}, UUIDS: []string{locationUUID}}
locationOneToCount := Location{UUID: locationUUID, PrefLabel: prefLabel, AlternativeIdentifiers: alternativeIds}
assert.NoError(t, locationsDriver.Write(locationOneToCount), "Failed to write location")
nr, err := locationsDriver.Count()
assert.Equal(t, 1, nr, "Should be 1 location in DB - count differs")
assert.NoError(t, err, "An unexpected error occurred during count")
newAlternativeIds := alternativeIdentifiers{TME: []string{newTmeID}, UUIDS: []string{newLocationUUID}}
locationTwoToCount := Location{UUID: newLocationUUID, PrefLabel: specialCharPrefLabel, AlternativeIdentifiers: newAlternativeIds}
assert.NoError(t, locationsDriver.Write(locationTwoToCount), "Failed to write location")
nr, err = locationsDriver.Count()
assert.Equal(t, 2, nr, "Should be 2 locations in DB - count differs")
assert.NoError(t, err, "An unexpected error occurred during count")
cleanUp(t, locationUUID, locationsDriver)
cleanUp(t, newLocationUUID, locationsDriver)
}
func readLocationForUUIDAndCheckFieldsMatch(t *testing.T, locationsDriver service, uuid string, expectedLocation Location) {
storedLocation, found, err := locationsDriver.Read(uuid)
assert.NoError(t, err, "Error finding location for uuid %s", uuid)
assert.True(t, found, "Didn't find location for uuid %s", uuid)
assert.Equal(t, expectedLocation, storedLocation, "locations should be the same")
}
func getLocationsCypherDriver(t *testing.T) service {
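// Use NEO4J_TEST_URL if set, otherwise fall back to the default local Neo4j endpoint.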
url := os.Getenv("NEO4J_TEST_URL")
if url == "" {
url = "http://localhost:7474/db/data"
}
conf := neoutils.DefaultConnectionConfig()
conf.Transactional = false
db, err := neoutils.Connect(url, conf)
assert.NoError(t, err, "Failed to connect to Neo4j")
service := NewCypherLocationsService(db)
service.Initialise()
return service
}
func cleanUp(t *testing.T, uuid string, locationsDriver service) {
found, err := locationsDriver.Delete(uuid)
assert.True(t, found, "Didn't manage to delete location for uuid %", uuid)
assert.NoError(t, err, "Error deleting location for uuid %s", uuid)
}
|
[
"\"NEO4J_TEST_URL\""
] |
[] |
[
"NEO4J_TEST_URL"
] |
[]
|
["NEO4J_TEST_URL"]
|
go
| 1 | 0 | |
server/serve.go
|
package server
import (
"log"
"os"
"time"
"github.com/fatih/color"
"github.com/labstack/echo"
"github.com/mholt/archiver"
"github.com/yeo/baja/utils"
)
type Server struct {
staticPath string
}
func KeyAuth() {
}
func Deploy(c echo.Context) {
apikey := c.FormValue("apikey")
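// Reject requests whose apikey form value does not match the APIKEY environment variable.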
if os.Getenv("APIKEY") != apikey {
c.String(401, "Unauthrozied")
return
}
file, err := c.FormFile("bundle")
if err != nil {
c.String(400, "Error")
return
}
src, err := file.Open()
if err != nil {
c.String(400, "Error")
return
}
defer src.Close()
err = archiver.Unarchive("test.tar.gz", "test")
if err != nil {
c.String(500, "Error")
}
}
func router(e *echo.Echo, s *Server) {
//e.Static("/deploy", Deploy)
e.Static("/", s.staticPath)
}
func Run(addr, public string) {
e := echo.New()
s := &Server{
staticPath: public,
}
router(e, s)
hostname, _ := os.Hostname()
log.Printf("Listen on http://%s:%d", hostname, 2803)
e.Logger.Fatal(e.Start(addr))
}
// Serve executes templates and content to generate and serve our real static content
func Serve(addr, directory string) int {
w := utils.Watch([]string{"./content", "./themes"})
// Build our site immediately to serve dev
//go Build()
go func() {
for {
select {
case event := <-w.Event:
color.Yellow("Receive file change event %s. Rebuild", event)
//Build()
case err := <-w.Error:
color.Red("Watch error:%s", err)
case <-w.Closed:
return
}
}
}()
go func() {
// Start the watching process - it'll check for changes every 100ms.
if err := w.Start(time.Millisecond * 100); err != nil {
log.Fatalln(err)
}
}()
Run(addr, directory)
return 0
}
|
[
"\"APIKEY\""
] |
[] |
[
"APIKEY"
] |
[]
|
["APIKEY"]
|
go
| 1 | 0 | |
gsuite/provider.go
|
package gsuite
import (
"encoding/json"
"fmt"
"log"
"os"
"github.com/hashicorp/terraform-plugin-sdk/helper/schema"
"github.com/pkg/errors"
)
// Provider returns the actual provider instance.
func Provider() *schema.Provider {
p := &schema.Provider{
Schema: map[string]*schema.Schema{
"credentials": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.MultiEnvDefaultFunc([]string{
"GOOGLE_CREDENTIALS",
"GOOGLE_CLOUD_KEYFILE_JSON",
"GCLOUD_KEYFILE_JSON",
"GOOGLE_APPLICATION_CREDENTIALS",
}, nil),
ValidateFunc: validateCredentials,
},
"impersonated_user_email": {
Type: schema.TypeString,
Optional: true,
},
"oauth_scopes": {
Type: schema.TypeSet,
Elem: &schema.Schema{Type: schema.TypeString},
Optional: true,
},
"customer_id": {
Type: schema.TypeString,
Optional: true,
},
},
DataSourcesMap: map[string]*schema.Resource{
"gsuite_group": dataGroup(),
"gsuite_group_settings": dataGroupSettings(),
"gsuite_user": dataUser(),
"gsuite_user_attributes": dataUserAttributes(),
},
ResourcesMap: map[string]*schema.Resource{
"gsuite_domain": resourceDomain(),
"gsuite_group": resourceGroup(),
"gsuite_group_member": resourceGroupMember(),
"gsuite_group_members": resourceGroupMembers(),
"gsuite_group_settings": resourceGroupSettings(),
"gsuite_user": resourceUser(),
"gsuite_user_schema": resourceUserSchema(),
},
}
p.ConfigureFunc = func(d *schema.ResourceData) (interface{}, error) {
terraformVersion := p.TerraformVersion
if terraformVersion == "" {
// Terraform 0.12 introduced this field to the protocol
// We can therefore assume that if it's missing it's 0.10 or 0.11
terraformVersion = "0.11+compatible"
}
return providerConfigure(d, terraformVersion)
}
return p
}
func oauthScopesFromConfigOrDefault(oauthScopesSet *schema.Set) []string {
oauthScopes := convertStringSet(oauthScopesSet)
if len(oauthScopes) == 0 {
log.Printf("[INFO] No Oauth Scopes provided. Using default oauth scopes.")
oauthScopes = defaultOauthScopes
}
return oauthScopes
}
func providerConfigure(d *schema.ResourceData, terraformVersion string) (interface{}, error) {
var impersonatedUserEmail string
var customerID string
credentials := d.Get("credentials").(string)
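// Prefer the provider configuration; fall back to the IMPERSONATED_USER_EMAIL environment variable.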
if v, ok := d.GetOk("impersonated_user_email"); ok {
impersonatedUserEmail = v.(string)
} else {
if len(os.Getenv("IMPERSONATED_USER_EMAIL")) > 0 {
impersonatedUserEmail = os.Getenv("IMPERSONATED_USER_EMAIL")
}
}
// There shouldn't be a need to set up the customer ID in the configuration,
// but we leave the possibility to specify it explicitly.
// By default we use my_customer as customer ID, which means the API will use
// the G Suite customer ID associated with the impersonating account.
if v, ok := d.GetOk("customer_id"); ok {
customerID = v.(string)
} else {
log.Printf("[INFO] No Customer ID provided. Using my_customer.")
customerID = "my_customer"
}
oauthScopes := oauthScopesFromConfigOrDefault(d.Get("oauth_scopes").(*schema.Set))
config := Config{
Credentials: credentials,
ImpersonatedUserEmail: impersonatedUserEmail,
OauthScopes: oauthScopes,
CustomerId: customerID,
}
if err := config.loadAndValidate(terraformVersion); err != nil {
return nil, errors.Wrap(err, "failed to load config")
}
return &config, nil
}
func validateCredentials(v interface{}, k string) (warnings []string, errors []error) {
if v == nil || v.(string) == "" {
return
}
creds := v.(string)
// if this is a path and we can stat it, assume it's ok
if _, err := os.Stat(creds); err == nil {
return
}
var account accountFile
if err := json.Unmarshal([]byte(creds), &account); err != nil {
errors = append(errors,
fmt.Errorf("credentials are not valid JSON '%s': %s", creds, err))
}
return
}
|
[
"\"IMPERSONATED_USER_EMAIL\"",
"\"IMPERSONATED_USER_EMAIL\""
] |
[] |
[
"IMPERSONATED_USER_EMAIL"
] |
[]
|
["IMPERSONATED_USER_EMAIL"]
|
go
| 1 | 0 | |
src/templatesite/settings.py
|
"""
Django settings for templatesite project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import logging
from logging.handlers import SysLogHandler
SYSLOG_ADDRESS = (
os.environ.get('SYSLOG_HOST', 'localhost'),
int(os.environ.get('SYSLOG_PORT', 514)),
)
# Add a special logger to log related occurrences in settings
formatter = logging.Formatter('SETTINGS %(levelname)-8s %(message)s')
settings_logger = logging.getLogger('settings')
if not os.environ.get('CONSOLE_LOGS'):
handler = SysLogHandler(address=SYSLOG_ADDRESS)
handler.setFormatter(formatter)
settings_logger.addHandler(handler)
# Log settings also in stdout
handler = logging.StreamHandler()
handler.setFormatter(formatter)
settings_logger.addHandler(handler)
settings_logger.setLevel(logging.INFO)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
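# DEBUG_MODE is treated as enabled whenever it is set to any non-empty value.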
DEBUG = bool(os.environ.get('DEBUG_MODE', False))
if DEBUG:
settings_logger.critical('STARTING SERVER IN DEBUG MODE')
ALLOWED_HOSTS = []
allowed_hosts = os.environ.get('ALLOWED_HOSTS', [])
if allowed_hosts:
ALLOWED_HOSTS = allowed_hosts.split(',')
settings_logger.info('ALLOWED_HOSTS: {}'.format(ALLOWED_HOSTS))
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_prometheus',
'rest_framework',
'web',
]
MIDDLEWARE = [
'django_prometheus.middleware.PrometheusBeforeMiddleware',
'log_request_id.middleware.RequestIDMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django_prometheus.middleware.PrometheusAfterMiddleware',
]
ROOT_URLCONF = 'templatesite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'templatesite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
if 'POSTGRES_HOST' not in os.environ:
settings_logger.warning('No DB configured. This may be initialisation')
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': os.environ.get('POSTGRES_DB', 'WRONG_DB'),
'USER': os.environ.get('POSTGRES_USER', 'WRONG_USER'),
'HOST': os.environ.get('POSTGRES_HOST'),
'PORT': int(os.environ.get('DATABASE_PORT', 5432)),
'PASSWORD': os.environ.get('POSTGRES_PASSWORD', 'WRONG_PASSWORD'),
},
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'ru-ru'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = '/opt/static/'
MEDIA_ROOT = '/opt/media/'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'request_id': {
'()': 'log_request_id.filters.RequestIDFilter'
}
},
'formatters': {
'standard': {
'format': 'templatesite: %(levelname)-8s [%(asctime)s] [%(request_id)s] %(name)s: %(message)s'
},
},
'handlers': {
# Only send to syslog info or higher
'syslog': {
'level': 'INFO',
'class': 'logging.handlers.SysLogHandler',
'address': SYSLOG_ADDRESS,
'filters': ['request_id'],
'formatter': 'standard',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'filters': ['request_id'],
'formatter': 'standard',
},
},
'loggers': {
# Top level for the application. Remember to set on
# all loggers
'': {
'handlers': ['syslog'],
'level': 'DEBUG',
'propagate': False,
},
# For usage on runserver (dev-server)
'django.server': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
},
}
if os.environ.get('CONSOLE_LOGS'):
# Log all to the console as well. This is used while running unit tests
del LOGGING['handlers']['syslog']
LOGGING['loggers']['']['handlers'] = ['console']
LOG_REQUESTS = True
# The incoming request ID is read from the X-REQUEST-ID header
LOG_REQUEST_ID_HEADER = "HTTP_X_REQUEST_ID"
GENERATE_REQUEST_ID_IF_NOT_IN_HEADER = True
REQUEST_ID_RESPONSE_HEADER = "X-REQUEST-ID"
|
[] |
[] |
[
"DEBUG_MODE",
"ALLOWED_HOSTS",
"DJANGO_SECRET_KEY",
"POSTGRES_USER",
"POSTGRES_HOST",
"POSTGRES_DB",
"CONSOLE_LOGS",
"SYSLOG_PORT",
"SYSLOG_HOST",
"DATABASE_PORT",
"POSTGRES_PASSWORD"
] |
[]
|
["DEBUG_MODE", "ALLOWED_HOSTS", "DJANGO_SECRET_KEY", "POSTGRES_USER", "POSTGRES_HOST", "POSTGRES_DB", "CONSOLE_LOGS", "SYSLOG_PORT", "SYSLOG_HOST", "DATABASE_PORT", "POSTGRES_PASSWORD"]
|
python
| 11 | 0 | |
baselines/logger.py
|
import os
import sys
import shutil
import os.path as osp
import json
import time
import datetime
import tempfile
LOG_OUTPUT_FORMATS = ['stdout', 'log', 'csv']
# Also valid: json, tensorboard
DEBUG = 10
INFO = 20
WARN = 30
ERROR = 40
DISABLED = 50
class KVWriter(object):
def writekvs(self, kvs):
raise NotImplementedError
class SeqWriter(object):
def writeseq(self, seq):
raise NotImplementedError
class HumanOutputFormat(KVWriter, SeqWriter):
def __init__(self, filename_or_file):
if isinstance(filename_or_file, str):
self.file = open(filename_or_file, 'wt')
self.own_file = True
else:
assert hasattr(filename_or_file, 'read'), 'expected file or str, got %s'%filename_or_file
self.file = filename_or_file
self.own_file = False
def writekvs(self, kvs):
# Create strings for printing
key2str = {}
for (key, val) in sorted(kvs.items()):
if isinstance(val, float):
valstr = '%-8.3g' % (val,)
else:
valstr = str(val)
key2str[self._truncate(key)] = self._truncate(valstr)
# Find max widths
if len(key2str) == 0:
print('WARNING: tried to write empty key-value dict')
return
else:
keywidth = max(map(len, key2str.keys()))
valwidth = max(map(len, key2str.values()))
# Write out the data
dashes = '-' * (keywidth + valwidth + 7)
lines = [dashes]
for (key, val) in sorted(key2str.items()):
lines.append('| %s%s | %s%s |' % (
key,
' ' * (keywidth - len(key)),
val,
' ' * (valwidth - len(val)),
))
lines.append(dashes)
self.file.write('\n'.join(lines) + '\n')
# Flush the output to the file
self.file.flush()
def _truncate(self, s):
return s[:20] + '...' if len(s) > 23 else s
def writeseq(self, seq):
for arg in seq:
self.file.write(arg)
self.file.write('\n')
self.file.flush()
def close(self):
if self.own_file:
self.file.close()
class JSONOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, 'wt')
def writekvs(self, kvs):
for k, v in sorted(kvs.items()):
if hasattr(v, 'dtype'):
v = v.tolist()
kvs[k] = float(v)
self.file.write(json.dumps(kvs) + '\n')
self.file.flush()
def close(self):
self.file.close()
class CSVOutputFormat(KVWriter):
def __init__(self, filename):
self.file = open(filename, 'w+t')
self.keys = []
self.sep = ','
def writekvs(self, kvs):
# Add our current row to the history
extra_keys = kvs.keys() - self.keys
if extra_keys:
self.keys.extend(extra_keys)
self.file.seek(0)
lines = self.file.readlines()
self.file.seek(0)
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(',')
self.file.write(k)
self.file.write('\n')
for line in lines[1:]:
self.file.write(line[:-1])
self.file.write(self.sep * len(extra_keys))
self.file.write('\n')
for (i, k) in enumerate(self.keys):
if i > 0:
self.file.write(',')
v = kvs.get(k)
if v:
self.file.write(str(v))
self.file.write('\n')
self.file.flush()
def close(self):
self.file.close()
class TensorBoardOutputFormat(KVWriter):
"""
Dumps key/value pairs into TensorBoard's numeric format.
"""
def __init__(self, dir):
os.makedirs(dir, exist_ok=True)
self.dir = dir
self.step = 1
prefix = 'events'
path = osp.join(osp.abspath(dir), prefix)
import tensorflow as tf
from tensorflow.python import pywrap_tensorflow
from tensorflow.core.util import event_pb2
from tensorflow.python.util import compat
self.tf = tf
self.event_pb2 = event_pb2
self.pywrap_tensorflow = pywrap_tensorflow
self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
def writekvs(self, kvs):
def summary_val(k, v):
kwargs = {'tag': k, 'simple_value': float(v)}
return self.tf.Summary.Value(**kwargs)
summary = self.tf.Summary(value=[summary_val(k, v) for k, v in kvs.items()])
event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
event.step = self.step # is there any reason why you'd want to specify the step?
self.writer.WriteEvent(event)
self.writer.Flush()
self.step += 1
def close(self):
if self.writer:
self.writer.Close()
self.writer = None
def make_output_format(format, ev_dir):
from mpi4py import MPI
os.makedirs(ev_dir, exist_ok=True)
rank = MPI.COMM_WORLD.Get_rank()
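# Only rank 0 may write json, csv, or tensorboard output; log files get a per-rank suffix.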
if format == 'stdout':
return HumanOutputFormat(sys.stdout)
elif format == 'log':
suffix = "" if rank==0 else ("-mpi%03i"%rank)
return HumanOutputFormat(osp.join(ev_dir, 'log%s.txt' % suffix))
elif format == 'json':
assert rank==0
return JSONOutputFormat(osp.join(ev_dir, 'progress.json'))
elif format == 'csv':
assert rank==0
return CSVOutputFormat(osp.join(ev_dir, 'progress.csv'))
elif format == 'tensorboard':
assert rank==0
return TensorBoardOutputFormat(osp.join(ev_dir, 'tb'))
else:
raise ValueError('Unknown format specified: %s' % (format,))
# ================================================================
# API
# ================================================================
def logkv(key, val):
"""
Log a value of some diagnostic
Call this once for each diagnostic quantity, each iteration
"""
Logger.CURRENT.logkv(key, val)
def logkvs(d):
"""
Log a dictionary of key-value pairs
"""
for (k, v) in d.items():
logkv(k, v)
def dumpkvs():
"""
Write all of the diagnostics from the current iteration
level: int. (see logger.py docs) If the global logger level is higher than
the level argument here, don't print to stdout.
"""
Logger.CURRENT.dumpkvs()
def getkvs():
return Logger.CURRENT.name2val
def log(*args, level=INFO):
"""
Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).
"""
Logger.CURRENT.log(*args, level=level)
def debug(*args):
log(*args, level=DEBUG)
def info(*args):
log(*args, level=INFO)
def warn(*args):
log(*args, level=WARN)
def error(*args):
log(*args, level=ERROR)
def set_level(level):
"""
Set logging threshold on current logger.
"""
Logger.CURRENT.set_level(level)
def get_dir():
"""
Get directory that log files are being written to.
will be None if there is no output directory (i.e., if you didn't call start)
"""
return Logger.CURRENT.get_dir()
record_tabular = logkv
dump_tabular = dumpkvs
# ================================================================
# Backend
# ================================================================
class Logger(object):
DEFAULT = None # A logger with no output files. (See right below class definition)
# So that you can still log to the terminal without setting up any output files
CURRENT = None # Current logger being used by the free functions above
def __init__(self, dir, output_formats):
self.name2val = {} # values this iteration
self.level = INFO
self.dir = dir
self.output_formats = output_formats
# Logging API, forwarded
# ----------------------------------------
def logkv(self, key, val):
self.name2val[key] = val
def dumpkvs(self):
if self.level == DISABLED: return
for fmt in self.output_formats:
if isinstance(fmt, KVWriter):
fmt.writekvs(self.name2val)
self.name2val.clear()
def log(self, *args, level=INFO):
if self.level <= level:
self._do_log(args)
# Configuration
# ----------------------------------------
def set_level(self, level):
self.level = level
def get_dir(self):
return self.dir
def close(self):
for fmt in self.output_formats:
fmt.close()
# Misc
# ----------------------------------------
def _do_log(self, args):
for fmt in self.output_formats:
if isinstance(fmt, SeqWriter):
fmt.writeseq(map(str, args))
Logger.DEFAULT = Logger.CURRENT = Logger(dir=None, output_formats=[HumanOutputFormat(sys.stdout)])
def configure(dir=None, format_strs=None):
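# Resolve the log directory: explicit argument, then OPENAI_LOGDIR, then a timestamped temp directory.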
if dir is None:
dir = os.getenv('OPENAI_LOGDIR')
if dir is None:
dir = osp.join(tempfile.gettempdir(),
datetime.datetime.now().strftime("openai-%Y-%m-%d-%H-%M-%S-%f"))
assert isinstance(dir, str)
os.makedirs(dir, exist_ok=True)
if format_strs is None:
strs = os.getenv('OPENAI_LOG_FORMAT')
format_strs = strs.split(',') if strs else LOG_OUTPUT_FORMATS
output_formats = [make_output_format(f, dir) for f in format_strs]
Logger.CURRENT = Logger(dir=dir, output_formats=output_formats)
log('Logging to %s'%dir)
def reset():
if Logger.CURRENT is not Logger.DEFAULT:
Logger.CURRENT.close()
Logger.CURRENT = Logger.DEFAULT
log('Reset logger')
class scoped_configure(object):
def __init__(self, dir=None, format_strs=None):
self.dir = dir
self.format_strs = format_strs
self.prevlogger = None
def __enter__(self):
self.prevlogger = Logger.CURRENT
configure(dir=self.dir, format_strs=self.format_strs)
def __exit__(self, *args):
Logger.CURRENT.close()
Logger.CURRENT = self.prevlogger
# ================================================================
def _demo():
info("hi")
debug("shouldn't appear")
set_level(DEBUG)
debug("should appear")
dir = "/tmp/testlogging"
if os.path.exists(dir):
shutil.rmtree(dir)
configure(dir=dir)
logkv("a", 3)
logkv("b", 2.5)
dumpkvs()
logkv("b", -2.5)
logkv("a", 5.5)
dumpkvs()
info("^^^ should see a = 5.5")
logkv("b", -2.5)
dumpkvs()
logkv("a", "longasslongasslongasslongasslongasslongassvalue")
dumpkvs()
# ================================================================
# Readers
# ================================================================
def read_json(fname):
import pandas
ds = []
with open(fname, 'rt') as fh:
for line in fh:
ds.append(json.loads(line))
return pandas.DataFrame(ds)
def read_csv(fname):
import pandas
return pandas.read_csv(fname, index_col=None, comment='#')
def read_tb(path):
"""
path : a tensorboard file OR a directory, where we will find all TB files
of the form events.*
"""
import pandas
import numpy as np
from glob import glob
from collections import defaultdict
import tensorflow as tf
if osp.isdir(path):
fnames = glob(osp.join(path, "events.*"))
elif osp.basename(path).startswith("events."):
fnames = [path]
else:
raise NotImplementedError("Expected tensorboard file or directory containing them. Got %s"%path)
tag2pairs = defaultdict(list)
maxstep = 0
for fname in fnames:
for summary in tf.train.summary_iterator(fname):
if summary.step > 0:
for v in summary.summary.value:
pair = (summary.step, v.simple_value)
tag2pairs[v.tag].append(pair)
maxstep = max(summary.step, maxstep)
data = np.empty((maxstep, len(tag2pairs)))
data[:] = np.nan
tags = sorted(tag2pairs.keys())
for (colidx,tag) in enumerate(tags):
pairs = tag2pairs[tag]
for (step, value) in pairs:
data[step-1, colidx] = value
return pandas.DataFrame(data, columns=tags)
if __name__ == "__main__":
_demo()
|
[] |
[] |
[
"OPENAI_LOGDIR",
"OPENAI_LOG_FORMAT"
] |
[]
|
["OPENAI_LOGDIR", "OPENAI_LOG_FORMAT"]
|
python
| 2 | 0 | |
pkg/kubesphere/tasks.go
|
/*
Copyright 2021 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubesphere
import (
"context"
"encoding/base64"
"fmt"
kubekeyapiv1alpha2 "github.com/kubesphere/kubekey/apis/kubekey/v1alpha2"
"github.com/kubesphere/kubekey/pkg/common"
"github.com/kubesphere/kubekey/pkg/core/connector"
"github.com/kubesphere/kubekey/pkg/core/logger"
ksv2 "github.com/kubesphere/kubekey/pkg/kubesphere/v2"
ksv3 "github.com/kubesphere/kubekey/pkg/kubesphere/v3"
"github.com/kubesphere/kubekey/pkg/version/kubesphere"
"github.com/kubesphere/kubekey/pkg/version/kubesphere/templates"
"github.com/pkg/errors"
yamlV2 "gopkg.in/yaml.v2"
"os"
"path/filepath"
"strings"
"time"
)
type AddInstallerConfig struct {
common.KubeAction
}
func (a *AddInstallerConfig) Execute(runtime connector.Runtime) error {
configurationBase64 := base64.StdEncoding.EncodeToString([]byte(a.KubeConf.Cluster.KubeSphere.Configurations))
if _, err := runtime.GetRunner().SudoCmd(
fmt.Sprintf("echo %s | base64 -d >> /etc/kubernetes/addons/kubesphere.yaml", configurationBase64),
false); err != nil {
return errors.Wrap(errors.WithStack(err), "add config to ks-installer manifests failed")
}
return nil
}
type CreateNamespace struct {
common.KubeAction
}
func (c *CreateNamespace) Execute(runtime connector.Runtime) error {
_, err := runtime.GetRunner().SudoCmd(`cat <<EOF | /usr/local/bin/kubectl apply -f -
apiVersion: v1
kind: Namespace
metadata:
name: kubesphere-system
---
apiVersion: v1
kind: Namespace
metadata:
name: kubesphere-monitoring-system
EOF
`, false)
if err != nil {
return errors.Wrap(errors.WithStack(err), "create namespace: kubesphere-system and kubesphere-monitoring-system")
}
return nil
}
type Setup struct {
common.KubeAction
}
func (s *Setup) Execute(runtime connector.Runtime) error {
filePath := filepath.Join(common.KubeAddonsDir, templates.KsInstaller.Name())
var addrList []string
var tlsDisable bool
var port string
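// Collect etcd endpoints and create the kube-etcd-client-certs secret according to the etcd deployment type.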
switch s.KubeConf.Cluster.Etcd.Type {
case kubekeyapiv1alpha2.KubeKey:
for _, host := range runtime.GetHostsByRole(common.ETCD) {
addrList = append(addrList, host.GetInternalAddress())
}
caFile := "/etc/ssl/etcd/ssl/ca.pem"
certFile := fmt.Sprintf("/etc/ssl/etcd/ssl/node-%s.pem", runtime.GetHostsByRole(common.ETCD)[0].GetName())
keyFile := fmt.Sprintf("/etc/ssl/etcd/ssl/node-%s-key.pem", runtime.GetHostsByRole(common.ETCD)[0].GetName())
if output, err := runtime.GetRunner().SudoCmd(
fmt.Sprintf("/usr/local/bin/kubectl -n kubesphere-monitoring-system create secret generic kube-etcd-client-certs "+
"--from-file=etcd-client-ca.crt=%s "+
"--from-file=etcd-client.crt=%s "+
"--from-file=etcd-client.key=%s", caFile, certFile, keyFile), true); err != nil {
if !strings.Contains(output, "exists") {
return err
}
}
case kubekeyapiv1alpha2.Kubeadm:
for _, host := range runtime.GetHostsByRole(common.Master) {
addrList = append(addrList, host.GetInternalAddress())
}
caFile := "/etc/kubernetes/pki/etcd/ca.crt"
certFile := "/etc/kubernetes/pki/etcd/healthcheck-client.crt"
keyFile := "/etc/kubernetes/pki/etcd/healthcheck-client.key"
if output, err := runtime.GetRunner().SudoCmd(
fmt.Sprintf("/usr/local/bin/kubectl -n kubesphere-monitoring-system create secret generic kube-etcd-client-certs "+
"--from-file=etcd-client-ca.crt=%s "+
"--from-file=etcd-client.crt=%s "+
"--from-file=etcd-client.key=%s", caFile, certFile, keyFile), true); err != nil {
if !strings.Contains(output, "exists") {
return err
}
}
case kubekeyapiv1alpha2.External:
for _, endpoint := range s.KubeConf.Cluster.Etcd.External.Endpoints {
e := strings.Split(strings.TrimSpace(endpoint), "://")
s := strings.Split(e[1], ":")
port = s[1]
addrList = append(addrList, s[0])
if e[0] == "http" {
tlsDisable = true
}
}
if tlsDisable {
if output, err := runtime.GetRunner().SudoCmd("/usr/local/bin/kubectl -n kubesphere-monitoring-system create secret generic kube-etcd-client-certs", true); err != nil {
if !strings.Contains(output, "exists") {
return err
}
}
} else {
caFile := fmt.Sprintf("/etc/ssl/etcd/ssl/%s", filepath.Base(s.KubeConf.Cluster.Etcd.External.CAFile))
certFile := fmt.Sprintf("/etc/ssl/etcd/ssl/%s", filepath.Base(s.KubeConf.Cluster.Etcd.External.CertFile))
keyFile := fmt.Sprintf("/etc/ssl/etcd/ssl/%s", filepath.Base(s.KubeConf.Cluster.Etcd.External.KeyFile))
if output, err := runtime.GetRunner().SudoCmd(
fmt.Sprintf("/usr/local/bin/kubectl -n kubesphere-monitoring-system create secret generic kube-etcd-client-certs "+
"--from-file=etcd-client-ca.crt=%s "+
"--from-file=etcd-client.crt=%s "+
"--from-file=etcd-client.key=%s", caFile, certFile, keyFile), true); err != nil {
if !strings.Contains(output, "exists") {
return err
}
}
}
}
etcdEndPoint := strings.Join(addrList, ",")
if _, err := runtime.GetRunner().SudoCmd(
fmt.Sprintf("sed -i '/endpointIps/s/\\:.*/\\: %s/g' %s", etcdEndPoint, filePath),
false); err != nil {
return errors.Wrap(errors.WithStack(err), fmt.Sprintf("update etcd endpoint failed"))
}
if tlsDisable {
if _, err := runtime.GetRunner().SudoCmd(
fmt.Sprintf("sed -i '/tlsEnable/s/\\:.*/\\: false/g' %s", filePath),
false); err != nil {
return errors.Wrap(errors.WithStack(err), fmt.Sprintf("update etcd tls failed"))
}
}
if len(port) != 0 {
if _, err := runtime.GetRunner().SudoCmd(
fmt.Sprintf("sed -i 's/2379/%s/g' %s", port, filePath),
false); err != nil {
return errors.Wrap(errors.WithStack(err), fmt.Sprintf("update etcd tls failed"))
}
}
if s.KubeConf.Cluster.Registry.PrivateRegistry != "" {
PrivateRegistry := strings.Replace(s.KubeConf.Cluster.Registry.PrivateRegistry, "/", "\\/", -1)
if _, err := runtime.GetRunner().SudoCmd(
fmt.Sprintf("sed -i '/local_registry/s/\\:.*/\\: %s/g' %s", PrivateRegistry, filePath),
false); err != nil {
return errors.Wrap(errors.WithStack(err), fmt.Sprintf("add private registry: %s failed", s.KubeConf.Cluster.Registry.PrivateRegistry))
}
} else {
if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("sed -i '/local_registry/d' %s", filePath), false); err != nil {
return errors.Wrap(errors.WithStack(err), fmt.Sprintf("remove private registry failed"))
}
}
if s.KubeConf.Cluster.Registry.NamespaceOverride != "" {
if _, err := runtime.GetRunner().SudoCmd(
fmt.Sprintf("sed -i '/namespace_override/s/\\:.*/\\: %s/g' %s", s.KubeConf.Cluster.Registry.NamespaceOverride, filePath),
false); err != nil {
return errors.Wrap(errors.WithStack(err), fmt.Sprintf("add namespace override: %s failed", s.KubeConf.Cluster.Registry.NamespaceOverride))
}
} else {
if _, err := runtime.GetRunner().SudoCmd(fmt.Sprintf("sed -i '/namespace_override/d' %s", filePath), false); err != nil {
return errors.Wrap(errors.WithStack(err), fmt.Sprintf("remove namespace override failed"))
}
}
_, ok := kubesphere.CNSource[s.KubeConf.Cluster.KubeSphere.Version]
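// Use the cn zone when KKZONE is set to "cn" or the Aliyun mirror registry is configured.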
if ok && (os.Getenv("KKZONE") == "cn" || s.KubeConf.Cluster.Registry.PrivateRegistry == "registry.cn-beijing.aliyuncs.com") {
if _, err := runtime.GetRunner().SudoCmd(
fmt.Sprintf("sed -i '/zone/s/\\:.*/\\: %s/g' %s", "cn", filePath),
false); err != nil {
return errors.Wrap(errors.WithStack(err), fmt.Sprintf("add kubekey zone: %s failed", s.KubeConf.Cluster.Registry.PrivateRegistry))
}
} else {
if _, err := runtime.GetRunner().SudoCmd(
fmt.Sprintf("sed -i '/zone/d' %s", filePath),
false); err != nil {
return errors.Wrap(errors.WithStack(err), fmt.Sprintf("remove kubekey zone failed"))
}
}
switch s.KubeConf.Cluster.Kubernetes.ContainerManager {
case "docker", "containerd", "crio":
if _, err := runtime.GetRunner().SudoCmd(
fmt.Sprintf("sed -i '/containerruntime/s/\\:.*/\\: %s/g' /etc/kubernetes/addons/kubesphere.yaml", s.KubeConf.Cluster.Kubernetes.ContainerManager), false); err != nil {
return errors.Wrap(errors.WithStack(err), fmt.Sprintf("set container runtime: %s failed", s.KubeConf.Cluster.Kubernetes.ContainerManager))
}
default:
logger.Log.Message(runtime.RemoteHost().GetName(),
fmt.Sprintf("Currently, the logging module of KubeSphere does not support %s. If %s is used, the logging module will be unavailable.",
s.KubeConf.Cluster.Kubernetes.ContainerManager, s.KubeConf.Cluster.Kubernetes.ContainerManager))
}
return nil
}
type Apply struct {
common.KubeAction
}
func (a *Apply) Execute(runtime connector.Runtime) error {
filePath := filepath.Join(common.KubeAddonsDir, templates.KsInstaller.Name())
deployKubesphereCmd := fmt.Sprintf("/usr/local/bin/kubectl apply -f %s --force", filePath)
if _, err := runtime.GetRunner().SudoCmd(deployKubesphereCmd, true); err != nil {
return errors.Wrapf(errors.WithStack(err), "deploy %s failed", filePath)
}
return nil
}
type Check struct {
common.KubeAction
}
func (c *Check) Execute(runtime connector.Runtime) error {
var (
position = 1
notes = "Please wait for the installation to complete: "
)
ch := make(chan string)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
go CheckKubeSphereStatus(ctx, runtime, ch)
stop := false
for !stop {
select {
case res := <-ch:
fmt.Printf("\033[%dA\033[K", position)
fmt.Println(res)
stop = true
default:
for i := 0; i < 10; i++ {
if i < 5 {
fmt.Printf("\033[%dA\033[K", position)
output := fmt.Sprintf(
"%s%s%s",
notes,
strings.Repeat(" ", i),
">>--->",
)
fmt.Printf("%s \033[K\n", output)
time.Sleep(time.Duration(200) * time.Millisecond)
} else {
fmt.Printf("\033[%dA\033[K", position)
output := fmt.Sprintf(
"%s%s%s",
notes,
strings.Repeat(" ", 10-i),
"<---<<",
)
fmt.Printf("%s \033[K\n", output)
time.Sleep(time.Duration(200) * time.Millisecond)
}
}
}
}
return nil
}
func CheckKubeSphereStatus(ctx context.Context, runtime connector.Runtime, stopChan chan string) {
defer close(stopChan)
for {
select {
case <-ctx.Done():
stopChan <- ""
default:
_, err := runtime.GetRunner().SudoCmd(
"/usr/local/bin/kubectl exec -n kubesphere-system "+
"$(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') "+
"-- ls /kubesphere/playbooks/kubesphere_running", false)
if err == nil {
output, err := runtime.GetRunner().SudoCmd(
"/usr/local/bin/kubectl exec -n kubesphere-system "+
"$(kubectl get pod -n kubesphere-system -l app=ks-install -o jsonpath='{.items[0].metadata.name}') "+
"-- cat /kubesphere/playbooks/kubesphere_running", false)
if err == nil && output != "" {
stopChan <- output
break
}
}
}
}
}
type CleanCC struct {
common.KubeAction
}
func (c *CleanCC) Execute(runtime connector.Runtime) error {
c.KubeConf.Cluster.KubeSphere.Configurations = "\n"
return nil
}
type ConvertV2ToV3 struct {
common.KubeAction
}
func (c *ConvertV2ToV3) Execute(runtime connector.Runtime) error {
configV2Str, err := runtime.GetRunner().SudoCmd(
"/usr/local/bin/kubectl get cm -n kubesphere-system ks-installer -o jsonpath='{.data.ks-config\\.yaml}'",
false)
if err != nil {
return err
}
clusterCfgV2 := ksv2.V2{}
clusterCfgV3 := ksv3.V3{}
if err := yamlV2.Unmarshal([]byte(configV2Str), &clusterCfgV2); err != nil {
return err
}
configV3, err := MigrateConfig2to3(&clusterCfgV2, &clusterCfgV3)
if err != nil {
return err
}
c.KubeConf.Cluster.KubeSphere.Configurations = "---\n" + configV3
return nil
}
func MigrateConfig2to3(v2 *ksv2.V2, v3 *ksv3.V3) (string, error) {
v3.Etcd = ksv3.Etcd(v2.Etcd)
v3.Persistence = ksv3.Persistence(v2.Persistence)
v3.Alerting = ksv3.Alerting(v2.Alerting)
v3.Notification = ksv3.Notification(v2.Notification)
v3.LocalRegistry = v2.LocalRegistry
v3.Servicemesh = ksv3.Servicemesh(v2.Servicemesh)
v3.Devops = ksv3.Devops(v2.Devops)
v3.Openpitrix = ksv3.Openpitrix(v2.Openpitrix)
v3.Console = ksv3.Console(v2.Console)
if v2.MetricsServerNew.Enabled == "" {
if v2.MetricsServerOld.Enabled == "true" || v2.MetricsServerOld.Enabled == "True" {
v3.MetricsServer.Enabled = true
} else {
v3.MetricsServer.Enabled = false
}
} else {
if v2.MetricsServerNew.Enabled == "true" || v2.MetricsServerNew.Enabled == "True" {
v3.MetricsServer.Enabled = true
} else {
v3.MetricsServer.Enabled = false
}
}
v3.Monitoring.PrometheusMemoryRequest = v2.Monitoring.PrometheusMemoryRequest
//v3.Monitoring.PrometheusReplicas = v2.Monitoring.PrometheusReplicas
v3.Monitoring.PrometheusVolumeSize = v2.Monitoring.PrometheusVolumeSize
//v3.Monitoring.AlertmanagerReplicas = 1
v3.Common.EtcdVolumeSize = v2.Common.EtcdVolumeSize
v3.Common.MinioVolumeSize = v2.Common.MinioVolumeSize
v3.Common.MysqlVolumeSize = v2.Common.MysqlVolumeSize
v3.Common.OpenldapVolumeSize = v2.Common.OpenldapVolumeSize
v3.Common.RedisVolumSize = v2.Common.RedisVolumSize
//v3.Common.ES.ElasticsearchDataReplicas = v2.Logging.ElasticsearchDataReplicas
//v3.Common.ES.ElasticsearchMasterReplicas = v2.Logging.ElasticsearchMasterReplicas
v3.Common.ES.ElkPrefix = v2.Logging.ElkPrefix
v3.Common.ES.LogMaxAge = v2.Logging.LogMaxAge
if v2.Logging.ElasticsearchVolumeSize == "" {
v3.Common.ES.ElasticsearchDataVolumeSize = v2.Logging.ElasticsearchDataVolumeSize
v3.Common.ES.ElasticsearchMasterVolumeSize = v2.Logging.ElasticsearchMasterVolumeSize
} else {
v3.Common.ES.ElasticsearchMasterVolumeSize = "4Gi"
v3.Common.ES.ElasticsearchDataVolumeSize = v2.Logging.ElasticsearchVolumeSize
}
v3.Logging.Enabled = v2.Logging.Enabled
v3.Logging.LogsidecarReplicas = v2.Logging.LogsidecarReplicas
v3.Authentication.JwtSecret = ""
v3.Multicluster.ClusterRole = "none"
v3.Events.Ruler.Replicas = 2
var clusterConfiguration = ksv3.ClusterConfig{
ApiVersion: "installer.kubesphere.io/v1alpha1",
Kind: "ClusterConfiguration",
Metadata: ksv3.Metadata{
Name: "ks-installer",
Namespace: "kubesphere-system",
Label: ksv3.Label{Version: "v3.0.0"},
},
Spec: v3,
}
configV3, err := yamlV2.Marshal(clusterConfiguration)
if err != nil {
return "", err
}
return string(configV3), nil
}
|
[
"\"KKZONE\""
] |
[] |
[
"KKZONE"
] |
[]
|
["KKZONE"]
|
go
| 1 | 0 | |
python/rolling_pin/tools.py
|
from typing import Any, Dict, Iterable, List, Union
import pydot
import logging
import os
import re
from collections import OrderedDict
from pathlib import Path
from IPython.display import HTML, Image
LOG_LEVEL = os.environ.get('LOG_LEVEL', 'WARNING').upper()
logging.basicConfig(level=LOG_LEVEL)
LOGGER = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
'''
Contains basic functions for more complex ETL functions and classes.
'''
# COLOR-SCHEME------------------------------------------------------------------
COLOR_SCHEME = dict(
background='#242424',
node='#343434',
node_font='#B6ECF3',
node_value='#343434',
node_value_font='#DE958E',
edge='#B6ECF3',
edge_value='#DE958E',
node_library_font='#DE958E',
node_subpackage_font='#A0D17B',
node_module_font='#B6ECF3',
edge_library='#DE958E',
edge_subpackage='#A0D17B',
edge_module='#B6ECF3',
) # type: Dict[str, str]
COLOR_SCALE = [
'#B6ECF3',
'#DE958E',
'#EBB483',
'#A0D17B',
'#93B6E6',
'#AC92DE',
'#E9EABE',
'#7EC4CF',
'#F77E70',
'#EB9E58',
] # type: List[str]
# PREDICATE-FUNCTIONS-----------------------------------------------------------
def is_iterable(item):
# type: (Any) -> bool
'''
Determines if given item is iterable.
Args:
item (object): Object to be tested.
Returns:
bool: Whether given item is iterable.
'''
if is_listlike(item) or is_dictlike(item):
return True
return False
def is_dictlike(item):
# type: (Any) -> bool
'''
Determines if given item is dict-like.
Args:
item (object): Object to be tested.
Returns:
bool: Whether given item is dict-like.
'''
for type_ in [dict, OrderedDict]:
if isinstance(item, type_):
if item.__class__.__name__ == 'Counter':
return False
return True
return False
def is_listlike(item):
# type: (Any) -> bool
'''
Determines if given item is list-like.
Args:
item (object): Object to be tested.
Returns:
bool: Whether given item is list-like.
'''
for type_ in [list, tuple, set]:
if isinstance(item, type_):
return True
return False
# CORE-FUNCTIONS----------------------------------------------------------------
def flatten(item, separator='/', embed_types=True):
# type: (Iterable, str, bool) -> Dict[str, Any]
'''
Flattens an iterable object into a flat dictionary.
Args:
item (object): Iterable object.
separator (str, optional): Field separator in keys. Default: '/'.
embed_types (bool, optional): Embed the type of list-like items in their
keys (e.g. '<list_0>'). Default: True.
Returns:
dict: Dictionary representation of given object.
'''
output = {} # type: Dict[str, Any]
def recurse(item, cursor):
# type: (Iterable, Any) -> None
if is_listlike(item):
if embed_types:
name = item.__class__.__name__
item = [(f'<{name}_{i}>', val) for i, val in enumerate(item)]
item = dict(item)
else:
item = dict(enumerate(item))
if is_dictlike(item):
for key, val in item.items():
new_key = f'{cursor}{separator}{str(key)}'
if is_iterable(val) and len(val) > 0:
recurse(val, new_key)
else:
final_key = re.sub('^' + separator, '', new_key)
output[final_key] = val
recurse(item, '')
return output
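# Illustrative example (added comment, not in the original module): flatten()
# turns nested containers into a single-level dict whose keys encode the path.
#
#     flatten({'a': {'b': 1}, 'c': [2, 3]})
#     # {'a/b': 1, 'c/<list_0>': 2, 'c/<list_1>': 3}
#
# With embed_types=False the list indices appear directly in the keys
# (e.g. 'c/0', 'c/1').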
def nest(flat_dict, separator='/'):
# type: (Dict[str, Any], str) -> Dict[str, Any]
'''
Converts a flat dictionary into a nested dictionary by splitting keys by a
given separator.
Args:
flat_dict (dict): Flat dictionary.
separator (str, optional): Field separator within given dictionary's
keys. Default: '/'.
Returns:
dict: Nested dictionary.
'''
output = {} # type: Dict[str, Any]
for keys, val in flat_dict.items():
split_keys = list(filter(
lambda x: x != '', keys.split(separator)
))
cursor = output
last = split_keys.pop()
for key in split_keys:
if key not in cursor:
cursor[key] = {}
if not isinstance(cursor[key], dict):
msg = f"Duplicate key conflict. Key: '{key}'."
raise KeyError(msg)
cursor = cursor[key]
cursor[last] = val
return output
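# Illustrative example (added comment): nest() is the inverse of the key
# splitting performed by flatten.
#
#     nest({'a/b': 1, 'a/c': 2})
#     # {'a': {'b': 1, 'c': 2}}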
def unembed(item):
# type: (Any) -> Any
'''
Converts embedded types in dictionary keys into python types.
Args:
item (object): Dictionary with embedded types.
Returns:
object: Converted object.
'''
lut = {'list': list, 'tuple': tuple, 'set': set}
embed_re = re.compile(r'^<([a-z]+)_(\d+)>$')
if is_dictlike(item) and item != {}:
output = {} # type: Any
keys = list(item.keys())
match = embed_re.match(keys[0])
if match:
indices = [embed_re.match(key).group(2) for key in keys] # type: ignore
indices = map(int, indices) # type: ignore
output = []
for i, key in sorted(zip(indices, keys)):
next_item = item[key]
if is_dictlike(next_item):
next_item = unembed(next_item)
output.append(next_item)
output = lut[match.group(1)](output)
return output
else:
for key, val in item.items():
output[key] = unembed(val)
return output
return item
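# Illustrative example (added comment): unembed() converts keys produced with
# embed_types=True back into native containers.
#
#     unembed({'<list_0>': 'a', '<list_1>': 'b'})
#     # ['a', 'b']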
# FILE-FUNCTIONS----------------------------------------------------------------
def list_all_files(directory):
# type: (Union[str, Path]) -> List[Path]
'''
Recursively lists all files within a given directory.
Args:
directory (str or Path): Directory to be recursed.
Returns:
list[Path]: List of filepaths.
'''
output = [] # type: List[Path]
for root, dirs, files in os.walk(directory):
for file_ in files:
fullpath = Path(root, file_)
output.append(fullpath)
return output
def get_parent_fields(key, separator='/'):
# type: (str, str) -> List[str]
'''
Get all the parent fields of a given key, split by given separator.
Args:
key (str): Key.
separator (str, optional): String that splits key into fields.
Default: '/'.
Returns:
list(str): List of absolute parent fields.
'''
fields = key.split(separator)
output = [] # type: List[str]
for i in range(len(fields) - 1):
output.append(separator.join(fields[:i + 1]))
return output
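# Illustrative example (added comment):
#
#     get_parent_fields('a/b/c')
#     # ['a', 'a/b']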
# EXPORT-FUNCTIONS--------------------------------------------------------------
def dot_to_html(dot, layout='dot', as_png=False):
# type: (pydot.Dot, str, bool) -> Union[HTML, Image]
'''
Converts a given pydot graph into an IPython.display.HTML object.
Used in jupyter lab inline display of graph data.
Args:
dot (pydot.Dot): Pydot Graph instance.
layout (str, optional): Graph layout style.
Options include: circo, dot, fdp, neato, sfdp, twopi.
Default: dot.
as_png (bool, optional): Display graph as a PNG image instead of SVG.
Useful for display on Github. Default: False.
Raises:
ValueError: If invalid layout given.
Returns:
IPython.display.HTML: HTML instance.
'''
layouts = ['circo', 'dot', 'fdp', 'neato', 'sfdp', 'twopi']
if layout not in layouts:
msg = f'Invalid layout value. {layout} not in {layouts}.'
raise ValueError(msg)
if as_png:
return Image(data=dot.create_png())
svg = dot.create_svg(prog=layout)
html = f'<object type="image/svg+xml" data="data:image/svg+xml;{svg}"></object>' # type: Any
html = HTML(html)
html.data = re.sub(r'\\n|\\', '', html.data)
html.data = re.sub('</svg>.*', '</svg>', html.data)
return html
def write_dot_graph(
dot,
fullpath,
layout='dot',
):
# type: (pydot.Dot, Union[str, Path], str) -> None
'''
Writes a pydot.Dot object to a given filepath.
Formats supported: svg, dot, png.
Args:
dot (pydot.Dot): Pydot Dot instance.
fullpath (str or Path): File to be written to.
layout (str, optional): Graph layout style.
Options include: circo, dot, fdp, neato, sfdp, twopi. Default: dot.
Raises:
ValueError: If invalid file extension given.
'''
if isinstance(fullpath, Path):
fullpath = Path(fullpath).absolute().as_posix()
_, ext = os.path.splitext(fullpath)
ext = re.sub(r'^\.', '', ext)
if re.search('^svg$', ext, re.I):
dot.write_svg(fullpath, prog=layout)
elif re.search('^dot$', ext, re.I):
dot.write_dot(fullpath, prog=layout)
elif re.search('^png$', ext, re.I):
dot.write_png(fullpath, prog=layout)
else:
msg = f'Invalid extension found: {ext}. '
msg += 'Valid extensions include: svg, dot, png.'
raise ValueError(msg)
|
[] |
[] |
[
"LOG_LEVEL"
] |
[]
|
["LOG_LEVEL"]
|
python
| 1 | 0 | |
selfdrive/controls/lib/events.py
|
import os
from enum import IntEnum
from typing import Dict, Union, Callable, List, Optional
from cereal import log, car
import cereal.messaging as messaging
from common.conversions import Conversions as CV
from common.realtime import DT_CTRL
from selfdrive.locationd.calibrationd import MIN_SPEED_FILTER
from selfdrive.version import get_short_branch
AlertSize = log.ControlsState.AlertSize
AlertStatus = log.ControlsState.AlertStatus
VisualAlert = car.CarControl.HUDControl.VisualAlert
AudibleAlert = car.CarControl.HUDControl.AudibleAlert
EventName = car.CarEvent.EventName
# Alert priorities
class Priority(IntEnum):
LOWEST = 0
LOWER = 1
LOW = 2
MID = 3
HIGH = 4
HIGHEST = 5
# Event types
class ET:
ENABLE = 'enable'
PRE_ENABLE = 'preEnable'
OVERRIDE = 'override'
NO_ENTRY = 'noEntry'
WARNING = 'warning'
USER_DISABLE = 'userDisable'
SOFT_DISABLE = 'softDisable'
IMMEDIATE_DISABLE = 'immediateDisable'
PERMANENT = 'permanent'
# get event name from enum
EVENT_NAME = {v: k for k, v in EventName.schema.enumerants.items()}
class Events:
def __init__(self):
self.events: List[int] = []
self.static_events: List[int] = []
self.events_prev = dict.fromkeys(EVENTS.keys(), 0)
@property
def names(self) -> List[int]:
return self.events
def __len__(self) -> int:
return len(self.events)
def add(self, event_name: int, static: bool=False) -> None:
if static:
self.static_events.append(event_name)
self.events.append(event_name)
def clear(self) -> None:
self.events_prev = {k: (v + 1 if k in self.events else 0) for k, v in self.events_prev.items()}
self.events = self.static_events.copy()
def any(self, event_type: str) -> bool:
return any(event_type in EVENTS.get(e, {}) for e in self.events)
def create_alerts(self, event_types: List[str], callback_args=None):
if callback_args is None:
callback_args = []
ret = []
for e in self.events:
types = EVENTS[e].keys()
for et in event_types:
if et in types:
alert = EVENTS[e][et]
if not isinstance(alert, Alert):
alert = alert(*callback_args)
if DT_CTRL * (self.events_prev[e] + 1) >= alert.creation_delay:
alert.alert_type = f"{EVENT_NAME[e]}/{et}"
alert.event_type = et
ret.append(alert)
return ret
def add_from_msg(self, events):
for e in events:
self.events.append(e.name.raw)
def to_msg(self):
ret = []
for event_name in self.events:
event = car.CarEvent.new_message()
event.name = event_name
for event_type in EVENTS.get(event_name, {}):
setattr(event, event_type, True)
ret.append(event)
return ret
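# Illustrative usage sketch (added comment, not in the original source): a
# controls loop typically adds events each iteration and converts the
# matching alerts, roughly like:
#
#     events = Events()
#     events.add(EventName.startup, static=True)
#     alerts = events.create_alerts([ET.PERMANENT], callback_args=[CP, sm, metric, 0])
#     events.clear()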
class Alert:
def __init__(self,
alert_text_1: str,
alert_text_2: str,
alert_status: log.ControlsState.AlertStatus,
alert_size: log.ControlsState.AlertSize,
priority: Priority,
visual_alert: car.CarControl.HUDControl.VisualAlert,
audible_alert: car.CarControl.HUDControl.AudibleAlert,
duration: float,
alert_rate: float = 0.,
creation_delay: float = 0.):
self.alert_text_1 = alert_text_1
self.alert_text_2 = alert_text_2
self.alert_status = alert_status
self.alert_size = alert_size
self.priority = priority
self.visual_alert = visual_alert
self.audible_alert = audible_alert
self.duration = int(duration / DT_CTRL)
self.alert_rate = alert_rate
self.creation_delay = creation_delay
self.alert_type = ""
self.event_type: Optional[str] = None
def __str__(self) -> str:
return f"{self.alert_text_1}/{self.alert_text_2} {self.priority} {self.visual_alert} {self.audible_alert}"
def __gt__(self, alert2) -> bool:
return self.priority > alert2.priority
class NoEntryAlert(Alert):
def __init__(self, alert_text_2: str, visual_alert: car.CarControl.HUDControl.VisualAlert=VisualAlert.none):
super().__init__("openpilot Unavailable", alert_text_2, AlertStatus.normal,
AlertSize.mid, Priority.LOW, visual_alert,
AudibleAlert.refuse, 3.)
class SoftDisableAlert(Alert):
def __init__(self, alert_text_2: str):
super().__init__("TAKE CONTROL IMMEDIATELY", alert_text_2,
AlertStatus.userPrompt, AlertSize.full,
Priority.MID, VisualAlert.steerRequired,
AudibleAlert.warningSoft, 2.),
# less harsh version of SoftDisable, where the condition is user-triggered
class UserSoftDisableAlert(SoftDisableAlert):
def __init__(self, alert_text_2: str):
super().__init__(alert_text_2),
self.alert_text_1 = "openpilot will disengage"
class ImmediateDisableAlert(Alert):
def __init__(self, alert_text_2: str):
super().__init__("TAKE CONTROL IMMEDIATELY", alert_text_2,
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.steerRequired,
AudibleAlert.warningImmediate, 4.),
class EngagementAlert(Alert):
def __init__(self, audible_alert: car.CarControl.HUDControl.AudibleAlert):
super().__init__("", "",
AlertStatus.normal, AlertSize.none,
Priority.MID, VisualAlert.none,
audible_alert, .2),
class NormalPermanentAlert(Alert):
def __init__(self, alert_text_1: str, alert_text_2: str = "", duration: float = 0.2, priority: Priority = Priority.LOWER, creation_delay: float = 0.):
super().__init__(alert_text_1, alert_text_2,
AlertStatus.normal, AlertSize.mid if len(alert_text_2) else AlertSize.small,
priority, VisualAlert.none, AudibleAlert.none, duration, creation_delay=creation_delay),
class StartupAlert(Alert):
def __init__(self, alert_text_1: str, alert_text_2: str = "Always keep hands on wheel and eyes on road", alert_status=AlertStatus.normal):
super().__init__(alert_text_1, alert_text_2,
alert_status, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, 10.),
# ********** helper functions **********
def get_display_speed(speed_ms: float, metric: bool) -> str:
speed = int(round(speed_ms * (CV.MS_TO_KPH if metric else CV.MS_TO_MPH)))
unit = 'km/h' if metric else 'mph'
return f"{speed} {unit}"
# ********** alert callback functions **********
AlertCallbackType = Callable[[car.CarParams, messaging.SubMaster, bool, int], Alert]
def soft_disable_alert(alert_text_2: str) -> AlertCallbackType:
def func(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
if soft_disable_time < int(0.5 / DT_CTRL):
return ImmediateDisableAlert(alert_text_2)
return SoftDisableAlert(alert_text_2)
return func
def user_soft_disable_alert(alert_text_2: str) -> AlertCallbackType:
def func(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
if soft_disable_time < int(0.5 / DT_CTRL):
return ImmediateDisableAlert(alert_text_2)
return UserSoftDisableAlert(alert_text_2)
return func
def startup_master_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
branch = get_short_branch("")
if "REPLAY" in os.environ:
branch = "replay"
return StartupAlert("WARNING: This branch is not tested", branch, alert_status=AlertStatus.userPrompt)
def below_engage_speed_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
return NoEntryAlert(f"Speed Below {get_display_speed(CP.minEnableSpeed, metric)}")
def below_steer_speed_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
return Alert(
f"Steer Unavailable Below {get_display_speed(CP.minSteerSpeed, metric)}",
"",
AlertStatus.userPrompt, AlertSize.small,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.prompt, 0.4)
def calibration_incomplete_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
return Alert(
"Calibration in Progress: %d%%" % sm['liveCalibration'].calPerc,
f"Drive Above {get_display_speed(MIN_SPEED_FILTER, metric)}",
AlertStatus.normal, AlertSize.mid,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .2)
def no_gps_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
gps_integrated = sm['peripheralState'].pandaType in (log.PandaState.PandaType.uno, log.PandaState.PandaType.dos)
return Alert(
"Poor GPS reception",
"Hardware malfunctioning if sky is visible" if gps_integrated else "Check GPS antenna placement",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, .2, creation_delay=300.)
def wrong_car_mode_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
text = "Cruise Mode Disabled"
if CP.carName == "honda":
text = "Main Switch Off"
return NoEntryAlert(text)
def joystick_alert(CP: car.CarParams, sm: messaging.SubMaster, metric: bool, soft_disable_time: int) -> Alert:
axes = sm['testJoystick'].axes
gb, steer = list(axes)[:2] if len(axes) else (0., 0.)
vals = f"Gas: {round(gb * 100.)}%, Steer: {round(steer * 100.)}%"
return NormalPermanentAlert("Joystick Mode", vals)
EVENTS: Dict[int, Dict[str, Union[Alert, AlertCallbackType]]] = {
# ********** events with no alerts **********
EventName.stockFcw: {},
# ********** events only containing alerts displayed in all states **********
EventName.joystickDebug: {
ET.WARNING: joystick_alert,
ET.PERMANENT: NormalPermanentAlert("Joystick Mode"),
},
EventName.controlsInitializing: {
ET.NO_ENTRY: NoEntryAlert("System Initializing"),
},
EventName.startup: {
ET.PERMANENT: StartupAlert("Be ready to take over at any time")
},
EventName.startupMaster: {
ET.PERMANENT: startup_master_alert,
},
# Car is recognized, but marked as dashcam only
EventName.startupNoControl: {
ET.PERMANENT: StartupAlert("Dashcam mode"),
},
# Car is not recognized
EventName.startupNoCar: {
ET.PERMANENT: StartupAlert("Dashcam mode for unsupported car"),
},
EventName.startupNoFw: {
ET.PERMANENT: StartupAlert("Car Unrecognized",
"Check comma power connections",
alert_status=AlertStatus.userPrompt),
},
EventName.dashcamMode: {
ET.PERMANENT: NormalPermanentAlert("Dashcam Mode",
priority=Priority.LOWEST),
},
EventName.invalidLkasSetting: {
ET.PERMANENT: NormalPermanentAlert("Stock LKAS is on",
"Turn off stock LKAS to engage"),
},
EventName.cruiseMismatch: {
#ET.PERMANENT: ImmediateDisableAlert("openpilot failed to cancel cruise"),
},
# openpilot doesn't recognize the car. This switches openpilot into a
# read-only mode. This can be solved by adding your fingerprint.
# See https://github.com/commaai/openpilot/wiki/Fingerprinting for more information
EventName.carUnrecognized: {
ET.PERMANENT: NormalPermanentAlert("Dashcam Mode",
"Car Unrecognized",
priority=Priority.LOWEST),
},
EventName.stockAeb: {
ET.PERMANENT: Alert(
"BRAKE!",
"Stock AEB: Risk of Collision",
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.none, 2.),
ET.NO_ENTRY: NoEntryAlert("Stock AEB: Risk of Collision"),
},
EventName.fcw: {
ET.PERMANENT: Alert(
"BRAKE!",
"Risk of Collision",
AlertStatus.critical, AlertSize.full,
Priority.HIGHEST, VisualAlert.fcw, AudibleAlert.warningSoft, 2.),
},
EventName.ldw: {
ET.PERMANENT: Alert(
"Lane Departure Detected",
"",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.ldw, AudibleAlert.prompt, 3.),
},
# ********** events only containing alerts that display while engaged **********
# openpilot tries to learn certain parameters about your car by observing
# how the car behaves to steering inputs from both human and openpilot driving.
# This includes:
# - steer ratio: gear ratio of the steering rack. Steering angle divided by tire angle
# - tire stiffness: how much grip your tires have
# - angle offset: most steering angle sensors are offset and measure a non-zero angle when driving straight
# This alert is thrown when any of these values exceed a sanity check. This can be caused by
# bad alignment or bad sensor data. If this happens consistently consider creating an issue on GitHub
EventName.vehicleModelInvalid: {
ET.NO_ENTRY: NoEntryAlert("Vehicle Parameter Identification Failed"),
ET.SOFT_DISABLE: soft_disable_alert("Vehicle Parameter Identification Failed"),
},
EventName.steerTempUnavailableSilent: {
ET.WARNING: Alert(
"Steering Temporarily Unavailable",
"",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.prompt, 1.),
},
EventName.preDriverDistracted: {
ET.WARNING: Alert(
"Pay Attention",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1),
},
EventName.promptDriverDistracted: {
ET.WARNING: Alert(
"Pay Attention",
"Driver Distracted",
AlertStatus.userPrompt, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.promptDistracted, .1),
},
EventName.driverDistracted: {
ET.WARNING: Alert(
"DISENGAGE IMMEDIATELY",
"Driver Distracted",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.warningImmediate, .1),
},
EventName.preDriverUnresponsive: {
ET.WARNING: Alert(
"Touch Steering Wheel: No Face Detected",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.none, .1, alert_rate=0.75),
},
EventName.promptDriverUnresponsive: {
ET.WARNING: Alert(
"Touch Steering Wheel",
"Driver Unresponsive",
AlertStatus.userPrompt, AlertSize.mid,
Priority.MID, VisualAlert.steerRequired, AudibleAlert.promptDistracted, .1),
},
EventName.driverUnresponsive: {
ET.WARNING: Alert(
"DISENGAGE IMMEDIATELY",
"Driver Unresponsive",
AlertStatus.critical, AlertSize.full,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.warningImmediate, .1),
},
EventName.manualRestart: {
ET.WARNING: Alert(
"TAKE CONTROL",
"Resume Driving Manually",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .2),
},
EventName.resumeRequired: {
ET.WARNING: Alert(
"STOPPED",
"Press Resume to Go",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .2),
},
EventName.belowSteerSpeed: {
ET.WARNING: below_steer_speed_alert,
},
EventName.preLaneChangeLeft: {
ET.WARNING: Alert(
"Steer Left to Start Lane Change Once Safe",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1, alert_rate=0.75),
},
EventName.preLaneChangeRight: {
ET.WARNING: Alert(
"Steer Right to Start Lane Change Once Safe",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1, alert_rate=0.75),
},
EventName.laneChangeBlocked: {
ET.WARNING: Alert(
"Car Detected in Blindspot",
"",
AlertStatus.userPrompt, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.prompt, .1),
},
EventName.laneChange: {
ET.WARNING: Alert(
"Changing Lanes",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, .1),
},
EventName.steerSaturated: {
ET.WARNING: Alert(
"Take Control",
"Turn Exceeds Steering Limit",
AlertStatus.userPrompt, AlertSize.mid,
Priority.LOW, VisualAlert.steerRequired, AudibleAlert.promptRepeat, 1.),
},
# Thrown when the fan is driven at >50% but is not rotating
EventName.fanMalfunction: {
ET.PERMANENT: NormalPermanentAlert("Fan Malfunction", "Likely Hardware Issue"),
},
# Camera is not outputting frames at a constant framerate
EventName.cameraMalfunction: {
ET.PERMANENT: NormalPermanentAlert("Camera Malfunction", "Likely Hardware Issue"),
},
# Unused
EventName.gpsMalfunction: {
ET.PERMANENT: NormalPermanentAlert("GPS Malfunction", "Likely Hardware Issue"),
},
# When the GPS position and localizer diverge the localizer is reset to the
# current GPS position. This alert is thrown when the localizer is reset
# more often than expected.
EventName.localizerMalfunction: {
# ET.PERMANENT: NormalPermanentAlert("Sensor Malfunction", "Hardware Malfunction"),
},
# ********** events that affect controls state transitions **********
EventName.pcmEnable: {
ET.ENABLE: EngagementAlert(AudibleAlert.engage),
},
EventName.buttonEnable: {
ET.ENABLE: EngagementAlert(AudibleAlert.engage),
},
EventName.pcmDisable: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
},
EventName.buttonCancel: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
},
EventName.brakeHold: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Brake Hold Active"),
},
EventName.parkBrake: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Parking Brake Engaged"),
},
EventName.pedalPressed: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Pedal Pressed",
visual_alert=VisualAlert.brakePressed),
},
EventName.pedalPressedPreEnable: {
ET.PRE_ENABLE: Alert(
"Release Pedal to Engage",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .1, creation_delay=1.),
},
EventName.gasPressedOverride: {
ET.OVERRIDE: Alert(
"",
"",
AlertStatus.normal, AlertSize.none,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .1),
},
EventName.wrongCarMode: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: wrong_car_mode_alert,
},
EventName.wrongCruiseMode: {
ET.USER_DISABLE: EngagementAlert(AudibleAlert.disengage),
ET.NO_ENTRY: NoEntryAlert("Adaptive Cruise Disabled"),
},
EventName.steerTempUnavailable: {
ET.SOFT_DISABLE: soft_disable_alert("Steering Temporarily Unavailable"),
ET.NO_ENTRY: NoEntryAlert("Steering Temporarily Unavailable"),
},
EventName.outOfSpace: {
ET.PERMANENT: NormalPermanentAlert("Out of Storage"),
ET.NO_ENTRY: NoEntryAlert("Out of Storage"),
},
EventName.belowEngageSpeed: {
ET.NO_ENTRY: below_engage_speed_alert,
},
EventName.sensorDataInvalid: {
ET.PERMANENT: Alert(
"No Data from Device Sensors",
"Reboot your Device",
AlertStatus.normal, AlertSize.mid,
Priority.LOWER, VisualAlert.none, AudibleAlert.none, .2, creation_delay=1.),
ET.NO_ENTRY: NoEntryAlert("No Data from Device Sensors"),
},
EventName.noGps: {
ET.PERMANENT: no_gps_alert,
},
EventName.soundsUnavailable: {
ET.PERMANENT: NormalPermanentAlert("Speaker not found", "Reboot your Device"),
ET.NO_ENTRY: NoEntryAlert("Speaker not found"),
},
EventName.tooDistracted: {
ET.NO_ENTRY: NoEntryAlert("Distraction Level Too High"),
},
EventName.overheat: {
ET.PERMANENT: NormalPermanentAlert("System Overheated"),
ET.SOFT_DISABLE: soft_disable_alert("System Overheated"),
ET.NO_ENTRY: NoEntryAlert("System Overheated"),
},
EventName.wrongGear: {
ET.SOFT_DISABLE: user_soft_disable_alert("Gear not D"),
ET.NO_ENTRY: NoEntryAlert("Gear not D"),
},
# This alert is thrown when the calibration angles are outside of the acceptable range.
# For example if the device is pointed too much to the left or the right.
# Usually this can only be solved by removing the mount from the windshield completely,
# and attaching while making sure the device is pointed straight forward and is level.
# See https://comma.ai/setup for more information
EventName.calibrationInvalid: {
ET.PERMANENT: NormalPermanentAlert("Calibration Invalid", "Remount Device and Recalibrate"),
ET.SOFT_DISABLE: soft_disable_alert("Calibration Invalid: Remount Device & Recalibrate"),
ET.NO_ENTRY: NoEntryAlert("Calibration Invalid: Remount Device & Recalibrate"),
},
EventName.calibrationIncomplete: {
ET.PERMANENT: calibration_incomplete_alert,
ET.SOFT_DISABLE: soft_disable_alert("Calibration in Progress"),
ET.NO_ENTRY: NoEntryAlert("Calibration in Progress"),
},
EventName.doorOpen: {
ET.SOFT_DISABLE: user_soft_disable_alert("Door Open"),
ET.NO_ENTRY: NoEntryAlert("Door Open"),
},
EventName.seatbeltNotLatched: {
ET.SOFT_DISABLE: user_soft_disable_alert("Seatbelt Unlatched"),
ET.NO_ENTRY: NoEntryAlert("Seatbelt Unlatched"),
},
EventName.espDisabled: {
ET.SOFT_DISABLE: soft_disable_alert("ESP Off"),
ET.NO_ENTRY: NoEntryAlert("ESP Off"),
},
EventName.lowBattery: {
ET.SOFT_DISABLE: soft_disable_alert("Low Battery"),
ET.NO_ENTRY: NoEntryAlert("Low Battery"),
},
# Different openpilot services communicate between each other at a certain
# interval. If communication does not follow the regular schedule this alert
# is thrown. This can mean a service crashed, did not broadcast a message for
# ten times the regular interval, or the average interval is more than 10% too high.
EventName.commIssue: {
ET.SOFT_DISABLE: soft_disable_alert("Communication Issue between Processes"),
ET.NO_ENTRY: NoEntryAlert("Communication Issue between Processes"),
},
# Thrown when manager detects a service exited unexpectedly while driving
EventName.processNotRunning: {
ET.NO_ENTRY: NoEntryAlert("System Malfunction: Reboot Your Device"),
},
EventName.radarFault: {
ET.SOFT_DISABLE: soft_disable_alert("Radar Error: Restart the Car"),
ET.NO_ENTRY: NoEntryAlert("Radar Error: Restart the Car"),
},
# Every frame from the camera should be processed by the model. If modeld
# is not processing frames fast enough they have to be dropped. This alert is
# thrown when over 20% of frames are dropped.
EventName.modeldLagging: {
ET.SOFT_DISABLE: soft_disable_alert("Driving model lagging"),
ET.NO_ENTRY: NoEntryAlert("Driving model lagging"),
},
# Besides predicting the path, lane lines and lead car data the model also
# predicts the current velocity and rotation speed of the car. If the model is
# very uncertain about the current velocity while the car is moving, this
# usually means the model has trouble understanding the scene. This is used
# as a heuristic to warn the driver.
EventName.posenetInvalid: {
ET.SOFT_DISABLE: soft_disable_alert("Model Output Uncertain"),
ET.NO_ENTRY: NoEntryAlert("Model Output Uncertain"),
},
# When the localizer detects an acceleration of more than 40 m/s^2 (~4G) we
# alert the driver the device might have fallen from the windshield.
EventName.deviceFalling: {
ET.SOFT_DISABLE: soft_disable_alert("Device Fell Off Mount"),
ET.NO_ENTRY: NoEntryAlert("Device Fell Off Mount"),
},
EventName.lowMemory: {
ET.SOFT_DISABLE: soft_disable_alert("Low Memory: Reboot Your Device"),
ET.PERMANENT: NormalPermanentAlert("Low Memory", "Reboot your Device"),
ET.NO_ENTRY: NoEntryAlert("Low Memory: Reboot Your Device"),
},
EventName.highCpuUsage: {
#ET.SOFT_DISABLE: soft_disable_alert("System Malfunction: Reboot Your Device"),
#ET.PERMANENT: NormalPermanentAlert("System Malfunction", "Reboot your Device"),
ET.NO_ENTRY: NoEntryAlert("System Malfunction: Reboot Your Device"),
},
EventName.accFaulted: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Faulted"),
ET.PERMANENT: NormalPermanentAlert("Cruise Faulted", ""),
ET.NO_ENTRY: NoEntryAlert("Cruise Faulted"),
},
EventName.controlsMismatch: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Controls Mismatch"),
},
EventName.roadCameraError: {
ET.PERMANENT: NormalPermanentAlert("Camera Error",
duration=1.,
creation_delay=30.),
},
EventName.driverCameraError: {
ET.PERMANENT: NormalPermanentAlert("Camera Error",
duration=1.,
creation_delay=30.),
},
EventName.wideRoadCameraError: {
ET.PERMANENT: NormalPermanentAlert("Camera Error",
duration=1.,
creation_delay=30.),
},
# Sometimes the USB stack on the device can get into a bad state
# causing the connection to the panda to be lost
EventName.usbError: {
ET.SOFT_DISABLE: soft_disable_alert("USB Error: Reboot Your Device"),
ET.PERMANENT: NormalPermanentAlert("USB Error: Reboot Your Device", ""),
ET.NO_ENTRY: NoEntryAlert("USB Error: Reboot Your Device"),
},
# This alert can be thrown for the following reasons:
# - No CAN data received at all
# - CAN data is received, but some message are not received at the right frequency
# If you're not writing a new car port, this is usually caused by faulty wiring
EventName.canError: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("CAN Error: Check Connections"),
ET.PERMANENT: Alert(
"CAN Error: Check Connections",
"",
AlertStatus.normal, AlertSize.small,
Priority.LOW, VisualAlert.none, AudibleAlert.none, 1., creation_delay=1.),
ET.NO_ENTRY: NoEntryAlert("CAN Error: Check Connections"),
},
EventName.steerUnavailable: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("LKAS Fault: Restart the Car"),
ET.PERMANENT: NormalPermanentAlert("LKAS Fault: Restart the car to engage"),
ET.NO_ENTRY: NoEntryAlert("LKAS Fault: Restart the Car"),
},
EventName.brakeUnavailable: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Fault: Restart the Car"),
ET.PERMANENT: NormalPermanentAlert("Cruise Fault: Restart the car to engage"),
ET.NO_ENTRY: NoEntryAlert("Cruise Fault: Restart the Car"),
},
EventName.reverseGear: {
ET.PERMANENT: Alert(
"Reverse\nGear",
"",
AlertStatus.normal, AlertSize.full,
Priority.LOWEST, VisualAlert.none, AudibleAlert.none, .2, creation_delay=0.5),
ET.USER_DISABLE: ImmediateDisableAlert("Reverse Gear"),
ET.NO_ENTRY: NoEntryAlert("Reverse Gear"),
},
# On cars that use stock ACC the car can decide to cancel ACC for various reasons.
# When this happens we can no longer control the car, so the user needs to be warned immediately.
EventName.cruiseDisabled: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Cruise Is Off"),
},
# For planning the trajectory Model Predictive Control (MPC) is used. This is
# an optimization algorithm that is not guaranteed to find a feasible solution.
# If no solution is found or the solution has a very high cost this alert is thrown.
EventName.plannerError: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Planner Solution Error"),
ET.NO_ENTRY: NoEntryAlert("Planner Solution Error"),
},
# When the relay in the harness box opens the CAN bus between the LKAS camera
# and the rest of the car is separated. When messages from the LKAS camera
# are received on the car side this usually means the relay hasn't opened correctly
# and this alert is thrown.
EventName.relayMalfunction: {
ET.IMMEDIATE_DISABLE: ImmediateDisableAlert("Harness Malfunction"),
ET.PERMANENT: NormalPermanentAlert("Harness Malfunction", "Check Hardware"),
ET.NO_ENTRY: NoEntryAlert("Harness Malfunction"),
},
EventName.noTarget: {
ET.IMMEDIATE_DISABLE: Alert(
"openpilot Canceled",
"No close lead car",
AlertStatus.normal, AlertSize.mid,
Priority.HIGH, VisualAlert.none, AudibleAlert.disengage, 3.),
ET.NO_ENTRY: NoEntryAlert("No Close Lead Car"),
},
EventName.speedTooLow: {
ET.IMMEDIATE_DISABLE: Alert(
"openpilot Canceled",
"Speed too low",
AlertStatus.normal, AlertSize.mid,
Priority.HIGH, VisualAlert.none, AudibleAlert.disengage, 3.),
},
# When the car is driving faster than most cars in the training data, the model outputs can be unpredictable.
EventName.speedTooHigh: {
ET.WARNING: Alert(
"Speed Too High",
"Model uncertain at this speed",
AlertStatus.userPrompt, AlertSize.mid,
Priority.HIGH, VisualAlert.steerRequired, AudibleAlert.promptRepeat, 4.),
ET.NO_ENTRY: NoEntryAlert("Slow down to engage"),
},
EventName.lowSpeedLockout: {
ET.PERMANENT: NormalPermanentAlert("Cruise Fault: Restart the car to engage"),
ET.NO_ENTRY: NoEntryAlert("Cruise Fault: Restart the Car"),
},
EventName.lkasDisabled: {
ET.PERMANENT: NormalPermanentAlert("LKAS Disabled: Enable LKAS to engage"),
ET.NO_ENTRY: NoEntryAlert("LKAS Disabled"),
},
}
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
trac/util/autoreload.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C)2006-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
import os
import sys
import threading
import time
import traceback
_SLEEP_TIME = 1
def _reloader_thread(modification_callback, loop_callback):
"""When this function is run from the main thread, it will force other
threads to exit when any modules currently loaded change.
:param modification_callback: a function taking a single argument,
the modified file, which is called
every time a modification is
detected
:param loop_callback: a function taking no arguments, which is
called after every modification check
"""
mtimes = {}
while True:
for filename in filter(None, [getattr(module, '__file__', None)
for module in sys.modules.values()]):
while not os.path.isfile(filename): # Probably in an egg or zip file
filename = os.path.dirname(filename)
if not filename:
break
if not filename: # Couldn't map to physical file, so just ignore
continue
if filename.endswith(('.pyc', '.pyo')):
filename = filename[:-1]
if not os.path.isfile(filename):
# Compiled file for non-existent source
continue
mtime = os.stat(filename).st_mtime
if filename not in mtimes:
mtimes[filename] = mtime
continue
if mtime != mtimes[filename]:
modification_callback(filename)
sys.exit(3)
loop_callback()
time.sleep(_SLEEP_TIME)
def _restart_with_reloader():
while True:
if os.path.isfile(sys.argv[0]):
args = sys.argv if os.access(sys.argv[0], os.X_OK) \
else [sys.executable] + sys.argv
elif sys.platform == 'win32' and \
os.access(sys.argv[0] + '.exe', os.X_OK):
args = [sys.argv[0] + '.exe'] + sys.argv[1:]
else:
args = [sys.executable] + sys.argv
path = args[0]
if sys.platform == 'win32':
args = ['"%s"' % arg for arg in args]
new_environ = os.environ.copy()
new_environ['RUN_MAIN'] = 'true'
# This call reinvokes ourself and goes into the other branch of main as
# a new process.
exit_code = os.spawnve(os.P_WAIT, path, args, new_environ)
if exit_code != 3:
return exit_code
def main(func, modification_callback, *args, **kwargs):
"""Run the given function and restart any time modules are changed."""
if os.environ.get('RUN_MAIN'):
exit_code = []
def main_thread():
try:
func(*args, **kwargs)
exit_code.append(None)
except SystemExit as e:
exit_code.append(e.code)
except:
traceback.print_exception(*sys.exc_info())
exit_code.append(1)
def check_exit():
if exit_code:
sys.exit(exit_code[0])
# Launch the actual program as a child thread
thread = threading.Thread(target=main_thread, name='Main thread')
thread.setDaemon(True)
thread.start()
try:
# Now wait for a file modification and quit
_reloader_thread(modification_callback, check_exit)
except KeyboardInterrupt:
pass
else:
# Initial invocation just waits around restarting this executable
try:
sys.exit(_restart_with_reloader())
except KeyboardInterrupt:
pass
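# Illustrative usage sketch (added comment, not part of the original module):
#
#     def serve():
#         ...  # run the long-lived server
#
#     def on_change(path):
#         print('%s changed, restarting' % path)
#
#     main(serve, on_change)
#
# The initial invocation re-spawns this executable with RUN_MAIN=true; the
# child runs `serve` in a daemon thread and exits with code 3 whenever a
# loaded module changes, which makes the parent loop restart it.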
|
[] |
[] |
[
"RUN_MAIN"
] |
[]
|
["RUN_MAIN"]
|
python
| 1 | 0 | |
qa/rpc-tests/util.py
|
# Copyright (c) 2014 The Bitcoin Core developers
# Copyright (c) 2014-2015 The Dash developers
# Copyright (c) 2015-2017 The PIVX developers
# Copyright (c) 2017 The SecureCloud developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc"))
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def sync_blocks(rpc_connections):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(1)
def sync_mempools(rpc_connections):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(1)
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "ctsc.conf"), 'w') as f:
f.write("regtest=1\n");
f.write("rpcuser=rt\n");
f.write("rpcpassword=rt\n");
f.write("port="+str(p2p_port(n))+"\n");
f.write("rpcport="+str(rpc_port(n))+"\n");
return datadir
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
ctscd and ctsc-cli must be in search path.
"""
if not os.path.isdir(os.path.join("cache", "node0")):
devnull = open("/dev/null", "w+")
# Create cache directories, run ctscd:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("BITCOIND", "ctscd"), "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
subprocess.check_call([ os.getenv("BITCOINCLI", "ctsc-cli"), "-datadir="+datadir,
"-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
rpcs = []
for i in range(4):
try:
url = "http://rt:[email protected]:%d"%(rpc_port(i),)
rpcs.append(AuthServiceProxy(url))
except:
sys.stderr.write("Error connecting to "+url+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 10 minutes apart, starting
# at 1 Jan 2014
block_time = 1388534400
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].setgenerate(True, 1)
block_time += 10*60
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in ctsc.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
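# Examples (added comment):
#   _rpchost_to_args(None)           -> []
#   _rpchost_to_args('127.0.0.1')    -> ['-rpcconnect=127.0.0.1']
#   _rpchost_to_args('[::1]:19000')  -> ['-rpcconnect=::1', '-rpcport=19000']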
def start_node(i, dirname, extra_args=None, rpchost=None):
"""
Start a ctscd and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
args = [ os.getenv("BITCOIND", "ctscd"), "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
devnull = open("/dev/null", "w+")
subprocess.check_call([ os.getenv("BITCOINCLI", "ctsc-cli"), "-datadir="+datadir] +
_rpchost_to_args(rpchost) +
["-rpcwait", "getblockcount"], stdout=devnull)
devnull.close()
url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
proxy = AuthServiceProxy(url)
proxy.url = url # store URL on proxy for info
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None):
"""
Start multiple ctscds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
return [ start_node(i, dirname, extra_args[i], rpchost) for i in range(num_nodes) ]
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
bitcoind_processes[i].wait()
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes.values():
bitcoind.wait()
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
|
[] |
[] |
[
"BITCOINCLI",
"BITCOIND"
] |
[]
|
["BITCOINCLI", "BITCOIND"]
|
python
| 2 | 0 | |
api/v1/consume_itunes.py
|
from requests import get
from urllib.parse import urlencode
from datetime import timedelta
import os
import redis
import json
import sys
def redis_connect() -> redis.client.Redis:
try:
client = redis.Redis(
host=os.getenv('HOST'),
port=6379,
db=0,
socket_timeout=5,
)
ping = client.ping()
if ping is True:
return client
except redis.AuthenticationError:
print("Authentication Error")
sys.exit(1)
client = redis_connect()
def get_response(query=None, media='all', limit=50):
"""
Get the iTunes Search API response for the query and media type requested by the user.
"""
if query is None:
return False, []
request_data = {
'url': 'https://itunes.apple.com/search?',
'payload': {
'term': query,
'media': media,
'limit': limit
}
}
request_str = request_data['url'] +\
urlencode(request_data['payload'])
response = get(request_str)
if not response.ok:
return None
data = response.json()
if len(data['results']) == 0:
return None
return data
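# Illustrative usage (added comment, not in the original module):
#
#     data = get_response('radiohead', media='music', limit=5)
#     if data is not None:
#         print(data['resultCount'])
#
# 'resultCount' is the field the iTunes Search API uses for the number of hits.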
# def playlist_cache(username):
# val = client.get(username)
# if val is None:
# return None
# return val
# def send_user_cache(username, resource_id):
# playlist = []
# playlist.append(resource_id)
# state = client.setex(username, timedelta(seconds=86400), playlist,)
# if state is None:
# return None
# return state
# def check_cache(query):
# data = get_data_cache(query)
# if data is not None:
# data = json.loads(data)
# data["cache"] = True
# return data
# else:
# data = get_response(query)
# if data is None:
# return None
# else:
# data["cache"] = False
# data = json.dumps(data)
# state = send_data_cache(query, data)
# if state is True:
# return json.loads(data)
# return data
def get_data_cache(query):
val = client.get(query)
if val is None:
return None
return val
def send_data_cache(query, data):
state = client.setex(query, timedelta(seconds=86400), data,)
if state is None:
return None
return state
def check_cache(query):
data = get_data_cache(query)
if data is not None:
data = json.loads(data)
data["cache"] = True
return data
else:
data = get_response(query)
if data is None:
return None
else:
data["cache"] = False
data = json.dumps(data)
state = send_data_cache(query, data)
if state is True:
return json.loads(data)
return data
|
[] |
[] |
[
"HOST"
] |
[]
|
["HOST"]
|
python
| 1 | 0 | |
Magefile.go
|
//+build mage
package main
import (
"fmt"
"os"
//mage:import sdk
_ "github.com/grafana/grafana-plugin-sdk-go/build"
"github.com/magefile/mage/mg"
"github.com/magefile/mage/sh"
)
var (
useDocker bool = os.Getenv("DOCKER") != "0"
docker []string = []string{"docker-compose", "-f", "docker/docker-compose.yml", "exec", "builder"}
)
func run(cmd ...string) error {
if useDocker {
cmd = append(docker, cmd...)
}
if err := sh.RunV(cmd[0], cmd[1:]...); err != nil {
return err
}
return nil
}
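// Illustrative usage (added comment): most targets below delegate to run(),
// which prefixes the command with docker-compose when DOCKER != "0", e.g.
//
//	run("yarn", "install")
//
// executes either `yarn install` on the host or
// `docker-compose -f docker/docker-compose.yml exec builder yarn install`.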
type Env mg.Namespace
// UpdateMage compiles mage in order to avoid a Mage dependency on the host.
func (Env) UpdateMage() error {
if err := sh.Run("mage", "-compile", "./mage"); err != nil {
return err
}
return nil
}
// Start starts the development environment
func (Env) Start() error {
if err := sh.RunV("docker-compose", "-f", "docker/docker-compose.yml", "up", "-d"); err != nil {
return err
}
fmt.Printf("\nGrafana: http://localhost:3000\nDruid: http://localhost:8888\n")
return nil
}
// Stop stops the development environment
func (Env) Stop() error {
if err := sh.RunV("docker-compose", "-f", "docker/docker-compose.yml", "down", "-v"); err != nil {
return err
}
return nil
}
// Restart stops and starts the development environment
func (Env) Restart() {
e := Env{}
e.Stop()
e.Start()
}
type Frontend mg.Namespace
// Build builds the frontend plugin
func (Frontend) Build() error {
err := run("yarn", "install")
if err == nil {
err = run("npx", "@grafana/toolkit", "plugin:build")
}
return err
}
// Test runs frontend tests
func (Frontend) Test() error {
return run("npx", "@grafana/toolkit", "plugin:test")
}
// Dev runs the frontend plugin in development mode
func (Frontend) Dev() error {
return run("npx", "@grafana/toolkit", "plugin:dev")
}
// Watch runs the frontend plugin in development mode with file watching
func (Frontend) Watch() error {
return run("npx", "@grafana/toolkit", "plugin:dev", "--watch")
}
type Backend mg.Namespace
// Build builds a production build for all platforms.
func (Backend) Build() {
run("mage", "sdk:build:backend")
}
// Linux builds the back-end plugin for Linux.
func (Backend) Linux() {
run("mage", "sdk:build:linux")
}
// Darwin builds the back-end plugin for OSX.
func (Backend) Darwin() {
run("mage", "sdk:build:darwin")
}
// Windows builds the back-end plugin for Windows.
func (Backend) Windows() {
run("mage", "sdk:build:windows")
}
// Debug builds the debug version for the current platform.
func (Backend) Debug() {
run("mage", "sdk:build:debug")
}
// BuildAll builds production back-end components.
func (Backend) BuildAll() {
run("mage", "sdk:buildAll")
}
// Clean cleans build artifacts, by deleting the dist directory.
func (Backend) Clean() {
run("mage", "sdk:clean")
}
// Coverage runs backend tests and makes a coverage report.
func (Backend) Coverage() {
run("mage", "sdk:coverage")
}
// Format formats the sources.
func (Backend) Format() {
run("mage", "sdk:format")
}
// Lint audits the source style.
func (Backend) Lint() {
run("mage", "sdk:lint")
}
// ReloadPlugin kills any running instances and waits for grafana to reload the plugin.
func (Backend) ReloadPlugin() error {
if err := sh.RunV("docker-compose", "-f", "docker/docker-compose.yml", "restart", "grafana"); err != nil {
return err
}
return nil
}
// Test runs backend tests.
func (Backend) Test() {
run("mage", "sdk:test")
}
// BuildAll builds the plugin, frontend and backend.
func BuildAll() {
b := Backend{}
f := Frontend{}
mg.Deps(b.BuildAll, f.Build)
}
// Default configures the default target.
var Default = BuildAll
|
[
"\"DOCKER\""
] |
[] |
[
"DOCKER"
] |
[]
|
["DOCKER"]
|
go
| 1 | 0 | |
dafne/data/datasets/icdar15.py
|
from detectron2.data.datasets import register_coco_instances
from dafne.utils.sort_corners import sort_quadrilateral
from detectron2.utils.colormap import colormap
from detectron2.data.datasets.coco import load_coco_json
from detectron2.data import (
DatasetCatalog,
MetadataCatalog,
DatasetMapper,
transforms as T,
)
import cv2
import xml.etree.ElementTree as ET
from detectron2.structures import BoxMode, PolygonMasks, RotatedBoxes
from detectron2.data import detection_utils as utils
import copy
import torch
import contextlib
import datetime
import io
import json
import logging
import numpy as np
import os
import pycocotools.mask as mask_util
from fvcore.common.file_io import PathManager, file_lock
from fvcore.common.timer import Timer
from PIL import Image
import matplotlib.pyplot as plt
import seaborn as sns
import os
logger = logging.getLogger(__name__)
def load_annotation(root_dir, img_id, imageset):
if imageset == "val":
imageset = "train"
filename = os.path.join(root_dir, "Annotations", imageset, "gt_img_" + img_id + ".txt")
boxes, gt_classes = [], []
with open(filename, "r", encoding="utf-8-sig") as f:
content = f.read()
objects = content.split("\n")
for obj in objects:
if len(obj) != 0:
box = obj.split(",")[0:8]
label = 0
box = [eval(x) for x in box]
boxes.append(box)
gt_classes.append(label)
return {"boxes": np.array(boxes, dtype=np.int32), "gt_classes": np.array(gt_classes)}
def xywha2xy4(xywha): # a represents the angle(degree), clockwise, a=0 along the X axis
x, y, w, h, a = xywha
corner = np.array([[-w / 2, -h / 2], [w / 2, -h / 2], [w / 2, h / 2], [-w / 2, h / 2]])
# a = np.deg2rad(a)
transform = np.array([[np.cos(a), -np.sin(a)], [np.sin(a), np.cos(a)]])
return transform.dot(corner.T).T + [x, y]
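# Illustrative example (added comment). Note that the deg2rad conversion above
# is commented out, so `a` is effectively treated as radians here:
#
#     xywha2xy4([0, 0, 2, 2, 0])
#     # array([[-1., -1.], [ 1., -1.], [ 1.,  1.], [-1.,  1.]])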
def norm_angle(angle, range=[-np.pi / 4, np.pi]):
return (angle - range[0]) % range[1] + range[0]
NAMES = ["text"]
label2name = dict((label, name) for label, name in enumerate(NAMES))
name2label = dict((name, label) for label, name in enumerate(NAMES))
def parse_annotation(img_id: str, root: str, image_set: str):
anno = load_annotation(root_dir=root, img_id=img_id, imageset=image_set)
# Construct image and annotation path
if image_set == "val":
image_set = "train" # val images are in the train folder
img_path = os.path.join(root, "images", image_set, f"img_{img_id}.jpg")
# Create new data record for each image
record = {}
record["file_name"] = img_path
record["image_id"] = img_id # Strip starting letter "P"
img = cv2.imread(img_path)
record["width"] = img.shape[1]
record["height"] = img.shape[0]
# Collect annotations
objs = []
num_objects = anno["boxes"].shape[0]
for i in range(num_objects):
obj = {}
obbox = anno["boxes"][i]
label = 0
bbox = np.array(obbox).reshape(1, -1)
xmin, xmax = bbox[:, 0::2].min(), bbox[:, 0::2].max()
ymin, ymax = bbox[:, 1::2].min(), bbox[:, 1::2].max()
w = np.abs(xmax - xmin)
h = np.abs(ymax - ymin)
ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))
is_valid_box = (w > 2) & (h > 2) & (ar < 30)
if not is_valid_box:
continue
area = w * h
bbox = np.array([xmin, ymin, xmax, ymax])
obj["segmentation"] = obbox.reshape(1, -1).tolist()
obj["category_id"] = label
obj["bbox"] = bbox
obj["bbox_mode"] = BoxMode.XYXY_ABS
obj["difficult"] = 0
obj["area"] = area
objs.append(obj)
record["annotations"] = objs
return record
def load_icdar15(root, image_set, cfg):
image_sets = [image_set] if isinstance(image_set, str) else image_set
dataset_dicts = []
for image_set in image_sets:
# Read lines in image set file
with open(os.path.join(root, "ImageSets", f"{image_set}.txt")) as f:
lines = f.read().splitlines()
if cfg.DEBUG.OVERFIT_NUM_IMAGES > 0:
# Select the first N images
lines = lines[: cfg.DEBUG.OVERFIT_NUM_IMAGES]
for img_id in lines:
img_id = img_id.replace("gt_img_", "")
record = parse_annotation(img_id, root, image_set)
dataset_dicts.append(record)
return dataset_dicts
def register_icdar15_instances(name, split, metadata, image_root, cfg):
"""
Register a dataset in COCO's json annotation format for
instance detection, instance segmentation and keypoint detection.
(i.e., Type 1 and 2 in http://cocodataset.org/#format-data.
`instances*.json` and `person_keypoints*.json` in the dataset).
This is an example of how to register a new dataset.
You can do something similar to this function, to register new datasets.
Args:
name (str): the name that identifies a dataset, e.g. "coco_2014_train".
metadata (dict): extra metadata associated with this dataset. You can
leave it as an empty dict.
image_root (str or path-like): directory which contains all the images.
"""
assert isinstance(name, str), name
assert isinstance(image_root, (str, os.PathLike)), image_root
DatasetCatalog.register(
name,
lambda: load_icdar15(
root=metadata["root_dir"],
image_set=split,
cfg=cfg,
),
)
# 2. Optionally, add metadata about this dataset,
# since they might be useful in evaluation, visualization or logging
MetadataCatalog.get(name).set(image_root=image_root, evaluator_type="icdar15", **metadata)
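# Hedged usage sketch (paths and names here are illustrative, not taken from the
# original file):
#   register_icdar15_instances("icdar15_train", split="train",
#                              metadata={"root_dir": "/data/icdar-2015", "is_test": False},
#                              image_root="/data/icdar-2015/images/train", cfg=cfg)
# after which DatasetCatalog.get("icdar15_train") returns the list of record dicts
# produced by load_icdar15.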
def _make_datasets_dict():
datasets_dict = {}
# Construct datasets dict from currently available datasets
for split in ["train", "test", "val"]:
name = f"icdar15_{split}"
img_dir = "images/train" if split in ["train", "val"] else "images/test"
datasets_dict[name] = {
"root_dir": "icdar-2015",
"img_dir": img_dir,
"ann_file": f"ImageSets/{split}.txt",
"split": split,
"is_test": "test" in name,
}
return datasets_dict
def register_icdar15(cfg):
"""Setup method to register the icdar15 dataset."""
datasets_dict = _make_datasets_dict()
# Get the data directory
data_dir = os.environ["DAFNE_DATA_DIR"]
colors = colormap(rgb=True, maximum=255)
for dataset_name, d in datasets_dict.items():
def reg(name):
register_icdar15_instances(
name=name,
metadata={
"is_test": d["is_test"],
"root_dir": os.path.join(data_dir, d["root_dir"]),
"thing_colors": colors,
},
image_root=os.path.join(data_dir, d["root_dir"], d["img_dir"]),
split=d["split"],
cfg=cfg,
)
# Register normal version
reg(dataset_name)
|
[] |
[] |
[
"DAFNE_DATA_DIR"
] |
[]
|
["DAFNE_DATA_DIR"]
|
python
| 1 | 0 | |
liualgotrader/trading/gemini.py
|
import asyncio
import base64
import hashlib
import hmac
import json
import os
import queue
import ssl
import time
import traceback
from datetime import date, datetime, timedelta
from threading import Thread
from typing import Dict, List, Optional, Tuple
import pandas as pd
import requests
import websocket
from pytz import timezone
from liualgotrader.common import config
from liualgotrader.common.assets import get_asset_min_qty, round_asset
from liualgotrader.common.tlog import tlog
from liualgotrader.common.types import Order, QueueMapper, ThreadFlags, Trade
from liualgotrader.trading.base import Trader
utctz = timezone("UTC")
class GeminiTrader(Trader):
gemini_api_key: Optional[str] = os.getenv("GEMINI_API_KEY")
gemini_api_secret: Optional[str] = os.getenv("GEMINI_API_SECRET")
base_url = "https://api.sandbox.gemini.com"
base_websocket = "wss://api.sandbox.gemini.com"
last_nonce = None
def __init__(self, qm: QueueMapper = None):
self.running_task: Optional[Thread] = None
self.hb_task: Optional[Thread] = None
self.send_hb = True
self.ws = None
self.flags: Optional[ThreadFlags] = None
super().__init__(qm)
@classmethod
def _generate_request_headers(cls, payload: Dict) -> Dict:
if not cls.gemini_api_secret or not cls.gemini_api_key:
raise AssertionError(
"both env variables GEMINI_API_KEY and GEMINI_API_SECRET must be set up"
)
t = datetime.now()
payload_nonce = int(time.mktime(t.timetuple()) * 1000)
if cls.last_nonce and cls.last_nonce == payload_nonce:
payload_nonce += 1
cls.last_nonce = payload_nonce
payload["nonce"] = str(payload_nonce)
encoded_payload = json.dumps(payload).encode()
b64 = base64.b64encode(encoded_payload)
signature = hmac.new(
cls.gemini_api_secret.encode(), b64, hashlib.sha384
).hexdigest()
return {
"Content-Type": "text/plain",
"Content-Length": "0",
"X-GEMINI-APIKEY": cls.gemini_api_key,
"X-GEMINI-PAYLOAD": b64,
"X-GEMINI-SIGNATURE": signature,
"Cache-Control": "no-cache",
}
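# Illustrative summary (an assumption about the Gemini REST scheme, not part of the
# original source): the JSON payload is sent base64-encoded in X-GEMINI-PAYLOAD and
# signed with HMAC-SHA384 of that same base64 blob using the API secret, e.g.
#   b64 = base64.b64encode(json.dumps({"request": "/v1/balances", "nonce": "1"}).encode())
#   sig = hmac.new(secret.encode(), b64, hashlib.sha384).hexdigest()
# which is what the helper above builds into the request headers.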
def _generate_ws_headers(self, payload: Dict) -> Dict:
if not self.gemini_api_secret or not self.gemini_api_key:
raise AssertionError(
"both env variables GEMINI_API_KEY and GEMINI_API_SECRET must be set up"
)
t = datetime.now()
payload_nonce = str(int(time.mktime(t.timetuple()) * 1000))
payload["nonce"] = payload_nonce
encoded_payload = json.dumps(payload).encode()
b64 = base64.b64encode(encoded_payload)
signature = hmac.new(
self.gemini_api_secret.encode(), b64, hashlib.sha384
).hexdigest()
return {
"X-GEMINI-APIKEY": self.gemini_api_key,
"X-GEMINI-PAYLOAD": b64.decode(),
"X-GEMINI-SIGNATURE": signature,
}
@classmethod
def _get_order_event_type(cls, order_data: Dict) -> Order.EventType:
return (
Order.EventType.canceled
if order_data["is_cancelled"] == True
else Order.EventType.fill
if order_data["remaining_amount"] == "0"
else Order.EventType.partial_fill
)
@classmethod
def _get_trade_event_type(cls, trade_data: Dict) -> Order.EventType:
return (
Order.EventType.canceled
if trade_data["type"] == "cancelled"
else Order.EventType.rejected
if trade_data["type"] == "rejected"
else Order.EventType.canceled
if trade_data["type"] == "cancel_rejected"
else Order.EventType.fill
if trade_data["remaining_amount"] == "0"
else Order.EventType.partial_fill
)
@classmethod
def _get_order_side(cls, order_data: Dict) -> Order.FillSide:
return (
Order.FillSide.buy
if order_data["side"] == "buy"
else Order.FillSide.sell
)
@classmethod
def _order_from_dict(cls, order_data: Dict) -> Order:
trades = order_data.get("trades", [])
trade_fees: float = 0.0 + sum(float(t["fee_amount"]) for t in trades)
return Order(
order_id=order_data["order_id"],
symbol=order_data["symbol"].lower(),
filled_qty=float(order_data["executed_amount"]),
event=cls._get_order_event_type(order_data),
price=float(order_data["price"]),
side=cls._get_order_side(order_data),
submitted_at=pd.Timestamp(
ts_input=order_data["timestampms"], unit="ms", tz="UTC"
),
avg_execution_price=float(order_data["avg_execution_price"]),
remaining_amount=float(order_data["remaining_amount"]),
trade_fees=trade_fees,
)
@classmethod
def _trade_from_dict(cls, trade_dict: Dict) -> Trade:
tlog(f"GEMINI GOING TO SEND {trade_dict}")
return Trade(
order_id=trade_dict["order_id"],
symbol=trade_dict["symbol"].lower(),
event=cls._get_trade_event_type(trade_dict),
filled_qty=float(trade_dict["fill"]["amount"])
if "fill" in trade_dict
else 0.0,
trade_fee=float(
trade_dict["fill"]["fee"] if "fill" in trade_dict else 0.0
)
if "fill" in trade_dict
else 0.0,
filled_avg_price=float(trade_dict["avg_execution_price"] or 0.0),
liquidity=trade_dict["fill"]["liquidity"]
if "fill" in trade_dict
else "",
updated_at=pd.Timestamp(
ts_input=trade_dict["timestampms"], unit="ms", tz="UTC"
),
side=Order.FillSide[trade_dict["side"]],
)
async def is_fractionable(self, symbol: str) -> bool:
return True
def check_error(self, result: Dict):
if result.get("result") == "error":
raise AssertionError(
f"[EXCEPTION] {result['reason']}:{result['message']}"
)
async def is_order_completed(
self, order_id: str, external_order_id: Optional[str] = None
) -> Tuple[Order.EventType, float, float, float]:
order = await self.get_order(order_id)
return (
order.event,
order.avg_execution_price,
order.filled_qty,
order.trade_fees,
)
def get_market_schedule(
self,
) -> Tuple[Optional[datetime], Optional[datetime]]:
return (
datetime.today().replace(
hour=0, minute=0, second=0, microsecond=0, tzinfo=utctz
),
datetime.today().replace(
hour=23, minute=59, second=59, microsecond=0, tzinfo=utctz
),
)
def get_trading_days(
self, start_date: date, end_date: date = date.today()
) -> pd.DataFrame:
return pd.DataFrame(
index=pd.date_range(start=start_date, end=end_date)
)
def get_position(self, symbol: str) -> float:
symbol = symbol.lower()
endpoint = "/v1/balances"
url = self.base_url + endpoint
payload = {
"request": endpoint,
}
headers = self._generate_request_headers(payload)
response = requests.post(url, data=None, headers=headers)
if response.status_code == 200:
for b in response.json():
if b["currency"] == symbol:
return float(b["amount"])
return 0.0
raise AssertionError(
f"HTTP ERROR {response.status_code} {response.text}"
)
async def get_order(
self, order_id: str, client_order_id: Optional[str] = None
) -> Order:
endpoint = "/v1/order/status"
url = self.base_url + endpoint
payload = {
"request": endpoint,
"order_id": order_id,
"include_trades": True,
}
headers = self._generate_request_headers(payload)
response = requests.post(url, data=None, headers=headers)
if response.status_code == 200:
order_data = response.json()
self.check_error(order_data)
return self._order_from_dict(order_data)
raise AssertionError(
f"HTTP ERROR {response.status_code} {response.text}"
)
def is_market_open_today(self) -> bool:
return True
def get_time_market_close(self) -> Optional[timedelta]:
return datetime.today().replace(
hour=23, minute=59, second=59, microsecond=0, tzinfo=utctz
) - datetime.now().replace(tzinfo=utctz)
async def reconnect(self):
await self.close()
await self.run()
@classmethod
def heartbeat(cls, flags: ThreadFlags):
tlog("GEMINI HEARTBEAT thread starting")
while flags.run:
tlog("GEMINI HEARTBEAT")
endpoint = "/v1/heartbeat"
url = cls.base_url + endpoint
payload = {
"request": endpoint,
}
headers = cls._generate_request_headers(payload)
response = requests.post(url, data=None, headers=headers)
if response.status_code != 200:
raise AssertionError(
f"HEARTHBEAT HTTP ERROR {response.status_code} {response.text}"
)
time.sleep(20)
tlog("GEMINI HEARTBEAT thread terminated")
@classmethod
def on_message(cls, ws, msgs):
msgs = json.loads(msgs)
if type(msgs) != list:
return
for msg in msgs:
if msg["type"] in [
"fill",
"cancel_rejected",
"cancelled",
"rejected",
]:
trade = cls._trade_from_dict(msg)
tlog(f"GEMINI TRADING UPDATE:{trade}")
to_send = {
"EV": "trade_update",
"symbol": trade.symbol.lower(),
"trade": trade.__dict__,
}
try:
qs = cls.get_instance().queues
if qs:
for q in qs.get_allqueues():
q.put(to_send, timeout=1)
except queue.Full as f:
tlog(
f"[EXCEPTION] on_message(): queue for {trade.symbol} is FULL:{f}"
)
raise
@classmethod
def on_error(cls, ws, error):
tlog(f"[ERROR] GeminiTrader {error}")
@classmethod
def on_close(cls, ws, close_status_code, close_msg):
tlog(f"on_close(): status={close_status_code}, close_msg={close_msg}")
async def run(self):
if not self.running_task:
tlog("starting Gemini listener")
endpoint = "/v1/order/events"
payload = {"request": endpoint}
headers = self._generate_ws_headers(payload)
self.ws = websocket.WebSocketApp(
f"{self.base_websocket}{endpoint}?eventTypeFilter=cancel_rejected&eventTypeFilter=cancelled&eventTypeFilter=rejected&eventTypeFilter=fill&eventTypeFilter=closed&heartbeat=true",
on_message=self.on_message,
on_error=self.on_error,
on_close=self.on_close,
header=headers,
)
self.running_task = Thread(
target=self.ws.run_forever,
args=(None, {"cert_reqs": ssl.CERT_NONE}),
)
self.flags = ThreadFlags(run=True)
self.hb_task = Thread(target=self.heartbeat, args=(self.flags,))
self.running_task.start()
self.hb_task.start()
return self.running_task
async def close(self):
if self.running_task and self.running_task.is_alive():
tlog(f"close task {self.running_task}")
self.ws.keep_running = False
self.flags.run = False
self.running_task.join()
self.hb_task.join()
tlog("task terminated")
self.ws = None
self.running_task = None
self.hb_task = None
self.flags = None
async def get_tradeable_symbols(self) -> List[str]:
endpoint = "/v1/symbols"
url = self.base_url + endpoint
response = requests.get(url)
if response.status_code == 200:
return response.json()
raise AssertionError(
f"HTTP ERROR {response.status_code} {response.text}"
)
async def get_shortable_symbols(self) -> List[str]:
return []
async def is_shortable(self, symbol) -> bool:
return False
async def cancel_order(self, order: Order) -> bool:
endpoint = "/v1/order/cancel"
url = self.base_url + endpoint
payload = {"request": endpoint, "order_id": order.order_id}
headers = self._generate_request_headers(payload)
response = requests.post(url, data=None, headers=headers)
if response.status_code == 200:
order_status = response.json()
self.check_error(order_status)
return order_status
raise AssertionError(
f"HTTP ERROR {response.status_code} {response.text}"
)
async def submit_order(
self,
symbol: str,
qty: float,
side: str,
order_type: str,
time_in_force: str = None,
limit_price: str = None,
stop_price: str = None,
client_order_id: str = None,
extended_hours: bool = None,
order_class: str = None,
take_profit: dict = None,
stop_loss: dict = None,
trail_price: str = None,
trail_percent: str = None,
on_behalf_of: str = None,
) -> Order:
symbol = symbol.lower()
if order_type == "market":
raise AssertionError(
"GEMINI does not support market orders, use limit orders"
)
if float(qty) < get_asset_min_qty(symbol):
raise AssertionError(
f"GEMINI requested quantity of {qty} is below minimum for {symbol}"
)
endpoint = "/v1/order/new"
url = self.base_url + endpoint
qty = round_asset(symbol, float(qty))
payload = {
"request": endpoint,
"symbol": symbol,
"amount": str(qty),
"price": str(limit_price)
if order_type == "limit"
else str(60000.0 * qty),
"side": side,
"type": "exchange limit",
"client_order_id": client_order_id,
"options": ["immediate-or-cancel"]
if order_type == "market"
else [],
}
headers = self._generate_request_headers(payload)
response = requests.post(url, data=None, headers=headers)
if response.status_code == 200:
new_order = response.json()
self.check_error(new_order)
return self._order_from_dict(new_order)
if self.flags:
self.flags.run = False
await self.close()
raise AssertionError(
f"HTTP ERROR {response.status_code} {response.text}"
)
|
[] |
[] |
[
"GEMINI_API_SECRET",
"GEMINI_API_KEY"
] |
[]
|
["GEMINI_API_SECRET", "GEMINI_API_KEY"]
|
python
| 2 | 0 | |
utils/wx.go
|
package utils
import (
"bytes"
"context"
"crypto/aes"
"crypto/cipher"
"crypto/sha1"
"encoding/base64"
"encoding/binary"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"math/rand"
"os"
"sort"
"strings"
req "github.com/imroc/req"
be "github.com/pickjunk/brick/error"
)
// WxAPI struct
type WxAPI struct {
Method string
URI string
Query req.QueryParam
Body map[string]interface{}
Headers req.Header
}
var wxURL = "https://api.weixin.qq.com"
// Fetch execute a wx api
func (w *WxAPI) Fetch(ctx context.Context, result interface{}) error {
if os.Getenv("ENV") != "production" {
req.Debug = true
}
defer func() {
req.Debug = false
}()
var res *req.Resp
var err error
switch w.Method {
case "POST":
res, err = req.Post(wxURL+w.URI, w.Headers, w.Query, req.BodyJSON(w.Body), ctx)
default:
res, err = req.Get(wxURL+w.URI, w.Headers, w.Query, ctx)
}
if err != nil {
return err
}
code := res.Response().StatusCode
if !(code >= 200 && code < 300) {
return fmt.Errorf("http status error: %d", code)
}
var e struct {
Errcode int64
Errmsg string
}
err = res.ToJSON(&e)
if err != nil {
return err
}
if e.Errcode > 0 {
var bErr be.BusinessError
err = json.Unmarshal([]byte(e.Errmsg), &bErr)
if err != nil {
return errors.New(e.Errmsg)
}
return &bErr
}
if result != nil {
err = res.ToJSON(result)
if err != nil {
return err
}
}
return nil
}
const letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
const digits = "0123456789"
const lettersAndDigits = letters + digits
func randStr(n int) []byte {
b := make([]byte, n)
for i := range b {
b[i] = lettersAndDigits[rand.Intn(len(lettersAndDigits))]
}
return b
}
// pkcs7Pad adds PKCS#7 padding
// thanks to https://studygolang.com/articles/4752
func pkcs7Pad(data []byte, blockSize int) []byte {
count := blockSize - len(data)%blockSize
padding := bytes.Repeat([]byte{byte(count)}, count)
return append(data, padding...)
}
// pkcs7Unpad removes PKCS#7 padding
// thanks to https://studygolang.com/articles/4752
func pkcs7Unpad(data []byte, blockSize int) []byte {
dataLen := len(data)
unpadding := int(data[len(data)-1])
return data[:(dataLen - unpadding)]
}
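// A minimal sketch of the padding round-trip (an addition, not part of the original
// file): with blockSize 32, pkcs7Pad([]byte("abc"), 32) appends 29 bytes each equal
// to 0x1d, and pkcs7Unpad reverses this by reading the last byte and dropping that
// many bytes from the end.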
// WxEncrypt encrypts a third-party platform message
func WxEncrypt(data []byte, platformKey, platformAppid string) ([]byte, error) {
// base64-decode the key
key := platformKey + "="
ekey, _ := base64.StdEncoding.DecodeString(key)
// convert the length of data to network byte order (big-endian)
dataLen := make([]byte, 4)
binary.BigEndian.PutUint32(dataLen, uint32(len(data)))
// assemble the packet
var pack bytes.Buffer
pack.Write(randStr(16))
pack.Write(dataLen)
pack.Write(data)
pack.Write([]byte(platformAppid))
// pad before encrypting
target := pkcs7Pad(pack.Bytes(), 32)
// AES-CBC encryption
block, err := aes.NewCipher(ekey)
if err != nil {
return nil, err
}
// ekey[:16] is used as the IV here, similar to a session key
// this is most likely a non-standard choice in WeChat's algorithm
// normally the IV should be random and carried inside the ciphertext
cbc := cipher.NewCBCEncrypter(block, ekey[:16])
cipherData := make([]byte, len(target))
cbc.CryptBlocks(cipherData, target)
return cipherData, nil
}
// WxDecrypt decrypts a third-party platform message
func WxDecrypt(data []byte, platformKey string) ([]byte, error) {
// base64-decode the key
key := platformKey + "="
ekey, _ := base64.StdEncoding.DecodeString(key)
// AES-CBC decryption
block, err := aes.NewCipher(ekey)
if err != nil {
return nil, err
}
// ekey[:16] is used as the IV here, similar to a session key
// this is most likely a non-standard choice in WeChat's algorithm
// normally the IV should be random and carried inside the ciphertext
cbc := cipher.NewCBCDecrypter(block, ekey[:16])
target := make([]byte, len(data))
cbc.CryptBlocks(target, data)
// remove padding after decrypting
target = pkcs7Unpad(target, 32)
// extract data
dataLen := binary.BigEndian.Uint32(target[16:20])
return target[20 : 20+dataLen], nil
}
// WxSign computes the third-party platform signature
func WxSign(timestamp, nonce, encrypt, token string) string {
s := []string{token, timestamp, nonce, encrypt}
sort.Strings(s)
str := strings.Join(s, "")
hash := sha1.Sum([]byte(str))
return hex.EncodeToString(hash[:])
}
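// Illustrative note (an addition, not part of the original file): WeChat's callback
// signature is the SHA-1 of the lexicographically sorted concatenation of
// {token, timestamp, nonce, encrypt}, so verifying a callback amounts to comparing
// WxSign(timestamp, nonce, encrypt, token) with the msg_signature query parameter.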
// WxDecryptUserInfo decrypts WeChat Mini Program UserInfo
func WxDecryptUserInfo(data string, key string, iv string) (string, error) {
edata, err := base64.StdEncoding.DecodeString(data)
if err != nil {
return "", err
}
ekey, err := base64.StdEncoding.DecodeString(key)
if err != nil {
return "", err
}
eiv, err := base64.StdEncoding.DecodeString(iv)
if err != nil {
return "", err
}
// AES-CBC decryption
block, err := aes.NewCipher(ekey)
if err != nil {
return "", err
}
cbc := cipher.NewCBCDecrypter(block, eiv)
target := make([]byte, len(edata))
cbc.CryptBlocks(target, edata)
// remove padding after decrypting
target = pkcs7Unpad(target, 32)
return string(target), nil
}
|
[
"\"ENV\""
] |
[] |
[
"ENV"
] |
[]
|
["ENV"]
|
go
| 1 | 0 | |
src/backend/minioServer.go
|
// MIT License
//
// Copyright (c) 2020 CADCloud
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package main
import (
"base"
"bytes"
"encoding/json"
"encoding/xml"
"fmt"
"io/ioutil"
"log"
"net"
"net/http"
"os"
"os/exec"
"strconv"
"strings"
"sync"
"time"
)
type minIOToken struct {
TokenAuth string
TokenSecret string
}
type minIOEntry struct {
Username string
Ports string
}
var file sync.RWMutex
// This variable contains the storage path name usable for minIO
var minIORoot = os.Getenv("MINIO_ROOT")
func deleteUserContent(username string, r *http.Request) {
// This needs to get the bucket list of a user,
// then delete every object in each bucket
// and remove every bucket.
// First we need to get the user credentials
// as well as the user's minIO server IP / port.
// get the BucketList
// get the content of each bucket
// WARNING: this could also be done by simply removing the user's home directory within the minIO storage;
// that has to happen after the daemon has been stopped.
var updatedData *base.User
address := strings.Split(r.Host, ":")
data := base.HTTPGetRequest("http://" + address[0] + ":9100" + "/user/" + username + "/userGetInternalInfo")
updatedData = new(base.User)
_ = json.Unmarshal([]byte(data), updatedData)
// We need to be sure that the minIO server is up before querying it
conn, err := net.Dial("tcp", updatedData.Server+":"+updatedData.Ports)
for err != nil {
time.Sleep(100 * time.Millisecond)
conn, err = net.Dial("tcp", updatedData.Server+":"+updatedData.Ports)
}
conn.Close()
response, err := base.Request("GET", "http://"+updatedData.Server+":"+updatedData.Ports+"/", "", "application/xml", nil, "", updatedData.TokenAuth, updatedData.TokenSecret)
if err != nil {
log.Fatal(err)
} else {
defer response.Body.Close()
contents, err := ioutil.ReadAll(response.Body)
if err != nil {
log.Fatal(err)
}
// Got an XML content with the various Buckets created by the end user
type Bucket struct {
Name string `xml:"Name"`
}
type Content struct {
XMLName xml.Name `xml:"ListAllMyBucketsResult"`
Buckets []Bucket `xml:"Buckets>Bucket"`
}
XMLcontents := Content{}
in := bytes.NewReader([]byte(contents))
_ = xml.NewDecoder(in).Decode(&XMLcontents)
// Ok Got the bucket list from the user
for i := 0; i < len(XMLcontents.Buckets); i++ {
response, err := base.Request("GET", "http://"+updatedData.Server+":"+updatedData.Ports+"/"+XMLcontents.Buckets[i].Name, "/"+XMLcontents.Buckets[i].Name, "application/xml", nil, "", updatedData.TokenAuth, updatedData.TokenSecret)
if err != nil {
log.Fatal(err)
} else {
contents, _ := ioutil.ReadAll(response.Body)
// Got the current bucket list content into contents as an XML format. Let's loop through it
// and issue the delete command
// and re-issue a get command to check if the directory is empty. As long as it is not
// We reload the directory entries and we remove the file
type object struct {
Name string `xml:"Key"`
}
type bucketContent struct {
XMLName xml.Name `xml:"ListBucketResult"`
Objects []object `xml:"Contents"`
}
XMLbucketcontents := bucketContent{}
in := bytes.NewReader([]byte(contents))
_ = xml.NewDecoder(in).Decode(&XMLbucketcontents)
for len(XMLbucketcontents.Objects) != 0 {
for j := 0; j < len(XMLbucketcontents.Objects); j++ {
// We must delete the object
_, _ = base.Request("DELETE", "http://"+updatedData.Server+":"+updatedData.Ports+"/"+XMLcontents.Buckets[i].Name+"/"+XMLbucketcontents.Objects[j].Name,
"/"+XMLcontents.Buckets[i].Name+"/"+XMLbucketcontents.Objects[j].Name, "application/xml", nil, "", updatedData.TokenAuth, updatedData.TokenSecret)
}
response, _ = base.Request("GET", "http://"+updatedData.Server+":"+updatedData.Ports+"/"+XMLcontents.Buckets[i].Name, "/"+XMLcontents.Buckets[i].Name, "application/xml", nil, "",
updatedData.TokenAuth, updatedData.TokenSecret)
contents, _ := ioutil.ReadAll(response.Body)
XMLbucketcontents = bucketContent{}
in := bytes.NewReader([]byte(contents))
_ = xml.NewDecoder(in).Decode(&XMLbucketcontents)
}
// We can now delete the Bucket
_, _ = base.Request("DELETE", "http://"+updatedData.Server+":"+updatedData.Ports+"/"+XMLcontents.Buckets[i].Name, "/"+XMLcontents.Buckets[i].Name, "application/xml", nil, "",
updatedData.TokenAuth, updatedData.TokenSecret)
}
}
var minIOURI = os.Getenv("MINIO_URI")
var minIOTCPPORT = os.Getenv("MINIO_TCPPORT")
freePort(minIOURI+minIOTCPPORT, username)
os.RemoveAll(minIORoot + "/" + username)
}
return
}
// This function is called when the User API has been started
func startMinIOServer(URI string, r *http.Request) {
var minIOArray [base.MaxMinIOServer]minIOEntry
var allocatedPort [base.MaxMinIOServer]int
for i := 0; i < base.MaxMinIOServer; i++ {
allocatedPort[i] = 0
minIOArray[i].Username = ""
minIOArray[i].Ports = ""
}
_, err := os.Stat(minIORoot + "/" + "master" + URI + ".json")
if !os.IsNotExist(err) {
// The config file exists; we have to read it
b, _ := ioutil.ReadFile(minIORoot + "/" + "master" + URI + ".json")
json.Unmarshal([]byte(b), &minIOArray)
// Initial check - if the username exists, we try to launch the minIO server
for i := 0; i < base.MaxMinIOServer; i++ {
if minIOArray[i].Username != "" {
// Ok the entry is configured, we must start the minIO server for that user
value, _ := strconv.Atoi(minIOArray[i].Ports)
realTCPPort := value + base.MinIOServerBasePort
s := strconv.Itoa(realTCPPort)
sCtrl := strconv.Itoa(realTCPPort + 1000)
address := strings.Split(URI, ":")
credentialsURI := os.Getenv("CREDENTIALS_URI")
credentialsPort := os.Getenv("CREDENTIALS_TCPPORT")
result := base.HTTPGetRequest("http://" + credentialsURI + credentialsPort + "/user/" + minIOArray[i].Username + "/userGetInternalInfo")
// We have to unmarshall the result
var userPtr *base.User
userPtr = new(base.User)
json.Unmarshal([]byte(result), userPtr)
os.Setenv("MINIO_ACCESS_KEY", userPtr.TokenAuth)
os.Setenv("MINIO_SECRET_KEY", userPtr.TokenSecret)
os.Setenv("MINIO_BROWSER", "off")
command := "minio server --address " + address[0] + ":" + s + " " + minIORoot + "/" + minIOArray[i].Username
commandCtrl := "minio server --address " + address[0] + ":" + sCtrl + " " + minIORoot + "ctrl/" + minIOArray[i].Username
// before starting the server we must check whether it is already running;
// to do that we look for the command in the process table
args := []string{"-o", "command"}
cmd := exec.Command("ps", args...)
var out bytes.Buffer
cmd.Stdout = &out
cmd.Start()
cmd.Wait()
if !strings.Contains(out.String(), command) {
// Second parameter shall be a string array
args = []string{"server", "--address"}
args = append(args, address[0]+":"+s)
args = append(args, minIORoot+"/"+minIOArray[i].Username)
cmd := exec.Command("minio", args...)
cmd.Start()
done := make(chan error, 1)
go func() {
done <- cmd.Wait()
}()
}
if !strings.Contains(out.String(), commandCtrl) {
// Second parameter shall be a string array
args = []string{"server", "--address"}
args = append(args, address[0]+":"+sCtrl)
args = append(args, minIORoot+"ctrl/"+minIOArray[i].Username)
cmd := exec.Command("minio", args...)
cmd.Start()
done := make(chan error, 1)
go func() {
done <- cmd.Wait()
}()
}
// We must check if the ctrl bucket is properly created or not
// if not we must create it
fullPath := "/ctrl/"
method := "GET"
response, err := base.Request(method, "http://"+address[0]+":"+sCtrl+fullPath, fullPath, "application/octet-stream", nil, "",
userPtr.TokenAuth, userPtr.TokenSecret)
for err != nil {
time.Sleep(1 * time.Second)
response, err = base.Request(method, "http://"+address[0]+":"+sCtrl+fullPath, fullPath, "application/octet-stream", nil, "",
userPtr.TokenAuth, userPtr.TokenSecret)
}
if err != nil {
log.Fatal(err)
} else {
// We can move forward
// We have the list
// Now is there a bucket with the "r" keyword at the end and a number and the same name than the requested one
defer response.Body.Close()
contents, err := ioutil.ReadAll(response.Body)
if err != nil {
log.Fatal(err)
}
// Got an XML content with the various Buckets created by the end user
type Content struct {
XMLName xml.Name `xml:"Error"`
Buckets string `xml:"Code"`
}
XMLcontents := Content{}
in := bytes.NewReader([]byte(contents))
_ = xml.NewDecoder(in).Decode(&XMLcontents)
// Ok Got the bucket list from the user
if XMLcontents.Buckets == "NoSuchBucket" {
fullPath := "/ctrl/"
method := "PUT"
_, _ = base.Request(method, "http://"+address[0]+":"+sCtrl+fullPath, fullPath, "application/octet-stream", nil, "", userPtr.TokenAuth, userPtr.TokenSecret)
}
}
// We must inform the cache API that the minIO server for
// the current user has been started
// we must POST the IP of the server as well as the port number
// we just have to Marshal the minIOArray[i] as the IP will be known through the
// r.host of the calling server
content, _ := json.Marshal(minIOArray[i])
var CacheURI = os.Getenv("CACHE_URI")
var CacheTCPPORT = os.Getenv("CACHE_TCPPORT")
base.HTTPPutRequest("http://"+CacheURI+CacheTCPPORT+"/user/"+minIOArray[i].Username, content, "application/json")
}
}
}
}
func freePort(URI string, username string) {
var minIOArray [base.MaxMinIOServer]minIOEntry
var allocatedPort [base.MaxMinIOServer]int
for i := 0; i < base.MaxMinIOServer; i++ {
allocatedPort[i] = 0
minIOArray[i].Username = ""
minIOArray[i].Ports = ""
}
// That stuff must be synced
file.Lock()
defer file.Unlock()
// We must open the Master configuration file
print(minIORoot + "/" + "master" + URI + ".json" + "\n")
_, err := os.Stat(minIORoot + "/" + "master" + URI + ".json")
if !os.IsNotExist(err) {
// The config file exists; we have to read it
b, _ := ioutil.ReadFile(minIORoot + "/" + "master" + URI + ".json")
json.Unmarshal([]byte(b), &minIOArray)
// If the username exists, clear its entry to free the port
for i := 0; i < base.MaxMinIOServer; i++ {
if minIOArray[i].Username == username {
minIOArray[i].Username = ""
minIOArray[i].Ports = ""
}
}
// we must Marshall the data and rewrite the file
content, _ := json.Marshal(minIOArray)
_ = ioutil.WriteFile(minIORoot+"/"+"master"+URI+".json", []byte(content), os.ModePerm)
}
}
func getNewPort(URI string, username string) string {
var minIOArray [base.MaxMinIOServer]minIOEntry
var allocatedPort [base.MaxMinIOServer]int
for i := 0; i < base.MaxMinIOServer; i++ {
allocatedPort[i] = 0
minIOArray[i].Username = ""
minIOArray[i].Ports = ""
}
// That stuff must be synced
file.Lock()
defer file.Unlock()
// We must open the Master configuration file
print(minIORoot + "/" + "master" + URI + ".json" + "\n")
_, err := os.Stat(minIORoot + "/" + "master" + URI + ".json")
if !os.IsNotExist(err) {
// The config file exists; we have to read it and find the first available port
b, _ := ioutil.ReadFile(minIORoot + "/" + "master" + URI + ".json")
json.Unmarshal([]byte(b), &minIOArray)
// Initial check - if the username exists, we return the existing port
for i := 0; i < base.MaxMinIOServer; i++ {
if minIOArray[i].Username == username {
return minIOArray[i].Ports
}
}
for i := 0; i < base.MaxMinIOServer; i++ {
if minIOArray[i].Username != "" {
value, _ := strconv.Atoi(minIOArray[i].Ports)
allocatedPort[value] = 1
}
}
// we must find an available port
// The test shouldn't be != 1 as it is initialized to 0
availablePort := -1
for i := 0; i < base.MaxMinIOServer; i++ {
if allocatedPort[i] != 1 {
availablePort = i
break
}
}
if availablePort == -1 {
// No Port available we must return an error
return "error"
}
// We found a Port
// we can create the entry into the array and save it as a JSON structure
i := 0
for minIOArray[i].Username != "" {
i++
}
minIOArray[i].Username = username
minIOArray[i].Ports = strconv.Itoa(availablePort)
// we must Marshall the data and rewrite the file
content, _ := json.Marshal(minIOArray)
_ = ioutil.WriteFile(minIORoot+"/"+"master"+URI+".json", []byte(content), os.ModePerm)
return string(strconv.Itoa(availablePort))
}
// the Port will be 9400 and we must create the entry into the configuration file
minIOArray[0].Username = username
minIOArray[0].Ports = "00"
content, _ := json.Marshal(minIOArray)
_ = ioutil.WriteFile(minIORoot+"/"+"master"+URI+".json", []byte(content), os.ModePerm)
return "0"
}
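// Illustrative note (an addition, not part of the original source): getNewPort hands
// out a slot index as a string; the user's minIO server actually listens on
// slot + base.MinIOServerBasePort and its ctrl instance on that port + 1000
// (see createMinIOServer below).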
func createMinIOServer(username string, URL string, accessToken string, secretToken string) string {
// We shall have as input parameters the AccessToken and SecretToken which
// will be used to spin off the minIO server configuration
// First I must look for an available port
var entry base.MinIOServer
availablePort := getNewPort(URL, username)
if availablePort != "" {
// I got a port
// We must compute the real port
value, _ := strconv.Atoi(availablePort)
realTCPPort := value + base.MinIOServerBasePort
// I must retrieve the User AccessToken and SecretToken
// then I can spawn a minIO Server for that user
os.Setenv("MINIO_ACCESS_KEY", accessToken)
os.Setenv("MINIO_SECRET_KEY", secretToken)
os.Setenv("MINIO_BROWSER", "off")
// we must create the user directory
os.Mkdir(minIORoot+"/"+username, os.ModePerm)
// we must create the ctrl area
os.Mkdir(minIORoot+"ctrl/"+username, os.ModePerm)
s := strconv.Itoa(realTCPPort)
// This is the ctrl TCP Port
sCtrl := strconv.Itoa(realTCPPort + 1000)
address := strings.Split(URL, ":")
command := "minio server --address " + address[0] + ":" + s + " " + minIORoot + "/" + username
commandCtrl := "minio server --address " + address[0] + ":" + sCtrl + " " + minIORoot + "ctrl/" + username
// before starting the server we must check whether it is already running;
// to do that we look for the command in the process table
args := []string{"-o", "command"}
cmd := exec.Command("ps", args...)
var out bytes.Buffer
cmd.Stdout = &out
cmd.Start()
cmd.Wait()
// We start the User MiniIO daemon
if !strings.Contains(out.String(), command) {
// Second parameter shall be a string array
args = []string{"server", "--address"}
args = append(args, address[0]+":"+s)
args = append(args, minIORoot+"/"+username)
cmd := exec.Command("minio", args...)
cmd.Start()
done := make(chan error, 1)
go func() {
done <- cmd.Wait()
}()
}
// We start the Ctrl MiniIO daemon
if !strings.Contains(out.String(), commandCtrl) {
// Second parameter shall be a string array
args = []string{"server", "--address"}
args = append(args, address[0]+":"+sCtrl)
args = append(args, minIORoot+"ctrl/"+username)
cmd := exec.Command("minio", args...)
cmd.Start()
done := make(chan error, 1)
go func() {
done <- cmd.Wait()
}()
}
// we must create the ctrl bucket which is used to store previews etc ...
// We must be sure that the minio server is started ...
fullPath := "/ctrl/"
method := "PUT"
response, err := base.Request(method, "http://"+address[0]+":"+sCtrl+fullPath, fullPath, "application/octet-stream", nil, "", accessToken, secretToken)
for err != nil {
time.Sleep(1 * time.Second)
response, err = base.Request(method, "http://"+address[0]+":"+sCtrl+fullPath, fullPath, "application/octet-stream", nil, "", accessToken, secretToken)
}
init := 0
// We can move forward
for init != 1 {
defer response.Body.Close()
contents, err := ioutil.ReadAll(response.Body)
if err != nil {
log.Fatal(err)
}
// Got an XML content with the various Buckets created by the end user
type Content struct {
XMLName xml.Name `xml:"Error"`
Code string `xml:"Code"`
}
XMLcontents := Content{}
in := bytes.NewReader([]byte(contents))
err = xml.NewDecoder(in).Decode(&XMLcontents)
if err == nil {
if XMLcontents.Code == "XMinioServerNotInitialized" {
time.Sleep(1 * time.Second)
response, err = base.Request(method, "http://"+address[0]+":"+sCtrl+fullPath, fullPath, "application/octet-stream", nil, "", accessToken, secretToken)
}
} else {
init = 1
}
}
// We need to return some information such as
// the minIO IP address and the TCP port,
// so that the reverse proxy can be properly configured to route traffic to it.
// We also need to implement the user loopback in order to configure the reverse proxy.
entry.Port = s
// we must split the URL as it does contain the port
entry.URI = address[0]
data, _ := json.Marshal(entry)
type minIOEntry struct {
Username string
Ports string
}
var localEntry minIOEntry
localEntry.Username = username
localEntry.Ports = availablePort
content, _ := json.Marshal(localEntry)
var CacheURI = os.Getenv("CACHE_URI")
var CacheTCPPORT = os.Getenv("CACHE_TCPPORT")
base.HTTPPutRequest("http://"+CacheURI+CacheTCPPORT+"/user/"+username, content, "application/json")
return string(data)
}
return "error"
}
func stopServer(username string, r *http.Request) {
mycontent, _ := ioutil.ReadAll(r.Body)
var myuser base.User
json.Unmarshal([]byte(mycontent), &myuser)
serverIP := myuser.Server
TCPPort := myuser.Ports
// We must check if the daemon is running
command := "minio server --address " + serverIP + ":" + TCPPort + " " + minIORoot + "/" + username
Port, _ := strconv.Atoi(TCPPort)
CtrlPort := strconv.Itoa(Port + 1000)
commandCtrl := "minio server --address " + serverIP + ":" + CtrlPort + " " + minIORoot + "ctrl/" + username
args := []string{"-o", "pid,command"}
cmd := exec.Command("ps", args...)
var out bytes.Buffer
cmd.Stdout = &out
cmd.Start()
cmd.Wait()
// Must find the PID
stringArray := strings.Split(out.String(), "\n")
for i := 0; i < len(stringArray)-1; i++ {
localCommand := strings.SplitN(strings.TrimSpace(stringArray[i]), " ", 2)
if localCommand[1] == command {
pid := localCommand[0]
// We must issue a SIGINT to that PID to stop it
args := []string{"-SIGINT", pid}
cmd = exec.Command("kill", args...)
var out bytes.Buffer
cmd.Stdout = &out
cmd.Start()
cmd.Wait()
}
if localCommand[1] == commandCtrl {
pid := localCommand[0]
// We must issue a SIGINT to that PID to stop it
args := []string{"-SIGINT", pid}
cmd = exec.Command("kill", args...)
var out bytes.Buffer
cmd.Stdout = &out
cmd.Start()
cmd.Wait()
}
}
}
func userCallback(w http.ResponseWriter, r *http.Request) {
var url = r.URL.Path
var username string
var command string
var myminioToken minIOToken
// Is there a command ?
entries := strings.Split(url[1:], "/")
// The login is always accessible
if len(entries) > 2 {
command = entries[2]
} else {
command = ""
}
username = entries[1]
switch r.Method {
case http.MethodGet:
// Get is providing the TCPPORT of the user
// and the IP address of the minIO server attached to it
// normally this is a static allocation
// but ...
case http.MethodPut:
switch command {
case "stopServer":
stopServer(username, r)
default:
// The content is within the body
mycontent, _ := ioutil.ReadAll(r.Body)
json.Unmarshal([]byte(mycontent), &myminioToekn)
accessToken := myminioToekn.TokenAuth
secretToken := myminioToekn.TokenSecret
// request for a new entry
// the parameters are the access key and the secret key
// this is safe to get them as parameter as we are not running
// on a public network
// Update an existing record.
// WARNING the r.host shall be replaced by an ALLOCATION ALGORITHM
// to determine on which server we can allocate the storage for the user
var minIOURI = os.Getenv("MINIO_URI")
var minIOTCPPORT = os.Getenv("MINIO_TCPPORT")
userParameters := createMinIOServer(username, minIOURI+minIOTCPPORT, accessToken, secretToken)
fmt.Fprint(w, userParameters)
}
case http.MethodDelete:
deleteUserContent(username, r)
default:
}
}
func startCallback(w http.ResponseWriter, r *http.Request) {
var minIOURI = os.Getenv("MINIO_URI")
var minIOTCPPORT = os.Getenv("MINIO_TCPPORT")
startMinIOServer(minIOURI+minIOTCPPORT, r)
}
func main() {
print("================================================ \n")
print("| Starting minIO storage allocation backend |\n")
print("| (c) 2019 CADCloud |\n")
print("| Development version - |\n")
print("| Private use only |\n")
print("================================================ \n")
mux := http.NewServeMux()
var minIOURI = os.Getenv("MINIO_URI")
var minIOTCPPORT = os.Getenv("MINIO_TCPPORT")
if _, err := os.Stat(minIORoot); os.IsNotExist(err) {
os.MkdirAll(minIORoot, os.ModePerm)
}
// We have to start the configured servers and report their existence to the
// main caching server
mux.HandleFunc("/user/", userCallback)
mux.HandleFunc("/start/", startCallback)
log.Fatal(http.ListenAndServe(minIOURI+minIOTCPPORT, mux))
}
|
[
"\"MINIO_ROOT\"",
"\"MINIO_URI\"",
"\"MINIO_TCPPORT\"",
"\"CREDENTIALS_URI\"",
"\"CREDENTIALS_TCPPORT\"",
"\"CACHE_URI\"",
"\"CACHE_TCPPORT\"",
"\"CACHE_URI\"",
"\"CACHE_TCPPORT\"",
"\"MINIO_URI\"",
"\"MINIO_TCPPORT\"",
"\"MINIO_URI\"",
"\"MINIO_TCPPORT\"",
"\"MINIO_URI\"",
"\"MINIO_TCPPORT\""
] |
[] |
[
"CREDENTIALS_TCPPORT",
"CREDENTIALS_URI",
"MINIO_TCPPORT",
"MINIO_ROOT",
"CACHE_URI",
"CACHE_TCPPORT",
"MINIO_URI"
] |
[]
|
["CREDENTIALS_TCPPORT", "CREDENTIALS_URI", "MINIO_TCPPORT", "MINIO_ROOT", "CACHE_URI", "CACHE_TCPPORT", "MINIO_URI"]
|
go
| 7 | 0 | |
setup.py
|
#!/usr/bin/python3
# coding: utf-8
import os
import sys
from setuptools import setup
from setuptools.command.install import install
VERSION = "4.2.4"
with open("README.md", "r") as fh:
long_description = fh.read()
class VerifyVersionCommand(install):
description = "verify that the git tag matches our version"
def run(self):
tag = os.getenv("CIRCLE_TAG")
if tag != VERSION:
info = "Git tag: {0} does not match the version of this app: {1}".format(
tag, VERSION
)
sys.exit(info)
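# Illustrative note (an addition, not part of the original file): thanks to the
# cmdclass mapping below, this check is typically invoked in CI as
#   CIRCLE_TAG=4.2.4 python setup.py verify
# which aborts the release when the git tag does not match VERSION.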
setup(
name="pycti",
version=VERSION,
description="Python API client for OpenCTI.",
long_description=long_description,
long_description_content_type="text/markdown",
author="OpenCTI",
author_email="[email protected]",
maintainer="OpenCTI",
url="https://github.com/OpenCTI-Platform/client-python",
license="Apache",
packages=["pycti", "pycti.api", "pycti.connector", "pycti.entities", "pycti.utils"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Natural Language :: English",
"Natural Language :: French",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Topic :: Security",
"Topic :: Software Development :: Libraries :: Python Modules",
],
include_package_data=True,
install_requires=[
"requests==2.25.1",
"PyYAML==5.4.1",
"python-dateutil==2.8.1",
"datefinder==0.7.1",
"stix2==2.1.0",
"pytz==2021.1",
"pika==1.2.0",
"sseclient==0.0.27",
"python-magic==0.4.18;sys.platform=='linux' or sys.platform=='darwin'",
"python-magic-bin==0.4.14;sys.platform=='win32'",
],
cmdclass={"verify": VerifyVersionCommand},
extras_require={
"dev": ["black", "wheel", "pytest", "pytest-cov", "pre-commit"],
"doc": ["autoapi", "sphinx_rtd_theme", "sphinx-autodoc-typehints"],
}, # Optional
)
|
[] |
[] |
[
"CIRCLE_TAG"
] |
[]
|
["CIRCLE_TAG"]
|
python
| 1 | 0 | |
cli/epicli.py
|
#!/usr/bin/env python3
import atexit
import sys
import argparse
import json
import os
import time
import subprocess
import platform
import socket
from cli.engine.ApplyEngine import ApplyEngine
from cli.engine.BackupEngine import BackupEngine
from cli.engine.DeleteEngine import DeleteEngine
from cli.engine.InitEngine import InitEngine
from cli.engine.PrepareEngine import PrepareEngine
from cli.engine.RecoveryEngine import RecoveryEngine
from cli.engine.UpgradeEngine import UpgradeEngine
from cli.engine.TestEngine import TestEngine
from cli.helpers.Log import Log
from cli.helpers.Config import Config
from cli.helpers.time_helpers import format_time
from cli.version import VERSION
from cli.licenses import LICENSES
from cli.helpers.query_yes_no import query_yes_no
from cli.helpers.input_query import prompt_for_password
from cli.helpers.build_io import save_to_file, get_output_path
from cli.engine.spec.SpecCommand import SpecCommand
start_time = time.time()
def main():
config = Config()
parser = argparse.ArgumentParser(
description=__doc__,
usage='''epicli <command> [<args>]''',
formatter_class=argparse.RawTextHelpFormatter)
# setup some root arguments
parser.add_argument('--version', action='version', help='Shows the CLI version', version=VERSION)
parser.add_argument('--licenses', action='version',
help='Shows the third party packages and their licenses the CLI is using.',
version=json.dumps(LICENSES, indent=4))
parser.add_argument('-l', '--log-file', dest='log_name', type=str,
help='The name of the log file written to the output directory')
parser.add_argument('--log-format', dest='log_format', type=str,
help='''Format for the logging string. Uses the default Python log formatting,
more information here: https://docs.python.org/3.7/library/logging.html''')
parser.add_argument('--log-date-format', dest='log_date_format', type=str,
help='''Format for the logging date/time. Uses the default Python strftime formatting,
more information here: https://docs.python.org/3.7/library/time.html#time.strftime''')
parser.add_argument('--log-count', dest='log_count', type=str,
help='Rollover count where each CLI run will generate a new log.')
parser.add_argument('--log-type', choices=['plain', 'json'], default='plain',
dest='log_type', action='store', help='''Type of logs that will be written to the output file.
Currently supported formats are plain text or JSON''')
parser.add_argument('--validate-certs', choices=['true', 'false'], default='true', action='store',
dest='validate_certs',
help='''[Experimental]: Disables certificate checks for certain Ansible operations
which might have issues behind proxies (https://github.com/ansible/ansible/issues/32750).
Should NOT be used in production for security reasons.''')
parser.add_argument('--auto-approve', dest='auto_approve', action="store_true",
help='Auto approve any user input queries asked by Epicli')
# set debug verbosity level.
def debug_level(x):
x = int(x)
if x < 0 or x > 4:
raise argparse.ArgumentTypeError("--debug value should be between 0 and 4")
return x
parser.add_argument('--debug', dest='debug', type=debug_level,
help='''Set this flag (0..4) to enable debug output where 0 is no
debug output and 1..4 is debug output with different verbosity levels:
Python : Anything higher than 0 enables printing of Python stacktraces
Ansible : 1..4 map to following Ansible verbosity levels:
1: -v
2: -vv
3: -vvv
4: -vvvv
Terraform : 1..4 map to the following Terraform verbosity levels:
1: WARN
2: INFO
3: DEBUG
4: TRACE''')
# some arguments we don't want available when running from the docker image.
if not config.docker_cli:
parser.add_argument('-o', '--output', dest='output_dir', type=str,
help='Directory where the CLI should write its output.')
# setup subparsers
subparsers = parser.add_subparsers()
prepare_parser(subparsers)
init_parser(subparsers)
apply_parser(subparsers)
upgrade_parser(subparsers)
delete_parser(subparsers)
test_parser(subparsers)
backup_parser(subparsers)
recovery_parser(subparsers)
# check if there were any variables and display full help
if len(sys.argv) < 2:
parser.print_help()
sys.exit(1)
arguments = sys.argv[1:]
# add some arguments to the general config so we can easily use them throughout the CLI
args = parser.parse_args(arguments)
config.output_dir = getattr(args, 'output_dir', None)
config.log_file = args.log_name
config.log_format = args.log_format
config.log_date_format = args.log_date_format
config.log_type = args.log_type
config.log_count = args.log_count
config.validate_certs = True if args.validate_certs == 'true' else False
if 'offline_requirements' in args and args.offline_requirements is not None:
config.offline_requirements = args.offline_requirements
if 'wait_for_pods' in args and args.wait_for_pods is not None:
config.wait_for_pods = args.wait_for_pods
if 'upgrade_components' in args and args.upgrade_components:
config.upgrade_components = args.upgrade_components
config.debug = args.debug
config.auto_approve = args.auto_approve
try:
return args.func(args)
except Exception as e:
logger = Log('epicli')
logger.error(e, exc_info=(config.debug > 0))
dump_debug_info()
return 1
def init_parser(subparsers):
sub_parser = subparsers.add_parser('init', description='Creates configuration file in working directory.')
optional = sub_parser._action_groups.pop()
required = sub_parser.add_argument_group('required arguments')
#required
required.add_argument('-p', '--provider', dest='provider', choices=['aws', 'azure', 'any'], default='any',
type=str,
required=True, help='One of the supported providers: azure|aws|any')
required.add_argument('-n', '--name', dest='name', type=str, required=True,
help='Name of the cluster.')
#optional
sub_parser.add_argument('--full', dest='full_config', action="store_true",
help='Use this flag if you want to create verbose configuration file.')
sub_parser._action_groups.append(optional)
def run_init(args):
Config().output_dir = os.getcwd()
with InitEngine(args) as engine:
return engine.init()
sub_parser.set_defaults(func=run_init)
def prepare_parser(subparsers):
sub_parser = subparsers.add_parser('prepare', description='Creates a folder with all prerequisites to setup the offline requirements to install a cluster offline.')
optional = sub_parser._action_groups.pop()
required = sub_parser.add_argument_group('required arguments')
#required
required.add_argument('--os', type=str, required=True, dest='os', choices=['ubuntu-18.04', 'redhat-7', 'centos-7'],
help='The OS to prepare the offline requirements for: ubuntu-18.04|redhat-7|centos-7')
#optional
sub_parser._action_groups.append(optional)
def run_prepare(args):
adjust_paths_from_output_dir()
with PrepareEngine(args) as engine:
return engine.prepare()
sub_parser.set_defaults(func=run_prepare)
def apply_parser(subparsers):
sub_parser = subparsers.add_parser('apply', description='Applies configuration from file.')
optional = sub_parser._action_groups.pop()
required = sub_parser.add_argument_group('required arguments')
#required
required.add_argument('-f', '--file', dest='file', type=str, required=True,
help='File with infrastructure/configuration definitions to use.')
#optional
optional.add_argument('--no-infra', dest='no_infra', action="store_true",
help='''Skip terraform infrastructure provisioning.
Use this when you already have infrastructure available and only want to run the
Ansible role provisioning.''')
optional.add_argument('--skip-config', dest='skip_config', action="store_true",
help='''Skip Ansible role provisioning.
Use this when you need to create cloud infrastructure and apply manual changes before
you want to run the Ansible role provisioning.''')
optional.add_argument('--offline-requirements', dest='offline_requirements', type=str,
help='Path to the folder with pre-prepared offline requirements.')
optional.add_argument('--vault-password', dest='vault_password', type=str,
help='Password that will be used to encrypt build artifacts.')
optional.add_argument('--profile-ansible-tasks', dest='profile_ansible_tasks', action="store_true",
help='Enable Ansible profile_tasks plugin for timing tasks. (developer/debug option)')
optional.add_argument('--ping-retries', dest='ping_retries', type=int, required=False, action='store', default=5,
help='Number of pings after which Ansible will fail.')
sub_parser._action_groups.append(optional)
def run_apply(args):
adjust_paths_from_file(args)
ensure_vault_password_is_set(args)
with ApplyEngine(args) as engine:
return engine.apply()
sub_parser.set_defaults(func=run_apply)
def delete_parser(subparsers):
sub_parser = subparsers.add_parser('delete', description='Delete a cluster from build artifacts.')
optional = sub_parser._action_groups.pop()
required = sub_parser.add_argument_group('required arguments')
#required
required.add_argument('-b', '--build', dest='build_directory', type=str, required=True,
help='Absolute path to directory with build artifacts.')
#optional
sub_parser._action_groups.append(optional)
def run_delete(args):
if not query_yes_no('Do you really want to delete your cluster?'):
return 0
adjust_paths_from_build(args)
with DeleteEngine(args) as engine:
return engine.delete()
sub_parser.set_defaults(func=run_delete)
def upgrade_parser(subparsers):
sub_parser = subparsers.add_parser('upgrade',
description='Upgrades common and K8s components of an existing Epiphany Platform cluster.')
optional = sub_parser._action_groups.pop()
required = sub_parser.add_argument_group('required arguments')
component_list = sorted([
'elasticsearch_curator',
'filebeat',
'grafana',
'ignite',
'image_registry',
'kafka',
'kibana',
'kubernetes',
'load_balancer',
'logging',
'node_exporter',
'opendistro_for_elasticsearch',
'postgresql',
'rabbitmq',
'zookeeper',
])
def comma_separated_type(choices):
"""Return a function that splits and checks comma-separated values."""
def splitarg(arg):
values = arg.replace(' ','').lower().split(',')
for value in values:
if value not in choices:
raise argparse.ArgumentTypeError(
'invalid choice: {!r} (choose from {})'
.format(value, ', '.join(map(repr, choices))))
return values
return splitarg
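# A small sketch of the helper's behaviour (an addition, not part of the original
# file): with check = comma_separated_type(['kafka', 'grafana']),
# check('Kafka, grafana') returns ['kafka', 'grafana'] while check('kafka,foo')
# raises argparse.ArgumentTypeError, which is how --upgrade-components is validated.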
#required
required.add_argument('-b', '--build', dest='build_directory', type=str, required=True,
help='Absolute path to directory with build artifacts.')
#optional
optional.add_argument('-f', '--file', dest='file', type=str, required=False,
help='File with upgraded configuration definitions to use for the components to be upgraded.')
optional.add_argument('--offline-requirements', dest='offline_requirements', type=str, required=False,
help='Path to the folder with pre-prepared offline requirements.')
optional.add_argument('--wait-for-pods', dest='wait_for_pods', action="store_true",
help="Waits for all pods to be in the 'Ready' state before proceeding to the next step of the K8s upgrade.")
optional.add_argument('--profile-ansible-tasks', dest='profile_ansible_tasks', action="store_true",
help='Enable Ansible profile_tasks plugin for timing tasks. (developer/debug option)')
optional.add_argument('--upgrade-components', dest='upgrade_components', type=comma_separated_type(component_list), required=False,
help='Provides comma separated list of components for upgrade selected from the following: [' + ', '.join(map(str, component_list)) + ']')
optional.add_argument('--ping-retries', dest='ping_retries', type=int, required=False, action='store', default=5,
help='Number of pings after which Ansible will fail.')
sub_parser._action_groups.append(optional)
def run_upgrade(args):
if not query_yes_no('Has backup been done?', default='no'):
return 0
adjust_paths_from_build(args)
with UpgradeEngine(args) as engine:
return engine.upgrade()
sub_parser.set_defaults(func=run_upgrade)
def test_parser(subparsers):
sub_parser = subparsers.add_parser('test', description='Test a cluster from build artifacts.')
optional = sub_parser._action_groups.pop()
required = sub_parser.add_argument_group('required arguments')
#required
required.add_argument('-b', '--build', dest='build_directory', type=str, required=True,
help='Absolute path to directory with build artifacts.')
#optional
group_list = '{' + ', '.join(SpecCommand.get_spec_groups()) + '}'
optional.add_argument('-g', '--group', choices=SpecCommand.get_spec_groups(), default='all', action='store', dest='group', required=False, metavar=group_list,
help='Group of tests to be run, e.g. kafka.')
sub_parser._action_groups.append(optional)
def run_test(args):
experimental_query()
adjust_paths_from_build(args)
with TestEngine(args) as engine:
return engine.test()
sub_parser.set_defaults(func=run_test)
def backup_parser(subparsers):
"""Configure and execute backup of cluster components."""
sub_parser = subparsers.add_parser('backup',
description='Create backup of cluster components.')
optional = sub_parser._action_groups.pop()
required = sub_parser.add_argument_group('required arguments')
#required
required.add_argument('-f', '--file', dest='file', type=str, required=True,
help='Backup configuration definition file to use.')
required.add_argument('-b', '--build', dest='build_directory', type=str, required=True,
help='Absolute path to directory with build artifacts.',
default=None)
#optional
sub_parser._action_groups.append(optional)
def run_backup(args):
adjust_paths_from_file(args)
with BackupEngine(args) as engine:
return engine.backup()
sub_parser.set_defaults(func=run_backup)
def recovery_parser(subparsers):
"""Configure and execute recovery of cluster components."""
sub_parser = subparsers.add_parser('recovery',
description='Recover from existing backup.')
optional = sub_parser._action_groups.pop()
required = sub_parser.add_argument_group('required arguments')
#required
required.add_argument('-f', '--file', dest='file', type=str, required=True,
help='Recovery configuration definition file to use.')
required.add_argument('-b', '--build', dest='build_directory', type=str, required=True,
help='Absolute path to directory with build artifacts.',
default=None)
#optional
sub_parser._action_groups.append(optional)
def run_recovery(args):
if not query_yes_no('Do you really want to perform recovery?'):
return 0
adjust_paths_from_file(args)
with RecoveryEngine(args) as engine:
return engine.recovery()
sub_parser.set_defaults(func=run_recovery)
def experimental_query():
if not query_yes_no('This is an experimental feature and could change at any time. Do you want to continue?'):
sys.exit(0)
def adjust_paths_from_output_dir():
if not Config().output_dir:
Config().output_dir = os.getcwd() # Default to working dir so we can at least write logs.
def adjust_paths_from_file(args):
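# Resolve the definition file to an absolute path, fail early if it is missing, and default output_dir to a 'build' directory next to the file.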
if not os.path.isabs(args.file):
args.file = os.path.join(os.getcwd(), args.file)
if not os.path.isfile(args.file):
Config().output_dir = os.getcwd() # Default to working dir so we can at least write logs.
raise Exception(f'File "{args.file}" does not exist')
if Config().output_dir is None:
Config().output_dir = os.path.join(os.path.dirname(args.file), 'build')
def adjust_paths_from_build(args):
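# Resolve the build directory to an absolute path, fail early if it is missing, and default output_dir to its parent directory.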
if not os.path.isabs(args.build_directory):
args.build_directory = os.path.join(os.getcwd(), args.build_directory)
if not os.path.exists(args.build_directory):
Config().output_dir = os.getcwd() # Default to working dir so we can at least write logs.
raise Exception(f'Build directory "{args.build_directory}" does not exist')
if args.build_directory[-1:] == '/':
args.build_directory = args.build_directory.rstrip('/')
if Config().output_dir is None:
Config().output_dir = os.path.split(args.build_directory)[0]
def ensure_vault_password_is_set(args):
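# Prompt for the vault password if it was not provided on the command line and persist it at the configured vault password location.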
vault_password = args.vault_password
if vault_password is None:
vault_password = prompt_for_password("Provide password to encrypt vault: ")
directory_path = os.path.dirname(Config().vault_password_location)
os.makedirs(directory_path, exist_ok=True)
save_to_file(Config().vault_password_location, vault_password)
def ensure_vault_password_is_cleaned():
if os.path.exists(Config().vault_password_location):
os.remove(Config().vault_password_location)
def log_total_run_time():
if Config().output_dir is not None:
logger = Log('run_time')
passed_time = format_time(time.time()-start_time)
logger.info(f'Total run time: {passed_time}')
def exit_handler():
ensure_vault_password_is_cleaned()
log_total_run_time()
def dump_debug_info():
def dump_external_debug_info(title, args):
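# Run an external command, drop blank lines from its output, and append the rest to the dump file under the given section title.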
dump_file.write(f'\n\n*****{title}******\n')
p = subprocess.Popen(args, stdout=subprocess.PIPE)
out, err = p.communicate()
lines = filter(lambda x: x.strip(), out.decode("utf-8").splitlines(keepends=True))
dump_file.writelines(lines)
try:
logger = Log('dump_debug_info')
config = Config()
timestr = time.strftime("%Y%m%d-%H%M%S")
dump_path = os.getcwd() + f'/epicli_error_{timestr}.dump'
dump_file = open(dump_path, 'w')
dump_file.write('*****EPICLI VERSION******\n')
dump_file.write(f'{VERSION}')
dump_file.write('\n\n*****EPICLI ARGS******\n')
dump_file.write(' '.join([*['epicli'], *sys.argv[1:]]))
dump_file.write('\n\n*****EPICLI CONFIG******\n')
for attr in config.__dict__:
if attr.startswith('_'):
dump_file.write('%s = %r\n' % (attr[1:], getattr(config, attr)))
dump_file.write('\n\n*****SYSTEM******\n')
system_data = {
'platform':platform.system(),
'release':platform.release(),
'type': platform.uname().system,
'arch': platform.uname().machine,
'cpus': json.dumps(os.cpu_count()),
'hostname': socket.gethostname()
}
dump_file.write(json.dumps(dict(system_data), indent=2))
dump_file.write('\n\n*****ENVIRONMENT VARS******\n')
dump_file.write(json.dumps(dict(os.environ), indent=2))
dump_file.write('\n\n*****PYTHON******\n')
dump_file.write(f'python_version: {platform.python_version()}\n')
dump_file.write(f'python_build: {platform.python_build()}\n')
dump_file.write(f'python_revision: {platform.python_revision()}\n')
dump_file.write(f'python_compiler: {platform.python_compiler()}\n')
dump_file.write(f'python_branch: {platform.python_branch()}\n')
dump_file.write(f'python_implementation: {platform.python_implementation()}\n')
dump_external_debug_info('ANSIBLE VERSION', ['ansible', '--version'])
dump_external_debug_info('ANSIBLE CONFIG', ['ansible-config', 'dump'])
dump_external_debug_info('ANSIBLE-VAULT VERSION', ['ansible-vault', '--version'])
dump_external_debug_info('TERRAFORM VERSION', ['terraform', '--version'])
dump_external_debug_info('RUBY VERSION', ['ruby', '--version'])
dump_external_debug_info('RUBY GEM VERSION', ['gem', '--version'])
dump_external_debug_info('RUBY INSTALLED GEMS', ['gem', 'query', '--local'])
dump_file.write('\n\n*****LOG******\n')
log_path = os.path.join(get_output_path(), config.log_file)
dump_file.writelines([l for l in open(log_path).readlines()])
finally:
dump_file.close()
logger.info(f'Error dump has been written to: {dump_path}')
logger.warning('This dump might contain sensitive information. Check before sharing.')
if __name__ == '__main__':
atexit.register(exit_handler)
exit(main())
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
run.py
|
import os
from flaskapp import create_app, mongodb
app = create_app(os.getenv("FLASK_CONFIG") or "default")
@app.shell_context_processor
def make_shell_context():
return dict(mongodb=mongodb)
|
[] |
[] |
[
"FLASK_CONFIG"
] |
[]
|
["FLASK_CONFIG"]
|
python
| 1 | 0 | |
get_data/hue_polling_to_db.py
|
import time
import json
import sys
import os
import csv
import urllib2
import sqlite3
import requests
csv.field_size_limit(sys.maxsize)
if "HUE_IP_ADDRESS" in os.environ:
HUE_IP_ADDRESS = os.environ["HUE_IP_ADDRESS"]
else:
HUE_IP_ADDRESS = "set_ip_address_here" # If you don't want to set in environment variables
if "HUE_API_KEY" in os.environ:
HUE_API_KEY = os.environ["HUE_API_KEY"]
else:
HUE_API_KEY = "set_key_here" # If you don't want to set in environment variables
# If the INFLUX_URL is specified and not blank, then log to influx_db:
if 'INFLUX_URL' in os.environ and len(os.environ['INFLUX_URL']):
influx_url = os.environ['INFLUX_URL']
# Create the database:
resp = requests.post(url='{}/query'.format(influx_url),
data={'q':'CREATE DATABASE hue_data'})
print(resp.text)
else:
influx_url = None
DB = "../hue_data.db"
DB_TABLE = "hue_results"
DB_TABLE_KNMI_CACHE = "knmi_cache"
OUT_FILE = "../hue_results.csv"
HUE_API_LOCATION = "http://{}/api/".format(HUE_IP_ADDRESS)
INTERVAL = 10 #seconds between polls
WRITE_FILE = False
print("Polling API Location: {}".format(HUE_API_LOCATION))
def initialize_db():
""" When not available, creates Database and table.
Otherwise, does nothing.
"""
# Set up DB connection
con = sqlite3.connect(DB)
cur = con.cursor()
# Create table (if not exists)
try:
cur.execute("""
CREATE TABLE {0} (
un UNIQUE,
polling_timestamp,
device_name,
device_type,
device_modelid,
device_manufacturer,
device_swversion,
device_uid,
value,
value_timestamp
);
""".format(DB_TABLE))
except:
pass
# Create table (if not exists)
try:
cur.execute("""
CREATE TABLE {0} (
polling_timestamp
);
""".format(DB_TABLE_KNMI_CACHE))
except:
pass
con.close()
def write_db(results):
""" Writes list of CSV lines (results) to database
"""
if influx_url is not None:
log_to_influx_db(results, influx_url)
# Set up DB connection
con = sqlite3.connect(DB)
cur = con.cursor()
time_string = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
# Write to DB
for line in results:
# print('line', line)
try:
split_line = line.split(';')
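# Unique key = device name + value timestamp; combined with the UNIQUE column and INSERT OR IGNORE this skips re-polled, unchanged readings.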
un = "{0}{1}".format(split_line[0],split_line[7])
insert_data = ','.join(split_line)
#un = "{0}{1}".format(insert_data[0],insert_data[7])
insert_vals = "{},{},{}".format(un, time_string, insert_data)
insert_vals = ','.join(["'{}'".format(val) for val in insert_vals.split(',')])
# print(un)
# print(insert_vals)
query_str = "INSERT OR IGNORE INTO {0} VALUES({1})".format(DB_TABLE, insert_vals)
# print(query_str)
cur.execute(query_str)
except:
print "WARNING: Failed writing line to DB; '{0}'".format(line)
con.commit()
con.close()
def log_to_influx_db(results, influx_url):
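# Convert each semicolon-separated result line into InfluxDB line protocol ("measurement,tag=... value=...") and POST the batch to the hue_data database.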
influx_log_str = """"""
for line in results:
print(line)
split_line = line.split(';')
value_str = split_line[6]
if value_str == 'True':
value_str = True
elif value_str == 'False' or value_str == '':
value_str = False
value = float(value_str)
influx_log_str+=('{},device_name={},device_type={} value={}\n'.format(
split_line[1],
split_line[0].replace(' ','_'),
split_line[1],
value))
print(influx_log_str)
resp = requests.post(url='{}/write?db=hue_data'.format(influx_url),
data=influx_log_str,
headers={'Content-Type': 'application/octet-stream'})
print(resp.text)
def retrieve_data(request_string):
""" Question Hue API with request_string
"""
try:
#print("{0}{1}/{2}".format(HUE_API_LOCATION, HUE_API_KEY, request_string))
result = urllib2.urlopen("{0}{1}/{2}".format(HUE_API_LOCATION, HUE_API_KEY, request_string)).read()
result_json = json.loads(result)
return result_json
except:
print "Network unreachable. Retrying on next iteration..."
return {}
def write_file(file, lines):
""" Write given lines to given file
"""
time_string = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
for line in lines:
try:
with open(file, "a") as f:
f.write("{0};{1}\n".format(time_string,line))
#print line
except:
print "WARNING: Failed writing line to file; '{0}'".format(line)
def retrieve_knmi_weather_parent():
""" Parent of KNMI polling to make sure only once every 5 minutes is being polled.
In any other situation we will use the last known value
"""
# Check if last KNMI poll < 5 minutes old. Don't retrieve new value.
con = sqlite3.connect(DB)
cur = con.cursor()
query = """
SELECT
MAX(polling_timestamp)
FROM {0};
""".format(DB_TABLE_KNMI_CACHE)
# Execute query
cur.execute(query)
rows = cur.fetchall()
# Parse age
latest_time = "1970-01-01 01:00:00"
for row in rows:
latest_time = row[0]
print(latest_time)
if latest_time is None:
return retrieve_knmi_weather()
# Only poll again when the cached timestamp is older than the 900-second window.
if time.strptime(latest_time, "%Y-%m-%d %H:%M:%S") < time.gmtime(time.time() - 900):
# Save new latest poll time so following iterations reuse the cached value
try:
time_string = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
cur.execute("INSERT OR IGNORE INTO {0} VALUES('{1}')".format(DB_TABLE_KNMI_CACHE, time_string))
con.commit()
except:
print "WARNING: Failed writing time to KNMI DB Cache"
con.close()
# Retrieve
return retrieve_knmi_weather()
else:
con.close()
return False
def retrieve_knmi_weather():
""" Retrieve current weather in Voorschoten from KNMI website
"""
results = []
try:
# retrieve KNMI HTML
url = "http://www.knmi.nl/nederland-nu/weer/waarnemingen"
response = urllib2.urlopen(url)
html = response.read()
# Cut out part containing the info we need
part = html.split("<td class="">Voorschoten</td>")[1]
part = part.split("</tr>")[0]
parts = part.split("<td class=\"\">")
rotterdam_temperature = parts[1].replace("</td>","")
rotterdam_humidity = parts[2].replace("</td>","")
rotterdam_wind_speed = parts[4].replace("</td>","")
rotterdam_wind_direction = parts[3].replace("</td>","")
rotterdam_visibility = parts[5].replace("</td>","")
rotterdam_air_pressure = parts[6].replace("</td>","")
# Add results in correct format
time_string = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime())
results.append("{0};{1};{2};{3};{4};{5};{6};{7}".format(
"KNMI_ROTTERDAM_TEMPERATURE",
"Temperature",
"None",
"KNMI",
"1.0",
"KNMI_RDAM_T_0",
rotterdam_temperature,
time_string
))
results.append("{0};{1};{2};{3};{4};{5};{6};{7}".format(
"KNMI_ROTTERDAM_HUMIDITY",
"Humidity",
"None",
"KNMI",
"1.0",
"KNMI_RDAM_H_0",
rotterdam_humidity,
time_string
))
results.append("{0};{1};{2};{3};{4};{5};{6};{7}".format(
"KNMI_ROTTERDAM_WIND_SPEED",
"Wind speed (m/s)",
"None",
"KNMI",
"1.0",
"KNMI_RDAM_WS_0",
rotterdam_wind_speed,
time_string
))
results.append("{0};{1};{2};{3};{4};{5};{6};{7}".format(
"KNMI_ROTTERDAM_WIND_DIRECTION",
"Wind direction",
"None",
"KNMI",
"1.0",
"KNMI_RDAM_WD_0",
rotterdam_wind_direction,
time_string
))
results.append("{0};{1};{2};{3};{4};{5};{6};{7}".format(
"KNMI_ROTTERDAM_VISIBILITY",
"Visibility (m)",
"None",
"KNMI",
"1.0",
"KNMI_RDAM_V_0",
rotterdam_visibility,
time_string
))
results.append("{0};{1};{2};{3};{4};{5};{6};{7}".format(
"KNMI_ROTTERDAM_PRESSURE",
"Air pressure (hPa)",
"None",
"KNMI",
"1.0",
"KNMI_RDAM_P_0",
rotterdam_air_pressure,
time_string
))
except:
print "Failed retrieving KNMI data"
return results
def parse_results(result):
""" Parse results from Hue API into one CSV line per Hue measurement.
Returns list of CSV lines
"""
results_parsed = []
for device in result:
try:
current = result[device]
device_data = "{0};{1};{2};{3};{4};{5}".format(
current["name"],
current["type"],
current["modelid"],
current["manufacturername"],
current["swversion"],
current["uniqueid"])
device_specific = ";"
if current["type"] == "Daylight":
device_specific = "{0};{1}".format(
current["state"]["daylight"],
current["state"]["lastupdated"].replace("T"," "))
if current["type"] == "ZLLTemperature":
device_specific = "{0};{1}".format(
current["state"]["temperature"],
current["state"]["lastupdated"].replace("T"," "))
if current["type"] == "ZLLPresence":
device_specific = "{0};{1}".format(
current["state"]["presence"],
current["state"]["lastupdated"].replace("T"," "))
if current["type"] == "ZLLLightLevel":
device_specific = "{0};{1}".format(
current["state"]["lightlevel"],
current["state"]["lastupdated"].replace("T"," "))
if current["type"] == "CLIPGenericStatus":
device_specific = "{0};{1}".format(
current["state"]["status"],
current["state"]["lastupdated"].replace("T"," "))
# device_config = json.dumps(current["config"])
device_line = "{0};{1}".format(device_data, device_specific)
results_parsed.append(device_line)
except Exception as e:
print "Device with invalid JSON contents found. Error: {0}".format(e)
return results_parsed
initialize_db()
# Main loop
while True:
# Retrieve Hue data
result = retrieve_data("sensors")
# Parse data
result_parsed = parse_results(result)
print(result_parsed)
# Retrieve and add KNMI data
knmi = retrieve_knmi_weather_parent()
if knmi is not False:
result_parsed = result_parsed + knmi
# Write to CSV
if WRITE_FILE:
write_file(OUT_FILE, result_parsed)
# Write to DB
write_db(result_parsed)
# Finished
print "Wrote results for {0} devices. Continueing...".format(len(result_parsed))
# Sleep, continue
time.sleep(INTERVAL)
|
[] |
[] |
[
"INFLUX_URL",
"HUE_IP_ADDRESS",
"HUE_API_KEY"
] |
[]
|
["INFLUX_URL", "HUE_IP_ADDRESS", "HUE_API_KEY"]
|
python
| 3 | 0 | |
chapter2/touchphat_mqtt_sample2.py
|
# coding: utf-8
"""
required packages
- touchphat
- paho.mqtt
"""
from logging import getLogger
logger = getLogger(__name__)
import paho.mqtt.client as mqtt
import touchphat
import os
from threading import Lock
import time
NAME = 'TouchPhat Sample 1'
MQTT_HOST = os.environ.get('MQTT_HOST')
MQTT_USER = os.environ.get('MQTT_USER')
MQTT_PASSWORD = os.environ.get('MQTT_PASSWORD')
MQTT_PORT = int(os.environ.get('MQTT_PORT'))
TOPIC = 'button'
lock = Lock()
client = mqtt.Client(protocol=mqtt.MQTTv311)
client.username_pw_set(MQTT_USER, password=MQTT_PASSWORD)
def main():
client.connect(MQTT_HOST, MQTT_PORT)
client.loop_forever()
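# Each released pad publishes its name to the MQTT 'button' topic; the lock keeps publishes from overlapping across callbacks.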
@touchphat.on_release(['Back','A', 'B', 'C', 'D','Enter'])
def handle_touch(event):
with lock:
client.publish(
topic=TOPIC,
payload=event.name
)
time.sleep(1)
if __name__ == '__main__':
main()
|
[] |
[] |
[
"MQTT_HOST",
"MQTT_USER",
"MQTT_PORT",
"MQTT_PASSWORD"
] |
[]
|
["MQTT_HOST", "MQTT_USER", "MQTT_PORT", "MQTT_PASSWORD"]
|
python
| 4 | 0 | |
vendor/github.com/mattn/goveralls/tester/tester.go
|
package tester
import (
"os"
)
func GoverallsTester() string {
s := os.Getenv("GOVERALLS_TESTER")
if s == "" {
s = "hello world"
}
return s
}
|
[
"\"GOVERALLS_TESTER\""
] |
[] |
[
"GOVERALLS_TESTER"
] |
[]
|
["GOVERALLS_TESTER"]
|
go
| 1 | 0 | |
manage.py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cryptotracker.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[] |
[] |
[] |
[]
|
[]
|
python
| 0 | 0 | |
rclpy/guard_conditions/setup.py
|
from setuptools import setup
package_name = 'examples_rclpy_guard_conditions'
setup(
name=package_name,
version='0.16.0',
packages=[package_name],
data_files=[
('share/ament_index/resource_index/packages', ['resource/' + package_name]),
('share/' + package_name, ['package.xml']),
],
install_requires=['setuptools'],
zip_safe=True,
maintainer='Aditya Pande, Shane Loretz',
maintainer_email='[email protected], [email protected]',
keywords=['ROS'],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Topic :: Software Development',
],
description='Examples of using guard conditions.',
license='Apache License, Version 2.0',
tests_require=['pytest'],
entry_points={
'console_scripts': [
'trigger_guard_condition = '
'examples_rclpy_guard_conditions.trigger_guard_condition:main'
],
},
)
|
[] |
[] |
[] |
[]
|
[]
|
python
| null | null | null |
TwitterResources/gradle-4.4.1/src/platform-native/org/gradle/nativeplatform/toolchain/internal/msvcpp/VisualCppPlatformToolProvider.java
|
/*
* Copyright 2014 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.gradle.nativeplatform.toolchain.internal.msvcpp;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import org.gradle.api.Transformer;
import org.gradle.internal.Transformers;
import org.gradle.internal.jvm.Jvm;
import org.gradle.internal.operations.BuildOperationExecutor;
import org.gradle.internal.work.WorkerLeaseService;
import org.gradle.language.base.internal.compile.Compiler;
import org.gradle.language.base.internal.compile.DefaultCompilerVersion;
import org.gradle.language.base.internal.compile.VersionAwareCompiler;
import org.gradle.nativeplatform.internal.CompilerOutputFileNamingSchemeFactory;
import org.gradle.nativeplatform.internal.LinkerSpec;
import org.gradle.nativeplatform.internal.StaticLibraryArchiverSpec;
import org.gradle.nativeplatform.platform.internal.NativePlatformInternal;
import org.gradle.nativeplatform.platform.internal.OperatingSystemInternal;
import org.gradle.nativeplatform.toolchain.internal.AbstractPlatformToolProvider;
import org.gradle.nativeplatform.toolchain.internal.CommandLineToolContext;
import org.gradle.nativeplatform.toolchain.internal.CommandLineToolInvocationWorker;
import org.gradle.nativeplatform.toolchain.internal.DefaultCommandLineToolInvocationWorker;
import org.gradle.nativeplatform.toolchain.internal.DefaultMutableCommandLineToolContext;
import org.gradle.nativeplatform.toolchain.internal.MutableCommandLineToolContext;
import org.gradle.nativeplatform.toolchain.internal.NativeCompileSpec;
import org.gradle.nativeplatform.toolchain.internal.OutputCleaningCompiler;
import org.gradle.nativeplatform.toolchain.internal.PCHUtils;
import org.gradle.nativeplatform.toolchain.internal.SystemIncludesAwarePlatformToolProvider;
import org.gradle.nativeplatform.toolchain.internal.ToolType;
import org.gradle.nativeplatform.toolchain.internal.compilespec.AssembleSpec;
import org.gradle.nativeplatform.toolchain.internal.compilespec.CCompileSpec;
import org.gradle.nativeplatform.toolchain.internal.compilespec.CPCHCompileSpec;
import org.gradle.nativeplatform.toolchain.internal.compilespec.CppCompileSpec;
import org.gradle.nativeplatform.toolchain.internal.compilespec.CppPCHCompileSpec;
import org.gradle.nativeplatform.toolchain.internal.compilespec.WindowsResourceCompileSpec;
import org.gradle.nativeplatform.toolchain.internal.tools.CommandLineToolConfigurationInternal;
import org.gradle.process.internal.ExecActionFactory;
import java.io.File;
import java.util.List;
import java.util.Map;
class VisualCppPlatformToolProvider extends AbstractPlatformToolProvider implements SystemIncludesAwarePlatformToolProvider {
private final Map<ToolType, CommandLineToolConfigurationInternal> commandLineToolConfigurations;
private final VisualCppInstall visualCpp;
private final WindowsSdk sdk;
private final Ucrt ucrt;
private final NativePlatformInternal targetPlatform;
private final ExecActionFactory execActionFactory;
private final CompilerOutputFileNamingSchemeFactory compilerOutputFileNamingSchemeFactory;
private final WorkerLeaseService workerLeaseService;
VisualCppPlatformToolProvider(BuildOperationExecutor buildOperationExecutor, OperatingSystemInternal operatingSystem, Map<ToolType, CommandLineToolConfigurationInternal> commandLineToolConfigurations, VisualCppInstall visualCpp, WindowsSdk sdk, Ucrt ucrt, NativePlatformInternal targetPlatform, ExecActionFactory execActionFactory, CompilerOutputFileNamingSchemeFactory compilerOutputFileNamingSchemeFactory, WorkerLeaseService workerLeaseService) {
super(buildOperationExecutor, operatingSystem);
this.commandLineToolConfigurations = commandLineToolConfigurations;
this.visualCpp = visualCpp;
this.sdk = sdk;
this.ucrt = ucrt;
this.targetPlatform = targetPlatform;
this.execActionFactory = execActionFactory;
this.compilerOutputFileNamingSchemeFactory = compilerOutputFileNamingSchemeFactory;
this.workerLeaseService = workerLeaseService;
}
@Override
public boolean producesImportLibrary() {
return true;
}
@Override
public String getSharedLibraryLinkFileName(String libraryName) {
return getSharedLibraryName(libraryName).replaceFirst("\\.dll$", ".lib");
}
@Override
protected Compiler<CppCompileSpec> createCppCompiler() {
CommandLineToolInvocationWorker commandLineTool = tool("C++ compiler", visualCpp.getCompiler(targetPlatform));
CppCompiler cppCompiler = new CppCompiler(buildOperationExecutor, compilerOutputFileNamingSchemeFactory, commandLineTool, context(commandLineToolConfigurations.get(ToolType.CPP_COMPILER)), addDefinitions(CppCompileSpec.class), getObjectFileExtension(), true, workerLeaseService);
OutputCleaningCompiler<CppCompileSpec> outputCleaningCompiler = new OutputCleaningCompiler<CppCompileSpec>(cppCompiler, compilerOutputFileNamingSchemeFactory, getObjectFileExtension());
return versionAwareCompiler(outputCleaningCompiler);
}
@Override
protected Compiler<?> createCppPCHCompiler() {
CommandLineToolInvocationWorker commandLineTool = tool("C++ PCH compiler", visualCpp.getCompiler(targetPlatform));
CppPCHCompiler cppPCHCompiler = new CppPCHCompiler(buildOperationExecutor, compilerOutputFileNamingSchemeFactory, commandLineTool, context(commandLineToolConfigurations.get(ToolType.CPP_COMPILER)), pchSpecTransforms(CppPCHCompileSpec.class), getPCHFileExtension(), true, workerLeaseService);
OutputCleaningCompiler<CppPCHCompileSpec> outputCleaningCompiler = new OutputCleaningCompiler<CppPCHCompileSpec>(cppPCHCompiler, compilerOutputFileNamingSchemeFactory, getPCHFileExtension());
return versionAwareCompiler(outputCleaningCompiler);
}
@Override
protected Compiler<CCompileSpec> createCCompiler() {
CommandLineToolInvocationWorker commandLineTool = tool("C compiler", visualCpp.getCompiler(targetPlatform));
CCompiler cCompiler = new CCompiler(buildOperationExecutor, compilerOutputFileNamingSchemeFactory, commandLineTool, context(commandLineToolConfigurations.get(ToolType.C_COMPILER)), addDefinitions(CCompileSpec.class), getObjectFileExtension(), true, workerLeaseService);
OutputCleaningCompiler<CCompileSpec> outputCleaningCompiler = new OutputCleaningCompiler<CCompileSpec>(cCompiler, compilerOutputFileNamingSchemeFactory, getObjectFileExtension());
return versionAwareCompiler(outputCleaningCompiler);
}
@Override
protected Compiler<?> createCPCHCompiler() {
CommandLineToolInvocationWorker commandLineTool = tool("C PCH compiler", visualCpp.getCompiler(targetPlatform));
CPCHCompiler cpchCompiler = new CPCHCompiler(buildOperationExecutor, compilerOutputFileNamingSchemeFactory, commandLineTool, context(commandLineToolConfigurations.get(ToolType.C_COMPILER)), pchSpecTransforms(CPCHCompileSpec.class), getPCHFileExtension(), true, workerLeaseService);
OutputCleaningCompiler<CPCHCompileSpec> outputCleaningCompiler = new OutputCleaningCompiler<CPCHCompileSpec>(cpchCompiler, compilerOutputFileNamingSchemeFactory, getPCHFileExtension());
return versionAwareCompiler(outputCleaningCompiler);
}
private <T extends NativeCompileSpec> VersionAwareCompiler<T> versionAwareCompiler(OutputCleaningCompiler<T> outputCleaningCompiler) {
return new VersionAwareCompiler<T>(outputCleaningCompiler, new DefaultCompilerVersion(VisualCppToolChain.DEFAULT_NAME, "Microsoft", visualCpp.getVersion()));
}
@Override
protected Compiler<AssembleSpec> createAssembler() {
CommandLineToolInvocationWorker commandLineTool = tool("Assembler", visualCpp.getAssembler(targetPlatform));
return new Assembler(buildOperationExecutor, compilerOutputFileNamingSchemeFactory, commandLineTool, context(commandLineToolConfigurations.get(ToolType.ASSEMBLER)), addDefinitions(AssembleSpec.class), getObjectFileExtension(), false, workerLeaseService);
}
@Override
protected Compiler<?> createObjectiveCppCompiler() {
throw unavailableTool("Objective-C++ is not available on the Visual C++ toolchain");
}
@Override
protected Compiler<?> createObjectiveCCompiler() {
throw unavailableTool("Objective-C is not available on the Visual C++ toolchain");
}
@Override
protected Compiler<WindowsResourceCompileSpec> createWindowsResourceCompiler() {
CommandLineToolInvocationWorker commandLineTool = tool("Windows resource compiler", sdk.getResourceCompiler(targetPlatform));
String objectFileExtension = ".res";
WindowsResourceCompiler windowsResourceCompiler = new WindowsResourceCompiler(buildOperationExecutor, compilerOutputFileNamingSchemeFactory, commandLineTool, context(commandLineToolConfigurations.get(ToolType.WINDOW_RESOURCES_COMPILER)), addDefinitions(WindowsResourceCompileSpec.class), objectFileExtension, false, workerLeaseService);
return new OutputCleaningCompiler<WindowsResourceCompileSpec>(windowsResourceCompiler, compilerOutputFileNamingSchemeFactory, objectFileExtension);
}
@Override
protected Compiler<LinkerSpec> createLinker() {
CommandLineToolInvocationWorker commandLineTool = tool("Linker", visualCpp.getLinker(targetPlatform));
return new LinkExeLinker(buildOperationExecutor, commandLineTool, context(commandLineToolConfigurations.get(ToolType.LINKER)), addLibraryPath(), workerLeaseService);
}
@Override
protected Compiler<StaticLibraryArchiverSpec> createStaticLibraryArchiver() {
CommandLineToolInvocationWorker commandLineTool = tool("Static library archiver", visualCpp.getArchiver(targetPlatform));
return new LibExeStaticLibraryArchiver(buildOperationExecutor, commandLineTool, context(commandLineToolConfigurations.get(ToolType.STATIC_LIB_ARCHIVER)), Transformers.<StaticLibraryArchiverSpec>noOpTransformer(), workerLeaseService);
}
private CommandLineToolInvocationWorker tool(String toolName, File exe) {
return new DefaultCommandLineToolInvocationWorker(toolName, exe, execActionFactory);
}
private CommandLineToolContext context(CommandLineToolConfigurationInternal commandLineToolConfiguration) {
MutableCommandLineToolContext invocationContext = new DefaultMutableCommandLineToolContext();
// The visual C++ tools use the path to find other executables
// TODO:ADAM - restrict this to the specific path for the target tool
invocationContext.addPath(visualCpp.getPath(targetPlatform));
invocationContext.addPath(sdk.getBinDir(targetPlatform));
// Clear environment variables that might affect cl.exe & link.exe
clearEnvironmentVars(invocationContext, "INCLUDE", "CL", "LIBPATH", "LINK", "LIB");
invocationContext.setArgAction(commandLineToolConfiguration.getArgAction());
return invocationContext;
}
private void clearEnvironmentVars(MutableCommandLineToolContext invocation, String... names) {
// TODO: This check should really be done in the compiler process
Map<String, ?> environmentVariables = Jvm.current().getInheritableEnvironmentVariables(System.getenv());
for (String name : names) {
Object value = environmentVariables.get(name);
if (value != null) {
VisualCppToolChain.LOGGER.warn("Ignoring value '{}' set for environment variable '{}'.", value, name);
invocation.addEnvironmentVar(name, "");
}
}
}
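// Chains the PCH header-to-source transform with the standard macro-definition transform for precompiled-header compile specs.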
private <T extends NativeCompileSpec> Transformer<T, T> pchSpecTransforms(final Class<T> type) {
return new Transformer<T, T>() {
@Override
public T transform(T original) {
List<Transformer<T, T>> transformers = Lists.newArrayList();
transformers.add(PCHUtils.getHeaderToSourceFileTransformer(type));
transformers.add(addDefinitions(type));
T next = original;
for (Transformer<T, T> transformer : transformers) {
next = transformer.transform(next);
}
return next;
}
};
}
@Override
public List<File> getSystemIncludes() {
ImmutableList.Builder<File> builder = ImmutableList.builder();
builder.add(visualCpp.getIncludePath(targetPlatform));
builder.add(sdk.getIncludeDirs());
if (ucrt != null) {
builder.add(ucrt.getIncludeDirs());
}
return builder.build();
}
private <T extends NativeCompileSpec> Transformer<T, T> addDefinitions(Class<T> type) {
return new Transformer<T, T>() {
public T transform(T original) {
for (Map.Entry<String, String> definition : visualCpp.getDefinitions(targetPlatform).entrySet()) {
original.define(definition.getKey(), definition.getValue());
}
return original;
}
};
}
private Transformer<LinkerSpec, LinkerSpec> addLibraryPath() {
return new Transformer<LinkerSpec, LinkerSpec>() {
public LinkerSpec transform(LinkerSpec original) {
if (ucrt == null) {
original.libraryPath(visualCpp.getLibraryPath(targetPlatform), sdk.getLibDir(targetPlatform));
} else {
original.libraryPath(visualCpp.getLibraryPath(targetPlatform), sdk.getLibDir(targetPlatform), ucrt.getLibDir(targetPlatform));
}
return original;
}
};
}
public String getPCHFileExtension() {
return ".pch";
}
}
|
[] |
[] |
[] |
[]
|
[]
|
java
| 0 | 0 | |
system_tests/secure_manifests/secure_manifests_suite_test.go
|
// Copyright (C) 2015-Present Pivotal Software, Inc. All rights reserved.
// This program and the accompanying materials are made available under
// the terms of the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package secure_manifests_test
import (
"os"
"testing"
"github.com/pivotal-cf/on-demand-service-broker/system_tests/test_helpers/credhub_helpers"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/pborman/uuid"
. "github.com/pivotal-cf/on-demand-service-broker/system_tests/test_helpers/bosh_helpers"
"github.com/pivotal-cf/on-demand-service-broker/system_tests/test_helpers/service_helpers"
)
func TestSecureManifests(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "SecureManifests Suite")
}
var (
brokerInfo BrokerInfo
credhubCLI *credhub_helpers.CredHubCLI
)
var _ = BeforeSuite(func() {
uniqueID := uuid.New()[:6]
brokerInfo = BrokerInfo{}
brokerInfo = DeployAndRegisterBroker(
"-secure-manifests-"+uniqueID,
BrokerDeploymentOptions{},
service_helpers.Redis,
[]string{"basic_service_catalog.yml", "enable_secure_manifests.yml"},
)
credhubCLI = credhub_helpers.NewCredHubCLI(os.Getenv("CREDHUB_CLIENT"), os.Getenv("CREDHUB_SECRET"))
})
var _ = AfterSuite(func() {
DeregisterAndDeleteBroker(brokerInfo.DeploymentName)
})
|
[
"\"CREDHUB_CLIENT\"",
"\"CREDHUB_SECRET\""
] |
[] |
[
"CREDHUB_CLIENT",
"CREDHUB_SECRET"
] |
[]
|
["CREDHUB_CLIENT", "CREDHUB_SECRET"]
|
go
| 2 | 0 | |
capture.go
|
package main
import (
"fmt"
"os"
"os/exec"
"strconv"
"time"
)
// CaptureImage take a screen shot of terminal
// TODO: Terminal/iTerm or X-Window only?
func CaptureImage(path string) (string, error) {
switch os.Getenv("WINDOWID") {
case "":
return captureByScreencapture(path)
default:
return captureByXwd(path)
}
}
// func captureByScreencapture(dir string, filename string) (img image.Image, err error) {
func captureByScreencapture(path string) (fileType string, err error) {
var program string
switch os.Getenv("TERM_PROGRAM") {
case "iTerm.app":
program = "iTerm"
case "Apple_Terminal":
program = "Terminal"
default:
return "", fmt.Errorf("cannot get screenshot")
}
// get window id
windowID, err := exec.Command("osascript", "-e",
fmt.Sprintf("tell app \"%s\" to id of window 1", program),
).Output()
if err != nil {
return
}
// get screen capture
err = exec.Command("screencapture", "-l", string(windowID), "-o", "-m", "-t", "png", path).Run()
if err != nil {
return
}
// resize image if high resolution (retina display)
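// getProperty reads one image property by piping "sips -g <key>" through awk and parsing the numeric value.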
getProperty := func(key string) (result float64, err error) {
sips := exec.Command("sips", "-g", key, path)
awk := exec.Command("awk", "/:/ { print $2 }")
sipsOut, err := sips.StdoutPipe()
if err != nil {
return
}
awk.Stdin = sipsOut
if err = sips.Start(); err != nil {
return
}
output, err := awk.Output()
if err != nil {
return
}
err = sips.Wait()
if err != nil {
return
}
str := string(output)
result, err = strconv.ParseFloat(str[:len(str)-1], 32)
if err != nil {
return
}
return result, nil
}
properties, err := func() (results map[string]float64, err error) {
results = make(map[string]float64)
for _, key := range []string{"pixelHeight", "pixelWidth", "dpiHeight", "dpiWidth"} {
var property float64
property, err = getProperty(key)
if err != nil {
return
}
results[key] = property
}
return results, nil
}()
if err != nil {
return
}
if properties["dpiHeight"] > 72.0 && properties["dpiWidth"] > 72.0 {
pixelHeight := int(properties["pixelHeight"] * 72.0 / properties["dpiHeight"])
pixelWidth := int(properties["pixelWidth"] * 72.0 / properties["dpiWidth"])
err = exec.Command("sips",
"-s", "dpiWidth", "72.0", "-s", "dpiHeight", "72.0",
"-z", strconv.Itoa(pixelHeight), strconv.Itoa(pixelWidth),
path,
).Run()
if err != nil {
return
}
}
return "png", nil
}
func captureByXwd(path string) (fileType string, err error) {
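// captureByXwd retries the "xwd" window dump up to 10 times (the X server may be momentarily busy) before giving up.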
out, err := exec.Command("which", "xwd").CombinedOutput()
if err != nil {
return "", fmt.Errorf(string(out))
}
var success = false
for i := 0; i < 10; i++ {
err = exec.Command("xwd", "-silent", "-id", os.Getenv("WINDOWID"), "-out", path).Run()
if err == nil {
success = true
break
}
time.Sleep(time.Millisecond * 100)
}
if success {
return "xwd", nil
}
return
}
|
[
"\"WINDOWID\"",
"\"TERM_PROGRAM\"",
"\"WINDOWID\""
] |
[] |
[
"WINDOWID",
"TERM_PROGRAM"
] |
[]
|
["WINDOWID", "TERM_PROGRAM"]
|
go
| 2 | 0 | |
python/ray/tests/test_usage_stats.py
|
import os
import pytest
import sys
import ray
import pathlib
import json
import time
from dataclasses import asdict
from pathlib import Path
from jsonschema import validate
import ray._private.usage.usage_lib as ray_usage_lib
import ray._private.usage.usage_constants as usage_constants
from ray._private.usage.usage_lib import ClusterConfigToReport
from ray._private.usage.usage_lib import UsageStatsEnabledness
from ray.autoscaler._private.cli_logger import cli_logger
from ray._private.test_utils import wait_for_condition
schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {
"schema_version": {"type": "string"},
"source": {"type": "string"},
"session_id": {"type": "string"},
"ray_version": {"type": "string"},
"git_commit": {"type": "string"},
"os": {"type": "string"},
"python_version": {"type": "string"},
"collect_timestamp_ms": {"type": "integer"},
"session_start_timestamp_ms": {"type": "integer"},
"cloud_provider": {"type": ["null", "string"]},
"min_workers": {"type": ["null", "integer"]},
"max_workers": {"type": ["null", "integer"]},
"head_node_instance_type": {"type": ["null", "string"]},
"worker_node_instance_types": {
"type": ["null", "array"],
"items": {"type": "string"},
},
"total_num_cpus": {"type": ["null", "integer"]},
"total_num_gpus": {"type": ["null", "integer"]},
"total_memory_gb": {"type": ["null", "number"]},
"total_object_store_memory_gb": {"type": ["null", "number"]},
"library_usages": {
"type": ["null", "array"],
"items": {"type": "string"},
},
"total_success": {"type": "integer"},
"total_failed": {"type": "integer"},
"seq_number": {"type": "integer"},
},
}
def file_exists(temp_dir: Path):
for path in temp_dir.iterdir():
if usage_constants.USAGE_STATS_FILE in str(path):
return True
return False
def read_file(temp_dir: Path, column: str):
usage_stats_file = temp_dir / usage_constants.USAGE_STATS_FILE
with usage_stats_file.open() as f:
result = json.load(f)
return result[column]
def print_dashboard_log():
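# Print the dashboard.log of the current Ray session to help debug failed usage-stats assertions.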
session_dir = ray.worker.global_worker.node.address_info["session_dir"]
session_path = Path(session_dir)
log_dir_path = session_path / "logs"
paths = list(log_dir_path.iterdir())
contents = None
for path in paths:
if "dashboard.log" in str(path):
with open(str(path), "r") as f:
contents = f.readlines()
from pprint import pprint
pprint(contents)
def test_usage_stats_enabledness(monkeypatch, tmp_path):
with monkeypatch.context() as m:
m.setenv("RAY_USAGE_STATS_ENABLED", "1")
assert (
ray_usage_lib._usage_stats_enabledness()
is UsageStatsEnabledness.ENABLED_EXPLICITLY
)
with monkeypatch.context() as m:
m.setenv("RAY_USAGE_STATS_ENABLED", "0")
assert (
ray_usage_lib._usage_stats_enabledness()
is UsageStatsEnabledness.DISABLED_EXPLICITLY
)
with monkeypatch.context() as m:
m.setenv("RAY_USAGE_STATS_ENABLED", "xxx")
with pytest.raises(ValueError):
ray_usage_lib._usage_stats_enabledness()
with monkeypatch.context() as m:
tmp_usage_stats_config_path = tmp_path / "config.json"
monkeypatch.setenv(
"RAY_USAGE_STATS_CONFIG_PATH", str(tmp_usage_stats_config_path)
)
tmp_usage_stats_config_path.write_text('{"usage_stats": true}')
assert (
ray_usage_lib._usage_stats_enabledness()
is UsageStatsEnabledness.ENABLED_EXPLICITLY
)
tmp_usage_stats_config_path.write_text('{"usage_stats": false}')
assert (
ray_usage_lib._usage_stats_enabledness()
is UsageStatsEnabledness.DISABLED_EXPLICITLY
)
tmp_usage_stats_config_path.write_text('{"usage_stats": "xxx"}')
with pytest.raises(ValueError):
ray_usage_lib._usage_stats_enabledness()
tmp_usage_stats_config_path.write_text("")
assert (
ray_usage_lib._usage_stats_enabledness()
is UsageStatsEnabledness.ENABLED_BY_DEFAULT
)
tmp_usage_stats_config_path.unlink()
assert (
ray_usage_lib._usage_stats_enabledness()
is UsageStatsEnabledness.ENABLED_BY_DEFAULT
)
def test_set_usage_stats_enabled_via_config(monkeypatch, tmp_path):
tmp_usage_stats_config_path = tmp_path / "config1.json"
monkeypatch.setenv("RAY_USAGE_STATS_CONFIG_PATH", str(tmp_usage_stats_config_path))
ray_usage_lib.set_usage_stats_enabled_via_config(True)
assert '{"usage_stats": true}' == tmp_usage_stats_config_path.read_text()
ray_usage_lib.set_usage_stats_enabled_via_config(False)
assert '{"usage_stats": false}' == tmp_usage_stats_config_path.read_text()
tmp_usage_stats_config_path.write_text('"xxx"')
ray_usage_lib.set_usage_stats_enabled_via_config(True)
assert '{"usage_stats": true}' == tmp_usage_stats_config_path.read_text()
tmp_usage_stats_config_path.unlink()
os.makedirs(os.path.dirname(tmp_usage_stats_config_path / "xxx.txt"), exist_ok=True)
with pytest.raises(Exception, match="Failed to enable usage stats.*"):
ray_usage_lib.set_usage_stats_enabled_via_config(True)
def test_usage_stats_prompt(monkeypatch, capsys, tmp_path):
"""
Test usage stats prompt is shown in the proper cases.
"""
with monkeypatch.context() as m:
m.setenv("RAY_USAGE_STATS_ENABLED", "1")
m.setenv("RAY_USAGE_STATS_PROMPT_ENABLED", "0")
ray_usage_lib.show_usage_stats_prompt()
captured = capsys.readouterr()
assert usage_constants.USAGE_STATS_ENABLED_MESSAGE not in captured.out
assert usage_constants.USAGE_STATS_ENABLED_MESSAGE not in captured.err
with monkeypatch.context() as m:
m.setenv("RAY_USAGE_STATS_ENABLED", "0")
ray_usage_lib.show_usage_stats_prompt()
captured = capsys.readouterr()
assert usage_constants.USAGE_STATS_DISABLED_MESSAGE in captured.out
with monkeypatch.context() as m:
m.delenv("RAY_USAGE_STATS_ENABLED", raising=False)
tmp_usage_stats_config_path = tmp_path / "config1.json"
monkeypatch.setenv(
"RAY_USAGE_STATS_CONFIG_PATH", str(tmp_usage_stats_config_path)
)
# Usage stats collection is enabled by default.
ray_usage_lib.show_usage_stats_prompt()
captured = capsys.readouterr()
assert usage_constants.USAGE_STATS_ENABLED_BY_DEFAULT_MESSAGE in captured.out
with monkeypatch.context() as m:
# Win impl relies on kbhit() instead of select()
# so the pipe trick won't work.
if sys.platform != "win32":
m.delenv("RAY_USAGE_STATS_ENABLED", raising=False)
saved_interactive = cli_logger.interactive
saved_stdin = sys.stdin
tmp_usage_stats_config_path = tmp_path / "config2.json"
monkeypatch.setenv(
"RAY_USAGE_STATS_CONFIG_PATH", str(tmp_usage_stats_config_path)
)
cli_logger.interactive = True
(r_pipe, w_pipe) = os.pipe()
sys.stdin = open(r_pipe)
os.write(w_pipe, b"y\n")
ray_usage_lib.show_usage_stats_prompt()
captured = capsys.readouterr()
assert usage_constants.USAGE_STATS_CONFIRMATION_MESSAGE in captured.out
assert usage_constants.USAGE_STATS_ENABLED_MESSAGE in captured.out
cli_logger.interactive = saved_interactive
sys.stdin = saved_stdin
with monkeypatch.context() as m:
if sys.platform != "win32":
m.delenv("RAY_USAGE_STATS_ENABLED", raising=False)
saved_interactive = cli_logger.interactive
saved_stdin = sys.stdin
tmp_usage_stats_config_path = tmp_path / "config3.json"
monkeypatch.setenv(
"RAY_USAGE_STATS_CONFIG_PATH", str(tmp_usage_stats_config_path)
)
cli_logger.interactive = True
(r_pipe, w_pipe) = os.pipe()
sys.stdin = open(r_pipe)
os.write(w_pipe, b"n\n")
ray_usage_lib.show_usage_stats_prompt()
captured = capsys.readouterr()
assert usage_constants.USAGE_STATS_CONFIRMATION_MESSAGE in captured.out
assert usage_constants.USAGE_STATS_DISABLED_MESSAGE in captured.out
cli_logger.interactive = saved_interactive
sys.stdin = saved_stdin
with monkeypatch.context() as m:
m.delenv("RAY_USAGE_STATS_ENABLED", raising=False)
saved_interactive = cli_logger.interactive
saved_stdin = sys.stdin
tmp_usage_stats_config_path = tmp_path / "config4.json"
monkeypatch.setenv(
"RAY_USAGE_STATS_CONFIG_PATH", str(tmp_usage_stats_config_path)
)
cli_logger.interactive = True
(r_pipe, w_pipe) = os.pipe()
sys.stdin = open(r_pipe)
ray_usage_lib.show_usage_stats_prompt()
captured = capsys.readouterr()
assert usage_constants.USAGE_STATS_CONFIRMATION_MESSAGE in captured.out
assert usage_constants.USAGE_STATS_ENABLED_MESSAGE in captured.out
cli_logger.interactive = saved_interactive
sys.stdin = saved_stdin
with monkeypatch.context() as m:
# Usage stats is not enabled for ray.init()
ray.init()
ray.shutdown()
captured = capsys.readouterr()
assert (
usage_constants.USAGE_STATS_ENABLED_BY_DEFAULT_MESSAGE not in captured.out
)
assert (
usage_constants.USAGE_STATS_ENABLED_BY_DEFAULT_MESSAGE not in captured.err
)
def test_usage_lib_cluster_metadata_generation(monkeypatch, ray_start_cluster):
with monkeypatch.context() as m:
m.setenv("RAY_USAGE_STATS_ENABLED", "1")
m.setenv("RAY_USAGE_STATS_REPORT_URL", "http://127.0.0.1:8000")
cluster = ray_start_cluster
cluster.add_node(num_cpus=0)
ray.init(address=cluster.address)
"""
Test metadata stored is equivalent to `_generate_cluster_metadata`.
"""
meta = ray_usage_lib._generate_cluster_metadata()
cluster_metadata = ray_usage_lib.get_cluster_metadata(
ray.experimental.internal_kv.internal_kv_get_gcs_client(), num_retries=20
)
# Remove fields that are dynamically changed.
assert meta.pop("session_id")
assert meta.pop("session_start_timestamp_ms")
assert cluster_metadata.pop("session_id")
assert cluster_metadata.pop("session_start_timestamp_ms")
assert meta == cluster_metadata
"""
Make sure put & get works properly.
"""
cluster_metadata = ray_usage_lib.put_cluster_metadata(
ray.experimental.internal_kv.internal_kv_get_gcs_client(), num_retries=20
)
assert cluster_metadata == ray_usage_lib.get_cluster_metadata(
ray.experimental.internal_kv.internal_kv_get_gcs_client(), num_retries=20
)
def test_library_usages():
if os.environ.get("RAY_MINIMAL") == "1":
# Doesn't work with minimal installation
# since we import serve.
return
ray_usage_lib._recorded_library_usages.clear()
ray_usage_lib.record_library_usage("pre_init")
ray.init()
ray_usage_lib.record_library_usage("post_init")
ray.workflow.init()
ray.data.range(10)
from ray import serve
serve.start()
library_usages = ray_usage_lib.get_library_usages_to_report(
ray.experimental.internal_kv.internal_kv_get_gcs_client(), num_retries=20
)
assert set(library_usages) == {
"pre_init",
"post_init",
"dataset",
"workflow",
"serve",
}
serve.shutdown()
ray.shutdown()
def test_usage_lib_cluster_metadata_generation_usage_disabled(
monkeypatch, shutdown_only
):
"""
Make sure only version information is generated when usage stats are not enabled.
"""
with monkeypatch.context() as m:
m.setenv("RAY_USAGE_STATS_ENABLED", "0")
meta = ray_usage_lib._generate_cluster_metadata()
assert "ray_version" in meta
assert "python_version" in meta
assert len(meta) == 2
def test_usage_lib_get_cluster_status_to_report(shutdown_only):
ray.init(num_cpus=3, num_gpus=1, object_store_memory=2 ** 30)
# Wait for monitor.py to update cluster status
wait_for_condition(
lambda: ray_usage_lib.get_cluster_status_to_report(
ray.experimental.internal_kv.internal_kv_get_gcs_client(),
num_retries=20,
).total_num_cpus
== 3,
timeout=10,
)
cluster_status_to_report = ray_usage_lib.get_cluster_status_to_report(
ray.experimental.internal_kv.internal_kv_get_gcs_client(),
num_retries=20,
)
assert cluster_status_to_report.total_num_cpus == 3
assert cluster_status_to_report.total_num_gpus == 1
assert cluster_status_to_report.total_memory_gb > 0
assert cluster_status_to_report.total_object_store_memory_gb == 1.0
def test_usage_lib_get_cluster_config_to_report(monkeypatch, tmp_path):
cluster_config_file_path = tmp_path / "ray_bootstrap_config.yaml"
""" Test minimal cluster config"""
cluster_config_file_path.write_text(
"""
cluster_name: minimal
max_workers: 1
provider:
type: aws
region: us-west-2
availability_zone: us-west-2a
"""
)
cluster_config_to_report = ray_usage_lib.get_cluster_config_to_report(
cluster_config_file_path
)
assert cluster_config_to_report.cloud_provider == "aws"
assert cluster_config_to_report.min_workers is None
assert cluster_config_to_report.max_workers == 1
assert cluster_config_to_report.head_node_instance_type is None
assert cluster_config_to_report.worker_node_instance_types is None
cluster_config_file_path.write_text(
"""
cluster_name: full
min_workers: 1
provider:
type: gcp
head_node_type: head_node
available_node_types:
head_node:
node_config:
InstanceType: m5.large
min_workers: 0
max_workers: 0
aws_worker_node:
node_config:
InstanceType: m3.large
min_workers: 0
max_workers: 0
azure_worker_node:
node_config:
azure_arm_parameters:
vmSize: Standard_D2s_v3
gcp_worker_node:
node_config:
machineType: n1-standard-2
"""
)
cluster_config_to_report = ray_usage_lib.get_cluster_config_to_report(
cluster_config_file_path
)
assert cluster_config_to_report.cloud_provider == "gcp"
assert cluster_config_to_report.min_workers == 1
assert cluster_config_to_report.max_workers is None
assert cluster_config_to_report.head_node_instance_type == "m5.large"
assert cluster_config_to_report.worker_node_instance_types == list(
{"m3.large", "Standard_D2s_v3", "n1-standard-2"}
)
cluster_config_file_path.write_text(
"""
cluster_name: full
head_node_type: head_node
available_node_types:
worker_node_1:
node_config:
ImageId: xyz
worker_node_2:
resources: {}
worker_node_3:
node_config:
InstanceType: m5.large
"""
)
cluster_config_to_report = ray_usage_lib.get_cluster_config_to_report(
cluster_config_file_path
)
assert cluster_config_to_report.cloud_provider is None
assert cluster_config_to_report.min_workers is None
assert cluster_config_to_report.max_workers is None
assert cluster_config_to_report.head_node_instance_type is None
assert cluster_config_to_report.worker_node_instance_types == ["m5.large"]
cluster_config_file_path.write_text("[invalid")
cluster_config_to_report = ray_usage_lib.get_cluster_config_to_report(
cluster_config_file_path
)
assert cluster_config_to_report == ClusterConfigToReport()
cluster_config_to_report = ray_usage_lib.get_cluster_config_to_report(
tmp_path / "does_not_exist.yaml"
)
assert cluster_config_to_report == ClusterConfigToReport()
monkeypatch.setenv("KUBERNETES_SERVICE_HOST", "localhost")
cluster_config_to_report = ray_usage_lib.get_cluster_config_to_report(
tmp_path / "does_not_exist.yaml"
)
assert cluster_config_to_report.cloud_provider == "kubernetes"
assert cluster_config_to_report.min_workers is None
assert cluster_config_to_report.max_workers is None
assert cluster_config_to_report.head_node_instance_type is None
assert cluster_config_to_report.worker_node_instance_types is None
@pytest.mark.skipif(
sys.platform == "win32",
reason="Test depends on runtime env feature not supported on Windows.",
)
def test_usage_lib_report_data(monkeypatch, ray_start_cluster, tmp_path):
with monkeypatch.context() as m:
m.setenv("RAY_USAGE_STATS_ENABLED", "1")
m.setenv("RAY_USAGE_STATS_REPORT_URL", "http://127.0.0.1:8000")
cluster = ray_start_cluster
cluster.add_node(num_cpus=0)
# Runtime env is required to run this test in minimal installation test.
ray.init(address=cluster.address, runtime_env={"pip": ["ray[serve]"]})
"""
Make sure the generated data is following the schema.
"""
cluster_metadata = ray_usage_lib.get_cluster_metadata(
ray.experimental.internal_kv.internal_kv_get_gcs_client(), num_retries=20
)
cluster_config_file_path = tmp_path / "ray_bootstrap_config.yaml"
cluster_config_file_path.write_text(
"""
cluster_name: minimal
max_workers: 1
provider:
type: aws
region: us-west-2
availability_zone: us-west-2a
"""
)
cluster_config_to_report = ray_usage_lib.get_cluster_config_to_report(
cluster_config_file_path
)
d = ray_usage_lib.generate_report_data(
cluster_metadata, cluster_config_to_report, 2, 2, 2
)
validate(instance=asdict(d), schema=schema)
"""
Make sure writing to a file works as expected
"""
client = ray_usage_lib.UsageReportClient()
temp_dir = Path(tmp_path)
client.write_usage_data(d, temp_dir)
wait_for_condition(lambda: file_exists(temp_dir))
"""
Make sure report usage data works as expected
"""
@ray.remote(num_cpus=0)
class ServeInitator:
def __init__(self):
# Start the ray serve server to verify requests are sent
# to the right place.
from ray import serve
serve.start()
@serve.deployment(ray_actor_options={"num_cpus": 0})
async def usage(request):
body = await request.json()
if body == asdict(d):
return True
else:
return False
usage.deploy()
def ready(self):
pass
# We need to start a serve with runtime env to make this test
# work with minimal installation.
s = ServeInitator.remote()
ray.get(s.ready.remote())
# Query our endpoint over HTTP.
r = client.report_usage_data("http://127.0.0.1:8000/usage", d)
r.raise_for_status()
assert json.loads(r.text) is True
@pytest.mark.skipif(
sys.platform == "win32",
reason="Test depends on runtime env feature not supported on Windows.",
)
def test_usage_report_e2e(monkeypatch, ray_start_cluster, tmp_path):
"""
Test usage report works e2e with env vars.
"""
cluster_config_file_path = tmp_path / "ray_bootstrap_config.yaml"
cluster_config_file_path.write_text(
"""
cluster_name: minimal
max_workers: 1
provider:
type: aws
region: us-west-2
availability_zone: us-west-2a
"""
)
with monkeypatch.context() as m:
m.setenv("HOME", str(tmp_path))
m.setenv("RAY_USAGE_STATS_ENABLED", "1")
m.setenv("RAY_USAGE_STATS_REPORT_URL", "http://127.0.0.1:8000/usage")
m.setenv("RAY_USAGE_STATS_REPORT_INTERVAL_S", "1")
cluster = ray_start_cluster
cluster.add_node(num_cpus=3)
ray_usage_lib._recorded_library_usages.clear()
if os.environ.get("RAY_MINIMAL") != "1":
from ray import tune # noqa: F401
from ray.rllib.agents.ppo import PPOTrainer # noqa: F401
from ray import train # noqa: F401
ray.init(address=cluster.address)
@ray.remote(num_cpus=0)
class StatusReporter:
def __init__(self):
self.reported = 0
self.payload = None
def report_payload(self, payload):
self.payload = payload
def reported(self):
self.reported += 1
def get(self):
return self.reported
def get_payload(self):
return self.payload
reporter = StatusReporter.remote()
@ray.remote(num_cpus=0, runtime_env={"pip": ["ray[serve]"]})
class ServeInitator:
def __init__(self):
# This is used in the worker process
# so it won't be tracked as library usage.
from ray import serve
serve.start()
# Usage report should be sent to the URL every 1 second.
@serve.deployment(ray_actor_options={"num_cpus": 0})
async def usage(request):
body = await request.json()
reporter.reported.remote()
reporter.report_payload.remote(body)
return True
usage.deploy()
def ready(self):
pass
# We need to start a serve with runtime env to make this test
# work with minimal installation.
s = ServeInitator.remote()
ray.get(s.ready.remote())
"""
Verify the usage stats are reported to the server.
"""
print("Verifying usage stats report.")
# Since the interval is 1 second, there must have been
# more than 5 requests sent within 30 seconds.
try:
wait_for_condition(lambda: ray.get(reporter.get.remote()) > 5, timeout=30)
except Exception:
print_dashboard_log()
raise
payload = ray.get(reporter.get_payload.remote())
ray_version, python_version = ray._private.utils.compute_version_info()
assert payload["ray_version"] == ray_version
assert payload["python_version"] == python_version
assert payload["schema_version"] == "0.1"
assert payload["os"] == sys.platform
assert payload["source"] == "OSS"
assert payload["cloud_provider"] == "aws"
assert payload["min_workers"] is None
assert payload["max_workers"] == 1
assert payload["head_node_instance_type"] is None
assert payload["worker_node_instance_types"] is None
assert payload["total_num_cpus"] == 3
assert payload["total_num_gpus"] is None
assert payload["total_memory_gb"] > 0
assert payload["total_object_store_memory_gb"] > 0
if os.environ.get("RAY_MINIMAL") == "1":
assert set(payload["library_usages"]) == set()
else:
assert set(payload["library_usages"]) == {"rllib", "train", "tune"}
validate(instance=payload, schema=schema)
"""
Verify the usage_stats.json is updated.
"""
print("Verifying usage stats write.")
global_node = ray.worker._global_node
temp_dir = pathlib.Path(global_node.get_session_dir_path())
wait_for_condition(lambda: file_exists(temp_dir), timeout=30)
timestamp_old = read_file(temp_dir, "usage_stats")["collect_timestamp_ms"]
success_old = read_file(temp_dir, "usage_stats")["total_success"]
# Test if the timestamp has been updated.
wait_for_condition(
lambda: timestamp_old
< read_file(temp_dir, "usage_stats")["collect_timestamp_ms"]
)
wait_for_condition(
lambda: success_old < read_file(temp_dir, "usage_stats")["total_success"]
)
assert read_file(temp_dir, "success")
def test_first_usage_report_delayed(monkeypatch, ray_start_cluster):
with monkeypatch.context() as m:
m.setenv("RAY_USAGE_STATS_ENABLED", "1")
m.setenv("RAY_USAGE_STATS_REPORT_URL", "http://127.0.0.1:8000")
m.setenv("RAY_USAGE_STATS_REPORT_INTERVAL_S", "10")
cluster = ray_start_cluster
cluster.add_node(num_cpus=0)
ray.init(address=cluster.address)
# The first report should be delayed for 10s.
time.sleep(5)
session_dir = ray.worker.global_worker.node.address_info["session_dir"]
session_path = Path(session_dir)
assert not (session_path / usage_constants.USAGE_STATS_FILE).exists()
time.sleep(10)
assert (session_path / usage_constants.USAGE_STATS_FILE).exists()
def test_usage_report_disabled(monkeypatch, ray_start_cluster):
"""
Make sure usage report module is disabled when the env var is not set.
It also verifies that the failure message is not printed (note that
the invalid report url is given as an env var).
"""
with monkeypatch.context() as m:
m.setenv("RAY_USAGE_STATS_ENABLED", "0")
m.setenv("RAY_USAGE_STATS_REPORT_URL", "http://127.0.0.1:8000")
m.setenv("RAY_USAGE_STATS_REPORT_INTERVAL_S", "1")
cluster = ray_start_cluster
cluster.add_node(num_cpus=0)
ray.init(address=cluster.address)
# Wait enough so that usage report should happen.
time.sleep(5)
session_dir = ray.worker.global_worker.node.address_info["session_dir"]
session_path = Path(session_dir)
log_dir_path = session_path / "logs"
paths = list(log_dir_path.iterdir())
contents = None
for path in paths:
if "dashboard.log" in str(path):
with open(str(path), "r") as f:
contents = f.readlines()
assert contents is not None
keyword_found = False
for c in contents:
if "Usage reporting is disabled" in c:
keyword_found = True
# Make sure the module was disabled.
assert keyword_found
for c in contents:
assert "Failed to report usage stats" not in c
def test_usage_file_error_message(monkeypatch, ray_start_cluster):
"""
Make sure the usage report file is generated with a proper
error message when the report is failed.
"""
with monkeypatch.context() as m:
m.setenv("RAY_USAGE_STATS_ENABLED", "1")
m.setenv("RAY_USAGE_STATS_REPORT_URL", "http://127.0.0.1:8000")
m.setenv("RAY_USAGE_STATS_REPORT_INTERVAL_S", "1")
cluster = ray_start_cluster
cluster.add_node(num_cpus=0)
ray.init(address=cluster.address)
global_node = ray.worker._global_node
temp_dir = pathlib.Path(global_node.get_session_dir_path())
try:
wait_for_condition(lambda: file_exists(temp_dir), timeout=30)
except Exception:
print_dashboard_log()
raise
error_message = read_file(temp_dir, "error")
failure_old = read_file(temp_dir, "usage_stats")["total_failed"]
report_success = read_file(temp_dir, "success")
# Check that the connection failure is captured in the error message.
assert (
"HTTPConnectionPool(host='127.0.0.1', port=8000): "
"Max retries exceeded with url:"
) in error_message
assert not report_success
try:
wait_for_condition(
lambda: failure_old < read_file(temp_dir, "usage_stats")["total_failed"]
)
except Exception:
print_dashboard_log()
read_file(temp_dir, "usage_stats")["total_failed"]
raise
assert read_file(temp_dir, "usage_stats")["total_success"] == 0
if __name__ == "__main__":
sys.exit(pytest.main(["-v", __file__]))
| [] | [] | ["RAY_MINIMAL"] | [] | ["RAY_MINIMAL"] | python | 1 | 0 | |
src/i18n.py
|
# -*- coding: utf-8 -*-
"""Internationalization.
Use
from i18n import translate as _
to enable string localization. In the future it could easily be
replaced by gettext, since translatable strings are marked in the
same way, as _("text").
Besides the translation service, this module defines the messages
used in the application, such as the help and the usage messages.
"""
import os
import sys
import atexit
############################################################
# Translation function
############################################################
def translate(text):
"""Translate the text in the current language."""
try:
return _strings[text]
except KeyError:
pass
# Try removing the last character, since the loaded translations have trailing spaces stripped.
try:
return _strings[text[:-1]] + text[-1]
except KeyError:
_missing_translation.add(text)
s = (text if len(text) < 15 else text[:15] + "...")
print("[TT] Warning: missing translation for '%s'" % s, file=sys.stderr)
return text
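# Example of typical use (illustrative sketch, not part of the original
# module): callers import the function under the gettext-style alias and
# wrap every user-visible string, e.g.
#
#   from i18n import translate as _
#   print(_("ERROR"))
#
# With LANG=it_IT.UTF-8 this prints "ERRORE" from the table below; for any
# unsupported language the English text is returned unchanged.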
############################################################
# Extra functions
############################################################
def register_translation(english_text, language, translation):
"""Function to add translations from other modules."""
if _lang == language:
_strings[english_text] = translation
elif _lang == "en":
_strings[english_text] = english_text
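# Example (illustrative, with a made-up string): another module can register
# its own translations so that translate() resolves them later:
#
#   import i18n
#   i18n.register_translation("Hello", "it", "Ciao")
#   i18n.translate("Hello")   # -> "Ciao" when the detected language is 'it'
#
# Registrations only take effect for the language detected at import time;
# for English the string is simply mapped to itself.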
############################################################
# Application messages
############################################################
_strings = {}
_missing_translation = set()
_lang = 'en'
_translations = {
'it': {
"Run tests to verify the correctness of a program.": "Esegue dei test per verificare la correttezza di un programma.",
"set the verbosity level, where the level must be an integer between 0 (minimum) and 4 (maximum). The default value is 3.": "imposto il livello di verbosità. Il livello deve essere un valore intero tra 0 (minimo) e 3 (massimo). Il default è 2.",
"set how many seconds it should be waited for the termination of the program. The default is 10 seconds.": "imposta per quanti secondi bisogna attendere la terminazione del programma. Il default è pari a 10 secondi.",
"cut the output of the program to a maximum of L lines. The default is 10000.": "taglia l'output del programma ad un massimo di L linee. Il default è 10000.",
"reports up to N errors per section (default 4).": "riporta fino ad un massimo di N errori per sezione (default 4).",
"uses the specified configuration file.": "utilizza il file di configurazione specificato.",
"enable or disable colored output (default AUTO).": "abilita o disabilita l'output colorato (default AUTO).",
"use Valgrind (if installed) to check memory usage.": "utilizza Valgrind (se installato) per controllare l'utilizzo della memoria.",
"select the output type.": "seleziona il tipo di output.",
"specify the name of the file used for logging. The default is ~/.pvcheck.log.": "specifica il nome del file usato per il logging. Il default è ~/.pvcheck.log.",
"print this message and exit.": "stampa questo messaggio ed esce.",
"list all the available tests.": "mostra tutti i test disponibili.",
"run only the selected test.": "esegue solo il test indicato.",
"export in a file the input arguments from the selected test.": "salva in un file gli argomenti di input dal test indicato.",
"ERROR": "ERRORE",
"OK": "OK",
"WARNING!": "ATTENZIONE!",
"COMMAND LINE": "RIGA DI COMANDO",
"<temp.file>": "<file temp.>",
"INPUT": "INPUT",
"Input:": "Input:",
"OUTPUT": "OUTPUT",
"TEMPORARY FILE": "FILE TEMPORANEO",
"TEST": "TEST",
"line %d": " riga %d",
"(expected '%s'": " (atteso '%s'",
", got '%s')": "ottenuto '%s')",
"unexpected line '%s'": "riga inattesa '%s'",
"missing line (expected '%s')": "riga mancante (atteso '%s')",
"wrong number of lines (expected %d, got %d)": "numero di righe errato (atteso %d, ottenuto %d)",
"line %d is wrong (expected '%s', got '%s')": "riga %d errata (atteso '%s', ottenuto '%s')",
"The first %d lines matched correctly": "Le prime %d righe sono corrette",
"(... plus other %d errors ...)": "(... più altri %d errori ...)",
"ACTUAL OUTPUT": "OUTPUT EFFETTIVO",
"EXPECTED OUTPUT": "OUTPUT ATTESO",
"detailed comparison": "confronto dettagliato",
"<nothing>": "<niente>",
"missing section": "sezione mancante",
"empty section": "sezione vuota",
"extra section": "sezione extra",
"Invalid parameter": "Parametro non valido",
"Invalid parameter ('%s')": "Parametro non valido ('%s')",
"Invalid parameter ('%d')": "Parametro non valido('%d')",
"Invalid parameter ('%f')": "Parametro non valido('%f')",
"TIMEOUT EXPIRED: PROCESS TERMINATED": "TEMPO LIMITE SCADUTO: PROCESSO TERMINATO",
"TOO MANY OUTPUT LINES": "TROPPE LINEE DI OUTPUT",
"PROCESS ENDED WITH A FAILURE": "PROCESSO TERMINATO CON UN FALLIMENTO",
"(SEGMENTATION FAULT)": "(SEGMENTATION FAULT)",
"(ERROR CODE {status})": "(CODICE D'ERRORE {status})",
"FAILED TO RUN THE FILE '{progname}'": "IMPOSSIBILE ESEGUIRE IL FILE '{progname}'",
"(the file does not exist)": "(file inesistente)",
"(... plus other %d lines ...)": "(... più altre %d righe ...)",
"SUMMARY": "RIEPILOGO",
"Summary": "Riepilogo",
"summary": "riepilogo",
"successes": "successi",
"Successes": "Successi",
"warnings": "avvertimenti",
"Warnings": "Avvertimenti",
"errors": "errori",
"Errors": "Errori",
"<program>": "<programma>",
"CODE": "CODICE",
"TOTAL": "TOTALE",
"Test number %d doesn't exist.": "Il test numero %d non esiste.",
"Use './pvcheck info' to list all the available tests.": "Utilizza './pvcheck info' per vedere tutti i test disponibili.",
"Error: Can't export test number %d.": "Errore: Impossibile esportare il test numero %d.",
"file containing the tests to be performed (default pvcheck.test).": "file contenente i test da eseguire (default pvcheck.test).",
"file containing the tests to be performed.": "file contenente i test da eseguire.",
"program to be tested.": "programma da testare.",
"any arguments of the program to be tested.": "eventuali argomenti del programma da testare.",
"[run|info|export] --help for command help (default=run)": "[run|info|export] --help per l'help di un comando (default=run)",
"test a program.": "testa un programma.",
"Test Result": "Risultato Test",
"positional arguments": "argomenti posizionali",
"optional arguments": "argomenti opzionali",
"show this help message and exit": "mostra questo messaggio ed esce",
"show program's version number and exit": "mostra la versione del programma ed esce",
"unrecognized arguments: %s'": "argomento %s non riconosciuto",
"not allowed with argument %s": "l'argomento %s non é consentito",
"ignored explicit argument %r": "ignorato l'argomento esplicito %r",
"too few arguments": "troppi pochi argomenti ",
"argument %s is required": "é necessario l'argomento %s",
"one of the arguments %s is required": "é necessario uno dei seguenti argomenti %s",
"expected one argument": "atteso un argomento",
"expected at most one argument": "atteso al piú un argomento",
"expected at least one argument": "atteso almeno un argomento",
"expected %s argument(s)": "atteso argomento %s",
"ambiguous option: %s could match %s": "opzione ambigua: %s puó coincidere con %s",
"unexpected option string: %s": "opzione string non attesa: %s",
"%r is not callable": "%r non é chiamabile",
"invalid %s value: %r": "non valido %s valore: %r",
"invalid choice: %r (choose from %s)": "scelta non valida: %r (i parametri disponibili sono %s)",
"%s: error: %s": "%s: errore: %s",
"unrecognized arguments: %s": "argomento non riconosciuto: %s",
"Command line: %s": "Riga di comando: %s",
"FAILED TO RUN THE FILE '{progname}' the file does not exist)": "ERRORE NELL'ESECUZIONE DEL FILE '{progname}' il file non esiste)",
"Lines %d-%d/%d": "Righe %d-%d/%d",
"PROCESS ENDED WITH A FAILURE (SEGMENTATION FAULT)": "IL PROCESSO E` TERMINATO CON UN FALLIMENTO (SEGMENTATION FAULT)",
"PROGRAM'S OUTPUT:": "OUTPUT DEL PROGRAMMA:",
"SUMMARY:": "SOMMARIO:",
"TEMP_FILE": "FILE_TEMPORANEO",
"Temporary file:": "File temporaneo:",
"Test case %d of %d (%s)": "Caso di test %d di %d (%s) ",
"Test title: %s": "Titolo del test: %s",
"[Press 'h' for help]": "[Premere 'h' per l'aiuto]",
"enables the interactive mode.": "abilita la modalita` interattiva.",
"expected": "atteso",
"missing line": "riga mancante",
"passes": "passati",
"this line was not expected": "questa riga e` inattesa",
"TEST RUNNING": "TEST IN ESECUZIONE",
"TEST COMPLETED": "TEST TERMINATO",
"section [%s] is missing": "sezione [%s] non trovata",
"execution failed": "esecuzione fallita",
"%(prog)s: error: %(message)s\n": "%(prog)s: errore: %(message)s\n",
"the following arguments are required: %s": "i seguenti argomenti sono richiesti: %s",
"usage: ": "utilizzo: ",
'file containing the tests to be exported.': "file contenente i test da salvare.",
"number of the test to export as returned by the 'info' command.": "numero del test da salvare come restituito dal comando 'info'."
}
}
############################################################
# Setup and shutdown
############################################################
def _install_lang():
global _lang
global _strings
_lang = os.environ.get('LANG', 'en').partition('_')[0]
try:
_strings = _translations[_lang]
except KeyError:
# Fall back to English, where the translation is the identity
# function.
_lang = 'en'
for t in _translations.values():
_strings = {k: k for k in t}
break
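# For example, LANG=it_IT.UTF-8 yields _lang == 'it' and selects the Italian
# table above, while an unsupported value such as LANG=de_DE falls back to
# _lang == 'en' with every key mapping to itself.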
_install_lang()
@atexit.register
def _save_missing_translations():
if _missing_translation:
import pprint
with open('missing_translations.txt', 'wt') as f:
d = dict.fromkeys(_missing_translation, "")
pprint.pprint({_lang: d}, stream=f)
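# The resulting missing_translations.txt holds a pretty-printed dict of the
# form {'it': {'some untranslated string': '', ...}} (illustrative), which can
# be merged back into _translations above.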
| [] | [] | ["LANG"] | [] | ["LANG"] | python | 1 | 0 |